331 files changed, 3499 insertions, 2345 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host new file mode 100644 index 00000000000..29a4f892e43 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-scsi_host @@ -0,0 +1,13 @@ +What: /sys/class/scsi_host/hostX/isci_id +Date: June 2011 +Contact: Dave Jiang <dave.jiang@intel.com> +Description: + This file contains the enumerated host ID for the Intel + SCU controller. The Intel(R) C600 Series Chipset SATA/SAS + Storage Control Unit embeds up to two 4-port controllers in + a single PCI device. The controllers are enumerated in order + which usually means the lowest number scsi_host corresponds + with the first controller, but this association is not + guaranteed. The 'isci_id' attribute unambiguously identifies + the controller index: '0' for the first controller, + '1' for the second. diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml index 85164016ed2..23fdf79f8cf 100644 --- a/Documentation/DocBook/media/v4l/controls.xml +++ b/Documentation/DocBook/media/v4l/controls.xml @@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-h264-vui-sar-idc"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant> </entry> <entry>enum v4l2_mpeg_video_h264_vui_sar_idc</entry> </row> @@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-h264-level"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant> </entry> <entry>enum v4l2_mpeg_video_h264_level</entry> </row> @@ -1641,7 +1641,7 @@ Possible values are:</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-mpeg4-level"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant> </entry> <entry>enum v4l2_mpeg_video_mpeg4_level</entry> </row> @@ -1689,9 +1689,9 @@ Possible values are:</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-h264-profile"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant> </entry> - <entry>enum v4l2_mpeg_h264_profile</entry> + <entry>enum v4l2_mpeg_video_h264_profile</entry> </row> <row><entry spanname="descr">The profile information for H264. Applicable to the H264 encoder. @@ -1774,9 +1774,9 @@ Possible values are:</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-mpeg4-profile"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant> </entry> - <entry>enum v4l2_mpeg_mpeg4_profile</entry> + <entry>enum v4l2_mpeg_video_mpeg4_profile</entry> </row> <row><entry spanname="descr">The profile information for MPEG4. Applicable to the MPEG4 encoder. @@ -1820,9 +1820,9 @@ Applicable to the encoder. </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-multi-slice-mode"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant> </entry> - <entry>enum v4l2_mpeg_multi_slice_mode</entry> + <entry>enum v4l2_mpeg_video_multi_slice_mode</entry> </row> <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices. Applicable to the encoder. 
@@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-h264-loop-filter-mode"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant> </entry> - <entry>enum v4l2_mpeg_h264_loop_filter_mode</entry> + <entry>enum v4l2_mpeg_video_h264_loop_filter_mode</entry> </row> <row><entry spanname="descr">Loop filter mode for H264 encoder. Possible values are:</entry> @@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-h264-entropy-mode"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant> </entry> - <entry>enum v4l2_mpeg_h264_symbol_mode</entry> + <entry>enum v4l2_mpeg_video_h264_entropy_mode</entry> </row> <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC. Applicable to the H264 encoder. @@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-video-header-mode"> <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant> </entry> - <entry>enum v4l2_mpeg_header_mode</entry> + <entry>enum v4l2_mpeg_video_header_mode</entry> </row> <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is it returned together with the first frame. Applicable to encoders. @@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE Applicable to the H264 encoder.</entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-mfc51-video-frame-skip-mode"> <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant> </entry> - <entry>enum v4l2_mpeg_mfc51_frame_skip_mode</entry> + <entry>enum v4l2_mpeg_mfc51_video_frame_skip_mode</entry> </row> <row><entry spanname="descr"> Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then @@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders. </entry> </row> <row><entry></entry></row> - <row> + <row id="v4l2-mpeg-mfc51-video-force-frame-type"> <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant> </entry> - <entry>enum v4l2_mpeg_mfc51_force_frame_type</entry> + <entry>enum v4l2_mpeg_mfc51_video_force_frame_type</entry> </row> <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders. Possible values are:</entry> diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 6f3c598971f..06eb6d957c8 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -380,7 +380,7 @@ will be charged as a new owner of it. 5.2 stat file -5.2.1 memory.stat file includes following statistics +memory.stat file includes following statistics # per-memory cgroup local status cache - # of bytes of page cache memory. @@ -438,89 +438,6 @@ Note: file_mapped is accounted only when the memory cgroup is owner of page cache.) -5.2.2 memory.vmscan_stat - -memory.vmscan_stat includes statistics information for memory scanning and -freeing, reclaiming. The statistics shows memory scanning information since -memory cgroup creation and can be reset to 0 by writing 0 as - - #echo 0 > ../memory.vmscan_stat - -This file contains following statistics. 
- -[param]_[file_or_anon]_pages_by_[reason]_[under_heararchy] -[param]_elapsed_ns_by_[reason]_[under_hierarchy] - -For example, - - scanned_file_pages_by_limit indicates the number of scanned - file pages at vmscan. - -Now, 3 parameters are supported - - scanned - the number of pages scanned by vmscan - rotated - the number of pages activated at vmscan - freed - the number of pages freed by vmscan - -If "rotated" is high against scanned/freed, the memcg seems busy. - -Now, 2 reason are supported - - limit - the memory cgroup's limit - system - global memory pressure + softlimit - (global memory pressure not under softlimit is not handled now) - -When under_hierarchy is added in the tail, the number indicates the -total memcg scan of its children and itself. - -elapsed_ns is a elapsed time in nanosecond. This may include sleep time -and not indicates CPU usage. So, please take this as just showing -latency. - -Here is an example. - -# cat /cgroup/memory/A/memory.vmscan_stat -scanned_pages_by_limit 9471864 -scanned_anon_pages_by_limit 6640629 -scanned_file_pages_by_limit 2831235 -rotated_pages_by_limit 4243974 -rotated_anon_pages_by_limit 3971968 -rotated_file_pages_by_limit 272006 -freed_pages_by_limit 2318492 -freed_anon_pages_by_limit 962052 -freed_file_pages_by_limit 1356440 -elapsed_ns_by_limit 351386416101 -scanned_pages_by_system 0 -scanned_anon_pages_by_system 0 -scanned_file_pages_by_system 0 -rotated_pages_by_system 0 -rotated_anon_pages_by_system 0 -rotated_file_pages_by_system 0 -freed_pages_by_system 0 -freed_anon_pages_by_system 0 -freed_file_pages_by_system 0 -elapsed_ns_by_system 0 -scanned_pages_by_limit_under_hierarchy 9471864 -scanned_anon_pages_by_limit_under_hierarchy 6640629 -scanned_file_pages_by_limit_under_hierarchy 2831235 -rotated_pages_by_limit_under_hierarchy 4243974 -rotated_anon_pages_by_limit_under_hierarchy 3971968 -rotated_file_pages_by_limit_under_hierarchy 272006 -freed_pages_by_limit_under_hierarchy 2318492 -freed_anon_pages_by_limit_under_hierarchy 962052 -freed_file_pages_by_limit_under_hierarchy 1356440 -elapsed_ns_by_limit_under_hierarchy 351386416101 -scanned_pages_by_system_under_hierarchy 0 -scanned_anon_pages_by_system_under_hierarchy 0 -scanned_file_pages_by_system_under_hierarchy 0 -rotated_pages_by_system_under_hierarchy 0 -rotated_anon_pages_by_system_under_hierarchy 0 -rotated_file_pages_by_system_under_hierarchy 0 -freed_pages_by_system_under_hierarchy 0 -freed_anon_pages_by_system_under_hierarchy 0 -freed_file_pages_by_system_under_hierarchy 0 -elapsed_ns_by_system_under_hierarchy 0 - 5.3 swappiness Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index c4a6e148732..4dc46547766 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -592,3 +592,11 @@ Why: In 3.0, we can now autodetect internal 3G device and already have interface that was used by acer-wmi driver. It will replaced by information log when acer-wmi initial. Who: Lee, Chun-Yi <jlee@novell.com> + +---------------------------- +What: The XFS nodelaylog mount option +When: 3.3 +Why: The delaylog mode that has been the default since 2.6.39 has proven + stable, and the old code is in the way of additional improvements in + the log code. 
+Who: Christoph Hellwig <hch@lst.de> diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065 index 44b4f61e04f..c11f64a1f2a 100644 --- a/Documentation/hwmon/max16065 +++ b/Documentation/hwmon/max16065 @@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate the devices explicitly. Please see Documentation/i2c/instantiating-devices for details. +WARNING: Do not access chip registers using the i2cdump command, and do not use +any of the i2ctools commands on a command register (0xa5 to 0xac). The chips +supported by this driver interpret any access to a command register (including +read commands) as request to execute the command in question. This may result in +power loss, board resets, and/or Flash corruption. Worst case, your board may +turn into a brick. + Sysfs entries ------------- diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 845a191004b..54078ed96b3 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -319,4 +319,6 @@ Code Seq#(hex) Include File Comments <mailto:thomas@winischhofer.net> 0xF4 00-1F video/mbxfb.h mbxfb <mailto:raph@8d.com> +0xF6 all LTTng Linux Trace Toolkit Next Generation + <mailto:mathieu.desnoyers@efficios.com> 0xFD all linux/dm-ioctl.h diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 614d0382e2c..854ed5ca7e3 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2086,9 +2086,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Override pmtimer IOPort with a hex value. e.g. pmtmr=0x508 - pnp.debug [PNP] - Enable PNP debug messages. This depends on the - CONFIG_PNP_DEBUG_MESSAGES option. + pnp.debug=1 [PNP] + Enable PNP debug messages (depends on the + CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time + via /sys/module/pnp/parameters/debug. We always show + current resource usage; turning this on also shows + possible settings and some assignment information. pnpacpi= [ACPI] { off } diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt index 8006c227fda..25320bf19c8 100644 --- a/Documentation/networking/dmfe.txt +++ b/Documentation/networking/dmfe.txt @@ -1,3 +1,5 @@ +Note: This driver doesn't have a maintainer. + Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux. This program is free software; you can redistribute it and/or @@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases. 
Authors: Sten Wang <sten_wang@davicom.com.tw > : Original Author -Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer Contributors: diff --git a/MAINTAINERS b/MAINTAINERS index 28f65c249b9..ae8820e173a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1278,7 +1278,6 @@ F: drivers/input/misc/ati_remote2.c ATLX ETHERNET DRIVERS M: Jay Cliburn <jcliburn@gmail.com> M: Chris Snook <chris.snook@gmail.com> -M: Jie Yang <jie.yang@atheros.com> L: netdev@vger.kernel.org W: http://sourceforge.net/projects/atl1 W: http://atl1.sourceforge.net @@ -1574,7 +1573,6 @@ F: drivers/scsi/bfa/ BROCADE BNA 10 GIGABIT ETHERNET DRIVER M: Rasesh Mody <rmody@brocade.com> -M: Debashis Dutt <ddutt@brocade.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/bna/ @@ -1758,7 +1756,6 @@ F: Documentation/zh_CN/ CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti <benve@cisco.com> -M: Vasanthy Kolluri <vkolluri@cisco.com> M: Roopa Prabhu <roprabhu@cisco.com> M: David Wang <dwang2@cisco.com> S: Supported @@ -3262,6 +3259,17 @@ F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ +INTEL C600 SERIES SAS CONTROLLER DRIVER +M: Intel SCU Linux support <intel-linux-scu@intel.com> +M: Dan Williams <dan.j.williams@intel.com> +M: Dave Jiang <dave.jiang@intel.com> +M: Ed Nadolski <edmund.nadolski@intel.com> +L: linux-scsi@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git +S: Maintained +F: drivers/scsi/isci/ +F: firmware/isci/ + INTEL IDLE DRIVER M: Len Brown <lenb@kernel.org> L: linux-pm@lists.linux-foundation.org @@ -4404,7 +4412,8 @@ L: netfilter@vger.kernel.org L: coreteam@netfilter.org W: http://www.netfilter.org/ W: http://www.iptables.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git S: Supported F: include/linux/netfilter* F: include/linux/netfilter/ @@ -4774,7 +4783,7 @@ F: drivers/net/wireless/orinoco/ OSD LIBRARY and FILESYSTEM M: Boaz Harrosh <bharrosh@panasas.com> -M: Benny Halevy <bhalevy@panasas.com> +M: Benny Halevy <bhalevy@tonian.com> L: osd-dev@open-osd.org W: http://open-osd.org T: git git://git.open-osd.org/open-osd.git @@ -7200,6 +7209,9 @@ W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices S: Supported F: Documentation/hwmon/wm83?? 
F: drivers/leds/leds-wm83*.c +F: drivers/input/misc/wm831x-on.c +F: drivers/input/touchscreen/wm831x-ts.c +F: drivers/input/touchscreen/wm97*.c F: drivers/mfd/wm8*.c F: drivers/power/wm83*.c F: drivers/rtc/rtc-wm83*.c @@ -7209,6 +7221,7 @@ F: drivers/watchdog/wm83*_wdt.c F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* +F: include/linux/wm97xx.h F: include/sound/wm????.h F: sound/soc/codecs/wm* @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = "Divemaster Edition" # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 60cde53d266..8bb936226de 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE def_bool y config GENERIC_GPIO - def_bool y + bool config ZONE_DMA bool diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index bfa706ffd96..99a6ed7e1bf 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -45,8 +45,13 @@ #define L2X0_CLEAN_INV_LINE_PA 0x7F0 #define L2X0_CLEAN_INV_LINE_IDX 0x7F8 #define L2X0_CLEAN_INV_WAY 0x7FC -#define L2X0_LOCKDOWN_WAY_D 0x900 -#define L2X0_LOCKDOWN_WAY_I 0x904 +/* + * The lockdown registers repeat 8 times for L310, the L210 has only one + * D and one I lockdown register at 0x0900 and 0x0904. + */ +#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 +#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 +#define L2X0_LOCKDOWN_STRIDE 0x08 #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S index 6bd83ed90af..d87bfc397d3 100644 --- a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S +++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S @@ -8,7 +8,6 @@ * published by the Free Software Foundation. 
*/ -#include <mach/hardware.h> #include <asm/hardware/entry-macro-gic.S> .macro disable_fiq diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h index 58bb03ae3cf..4f16c9b79f7 100644 --- a/arch/arm/mach-cns3xxx/include/mach/system.h +++ b/arch/arm/mach-cns3xxx/include/mach/system.h @@ -13,7 +13,6 @@ #include <linux/io.h> #include <asm/proc-fns.h> -#include <mach/hardware.h> static inline void arch_idle(void) { diff --git a/arch/arm/mach-cns3xxx/include/mach/uncompress.h b/arch/arm/mach-cns3xxx/include/mach/uncompress.h index de8ead9b91f..a91b6058ab4 100644 --- a/arch/arm/mach-cns3xxx/include/mach/uncompress.h +++ b/arch/arm/mach-cns3xxx/include/mach/uncompress.h @@ -8,7 +8,6 @@ */ #include <asm/mach-types.h> -#include <mach/hardware.h> #include <mach/cns3xxx.h> #define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00)) diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 06fd25d70ae..0f8fca48a5e 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c @@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata) return &cns3xxx_pcie[root->domain]; } -static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev) +static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev) { return sysdata_to_cnspci(dev->sysdata); } diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index bd5394537c8..008d51407cd 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = { }, }; +#ifdef CONFIG_MTD +static void da850_evm_m25p80_notify_add(struct mtd_info *mtd) +{ + char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; + size_t retlen; + + if (!strcmp(mtd->name, "MAC-Address")) { + mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr); + if (retlen == ETH_ALEN) + pr_info("Read MAC addr from SPI Flash: %pM\n", + mac_addr); + } +} + +static struct mtd_notifier da850evm_spi_notifier = { + .add = da850_evm_m25p80_notify_add, +}; + +static void da850_evm_setup_mac_addr(void) +{ + register_mtd_user(&da850evm_spi_notifier); +} +#else +static void da850_evm_setup_mac_addr(void) { } +#endif + static struct mtd_partition da850_evm_norflash_partition[] = { { .name = "bootloaders + env", @@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void) if (ret) pr_warning("da850_evm_init: sata registration failed: %d\n", ret); + + da850_evm_setup_mac_addr(); } #ifdef CONFIG_SERIAL_8250_CONSOLE diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h index 47fd0bc3d3e..fa59c097223 100644 --- a/arch/arm/mach-davinci/include/mach/psc.h +++ b/arch/arm/mach-davinci/include/mach/psc.h @@ -243,7 +243,7 @@ #define PSC_STATE_DISABLE 2 #define PSC_STATE_ENABLE 3 -#define MDSTAT_STATE_MASK 0x1f +#define MDSTAT_STATE_MASK 0x3f #define MDCTL_FORCE BIT(31) #ifndef __ASSEMBLER__ diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S index fb5e72b532b..5f1e045a3ad 100644 --- a/arch/arm/mach-davinci/sleep.S +++ b/arch/arm/mach-davinci/sleep.S @@ -217,7 +217,11 @@ ddr2clk_stop_done: ENDPROC(davinci_ddr_psc_config) CACHE_FLUSH: - .word arm926_flush_kern_cache_all +#ifdef CONFIG_CPU_V6 + .word v6_flush_kern_cache_all +#else + .word arm926_flush_kern_cache_all +#endif ENTRY(davinci_cpu_suspend_sz) .word . 
- davinci_cpu_suspend diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 2fbbdd5eac3..fcf0ae95651 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -337,15 +337,15 @@ static unsigned long timer_reload; static void integrator_clocksource_init(u32 khz) { void __iomem *base = (void __iomem *)TIMER2_VA_BASE; - u32 ctrl = TIMER_CTRL_ENABLE; + u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; if (khz >= 1500) { khz /= 16; - ctrl = TIMER_CTRL_DIV16; + ctrl |= TIMER_CTRL_DIV16; } - writel(ctrl, base + TIMER_CTRL); writel(0xffff, base + TIMER_LOAD); + writel(ctrl, base + TIMER_CTRL); clocksource_mmio_init(base + TIMER_VALUE, "timer2", khz * 1000, 200, 16, clocksource_mmio_readl_down); diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index ffd55b1c439..b9b84468314 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = { .name = "gpt12_fck", .ops = &clkops_null, .parent = &secure_32k_fck, + .clkdm_name = "wkup_clkdm", .recalc = &followparent_recalc, }; @@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = { .name = "wdt1_fck", .ops = &clkops_null, .parent = &secure_32k_fck, + .clkdm_name = "wkup_clkdm", .recalc = &followparent_recalc, }; diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index 2af0e3f00ce..c0b6fbda340 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c @@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void) } else if (cpu_is_omap446x()) { cpu_mask = RATE_IN_4460; cpu_clkflg = CK_446X; + } else { + return 0; } clk_init(&omap2_clk_functions); - omap2_clk_disable_clkdm_control(); + + /* + * Must stay commented until all OMAP SoC drivers are + * converted to runtime PM, or drivers may start crashing + * + * omap2_clk_disable_clkdm_control(); + */ for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks); c++) diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c index ab7db083f97..8f0890685d7 100644 --- a/arch/arm/mach-omap2/clockdomain.c +++ b/arch/arm/mach-omap2/clockdomain.c @@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm) spin_lock_irqsave(&clkdm->lock, flags); clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; ret = arch_clkdm->clkdm_wakeup(clkdm); + ret |= pwrdm_state_switch(clkdm->pwrdm.ptr); spin_unlock_irqrestore(&clkdm->lock, flags); return ret; } @@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm) spin_lock_irqsave(&clkdm->lock, flags); clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; arch_clkdm->clkdm_deny_idle(clkdm); + pwrdm_state_switch(clkdm->pwrdm.ptr); spin_unlock_irqrestore(&clkdm->lock, flags); } diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index 16743c7d6e8..408193d8e04 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = { .pa_end = OMAP243X_HS_BASE + SZ_4K - 1, .flags = ADDR_TYPE_RT }, + { } }; /* l4_core ->usbhsotg interface */ diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 3feb35911a3..472bf22d5e8 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state) } else { hwsup = 
clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]); clkdm_wakeup(pwrdm->pwrdm_clkdms[0]); - pwrdm_wait_transition(pwrdm); sleep_switch = FORCEWAKEUP_SWITCH; } } @@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state) return ret; } - pwrdm_wait_transition(pwrdm); pwrdm_state_switch(pwrdm); err: return ret; diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 9af08473bf1..ef71fdd40fc 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused) /** * pwrdm_init - set up the powerdomain layer - * @pwrdm_list: array of struct powerdomain pointers to register + * @pwrdms: array of struct powerdomain pointers to register * @custom_funcs: func pointers for arch specific implementations * - * Loop through the array of powerdomains @pwrdm_list, registering all - * that are available on the current CPU. If pwrdm_list is supplied - * and not null, all of the referenced powerdomains will be - * registered. No return value. XXX pwrdm_list is not really a - * "list"; it is an array. Rename appropriately. + * Loop through the array of powerdomains @pwrdms, registering all + * that are available on the current CPU. Also, program all + * powerdomain target state as ON; this is to prevent domains from + * hitting low power states (if bootloader has target states set to + * something other than ON) and potentially even losing context while + * PM is not fully initialized. The PM late init code can then program + * the desired target state for all the power domains. No return + * value. */ -void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs) +void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs) { struct powerdomain **p = NULL; + struct powerdomain *temp_p; if (!custom_funcs) WARN(1, "powerdomain: No custom pwrdm functions registered\n"); else arch_pwrdm = custom_funcs; - if (pwrdm_list) { - for (p = pwrdm_list; *p; p++) + if (pwrdms) { + for (p = pwrdms; *p; p++) _pwrdm_register(*p); } + + list_for_each_entry(temp_p, &pwrdm_list, node) + pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON); } /** diff --git a/arch/arm/mach-prima2/clock.c b/arch/arm/mach-prima2/clock.c index f9a2aaf63f7..615a4e75cea 100644 --- a/arch/arm/mach-prima2/clock.c +++ b/arch/arm/mach-prima2/clock.c @@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void) static struct of_device_id clkc_ids[] = { { .compatible = "sirf,prima2-clkc" }, + {}, }; void __init sirfsoc_of_clk_init(void) diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c index c3404cbb6ff..7af254d046b 100644 --- a/arch/arm/mach-prima2/irq.c +++ b/arch/arm/mach-prima2/irq.c @@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void) static struct of_device_id intc_ids[] = { { .compatible = "sirf,prima2-intc" }, + {}, }; void __init sirfsoc_of_irq_init(void) diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index d074786e83d..492cfa8d261 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c @@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock); static struct of_device_id rstc_ids[] = { { .compatible = "sirf,prima2-rstc" }, + {}, }; static int __init sirfsoc_of_rstc_init(void) diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c index 44027f34a88..ed7ec48d11d 100644 --- a/arch/arm/mach-prima2/timer.c +++ b/arch/arm/mach-prima2/timer.c @@ -190,6 +190,7 @@ static void __init 
sirfsoc_timer_init(void) static struct of_device_id timer_ids[] = { { .compatible = "sirf,prima2-tick" }, + {}, }; static void __init sirfsoc_of_timer_map(void) diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index 52162d59407..2cbf68ef0e8 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -17,7 +17,7 @@ cmp \tmp, # 0x5600 @ Is it ldrsb? orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes tst \tmp, #1 << 11 @ L = 0 -> write - orreq \psr, \psr, #1 << 11 @ yes. + orreq \fsr, \fsr, #1 << 11 @ yes. b do_DataAbort not_thumb: .endm diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 44c086710d2..9ecfdb51195 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -277,6 +277,25 @@ static void l2x0_disable(void) spin_unlock_irqrestore(&l2x0_lock, flags); } +static void __init l2x0_unlock(__u32 cache_id) +{ + int lockregs; + int i; + + if (cache_id == L2X0_CACHE_ID_PART_L310) + lockregs = 8; + else + /* L210 and unknown types */ + lockregs = 1; + + for (i = 0; i < lockregs; i++) { + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + + i * L2X0_LOCKDOWN_STRIDE); + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + + i * L2X0_LOCKDOWN_STRIDE); + } +} + void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) { __u32 aux; @@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) * accessing the below registers will fault. */ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { + /* Make sure that I&D is not locked down when starting */ + l2x0_unlock(cache_id); /* l2x0 controller is disabled */ writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 91bca355cd3..cc7e2d8be9a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, #ifdef CONFIG_HAVE_ARCH_PFN_VALID int pfn_valid(unsigned long pfn) { - return memblock_is_memory(pfn << PAGE_SHIFT); + return memblock_is_memory(__pfn_to_phys(pfn)); } EXPORT_SYMBOL(pfn_valid); #endif diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index 9a6a5385491..02609eee056 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -615,6 +615,9 @@ static int _od_resume_noirq(struct device *dev) return pm_generic_resume_noirq(dev); } +#else +#define _od_suspend_noirq NULL +#define _od_resume_noirq NULL #endif static struct dev_pm_domain omap_device_pm_domain = { diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 052f877b52a..60b47223390 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -31,7 +31,6 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) @@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page, void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); +int or1k_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); +void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); void 
or1k_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); @@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, debug_dma_unmap_page(dev, addr, size, dir, true); } +static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + int i, ents; + struct scatterlist *s; + + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); + BUG_ON(!valid_dma_direction(dir)); + ents = or1k_map_sg(dev, sg, nents, dir, NULL); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + or1k_unmap_sg(dev, sg, nents, dir, NULL); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); + BUG_ON(!valid_dma_direction(dir)); + addr = or1k_map_page(dev, page, offset, size, dir, NULL); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + BUG_ON(!valid_dma_direction(dir)); + or1k_unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, true); +} + static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) @@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev, static inline int dma_supported(struct device *dev, u64 dma_mask) { /* Support 32 bit DMA mask exclusively */ - return dma_mask == 0xffffffffULL; + return dma_mask == DMA_BIT_MASK(32); +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; } static inline int dma_set_mask(struct device *dev, u64 dma_mask) diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h index 54a5c50132e..b79c2b19afb 100644 --- a/arch/openrisc/include/asm/sigcontext.h +++ b/arch/openrisc/include/asm/sigcontext.h @@ -23,16 +23,11 @@ /* This struct is saved by setup_frame in signal.c, to keep the current context while a signal handler is executed. It's restored by sys_sigreturn. - - To keep things simple, we use pt_regs here even though normally you just - specify the list of regs to save. Then we can use copy_from_user on the - entire regs instead of a bunch of get_user's as well... */ struct sigcontext { - struct pt_regs regs; /* needs to be first */ + struct user_regs_struct regs; /* needs to be first */ unsigned long oldmask; - unsigned long usp; /* usp before stacking this gunk on it */ }; #endif /* __ASM_OPENRISC_SIGCONTEXT_H */ diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 968d3ee477e..f1c8ee2895d 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, /* Nothing special to do here... 
*/ } +int or1k_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, + s->length, dir, NULL); + } + + return nents; +} + +void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); + } +} + void or1k_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) @@ -187,5 +214,4 @@ static int __init dma_init(void) return 0; } - fs_initcall(dma_init); diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 5f759c76834..95207ab0c99 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -52,31 +52,25 @@ struct rt_sigframe { static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) { unsigned int err = 0; - unsigned long old_usp; /* Alwys make any pending restarted system call return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; - /* restore the regs from &sc->regs (same as sc, since regs is first) + /* + * Restore the regs from &sc->regs. * (sc is already checked for VERIFY_READ since the sigframe was * checked in sys_sigreturn previously) */ - - if (__copy_from_user(regs, sc, sizeof(struct pt_regs))) + if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long))) + goto badframe; + if (__copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long))) + goto badframe; + if (__copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long))) goto badframe; /* make sure the SM-bit is cleared so user-mode cannot fool us */ regs->sr &= ~SPR_SR_SM; - /* restore the old USP as it was before we stacked the sc etc. - * (we cannot just pop the sigcontext since we aligned the sp and - * stuff after pushing it) - */ - - err |= __get_user(old_usp, &sc->usp); - - regs->sp = old_usp; - /* TODO: the other ports use regs->orig_XX to disable syscall checks * after this completes, but we don't use that mechanism. maybe we can * use it now ? @@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; - unsigned long usp = regs->sp; - /* copy the regs. 
they are first in sc so we can use sc directly */ + /* copy the regs */ - err |= __copy_to_user(sc, regs, sizeof(struct pt_regs)); + err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); + err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long)); + err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long)); /* then some other stuff */ err |= __put_user(mask, &sc->oldmask); - err |= __put_user(usp, &sc->usp); - return err; } diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts index bfa96aa8f2c..d9b776740a6 100644 --- a/arch/powerpc/boot/dts/p1023rds.dts +++ b/arch/powerpc/boot/dts/p1023rds.dts @@ -387,7 +387,7 @@ #size-cells = <1>; compatible = "cfi-flash"; reg = <0x0 0x0 0x02000000>; - bank-width = <1>; + bank-width = <2>; device-width = <1>; partition@0 { label = "ramdisk"; diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig index 980ff8f61fd..3ff5a81c709 100644 --- a/arch/powerpc/configs/85xx/p1023rds_defconfig +++ b/arch/powerpc/configs/85xx/p1023rds_defconfig @@ -171,3 +171,4 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DEV_FSL_CAAM=y diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 10562a5c65b..4311d02a3bf 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -185,3 +185,4 @@ CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DEV_FSL_CAAM=y diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index d32283555b5..c92c204a204 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -100,5 +100,8 @@ CONFIG_DEBUG_INFO=y CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_VIRQ_DEBUG=y CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRYPTO_DEV_TALITOS=y +CONFIG_CRYPTO_DEV_FSL_CAAM=y diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index fcd85d2c72d..a3467bfb767 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -139,6 +139,7 @@ CONFIG_SND=y CONFIG_SND_INTEL8X0=y # CONFIG_SND_PPC is not set # CONFIG_SND_USB is not set +CONFIG_SND_SOC=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 908c941fc24..9693f6ed3da 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -140,6 +140,7 @@ CONFIG_SND=y CONFIG_SND_INTEL8X0=y # CONFIG_SND_PPC is not set # CONFIG_SND_USB is not set +CONFIG_SND_SOC=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index d31ecf346b4..21bebe63df6 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 @@ -10,6 +10,10 @@ config CMPXCHG_LOCAL bool default n +config CMPXCHG_DOUBLE + bool + default n + source "arch/x86/Kconfig.cpu" endmenu diff --git a/arch/um/Makefile b/arch/um/Makefile index fab8121d2b3..c0f712cc7c5 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH) KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ 
-DSUBARCH=\"$(SUBARCH)\" \ $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \ -Din6addr_loopback=kernel_in6addr_loopback \ - -Din6addr_any=kernel_in6addr_any + -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr KBUILD_AFLAGS += $(ARCH_INCLUDE) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index d51c404239a..364c8a15c4c 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) * is done under a spinlock. Checking whether the device is in use is * line->tty->count > 1, also under the spinlock. * - * tty->count serves to decide whether the device should be enabled or - * disabled on the host. If it's equal to 1, then we are doing the + * line->count serves to decide whether the device should be enabled or + * disabled on the host. If it's equal to 0, then we are doing the * first open or last close. Otherwise, open and close just return. */ @@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty) goto out_unlock; err = 0; - if (tty->count > 1) + if (line->count++) goto out_unlock; - spin_unlock(&line->count_lock); - + BUG_ON(tty->driver_data); tty->driver_data = line; line->tty = tty; + spin_unlock(&line->count_lock); err = enable_chan(line); - if (err) + if (err) /* line_close() will be called by our caller */ return err; INIT_DELAYED_WORK(&line->task, line_timer_cb); @@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty) chan_window_size(&line->chan_list, &tty->winsize.ws_row, &tty->winsize.ws_col); - return err; + return 0; out_unlock: spin_unlock(&line->count_lock); @@ -460,17 +460,16 @@ void line_close(struct tty_struct *tty, struct file * filp) flush_buffer(line); spin_lock(&line->count_lock); - if (!line->valid) - goto out_unlock; + BUG_ON(!line->valid); - if (tty->count > 1) + if (--line->count) goto out_unlock; - spin_unlock(&line->count_lock); - line->tty = NULL; tty->driver_data = NULL; + spin_unlock(&line->count_lock); + if (line->sigio) { unregister_winch(tty); line->sigio = 0; @@ -498,7 +497,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio, spin_lock(&line->count_lock); - if (line->tty != NULL) { + if (line->count) { *error_out = "Device is already open"; goto out; } @@ -722,41 +721,53 @@ struct winch { int pid; struct tty_struct *tty; unsigned long stack; + struct work_struct work; }; -static void free_winch(struct winch *winch, int free_irq_ok) +static void __free_winch(struct work_struct *work) { - if (free_irq_ok) - free_irq(WINCH_IRQ, winch); - - list_del(&winch->list); + struct winch *winch = container_of(work, struct winch, work); + free_irq(WINCH_IRQ, winch); if (winch->pid != -1) os_kill_process(winch->pid, 1); - if (winch->fd != -1) - os_close_file(winch->fd); if (winch->stack != 0) free_stack(winch->stack, 0); kfree(winch); } +static void free_winch(struct winch *winch) +{ + int fd = winch->fd; + winch->fd = -1; + if (fd != -1) + os_close_file(fd); + list_del(&winch->list); + __free_winch(&winch->work); +} + static irqreturn_t winch_interrupt(int irq, void *data) { struct winch *winch = data; struct tty_struct *tty; struct line *line; + int fd = winch->fd; int err; char c; - if (winch->fd != -1) { - err = generic_read(winch->fd, &c, NULL); + if (fd != -1) { + err = generic_read(fd, &c, NULL); if (err < 0) { if (err != -EAGAIN) { + winch->fd = -1; + list_del(&winch->list); + os_close_file(fd); printk(KERN_ERR "winch_interrupt : " "read failed, errno = %d\n", 
-err); printk(KERN_ERR "fd %d is losing SIGWINCH " "support\n", winch->tty_fd); - free_winch(winch, 0); + INIT_WORK(&winch->work, __free_winch); + schedule_work(&winch->work); return IRQ_HANDLED; } goto out; @@ -828,7 +839,7 @@ static void unregister_winch(struct tty_struct *tty) list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); if (winch->tty == tty) { - free_winch(winch, 1); + free_winch(winch); break; } } @@ -844,7 +855,7 @@ static void winch_cleanup(void) list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); - free_winch(winch, 1); + free_winch(winch); } spin_unlock(&winch_handler_lock); diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c index 8ac7146c237..2e1de572860 100644 --- a/arch/um/drivers/xterm.c +++ b/arch/um/drivers/xterm.c @@ -123,6 +123,7 @@ static int xterm_open(int input, int output, int primary, void *d, err = -errno; printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", errno); + close(fd); return err; } close(fd); diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index ae084ad1a3a..1a7d2757fe0 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -42,10 +42,6 @@ extern long subarch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); -extern int get_fpregs(struct user_i387_struct __user *buf, - struct task_struct *child); -extern int set_fpregs(struct user_i387_struct __user *buf, - struct task_struct *child); extern int arch_copy_tls(struct task_struct *new); extern void clear_flushed_tls(struct task_struct *task); diff --git a/arch/um/include/shared/line.h b/arch/um/include/shared/line.h index 72f4f25af24..63df3ca02ac 100644 --- a/arch/um/include/shared/line.h +++ b/arch/um/include/shared/line.h @@ -33,6 +33,7 @@ struct line_driver { struct line { struct tty_struct *tty; spinlock_t count_lock; + unsigned long count; int valid; char *init_str; diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h index b0b4589e0eb..f1e0aa56c52 100644 --- a/arch/um/include/shared/registers.h +++ b/arch/um/include/shared/registers.h @@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs); extern int save_registers(int pid, struct uml_pt_regs *regs); extern int restore_registers(int pid, struct uml_pt_regs *regs); extern int init_registers(int pid); -extern void get_safe_registers(unsigned long *regs); +extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); extern unsigned long get_thread_reg(int reg, jmp_buf *buf); extern int get_fp_registers(int pid, unsigned long *regs); extern int put_fp_registers(int pid, unsigned long *regs); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index fab4371184f..21c1ae7c3d7 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, arch_copy_thread(¤t->thread.arch, &p->thread.arch); } else { - get_safe_registers(p->thread.regs.regs.gp); + get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); p->thread.request.u.thread = current->thread.request.u.thread; handler = new_thread_handler; } diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 701b672c112..c9da32b0c70 100644 --- 
a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -50,23 +50,11 @@ long arch_ptrace(struct task_struct *child, long request, void __user *vp = p; switch (request) { - /* read word at location addr. */ - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - ret = generic_ptrace_peekdata(child, addr, data); - break; - /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: ret = peek_user(child, addr, data); break; - /* write the word at location addr. */ - case PTRACE_POKETEXT: - case PTRACE_POKEDATA: - ret = generic_ptrace_pokedata(child, addr, data); - break; - /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: ret = poke_user(child, addr, data); @@ -107,16 +95,6 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif -#ifdef PTRACE_GETFPREGS - case PTRACE_GETFPREGS: /* Get the child FPU state. */ - ret = get_fpregs(vp, child); - break; -#endif -#ifdef PTRACE_SETFPREGS - case PTRACE_SETFPREGS: /* Set the child FPU state. */ - ret = set_fpregs(vp, child); - break; -#endif case PTRACE_GET_THREAD_AREA: ret = ptrace_get_thread_area(child, addr, vp); break; @@ -154,12 +132,6 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif -#ifdef PTRACE_ARCH_PRCTL - case PTRACE_ARCH_PRCTL: - /* XXX Calls ptrace on the host - needs some SMP thinking */ - ret = arch_prctl(child, data, (void __user *) addr); - break; -#endif default: ret = ptrace_request(child, request, addr, data); if (ret == -EIO) diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index 830fe6a1518..b866b9e3bef 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c @@ -8,6 +8,8 @@ #include <string.h> #include <sys/ptrace.h> #include "sysdep/ptrace.h" +#include "sysdep/ptrace_user.h" +#include "registers.h" int save_registers(int pid, struct uml_pt_regs *regs) { @@ -32,6 +34,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs) /* This is set once at boot time and not changed thereafter */ static unsigned long exec_regs[MAX_REG_NR]; +static unsigned long exec_fp_regs[FP_SIZE]; int init_registers(int pid) { @@ -42,10 +45,14 @@ int init_registers(int pid) return -errno; arch_init_registers(pid); + get_fp_registers(pid, exec_fp_regs); return 0; } -void get_safe_registers(unsigned long *regs) +void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) { memcpy(regs, exec_regs, sizeof(exec_regs)); + + if (fp_regs) + memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); } diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c index d261f170d12..e771398be5f 100644 --- a/arch/um/os-Linux/skas/mem.c +++ b/arch/um/os-Linux/skas/mem.c @@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR]; static int __init init_syscall_regs(void) { - get_safe_registers(syscall_regs); + get_safe_registers(syscall_regs, NULL); syscall_regs[REGS_IP_INDEX] = STUB_CODE + ((unsigned long) &batch_syscall_stub - (unsigned long) &__syscall_stub_start); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index d6e0a2234b8..dee0e8cf8ad 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -373,6 +373,9 @@ void userspace(struct uml_pt_regs *regs) if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) fatal_sigsegv(); + if (put_fp_registers(pid, regs->fp)) + fatal_sigsegv(); + /* Now we set local_using_sysemu to be used for one loop */ local_using_sysemu = get_using_sysemu(); @@ -399,6 +402,12 @@ void userspace(struct uml_pt_regs *regs) fatal_sigsegv(); } 
+ if (get_fp_registers(pid, regs->fp)) { + printk(UM_KERN_ERR "userspace - get_fp_registers failed, " + "errno = %d\n", errno); + fatal_sigsegv(); + } + UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */ if (WIFSTOPPED(status)) { @@ -457,10 +466,11 @@ void userspace(struct uml_pt_regs *regs) } static unsigned long thread_regs[MAX_REG_NR]; +static unsigned long thread_fp_regs[FP_SIZE]; static int __init init_thread_regs(void) { - get_safe_registers(thread_regs); + get_safe_registers(thread_regs, thread_fp_regs); /* Set parent's instruction pointer to start of clone-stub */ thread_regs[REGS_IP_INDEX] = STUB_CODE + (unsigned long) stub_clone_handler - @@ -503,6 +513,13 @@ int copy_context_skas0(unsigned long new_stack, int pid) return err; } + err = put_fp_registers(pid, thread_fp_regs); + if (err < 0) { + printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers " + "failed, pid = %d, err = %d\n", pid, err); + return err; + } + /* set a well known return code for detection of child write failure */ child_data->err = 12345678; diff --git a/arch/um/sys-i386/asm/ptrace.h b/arch/um/sys-i386/asm/ptrace.h index 0273e4d09af..5d2a5911253 100644 --- a/arch/um/sys-i386/asm/ptrace.h +++ b/arch/um/sys-i386/asm/ptrace.h @@ -42,11 +42,6 @@ */ struct user_desc; -extern int get_fpxregs(struct user_fxsr_struct __user *buf, - struct task_struct *child); -extern int set_fpxregs(struct user_fxsr_struct __user *buf, - struct task_struct *tsk); - extern int ptrace_get_thread_area(struct task_struct *child, int idx, struct user_desc __user *user_desc); diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index d23b2d3ea38..3375c271785 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c @@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data) return put_user(tmp, (unsigned long __user *) data); } -int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; @@ -161,7 +161,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) return n; } -int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; @@ -174,7 +174,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) (unsigned long *) &fpregs); } -int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; @@ -190,7 +190,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) return n; } -int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; @@ -206,5 +206,23 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) long subarch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { - return -EIO; + int ret = -EIO; + void __user *datap = (void __user *) data; + 
switch (request) { + case PTRACE_GETFPREGS: /* Get the child FPU state. */ + ret = get_fpregs(datap, child); + break; + case PTRACE_SETFPREGS: /* Set the child FPU state. */ + ret = set_fpregs(datap, child); + break; + case PTRACE_GETFPXREGS: /* Get the child FPU state. */ + ret = get_fpxregs(datap, child); + break; + case PTRACE_SETFPXREGS: /* Set the child FPU state. */ + ret = set_fpxregs(datap, child); + break; + default: + ret = -EIO; + } + return ret; } diff --git a/arch/um/sys-i386/shared/sysdep/ptrace.h b/arch/um/sys-i386/shared/sysdep/ptrace.h index d50e62e0707..c398a507611 100644 --- a/arch/um/sys-i386/shared/sysdep/ptrace.h +++ b/arch/um/sys-i386/shared/sysdep/ptrace.h @@ -53,6 +53,7 @@ extern int sysemu_supported; struct uml_pt_regs { unsigned long gp[MAX_REG_NR]; + unsigned long fp[HOST_FPX_SIZE]; struct faultinfo faultinfo; long syscall; int is_user; diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c index f43613643cd..4005506834f 100644 --- a/arch/um/sys-x86_64/ptrace.c +++ b/arch/um/sys-x86_64/ptrace.c @@ -145,7 +145,7 @@ int is_syscall(unsigned long addr) return instr == 0x050f; } -int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; long fpregs[HOST_FP_SIZE]; @@ -162,7 +162,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) return n; } -int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; long fpregs[HOST_FP_SIZE]; @@ -182,12 +182,16 @@ long subarch_ptrace(struct task_struct *child, long request, void __user *datap = (void __user *) data; switch (request) { - case PTRACE_GETFPXREGS: /* Get the child FPU state. */ + case PTRACE_GETFPREGS: /* Get the child FPU state. */ ret = get_fpregs(datap, child); break; - case PTRACE_SETFPXREGS: /* Set the child FPU state. */ + case PTRACE_SETFPREGS: /* Set the child FPU state. */ ret = set_fpregs(datap, child); break; + case PTRACE_ARCH_PRCTL: + /* XXX Calls ptrace on the host - needs some SMP thinking */ + ret = arch_prctl(child, data, (void __user *) addr); + break; } return ret; diff --git a/arch/um/sys-x86_64/shared/sysdep/ptrace.h b/arch/um/sys-x86_64/shared/sysdep/ptrace.h index fdba5457947..8ee8f8e12af 100644 --- a/arch/um/sys-x86_64/shared/sysdep/ptrace.h +++ b/arch/um/sys-x86_64/shared/sysdep/ptrace.h @@ -85,6 +85,7 @@ struct uml_pt_regs { unsigned long gp[MAX_REG_NR]; + unsigned long fp[HOST_FP_SIZE]; struct faultinfo faultinfo; long syscall; int is_user; diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 4554cc6fb96..091508b533b 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -16,7 +16,6 @@ #endif .macro altinstruction_entry orig alt feature orig_len alt_len - .align 8 .long \orig - . .long \alt - . 
.word \feature diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 23fb6d79f20..37ad100a221 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -48,9 +48,6 @@ struct alt_instr { u16 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ u8 replacementlen; /* length of new instruction, <= instrlen */ -#ifdef CONFIG_X86_64 - u32 pad2; -#endif }; extern void alternative_instructions(void); @@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end) \ "661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ - _ASM_ALIGN "\n" \ " .long 661b - .\n" /* label */ \ " .long 663f - .\n" /* new instruction */ \ " .word " __stringify(feature) "\n" /* feature bit */ \ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 4258aac99a6..88b23a43f34 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) asm goto("1: jmp %l[t_no]\n" "2:\n" ".section .altinstructions,\"a\"\n" - _ASM_ALIGN "\n" " .long 1b - .\n" " .long 0\n" /* no replacement */ " .word %P0\n" /* feature bit */ @@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) asm volatile("1: movb $0,%0\n" "2:\n" ".section .altinstructions,\"a\"\n" - _ASM_ALIGN "\n" " .long 1b - .\n" " .long 3f - .\n" " .word %P1\n" /* feature bit */ diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index a518c0a4504..c59cc97fe6c 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); #elif defined(__x86_64__) __asm__ ( - "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" + "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]" : [lo]"=a"(product), [hi]"=d"(tmp) : "0"(delta), diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4ee3abf20ed..cfa62ec090e 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_store(entry, regs->ip); + if (!current->mm) + return; + if (perf_callchain_user32(regs, entry)) return; diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index c95330267f0..039d91315bc 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root) */ if (bus) { struct pci_bus *child; - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child, child->self->pcie_mpss); + list_for_each_entry(child, &bus->children, node) { + struct pci_dev *self = child->self; + if (!self) + continue; + + pcie_bus_configure_settings(child, self->pcie_mpss); + } } if (!bus) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 20a61427506..3dd53f997b1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1721,10 +1721,8 @@ void __init xen_setup_machphys_mapping(void) machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } #ifdef CONFIG_X86_32 - if ((machine_to_phys_mapping + machine_to_phys_nr) - < machine_to_phys_mapping) - machine_to_phys_nr = (unsigned long *)NULL - - machine_to_phys_mapping; + WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) + < 
machine_to_phys_mapping); #endif } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index df118a825f3..46d6d21dbdb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, PFN_UP(start_pci), PFN_DOWN(last)); return identity; } + +static unsigned long __init xen_get_max_pages(void) +{ + unsigned long max_pages = MAX_DOMAIN_PAGES; + domid_t domid = DOMID_SELF; + int ret; + + ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); + if (ret > 0) + max_pages = ret; + return min(max_pages, MAX_DOMAIN_PAGES); +} + /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ @@ -292,6 +305,14 @@ char * __init xen_memory_setup(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + extra_limit = xen_get_max_pages(); + if (max_pfn + extra_pages > extra_limit) { + if (extra_limit > max_pfn) + extra_pages = extra_limit - max_pfn; + else + extra_pages = 0; + } + extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); /* diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index e79dbb95482..041d4fe9dfe 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -32,6 +32,7 @@ #include <xen/page.h> #include <xen/events.h> +#include <xen/hvc-console.h> #include "xen-ops.h" #include "mmu.h" @@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) unsigned cpu; unsigned int i; + if (skip_ioapic_setup) { + char *m = (max_cpus == 0) ? + "The nosmp parameter is incompatible with Xen; " \ + "use Xen dom0_max_vcpus=1 parameter" : + "The noapic parameter is incompatible with Xen"; + + xen_raw_printk(m); + panic(m); + } xen_init_lock_cpu(0); smp_store_cpu_info(0); @@ -522,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) WARN_ON(xen_smp_intr_init(0)); xen_init_lock_cpu(0); - xen_init_spinlocks(); } static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5158c505bef..163b4679556 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void) struct pvclock_vcpu_time_info *src; cycle_t ret; - src = &get_cpu_var(xen_vcpu)->time; + preempt_disable_notrace(); + src = &__get_cpu_var(xen_vcpu)->time; ret = pvclock_clocksource_read(src); - put_cpu_var(xen_vcpu); + preempt_enable_notrace(); return ret; } diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index 22a2093b586..b040b0e518c 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -113,11 +113,13 @@ xen_iret_start_crit: /* * If there's something pending, mask events again so we can - * jump back into xen_hypervisor_callback + * jump back into xen_hypervisor_callback. Otherwise do not + * touch XEN_vcpu_info_mask. 
*/ - sete XEN_vcpu_info_mask(%eax) + jne 1f + movb $1, XEN_vcpu_info_mask(%eax) - popl %eax +1: popl %eax /* * From this point on the registers are restored and the stack diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index bc533dde16c..f895a244ca7 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -121,7 +121,7 @@ /* Maximum sleep allowed via Sleep() operator */ -#define ACPI_MAX_SLEEP 20000 /* Two seconds */ +#define ACPI_MAX_SLEEP 2000 /* Two seconds */ /****************************************************************************** * diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index c34aa51af4e..e3f47872ec2 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -13,6 +13,7 @@ config ACPI_APEI_GHES bool "APEI Generic Hardware Error Source" depends on ACPI_APEI && X86 select ACPI_HED + select IRQ_WORK select LLIST select GENERIC_ALLOCATOR help diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 8041248fce9..61540360d5c 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -618,7 +618,7 @@ int apei_osc_setup(void) }; capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; - capbuf[OSC_SUPPORT_TYPE] = 0; + capbuf[OSC_SUPPORT_TYPE] = 1; capbuf[OSC_CONTROL_TYPE] = 0; if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0eef4da1ac6..20663f8dae4 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev, map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); if (map->work_buf == NULL) { ret = -ENOMEM; - goto err_bus; + goto err_map; } return map; -err_bus: - module_put(map->bus->owner); err_map: kfree(map); err: @@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init); void regmap_exit(struct regmap *map) { kfree(map->work_buf); - module_put(map->bus->owner); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 7b0603eb012..cdc02ac8f41 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu) pr = per_cpu(processors, cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); + if (!pr) + return -ENODEV; + status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cd3a7c726bf..467e4dcb20a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -174,8 +174,10 @@ struct d40_base; * @tasklet: Tasklet that gets scheduled from interrupt context to complete a * transfer and call client callback. * @client: Cliented owned descriptor list. + * @pending_queue: Submitted jobs, to be issued by issue_pending() * @active: Active descriptor. * @queue: Queued jobs. + * @prepare_queue: Prepared jobs. * @dma_cfg: The client configuration of this dma channel. * @configured: whether the dma_cfg configuration is valid * @base: Pointer to the device instance struct. 
@@ -203,6 +205,7 @@ struct d40_chan { struct list_head pending_queue; struct list_head active; struct list_head queue; + struct list_head prepare_queue; struct stedma40_chan_cfg dma_cfg; bool configured; struct d40_base *base; @@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) list_for_each_entry_safe(d, _d, &d40c->client, node) if (async_tx_test_ack(&d->txd)) { - d40_pool_lli_free(d40c, d); d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); @@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) return d; } +/* remove desc from current queue and add it to the pending_queue */ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) { + d40_desc_remove(desc); + desc->is_in_client_list = false; list_add_tail(&desc->node, &d40c->pending_queue); } @@ -803,6 +808,7 @@ done: static void d40_term_all(struct d40_chan *d40c) { struct d40_desc *d40d; + struct d40_desc *_d; /* Release active descriptors */ while ((d40d = d40_first_active_get(d40c))) { @@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } + /* Release client owned descriptors */ + if (!list_empty(&d40c->client)) + list_for_each_entry_safe(d40d, _d, &d40c->client, node) { + d40_desc_remove(d40d); + d40_desc_free(d40c, d40d); + } + + /* Release descriptors in prepare queue */ + if (!list_empty(&d40c->prepare_queue)) + list_for_each_entry_safe(d40d, _d, + &d40c->prepare_queue, node) { + d40_desc_remove(d40d); + d40_desc_free(d40c, d40d); + } + d40c->pending_tx = 0; d40c->busy = false; } @@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data) if (!d40d->cyclic) { if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); d40_desc_free(d40c, d40d); } else { @@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c) u32 event; struct d40_phy_res *phy = d40c->phy_chan; bool is_src; - struct d40_desc *d; - struct d40_desc *_d; - /* Terminate all queued and active transfers */ d40_term_all(d40c); - /* Release client owned descriptors */ - if (!list_empty(&d40c->client)) - list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d40c, d); - d40_desc_remove(d); - d40_desc_free(d40c, d); - } - if (phy == NULL) { chan_err(d40c, "phy == null\n"); return -EINVAL; @@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, goto err; } + /* + * add descriptor to the prepare queue in order to be able + * to free them later in terminate_all + */ + list_add_tail(&desc->node, &chan->prepare_queue); + spin_unlock_irqrestore(&chan->lock, flags); return &desc->txd; @@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->pending_queue); INIT_LIST_HEAD(&d40c->client); + INIT_LIST_HEAD(&d40c->prepare_queue); tasklet_init(&d40c->tasklet, dma_tasklet, (unsigned long) d40c); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 57cd3a406ed..fd7170a9ad2 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -290,6 +290,9 @@ static const struct { {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_NO_MSI}, + {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index 231714def4d..4e24436b0f8 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c 
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc, return 0; } -int __devexit bgpio_remove(struct bgpio_chip *bgc) +int bgpio_remove(struct bgpio_chip *bgc) { int err = gpiochip_remove(&bgc->gc); @@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc) } EXPORT_SYMBOL_GPL(bgpio_remove); -int __devinit bgpio_init(struct bgpio_chip *bgc, - struct device *dev, - unsigned long sz, - void __iomem *dat, - void __iomem *set, - void __iomem *clr, - void __iomem *dirout, - void __iomem *dirin, - bool big_endian) +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, + unsigned long sz, void __iomem *dat, void __iomem *set, + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, + bool big_endian) { int ret; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 802b61ac313..f7c6854eb4d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, { printk(KERN_ERR "panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); - return 0; } EXPORT_SYMBOL(drm_fb_helper_panic); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 8d02d875376..c919cfc8f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) nouveau_gpuobj_ref(NULL, &obj); if (ret) return ret; - } else { + } else + if (USE_SEMA(dev)) { /* map fence bo into channel's vm */ ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, &chan->fence.vma); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index c444cadbf84..2706cb3d871 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, return -ENOMEM; nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); - if (!nvbe->ttm_alloced) + if (!nvbe->ttm_alloced) { + kfree(nvbe->pages); + nvbe->pages = NULL; return -ENOMEM; + } nvbe->nr_pages = 0; while (num_pages--) { @@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); - dma_offset += NV_CTXDMA_PAGE_SIZE; + offset_l += NV_CTXDMA_PAGE_SIZE; } } diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 118261d4927..5e45398a9e2 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; - struct drm_framebuffer *drm_fb = nv_crtc->base.fb; - struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + struct drm_framebuffer *drm_fb; + struct nouveau_framebuffer *fb; int arb_burst, arb_lwm; int ret; + NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + + /* no fb bound */ + if (!atomic && !crtc->fb) { + NV_DEBUG_KMS(dev, "No FB bound\n"); + return 0; + } + + /* If atomic, we want to switch to the fb we were passed, so * now we update pointers to do that. 
(We don't pin; just * assume we're already pinned and update the base address.) @@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, drm_fb = passed_fb; fb = nouveau_framebuffer(passed_fb); } else { + drm_fb = crtc->fb; + fb = nouveau_framebuffer(crtc->fb); /* If not atomic, we can go ahead and pin, and unpin the * old fb we were passed. */ diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 46ad59ea218..5d989073ba6 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_device *dev = nv_crtc->base.dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = nv50_display(dev)->master; - struct drm_framebuffer *drm_fb = nv_crtc->base.fb; - struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + struct drm_framebuffer *drm_fb; + struct nouveau_framebuffer *fb; int ret; NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + /* no fb bound */ + if (!atomic && !crtc->fb) { + NV_DEBUG_KMS(dev, "No FB bound\n"); + return 0; + } + /* If atomic, we want to switch to the fb we were passed, so * now we update pointers to do that. (We don't pin; just * assume we're already pinned and update the base address.) @@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, drm_fb = passed_fb; fb = nouveau_framebuffer(passed_fb); } else { + drm_fb = crtc->fb; + fb = nouveau_framebuffer(crtc->fb); /* If not atomic, we can go ahead and pin, and unpin the * old fb we were passed. */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d8d71a399f5..e8a746712b5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); +void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) +{ + u16 ctl, v; + int cap, err; + + cap = pci_pcie_cap(rdev->pdev); + if (!cap) + return; + + err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); + if (err) + return; + + v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; + + /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it + * to avoid hangs or perfomance issues + */ + if ((v == 0) || (v == 6) || (v == 7)) { + ctl &= ~PCI_EXP_DEVCTL_READRQ; + ctl |= (2 << 12); + pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); + } +} + void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) { /* enable the pflip int */ @@ -1379,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); - WREG32(CP_RB_WPTR, 0); + rdev->cp.wptr = 0; + WREG32(CP_RB_WPTR, rdev->cp.wptr); /* set the wb address wether it's enabled or not */ WREG32(CP_RB_RPTR_ADDR, @@ -1401,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev) WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); rdev->cp.rptr = RREG32(CP_RB_RPTR); - rdev->cp.wptr = RREG32(CP_RB_WPTR); evergreen_cp_start(rdev); rdev->cp.ready = true; @@ -1863,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + evergreen_fix_pci_max_read_req_size(rdev); + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; cc_gc_shader_pipe_config |= 
@@ -3144,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev) } int evergreen_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, struct radeon_fence *fence) + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence *fence) { int r; mutex_lock(&rdev->r600_blit.mutex); rdev->r600_blit.vb_ib = NULL; - r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); if (r) { if (rdev->r600_blit.vb_ib) radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); mutex_unlock(&rdev->r600_blit.mutex); return r; } - evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); evergreen_blit_done_copy(rdev, fence); mutex_unlock(&rdev->r600_blit.mutex); return 0; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index a2e00fa9c61..99fbd793c08 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev); extern void evergreen_mc_program(struct radeon_device *rdev); extern void evergreen_irq_suspend(struct radeon_device *rdev); extern int evergreen_mc_init(struct radeon_device *rdev); +extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); #define EVERGREEN_PFP_UCODE_SIZE 1120 #define EVERGREEN_PM4_UCODE_SIZE 1376 @@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + evergreen_fix_pci_max_read_req_size(rdev); + mc_shared_chmap = RREG32(MC_SHARED_CHMAP); mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); @@ -1184,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); - WREG32(CP_RB0_WPTR, 0); + rdev->cp.wptr = 0; + WREG32(CP_RB0_WPTR, rdev->cp.wptr); /* set the wb address wether it's enabled or not */ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1204,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev) WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); rdev->cp.rptr = RREG32(CP_RB0_RPTR); - rdev->cp.wptr = RREG32(CP_RB0_WPTR); /* ring1 - compute only */ /* Set ring buffer size */ @@ -1217,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); - WREG32(CP_RB1_WPTR, 0); + rdev->cp1.wptr = 0; + WREG32(CP_RB1_WPTR, rdev->cp1.wptr); /* set the wb address wether it's enabled or not */ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1229,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev) WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); rdev->cp1.rptr = RREG32(CP_RB1_RPTR); - rdev->cp1.wptr = RREG32(CP_RB1_WPTR); /* ring2 - compute only */ /* Set ring buffer size */ @@ -1242,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); - WREG32(CP_RB2_WPTR, 0); + rdev->cp2.wptr = 0; + WREG32(CP_RB2_WPTR, rdev->cp2.wptr); /* set the wb address wether it's enabled or not */ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); @@ -1254,7 +1258,6 @@ int cayman_cp_resume(struct 
radeon_device *rdev) WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); rdev->cp2.rptr = RREG32(CP_RB2_RPTR); - rdev->cp2.wptr = RREG32(CP_RB2_WPTR); /* start the rings */ cayman_cp_start(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f2204cb1ccd..5b1837b4aac 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev, int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence) { uint32_t cur_pages; - uint32_t stride_bytes = PAGE_SIZE; + uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; uint32_t pitch; uint32_t stride_pixels; unsigned ndw; @@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev, /* radeon pitch is /64 */ pitch = stride_bytes / 64; stride_pixels = stride_bytes / 4; - num_loops = DIV_ROUND_UP(num_pages, 8191); + num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); /* Ask for enough room for blit + flush + fence */ ndw = 64 + (10 * num_loops); @@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev, DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); return -EINVAL; } - while (num_pages > 0) { - cur_pages = num_pages; + while (num_gpu_pages > 0) { + cur_pages = num_gpu_pages; if (cur_pages > 8191) { cur_pages = 8191; } - num_pages -= cur_pages; + num_gpu_pages -= cur_pages; /* pages are in Y direction - height page width in X direction - width */ @@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev, radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); - radeon_ring_write(rdev, num_pages); - radeon_ring_write(rdev, num_pages); + radeon_ring_write(rdev, cur_pages); + radeon_ring_write(rdev, cur_pages); radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); } radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); @@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) /* Force read & write ptr to 0 */ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); WREG32(RADEON_CP_RB_RPTR_WR, 0); - WREG32(RADEON_CP_RB_WPTR, 0); + rdev->cp.wptr = 0; + WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); /* set the wb address whether it's enabled or not */ WREG32(R_00070C_CP_RB_RPTR_ADDR, @@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) WREG32(RADEON_CP_RB_CNTL, tmp); udelay(10); rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); - rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); - /* protect against crazy HW on resume */ - rdev->cp.wptr &= rdev->cp.ptr_mask; /* Set cp mode to bus mastering & enable cp*/ WREG32(RADEON_CP_CSQ_MODE, REG_SET(RADEON_INDIRECT2_START, indirect2_start) | diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index f2405830041..a1f3ba063c2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence) { uint32_t size; @@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev, int r = 0; /* radeon pitch is /64 */ - size = num_pages << PAGE_SHIFT; + size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; num_loops = DIV_ROUND_UP(size, 0x1FFFFF); r = radeon_ring_lock(rdev, num_loops * 4 
+ 64); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aa5571b73aa..720dd99163f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); - WREG32(CP_RB_WPTR, 0); + rdev->cp.wptr = 0; + WREG32(CP_RB_WPTR, rdev->cp.wptr); /* set the wb address whether it's enabled or not */ WREG32(CP_RB_RPTR_ADDR, @@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); rdev->cp.rptr = RREG32(CP_RB_RPTR); - rdev->cp.wptr = RREG32(CP_RB_WPTR); r600_cp_start(rdev); rdev->cp.ready = true; @@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev, } int r600_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, struct radeon_fence *fence) + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence *fence) { int r; mutex_lock(&rdev->r600_blit.mutex); rdev->r600_blit.vb_ib = NULL; - r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); if (r) { if (rdev->r600_blit.vb_ib) radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); mutex_unlock(&rdev->r600_blit.mutex); return r; } - r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); + r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); r600_blit_done_copy(rdev, fence); mutex_unlock(&rdev->r600_blit.mutex); return 0; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 32807baf55e..c1e056b35b2 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -322,6 +322,7 @@ union radeon_gart_table { #define RADEON_GPU_PAGE_SIZE 4096 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) +#define RADEON_GPU_PAGE_SHIFT 12 struct radeon_gart { dma_addr_t table_addr; @@ -914,17 +915,17 @@ struct radeon_asic { int (*copy_blit)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence); int (*copy_dma)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence); int (*copy)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence); uint32_t (*get_engine_clock)(struct radeon_device *rdev); void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3d7a0d7c6a9..3dedaa07aac 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence); int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, @@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); extern int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned 
num_pages, + unsigned num_gpu_pages, struct radeon_fence *fence); void r200_set_safe_registers(struct radeon_device *rdev); @@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, struct radeon_fence *fence); + unsigned num_gpu_pages, struct radeon_fence *fence); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev); void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int evergreen_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_pages, struct radeon_fence *fence); + unsigned num_gpu_pages, struct radeon_fence *fence); void evergreen_hpd_init(struct radeon_device *rdev); void evergreen_hpd_fini(struct radeon_device *rdev); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index dcd0863e31a..b6e18c8db9f 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev) } else { DRM_INFO("Using generic clock info\n"); + /* may need to be per card */ + rdev->clock.max_pixel_clock = 35000; + if (rdev->flags & RADEON_IS_IGP) { p1pll->reference_freq = 1432; p2pll->reference_freq = 1432; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 4f0c1ecac72..c4b8741dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); - } else { - /* need to setup ddc on the bridge */ - if (radeon_connector_encoder_is_dp_bridge(connector)) { + } else if (radeon_connector_encoder_is_dp_bridge(connector)) { + /* DP bridges are always DP */ + radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; + /* get the DPCD from the bridge */ + radeon_dp_getdpcd(radeon_connector); + + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) + ret = connector_status_connected; + else { + /* need to setup ddc on the bridge */ if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); + if (radeon_ddc_probe(radeon_connector, + radeon_connector->requires_extended_probe)) + ret = connector_status_connected; + } + + if ((ret == connector_status_disconnected) && + radeon_connector->dac_load_detect) { + struct drm_encoder *encoder = radeon_best_single_encoder(connector); + struct drm_encoder_helper_funcs *encoder_funcs; + if (encoder) { + encoder_funcs = encoder->helper_private; + ret = encoder_funcs->detect(encoder, connector); + } } + } else { radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { ret = connector_status_connected; @@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } } - - if ((ret == connector_status_disconnected) && - radeon_connector->dac_load_detect) { - struct drm_encoder *encoder 
= radeon_best_single_encoder(connector); - struct drm_encoder_helper_funcs *encoder_funcs; - if (encoder) { - encoder_funcs = encoder->helper_private; - ret = encoder_funcs->detect(encoder, connector); - } - } } radeon_connector_update_scratch_regs(connector, ret); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1a858944e4f..6adb3e58aff 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -473,8 +473,8 @@ pflip_cleanup: spin_lock_irqsave(&dev->event_lock, flags); radeon_crtc->unpin_work = NULL; unlock_free: - drm_gem_object_unreference_unlocked(old_radeon_fb->obj); spin_unlock_irqrestore(&dev->event_lock, flags); + drm_gem_object_unreference_unlocked(old_radeon_fb->obj); radeon_fence_unref(&work->fence); kfree(work); @@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) radeon_router_select_ddc_port(radeon_connector); if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || + radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); - } - if (!radeon_connector->ddc_bus) - return -1; - if (!radeon_connector->edid) { - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &dig->dp_i2c_bus->adapter); + else if (radeon_connector->ddc_bus && !radeon_connector->edid) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &radeon_connector->ddc_bus->adapter); + } else { + if (radeon_connector->ddc_bus && !radeon_connector->edid) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &radeon_connector->ddc_bus->adapter); } if (!radeon_connector->edid) { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 9b86fb0e412..0b5468bfaf5 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, DRM_ERROR("Trying to move memory with CP turned off.\n"); return -EINVAL; } - r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); + + BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); + + r = radeon_copy(rdev, old_start, new_start, + new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ + fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a4d38d85909..ef06194c5aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { if (bo->ttm == NULL) { - ret = ttm_bo_add_ttm(bo, false); + bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); + ret = ttm_bo_add_ttm(bo, zero); if (ret) goto out_err; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7d27d2b0445..7484e1b6724 
100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -277,6 +277,7 @@ #define USB_DEVICE_ID_PENPOWER 0x00f4 #define USB_VENDOR_ID_GREENASIA 0x0e8f +#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 0ec91c18a42..f0fbd7bd239 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie #define NO_TOUCHES -1 #define SINGLE_TOUCH_UP -2 +/* Touch surface information. Dimension is in hundredths of a mm, min and max + * are in units. */ +#define MOUSE_DIMENSION_X (float)9056 +#define MOUSE_MIN_X -1100 +#define MOUSE_MAX_X 1258 +#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) +#define MOUSE_DIMENSION_Y (float)5152 +#define MOUSE_MIN_Y -1589 +#define MOUSE_MAX_Y 2047 +#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) + +#define TRACKPAD_DIMENSION_X (float)13000 +#define TRACKPAD_MIN_X -2909 +#define TRACKPAD_MAX_X 3167 +#define TRACKPAD_RES_X \ + ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) +#define TRACKPAD_DIMENSION_Y (float)11000 +#define TRACKPAD_MIN_Y -2456 +#define TRACKPAD_MAX_Y 2565 +#define TRACKPAD_RES_Y \ + ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) + /** * struct magicmouse_sc - Tracks Magic Mouse-specific data. * @input: Input device through which we report events. @@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h * inverse of the reported Y. */ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { - input_set_abs_params(input, ABS_MT_POSITION_X, -1100, - 1358, 4, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, - 2047, 4, 0); + input_set_abs_params(input, ABS_MT_POSITION_X, + MOUSE_MIN_X, MOUSE_MAX_X, 4, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, + MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0); + + input_abs_set_res(input, ABS_MT_POSITION_X, + MOUSE_RES_X); + input_abs_set_res(input, ABS_MT_POSITION_Y, + MOUSE_RES_Y); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ - input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); - input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); - input_set_abs_params(input, ABS_MT_POSITION_X, -2909, - 3167, 4, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, - 2565, 4, 0); + input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X, + TRACKPAD_MAX_X, 4, 0); + input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y, + TRACKPAD_MAX_Y, 4, 0); + input_set_abs_params(input, ABS_MT_POSITION_X, + TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, + TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); + + input_abs_set_res(input, ABS_X, TRACKPAD_RES_X); + input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y); + input_abs_set_res(input, ABS_MT_POSITION_X, + TRACKPAD_RES_X); + input_abs_set_res(input, ABS_MT_POSITION_Y, + TRACKPAD_RES_Y); } input_set_events_per_packet(input, 60); @@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev, } report->size = 6; + /* + * Some devices repond with 'invalid report id' when feature + * report switching it into multitouch mode is sent to it. + * + * This results in -EIO from the _raw low-level transport callback, + * but there seems to be no other way of switching the mode. + * Thus the super-ugly hacky success check below. 
+ */ ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), HID_FEATURE_REPORT); - if (ret != sizeof(feature)) { + if (ret != -EIO && ret != sizeof(feature)) { hid_err(hdev, "unable to request touch data (%d)\n", ret); goto err_stop_hw; } diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index 06888323828..72ca689b647 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c @@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev, if (ret) { hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", ret); - /* - * battery attribute is not critical for the tablet, but if it - * failed then there is no need to create ac attribute - */ - goto move_on; + goto err_battery; } wdata->ac.properties = wacom_ac_props; @@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev, if (ret) { hid_warn(hdev, "can't create ac battery attribute, err: %d\n", ret); - /* - * ac attribute is not critical for the tablet, but if it - * failed then we don't want to battery attribute to exist - */ - power_supply_unregister(&wdata->battery); + goto err_ac; } - -move_on: #endif hidinput = list_entry(hdev->inputs.next, struct hid_input, list); input = hidinput->input; + __set_bit(INPUT_PROP_POINTER, input->propbit); + /* Basics */ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); @@ -416,6 +408,13 @@ move_on: return 0; +#ifdef CONFIG_HID_WACOM_POWER_SUPPLY +err_ac: + power_supply_unregister(&wdata->battery); +err_battery: + device_remove_file(&hdev->dev, &dev_attr_speed); + hid_hw_stop(hdev); +#endif err_free: kfree(wdata); return ret; @@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev) #ifdef CONFIG_HID_WACOM_POWER_SUPPLY struct wacom_data *wdata = hid_get_drvdata(hdev); #endif + device_remove_file(&hdev->dev, &dev_attr_speed); hid_hw_stop(hdev); #ifdef CONFIG_HID_WACOM_POWER_SUPPLY diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 4bdb5d46c52..3146fdcda27 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -47,6 +47,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 59d83e83da7..41125767613 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -601,7 +601,12 @@ static int create_core_data(struct platform_data *pdata, err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); if (!err) { tdata->attr_size += MAX_THRESH_ATTRS; - tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; + tdata->tmin = tdata->tjmax - + ((eax & THERM_MASK_THRESHOLD0) >> + THERM_SHIFT_THRESHOLD0) * 1000; + tdata->ttarget = tdata->tjmax - + ((eax & THERM_MASK_THRESHOLD1) >> + THERM_SHIFT_THRESHOLD1) * 1000; } pdata->core_data[attr_no] = tdata; diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index d94a24fdf4b..dd2d7b9620c 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -124,7 +124,7 @@ static inline int 
MV_TO_LIMIT(int mv, int range) static inline int ADC_TO_CURR(int adc, int gain) { - return adc * 1400000 / gain * 255; + return adc * 1400000 / (gain * 255); } /* diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index a561c3a0e91..397fc59b568 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client, struct pmbus_limit_attr { u16 reg; /* Limit register */ bool update; /* True if register needs updates */ + bool low; /* True if low limit; for limits with compare + functions only */ const char *attr; /* Attribute name */ const char *alarm; /* Alarm attribute name */ u32 sbit; /* Alarm attribute status bit */ @@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client, if (attr->compare) { pmbus_add_boolean_cmp(data, name, l->alarm, index, - cbase, cindex, + l->low ? cindex : cbase, + l->low ? cbase : cindex, attr->sbase + page, l->sbit); } else { pmbus_add_boolean_reg(data, name, @@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = { static const struct pmbus_limit_attr temp_limit_attrs[] = { { .reg = PMBUS_UT_WARN_LIMIT, + .low = true, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, + .low = true, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, @@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = { static const struct pmbus_limit_attr temp_limit_attrs23[] = { { .reg = PMBUS_UT_WARN_LIMIT, + .low = true, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, + .low = true, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index ace1c731973..d0ddb60155c 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client, block_buffer[ret] = '\0'; dev_info(&client->dev, "Device ID %s\n", block_buffer); - mid = NULL; - for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) { - mid = &ucd9000_id[i]; + for (mid = ucd9000_id; mid->name[0]; mid++) { if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) break; } - if (!mid || !strlen(mid->name)) { + if (!mid->name[0]) { dev_err(&client->dev, "Unsupported device\n"); return -ENODEV; } diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c index ffcc1cf3609..c65e9da707c 100644 --- a/drivers/hwmon/pmbus/ucd9200.c +++ b/drivers/hwmon/pmbus/ucd9200.c @@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client, block_buffer[ret] = '\0'; dev_info(&client->dev, "Device ID %s\n", block_buffer); - mid = NULL; - for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) { - mid = &ucd9200_id[i]; + for (mid = ucd9200_id; mid->name[0]; mid++) { if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) break; } - if (!mid || !strlen(mid->name)) { + if (!mid->name[0]) { dev_err(&client->dev, "Unsupported device\n"); return -ENODEV; } diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c index 6659d269b84..b73da6cd6f9 100644 --- a/drivers/i2c/busses/i2c-pxa-pci.c +++ b/drivers/i2c/busses/i2c-pxa-pci.c @@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev, return -EINVAL; } sds = kzalloc(sizeof(*sds), GFP_KERNEL); - if (!sds) + if (!sds) { + ret = -ENOMEM; goto err_mem; + } for (i = 0; i < 
ARRAY_SIZE(sds->pdev); i++) { sds->pdev[i] = add_i2c_device(dev, i); if (IS_ERR(sds->pdev[i])) { + ret = PTR_ERR(sds->pdev[i]); while (--i >= 0) platform_device_unregister(sds->pdev[i]); goto err_dev_add; diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 2440b741197..3c94c4a81a5 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) /* Rounds down to not include partial word at the end of buf */ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; - if (words_to_transfer > tx_fifo_avail) - words_to_transfer = tx_fifo_avail; - i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); - - buf += words_to_transfer * BYTES_PER_FIFO_WORD; - buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; - tx_fifo_avail -= words_to_transfer; + /* It's very common to have < 4 bytes, so optimize that case. */ + if (words_to_transfer) { + if (words_to_transfer > tx_fifo_avail) + words_to_transfer = tx_fifo_avail; + + /* + * Update state before writing to FIFO. If this casues us + * to finish writing all bytes (AKA buf_remaining goes to 0) we + * have a potential for an interrupt (PACKET_XFER_COMPLETE is + * not maskable). We need to make sure that the isr sees + * buf_remaining as 0 and doesn't call us back re-entrantly. + */ + buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; + tx_fifo_avail -= words_to_transfer; + i2c_dev->msg_buf_remaining = buf_remaining; + i2c_dev->msg_buf = buf + + words_to_transfer * BYTES_PER_FIFO_WORD; + barrier(); + + i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + + buf += words_to_transfer * BYTES_PER_FIFO_WORD; + } /* * If there is a partial word at the end of buf, handle it manually to @@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) if (tx_fifo_avail > 0 && buf_remaining > 0) { BUG_ON(buf_remaining > 3); memcpy(&val, buf, buf_remaining); + + /* Again update before writing to FIFO to make sure isr sees. 
*/ + i2c_dev->msg_buf_remaining = 0; + i2c_dev->msg_buf = NULL; + barrier(); + i2c_writel(i2c_dev, val, I2C_TX_FIFO); - buf_remaining = 0; - tx_fifo_avail--; } - BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0); - i2c_dev->msg_buf_remaining = buf_remaining; - i2c_dev->msg_buf = buf; return 0; } @@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); } - if ((status & I2C_INT_PACKET_XFER_COMPLETE) && - !i2c_dev->msg_buf_remaining) + if (status & I2C_INT_PACKET_XFER_COMPLETE) { + BUG_ON(i2c_dev->msg_buf_remaining); complete(&i2c_dev->msg_complete); + } i2c_writel(i2c_dev, status, I2C_INT_STATUS); if (i2c_dev->is_dvc) @@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], static u32 tegra_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C; + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm tegra_i2c_algo = { @@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev) } #endif +#if defined(CONFIG_OF) +/* Match table for of_platform binding */ +static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { + { .compatible = "nvidia,tegra20-i2c", }, + {}, +}; +MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); +#else +#define tegra_i2c_of_match NULL +#endif + static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, .remove = tegra_i2c_remove, @@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = { .driver = { .name = "tegra-i2c", .owner = THIS_MODULE, + .of_match_table = tegra_i2c_of_match, }, }; diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 7b404e5443e..e34eeb8ae37 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -668,4 +668,3 @@ module_exit(adp5588_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); -MODULE_ALIAS("platform:adp5588-keys"); diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index b09c7d12721..ab860511f01 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) le16_to_cpu(dev->ctl_req->wIndex), dev->ctl_data, USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); - if (error && error != EINTR) + if (error < 0 && error != -EINTR) err("%s: usb_control_msg() failed %d", __func__, error); } diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index da280189ef0..5ec617e28f7 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -67,6 +67,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 +/* MacbookAir4,1 (unibody, July 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a +#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b /* MacbookAir4,2 (unibody, July 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d @@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), + /* MacbookAir4,1 */ + 
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), /* MacbookAir4,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), @@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + }, {} }; diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index d27c9d91630..958b4eb6369 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi get_unaligned_le16(&report[i + 3]); i += 4; } - } else if (usage == WCM_DIGITIZER) { - /* max pressure isn't reported - features->pressure_max = (unsigned short) - (report[i+4] << 8 | report[i + 3]); - */ - features->pressure_max = 255; - i += 4; } break; @@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi pen = 1; i++; break; - - case HID_USAGE_UNDEFINED: - if (usage == WCM_DESKTOP && finger) /* capacity */ - features->pressure_max = - get_unaligned_le16(&report[i + 3]); - i += 4; - break; } break; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index c1c2f7b28d8..0dc97ec15c2 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) int i; for (i = 0; i < 2; i++) { - int p = data[9 * i + 2]; - bool touch = p && !wacom->shared->stylus_in_proximity; + int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); + bool touch = data[offset + 3] & 0x80; - input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); /* * Touch events need to be disabled while stylus is * in proximity because user's hand is resting on touchpad * and sending unwanted events. User expects tablet buttons * to continue working though. 
*/ + touch = touch && !wacom->shared->stylus_in_proximity; + + input_mt_slot(input, i); + input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { - int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; - int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; + int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; + int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { x <<= 5; y <<= 5; } - input_report_abs(input, ABS_MT_PRESSURE, p); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } @@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, features->y_fuzz, 0); - input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, - features->pressure_fuzz, 0); if (features->device_type == BTN_TOOL_PEN) { + input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, + features->pressure_fuzz, 0); + /* penabled devices have fixed resolution for each model */ input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); @@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); + + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case WACOM_21UX2: @@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, } input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + wacom_setup_cintiq(wacom_wac); break; @@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, /* fall through */ case INTUOS: + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + wacom_setup_intuos(wacom_wac); break; @@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); wacom_setup_intuos(wacom_wac); + + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case TABLETPC2FG: @@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, case TABLETPC: __clear_bit(ABS_MISC, input_dev->absbit); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + if (features->device_type != BTN_TOOL_PEN) break; /* no need to process stylus stuff */ /* fall through */ case PL: - case PTU: case DTU: __set_bit(BTN_TOOL_PEN, input_dev->keybit); + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); + + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + break; + + case PTU: + __set_bit(BTN_STYLUS2, input_dev->keybit); /* fall through */ case PENPARTNER: + __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); + __set_bit(BTN_STYLUS, input_dev->keybit); + + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case BAMBOO_PT: __clear_bit(ABS_MISC, input_dev->absbit); + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + if (features->device_type == BTN_TOOL_DOUBLETAP) { __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index c14412ef464..9941d39df43 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ 
b/drivers/input/touchscreen/wacom_w8001.c @@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001) dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); + __set_bit(INPUT_PROP_DIRECT, dev->propbit); + /* penabled? */ error = w8001_command(w8001, W8001_CMD_QUERY, true); if (!error) { diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a14f8dc2346..0e4227f457a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd) * Writes the command to the IOMMUs command buffer and informs the * hardware about the new command. */ -static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) +static int iommu_queue_command_sync(struct amd_iommu *iommu, + struct iommu_cmd *cmd, + bool sync) { u32 left, tail, head, next_tail; unsigned long flags; @@ -639,13 +641,18 @@ again: copy_cmd_to_buffer(iommu, cmd, tail); /* We need to sync now to make sure all commands are processed */ - iommu->need_sync = true; + iommu->need_sync = sync; spin_unlock_irqrestore(&iommu->lock, flags); return 0; } +static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) +{ + return iommu_queue_command_sync(iommu, cmd, true); +} + /* * This function queues a completion wait command into the command * buffer of an IOMMU @@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) build_completion_wait(&cmd, (u64)&sem); - ret = iommu_queue_command(iommu, &cmd); + ret = iommu_queue_command_sync(iommu, &cmd, false); if (ret) return ret; @@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain) static void domain_flush_devices(struct protection_domain *domain) { struct iommu_dev_data *dev_data; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); list_for_each_entry(dev_data, &domain->dev_list, list) device_flush_dte(dev_data); - - spin_unlock_irqrestore(&domain->lock, flags); } /**************************************************************************** diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 3dc9befa5ae..6dcc7e2d54d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu) return ret; } - ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); + ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); if (ret) printk(KERN_ERR "IOMMU: can't request irq\n"); return ret; diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index d87c9d02f78..328c64c0841 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c @@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev, if (count == size) { led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); + led_cdev->blink_delay_on = state; ret = count; } @@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev, if (count == size) { led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); + led_cdev->blink_delay_off = state; ret = count; } diff --git a/drivers/md/linear.h b/drivers/md/linear.h index 0ce29b61605..2f2da05b2ce 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h @@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t; struct linear_private_data { + struct rcu_head rcu; sector_t array_sectors; dev_info_t disks[0]; - struct rcu_head rcu; }; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8e221a20f5d..5404b229582 100644 --- 
a/drivers/md/md.c +++ b/drivers/md/md.c @@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, bio->bi_end_io = super_written; atomic_inc(&mddev->pending_writes); - submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); + submit_bio(WRITE_FLUSH_FUA, bio); } void md_super_wait(mddev_t *mddev) @@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ret = 0; } rdev->sectors = rdev->sb_start; + /* Limit to 4TB as metadata cannot record more than that */ + if (rdev->sectors >= (2ULL << 32)) + rdev->sectors = (2ULL << 32) - 2; - if (rdev->sectors < sb->size * 2 && sb->level > 1) + if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) /* "this cannot possibly happen" ... */ ret = -EINVAL; @@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->clevel[0] = 0; mddev->layout = sb->layout; mddev->raid_disks = sb->raid_disks; - mddev->dev_sectors = sb->size * 2; + mddev->dev_sectors = ((sector_t)sb->size) * 2; mddev->events = ev1; mddev->bitmap_info.offset = 0; mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; @@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) rdev->sb_start = calc_dev_sboffset(rdev); if (!num_sectors || num_sectors > rdev->sb_start) num_sectors = rdev->sb_start; + /* Limit to 4TB as metadata cannot record more than that. + * 4TB == 2^32 KB, or 2*2^32 sectors. + */ + if (num_sectors >= (2ULL << 32)) + num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); @@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); + if (test_bit(WriteMostly, &rdev->flags)) + sb->devflags |= WriteMostly1; + else + sb->devflags &= ~WriteMostly1; + if (mddev->bitmap && mddev->bitmap_info.file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); @@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) int err = -EINVAL; if (cmd_match(buf, "faulty") && rdev->mddev->pers) { md_error(rdev->mddev, rdev); - err = 0; + if (test_bit(Faulty, &rdev->flags)) + err = 0; + else + err = -EBUSY; } else if (cmd_match(buf, "remove")) { if (rdev->raid_disk >= 0) err = -EBUSY; @@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) err = 0; } else if (cmd_match(buf, "-blocked")) { if (!test_bit(Faulty, &rdev->flags) && - test_bit(BlockedBadBlocks, &rdev->flags)) { + rdev->badblocks.unacked_exist) { /* metadata handler doesn't understand badblocks, * so we need to fail the device */ @@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev) return -ENODEV; md_error(mddev, rdev); + if (!test_bit(Faulty, &rdev->flags)) + return -EBUSY; return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 32323f0afd8..f4622dd8fc5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1099,12 +1099,11 @@ read_again: bio_list_add(&conf->pending_bio_list, mbio); spin_unlock_irqrestore(&conf->device_lock, flags); } - r1_bio_write_done(r1_bio); - - /* In case raid1d snuck in to freeze_array */ - wake_up(&conf->wait_barrier); - + /* Mustn't call r1_bio_write_done before this next test, + * as it could result in the bio being freed. + */ if (sectors_handled < (bio->bi_size >> 9)) { + r1_bio_write_done(r1_bio); /* We need another r1_bio. 
It has already been counted * in bio->bi_phys_segments */ @@ -1117,6 +1116,11 @@ read_again: goto retry_write; } + r1_bio_write_done(r1_bio); + + /* In case raid1d snuck in to freeze_array */ + wake_up(&conf->wait_barrier); + if (do_sync || !bitmap || !plugged) md_wakeup_thread(mddev->thread); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8b29cd4f01c..d7a8468ddea 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio) md_write_end(r10_bio->mddev); } +static void one_write_done(r10bio_t *r10_bio) +{ + if (atomic_dec_and_test(&r10_bio->remaining)) { + if (test_bit(R10BIO_WriteError, &r10_bio->state)) + reschedule_retry(r10_bio); + else { + close_write(r10_bio); + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) + reschedule_retry(r10_bio); + else + raid_end_bio_io(r10_bio); + } + } +} + static void raid10_end_write_request(struct bio *bio, int error) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error) * Let's see if all mirrored write operations have finished * already. */ - if (atomic_dec_and_test(&r10_bio->remaining)) { - if (test_bit(R10BIO_WriteError, &r10_bio->state)) - reschedule_retry(r10_bio); - else { - close_write(r10_bio); - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) - reschedule_retry(r10_bio); - else - raid_end_bio_io(r10_bio); - } - } + one_write_done(r10_bio); if (dec_rdev) rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); } @@ -1127,20 +1132,12 @@ retry_write: spin_unlock_irqrestore(&conf->device_lock, flags); } - if (atomic_dec_and_test(&r10_bio->remaining)) { - /* This matches the end of raid10_end_write_request() */ - bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, - r10_bio->sectors, - !test_bit(R10BIO_Degraded, &r10_bio->state), - 0); - md_write_end(mddev); - raid_end_bio_io(r10_bio); - } - - /* In case raid10d snuck in to freeze_array */ - wake_up(&conf->wait_barrier); + /* Don't remove the bias on 'remaining' (one_write_done) until + * after checking if we need to go around again. + */ if (sectors_handled < (bio->bi_size >> 9)) { + one_write_done(r10_bio); /* We need another r10_bio. It has already been counted * in bio->bi_phys_segments. 
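(Side note, not part of the patch: the raid1/raid10 hunks above reorder the code so the per-request completion helper is called only after the "do we need another pass?" test has read the request's fields; calling the write-done helper first could drop the last reference and free memory that the test still dereferences. A generic standalone sketch of that ordering hazard and the fix, using a hypothetical refcounted request type, follows.)

/* Sketch of the completion-ordering hazard fixed above; types are hypothetical. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	int refcount;
	int sectors_handled;
	int total_sectors;
};

static void put_request(struct request *r)
{
	if (--r->refcount == 0) {
		printf("request freed\n");
		free(r);
	}
}

/* WRONG: drops the reference, then reads r->sectors_handled, which may
 * already be freed memory if that was the last reference.  Shown only
 * for contrast; never call it. */
static bool finish_buggy(struct request *r)
{
	put_request(r);
	return r->sectors_handled < r->total_sectors;	/* use after free */
}

/* RIGHT: read everything the decision needs, then drop the reference. */
static bool finish_fixed(struct request *r)
{
	bool need_more = r->sectors_handled < r->total_sectors;

	put_request(r);
	return need_more;
}

int main(void)
{
	struct request *r = malloc(sizeof(*r));

	r->refcount = 1;
	r->sectors_handled = 8;
	r->total_sectors = 16;
	printf("need another pass: %d\n", finish_fixed(r));
	(void)finish_buggy;
	return 0;
}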
*/ @@ -1154,6 +1151,10 @@ retry_write: r10_bio->state = 0; goto retry_write; } + one_write_done(r10_bio); + + /* In case raid10d snuck in to freeze_array */ + wake_up(&conf->wait_barrier); if (do_sync || !mddev->bitmap || !plugged) md_wakeup_thread(mddev->thread); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dbae459fb02..43709fa6b6d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh) finish: /* wait for this device to become unblocked */ - if (unlikely(s.blocked_rdev)) + if (conf->mddev->external && unlikely(s.blocked_rdev)) md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); if (s.handle_bad_blocks) diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c index 3db89e3cb0b..536c16c943b 100644 --- a/drivers/media/dvb/dvb-usb/vp7045.c +++ b/drivers/media/dvb/dvb-usb/vp7045.c @@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties; static int vp7045_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { - struct dvb_usb_device *d; - int ret = dvb_usb_device_init(intf, &vp7045_properties, - THIS_MODULE, &d, adapter_nr); - if (ret) - return ret; - - d->priv = kmalloc(20, GFP_KERNEL); - if (!d->priv) { - dvb_usb_device_exit(intf); - return -ENOMEM; - } - - return ret; -} - -static void vp7045_usb_disconnect(struct usb_interface *intf) -{ - struct dvb_usb_device *d = usb_get_intfdata(intf); - kfree(d->priv); - dvb_usb_device_exit(intf); + return dvb_usb_device_init(intf, &vp7045_properties, + THIS_MODULE, NULL, adapter_nr); } static struct usb_device_id vp7045_usb_table [] = { @@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table); static struct dvb_usb_device_properties vp7045_properties = { .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-vp7045-01.fw", - .size_of_priv = sizeof(u8 *), + .size_of_priv = 20, .num_adapters = 1, .adapter = { @@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = { static struct usb_driver vp7045_usb_driver = { .name = "dvb_usb_vp7045", .probe = vp7045_usb_probe, - .disconnect = vp7045_usb_disconnect, + .disconnect = dvb_usb_device_exit, .id_table = vp7045_usb_table, }; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index eae05b50047..144f3f55d76 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt) static void nvt_process_rx_ir_data(struct nvt_dev *nvt) { DEFINE_IR_RAW_EVENT(rawir); - unsigned int count; u32 carrier; u8 sample; int i; @@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) if (nvt->carrier_detect_enabled) carrier = nvt_rx_carrier_detect(nvt); - count = nvt->pkts; - nvt_dbg_verbose("Processing buffer of len %d", count); + nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts); init_ir_raw_event(&rawir); - for (i = 0; i < count; i++) { - nvt->pkts--; + for (i = 0; i < nvt->pkts; i++) { sample = nvt->buf[i]; rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) * SAMPLE_PERIOD); - if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { - if (nvt->rawir.pulse == rawir.pulse) - nvt->rawir.duration += rawir.duration; - else { - nvt->rawir.duration = rawir.duration; - nvt->rawir.pulse = rawir.pulse; - } - continue; - } - - rawir.duration += nvt->rawir.duration; + nvt_dbg("Storing %s with duration %d", + rawir.pulse ? 
"pulse" : "space", rawir.duration); - init_ir_raw_event(&nvt->rawir); - nvt->rawir.duration = 0; - nvt->rawir.pulse = rawir.pulse; - - if (sample == BUF_PULSE_BIT) - rawir.pulse = false; - - if (rawir.duration) { - nvt_dbg("Storing %s with duration %d", - rawir.pulse ? "pulse" : "space", - rawir.duration); - - ir_raw_event_store_with_filter(nvt->rdev, &rawir); - } + ir_raw_event_store_with_filter(nvt->rdev, &rawir); /* * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE * indicates end of IR signal, but new data incoming. In both * cases, it means we're ready to call ir_raw_event_handle */ - if ((sample == BUF_PULSE_BIT) && nvt->pkts) { + if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) { nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); ir_raw_event_handle(nvt->rdev); } } + nvt->pkts = 0; + nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); ir_raw_event_handle(nvt->rdev); - if (nvt->pkts) { - nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts); - nvt->pkts = 0; - } - nvt_dbg_verbose("%s done", __func__); } @@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) spin_lock_init(&nvt->nvt_lock); spin_lock_init(&nvt->tx.lock); - init_ir_raw_event(&nvt->rawir); ret = -EBUSY; /* now claim resources */ diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 1241fc89a36..0d5e0872a2e 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h @@ -67,7 +67,6 @@ static int debug; struct nvt_dev { struct pnp_dev *pdev; struct rc_dev *rdev; - struct ir_raw_event rawir; spinlock_t nvt_lock; diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c index 0800433b209..18305c89083 100644 --- a/drivers/media/video/gspca/ov519.c +++ b/drivers/media/video/gspca/ov519.c @@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd) case 0x60: PDEBUG(D_PROBE, "Sensor is a OV7660"); sd->sensor = SEN_OV7660; - sd->invert_led = 0; break; default: PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); @@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev, case BRIDGE_OV519: cam->cam_mode = ov519_vga_mode; cam->nmodes = ARRAY_SIZE(ov519_vga_mode); - sd->invert_led = !sd->invert_led; break; case BRIDGE_OVFX2: cam->cam_mode = ov519_vga_mode; @@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = { /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, - {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x041e, 0x405f), + {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x041e, 0x4064), - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x041e, 0x4068), + {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, + {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, - {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x054c, 0x0155), - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x05a9, 
0x0511), .driver_info = BRIDGE_OV511 }, {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, - {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, - {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, + {USB_DEVICE(0x05a9, 0x0519), + .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, + {USB_DEVICE(0x05a9, 0x0530), + .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 81b8a600783..c477ad11f10 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev) reg_w1(gspca_dev, 0x01, 0x22); msleep(100); reg01 = SCL_SEL_OD | S_PDN_INV; - reg17 &= MCK_SIZE_MASK; + reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ break; } @@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev) if (!mode) { /* if 640x480 */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ + } else { + reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x02; /* clock / 2 */ } break; case SENSOR_OV7630: diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c index e9a0e94b999..8c70e64444e 100644 --- a/drivers/media/video/pwc/pwc-v4l.c +++ b/drivers/media/video/pwc/pwc-v4l.c @@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev) if (pdev->restore_factory) pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; - if (!pdev->features & FEATURE_MOTOR_PANTILT) + if (!(pdev->features & FEATURE_MOTOR_PANTILT)) return hdl->error; /* Motor pan / tilt / reset */ diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index 85d3048c1d6..bb7f17f2a33 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void) struct pci_bus *pbus = pci_find_bus(0, 0); u8 cbyte; + if (!pbus) + return false; pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, VIACAM_SERIAL_CREG, &cbyte); if ((cbyte & VIACAM_SERIAL_BIT) == 0) diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c index 5d1fca0277e..f83103b8970 100644 --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c, max8997->dev = &i2c->dev; max8997->i2c = i2c; max8997->type = id->driver_data; + max8997->irq = i2c->irq; if (!pdata) goto err; + max8997->irq_base = pdata->irq_base; + max8997->ono = pdata->ono; max8997->wakeup = pdata->wakeup; mutex_init(&max8997->iolock); @@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c, pm_runtime_set_active(max8997->dev); + max8997_irq_init(max8997); + mfd_add_devices(max8997->dev, -1, max8997_devs, ARRAY_SIZE(max8997_devs), NULL, 0); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 29601e7d606..86e14583a08 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> @@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count) | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); - reg |= (1 << (i + 1)); } else continue; diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c index 2bfad5c86cc..a56be931551 100644 --- a/drivers/mfd/tps65910-irq.c +++ b/drivers/mfd/tps65910-irq.c @@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq, switch (tps65910_chip_id(tps65910)) { case TPS65910: tps65910->irq_num = TPS65910_NUM_IRQ; + break; case TPS65911: tps65910->irq_num = TPS65911_NUM_IRQ; + break; } /* Register with genirq */ diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index b5d598c3aa7..7cbf2aa9e64 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c @@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) u8 ch_msb, ch_lsb; int ret; - if (!req) + if (!req || !twl4030_madc) return -EINVAL; + mutex_lock(&twl4030_madc->lock); if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { ret = -EINVAL; @@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev) if (!madc) return -ENOMEM; + madc->dev = &pdev->dev; + /* * Phoenix provides 2 interrupt lines. The first one is connected to * the OMAP. The other one can be connected to the other processor such diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c index ebf99bef392..d584f6b4d6e 100644 --- a/drivers/mfd/wm8350-gpio.c +++ b/drivers/mfd/wm8350-gpio.c @@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) return ret; } -static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) +static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) { if (db == WM8350_GPIO_DEBOUNCE_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, @@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; - if (gpio_set_debounce(wm8350, gpio, debounce)) + if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 06df1877ad0..0b56e3f4357 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -165,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc, static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, const char *thread_name) { + /* + * Since we access the comm member in current's task_struct, we only + * need to be as large as what 'comm' in that structure is. + */ + char comm[TASK_COMM_LEN]; struct pti_masterchannel mccontrol = {.master = CONTROL_ID, .channel = 0}; const char *thread_name_p; @@ -172,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, u8 control_frame[CONTROL_FRAME_LEN]; if (!thread_name) { - /* - * Since we access the comm member in current's task_struct, - * we only need to be as large as what 'comm' in that - * structure is. 
- */ - char comm[TASK_COMM_LEN]; - if (!in_interrupt()) get_task_comm(comm, current); else diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 91a0a7460eb..b27b94078c2 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) if (mrq->done) mrq->done(mrq); - mmc_host_clk_gate(host); + mmc_host_clk_release(host); } } @@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) mrq->stop->mrq = mrq; } } - mmc_host_clk_ungate(host); + mmc_host_clk_hold(host); led_trigger_event(host->led, LED_FULL); host->ops->request(host, mrq); } @@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host) */ void mmc_set_chip_select(struct mmc_host *host, int mode) { + mmc_host_clk_hold(host); host->ios.chip_select = mode; mmc_set_ios(host); + mmc_host_clk_release(host); } /* * Sets the host clock to the highest possible frequency that * is below "hz". */ -void mmc_set_clock(struct mmc_host *host, unsigned int hz) +static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) { WARN_ON(hz < host->f_min); @@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz) mmc_set_ios(host); } +void mmc_set_clock(struct mmc_host *host, unsigned int hz) +{ + mmc_host_clk_hold(host); + __mmc_set_clock(host, hz); + mmc_host_clk_release(host); +} + #ifdef CONFIG_MMC_CLKGATE /* * This gates the clock by setting it to 0 Hz. @@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host) if (host->clk_old) { BUG_ON(host->ios.clock); /* This call will also set host->clk_gated to false */ - mmc_set_clock(host, host->clk_old); + __mmc_set_clock(host, host->clk_old); } } @@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host) */ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) { + mmc_host_clk_hold(host); host->ios.bus_mode = mode; mmc_set_ios(host); + mmc_host_clk_release(host); } /* @@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) */ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) { + mmc_host_clk_hold(host); host->ios.bus_width = width; mmc_set_ios(host); + mmc_host_clk_release(host); } /** @@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) ocr &= 3 << bit; + mmc_host_clk_hold(host); host->ios.vdd = bit; mmc_set_ios(host); + mmc_host_clk_release(host); } else { pr_warning("%s: host doesn't support card's voltages\n", mmc_hostname(host)); @@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11 */ void mmc_set_timing(struct mmc_host *host, unsigned int timing) { + mmc_host_clk_hold(host); host->ios.timing = timing; mmc_set_ios(host); + mmc_host_clk_release(host); } /* @@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) */ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) { + mmc_host_clk_hold(host); host->ios.drv_type = drv_type; mmc_set_ios(host); + mmc_host_clk_release(host); } /* @@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host) { int bit; + mmc_host_clk_hold(host); + /* If ocr is set, we use it */ if (host->ocr) bit = ffs(host->ocr) - 1; @@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host) * time required to reach a stable voltage. 
*/ mmc_delay(10); + + mmc_host_clk_release(host); } static void mmc_power_off(struct mmc_host *host) { + mmc_host_clk_hold(host); + host->ios.clock = 0; host->ios.vdd = 0; @@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host) host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); + + mmc_host_clk_release(host); } /* diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index b29d3e8fd3a..793d0a0dad8 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work) } /** - * mmc_host_clk_ungate - ungate hardware MCI clocks + * mmc_host_clk_hold - ungate hardware MCI clocks * @host: host to ungate. * * Makes sure the host ios.clock is restored to a non-zero value * past this call. Increase clock reference count and ungate clock * if we're the first user. */ -void mmc_host_clk_ungate(struct mmc_host *host) +void mmc_host_clk_hold(struct mmc_host *host) { unsigned long flags; @@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card) } /** - * mmc_host_clk_gate - gate off hardware MCI clocks + * mmc_host_clk_release - gate off hardware MCI clocks * @host: host to gate. * * Calls the host driver with ios.clock set to zero as often as possible * in order to gate off hardware MCI clocks. Decrease clock reference * count and schedule disabling of clock. */ -void mmc_host_clk_gate(struct mmc_host *host) +void mmc_host_clk_release(struct mmc_host *host) { unsigned long flags; @@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host) host->clk_requests--; if (mmc_host_may_gate_card(host->card) && !host->clk_requests) - schedule_work(&host->clk_gate_work); + queue_work(system_nrt_wq, &host->clk_gate_work); spin_unlock_irqrestore(&host->clk_lock, flags); } @@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) if (cancel_work_sync(&host->clk_gate_work)) mmc_host_clk_gate_delayed(host); if (host->clk_gated) - mmc_host_clk_ungate(host); + mmc_host_clk_hold(host); /* There should be only one user now */ WARN_ON(host->clk_requests > 1); } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index de199f91192..fb8a5cd2e4a 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -16,16 +16,16 @@ int mmc_register_host_class(void); void mmc_unregister_host_class(void); #ifdef CONFIG_MMC_CLKGATE -void mmc_host_clk_ungate(struct mmc_host *host); -void mmc_host_clk_gate(struct mmc_host *host); +void mmc_host_clk_hold(struct mmc_host *host); +void mmc_host_clk_release(struct mmc_host *host); unsigned int mmc_host_clk_rate(struct mmc_host *host); #else -static inline void mmc_host_clk_ungate(struct mmc_host *host) +static inline void mmc_host_clk_hold(struct mmc_host *host) { } -static inline void mmc_host_clk_gate(struct mmc_host *host) +static inline void mmc_host_clk_release(struct mmc_host *host) { } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 633975ff2bb..0370e03e314 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status) return 0; } -static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +static void sd_update_bus_speed_mode(struct mmc_card *card) { - unsigned int bus_speed = 0, timing = 0; - int err; - /* * If the host doesn't support any of the UHS-I modes, fallback on * default speed. 
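(Illustrative aside, not part of the patch: the mmc core hunks above rename the clock-gating helpers to mmc_host_clk_hold()/mmc_host_clk_release() and bracket every ios update with a hold/release pair, so aggressive clock gating cannot switch the bus clock off in the middle of an update. A toy standalone model of that reference-counting pattern follows; the names and simplified gating are assumptions of the sketch, not the mmc core API.)

/* Toy model of hold/release clock gating; not the mmc core API. */
#include <stdio.h>

struct host {
	int clk_requests;	/* how many users currently need the clock */
	int clk_gated;		/* 1 when the bus clock is switched off */
	unsigned int bus_width;
};

static void clk_hold(struct host *h)
{
	if (h->clk_requests++ == 0 && h->clk_gated) {
		h->clk_gated = 0;
		printf("clock ungated\n");
	}
}

static void clk_release(struct host *h)
{
	if (--h->clk_requests == 0) {
		/* the real driver defers this to a workqueue */
		h->clk_gated = 1;
		printf("clock gated\n");
	}
}

/* Every ios update is bracketed so gating cannot race with it. */
static void set_bus_width(struct host *h, unsigned int width)
{
	clk_hold(h);
	h->bus_width = width;
	printf("ios update with clock running, width %u\n", width);
	clk_release(h);
}

int main(void)
{
	struct host h = { .clk_requests = 0, .clk_gated = 1 };

	set_bus_width(&h, 4);
	return 0;
}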
*/ if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) - return 0; + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { + card->sd_bus_speed = 0; + return; + } if ((card->host->caps & MMC_CAP_UHS_SDR104) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { - bus_speed = UHS_SDR104_BUS_SPEED; - timing = MMC_TIMING_UHS_SDR104; - card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; + card->sd_bus_speed = UHS_SDR104_BUS_SPEED; } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { - bus_speed = UHS_DDR50_BUS_SPEED; - timing = MMC_TIMING_UHS_DDR50; - card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; + card->sd_bus_speed = UHS_DDR50_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50)) { - bus_speed = UHS_SDR50_BUS_SPEED; - timing = MMC_TIMING_UHS_SDR50; - card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; + card->sd_bus_speed = UHS_SDR50_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { - bus_speed = UHS_SDR25_BUS_SPEED; - timing = MMC_TIMING_UHS_SDR25; - card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; + card->sd_bus_speed = UHS_SDR25_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR12)) { - bus_speed = UHS_SDR12_BUS_SPEED; - timing = MMC_TIMING_UHS_SDR12; - card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; + card->sd_bus_speed = UHS_SDR12_BUS_SPEED; + } +} + +static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +{ + int err; + unsigned int timing = 0; + + switch (card->sd_bus_speed) { + case UHS_SDR104_BUS_SPEED: + timing = MMC_TIMING_UHS_SDR104; + card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; + break; + case UHS_DDR50_BUS_SPEED: + timing = MMC_TIMING_UHS_DDR50; + card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; + break; + case UHS_SDR50_BUS_SPEED: + timing = MMC_TIMING_UHS_SDR50; + card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; + break; + case UHS_SDR25_BUS_SPEED: + timing = MMC_TIMING_UHS_SDR25; + card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; + break; + case UHS_SDR12_BUS_SPEED: + timing = MMC_TIMING_UHS_SDR12; + card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; + break; + default: + return 0; } - card->sd_bus_speed = bus_speed; - err = mmc_sd_switch(card, 1, 0, bus_speed, status); + err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status); if (err) return err; - if ((status[16] & 0xF) != bus_speed) + if ((status[16] & 0xF) != card->sd_bus_speed) printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", mmc_hostname(card->host)); else { @@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); } + /* + * Select the bus speed mode depending on host + * and card capability. 
+ */ + sd_update_bus_speed_mode(card); + /* Set the driver strength for the card */ err = sd_select_driver_type(card, status); if (err) goto out; - /* Set bus speed mode of the card */ - err = sd_set_bus_speed_mode(card, status); + /* Set current limit for the card */ + err = sd_set_current_limit(card, status); if (err) goto out; - /* Set current limit for the card */ - err = sd_set_current_limit(card, status); + /* Set bus speed mode of the card */ + err = sd_set_bus_speed_mode(card, status); if (err) goto out; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 0e9780f5a4a..4dc0028086a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/gpio.h> +#include <linux/module.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 2bd7bf4fece..fe886d6c474 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) ctrl &= ~SDHCI_CTRL_8BITBUS; break; default: + ctrl &= ~SDHCI_CTRL_4BITBUS; + ctrl &= ~SDHCI_CTRL_8BITBUS; break; } diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 774f6439d7c..0c4a672f5db 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) mmc_data->hclk = clk_get_rate(priv->clk); mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; mmc_data->get_cd = sh_mobile_sdhi_get_cd; - if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) - mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; if (p) { mmc_data->flags = p->tmio_flags; + if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) + mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; mmc_data->ocr_mask = p->tmio_ocr_mask; mmc_data->capabilities |= p->tmio_caps; diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 65b5b76cc37..64fbb002182 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi) #define ubi_dbg_msg(fmt, ...) do { \ if (0) \ - pr_debug(fmt "\n", ##__VA_ARGS__); \ + printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ } while (0) #define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8d0314dbd94..a44874e24f2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2535,7 +2535,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI select MII ---help--- @@ -2548,10 +2548,11 @@ config PCH_GBE This driver enables Gigabit Ethernet function. This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7223. - ML7223 IOH is for MP(Media Phone) use. - ML7223 is companion chip for Intel Atom E6xx series. - ML7223 is completely compatible for Intel EG20T PCH. + Output Hub), ML7223/ML7831. + ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general + purpose use. + ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7223/ML7831 is completely compatible for Intel EG20T PCH. 
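(For illustration only, referring back to the sd.c hunks above: the rework splits bus-speed handling into sd_update_bus_speed_mode(), which records the fastest UHS-I mode both host and card advertise (SDR104, then DDR50, SDR50, SDR25, SDR12), and sd_set_bus_speed_mode(), which later programs the recorded mode; the init sequence now also sets the current limit before switching speed. A standalone sketch of that highest-common-mode selection follows; the capability bits are made up and the host-capability handling is simplified relative to the driver.)

/* Standalone sketch of picking the fastest mode both sides support. */
#include <stdio.h>

enum {			/* illustrative capability bits, not the mmc defines */
	CAP_SDR12  = 1 << 0,
	CAP_SDR25  = 1 << 1,
	CAP_SDR50  = 1 << 2,
	CAP_SDR104 = 1 << 3,
	CAP_DDR50  = 1 << 4,
};

static const char *pick_bus_speed(unsigned int host_caps, unsigned int card_caps)
{
	unsigned int common = host_caps & card_caps;

	/* fastest first; DDR50 is preferred over SDR50, as in the driver */
	if (common & CAP_SDR104)
		return "SDR104";
	if (common & CAP_DDR50)
		return "DDR50";
	if (common & CAP_SDR50)
		return "SDR50";
	if (common & CAP_SDR25)
		return "SDR25";
	if (common & CAP_SDR12)
		return "SDR12";
	return "default speed";
}

int main(void)
{
	unsigned int host = CAP_SDR12 | CAP_SDR25 | CAP_SDR50 | CAP_DDR50;
	unsigned int card = CAP_SDR12 | CAP_SDR25 | CAP_SDR104 | CAP_DDR50;

	printf("selected: %s\n", pick_bus_speed(host, card));	/* DDR50 */
	return 0;
}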
config FTGMAC100 tristate "Faraday FTGMAC100 Gigabit Ethernet support" diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 52fe21e1e2c..3b1416e3d21 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data) struct net_device *dev = (struct net_device *)data; struct dev_priv *priv = netdev_priv(dev); unsigned int lnkstat, carrier; + unsigned long flags; + spin_lock_irqsave(&priv->chip_lock, flags); lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; + spin_unlock_irqrestore(&priv->chip_lock, flags); carrier = netif_carrier_ok(dev); if (lnkstat && !carrier) { diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index c423504a755..e46df5331c5 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -315,6 +315,14 @@ union db_prod { u32 raw; }; +/* dropless fc FW/HW related params */ +#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) +#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ + ETH_MAX_AGGREGATION_QUEUES_E1 :\ + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) +#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) +#define FW_PREFETCH_CNT 16 +#define DROPLESS_FC_HEADROOM 100 /* MC hsi */ #define BCM_PAGE_SHIFT 12 @@ -331,15 +339,35 @@ union db_prod { /* SGE ring related macros */ #define NUM_RX_SGE_PAGES 2 #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) -#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) +#define NEXT_PAGE_SGE_DESC_CNT 2 +#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) /* RX_SGE_CNT is promised to be a power of 2 */ #define RX_SGE_MASK (RX_SGE_CNT - 1) #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) #define MAX_RX_SGE (NUM_RX_SGE - 1) #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ - (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) + (MAX_RX_SGE_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ + (x) + 1) #define RX_SGE(x) ((x) & MAX_RX_SGE) +/* + * Number of required SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + * these aggregations will probably consume SGE immidiatelly) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + * after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ +#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) +#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ + MAX_RX_SGE_CNT) +#define SGE_TH_LO(bp) (NUM_SGE_REQ + \ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) +#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) + /* Manipulate a bit vector defined as an array of u64 */ /* Number of bits in one sge_mask array element */ @@ -551,24 +579,43 @@ struct bnx2x_fastpath { #define NUM_TX_RINGS 16 #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) -#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) +#define NEXT_PAGE_TX_DESC_CNT 1 +#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) #define MAX_TX_BD (NUM_TX_BD - 1) #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ - (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) + (MAX_TX_DESC_CNT - 1)) ? 
\ + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ + (x) + 1) #define TX_BD(x) ((x) & MAX_TX_BD) #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ #define NUM_RX_RINGS 8 #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) -#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) +#define NEXT_PAGE_RX_DESC_CNT 2 +#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) #define RX_DESC_MASK (RX_DESC_CNT - 1) #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) #define MAX_RX_BD (NUM_RX_BD - 1) #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) -#define MIN_RX_AVAIL 128 + +/* dropless fc calculations for BDs + * + * Number of BDs should as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ BRB_SIZE(bp) +#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ + MAX_RX_DESC_CNT) +#define BD_TH_LO(bp) (NUM_BD_REQ + \ + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ ETH_MIN_RX_CQES_WITH_TPA_E1 : \ @@ -579,7 +626,9 @@ struct bnx2x_fastpath { MIN_RX_AVAIL)) #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ - (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) + (MAX_RX_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ + (x) + 1) #define RX_BD(x) ((x) & MAX_RX_BD) /* @@ -589,14 +638,31 @@ struct bnx2x_fastpath { #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) -#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) +#define NEXT_PAGE_RCQ_DESC_CNT 1 +#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) #define MAX_RCQ_BD (NUM_RCQ_BD - 1) #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ - (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) + (MAX_RCQ_DESC_CNT - 1)) ? 
\ + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ + (x) + 1) #define RCQ_BD(x) ((x) & MAX_RCQ_BD) +/* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ BRB_SIZE(bp) +#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ + MAX_RCQ_DESC_CNT) +#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) + /* This is needed for determining of last_max */ #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) @@ -685,24 +751,17 @@ struct bnx2x_fastpath { #define FP_CSB_FUNC_OFF \ offsetof(struct cstorm_status_block_c, func) -#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ - /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ -#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ - /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ -#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ - /* (HC_INDEX_U_ETH_RX_BD_CONS) */ - -#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ - /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ -#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ +#define HC_INDEX_ETH_RX_CQ_CONS 1 -#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 +#define HC_INDEX_OOO_TX_CQ_CONS 4 +#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 #define BNX2X_RX_SB_INDEX \ (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) @@ -1100,11 +1159,12 @@ struct bnx2x { #define BP_PORT(bp) (bp->pfid & 1) #define BP_FUNC(bp) (bp->pfid) #define BP_ABS_FUNC(bp) (bp->pf_num) -#define BP_E1HVN(bp) (bp->pfid >> 1) -#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ -#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) -#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ - BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) +#define BP_VN(bp) ((bp)->pfid >> 1) +#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) +#define BP_L_ID(bp) (BP_VN(bp) << 2) +#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 
2 : 1)) +#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) struct net_device *dev; struct pci_dev *pdev; @@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ - BP_E1HVN(bp)) + BP_VN(bp)) #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ E1HVN_MAX) @@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, /* must be used on a CID before placing it on a HW ring */ #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ - (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ (x)) #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 37e5790681a..c4cbf973641 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp) void bnx2x_init_rx_rings(struct bnx2x *bp) { int func = BP_FUNC(bp); - int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2; u16 ring_prod; int i, j; @@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) if (!fp->disable_tpa) { /* Fill the per-aggregtion pool */ - for (i = 0; i < max_agg_queues; i++) { + for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; struct sw_rx_bd *first_buf = @@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) bnx2x_free_rx_sge_range(bp, fp, ring_prod); bnx2x_free_tpa_pool(bp, fp, - max_agg_queues); + MAX_AGG_QS(bp)); fp->disable_tpa = 1; ring_prod = 0; break; @@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) bnx2x_free_rx_bds(fp); if (!fp->disable_tpa) - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? - ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); } } @@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) struct bnx2x_fastpath *fp = &bp->fp[index]; int ring_size = 0; u8 cos; + int rx_ring_size = 0; /* if rx_ring_size specified - use it */ - int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : - MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + if (!bp->rx_ring_size) { - /* allocate at least number of buffers required by FW */ - rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : - MIN_RX_SIZE_TPA, - rx_ring_size); + rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + + /* allocate at least number of buffers required by FW */ + rx_ring_size = max_t(int, bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA, rx_ring_size); + + bp->rx_ring_size = rx_ring_size; + } else + rx_ring_size = bp->rx_ring_size; /* Common */ sb = &bnx2x_fp(bp, index, status_blk); diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 221863059da..cf3e47914dd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } /* advertise the requested speed and duplex if supported */ - cmd->advertising &= bp->port.supported[cfg_idx]; + if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { + DP(NETIF_MSG_LINK, "Advertisement parameters " + "are not supported\n"); + return -EINVAL; + } bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; - bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; - bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | cmd->advertising); + if (cmd->advertising) { + + bp->link_params.speed_cap_mask[cfg_idx] = 0; + if (cmd->advertising & ADVERTISED_10baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; + } + if (cmd->advertising & ADVERTISED_10baseT_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; + if (cmd->advertising & ADVERTISED_100baseT_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; + + if (cmd->advertising & ADVERTISED_100baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; + } + if (cmd->advertising & ADVERTISED_1000baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + } + if (cmd->advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseKX_Full)) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + + if (cmd->advertising & (ADVERTISED_10000baseT_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_10000baseKR_Full)) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; + } } else { /* forced speed */ /* advertise the requested speed and duplex if supported */ switch (speed) { @@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, if (bp->rx_ring_size) ering->rx_pending = bp->rx_ring_size; else - if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) - ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; - else - ering->rx_pending = MAX_RX_AVAIL; + ering->rx_pending = MAX_RX_AVAIL; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = 0; diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index d45b1555a60..ba15bdc5a1a 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, { u32 nig_reg_adress_crd_weight = 0; u32 pbf_reg_adress_crd_weight = 0; - /* Calculate and set BW for this COS*/ - const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; - const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; + /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ + const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; + const u32 cos_bw_pbf = ((bw ? 
bw : 1) * min_w_val_pbf) / total_bw; switch (cos_entry) { case 0: @@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw( /* Calculate total BW requested */ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { - - if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { - DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" - "was set to 0\n"); - return -EINVAL; + *total_bw += + ets_params->cos[cos_idx].params.bw_params.bw; } - *total_bw += - ets_params->cos[cos_idx].params.bw_params.bw; - } } - /*Check taotl BW is valid */ + /* Check total BW is valid */ if ((100 != *total_bw) || (0 == *total_bw)) { if (0 == *total_bw) { DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" @@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params, /* Check loopback mode */ if (lb) - val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); bnx2x_set_xumac_nig(params, ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); @@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + /* Advertised and set FEC (Forward Error Correction) */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); + /* Enable CL37 BAM */ if (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. @@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params, (tmp | EMAC_LED_OVERRIDE)); /* * return here without enabling traffic - * LED blink andsetting rate in ON mode. + * LED blink and setting rate in ON mode. * In oper mode, enabling LED blink * and setting rate is needed. 
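(One more aside, referring back to the dropless flow-control macros added in the bnx2x.h hunk earlier in this series: the SGE and BD thresholds are plain arithmetic over the BRB size, the number of aggregation queues, the page-crossing "next" descriptors, and a fixed headroom. The standalone sketch below evaluates those formulas with illustrative stand-in constants; the real values come from the firmware HSI headers and per-chip macros, so the numbers printed are only an example of the calculation, not the driver's actual thresholds.)

/* Worked evaluation of the dropless-FC threshold formulas; all constants
 * below are illustrative stand-ins, not the HSI values. */
#include <stdio.h>

#define BRB_SIZE		512	/* BRB blocks, non-E3 case */
#define MAX_AGG_QS		64	/* TPA aggregation queues */
#define MAX_SPQ_PENDING		8
#define FW_DROP_LEVEL		(3 + MAX_SPQ_PENDING + MAX_AGG_QS)
#define FW_PREFETCH_CNT		16
#define DROPLESS_FC_HEADROOM	100

#define MAX_RX_SGE_CNT		510	/* SGEs per page minus 2 "next" entries */
#define NEXT_PAGE_SGE_DESC_CNT	2
#define MAX_RX_DESC_CNT		254	/* RX BDs per page minus 2 "next" entries */
#define NEXT_PAGE_RX_DESC_CNT	2

int main(void)
{
	/* open aggregations plus half of the remaining BRB blocks */
	int num_sge_req = MAX_AGG_QS + (BRB_SIZE - MAX_AGG_QS) / 2;
	int num_sge_pg = (num_sge_req + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT;
	int sge_th_lo = num_sge_req + num_sge_pg * NEXT_PAGE_SGE_DESC_CNT;
	int sge_th_hi = sge_th_lo + DROPLESS_FC_HEADROOM;

	/* one RX BD per BRB block, plus page-crossing descriptors and the
	 * firmware drop level */
	int num_bd_req = BRB_SIZE;
	int num_bd_pg = (num_bd_req + MAX_RX_DESC_CNT - 1) / MAX_RX_DESC_CNT;
	int bd_th_lo = num_bd_req + num_bd_pg * NEXT_PAGE_RX_DESC_CNT
		       + FW_DROP_LEVEL;
	int bd_th_hi = bd_th_lo + DROPLESS_FC_HEADROOM;

	printf("SGE thresholds: lo=%d hi=%d\n", sge_th_lo, sge_th_hi);
	printf("BD  thresholds: lo=%d hi=%d (prefetch margin %d)\n",
	       bd_th_lo, bd_th_hi, FW_PREFETCH_CNT);
	return 0;
}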
*/ @@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params, * This is a work-around for HW issue found when link * is up in CL73 */ - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + if ((!CHIP_IS_E3(bp)) || + (CHIP_IS_E3(bp) && + mode == LED_MODE_ON)) + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + if (CHIP_IS_E1x(bp) || CHIP_IS_E2(bp) || (mode == LED_MODE_ON)) @@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, - .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_TX_ERROR_CHECK), + .flags = FLAGS_HW_LOCK_REQUIRED, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, .addr = 0xff, .def_md_devad = 0, - .flags = (FLAGS_INIT_XGXS_FIRST | - FLAGS_TX_ERROR_CHECK), + .flags = FLAGS_INIT_XGXS_FIRST, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = { .addr = 0xff, .def_md_devad = 0, .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_INIT_XGXS_FIRST | - FLAGS_TX_ERROR_CHECK), + FLAGS_INIT_XGXS_FIRST), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, .addr = 0xff, .def_md_devad = 0, - .flags = (FLAGS_FAN_FAILURE_DET_REQ | - FLAGS_TX_ERROR_CHECK), + .flags = FLAGS_FAN_FAILURE_DET_REQ, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f74582a22c6..c027e9341a1 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (BP_PORT(bp) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); - opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | - (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); + opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | + (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN @@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) { /* init leading/trailing edge */ if (IS_MF(bp)) { - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + val = (0xee0f | (1 << (BP_VN(bp) + 4))); if (bp->port.pmf) /* enable nig and gpio3 attention */ val |= 0x1100; @@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) /* init leading/trailing edge */ if (IS_MF(bp)) { - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); + val = (0xee0f | (1 << (BP_VN(bp) + 4))); if (bp->port.pmf) /* enable nig and gpio3 attention */ val |= 0x1100; @@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) int vn; bp->vn_weight_sum = 0; - for (vn = VN_0; vn < E1HVN_MAX; vn++) { + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { u32 vn_cfg = bp->mf_config[vn]; u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100; @@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) { struct rate_shaping_vars_per_vn m_rs_vn; struct fairness_vars_per_vn m_fair_vn; u32 vn_cfg = bp->mf_config[vn]; - int func = 2*vn + BP_PORT(bp); + int func = func_by_vn(bp, vn); u16 vn_min_rate, vn_max_rate; int i; @@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) * * and there are 2 functions per port */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); if (func >= E1H_FUNC_MAX) @@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) /* calculate and set min-max rate for each vn */ if (bp->port.pmf) - for (vn = VN_0; vn < E1HVN_MAX; vn++) + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) bnx2x_init_vn_minmax(bp, vn); /* always enable rate shaping and fairness */ @@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) static inline void bnx2x_link_sync_notify(struct bnx2x *bp) { - int port = BP_PORT(bp); int func; int vn; /* Set the attention towards other drivers on the same port */ - for (vn = VN_0; vn < E1HVN_MAX; vn++) { - if (vn == BP_E1HVN(bp)) + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + if (vn == BP_VN(bp)) continue; - func = ((vn << 1) | port); + func = func_by_vn(bp, vn); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); } @@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) bnx2x_dcbx_pmf_update(bp); /* enable nig attention */ - val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); + val = (0xff0f | (1 << (BP_VN(bp) + 4))); if (bp->common.int_block == INT_BLOCK_HC) { REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); @@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, u16 tpa_agg_size = 0; if (!fp->disable_tpa) { - pause->sge_th_hi = 250; - pause->sge_th_lo = 150; + pause->sge_th_lo = SGE_TH_LO(bp); + pause->sge_th_hi = SGE_TH_HI(bp); + + /* validate SGE 
ring has enough to cross high threshold */ + WARN_ON(bp->dropless_fc && + pause->sge_th_hi + FW_PREFETCH_CNT > + MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); + tpa_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); @@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, /* pause - not for e1 */ if (!CHIP_IS_E1(bp)) { - pause->bd_th_hi = 350; - pause->bd_th_lo = 250; - pause->rcq_th_hi = 350; - pause->rcq_th_lo = 250; + pause->bd_th_lo = BD_TH_LO(bp); + pause->bd_th_hi = BD_TH_HI(bp); + + pause->rcq_th_lo = RCQ_TH_LO(bp); + pause->rcq_th_hi = RCQ_TH_HI(bp); + /* + * validate that rings have enough entries to cross + * high thresholds + */ + WARN_ON(bp->dropless_fc && + pause->bd_th_hi + FW_PREFETCH_CNT > + bp->rx_ring_size); + WARN_ON(bp->dropless_fc && + pause->rcq_th_hi + FW_PREFETCH_CNT > + NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); pause->pri_map = 1; } @@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, * For PF Clients it should be the maximum avaliable number. * VF driver(s) may want to define it to a smaller value. */ - rxq_init->max_tpa_queues = - (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + rxq_init->max_tpa_queues = MAX_AGG_QS(bp); rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; rxq_init->fw_sb_id = fp->fw_sb_id; @@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, hc_sm->time_to_expire = 0xFFFFFFFF; } + +/* allocates state machine ids. */ +static inline +void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ + /* zero out state machine indices */ + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + + /* map indices */ + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= + SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +} + static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, u8 vf_valid, int fw_sb_id, int igu_sb_id) { @@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, hc_sm_p = sb_data_e2.common.state_machine; sb_data_p = (u32 *)&sb_data_e2; data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + bnx2x_map_sb_state_machines(sb_data_e2.index_data); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); @@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, hc_sm_p = sb_data_e1x.common.state_machine; sb_data_p = (u32 *)&sb_data_e1x; data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); } bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], @@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * take the UNDI lock to protect undi_unload flow from accessing * registers while 
we're resetting the chip */ - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_reset_common(bp); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); @@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) } REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); @@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) if (CHIP_MODE_IS_4_PORT(bp)) dsb_idx = BP_FUNC(bp); else - dsb_idx = BP_E1HVN(bp); + dsb_idx = BP_VN(bp); prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? IGU_BC_BASE_DSB_PROD + dsb_idx : IGU_NORM_BASE_DSB_PROD + dsb_idx); + /* + * igu prods come in chunks of E1HVN_MAX (4) - + * does not matters what is the current chip mode + */ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { addr = IGU_REG_PROD_CONS_MEMORY + @@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) u32 val; /* The mac address is written to entries 1-4 to preserve entry 0 which is used by the PMF */ - u8 entry = (BP_E1HVN(bp) + 1)*8; + u8 entry = (BP_VN(bp) + 1)*8; val = (mac_addr[0] << 8) | mac_addr[1]; EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); @@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) /* Check if there is any driver already loaded */ val = REG_RD(bp, MISC_REG_UNPREPARED); if (val == 0x1) { - /* Check if it is the UNDI driver + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + /* + * Check if it is the UNDI driver * UNDI driver initializes CID offset for normal bell to 0x7 */ - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); if (val == 0x7) { u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; @@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) bnx2x_fw_command(bp, reset_code, 0); } - /* now it's safe to release the lock */ - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); - bnx2x_undi_int_disable(bp); port = BP_PORT(bp); @@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) bp->fw_seq = (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); - } else - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + } + + /* now it's safe to release the lock */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); } } @@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) { int pfid = BP_FUNC(bp); - int vn = BP_E1HVN(bp); int igu_sb_id; u32 val; u8 fid, igu_sb_cnt = 0; bp->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(bp)) { + int vn = BP_VN(bp); igu_sb_cnt = bp->igu_sb_cnt; bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn) * FP_SB_MAX_E1x; @@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->igu_base_sb = 0; } else { bp->common.int_block = INT_BLOCK_IGU; + + /* do not allow device reset during IGU info preocessing */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { @@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bnx2x_get_igu_cam_info(bp); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); } /* @@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_ov = 0; bp->mf_mode = 0; - vn = BP_E1HVN(bp); + vn = BP_VN(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -9593,13 +9656,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) /* port info */ bnx2x_get_port_hwinfo(bp); - if (!BP_NOMCP(bp)) { - bp->fw_seq = - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK); - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); - } - /* Get MAC addresses */ bnx2x_get_mac_hwinfo(bp); @@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) if (!BP_NOMCP(bp)) bnx2x_undi_unload(bp); + /* init fw_seq after undi_unload! */ + if (!BP_NOMCP(bp)) { + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + } + if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); - /* Clean the following indirect addresses for all functions since it + /* + * Clean the following indirect addresses for all functions since it * is not used by the driver. */ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + + if (CHIP_IS_E1x(bp)) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + } /* * Enable internal target-read (in case we are probed after PF FLR). 
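The bnx2x_main.c hunks above repeatedly replace the open-coded function-number computation (2*vn + BP_PORT(bp)) with a single func_by_vn() helper and bound every per-VN loop by BP_MAX_VN_NUM(bp) instead of the E1H-only E1HVN_MAX constant. A minimal, self-contained sketch of that pattern follows; it is illustrative only, not the driver's code — struct nic, max_vn_num(), and the constants are hypothetical stand-ins, and only the 2*vn + port mapping and the per-chip loop bound are taken from the hunks themselves.

/*
 * Illustrative sketch (hypothetical names): keep the VN-to-PCI-function
 * mapping in one helper and bound per-VN loops by the chip's actual VN
 * count rather than a fixed maximum.
 */
#include <stdio.h>

#define MAX_VN_2PORT 4   /* hypothetical: 2-port chips expose 4 VNs per port */
#define MAX_VN_4PORT 2   /* hypothetical: 4-port mode exposes 2 VNs per port */

struct nic {
	int port;            /* physical port of this function: 0 or 1 */
	int four_port_mode;  /* chip operating mode */
};

/* Single place that encodes func = 2 * vn + port, so callers cannot diverge. */
static int func_by_vn(const struct nic *bp, int vn)
{
	return 2 * vn + bp->port;
}

/* Per-chip VN count, analogous to a BP_MAX_VN_NUM()-style accessor. */
static int max_vn_num(const struct nic *bp)
{
	return bp->four_port_mode ? MAX_VN_4PORT : MAX_VN_2PORT;
}

int main(void)
{
	struct nic bp = { .port = 1, .four_port_mode = 0 };
	int vn;

	/* Iterate only over the VNs this chip actually has. */
	for (vn = 0; vn < max_vn_num(&bp); vn++)
		printf("vn %d maps to PCI function %d\n", vn, func_by_vn(&bp, vn));

	return 0;
}

The design point is simply that once the mapping lives in one helper with a chip-aware loop bound, the link-sync notification, per-VN min/max rate setup, and mf_cfg read paths shown above all compute the same function number.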
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 40266c14e6d..750e8445dac 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -5320,7 +5320,7 @@ #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) -#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) #define XMAC_CTRL_REG_RX_EN (0x1<<1) #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) #define XMAC_CTRL_REG_TX_EN (0x1<<0) @@ -5766,7 +5766,7 @@ #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 #define HW_LOCK_RESOURCE_SPIO 2 -#define HW_LOCK_RESOURCE_UNDI 5 +#define HW_LOCK_RESOURCE_RESET 5 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) @@ -6853,6 +6853,9 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 771f6803b23..9908f2bbcf7 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) break; case MAC_TYPE_NONE: /* unreached */ - BNX2X_ERR("stats updated by DMAE but no MAC active\n"); + DP(BNX2X_MSG_STATS, + "stats updated by DMAE but no MAC active\n"); return -1; default: /* unreached */ @@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) static void bnx2x_func_stats_base_init(struct bnx2x *bp) { - int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; + int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; u32 func_stx; /* sanity */ @@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) func_stx = bp->func_stx; for (vn = VN_0; vn < vn_max; vn++) { - int mb_idx = CHIP_IS_E1x(bp) ? 
2*vn + BP_PORT(bp) : vn; + int mb_idx = BP_FW_MB_IDX_VN(bp, vn); bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); bnx2x_func_stats_init(bp); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index a81249246ec..2adc294f512 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -46,6 +46,7 @@ #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/io.h> #include <linux/can/dev.h> #include <linux/can/error.h> diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8545c7aa93e..a5a89ecb6f3 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) checksum += eeprom_data; } +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif if (checksum == (u16) EEPROM_SUM) return E1000_SUCCESS; else { diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 3e667926940..8dd5fccef72 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) struct ibmveth_adapter *adapter = netdev_priv(dev); unsigned long set_attr, clr_attr, ret_attr; unsigned long set_attr6, clr_attr6; - long ret, ret6; + long ret, ret4, ret6; int rc1 = 0, rc2 = 0; int restart = 0; @@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) set_attr = 0; clr_attr = 0; + set_attr6 = 0; + clr_attr6 = 0; if (data) { set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; @@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { - ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, set_attr, &ret_attr); - if (ret != H_SUCCESS) { + if (ret4 != H_SUCCESS) { netdev_err(dev, "unable to change IPv4 checksum " "offload settings. %d rc=%ld\n", - data, ret); + data, ret4); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + + if (data == 1) + dev->features &= ~NETIF_F_IP_CSUM; - ret = h_illan_attributes(adapter->vdev->unit_address, - set_attr, clr_attr, &ret_attr); } else { adapter->fw_ipv4_csum_support = data; } @@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) if (ret6 != H_SUCCESS) { netdev_err(dev, "unable to change IPv6 checksum " "offload settings. 
%d rc=%ld\n", - data, ret); + data, ret6); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr6, clr_attr6, &ret_attr); + + if (data == 1) + dev->features &= ~NETIF_F_IPV6_CSUM; - ret = h_illan_attributes(adapter->vdev->unit_address, - set_attr6, clr_attr6, - &ret_attr); } else adapter->fw_ipv6_csum_support = data; - if (ret != H_SUCCESS || ret6 != H_SUCCESS) + if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) adapter->rx_csum = data; else rc1 = -EIO; @@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, union ibmveth_buf_desc descs[6]; int last, i; int force_bounce = 0; + dma_addr_t dma_addr; /* * veth handles a maximum of 6 segments including the header, so @@ -994,17 +1004,16 @@ retry_bounce: } /* Map the header */ - descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, - skb_headlen(skb), - DMA_TO_DEVICE); - if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) goto map_failed; descs[0].fields.flags_len = desc_flags | skb_headlen(skb); + descs[0].fields.address = dma_addr; /* Map the frags */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - unsigned long dma_addr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, @@ -1026,7 +1035,12 @@ retry_bounce: netdev->stats.tx_bytes += skb->len; } - for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) + dma_unmap_single(&adapter->vdev->dev, + descs[0].fields.address, + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); + + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, DMA_TO_DEVICE); diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index 59fac77d0db..a09a07197eb 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -127,8 +127,8 @@ struct pch_gbe_regs { /* Reset */ #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ -#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ -#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ +#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ +#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ /* TCP/IP Accelerator Control */ #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ @@ -276,6 +276,9 @@ struct pch_gbe_regs { #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ +/* RX DMA STATUS */ +#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE + /* Wake On LAN Status */ #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ @@ -471,6 +474,7 @@ struct pch_gbe_tx_desc { struct pch_gbe_buffer { struct sk_buff *skb; dma_addr_t dma; + unsigned char *rx_buffer; unsigned long time_stamp; u16 length; bool mapped; @@ -511,6 +515,9 @@ struct pch_gbe_tx_ring { struct pch_gbe_rx_ring { struct pch_gbe_rx_desc *desc; dma_addr_t dma; + unsigned char *rx_buff_pool; + dma_addr_t rx_buff_pool_logic; + unsigned int rx_buff_pool_size; unsigned int size; unsigned int count; unsigned int next_to_use; @@ -622,6 +629,7 @@ struct pch_gbe_adapter { unsigned long rx_buffer_len; unsigned long tx_queue_len; bool have_msi; + bool rx_stop_flag; }; extern const char pch_driver_version[]; diff 
--git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index eac3c5ca973..567ff10889b 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -20,7 +20,6 @@ #include "pch_gbe.h" #include "pch_gbe_api.h" -#include <linux/prefetch.h> #define DRV_VERSION "1.00" const char pch_driver_version[] = DRV_VERSION; @@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 +#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ /* Macros for ML7223 */ #define PCI_VENDOR_ID_ROHM 0x10db #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 + #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION; ) /* Ethertype field values */ +#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 #define PCH_GBE_FRAME_SIZE_2048 2048 #define PCH_GBE_FRAME_SIZE_4096 4096 @@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_INT_ENABLE_MASK ( \ PCH_GBE_INT_RX_DMA_CMPLT | \ PCH_GBE_INT_RX_DSC_EMP | \ + PCH_GBE_INT_RX_FIFO_ERR | \ PCH_GBE_INT_WOL_DET | \ PCH_GBE_INT_TX_CMPLT \ ) +#define PCH_GBE_INT_DISABLE_ALL 0 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; @@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) if (!tmp) pr_err("Error: busy bit is not cleared\n"); } + +/** + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context + * @reg: Pointer of register + * @busy: Busy bit + */ +static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) +{ + u32 tmp; + int ret = -1; + /* wait busy */ + tmp = 20; + while ((ioread32(reg) & bit) && --tmp) + udelay(5); + if (!tmp) + pr_err("Error: busy bit is not cleared\n"); + else + ret = 0; + return ret; +} + /** * pch_gbe_mac_mar_set - Set MAC address register * @hw: Pointer to the HW structure @@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) return; } +static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) +{ + /* Read the MAC address. 
and store to the private data */ + pch_gbe_mac_read_mac_addr(hw); + iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); + pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); + /* Setup the MAC address */ + pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); + return; +} + /** * pch_gbe_mac_init_rx_addrs - Initialize receive address's * @hw: Pointer to the HW structure @@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) tcpip = ioread32(&hw->reg->TCPIP_ACC); - if (netdev->features & NETIF_F_RXCSUM) { - tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; - tcpip |= PCH_GBE_RX_TCPIPACC_EN; - } else { - tcpip |= PCH_GBE_RX_TCPIPACC_OFF; - tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; - } + tcpip |= PCH_GBE_RX_TCPIPACC_OFF; + tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; iowrite32(tcpip, &hw->reg->TCPIP_ACC); return; } @@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) iowrite32(rdba, &hw->reg->RX_DSC_BASE); iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); - - /* Enables Receive DMA */ - rxdma = ioread32(&hw->reg->DMA_CTRL); - rxdma |= PCH_GBE_RX_DMA_EN; - iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Enables Receive */ - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); } /** @@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) spin_unlock_irqrestore(&adapter->stats_lock, flags); } +static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + u32 rxdma; + u16 value; + int ret; + + /* Disable Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma &= ~PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); + /* Wait Rx DMA BUS is IDLE */ + ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); + if (ret) { + /* Disable Bus master */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); + value &= ~PCI_COMMAND_MASTER; + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); + /* Stop Receive */ + pch_gbe_mac_reset_rx(hw); + /* Enable Bus master */ + value |= PCI_COMMAND_MASTER; + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); + } else { + /* Stop Receive */ + pch_gbe_mac_reset_rx(hw); + } +} + +static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +{ + u32 rxdma; + + /* Enables Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma |= PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); + /* Enables Receive */ + iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); + return; +} + /** * pch_gbe_intr - Interrupt Handler * @irq: Interrupt number @@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) if (int_st & PCH_GBE_INT_RX_FRAME_ERR) adapter->stats.intr_rx_frame_err_count++; if (int_st & PCH_GBE_INT_RX_FIFO_ERR) - adapter->stats.intr_rx_fifo_err_count++; + if (!adapter->rx_stop_flag) { + adapter->stats.intr_rx_fifo_err_count++; + pr_debug("Rx fifo over run\n"); + adapter->rx_stop_flag = true; + int_en = ioread32(&hw->reg->INT_EN); + iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), + &hw->reg->INT_EN); + pch_gbe_stop_receive(adapter); + } if (int_st & PCH_GBE_INT_RX_DMA_ERR) adapter->stats.intr_rx_dma_err_count++; if (int_st & PCH_GBE_INT_TX_FIFO_ERR) @@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* When Rx descriptor is empty */ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { adapter->stats.intr_rx_dsc_empty_count++; - pr_err("Rx descriptor is empty\n"); + pr_debug("Rx descriptor is empty\n"); int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & 
~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); if (hw->mac.tx_fc_enable) { @@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, unsigned int i; unsigned int bufsz; - bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; + bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; i = rx_ring->next_to_use; while ((cleaned_count--)) { buffer_info = &rx_ring->buffer_info[i]; - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - } else { - skb = netdev_alloc_skb(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->stats.rx_alloc_buff_failed++; - break; - } - /* 64byte align */ - skb_reserve(skb, PCH_GBE_DMA_ALIGN); - - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; + skb = netdev_alloc_skb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->stats.rx_alloc_buff_failed++; + break; } + /* align */ + skb_reserve(skb, NET_IP_ALIGN); + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, - skb->data, + buffer_info->rx_buffer, buffer_info->length, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { @@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, return; } +static int +pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_buffer *buffer_info; + unsigned int i; + unsigned int bufsz; + unsigned int size; + + bufsz = adapter->rx_buffer_len; + + size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; + rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, + &rx_ring->rx_buff_pool_logic, + GFP_KERNEL); + if (!rx_ring->rx_buff_pool) { + pr_err("Unable to allocate memory for the receive poll buffer\n"); + return -ENOMEM; + } + memset(rx_ring->rx_buff_pool, 0, size); + rx_ring->rx_buff_pool_size = size; + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; + buffer_info->length = bufsz; + } + return 0; +} + /** * pch_gbe_alloc_tx_buffers - Allocate transmit buffers * @adapter: Board private structure @@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, unsigned int i; unsigned int cleaned_count = 0; bool cleaned = false; - struct sk_buff *skb, *new_skb; + struct sk_buff *skb; u8 dma_status; u16 gbec_status; u32 tcp_ip_status; @@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, rx_desc->gbec_status = DSC_INIT16; buffer_info = &rx_ring->buffer_info[i]; skb = buffer_info->skb; + buffer_info->skb = NULL; /* unmap dma */ dma_unmap_single(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_FROM_DEVICE); buffer_info->mapped = false; - /* Prefetch the packet */ - prefetch(skb->data); pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " "TCP:0x%08x] BufInf = 0x%p\n", @@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, pr_err("Receive CRC Error\n"); } else { /* get receive length */ - /* length convert[-3] */ - length = (rx_desc->rx_words_eob) - 3; - - /* Decide the data conversion method */ - if (!(netdev->features & NETIF_F_RXCSUM)) { - /* [Header:14][payload] */ - if (NET_IP_ALIGN) { - /* Because alignment differs, - * the new_skb is newly allocated, - * and data is copied to new_skb.*/ - new_skb = netdev_alloc_skb(netdev, - length + NET_IP_ALIGN); - if (!new_skb) { - /* dorrop error */ - pr_err("New skb allocation " - "Error\n"); - goto dorrop; - 
} - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data, skb->data, - length); - skb = new_skb; - } else { - /* DMA buffer is used as SKB as it is.*/ - buffer_info->skb = NULL; - } - } else { - /* [Header:14][padding:2][payload] */ - /* The length includes padding length */ - length = length - PCH_GBE_DMA_PADDING; - if ((length < copybreak) || - (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { - /* Because alignment differs, - * the new_skb is newly allocated, - * and data is copied to new_skb. - * Padding data is deleted - * at the time of a copy.*/ - new_skb = netdev_alloc_skb(netdev, - length + NET_IP_ALIGN); - if (!new_skb) { - /* dorrop error */ - pr_err("New skb allocation " - "Error\n"); - goto dorrop; - } - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data, skb->data, - ETH_HLEN); - memcpy(&new_skb->data[ETH_HLEN], - &skb->data[ETH_HLEN + - PCH_GBE_DMA_PADDING], - length - ETH_HLEN); - skb = new_skb; - } else { - /* Padding data is deleted - * by moving header data.*/ - memmove(&skb->data[PCH_GBE_DMA_PADDING], - &skb->data[0], ETH_HLEN); - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = NULL; - } - } - /* The length includes FCS length */ - length = length - ETH_FCS_LEN; + /* length convert[-3], length includes FCS length */ + length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; + if (rx_desc->rx_words_eob & 0x02) + length = length - 4; + /* + * buffer_info->rx_buffer: [Header:14][payload] + * skb->data: [Reserve:2][Header:14][payload] + */ + memcpy(skb->data, buffer_info->rx_buffer, length); + /* update status of driver */ adapter->stats.rx_bytes += length; adapter->stats.rx_packets++; @@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, pr_debug("Receive skb->ip_summed: %d length: %d\n", skb->ip_summed, length); } -dorrop: /* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { pch_gbe_alloc_rx_buffers(adapter, rx_ring, @@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) pr_err("Error: can't bring device up\n"); return err; } + err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); + if (err) { + pr_err("Error: can't bring device up\n"); + return err; + } pch_gbe_alloc_tx_buffers(adapter, tx_ring); pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); adapter->tx_queue_len = netdev->tx_queue_len; + pch_gbe_start_receive(&adapter->hw); mod_timer(&adapter->watchdog_timer, jiffies); @@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) void pch_gbe_down(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ @@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter) pch_gbe_reset(adapter); pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); + + pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, + rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); + rx_ring->rx_buff_pool_logic = 0; + rx_ring->rx_buff_pool_size = 0; + rx_ring->rx_buff_pool = NULL; } /** @@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); int max_frame; + unsigned long old_rx_buffer_len = adapter->rx_buffer_len; + int err; max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || @@ -2018,14 
+2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; else - adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; - netdev->mtu = new_mtu; - adapter->hw.mac.max_frame_size = max_frame; + adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; - if (netif_running(netdev)) - pch_gbe_reinit_locked(adapter); - else + if (netif_running(netdev)) { + pch_gbe_down(adapter); + err = pch_gbe_up(adapter); + if (err) { + adapter->rx_buffer_len = old_rx_buffer_len; + pch_gbe_up(adapter); + return -ENOMEM; + } else { + netdev->mtu = new_mtu; + adapter->hw.mac.max_frame_size = max_frame; + } + } else { pch_gbe_reset(adapter); + netdev->mtu = new_mtu; + adapter->hw.mac.max_frame_size = max_frame; + } pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, @@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) int work_done = 0; bool poll_end_flag = false; bool cleaned = false; + u32 int_en; pr_debug("budget : %d\n", budget); @@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) if (!netif_carrier_ok(netdev)) { poll_end_flag = true; } else { - cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + pch_gbe_start_receive(&adapter->hw); + int_en = ioread32(&adapter->hw.reg->INT_EN); + iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), + &adapter->hw.reg->INT_EN); + } + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); if (cleaned) work_done = budget; @@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, /* required last entry */ {0} }; diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index faca764aa21..b59abc706d9 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; dma_addr_t dma_mask = efx->type->max_dma_mask; - bool use_wc; int rc; netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); @@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx) rc = -EIO; goto fail3; } - - /* bug22643: If SR-IOV is enabled then tx push over a write combined - * mapping is unsafe. We need to disable write combining in this case. - * MSI is unsupported when SR-IOV is enabled, and the firmware will - * have removed the MSI capability. So write combining is safe if - * there is an MSI capability. 
- */ - use_wc = (!EFX_WORKAROUND_22643(efx) || - pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); - if (use_wc) - efx->membase = ioremap_wc(efx->membase_phys, - efx->type->mem_map_size); - else - efx->membase = ioremap_nocache(efx->membase_phys, - efx->type->mem_map_size); + efx->membase = ioremap_nocache(efx->membase_phys, + efx->type->mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index cc978803d48..751d1ec112c 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h @@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, _efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[3], reg + 12); #endif - wmb(); mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, __raw_writel((__force u32)value->u32[0], membase + addr); __raw_writel((__force u32)value->u32[1], membase + addr + 4); #endif - wmb(); mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, /* No lock required */ _efx_writed(efx, value->u32[0], reg); - wmb(); } /* Read a 128-bit CSR, locking as appropriate. */ @@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, spin_lock_irqsave(&efx->biu_lock, flags); value->u32[0] = _efx_readd(efx, reg + 0); - rmb(); value->u32[1] = _efx_readd(efx, reg + 4); value->u32[2] = _efx_readd(efx, reg + 8); value->u32[3] = _efx_readd(efx, reg + 12); @@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, value->u64[0] = (__force __le64)__raw_readq(membase + addr); #else value->u32[0] = (__force __le32)__raw_readl(membase + addr); - rmb(); value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); #endif spin_unlock_irqrestore(&efx->biu_lock, flags); @@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, _efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[3], reg + 12); #endif - wmb(); } #define efx_writeo_page(efx, value, reg, page) \ _efx_writeo_page(efx, value, \ diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 3dd45ed61f0..81a42539746 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &nic_data->mcdi; } -static inline void -efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) -{ - struct siena_nic_data *nic_data = efx->nic_data; - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); -} - -static inline void -efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) -{ - struct siena_nic_data *nic_data = efx->nic_data; - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); -} - void efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned pdu = MCDI_PDU(efx); - unsigned doorbell = MCDI_DOORBELL(efx); + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); unsigned int i; efx_dword_t hdr; u32 xflags, seqno; @@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct 
efx_nic *efx, unsigned cmd, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); - efx_mcdi_writed(efx, &hdr, pdu); + efx_writed(efx, &hdr, pdu); for (i = 0; i < inlen; i += 4) - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), - pdu + 4 + i); + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); + + /* Ensure the payload is written out before the header */ + wmb(); /* ring the doorbell with a distinctive value */ - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); - efx_mcdi_writed(efx, &hdr, doorbell); + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int pdu = MCDI_PDU(efx); + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen & 3 || outlen >= 0x100); for (i = 0; i < outlen; i += 4) - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int time, finish; unsigned int respseq, respcmd, error; - unsigned int pdu = MCDI_PDU(efx); + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; @@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = get_seconds(); - efx_mcdi_readd(efx, ®, pdu); + rmb(); + efx_readd(efx, ®, pdu); /* All 1's indicates that shared memory is in reset (and is * not a valid header). Wait for it to come out reset before @@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) respseq, mcdi->seqno); rc = EIO; } else if (error) { - efx_mcdi_readd(efx, ®, pdu + 4); + efx_readd(efx, ®, pdu + 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ @@ -222,21 +210,21 @@ out: /* Test and clear MC-rebooted flag for this port/function */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { - unsigned int addr = MCDI_REBOOT_FLAG(efx); + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); efx_dword_t reg; uint32_t value; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return false; - efx_mcdi_readd(efx, ®, addr); + efx_readd(efx, ®, addr); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); if (value == 0) return 0; EFX_ZERO_DWORD(reg); - efx_mcdi_writed(efx, ®, addr); + efx_writed(efx, ®, addr); if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index bafa23a6874..3edfbaf5f02 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) size = min_t(size_t, table->step, 16); - if (table->offset >= efx->type->mem_map_size) { - /* No longer mapped; return dummy data */ - memcpy(buf, "\xde\xc0\xad\xde", 4); - buf += table->rows * size; - continue; - } - for (i = 0; i < table->rows; i++) { switch (table->step) { case 4: /* 32-bit register or SRAM */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 4bd1f2839df..7443f99c977 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state * @mcdi: Management-Controller-to-Driver Interface - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. 
* @wol_filter_id: Wake-on-LAN packet filter id */ struct siena_nic_data { struct efx_mcdi_iface mcdi; - void __iomem *mcdi_smem; int wol_filter_id; }; diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 5735e84c69d..2c3bd93fab5 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; - /* Initialise MCDI */ - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + - FR_CZ_MC_TREG_SMEM, - FR_CZ_MC_TREG_SMEM_STEP * - FR_CZ_MC_TREG_SMEM_ROWS); - if (!nic_data->mcdi_smem) { - netif_err(efx, probe, efx->net_dev, - "could not map MCDI at %llx+%x\n", - (unsigned long long)efx->membase_phys + - FR_CZ_MC_TREG_SMEM, - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); - rc = -ENOMEM; - goto fail1; - } efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - goto fail2; + goto fail1; /* Let the BMC know that the driver is now in charge of link and * filter settings. We must do this before we reset the NIC */ @@ -324,7 +310,6 @@ fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: - iounmap(nic_data->mcdi_smem); fail1: kfree(efx->nic_data); return rc; @@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx) { - struct siena_nic_data *nic_data = efx->nic_data; - efx_nic_free_buffer(efx, &efx->irq_status); siena_reset_hw(efx, RESET_TYPE_ALL); @@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_drv_attach(efx, false, NULL); /* Tear down the private nic state */ - iounmap(nic_data->mcdi_smem); - kfree(nic_data); + kfree(efx->nic_data); efx->nic_data = NULL; } @@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = { .default_mac_ops = &efx_mcdi_mac_operations, .revision = EFX_REV_SIENA_A0, - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ + .mem_map_size = (FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 99ff11400ce..e4dd3a7f304 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -38,8 +38,6 @@ #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS /* Legacy interrupt storm when interrupt fifo fills */ #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA -/* Write combining and sriov=enabled are incompatible */ -#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA /* Spurious parity errors in TSORT buffers */ #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 15772b1b6a9..13c1f044b40 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -59,6 +59,7 @@ #define USB_PRODUCT_IPHONE_3G 0x1292 #define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_4 0x1297 +#define USB_PRODUCT_IPHONE_4_VZW 0x129c #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + 
IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295..2d394af8217 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah, case ADC_DC_CAL: /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ if (!IS_CHAN_B(chan) && - !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) + !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && + IS_CHAN_HT20(chan))) supported = true; break; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1baca8e4715..fcafec0605f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, regWrites); - if (AR_SREV_9300(ah)) + if (AR_SREV_9330(ah)) REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6530694a59a..722967b86cf 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) mutex_lock(&sc->mutex); cancel_delayed_work_sync(&sc->tx_complete_work); + if (ah->ah_flags & AH_UNPLUGGED) { + ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); + mutex_unlock(&sc->mutex); + return; + } + if (sc->sc_flags & SC_OP_INVALID) { ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 977bd2477c6..164bcae821f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, out: - rs_sta->last_txrate_idx = index; - if (sband->band == IEEE80211_BAND_5GHZ) - info->control.rates[0].idx = rs_sta->last_txrate_idx - - IWL_FIRST_OFDM_RATE; - else + if (sband->band == IEEE80211_BAND_5GHZ) { + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) + index = IWL_FIRST_OFDM_RATE; + rs_sta->last_txrate_idx = index; + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; + } else { + rs_sta->last_txrate_idx = index; info->control.rates[0].idx = rs_sta->last_txrate_idx; + } IWL_DEBUG_RATE(priv, "leave: %d\n", index); } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index a895a099d08..56211006a18 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); - memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); + memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); if (!(cmd.radio_sensor_offset)) cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index a6b2b1db0b1..222d410c586 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c 
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c @@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) cmd = txq->cmd[cmd_index]; meta = &txq->meta[cmd_index]; + txq->time_stamp = jiffies; + iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); /* Input error checking is done when commands are added to queue. */ diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 1bdc1aa305c..04c4e9eb6ee 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->link_state = MAC80211_NOLINK; memset(mac->bssid, 0, 6); + + /* reset sec info */ + rtl_cam_reset_sec_info(hw); + + rtl_cam_reset_all_entry(hw); mac->vendor = PEER_UNKNOWN; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, @@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, *or clear all entry here. */ rtl_cam_delete_one_entry(hw, mac_addr, key_idx); + + rtl_cam_reset_sec_info(hw); + break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 906e7aa55bc..3e52a549622 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, (tcb_desc->rts_use_shortpreamble ? 1 : 0) : (tcb_desc->rts_use_shortgi ? 1 : 0))); if (mac->bw_40) { - if (tcb_desc->packet_bw) { + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { SET_TX_DESC_DATA_BW(txdesc, 1); SET_TX_DESC_DATA_SC(txdesc, 3); + } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ + SET_TX_DESC_DATA_BW(txdesc, 1); + SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); } else { SET_TX_DESC_DATA_BW(txdesc, 0); - if (rate_flag & IEEE80211_TX_RC_DUP_DATA) - SET_TX_DESC_DATA_SC(txdesc, - mac->cur_40_prime_sc); - } + SET_TX_DESC_DATA_SC(txdesc, 0); + } } else { SET_TX_DESC_DATA_BW(txdesc, 0); SET_TX_DESC_DATA_SC(txdesc, 0); diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 753b21aaea6..3ffd9c1acc0 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); + if (dev->bus && dev->bus->self) + pcie_bus_configure_settings(dev->bus, + dev->bus->self->pcie_mpss); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0ce67423a0a..4e84fd4a431 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; -enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; /* * The default CLS is used if arch didn't set CLS explicitly and not diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8473727b29f..f3f94a5c068 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data) * will occur as normal. 
*/ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + (dev->bus->self && + dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) *smpss = 0; if (*smpss > dev->pcie_mpss) @@ -1396,34 +1397,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps) static void pcie_write_mrrs(struct pci_dev *dev, int mps) { - int rc, mrrs; + int rc, mrrs, dev_mpss; - if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { - int dev_mpss = 128 << dev->pcie_mpss; + /* In the "safe" case, do not configure the MRRS. There appear to be + * issues with setting MRRS to 0 on a number of devices. + */ - /* For Max performance, the MRRS must be set to the largest - * supported value. However, it cannot be configured larger - * than the MPS the device or the bus can support. This assumes - * that the largest MRRS available on the device cannot be - * smaller than the device MPSS. - */ - mrrs = mps < dev_mpss ? mps : dev_mpss; - } else - /* In the "safe" case, configure the MRRS for fairness on the - * bus by making all devices have the same size - */ - mrrs = mps; + if (pcie_bus_config != PCIE_BUS_PERFORMANCE) + return; + dev_mpss = 128 << dev->pcie_mpss; + + /* For Max performance, the MRRS must be set to the largest supported + * value. However, it cannot be configured larger than the MPS the + * device or the bus can support. This assumes that the largest MRRS + * available on the device cannot be smaller than the device MPSS. + */ + mrrs = min(mps, dev_mpss); /* MRRS is a R/W register. Invalid values can be written, but a - * subsiquent read will verify if the value is acceptable or not. + * subsequent read will verify if the value is acceptable or not. * If the MRRS value provided is not acceptable (e.g., too large), * shrink the value until it is acceptable to the HW. */ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { + dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" + " to %d. 
If any issues are encountered, please try " + "running with pci=pcie_bus_safe\n", mrrs); rc = pcie_set_readrq(dev, mrrs); if (rc) - dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); + dev_err(&dev->dev, + "Failed attempting to set the MRRS\n"); mrrs /= 2; } @@ -1436,13 +1440,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) if (!pci_is_pcie(dev)) return 0; - dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", + dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); pcie_write_mps(dev, mps); pcie_write_mrrs(dev, mps); - dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", + dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); return 0; @@ -1456,9 +1460,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) { u8 smpss = mpss; - if (!bus->self) - return; - if (!pci_is_pcie(bus->self)) return; diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 335551d333b..14a42a1edc6 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -36,6 +36,7 @@ */ struct ep93xx_rtc { void __iomem *mmio_base; + struct rtc_device *rtc; }; static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, @@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) { struct ep93xx_rtc *ep93xx_rtc; struct resource *res; - struct rtc_device *rtc; int err; ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); @@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; - platform_set_drvdata(pdev, rtc); + platform_set_drvdata(pdev, ep93xx_rtc); - rtc = rtc_device_register(pdev->name, + ep93xx_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { - err = PTR_ERR(rtc); + if (IS_ERR(ep93xx_rtc->rtc)) { + err = PTR_ERR(ep93xx_rtc->rtc); goto exit; } @@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return 0; fail: - rtc_device_unregister(rtc); + rtc_device_unregister(ep93xx_rtc->rtc); exit: platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; @@ -176,11 +176,11 @@ exit: static int __exit ep93xx_rtc_remove(struct platform_device *pdev) { - struct rtc_device *rtc = platform_get_drvdata(pdev); + struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev); sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); platform_set_drvdata(pdev, NULL); - rtc_device_unregister(rtc); + rtc_device_unregister(ep93xx_rtc->rtc); pdev->dev.platform_data = NULL; return 0; diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c016327..d93a9608b1f 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -35,6 +35,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rtc.h> +#include <linux/sched.h> #include <linux/workqueue.h> /* DryIce Register Definitions */ diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c index 075f1708dea..c4cf0573111 100644 --- a/drivers/rtc/rtc-lib.c +++ b/drivers/rtc/rtc-lib.c @@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) time -= tm->tm_hour * 3600; tm->tm_min = time / 60; tm->tm_sec = time - tm->tm_min * 60; + + tm->tm_isdst = 0; } EXPORT_SYMBOL(rtc_time_to_tm); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4e7c04e773e..7639ab906f0 100644 --- a/drivers/rtc/rtc-s3c.c +++ 
b/drivers/rtc/rtc-s3c.c @@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); +static void s3c_rtc_alarm_clk_enable(bool enable) +{ + static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); + static bool alarm_clk_enabled; + unsigned long irq_flags; + + spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); + if (enable) { + if (!alarm_clk_enabled) { + clk_enable(rtc_clk); + alarm_clk_enabled = true; + } + } else { + if (alarm_clk_enabled) { + clk_disable(rtc_clk); + alarm_clk_enabled = false; + } + } + spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); +} + /* IRQ Handlers */ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) @@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); clk_disable(rtc_clk); + + s3c_rtc_alarm_clk_enable(false); + return IRQ_HANDLED; } @@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); clk_disable(rtc_clk); + s3c_rtc_alarm_clk_enable(enabled); + return 0; } diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index 9a81f778d6b..20687d55e7a 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) int res; u8 rd_reg; -#ifdef CONFIG_LOCKDEP - /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which - * we don't want and can't tolerate. Although it might be - * friendlier not to borrow this thread context... - */ - local_irq_enable(); -#endif - res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); if (res) goto out; @@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = { static int __devinit twl_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; - int ret = 0; + int ret = -EINVAL; int irq = platform_get_irq(pdev, 0); u8 rd_reg; if (irq <= 0) - return -EINVAL; - - rtc = rtc_device_register(pdev->name, - &pdev->dev, &twl_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - dev_err(&pdev->dev, "can't register RTC device, err %ld\n", - PTR_ERR(rtc)); - goto out0; - - } - - platform_set_drvdata(pdev, rtc); + goto out1; ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); if (ret < 0) @@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) if (ret < 0) goto out1; - ret = request_irq(irq, twl_rtc_interrupt, - IRQF_TRIGGER_RISING, - dev_name(&rtc->dev), rtc); - if (ret < 0) { - dev_err(&pdev->dev, "IRQ is not free.\n"); - goto out1; - } - if (twl_class_is_6030()) { twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, REG_INT_MSK_LINE_A); @@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) /* Check RTC module status, Enable if it is off */ ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); if (ret < 0) - goto out2; + goto out1; if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); if (ret < 0) - goto out2; + goto out1; } /* init cached IRQ enable bits */ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); if (ret < 0) + goto out1; + + rtc = rtc_device_register(pdev->name, + &pdev->dev, &twl_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + dev_err(&pdev->dev, "can't register RTC device, err %ld\n", + PTR_ERR(rtc)); + goto out1; + } + + ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, + IRQF_TRIGGER_RISING, + 
dev_name(&rtc->dev), rtc); + if (ret < 0) { + dev_err(&pdev->dev, "IRQ is not free.\n"); goto out2; + } - return ret; + platform_set_drvdata(pdev, rtc); + return 0; out2: - free_irq(irq, rtc); -out1: rtc_device_unregister(rtc); -out0: +out1: return ret; } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 9ae80cd5953..dba72a4e6a1 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, nopout_wqe->itt = ((u16)task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_TMF_REQUEST_TYPE_SHIFT)); - nopout_wqe->ttt = nopout_hdr->ttt; + nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); nopout_wqe->flags = 0; if (!unsol) nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ba710e350ac..5d0e9a24ae9 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) u8 flogi_maddr[ETH_ALEN]; const struct net_device_ops *ops; + rtnl_lock(); + /* * Don't listen for Ethernet packets anymore. * synchronize_net() ensures that the packet handlers are not running @@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) " specific feature for LLD.\n"); } + rtnl_unlock(); + /* Release the self-reference taken during fcoe_interface_create() */ fcoe_interface_put(fcoe); } @@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work) fcoe_if_destroy(port->lport); /* Do not tear down the fcoe interface for NPIV port */ - if (!npiv) { - rtnl_lock(); + if (!npiv) fcoe_interface_cleanup(fcoe); - rtnl_unlock(); - } mutex_unlock(&fcoe_config_mutex); } @@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); rc = -EIO; + rtnl_unlock(); fcoe_interface_cleanup(fcoe); - goto out_nodev; + goto out_nortnl; } /* Make this the "master" N_Port */ @@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) out_nodev: rtnl_unlock(); +out_nortnl: mutex_unlock(&fcoe_config_mutex); return rc; } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ec61bdb833a..b200b736b00 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); removed[*nremoved] = h->dev[entry]; (*nremoved)++; + + /* + * New physical devices won't have target/lun assigned yet + * so we need to preserve the values in the slot we are replacing. 
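The twl-rtc hunks above defer IRQ registration until the hardware is initialised and switch to request_threaded_irq() with a NULL primary handler, which is what lets the old CONFIG_LOCKDEP local_irq_enable() workaround go away. A minimal sketch of that registration pattern (illustrative names, not the driver's own; assumes an edge-triggered line, so no IRQF_ONESHOT is required):

	/* needs <linux/interrupt.h> and <linux/platform_device.h> */
	static irqreturn_t my_rtc_thread_fn(int irq, void *dev_id)
	{
		/* runs in sleepable thread context: i2c access, mutexes, etc. */
		return IRQ_HANDLED;
	}

	static int my_rtc_setup_irq(struct platform_device *pdev, int irq, void *dev)
	{
		/* NULL hard-IRQ handler: the core wakes the thread directly */
		return request_threaded_irq(irq, NULL, my_rtc_thread_fn,
					    IRQF_TRIGGER_RISING,
					    dev_name(&pdev->dev), dev);
	}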
+ */ + if (new_entry->target == -1) { + new_entry->target = h->dev[entry]->target; + new_entry->lun = h->dev[entry]->lun; + } + h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; @@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, } static int hpsa_update_device_info(struct ctlr_info *h, - unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, + unsigned char *is_OBDR_device) { -#define OBDR_TAPE_INQ_SIZE 49 + +#define OBDR_SIG_OFFSET 43 +#define OBDR_TAPE_SIG "$DR-10" +#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) +#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) + unsigned char *inq_buff; + unsigned char *obdr_sig; inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); if (!inq_buff) @@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h, else this_device->raid_level = RAID_UNKNOWN; + if (is_OBDR_device) { + /* See if this is a One-Button-Disaster-Recovery device + * by looking for "$DR-10" at offset 43 in inquiry data. + */ + obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; + *is_OBDR_device = (this_device->devtype == TYPE_ROM && + strncmp(obdr_sig, OBDR_TAPE_SIG, + OBDR_SIG_LEN) == 0); + } + kfree(inq_buff); return 0; @@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, return 0; } - if (hpsa_update_device_info(h, scsi3addr, this_device)) + if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) return 0; (*nmsa2xxx_enclosures)++; hpsa_set_bus_target_lun(this_device, bus, target, 0); @@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) */ struct ReportLUNdata *physdev_list = NULL; struct ReportLUNdata *logdev_list = NULL; - unsigned char *inq_buff = NULL; u32 nphysicals = 0; u32 nlogicals = 0; u32 ndev_allocated = 0; @@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) GFP_KERNEL); physdev_list = kzalloc(reportlunsize, GFP_KERNEL); logdev_list = kzalloc(reportlunsize, GFP_KERNEL); - inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); - if (!currentsd || !physdev_list || !logdev_list || - !inq_buff || !tmpdevice) { + if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { dev_err(&h->pdev->dev, "out of memory\n"); goto out; } @@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) /* adjust our table of devices */ nmsa2xxx_enclosures = 0; for (i = 0; i < nphysicals + nlogicals + 1; i++) { - u8 *lunaddrbytes; + u8 *lunaddrbytes, is_OBDR = 0; /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, @@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) continue; /* Get device type, vendor, model, device id */ - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, + &is_OBDR)) continue; /* skip it if we can't talk to it. */ figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, tmpdevice); @@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) hpsa_set_bus_target_lun(this_device, bus, target, lun); switch (this_device->devtype) { - case TYPE_ROM: { + case TYPE_ROM: /* We don't *really* support actual CD-ROM devices, * just "One Button Disaster Recovery" tape drive * which temporarily pretends to be a CD-ROM drive. 
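The OBDR handling above reduces to a fixed-offset signature compare against the standard INQUIRY payload. A standalone restatement, reusing the defines introduced in the hunk (the helper name is hypothetical, not part of hpsa.c):

	/* True when an INQUIRY buffer of at least OBDR_TAPE_INQ_SIZE bytes
	 * carries the "$DR-10" signature at offset 43, i.e. the device is an
	 * OBDR tape temporarily posing as a CD-ROM drive. */
	static bool inquiry_is_obdr(const unsigned char *inq, size_t len)
	{
		if (len < OBDR_SIG_OFFSET + OBDR_SIG_LEN)
			return false;
		return memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
	}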
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) * device by checking for "$DR-10" in bytes 43-48 of * the inquiry data. */ - char obdr_sig[7]; -#define OBDR_TAPE_SIG "$DR-10" - strncpy(obdr_sig, &inq_buff[43], 6); - obdr_sig[6] = '\0'; - if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) - /* Not OBDR device, ignore it. */ - break; - } - ncurrent++; + if (is_OBDR) + ncurrent++; break; case TYPE_DISK: if (i < nphysicals) @@ -1947,7 +1965,6 @@ out: for (i = 0; i < ndev_allocated; i++) kfree(currentsd[i]); kfree(currentsd); - kfree(inq_buff); kfree(physdev_list); kfree(logdev_list); } diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 26072f1e985..6981b773a88 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost) break; case SCU_COMPLETION_TYPE_EVENT: + sci_controller_event_completion(ihost, ent); + break; + case SCU_COMPLETION_TYPE_NOTIFY: { event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); @@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data) struct isci_request *request; struct isci_request *next_request; struct sas_task *task; + u16 active; INIT_LIST_HEAD(&completed_request_list); INIT_LIST_HEAD(&errored_request_list); @@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data) } } + /* the coalesence timeout doubles at each encoding step, so + * update it based on the ilog2 value of the outstanding requests + */ + active = isci_tci_active(ihost); + writel(SMU_ICC_GEN_VAL(NUMBER, active) | + SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), + &ihost->smu_registers->interrupt_coalesce_control); } /** @@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* set the default interrupt coalescence number and timeout value. */ - sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); + sci_controller_set_interrupt_coalescence(ihost, 0, 0); } static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 062101a39f7..9f33831a2f0 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) +/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ +#define ISCI_COALESCE_BASE 9 + /* expander attached sata devices require 3 rnc slots */ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) { diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 61e0d09e2b5..29aa34efb0f 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -59,10 +59,19 @@ #include <linux/firmware.h> #include <linux/efi.h> #include <asm/string.h> +#include <scsi/scsi_host.h> #include "isci.h" #include "task.h" #include "probe_roms.h" +#define MAJ 1 +#define MIN 0 +#define BUILD 0 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ + __stringify(BUILD) + +MODULE_VERSION(DRV_VERSION); + static struct scsi_transport_template *isci_transport_template; static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { @@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1; module_param(max_concurr_spinup, byte, 0); MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + +struct device_attribute *isci_host_attrs[] = { + &dev_attr_isci_id, + NULL +}; + static struct scsi_host_template isci_sht = { .module = THIS_MODULE, @@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = { .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, + .shost_attrs = isci_host_attrs, }; static struct sas_domain_function_template isci_transport_ops = { @@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host) return 0; } -static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); - struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); - struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); - - return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); -} - -static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); - static void isci_unregister(struct isci_host *isci_host) { struct Scsi_Host *shost; @@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host) return; shost = isci_host->shost; - device_remove_file(&shost->shost_dev, &dev_attr_isci_id); sas_unregister_ha(&isci_host->sas_ha); @@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) if (err) goto err_shost_remove; - err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); - if (err) - goto err_unregister_ha; - return isci_host; - err_unregister_ha: - sas_unregister_ha(&(isci_host->sas_ha)); err_shost_remove: scsi_remove_host(shost); err_shost: @@ -540,7 +548,8 @@ static __init int isci_init(void) { int err; - pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); + pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", + DRV_NAME, DRV_VERSION); isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); if (!isci_transport_template) diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 79313a7a235..430fc8ff014 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; + u32 sp_timeouts = 0; iphy->link_layer_registers = reg; @@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); writel(llctl, &iphy->link_layer_registers->link_layer_control); + sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); + + /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ + sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); + + /* Set RATE_CHANGE timeout value to 0x3B (59us). 
This ensures SCU can + * lock with 3Gb drive when SCU max rate is set to 1.5Gb. + */ + sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); + + writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); + if (is_a2(ihost->pdev)) { /* Program the max ARB time for the PHY to 700us so we inter-operate with * the PMC expander which shuts down PHYs if the expander PHY generates too diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 9b266c7428e..00afc738bbe 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers { #define SCU_AFE_XCVRCR_OFFSET 0x00DC #define SCU_AFE_LUTCR_OFFSET 0x00E0 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) + +#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) + #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index a46e07ac789..b5d3a8c4d32 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq) sci_change_state(&ireq->sm, SCI_REQ_ABORTING); return SCI_SUCCESS; case SCI_REQ_TASK_WAIT_TC_RESP: + /* The task frame was already confirmed to have been + * sent by the SCU HW. Since the state machine is + * now only waiting for the task response itself, + * abort the request and complete it immediately + * and don't wait for the task response. + */ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_ABORTING: - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); - return SCI_SUCCESS; + /* If a request has a termination requested twice, return + * a failure indication, since HW confirmation of the first + * abort is still outstanding. + */ case SCI_REQ_COMPLETED: default: dev_warn(&ireq->owning_controller->pdev->dev, @@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion( } } -static void isci_request_process_stp_response(struct sas_task *task, - void *response_buffer) +static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) { - struct dev_to_host_fis *d2h_reg_fis = response_buffer; struct task_status_struct *ts = &task->task_status; struct ata_task_resp *resp = (void *)&ts->buf[0]; - resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); - memcpy(&resp->ending_fis[0], response_buffer + 16, 24); + resp->frame_len = sizeof(*fis); + memcpy(resp->ending_fis, fis, sizeof(*fis)); ts->buf_valid_size = sizeof(*resp); - /** - * If the device fault bit is set in the status register, then + /* If the device fault bit is set in the status register, then * set the sense data and return. 
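The sas_phy_timeouts update above is the usual read-modify-write of one packed register field; in isolated form (a sketch using the same SCU_SAS_PHYTOV_GEN_VAL macro defined in registers.h, with a placeholder function name):

	/* Clear the whole RATE_CHANGE field first, then OR in the new encoded
	 * value, so the neighbouring timeout fields are preserved. */
	static void set_rate_change_timeout(void __iomem *reg, u32 new_val)
	{
		u32 v = readl(reg);

		v &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);	/* clear field */
		v |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, new_val);	/* set new value */
		writel(v, reg);
	}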
*/ - if (d2h_reg_fis->status & ATA_DF) + if (fis->status & ATA_DF) ts->stat = SAS_PROTO_RESPONSE; else ts->stat = SAM_STAT_GOOD; @@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost, { struct sas_task *task = isci_request_access_task(request); struct ssp_response_iu *resp_iu; - void *resp_buf; unsigned long task_flags; struct isci_remote_device *idev = isci_lookup_device(task->dev); enum service_response response = SAS_TASK_UNDELIVERED; @@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, task); if (sas_protocol_ata(task->task_proto)) { - resp_buf = &request->stp.rsp; - isci_request_process_stp_response(task, - resp_buf); + isci_process_stp_response(task, &request->stp.rsp); } else if (SAS_PROTOCOL_SSP == task->task_proto) { /* crack the iu response buffer. */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index e9e1e2abacb..16f88ab939c 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) */ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); /* * The Unsolicited Frame buffers are set at the start of the UF diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 31cb9506f52..75d896686f5 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -214,7 +214,7 @@ struct sci_uf_address_table_array { * starting address of the UF address table. * 64-bit pointers are required by the hardware. */ - dma_addr_t *array; + u64 *array; /** * This field specifies the physical address location for the UF diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 01ff082dc34..d261e982a2f 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, */ error = lport->tt.frame_send(lport, fp); + if (fh->fh_type == FC_TYPE_BLS) + return error; + /* * Update the exchange and sequence flags, * assuming all frames for the sequence have been sent. @@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp, } /** - * fc_seq_exch_abort() - Abort an exchange and sequence - * @req_sp: The sequence to be aborted + * fc_exch_abort_locked() - Abort an exchange + * @ep: The exchange to be aborted * @timer_msec: The period of time to wait before aborting * - * Generally called because of a timeout or an abort from the upper layer. 
+ * Locking notes: Called with exch lock held + * + * Return value: 0 on success else error code */ -static int fc_seq_exch_abort(const struct fc_seq *req_sp, - unsigned int timer_msec) +static int fc_exch_abort_locked(struct fc_exch *ep, + unsigned int timer_msec) { struct fc_seq *sp; - struct fc_exch *ep; struct fc_frame *fp; int error; - ep = fc_seq_exch(req_sp); - - spin_lock_bh(&ep->ex_lock); if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || - ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { - spin_unlock_bh(&ep->ex_lock); + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) return -ENXIO; - } /* * Send the abort on a new sequence if possible. */ sp = fc_seq_start_next_locked(&ep->seq); - if (!sp) { - spin_unlock_bh(&ep->ex_lock); + if (!sp) return -ENOMEM; - } ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; if (timer_msec) fc_exch_timer_set_locked(ep, timer_msec); - spin_unlock_bh(&ep->ex_lock); /* * If not logged into the fabric, don't send ABTS but leave @@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp, } /** + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp: The sequence to be aborted + * @timer_msec: The period of time to wait before aborting + * + * Generally called because of a timeout or an abort from the upper layer. + * + * Return value: 0 on success else error code + */ +static int fc_seq_exch_abort(const struct fc_seq *req_sp, + unsigned int timer_msec) +{ + struct fc_exch *ep; + int error; + + ep = fc_seq_exch(req_sp); + spin_lock_bh(&ep->ex_lock); + error = fc_exch_abort_locked(ep, timer_msec); + spin_unlock_bh(&ep->ex_lock); + return error; +} + +/** * fc_exch_timeout() - Handle exchange timer expiration * @work: The work_struct identifying the exchange that timed out */ @@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep) int rc = 1; spin_lock_bh(&ep->ex_lock); + fc_exch_abort_locked(ep, 0); ep->state |= FC_EX_RST_CLEANUP; if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ @@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, struct fc_exch *ep; struct fc_seq *sp = NULL; struct fc_frame_header *fh; + struct fc_fcp_pkt *fsp = NULL; int rc = 1; ep = fc_exch_alloc(lport, fp); @@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { + fsp = fr_fsp(fp); fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); + } if (unlikely(lport->tt.frame_send(lport, fp))) goto err; @@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, spin_unlock_bh(&ep->ex_lock); return sp; err: - fc_fcp_ddp_done(fr_fsp(fp)); + if (fsp) + fc_fcp_ddp_done(fsp); rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index afb63c84314..4c41ee816f0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) struct fc_fcp_internal *si; int rc = FAILED; unsigned long flags; + int rval; + + rval = fc_block_scsi_eh(sc_cmd); + if (rval) + return rval; lport = shost_priv(sc_cmd->device->host); if (lport->state != LPORT_ST_READY) @@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) int rc = FAILED; int rval; - rval = fc_remote_port_chkready(rport); + rval 
= fc_block_scsi_eh(sc_cmd); if (rval) - goto out; + return rval; lport = shost_priv(sc_cmd->device->host); @@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) FC_SCSI_DBG(lport, "Resetting host\n"); + fc_block_scsi_eh(sc_cmd); + lport->tt.lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e55ed9cf23f..628f347404f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -88,6 +88,7 @@ */ #include <linux/timer.h> +#include <linux/delay.h> #include <linux/slab.h> #include <asm/unaligned.h> @@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport) FCH_EVT_LIPRESET, 0); fc_vports_linkchange(lport); fc_lport_reset_locked(lport); - if (lport->link_up) + if (lport->link_up) { + /* + * Wait upto resource allocation time out before + * doing re-login since incomplete FIP exchanged + * from last session may collide with exchanges + * in new session. + */ + msleep(lport->r_a_tov); fc_lport_enter_flogi(lport); + } } /** diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 7836eb01c7f..a31e05f3bfd 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); } - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { + int prot = 0; vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_user, vha, 0x7082, "Registered for DIF/DIX type 1 and 3 protection.\n"); + if (ql2xenabledif == 1) + prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(vha->host, - SHOST_DIF_TYPE1_PROTECTION + prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2155071f310..d79cd8a5f83 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -8,24 +8,24 @@ /* * Table for showing the current message id in use for particular level * Change this table for addition of log/debug messages. 
- * ----------------------------------------------------- - * | Level | Last Value Used | - * ----------------------------------------------------- - * | Module Init and Probe | 0x0116 | - * | Mailbox commands | 0x111e | - * | Device Discovery | 0x2083 | - * | Queue Command and IO tracing | 0x302e | - * | DPC Thread | 0x401c | - * | Async Events | 0x5059 | - * | Timer Routines | 0x600d | - * | User Space Interactions | 0x709c | - * | Task Management | 0x8043 | - * | AER/EEH | 0x900f | - * | Virtual Port | 0xa007 | - * | ISP82XX Specific | 0xb027 | - * | MultiQ | 0xc00b | - * | Misc | 0xd00b | - * ----------------------------------------------------- + * ---------------------------------------------------------------------- + * | Level | Last Value Used | Holes | + * ---------------------------------------------------------------------- + * | Module Init and Probe | 0x0116 | | + * | Mailbox commands | 0x1126 | | + * | Device Discovery | 0x2083 | | + * | Queue Command and IO tracing | 0x302e | 0x3008 | + * | DPC Thread | 0x401c | | + * | Async Events | 0x5059 | | + * | Timer Routines | 0x600d | | + * | User Space Interactions | 0x709d | | + * | Task Management | 0x8041 | | + * | AER/EEH | 0x900f | | + * | Virtual Port | 0xa007 | | + * | ISP82XX Specific | 0xb04f | | + * | MultiQ | 0xc00b | | + * | Misc | 0xd00b | | + * ---------------------------------------------------------------------- */ #include "qla_def.h" diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index cc5a79259d3..a03eaf40f37 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2529,6 +2529,7 @@ struct qla_hw_data { #define DT_ISP8021 BIT_14 #define DT_ISP_LAST (DT_ISP8021 << 1) +#define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 #define DT_FWI2 BIT_27 #define DT_ZIO_SUPPORTED BIT_28 @@ -2572,6 +2573,7 @@ struct qla_hw_data { #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) +#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 691783abfb6..aa69486dc06 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -537,6 +537,11 @@ struct sts_entry_24xx { /* * If DIF Error is set in comp_status, these additional fields are * defined: + * + * !!! NOTE: Firmware sends expected/actual DIF data in big endian + * format; but all of the "data" field gets swab32-d in the beginning + * of qla2x00_status_entry(). + * * &data[10] : uint8_t report_runt_bg[2]; - computed guard * &data[12] : uint8_t actual_dif[8]; - DIF Data received * &data[20] : uint8_t expected_dif[8]; - DIF Data computed diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index def694271bf..37da04d3db2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) req = vha->req; rsp = req->rsp; - atomic_set(&vha->loop_state, LOOP_UPDATE); clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); if (vha->flags.online) { if (!(rval = qla2x00_fw_ready(vha))) { /* Wait at most MAX_TARGET RSCNs for a stable link. */ wait_time = 256; do { - atomic_set(&vha->loop_state, LOOP_UPDATE); - /* Issue a marker after FW becomes ready. 
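The new DT_T10_PI bit and IS_T10_PI_CAPABLE() macro above replace the per-ISP IS_QLA25XX()/IS_QLA81XX() checks at every DIF registration site. The mask those sites pass to scsi_host_set_prot() is composed roughly as in this sketch (abridged to the flags visible in the quoted hunks; the real call sites continue past what is quoted, and DIX type 0 is only added when ql2xenabledif == 1):

	static void register_dif_prot(struct Scsi_Host *host, int enabledif)
	{
		unsigned int prot = 0;

		if (enabledif == 1)
			prot = SHOST_DIX_TYPE0_PROTECTION;

		scsi_host_set_prot(host, prot
				   | SHOST_DIF_TYPE1_PROTECTION
				   | SHOST_DIF_TYPE2_PROTECTION
				   | SHOST_DIF_TYPE3_PROTECTION
				   | SHOST_DIX_TYPE1_PROTECTION);
	}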
*/ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index d2e904bc21c..9902834e0b7 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) fcport->d_id.b.al_pa); } } + +static inline int +qla2x00_hba_err_chk_enabled(srb_t *sp) +{ + /* + * Uncomment when corresponding SCSI changes are done. + * + if (!sp->cmd->prot_chk) + return 0; + * + */ + + switch (scsi_get_prot_op(sp->cmd)) { + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + if (ql2xenablehba_err_chk >= 1) + return 1; + break; + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + if (ql2xenablehba_err_chk >= 2) + return 1; + break; + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + return 1; + } + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 49d6906af88..dbec89622a0 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -709,20 +709,28 @@ struct fw_dif_context { * */ static inline void -qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, +qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, unsigned int protcnt) { - struct sd_dif_tuple *spt; + struct scsi_cmnd *cmd = sp->cmd; scsi_qla_host_t *vha = shost_priv(cmd->device->host); - unsigned char op = scsi_get_prot_op(cmd); switch (scsi_get_prot_type(cmd)) { - /* For TYPE 0 protection: no checking */ case SCSI_PROT_DIF_TYPE0: - pkt->ref_tag_mask[0] = 0x00; - pkt->ref_tag_mask[1] = 0x00; - pkt->ref_tag_mask[2] = 0x00; - pkt->ref_tag_mask[3] = 0x00; + /* + * No check for ql2xenablehba_err_chk, as it would be an + * I/O error if hba tag generation is not done. + */ + pkt->ref_tag = cpu_to_le32((uint32_t) + (0xffffffff & scsi_get_lba(cmd))); + + if (!qla2x00_hba_err_chk_enabled(sp)) + break; + + pkt->ref_tag_mask[0] = 0xff; + pkt->ref_tag_mask[1] = 0xff; + pkt->ref_tag_mask[2] = 0xff; + pkt->ref_tag_mask[3] = 0xff; break; /* @@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, * match LBA in CDB + N */ case SCSI_PROT_DIF_TYPE2: - if (!ql2xenablehba_err_chk) - break; - - if (scsi_prot_sg_count(cmd)) { - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + - scsi_prot_sglist(cmd)[0].offset; - pkt->app_tag = swab32(spt->app_tag); - pkt->app_tag_mask[0] = 0xff; - pkt->app_tag_mask[1] = 0xff; - } + pkt->app_tag = __constant_cpu_to_le16(0); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; pkt->ref_tag = cpu_to_le32((uint32_t) (0xffffffff & scsi_get_lba(cmd))); + if (!qla2x00_hba_err_chk_enabled(sp)) + break; + /* enable ALL bytes of the ref tag */ pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; @@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, * 16 bit app tag. 
*/ case SCSI_PROT_DIF_TYPE1: - if (!ql2xenablehba_err_chk) + pkt->ref_tag = cpu_to_le32((uint32_t) + (0xffffffff & scsi_get_lba(cmd))); + pkt->app_tag = __constant_cpu_to_le16(0); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; + + if (!qla2x00_hba_err_chk_enabled(sp)) break; - if (protcnt && (op == SCSI_PROT_WRITE_STRIP || - op == SCSI_PROT_WRITE_PASS)) { - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + - scsi_prot_sglist(cmd)[0].offset; - ql_dbg(ql_dbg_io, vha, 0x3008, - "LBA from user %p, lba = 0x%x for cmd=%p.\n", - spt, (int)spt->ref_tag, cmd); - pkt->ref_tag = swab32(spt->ref_tag); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; - } else { - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - pkt->app_tag = __constant_cpu_to_le16(0); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; - } /* enable ALL bytes of the ref tag */ pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; @@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, scsi_get_prot_type(cmd), cmd); } +struct qla2_sgx { + dma_addr_t dma_addr; /* OUT */ + uint32_t dma_len; /* OUT */ + + uint32_t tot_bytes; /* IN */ + struct scatterlist *cur_sg; /* IN */ + + /* for book keeping, bzero on initial invocation */ + uint32_t bytes_consumed; + uint32_t num_bytes; + uint32_t tot_partial; + + /* for debugging */ + uint32_t num_sg; + srb_t *sp; +}; static int +qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, + uint32_t *partial) +{ + struct scatterlist *sg; + uint32_t cumulative_partial, sg_len; + dma_addr_t sg_dma_addr; + + if (sgx->num_bytes == sgx->tot_bytes) + return 0; + + sg = sgx->cur_sg; + cumulative_partial = sgx->tot_partial; + + sg_dma_addr = sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; + + if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { + sgx->dma_len = (blk_sz - cumulative_partial); + sgx->tot_partial = 0; + sgx->num_bytes += blk_sz; + *partial = 0; + } else { + sgx->dma_len = sg_len - sgx->bytes_consumed; + sgx->tot_partial += sgx->dma_len; + *partial = 1; + } + + sgx->bytes_consumed += sgx->dma_len; + + if (sg_len == sgx->bytes_consumed) { + sg = sg_next(sg); + sgx->num_sg++; + sgx->cur_sg = sg; + sgx->bytes_consumed = 0; + } + + return 1; +} + +static int +qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, + uint32_t *dsd, uint16_t tot_dsds) +{ + void *next_dsd; + uint8_t avail_dsds = 0; + uint32_t dsd_list_len; + struct dsd_dma *dsd_ptr; + struct scatterlist *sg_prot; + uint32_t *cur_dsd = dsd; + uint16_t used_dsds = tot_dsds; + + uint32_t prot_int; + uint32_t partial; + struct qla2_sgx sgx; + dma_addr_t sle_dma; + uint32_t sle_dma_len, tot_prot_dma_len = 0; + struct scsi_cmnd *cmd = sp->cmd; + + prot_int = cmd->device->sector_size; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + sgx.tot_bytes = scsi_bufflen(sp->cmd); + sgx.cur_sg = scsi_sglist(sp->cmd); + sgx.sp = sp; + + sg_prot = scsi_prot_sglist(sp->cmd); + + while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { + + sle_dma = sgx.dma_addr; + sle_dma_len = sgx.dma_len; +alloc_and_fill: + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
+ QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); + if (!dsd_ptr) + return 1; + + /* allocate new list */ + dsd_ptr->dsd_addr = next_dsd = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + + if (!next_dsd) { + /* + * Need to cleanup only this dsd_ptr, rest + * will be done by sp_free_dma() + */ + kfree(dsd_ptr); + return 1; + } + + list_add_tail(&dsd_ptr->list, + &((struct crc_context *)sp->ctx)->dsd_list); + + sp->flags |= SRB_CRC_CTX_DSD_VALID; + + /* add new list to cmd iocb or last list */ + *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = dsd_list_len; + cur_dsd = (uint32_t *)next_dsd; + } + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sle_dma_len); + avail_dsds--; + + if (partial == 0) { + /* Got a full protection interval */ + sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; + sle_dma_len = 8; + + tot_prot_dma_len += sle_dma_len; + if (tot_prot_dma_len == sg_dma_len(sg_prot)) { + tot_prot_dma_len = 0; + sg_prot = sg_next(sg_prot); + } + + partial = 1; /* So as to not re-enter this block */ + goto alloc_and_fill; + } + } + /* Null termination */ + *cur_dsd++ = 0; + *cur_dsd++ = 0; + *cur_dsd++ = 0; + return 0; +} +static int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, uint16_t tot_dsds) { @@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *cur_seg; int sgc; - uint32_t total_bytes; + uint32_t total_bytes = 0; uint32_t data_bytes; uint32_t dif_bytes; uint8_t bundling = 1; @@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, __constant_cpu_to_le16(CF_READ_DATA); } - tot_prot_dsds = scsi_prot_sg_count(cmd); - if (!tot_prot_dsds) + if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) bundling = 0; /* Allocate CRC context from global pool */ @@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); - qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) + qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) &crc_ctx_pkt->ref_tag, tot_prot_dsds); cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); @@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); - host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( @@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ /* Compute dif len and adjust data len to incude protection */ - total_bytes = data_bytes; dif_bytes = 0; blk_size = cmd->device->sector_size; - if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { - dif_bytes = (data_bytes / blk_size) * 8; - total_bytes += dif_bytes; + dif_bytes = (data_bytes / blk_size) 
* 8; + + switch (scsi_get_prot_op(sp->cmd)) { + case SCSI_PROT_READ_INSERT: + case SCSI_PROT_WRITE_STRIP: + total_bytes = data_bytes; + data_bytes += dif_bytes; + break; + + case SCSI_PROT_READ_STRIP: + case SCSI_PROT_WRITE_INSERT: + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + total_bytes = data_bytes + dif_bytes; + break; + default: + BUG(); } - if (!ql2xenablehba_err_chk) + if (!qla2x00_hba_err_chk_enabled(sp)) fw_prot_opts |= 0x10; /* Disable Guard tag checking */ if (!bundling) { @@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->control_flags |= __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); - if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, + + if (!bundling && tot_prot_dsds) { + if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, + cur_dsd, tot_dsds)) + goto crc_queuing_error; + } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, (tot_dsds - tot_prot_dsds))) goto crc_queuing_error; @@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp) goto queuing_error; else sp->flags |= SRB_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + struct qla2_sgx sgx; + uint32_t partial; + + memset(&sgx, 0, sizeof(struct qla2_sgx)); + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + nseg = 0; + while (qla24xx_get_one_block_sg( + cmd->device->sector_size, &sgx, &partial)) + nseg++; + } } else nseg = 0; @@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp) goto queuing_error; else sp->flags |= SRB_CRC_PROT_DMA_VALID; + + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; + } } else { nseg = 0; } @@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Build header part of command packet (excluding the OPCODE). */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; + sp->handle = handle; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index b16b7725dee..646fc5263d5 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -719,7 +719,6 @@ skip_rio: vha->flags.rscn_queue_overflow = 1; } - atomic_set(&vha->loop_state, LOOP_UPDATE); atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; @@ -1435,25 +1434,27 @@ struct scsi_dif_tuple { * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST * to indicate to the kernel that the HBA detected error. 
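The byte accounting above assumes one 8-byte DIF tuple per logical block; whether those bytes are counted into total_bytes depends on the protection operation. A compact restatement of that switch (sketch, hypothetical helper name):

	static u32 dif_total_bytes(u32 data_bytes, u32 blk_size, int prot_op)
	{
		u32 dif_bytes = (data_bytes / blk_size) * 8;

		switch (prot_op) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* tuples excluded from the total for these two ops */
			return data_bytes;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* tuples included for the remaining combinations */
			return data_bytes + dif_bytes;
		default:
			return data_bytes;
		}
	}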
*/ -static inline void +static inline int qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_cmnd *cmd = sp->cmd; - struct scsi_dif_tuple *ep = - (struct scsi_dif_tuple *)&sts24->data[20]; - struct scsi_dif_tuple *ap = - (struct scsi_dif_tuple *)&sts24->data[12]; + uint8_t *ap = &sts24->data[12]; + uint8_t *ep = &sts24->data[20]; uint32_t e_ref_tag, a_ref_tag; uint16_t e_app_tag, a_app_tag; uint16_t e_guard, a_guard; - e_ref_tag = be32_to_cpu(ep->ref_tag); - a_ref_tag = be32_to_cpu(ap->ref_tag); - e_app_tag = be16_to_cpu(ep->app_tag); - a_app_tag = be16_to_cpu(ap->app_tag); - e_guard = be16_to_cpu(ep->guard); - a_guard = be16_to_cpu(ap->guard); + /* + * swab32 of the "data" field in the beginning of qla2x00_status_entry() + * would make guard field appear at offset 2 + */ + a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); + a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); + a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); + e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); + e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); + e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); @@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); + /* + * Ignore sector if: + * For type 3: ref & app tag is all 'f's + * For type 0,1,2: app tag is all 'f's + */ + if ((a_app_tag == 0xffff) && + ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || + (a_ref_tag == 0xffffffff))) { + uint32_t blocks_done, resid; + sector_t lba_s = scsi_get_lba(cmd); + + /* 2TB boundary case covered automatically with this */ + blocks_done = e_ref_tag - (uint32_t)lba_s + 1; + + resid = scsi_bufflen(cmd) - (blocks_done * + cmd->device->sector_size); + + scsi_set_resid(cmd, resid); + cmd->result = DID_OK << 16; + + /* Update protection tag */ + if (scsi_prot_sg_count(cmd)) { + uint32_t i, j = 0, k = 0, num_ent; + struct scatterlist *sg; + struct sd_dif_tuple *spt; + + /* Patch the corresponding protection tags */ + scsi_for_each_prot_sg(cmd, sg, + scsi_prot_sg_count(cmd), i) { + num_ent = sg_dma_len(sg) / 8; + if (k + num_ent < blocks_done) { + k += num_ent; + continue; + } + j = blocks_done - k - 1; + k = blocks_done; + break; + } + + if (k != blocks_done) { + qla_printk(KERN_WARNING, sp->fcport->vha->hw, + "unexpected tag values tag:lba=%x:%lx)\n", + e_ref_tag, lba_s); + return 1; + } + + spt = page_address(sg_page(sg)) + sg->offset; + spt += j; + + spt->app_tag = 0xffff; + if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) + spt->ref_tag = 0xffffffff; + } + + return 0; + } + /* check guard */ if (e_guard != a_guard) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, @@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return; + return 1; } - /* check appl tag */ - if (e_app_tag != a_app_tag) { + /* check ref tag */ + if (e_ref_tag != a_ref_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x2); + 0x10, 0x3); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return; + return 1; } - /* check ref tag */ - if (e_ref_tag != a_ref_tag) { + /* check appl tag */ + if (e_app_tag != a_app_tag) { scsi_build_sense_buffer(1, 
cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x3); + 0x10, 0x2); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return; + return 1; } + + return 1; } /** @@ -1767,7 +1827,7 @@ check_scsi_status: break; case CS_DIF_ERROR: - qla2x00_handle_dif_error(sp, sts24); + logit = qla2x00_handle_dif_error(sp, sts24); break; default: cp->result = DID_ERROR << 16; @@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) goto skip_msi; } - if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || - !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { + if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { ql_log(ql_log_warn, vha, 0x0035, "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", - ha->pdev->revision, ha->fw_attributes); + ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); goto skip_msix; } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c706ed37000..f488cc69fc7 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) host->can_queue = base_vha->req->length + 128; host->this_id = 255; host->cmd_per_lun = 3; - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5cbf33a50b1..049807cda41 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; reg = &ha->iobase->isp82; - spin_lock_irq(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(&reg->host_int, 0); - spin_unlock_irq(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } @@ -2838,6 +2839,16 @@ sufficient_dsds: int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* build FCP_CMND IU */ + memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 2; + /* * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 
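The qla82xx_msix_rsp_q() change above swaps spin_lock_irq() for spin_lock_irqsave(): a hard-IRQ handler cannot assume interrupts were enabled on entry, so unconditionally re-enabling them on unlock is unsafe. The general shape of the safe pattern, as a sketch with illustrative names:

	static irqreturn_t my_msix_handler(int irq, void *dev_id)
	{
		struct my_hw *hw = dev_id;	/* illustrative type, not the driver's */
		unsigned long flags;

		spin_lock_irqsave(&hw->hardware_lock, flags);
		/* ... drain the response queue, ack the interrupt ... */
		spin_unlock_irqrestore(&hw->hardware_lock, flags);
		return IRQ_HANDLED;
	}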
*/ @@ -2854,16 +2865,6 @@ sufficient_dsds: } } - /* build FCP_CMND IU */ - memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); - int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); - ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; - - if (cmd->sc_data_direction == DMA_TO_DEVICE) - ctx->fcp_cmnd->additional_cdb_len |= 1; - else if (cmd->sc_data_direction == DMA_FROM_DEVICE) - ctx->fcp_cmnd->additional_cdb_len |= 2; - memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e02df276804..4cace3f20c0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to report for target devices."); /* Do not change the value of this after module load */ -int ql2xenabledif = 1; +int ql2xenabledif = 0; module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenabledif, " Enable T10-CRC-DIF " - " Default is 0 - No DIF Support. 1 - Enable it"); + " Default is 0 - No DIF Support. 1 - Enable it" + ", 2 - Enable DIF for all types, except Type 0."); -int ql2xenablehba_err_chk; +int ql2xenablehba_err_chk = 2; module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenablehba_err_chk, - " Enable T10-CRC-DIF Error isolation by HBA" - " Default is 0 - Error isolation disabled, 1 - Enable it"); + " Enable T10-CRC-DIF Error isolation by HBA:\n" + " Default is 1.\n" + " 0 -- Error isolation disabled\n" + " 1 -- Error isolation enabled only for DIX Type 0\n" + " 2 -- Error isolation enabled for all Types\n"); int ql2xiidmaenable=1; module_param(ql2xiidmaenable, int, S_IRUGO); @@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) "Abort command mbx success.\n"); wait = 1; } + + spin_lock_irqsave(&ha->hardware_lock, flags); qla2x00_sp_compl(ha, sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Did the command return during mailbox execution? */ + if (ret == FAILED && !CMD_SP(cmd)) + ret = SUCCESS; /* Wait for the command to be returned. 
*/ if (wait) { @@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->this_id = 255; host->cmd_per_lun = 3; host->unique_id = host->host_no; - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; @@ -2378,13 +2389,16 @@ skip_dpc: "Detected hba at address=%p.\n", ha); - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { + int prot = 0; base_vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_init, base_vha, 0x00f1, "Registering for DIF/DIX type 1 and 3 protection.\n"); + if (ql2xenabledif == 1) + prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(host, - SHOST_DIF_TYPE1_PROTECTION + prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 062c97bf62f..13b6357c1fa 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.07.03-k" +#define QLA2XXX_VERSION "8.03.07.07-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig index 2c33ce6eac1..0f5599e0abf 100644 --- a/drivers/scsi/qla4xxx/Kconfig +++ b/drivers/scsi/qla4xxx/Kconfig @@ -1,6 +1,6 @@ config SCSI_QLA_ISCSI tristate "QLogic ISP4XXX and ISP82XX host adapter family support" - depends on PCI && SCSI + depends on PCI && SCSI && NET select SCSI_ISCSI_ATTRS ---help--- This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 6859af0778c..7611def97d0 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data); static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); -#ifdef CONFIG_COMEDI_PCI +#ifdef CONFIG_ISA_DMA_API static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); +#endif +#ifdef CONFIG_COMEDI_PCI static int labpc_find_device(struct comedi_device *dev, int bus, int slot); #endif static int labpc_dio_mem_callback(int dir, int port, int data, diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 497b2e718a7..5b773160200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules( u8 DataSequenceInOrder = 0; u8 ErrorRecoveryLevel = 0, SessionType = 0; u8 IFMarker = 0, OFMarker = 0; - u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; + u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; u32 FirstBurstLength = 0, MaxBurstLength = 0; struct iscsi_param *param = NULL; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a0d23bc0fc9..f00137f377b 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -875,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess) } /* - * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker - * array counts needed for sync 
and steering. - */ -static int iscsit_determine_sync_and_steering_counts( - struct iscsi_conn *conn, - struct iscsi_data_count *count) -{ - u32 length = count->data_length; - u32 marker, markint; - - count->sync_and_steering = 1; - - marker = (count->type == ISCSI_RX_DATA) ? - conn->of_marker : conn->if_marker; - markint = (count->type == ISCSI_RX_DATA) ? - (conn->conn_ops->OFMarkInt * 4) : - (conn->conn_ops->IFMarkInt * 4); - count->ss_iov_count = count->iov_count; - - while (length > 0) { - if (length >= marker) { - count->ss_iov_count += 3; - count->ss_marker_count += 2; - - length -= marker; - marker = markint; - } else - length = 0; - } - - return 0; -} - -/* * Setup conn->if_marker and conn->of_marker values based upon * the initial marker-less interval. (see iSCSI v19 A.2) */ @@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg( struct kvec iov; u32 tx_hdr_size, data_len; u32 offset = cmd->first_data_sg_off; - int tx_sent; + int tx_sent, iov_off; send_hdr: tx_hdr_size = ISCSI_HDR_LEN; @@ -1310,9 +1276,19 @@ send_hdr: } data_len = cmd->tx_size - tx_hdr_size - cmd->padding; - if (conn->conn_ops->DataDigest) + /* + * Set iov_off used by padding and data digest tx_data() calls below + * in order to determine proper offset into cmd->iov_data[] + */ + if (conn->conn_ops->DataDigest) { data_len -= ISCSI_CRC_LEN; - + if (cmd->padding) + iov_off = (cmd->iov_data_count - 2); + else + iov_off = (cmd->iov_data_count - 1); + } else { + iov_off = (cmd->iov_data_count - 1); + } /* * Perform sendpage() for each page in the scatterlist */ @@ -1341,8 +1317,7 @@ send_pg: send_padding: if (cmd->padding) { - struct kvec *iov_p = - &cmd->iov_data[cmd->iov_data_count-1]; + struct kvec *iov_p = &cmd->iov_data[iov_off++]; tx_sent = tx_data(conn, iov_p, 1, cmd->padding); if (cmd->padding != tx_sent) { @@ -1356,8 +1331,7 @@ send_padding: send_datacrc: if (conn->conn_ops->DataDigest) { - struct kvec *iov_d = - &cmd->iov_data[cmd->iov_data_count]; + struct kvec *iov_d = &cmd->iov_data[iov_off]; tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); if (ISCSI_CRC_LEN != tx_sent) { @@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data( struct iscsi_data_count *count) { int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; - u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; - struct kvec iov[count->ss_iov_count], *iov_p; + struct kvec *iov_p; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) @@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data( memset(&msg, 0, sizeof(struct msghdr)); - if (count->sync_and_steering) { - int size = 0; - u32 i, orig_iov_count = 0; - u32 orig_iov_len = 0, orig_iov_loc = 0; - u32 iov_count = 0, per_iov_bytes = 0; - u32 *rx_marker, old_rx_marker = 0; - struct kvec *iov_record; - - memset(&rx_marker_val, 0, - count->ss_marker_count * sizeof(u32)); - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - - iov_record = count->iov; - orig_iov_count = count->iov_count; - rx_marker = &conn->of_marker; - - i = 0; - size = data; - orig_iov_len = iov_record[orig_iov_loc].iov_len; - while (size > 0) { - pr_debug("rx_data: #1 orig_iov_len %u," - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); - pr_debug("rx_data: #2 rx_marker %u, size" - " %u\n", *rx_marker, size); - - if (orig_iov_len >= *rx_marker) { - iov[iov_count].iov_len = *rx_marker; - iov[iov_count++].iov_base = - (iov_record[orig_iov_loc].iov_base + - per_iov_bytes); - - iov[iov_count].iov_len = (MARKER_SIZE / 2); - iov[iov_count++].iov_base = - &rx_marker_val[rx_marker_iov++]; - iov[iov_count].iov_len 
= (MARKER_SIZE / 2); - iov[iov_count++].iov_base = - &rx_marker_val[rx_marker_iov++]; - old_rx_marker = *rx_marker; - - /* - * OFMarkInt is in 32-bit words. - */ - *rx_marker = (conn->conn_ops->OFMarkInt * 4); - size -= old_rx_marker; - orig_iov_len -= old_rx_marker; - per_iov_bytes += old_rx_marker; - - pr_debug("rx_data: #3 new_rx_marker" - " %u, size %u\n", *rx_marker, size); - } else { - iov[iov_count].iov_len = orig_iov_len; - iov[iov_count++].iov_base = - (iov_record[orig_iov_loc].iov_base + - per_iov_bytes); - - per_iov_bytes = 0; - *rx_marker -= orig_iov_len; - size -= orig_iov_len; - - if (size) - orig_iov_len = - iov_record[++orig_iov_loc].iov_len; - - pr_debug("rx_data: #4 new_rx_marker" - " %u, size %u\n", *rx_marker, size); - } - } - data += (rx_marker_iov * (MARKER_SIZE / 2)); - - iov_p = &iov[0]; - iov_len = iov_count; - - if (iov_count > count->ss_iov_count) { - pr_err("iov_count: %d, count->ss_iov_count:" - " %d\n", iov_count, count->ss_iov_count); - return -1; - } - if (rx_marker_iov > count->ss_marker_count) { - pr_err("rx_marker_iov: %d, count->ss_marker" - "_count: %d\n", rx_marker_iov, - count->ss_marker_count); - return -1; - } - } else { - iov_p = count->iov; - iov_len = count->iov_count; - } + iov_p = count->iov; + iov_len = count->iov_count; while (total_rx < data) { rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, @@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data( rx_loop, total_rx, data); } - if (count->sync_and_steering) { - int j; - for (j = 0; j < rx_marker_iov; j++) { - pr_debug("rx_data: #5 j: %d, offset: %d\n", - j, rx_marker_val[j]); - conn->of_marker_offset = rx_marker_val[j]; - } - total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); - } - return total_rx; } @@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data( struct iscsi_data_count *count) { int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; - u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; - struct kvec iov[count->ss_iov_count], *iov_p; + struct kvec *iov_p; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) @@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data( memset(&msg, 0, sizeof(struct msghdr)); - if (count->sync_and_steering) { - int size = 0; - u32 i, orig_iov_count = 0; - u32 orig_iov_len = 0, orig_iov_loc = 0; - u32 iov_count = 0, per_iov_bytes = 0; - u32 *tx_marker, old_tx_marker = 0; - struct kvec *iov_record; - - memset(&tx_marker_val, 0, - count->ss_marker_count * sizeof(u32)); - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - - iov_record = count->iov; - orig_iov_count = count->iov_count; - tx_marker = &conn->if_marker; - - i = 0; - size = data; - orig_iov_len = iov_record[orig_iov_loc].iov_len; - while (size > 0) { - pr_debug("tx_data: #1 orig_iov_len %u," - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); - pr_debug("tx_data: #2 tx_marker %u, size" - " %u\n", *tx_marker, size); - - if (orig_iov_len >= *tx_marker) { - iov[iov_count].iov_len = *tx_marker; - iov[iov_count++].iov_base = - (iov_record[orig_iov_loc].iov_base + - per_iov_bytes); - - tx_marker_val[tx_marker_iov] = - (size - *tx_marker); - iov[iov_count].iov_len = (MARKER_SIZE / 2); - iov[iov_count++].iov_base = - &tx_marker_val[tx_marker_iov++]; - iov[iov_count].iov_len = (MARKER_SIZE / 2); - iov[iov_count++].iov_base = - &tx_marker_val[tx_marker_iov++]; - old_tx_marker = *tx_marker; - - /* - * IFMarkInt is in 32-bit words. 
- */ - *tx_marker = (conn->conn_ops->IFMarkInt * 4); - size -= old_tx_marker; - orig_iov_len -= old_tx_marker; - per_iov_bytes += old_tx_marker; - - pr_debug("tx_data: #3 new_tx_marker" - " %u, size %u\n", *tx_marker, size); - pr_debug("tx_data: #4 offset %u\n", - tx_marker_val[tx_marker_iov-1]); - } else { - iov[iov_count].iov_len = orig_iov_len; - iov[iov_count++].iov_base - = (iov_record[orig_iov_loc].iov_base + - per_iov_bytes); - - per_iov_bytes = 0; - *tx_marker -= orig_iov_len; - size -= orig_iov_len; - - if (size) - orig_iov_len = - iov_record[++orig_iov_loc].iov_len; - - pr_debug("tx_data: #5 new_tx_marker" - " %u, size %u\n", *tx_marker, size); - } - } - - data += (tx_marker_iov * (MARKER_SIZE / 2)); - - iov_p = &iov[0]; - iov_len = iov_count; - - if (iov_count > count->ss_iov_count) { - pr_err("iov_count: %d, count->ss_iov_count:" - " %d\n", iov_count, count->ss_iov_count); - return -1; - } - if (tx_marker_iov > count->ss_marker_count) { - pr_err("tx_marker_iov: %d, count->ss_marker" - "_count: %d\n", tx_marker_iov, - count->ss_marker_count); - return -1; - } - } else { - iov_p = count->iov; - iov_len = count->iov_count; - } + iov_p = count->iov; + iov_len = count->iov_count; while (total_tx < data) { tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, @@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data( tx_loop, total_tx, data); } - if (count->sync_and_steering) - total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); - return total_tx; } @@ -1702,12 +1486,6 @@ int rx_data( c.data_length = data; c.type = ISCSI_RX_DATA; - if (conn->conn_ops->OFMarker && - (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) - return -1; - } - return iscsit_do_rx_data(conn, &c); } @@ -1728,12 +1506,6 @@ int tx_data( c.data_length = data; c.type = ISCSI_TX_DATA; - if (conn->conn_ops->IFMarker && - (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) - return -1; - } - return iscsit_do_tx_data(conn, &c); } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 89ae923c5da..f04d4ef99dc 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,6 +24,7 @@ */ #include <linux/kernel.h> +#include <linux/ctype.h> #include <asm/unaligned.h> #include <scsi/scsi.h> @@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) return 0; } +static void +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) +{ + unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; + unsigned char *buf = buf_off; + int cnt = 0, next = 1; + /* + * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on + * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field + * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION + * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL + * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure + * per device uniqeness. + */ + while (*p != '\0') { + if (cnt >= 13) + break; + if (!isxdigit(*p)) { + p++; + continue; + } + if (next != 0) { + buf[cnt++] |= hex_to_bin(*p++); + next = 0; + } else { + buf[cnt] = hex_to_bin(*p++) << 4; + next = 1; + } + } +} + /* * Device identification VPD, for a complete list of * DESIGNATOR TYPEs see spc4r17 Table 459. 
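/*
 * A minimal user-space sketch of the nibble-packing idea behind the new
 * target_parse_naa_6h_vendor_specific() helper added above: hex digits of
 * the unit serial string are packed into the NAA designator payload,
 * starting on the low nibble of the first byte (whose high nibble already
 * carries the NAA type) and skipping any non-hex characters. The function
 * names, the sample serial string, and the 13-byte payload length used
 * here are illustrative assumptions, not kernel API.
 */
#include <ctype.h>
#include <stdio.h>

static unsigned char hexval(char c)
{
	/* caller guarantees c is a hex digit */
	return isdigit((unsigned char)c) ? c - '0'
					 : tolower((unsigned char)c) - 'a' + 10;
}

static void pack_serial(const char *serial, unsigned char *buf, int len)
{
	int cnt = 0, low_nibble = 1;

	for (; *serial && cnt < len; serial++) {
		if (!isxdigit((unsigned char)*serial))
			continue;			/* skip '-' and other separators */
		if (low_nibble) {
			buf[cnt++] |= hexval(*serial);	/* complete the current byte */
			low_nibble = 0;
		} else {
			buf[cnt] = hexval(*serial) << 4; /* start a new byte */
			low_nibble = 1;
		}
	}
}

int main(void)
{
	unsigned char naa[13] = { 0x60 };	/* NAA type 6 in the high nibble of byte 0 */
	int i;

	pack_serial("f00d-1234-abcd", naa, 13);
	for (i = 0; i < 13; i++)
		printf("%02x", naa[i]);
	printf("\n");
	return 0;
}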
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ - buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); - hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); + target_parse_naa_6h_vendor_specific(dev, &buf[off]); len = 20; off = (len + 4); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 8d0c58ea631..a4b0a8d27f2 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work) { struct se_device *dev = container_of(work, struct se_device, qf_work_queue); + LIST_HEAD(qf_cmd_list); struct se_cmd *cmd, *cmd_tmp; spin_lock_irq(&dev->qf_cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { + list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); + spin_unlock_irq(&dev->qf_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec(&dev->dev_qf_count); smp_mb__after_atomic_dec(); - spin_unlock_irq(&dev->qf_cmd_lock); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work) * has been added to head of queue */ transport_add_cmd_to_queue(cmd, cmd->t_state); - - spin_lock_irq(&dev->qf_cmd_lock); } - spin_unlock_irq(&dev->qf_cmd_lock); } unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index bd4fe21a23b..3749d8b4b42 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -98,8 +98,7 @@ struct ft_tpg { struct list_head list; /* linkage in ft_lport_acl tpg_list */ struct list_head lun_list; /* head of LUNs */ struct se_portal_group se_tpg; - struct task_struct *thread; /* processing thread */ - struct se_queue_obj qobj; /* queue for processing thread */ + struct workqueue_struct *workqueue; }; struct ft_lport_acl { @@ -110,16 +109,10 @@ struct ft_lport_acl { struct se_wwn fc_lport_wwn; }; -enum ft_cmd_state { - FC_CMD_ST_NEW = 0, - FC_CMD_ST_REJ -}; - /* * Commands */ struct ft_cmd { - enum ft_cmd_state state; u32 lun; /* LUN from request */ struct ft_sess *sess; /* session held for cmd */ struct fc_seq *seq; /* sequence in exchange mgr */ @@ -127,7 +120,7 @@ struct ft_cmd { struct fc_frame *req_frame; unsigned char *cdb; /* pointer to CDB inside frame */ u32 write_data_len; /* data received on writes */ - struct se_queue_req se_req; + struct work_struct work; /* Local sense buffer */ unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; u32 was_ddp_setup:1; /* Set only if ddp is setup */ @@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *); /* * other internal functions. 
*/ -int ft_thread(void *); void ft_recv_req(struct ft_sess *, struct fc_frame *); struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5654dc22f7a..80fbcde00cb 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) int count; se_cmd = &cmd->se_cmd; - pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", - caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); + pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", + caller, cmd, cmd->sess, cmd->seq, se_cmd); pr_debug("%s: cmd %p cdb %p\n", caller, cmd, cmd->cdb); pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); @@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); } -static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) -{ - struct ft_tpg *tpg = sess->tport->tpg; - struct se_queue_obj *qobj = &tpg->qobj; - unsigned long flags; - - qobj = &sess->tport->tpg->qobj; - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); - atomic_inc(&qobj->queue_cnt); - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - - wake_up_process(tpg->thread); -} - -static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) -{ - unsigned long flags; - struct se_queue_req *qr; - - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (list_empty(&qobj->qobj_list)) { - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return NULL; - } - qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); - list_del(&qr->qr_list); - atomic_dec(&qobj->queue_cnt); - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return container_of(qr, struct ft_cmd, se_req); -} - static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; @@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd) int ft_get_cmd_state(struct se_cmd *se_cmd) { - struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - - return cmd->state; + return 0; } int ft_is_state_remove(struct se_cmd *se_cmd) @@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) return 0; } +static void ft_send_work(struct work_struct *work); + /* * Handle incoming FCP command. */ @@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ - ft_queue_cmd(sess, cmd); + + INIT_WORK(&cmd->work, ft_send_work); + queue_work(sess->tport->tpg->workqueue, &cmd->work); return; busy: @@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) /* * Send new command to target. */ -static void ft_send_cmd(struct ft_cmd *cmd) +static void ft_send_work(struct work_struct *work) { + struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); struct se_cmd *se_cmd; struct fcp_cmnd *fcp; - int data_dir; + int data_dir = 0; u32 data_len; int task_attr; int ret; @@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd) err: ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); } - -/* - * Handle request in the command thread. 
- */ -static void ft_exec_req(struct ft_cmd *cmd) -{ - pr_debug("cmd state %x\n", cmd->state); - switch (cmd->state) { - case FC_CMD_ST_NEW: - ft_send_cmd(cmd); - break; - default: - break; - } -} - -/* - * Processing thread. - * Currently one thread per tpg. - */ -int ft_thread(void *arg) -{ - struct ft_tpg *tpg = arg; - struct se_queue_obj *qobj = &tpg->qobj; - struct ft_cmd *cmd; - - while (!kthread_should_stop()) { - schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); - if (kthread_should_stop()) - goto out; - - cmd = ft_dequeue_cmd(qobj); - if (cmd) - ft_exec_req(cmd); - } - -out: - return 0; -} diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index b15879d43e2..8fa39b74f22 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg( tpg->index = index; tpg->lport_acl = lacl; INIT_LIST_HEAD(&tpg->lun_list); - transport_init_queue_obj(&tpg->qobj); ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg( return NULL; } - tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); - if (IS_ERR(tpg->thread)) { + tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); + if (!tpg->workqueue) { kfree(tpg); return NULL; } @@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) pr_debug("del tpg %s\n", config_item_name(&tpg->se_tpg.tpg_group.cg_item)); - kthread_stop(tpg->thread); + destroy_workqueue(tpg->workqueue); /* Wait for sessions to be freed thru RCU, for BUG_ON below */ synchronize_rcu(); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index c37f4cd9645..d35ea5a3d56 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) if (cmd->was_ddp_setup) { BUG_ON(!ep); BUG_ON(!lport); - } - - /* - * Doesn't expect payload if DDP is setup. Payload - * is expected to be copied directly to user buffers - * due to DDP (Large Rx offload), - */ - buf = fc_frame_payload_get(fp, 1); - if (buf) - pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " + /* + * Since DDP (Large Rx offload) was setup for this request, + * payload is expected to be copied directly to user buffers. + */ + buf = fc_frame_payload_get(fp, 1); + if (buf) + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " "cmd->sg_cnt 0x%x. DDP was setup" " hence not expected to receive frame with " - "payload, Frame will be dropped if " - "'Sequence Initiative' bit in f_ctl is " + "payload, Frame will be dropped if" + "'Sequence Initiative' bit in f_ctl is" "not set\n", __func__, ep->xid, f_ctl, cmd->sg, cmd->sg_cnt); - /* - * Invalidate HW DDP context if it was setup for respective - * command. Invalidation of HW DDP context is requited in both - * situation (success and error). - */ - ft_invl_hw_context(cmd); + /* + * Invalidate HW DDP context if it was setup for respective + * command. Invalidation of HW DDP context is requited in both + * situation (success and error). + */ + ft_invl_hw_context(cmd); - /* - * If "Sequence Initiative (TSI)" bit set in f_ctl, means last - * write data frame is received successfully where payload is - * posted directly to user buffer and only the last frame's - * header is posted in receive queue. - * - * If "Sequence Initiative (TSI)" bit is not set, means error - * condition w.r.t. 
DDP, hence drop the packet and let explict - * ABORTS from other end of exchange timer trigger the recovery. - */ - if (f_ctl & FC_FC_SEQ_INIT) - goto last_frame; - else - goto drop; + /* + * If "Sequence Initiative (TSI)" bit set in f_ctl, means last + * write data frame is received successfully where payload is + * posted directly to user buffer and only the last frame's + * header is posted in receive queue. + * + * If "Sequence Initiative (TSI)" bit is not set, means error + * condition w.r.t. DDP, hence drop the packet and let explict + * ABORTS from other end of exchange timer trigger the recovery. + */ + if (f_ctl & FC_FC_SEQ_INIT) + goto last_frame; + else + goto drop; + } rel_off = ntohl(fh->fh_parm_offset); frame_len = fr_len(fp); diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 225123b37f1..58be715913c 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -4450,7 +4450,7 @@ static int __init rs_init(void) #if defined(CONFIG_ETRAX_RS485) #if defined(CONFIG_ETRAX_RS485_ON_PA) - if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, + if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit, rs485_pa_bit)) { printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " "RS485 pin\n"); @@ -4459,7 +4459,7 @@ static int __init rs_init(void) } #endif #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) - if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, + if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit, rs485_port_g_bit)) { printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " "RS485 pin\n"); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1e96d1f1fe6..723f8231193 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) memset(buf, 0, retval); status = 0; - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 54139a2f06c..952e2ded61a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, int status = -EINPROGRESS; struct urb_priv *urb_priv; struct xhci_ep_ctx *ep_ctx; + struct list_head *tmp; u32 trb_comp_code; int ret = 0; + int td_num = 0; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, return -ENODEV; } + /* Count current td numbers if ep->skip is set */ + if (ep->skip) { + list_for_each(tmp, &ep_ring->td_list) + td_num++; + } + event_dma = le64_to_cpu(event->buffer); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); /* Look for common error cases */ @@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto cleanup; } + /* We've skipped all the TDs on the ep ring when ep->skip set */ + if (ep->skip && td_num == 0) { + ep->skip = false; + xhci_dbg(xhci, "All tds on the ep_ring skipped. " + "Clear skip flag.\n"); + ret = 0; + goto cleanup; + } + td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); + if (ep->skip) + td_num--; /* Is this a TRB in the currently executing TD? 
*/ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 80d292fb92d..7363c1b169e 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -19,7 +19,7 @@ #include <asm/backlight.h> #endif -static const char const *backlight_types[] = { +static const char *const backlight_types[] = { [BACKLIGHT_RAW] = "raw", [BACKLIGHT_PLATFORM] = "platform", [BACKLIGHT_FIRMWARE] = "firmware", diff --git a/drivers/xen/events.c b/drivers/xen/events.c index da70f5c32eb..7523719bf8a 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -54,7 +54,7 @@ * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ -static DEFINE_SPINLOCK(irq_mapping_update_lock); +static DEFINE_MUTEX(irq_mapping_update_lock); static LIST_HEAD(xen_irq_list_head); @@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, int irq = -1; struct physdev_irq irq_op; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); irq = find_irq_by_gsi(gsi); if (irq != -1) { @@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, handle_edge_irq, name); out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return irq; } @@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, { int irq, ret; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); irq = xen_allocate_irq_dynamic(); if (irq == -1) @@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, if (ret < 0) goto error_irq; out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); xen_free_irq(irq); return -1; } @@ -740,7 +740,7 @@ int xen_destroy_irq(int irq) struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); desc = irq_to_desc(irq); if (!desc) @@ -766,7 +766,7 @@ int xen_destroy_irq(int irq) xen_free_irq(irq); out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return rc; } @@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq) struct irq_info *info; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info == NULL || info->type != IRQT_PIRQ) @@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq) } irq = -1; out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return irq; } @@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) { int irq; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); irq = evtchn_to_irq[evtchn]; @@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) } out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return irq; } @@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) struct evtchn_bind_ipi bind_ipi; int evtchn, irq; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; @@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) } out: - spin_unlock(&irq_mapping_update_lock); + 
mutex_unlock(&irq_mapping_update_lock); return irq; } @@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) struct evtchn_bind_virq bind_virq; int evtchn, irq; - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; @@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) } out: - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); return irq; } @@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq) struct evtchn_close close; int evtchn = evtchn_from_irq(irq); - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); if (VALID_EVTCHN(evtchn)) { close.port = evtchn; @@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq) xen_free_irq(irq); - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); } int bind_evtchn_to_irqhandler(unsigned int evtchn, @@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq) will also be masked. */ disable_irq(irq); - spin_lock(&irq_mapping_update_lock); + mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(evtchn_to_irq[evtchn] != -1); @@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq) xen_irq_info_evtchn_init(irq, evtchn); - spin_unlock(&irq_mapping_update_lock); + mutex_unlock(&irq_mapping_update_lock); /* new event channels are always bound to cpu 0 */ irq_set_affinity(irq, cpumask_of(0)); diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 46ce357ca1a..410ffd6ceb5 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache; struct inode *v9fs_alloc_inode(struct super_block *sb); void v9fs_destroy_inode(struct inode *inode); -struct inode *v9fs_get_inode(struct super_block *sb, int mode); +struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t); int v9fs_init_inode(struct v9fs_session_info *v9ses, - struct inode *inode, int mode); + struct inode *inode, int mode, dev_t); void v9fs_evict_inode(struct inode *inode); ino_t v9fs_qid2ino(struct p9_qid *qid); void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); @@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode) v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; return; } + +int v9fs_open_to_dotl_flags(int flags); #endif diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 3c173fcc2c5..62857a810a7 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) v9inode = V9FS_I(inode); v9ses = v9fs_inode2v9ses(inode); if (v9fs_proto_dotl(v9ses)) - omode = file->f_flags; + omode = v9fs_open_to_dotl_flags(file->f_flags); else omode = v9fs_uflags2omode(file->f_flags, v9fs_proto_dotu(v9ses)); @@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) /* convert posix lock to p9 tlock args */ memset(&flock, 0, sizeof(flock)); - flock.type = fl->fl_type; + /* map the lock type */ + switch (fl->fl_type) { + case F_RDLCK: + flock.type = P9_LOCK_TYPE_RDLCK; + break; + case F_WRLCK: + flock.type = P9_LOCK_TYPE_WRLCK; + break; + case F_UNLCK: + flock.type = P9_LOCK_TYPE_UNLCK; + break; + } flock.start = fl->fl_start; if (fl->fl_end == OFFSET_MAX) flock.length = 0; @@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) /* convert posix lock to p9 tgetlock args */ memset(&glock, 
0, sizeof(glock)); - glock.type = fl->fl_type; + glock.type = P9_LOCK_TYPE_UNLCK; glock.start = fl->fl_start; if (fl->fl_end == OFFSET_MAX) glock.length = 0; @@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) res = p9_client_getlock_dotl(fid, &glock); if (res < 0) return res; - if (glock.type != F_UNLCK) { - fl->fl_type = glock.type; + /* map 9p lock type to os lock type */ + switch (glock.type) { + case P9_LOCK_TYPE_RDLCK: + fl->fl_type = F_RDLCK; + break; + case P9_LOCK_TYPE_WRLCK: + fl->fl_type = F_WRLCK; + break; + case P9_LOCK_TYPE_UNLCK: + fl->fl_type = F_UNLCK; + break; + } + if (glock.type != P9_LOCK_TYPE_UNLCK) { fl->fl_start = glock.start; if (glock.length == 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = glock.start + glock.length - 1; fl->fl_pid = glock.proc_id; - } else - fl->fl_type = F_UNLCK; - + } return res; } diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 8bb5507e822..e3c03db3c78 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode) /** * p9mode2unixmode- convert plan9 mode bits to unix mode bits * @v9ses: v9fs session information - * @mode: mode to convert + * @stat: p9_wstat from which mode need to be derived + * @rdev: major number, minor number in case of device files. * */ - -static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) +static int p9mode2unixmode(struct v9fs_session_info *v9ses, + struct p9_wstat *stat, dev_t *rdev) { int res; + int mode = stat->mode; - res = mode & 0777; + res = mode & S_IALLUGO; + *rdev = 0; if ((mode & P9_DMDIR) == P9_DMDIR) res |= S_IFDIR; @@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) && (v9ses->nodev == 0)) res |= S_IFIFO; else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) - && (v9ses->nodev == 0)) - res |= S_IFBLK; - else + && (v9ses->nodev == 0)) { + char type = 0, ext[32]; + int major = -1, minor = -1; + + strncpy(ext, stat->extension, sizeof(ext)); + sscanf(ext, "%c %u %u", &type, &major, &minor); + switch (type) { + case 'c': + res |= S_IFCHR; + break; + case 'b': + res |= S_IFBLK; + break; + default: + P9_DPRINTK(P9_DEBUG_ERROR, + "Unknown special type %c %s\n", type, + stat->extension); + }; + *rdev = MKDEV(major, minor); + } else res |= S_IFREG; if (v9fs_proto_dotu(v9ses)) { @@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) if ((mode & P9_DMSETVTX) == P9_DMSETVTX) res |= S_ISVTX; } - return res; } @@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode) } int v9fs_init_inode(struct v9fs_session_info *v9ses, - struct inode *inode, int mode) + struct inode *inode, int mode, dev_t rdev) { int err = 0; inode_init_owner(inode, NULL, mode); inode->i_blocks = 0; - inode->i_rdev = 0; + inode->i_rdev = rdev; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_mapping->a_ops = &v9fs_addr_operations; @@ -335,7 +354,7 @@ error: * */ -struct inode *v9fs_get_inode(struct super_block *sb, int mode) +struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev) { int err; struct inode *inode; @@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode) P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n"); return ERR_PTR(-ENOMEM); } - err = v9fs_init_inode(v9ses, inode, mode); + err = v9fs_init_inode(v9ses, inode, mode, rdev); if (err) { iput(inode); return ERR_PTR(err); @@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode 
*inode) static int v9fs_test_inode(struct inode *inode, void *data) { int umode; + dev_t rdev; struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_wstat *st = (struct p9_wstat *)data; struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); - umode = p9mode2unixmode(v9ses, st->mode); + umode = p9mode2unixmode(v9ses, st, &rdev); /* don't match inode of different type */ if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) return 0; @@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, struct p9_wstat *st, int new) { + dev_t rdev; int retval, umode; unsigned long i_ino; struct inode *inode; @@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, * later. */ inode->i_ino = i_ino; - umode = p9mode2unixmode(v9ses, st->mode); - retval = v9fs_init_inode(v9ses, inode, umode); + umode = p9mode2unixmode(v9ses, st, &rdev); + retval = v9fs_init_inode(v9ses, inode, umode, rdev); if (retval) goto error; @@ -532,6 +553,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, } /** + * v9fs_at_to_dotl_flags- convert Linux specific AT flags to + * plan 9 AT flag. + * @flags: flags to convert + */ +static int v9fs_at_to_dotl_flags(int flags) +{ + int rflags = 0; + if (flags & AT_REMOVEDIR) + rflags |= P9_DOTL_AT_REMOVEDIR; + return rflags; +} + +/** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted * @dentry: dentry that is being deleted @@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags) return retval; } if (v9fs_proto_dotl(v9ses)) - retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags); + retval = p9_client_unlinkat(dfid, dentry->d_name.name, + v9fs_at_to_dotl_flags(flags)); if (retval == -EOPNOTSUPP) { /* Try the one based on path */ v9fid = v9fs_fid_clone(dentry); @@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) goto error; - + d_instantiate(dentry, inode); return ofid; - error: if (ofid) p9_client_clunk(ofid); @@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nameidata) { + struct dentry *res; struct super_block *sb; struct v9fs_session_info *v9ses; struct p9_fid *dfid, *fid; @@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(result); } - - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + /* + * Make sure we don't use a wrong inode due to parallel + * unlink. For cached mode create calls request for new + * inode. But with cache disabled, lookup should do this. + */ + if (v9ses->cache) + inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + else + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { result = PTR_ERR(inode); inode = NULL; goto error; } - result = v9fs_fid_add(dentry, fid); if (result < 0) goto error_iput; - inst_out: - d_add(dentry, inode); - return NULL; - + /* + * If we had a rename on the server and a parallel lookup + * for the new name, then make sure we instantiate with + * the new name. ie look up for a/b, while on server somebody + * moved b under k and client parallely did a lookup for + * k/b. 
+ */ + res = d_materialise_unique(dentry, inode); + if (!IS_ERR(res)) + return res; + result = PTR_ERR(res); error_iput: iput(inode); error: @@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, return PTR_ERR(st); v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); - generic_fillattr(dentry->d_inode, stat); + generic_fillattr(dentry->d_inode, stat); p9stat_free(st); kfree(st); @@ -1086,6 +1133,7 @@ void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, struct super_block *sb) { + mode_t mode; char ext[32]; char tag_name[14]; unsigned int i_nlink; @@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, inode->i_nlink = i_nlink; } } - inode->i_mode = p9mode2unixmode(v9ses, stat->mode); - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) { - char type = 0; - int major = -1; - int minor = -1; - - strncpy(ext, stat->extension, sizeof(ext)); - sscanf(ext, "%c %u %u", &type, &major, &minor); - switch (type) { - case 'c': - inode->i_mode &= ~S_IFBLK; - inode->i_mode |= S_IFCHR; - break; - case 'b': - break; - default: - P9_DPRINTK(P9_DEBUG_ERROR, - "Unknown special type %c %s\n", type, - stat->extension); - }; - inode->i_rdev = MKDEV(major, minor); - init_special_inode(inode, inode->i_mode, inode->i_rdev); - } else - inode->i_rdev = 0; - + mode = stat->mode & S_IALLUGO; + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; i_size_write(inode, stat->length); /* not real number of blocks, but 512 byte ones ... */ @@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) { + int umode; + dev_t rdev; loff_t i_size; struct p9_wstat *st; struct v9fs_session_info *v9ses; @@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) st = p9_client_stat(fid); if (IS_ERR(st)) return PTR_ERR(st); + /* + * Don't update inode if the file type is different + */ + umode = p9mode2unixmode(v9ses, st, &rdev); + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + goto out; spin_lock(&inode->i_lock); /* @@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) if (v9ses->cache) inode->i_size = i_size; spin_unlock(&inode->i_lock); +out: p9stat_free(st); kfree(st); return 0; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index b6c8ed20519..aded79fcd5c 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, * later. 
*/ inode->i_ino = i_ino; - retval = v9fs_init_inode(v9ses, inode, st->st_mode); + retval = v9fs_init_inode(v9ses, inode, + st->st_mode, new_decode_dev(st->st_rdev)); if (retval) goto error; @@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, return inode; } +struct dotl_openflag_map { + int open_flag; + int dotl_flag; +}; + +static int v9fs_mapped_dotl_flags(int flags) +{ + int i; + int rflags = 0; + struct dotl_openflag_map dotl_oflag_map[] = { + { O_CREAT, P9_DOTL_CREATE }, + { O_EXCL, P9_DOTL_EXCL }, + { O_NOCTTY, P9_DOTL_NOCTTY }, + { O_TRUNC, P9_DOTL_TRUNC }, + { O_APPEND, P9_DOTL_APPEND }, + { O_NONBLOCK, P9_DOTL_NONBLOCK }, + { O_DSYNC, P9_DOTL_DSYNC }, + { FASYNC, P9_DOTL_FASYNC }, + { O_DIRECT, P9_DOTL_DIRECT }, + { O_LARGEFILE, P9_DOTL_LARGEFILE }, + { O_DIRECTORY, P9_DOTL_DIRECTORY }, + { O_NOFOLLOW, P9_DOTL_NOFOLLOW }, + { O_NOATIME, P9_DOTL_NOATIME }, + { O_CLOEXEC, P9_DOTL_CLOEXEC }, + { O_SYNC, P9_DOTL_SYNC}, + }; + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { + if (flags & dotl_oflag_map[i].open_flag) + rflags |= dotl_oflag_map[i].dotl_flag; + } + return rflags; +} + +/** + * v9fs_open_to_dotl_flags- convert Linux specific open flags to + * plan 9 open flag. + * @flags: flags to convert + */ +int v9fs_open_to_dotl_flags(int flags) +{ + int rflags = 0; + + /* + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY + * and P9_DOTL_NOACCESS + */ + rflags |= flags & O_ACCMODE; + rflags |= v9fs_mapped_dotl_flags(flags); + + return rflags; +} + /** * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. * @dir: directory inode that is being created @@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, "Failed to get acl values in creat %d\n", err); goto error; } - err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), + mode, gid, &qid); if (err < 0) { P9_DPRINTK(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n", @@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) goto error; + d_instantiate(dentry, inode); /* Now set the ACL based on the default value */ v9fs_set_create_acl(dentry, &dacl, &pacl); @@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, err); goto error; } - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) goto error; + d_instantiate(dentry, inode); fid = NULL; } else { /* @@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, * inode with stat. 
We need to get an inode * so that we can set the acl with dentry */ - inode = v9fs_get_inode(dir->i_sb, mode); + inode = v9fs_get_inode(dir->i_sb, mode, 0); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; @@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) { + mode_t mode; struct v9fs_inode *v9inode = V9FS_I(inode); if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { @@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) inode->i_uid = stat->st_uid; inode->i_gid = stat->st_gid; inode->i_nlink = stat->st_nlink; - inode->i_mode = stat->st_mode; - inode->i_rdev = new_decode_dev(stat->st_rdev); - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) - init_special_inode(inode, inode->i_mode, inode->i_rdev); + mode = stat->st_mode & S_IALLUGO; + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; i_size_write(inode, stat->st_size); inode->i_blocks = stat->st_blocks; @@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, err); goto error; } - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) goto error; + d_instantiate(dentry, inode); fid = NULL; } else { /* Not in cached mode. No need to populate inode with stat */ - inode = v9fs_get_inode(dir->i_sb, S_IFLNK); + inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; @@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, err); goto error; } - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) goto error; + d_instantiate(dentry, inode); fid = NULL; } else { /* * Not in cached mode. No need to populate inode with stat. 
* socket syscall returns a fd, so we need instantiate */ - inode = v9fs_get_inode(dir->i_sb, mode); + inode = v9fs_get_inode(dir->i_sb, mode, rdev); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; @@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) st = p9_client_getattr_dotl(fid, P9_STATS_ALL); if (IS_ERR(st)) return PTR_ERR(st); + /* + * Don't update inode if the file type is different + */ + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + goto out; spin_lock(&inode->i_lock); /* @@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) if (v9ses->cache) inode->i_size = i_size; spin_unlock(&inode->i_lock); +out: kfree(st); return 0; } diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index feef6cdc1fd..c70251d47ed 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, else sb->s_d_op = &v9fs_dentry_operations; - inode = v9fs_get_inode(sb, S_IFDIR | mode); + inode = v9fs_get_inode(sb, S_IFDIR | mode, 0); if (IS_ERR(inode)) { retval = PTR_ERR(inode); goto release_sb; diff --git a/fs/block_dev.c b/fs/block_dev.c index ff77262e887..95f786ec7f0 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); + /* ->release can cause the old bdi to disappear, + * so must switch it out first + */ + bdev_inode_switch_bdi(bdev->bd_inode, + &default_backing_dev_info); } if (bdev->bd_contains == bdev) { if (disk->fops->release) @@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) disk_put_part(bdev->bd_part); bdev->bd_part = NULL; bdev->bd_disk = NULL; - bdev_inode_switch_bdi(bdev->bd_inode, - &default_backing_dev_info); if (bdev != bdev->bd_contains) victim = bdev->bd_contains; bdev->bd_contains = NULL; diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 502b9e98867..d9f99a16edd 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -176,7 +176,11 @@ static inline u64 btrfs_ino(struct inode *inode) { u64 ino = BTRFS_I(inode)->location.objectid; - if (ino <= BTRFS_FIRST_FREE_OBJECTID) + /* + * !ino: btree_inode + * type == BTRFS_ROOT_ITEM_KEY: subvol dir + */ + if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY) ino = inode->i_ino; return ino; } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b910694f61e..a1cb7821bec 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -183,8 +183,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. 
*/ - if (btrfs_is_free_space_inode(root, inode)) + if (btrfs_is_free_space_inode(root, inode)) { path->search_commit_root = 1; + path->skip_locking = 1; + } disk_bytenr = (u64)bio->bi_sector << 9; if (dio) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e7872e485f1..a381cd22f51 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1075,12 +1075,6 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, start_pos = pos & ~((u64)root->sectorsize - 1); last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; - if (start_pos > inode->i_size) { - err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); - if (err) - return err; - } - again: for (i = 0; i < num_pages; i++) { pages[i] = find_or_create_page(inode->i_mapping, index + i, @@ -1338,6 +1332,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; loff_t *ppos = &iocb->ki_pos; + u64 start_pos; ssize_t num_written = 0; ssize_t err = 0; size_t count, ocount; @@ -1386,6 +1381,15 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, file_update_time(file); BTRFS_I(inode)->sequence++; + start_pos = round_down(pos, root->sectorsize); + if (start_pos > i_size_read(inode)) { + err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); + if (err) { + mutex_unlock(&inode->i_mutex); + goto out; + } + } + if (unlikely(file->f_flags & O_DIRECT)) { num_written = __btrfs_direct_write(iocb, iov, nr_segs, pos, ppos, count, ocount); @@ -1813,6 +1817,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) goto out; case SEEK_DATA: case SEEK_HOLE: + if (offset >= i_size_read(inode)) { + mutex_unlock(&inode->i_mutex); + return -ENXIO; + } + ret = find_desired_extent(inode, &offset, origin); if (ret) { mutex_unlock(&inode->i_mutex); @@ -1821,11 +1830,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) } if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { - ret = -EINVAL; + offset = -EINVAL; goto out; } if (offset > inode->i_sb->s_maxbytes) { - ret = -EINVAL; + offset = -EINVAL; goto out; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 6a265b9f85f..41ac927401d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -190,9 +190,11 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_path *path, struct inode *inode) { + struct btrfs_block_rsv *rsv; loff_t oldsize; int ret = 0; + rsv = trans->block_rsv; trans->block_rsv = root->orphan_block_rsv; ret = btrfs_block_rsv_check(trans, root, root->orphan_block_rsv, @@ -210,6 +212,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, */ ret = btrfs_truncate_inode_items(trans, root, inode, 0, BTRFS_EXTENT_DATA_KEY); + + trans->block_rsv = rsv; if (ret) { WARN_ON(1); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0ccc7438ad3..b2d004ad66a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1786,7 +1786,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) &ordered_extent->list); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); - if (!ret) { + if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); } @@ -3510,15 +3510,19 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) err = btrfs_drop_extents(trans, inode, cur_offset, cur_offset + hole_size, &hint_byte, 1); - if 
(err) + if (err) { + btrfs_end_transaction(trans, root); break; + } err = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); - if (err) + if (err) { + btrfs_end_transaction(trans, root); break; + } btrfs_drop_extent_cache(inode, hole_start, last_byte - 1, 0); @@ -3952,7 +3956,6 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *new) { struct inode *inode; - int bad_inode = 0; inode = btrfs_iget_locked(s, location->objectid, root); if (!inode) @@ -3968,15 +3971,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, if (new) *new = 1; } else { - bad_inode = 1; + unlock_new_inode(inode); + iput(inode); + inode = ERR_PTR(-ESTALE); } } - if (bad_inode) { - iput(inode); - inode = ERR_PTR(-ESTALE); - } - return inode; } @@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); kfree(dentry->d_fsdata); dentry->d_fsdata = NULL; - d_clear_need_lookup(dentry); + /* This thing is hashed, drop it for now */ + d_drop(dentry); } else { ret = btrfs_inode_by_name(dir, dentry, &location); } @@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry) static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); + struct dentry *ret; + + ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); + if (unlikely(d_need_lookup(dentry))) { + spin_lock(&dentry->d_lock); + dentry->d_flags &= ~DCACHE_NEED_LOOKUP; + spin_unlock(&dentry->d_lock); + } + return ret; } unsigned char btrfs_filetype_table[] = { @@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* special case for "." 
*/ if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); + over = filldir(dirent, ".", 1, + filp->f_pos, btrfs_ino(inode), DT_DIR); if (over) return 0; filp->f_pos = 1; @@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, if (filp->f_pos == 1) { u64 pino = parent_ino(filp->f_path.dentry); over = filldir(dirent, "..", 2, - 2, pino, DT_DIR); + filp->f_pos, pino, DT_DIR); if (over) return 0; filp->f_pos = 2; @@ -5823,7 +5833,7 @@ again: add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); ret = btrfs_ordered_update_i_size(inode, 0, ordered); - if (!ret) + if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) btrfs_update_inode(trans, root, inode); ret = 0; out_unlock: diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 970977aab22..d11fd28efa6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2177,6 +2177,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (!(src_file->f_mode & FMODE_READ)) goto out_fput; + /* don't make the dst file partly checksummed */ + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) + goto out_fput; + ret = -EISDIR; if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) goto out_fput; @@ -2220,6 +2225,16 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, !IS_ALIGNED(destoff, bs)) goto out_unlock; + if (destoff > inode->i_size) { + ret = btrfs_cont_expand(inode, inode->i_size, destoff); + if (ret) + goto out_unlock; + } + + /* truncate page cache pages from target inode range */ + truncate_inode_pages_range(&inode->i_data, destoff, + PAGE_CACHE_ALIGN(destoff + len) - 1); + /* do any pending delalloc/csum calc on src, one way or another, and lock file content */ while (1) { @@ -2236,10 +2251,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_wait_ordered_range(src, off, len); } - /* truncate page cache pages from target inode range */ - truncate_inode_pages_range(&inode->i_data, off, - ALIGN(off + len, PAGE_CACHE_SIZE) - 1); - /* clone data */ key.objectid = btrfs_ino(src); key.type = BTRFS_EXTENT_DATA_KEY; @@ -2325,14 +2336,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { + /* + * a | --- range to clone ---| b + * | ------------- extent ------------- | + */ + + /* substract range b */ + if (key.offset + datal > off + len) + datal = off + len - key.offset; + + /* substract range a */ if (off > key.offset) { datao += off - key.offset; datal -= off - key.offset; } - if (key.offset + datal > off + len) - datal = off + len - key.offset; - ret = btrfs_drop_extents(trans, inode, new_key.offset, new_key.offset + datal, @@ -2429,7 +2447,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (endoff > inode->i_size) btrfs_i_size_write(inode, endoff); - BTRFS_I(inode)->flags = BTRFS_I(src)->flags; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); btrfs_end_transaction(trans, root); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 7dc36fab4af..e24b7964a15 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -884,6 +884,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root = pending->root; struct btrfs_root *parent_root; + struct 
btrfs_block_rsv *rsv; struct inode *parent_inode; struct dentry *parent; struct dentry *dentry; @@ -895,6 +896,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, u64 objectid; u64 root_flags; + rsv = trans->block_rsv; + new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); if (!new_root_item) { pending->error = -ENOMEM; @@ -1002,6 +1005,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_orphan_post_snapshot(trans, pending); fail: kfree(new_root_item); + trans->block_rsv = rsv; btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); return 0; } diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index d733b9cfea3..69565e5fc6a 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -116,6 +116,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans, if (ret) goto out; btrfs_release_path(path); + + /* + * remove the attribute + */ + if (!value) + goto out; } again: @@ -158,6 +164,9 @@ out: return ret; } +/* + * @value: "" makes the attribute to empty, NULL removes it + */ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fee028b5332..86c59e16ba7 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath); - } else if (rpath) { + } else if (rpath || rino) { *ino = rino; *ppath = rpath; *pathlen = strlen(rpath); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index d47c5ec7fb1..88bacaf385d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, fsc = create_fs_client(fsopt, opt); if (IS_ERR(fsc)) { res = ERR_CAST(fsc); - kfree(fsopt); - kfree(opt); + destroy_mount_options(fsopt); + ceph_destroy_options(opt); goto out_final; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e717dfd2f2b..b7d7bd0f066 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -175,6 +175,7 @@ struct mpage_da_data { */ #define EXT4_IO_END_UNWRITTEN 0x0001 #define EXT4_IO_END_ERROR 0x0002 +#define EXT4_IO_END_QUEUED 0x0004 struct ext4_io_page { struct page *p_page; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c4da98a959a..18d2558b762 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -121,9 +121,6 @@ void ext4_evict_inode(struct inode *inode) trace_ext4_evict_inode(inode); - mutex_lock(&inode->i_mutex); - ext4_flush_completed_IO(inode); - mutex_unlock(&inode->i_mutex); ext4_ioend_wait(inode); if (inode->i_nlink) { diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 78839af7ce2..92f38ee13f8 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -142,7 +142,23 @@ static void ext4_end_io_work(struct work_struct *work) unsigned long flags; int ret; - mutex_lock(&inode->i_mutex); + if (!mutex_trylock(&inode->i_mutex)) { + /* + * Requeue the work instead of waiting so that the work + * items queued after this can be processed. + */ + queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work); + /* + * To prevent the ext4-dio-unwritten thread from keeping + * requeueing end_io requests and occupying cpu for too long, + * yield the cpu if it sees an end_io request that has already + * been requeued. 
+ */ + if (io->flag & EXT4_IO_END_QUEUED) + yield(); + io->flag |= EXT4_IO_END_QUEUED; + return; + } ret = ext4_end_io_nolock(io); if (ret < 0) { mutex_unlock(&inode->i_mutex); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 168a80f7f12..5cb8614508c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, forget->forget_one.nlookup = nlookup; spin_lock(&fc->lock); - fc->forget_list_tail->next = forget; - fc->forget_list_tail = forget; - wake_up(&fc->waitq); - kill_fasync(&fc->fasync, SIGIO, POLL_IN); + if (fc->connected) { + fc->forget_list_tail->next = forget; + fc->forget_list_tail = forget; + wake_up(&fc->waitq); + kill_fasync(&fc->fasync, SIGIO, POLL_IN); + } else { + kfree(forget); + } spin_unlock(&fc->lock); } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 12b502929da..add96f6ffda 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -812,6 +812,9 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) if (arg->minor >= 17) { if (!(arg->flags & FUSE_FLOCK_LOCKS)) fc->no_flock = 1; + } else { + if (!(arg->flags & FUSE_POSIX_LOCKS)) + fc->no_flock = 1; } if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index c106ca22e81..d24a9b666a2 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) struct inode *root, *inode; struct qstr str; struct nls_table *nls = NULL; + u64 last_fs_block, last_fs_page; int err; err = -EINVAL; @@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (!sbi->rsrc_clump_blocks) sbi->rsrc_clump_blocks = 1; - err = generic_check_addressable(sbi->alloc_blksz_shift, - sbi->total_blocks); - if (err) { + err = -EFBIG; + last_fs_block = sbi->total_blocks - 1; + last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> + PAGE_CACHE_SHIFT; + + if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || + (last_fs_page > (pgoff_t)(~0ULL))) { printk(KERN_ERR "hfs: filesystem size too large.\n"); goto out_free_vhdr; } @@ -525,8 +530,8 @@ out_close_cat_tree: out_close_ext_tree: hfs_btree_close(sbi->ext_tree); out_free_vhdr: - kfree(sbi->s_vhdr); - kfree(sbi->s_backup_vhdr); + kfree(sbi->s_vhdr_buf); + kfree(sbi->s_backup_vhdr_buf); out_unload_nls: unload_nls(sbi->nls); unload_nls(nls); diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 10e515a0d45..7daf4b852d1 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -272,9 +272,9 @@ reread: return 0; out_free_backup_vhdr: - kfree(sbi->s_backup_vhdr); + kfree(sbi->s_backup_vhdr_buf); out_free_vhdr: - kfree(sbi->s_vhdr); + kfree(sbi->s_vhdr_buf); out: return error; } diff --git a/fs/namei.c b/fs/namei.c index 2826db35dc2..f4788365ea2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags, if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) return -EISDIR; /* we actually want to stop here */ - /* - * We don't want to mount if someone's just doing a stat and they've - * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and - * appended a '/' to the name. + /* We don't want to mount if someone's just doing a stat - + * unless they're stat'ing a directory and appended a '/' to + * the name. 
+ * + * We do, however, want to mount if someone wants to open or + * create a file of any type under the mountpoint, wants to + * traverse through the mountpoint or wants to open the + * mounted directory. Also, autofs may mark negative dentries + * as being automount points. These will need the attentions + * of the daemon to instantiate them before they can be used. */ - if (!(flags & LOOKUP_FOLLOW)) { - /* We do, however, want to mount if someone wants to open or - * create a file of any type under the mountpoint, wants to - * traverse through the mountpoint or wants to open the mounted - * directory. - * Also, autofs may mark negative dentries as being automount - * points. These will need the attentions of the daemon to - * instantiate them before they can be used. - */ - if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | - LOOKUP_OPEN | LOOKUP_CREATE)) && - path->dentry->d_inode) - return -EISDIR; - } + if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | + LOOKUP_OPEN | LOOKUP_CREATE)) && + path->dentry->d_inode) + return -EISDIR; + current->total_link_count++; if (current->total_link_count >= 40) return -ELOOP; @@ -2619,6 +2616,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (!dir->i_op->rmdir) return -EPERM; + dget(dentry); mutex_lock(&dentry->d_inode->i_mutex); error = -EBUSY; @@ -2639,6 +2637,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) out: mutex_unlock(&dentry->d_inode->i_mutex); + dput(dentry); if (!error) d_delete(dentry); return error; @@ -3028,6 +3027,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; + dget(new_dentry); if (target) mutex_lock(&target->i_mutex); @@ -3048,6 +3048,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, out: if (target) mutex_unlock(&target->i_mutex); + dput(new_dentry); if (!error) if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry,new_dentry); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 1ec1a85fa71..3e93e9a1bee 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -56,6 +56,9 @@ enum nfs4_session_state { NFS4_SESSION_DRAINING, }; +#define NFS4_RENEW_TIMEOUT 0x01 +#define NFS4_RENEW_DELEGATION_CB 0x02 + struct nfs4_minor_version_ops { u32 minor_version; @@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops { }; struct nfs4_state_maintenance_ops { - int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); + int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned); struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *); int (*renew_lease)(struct nfs_client *, struct rpc_cred *); }; @@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations; extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); -extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); -extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); @@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t); extern void 
nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_lease_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); +extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs41_handle_recall_slot(struct nfs_client *clp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8c77039e7a8..4700fae1ada 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata) if (task->tk_status < 0) { /* Unless we're shutting down, schedule state recovery! */ - if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) + if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) + return; + if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { nfs4_schedule_lease_recovery(clp); - return; + return; + } + nfs4_schedule_path_down_recovery(clp); } do_renew_lease(clp, timestamp); } @@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = { .rpc_release = nfs4_renew_release, }; -int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) }; struct nfs4_renewdata *data; + if (renew_flags == 0) + return 0; if (!atomic_inc_not_zero(&clp->cl_count)) return -EIO; - data = kmalloc(sizeof(*data), GFP_KERNEL); + data = kmalloc(sizeof(*data), GFP_NOFS); if (data == NULL) return -ENOMEM; data->client = clp; @@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) &nfs4_renew_ops, data); } -int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ return rpc_run_task(&task_setup_data); } -static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) +static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) { struct rpc_task *task; int ret = 0; + if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) + return 0; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) ret = PTR_ERR(task); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index df8e7f3ca56..dc484c0eae7 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work) struct rpc_cred *cred; long lease; unsigned long last, now; + unsigned renew_flags = 0; ops = clp->cl_mvops->state_renewal_ops; dprintk("%s: start\n", __func__); @@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work) last = clp->cl_last_renewal; now = jiffies; /* Are we close to a lease timeout? 
*/ - if (time_after(now, last + lease/3)) { + if (time_after(now, last + lease/3)) + renew_flags |= NFS4_RENEW_TIMEOUT; + if (nfs_delegations_present(clp)) + renew_flags |= NFS4_RENEW_DELEGATION_CB; + + if (renew_flags != 0) { cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) { - if (!nfs_delegations_present(clp)) { + if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) { set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); goto out; } nfs_expire_all_delegations(clp); } else { /* Queue an asynchronous RENEW. */ - ops->sched_state_renewal(clp, cred); + ops->sched_state_renewal(clp, cred, renew_flags); put_rpccred(cred); goto out_exp; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 72ab97ef3d6..39914be40b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } +void nfs4_schedule_path_down_recovery(struct nfs_client *clp) +{ + nfs_handle_cb_pathdown(clp); + nfs4_schedule_state_manager(clp); +} + static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { diff --git a/fs/nfs/super.c b/fs/nfs/super.c index b961ceac66b..9b7dd7013b1 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb) sb->s_blocksize = nfs_block_bits(server->wsize, &sb->s_blocksize_bits); - if (server->flags & NFS_MOUNT_NOAC) - sb->s_flags |= MS_SYNCHRONOUS; - sb->s_bdi = &server->backing_dev_info; nfs_super_set_maxbytes(sb, server->maxfilesize); @@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type, if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; + /* -o noac implies -o sync */ + if (server->flags & NFS_MOUNT_NOAC) + sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; + /* -o noac implies -o sync */ + if (server->flags & NFS_MOUNT_NOAC) + sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; + /* -o noac implies -o sync */ + if (server->flags & NFS_MOUNT_NOAC) + sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; + /* -o noac implies -o sync */ + if (server->flags & NFS_MOUNT_NOAC) + sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; + /* -o noac implies -o sync */ + if (server->flags & NFS_MOUNT_NOAC) + 
sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b39b37f8091..c9bd2a6b7d4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head if (!data) goto out_bad; data->pagevec[0] = page; - nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); + nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags); list_add(&data->list, res); requests++; nbytes -= len; diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 45174b53437..feb361e252a 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) -#define ubifs_dbg_msg(fmt, ...) do { \ - if (0) \ - pr_debug(fmt "\n", ##__VA_ARGS__); \ +#define ubifs_dbg_msg(fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ } while (0) #define dbg_dump_stack() diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 63e971e2b83..8c37dde4c52 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -1300,6 +1300,7 @@ xfs_end_io_direct_write( bool is_async) { struct xfs_ioend *ioend = iocb->private; + struct inode *inode = ioend->io_inode; /* * blockdev_direct_IO can return an error even after the I/O @@ -1331,7 +1332,7 @@ xfs_end_io_direct_write( } /* XXX: probably should move into the real I/O completion handler */ - inode_dio_done(ioend->io_inode); + inode_dio_done(inode); } STATIC ssize_t diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index b9c172b3fbb..673704fab74 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -70,9 +70,8 @@ xfs_synchronize_times( } /* - * If the linux inode is valid, mark it dirty. - * Used when committing a dirty inode into a transaction so that - * the inode will get written back by the linux code + * If the linux inode is valid, mark it dirty, else mark the dirty state + * in the XFS inode to make sure we pick it up when reclaiming the inode. 
*/ void xfs_mark_inode_dirty_sync( @@ -82,6 +81,10 @@ xfs_mark_inode_dirty_sync( if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) mark_inode_dirty_sync(inode); + else { + barrier(); + ip->i_update_core = 1; + } } void @@ -92,6 +95,11 @@ xfs_mark_inode_dirty( if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) mark_inode_dirty(inode); + else { + barrier(); + ip->i_update_core = 1; + } + } /* diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 9a72dda58bd..2366c54cc4f 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -356,6 +356,8 @@ xfs_parseargs( mp->m_flags |= XFS_MOUNT_DELAYLOG; } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { mp->m_flags &= ~XFS_MOUNT_DELAYLOG; + xfs_warn(mp, + "nodelaylog is deprecated and will be removed in Linux 3.3"); } else if (!strcmp(this_char, MNTOPT_DISCARD)) { mp->m_flags |= XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { @@ -877,33 +879,17 @@ xfs_log_inode( struct xfs_trans *tp; int error; - xfs_iunlock(ip, XFS_ILOCK_SHARED); tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { xfs_trans_cancel(tp, 0); - /* we need to return with the lock hold shared */ - xfs_ilock(ip, XFS_ILOCK_SHARED); return error; } xfs_ilock(ip, XFS_ILOCK_EXCL); - - /* - * Note - it's possible that we might have pushed ourselves out of the - * way during trans_reserve which would flush the inode. But there's - * no guarantee that the inode buffer has actually gone out yet (it's - * delwri). Plus the buffer could be pinned anyway if it's part of - * an inode in another recent transaction. So we play it safe and - * fire off the transaction anyway. - */ - xfs_trans_ijoin(tp, ip); + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_trans_commit(tp, 0); - xfs_ilock_demote(ip, XFS_ILOCK_EXCL); - - return error; + return xfs_trans_commit(tp, 0); } STATIC int @@ -918,7 +904,9 @@ xfs_fs_write_inode( trace_xfs_write_inode(ip); if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); + return -XFS_ERROR(EIO); + if (!ip->i_update_core) + return 0; if (wbc->sync_mode == WB_SYNC_ALL) { /* @@ -929,12 +917,10 @@ xfs_fs_write_inode( * of synchronous log foces dramatically. 
*/ xfs_ioend_wait(ip); - xfs_ilock(ip, XFS_ILOCK_SHARED); - if (ip->i_update_core) { - error = xfs_log_inode(ip); - if (error) - goto out_unlock; - } + error = xfs_log_inode(ip); + if (error) + goto out; + return 0; } else { /* * We make this non-blocking if the inode is contended, return diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h index 98999cf107c..feb91219674 100644 --- a/include/linux/basic_mmio_gpio.h +++ b/include/linux/basic_mmio_gpio.h @@ -63,15 +63,10 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) return container_of(gc, struct bgpio_chip, gc); } -int __devexit bgpio_remove(struct bgpio_chip *bgc); -int __devinit bgpio_init(struct bgpio_chip *bgc, - struct device *dev, - unsigned long sz, - void __iomem *dat, - void __iomem *set, - void __iomem *clr, - void __iomem *dirout, - void __iomem *dirin, - bool big_endian); +int bgpio_remove(struct bgpio_chip *bgc); +int bgpio_init(struct bgpio_chip *bgc, struct device *dev, + unsigned long sz, void __iomem *dat, void __iomem *set, + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, + bool big_endian); #endif /* __BASIC_MMIO_GPIO_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3b535db00a9..343bd7661f2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -39,16 +39,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct mem_cgroup *mem_cont, int active, int file); -struct memcg_scanrecord { - struct mem_cgroup *mem; /* scanend memory cgroup */ - struct mem_cgroup *root; /* scan target hierarchy root */ - int context; /* scanning context (see memcontrol.c) */ - unsigned long nr_scanned[2]; /* the number of scanned pages */ - unsigned long nr_rotated[2]; /* the number of rotated pages */ - unsigned long nr_freed[2]; /* the number of freed pages */ - unsigned long elapsed; /* nsec of time elapsed while scanning */ -}; - #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* * All "charge" functions with gfp_mask should use GFP_KERNEL or @@ -127,15 +117,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page); extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap, - struct memcg_scanrecord *rec); -extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap, - struct zone *zone, - struct memcg_scanrecord *rec, - unsigned long *nr_scanned); - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; #endif diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index d12f8d635a8..97cf4f27d64 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -26,7 +26,7 @@ struct wm8994_ldo_pdata { struct regulator_init_data *init_data; }; -#define WM8994_CONFIGURE_GPIO 0x8000 +#define WM8994_CONFIGURE_GPIO 0x10000 #define WM8994_DRC_REGS 5 #define WM8994_EQ_REGS 20 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 245bafdafd5..c816075c01c 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu); extern int perf_num_counters(void); extern const char *perf_pmu_name(void); -extern void __perf_event_task_sched_in(struct task_struct *task); -extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); +extern void 
__perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task); +extern void __perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next); extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); @@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) extern struct jump_label_key perf_sched_events; -static inline void perf_event_task_sched_in(struct task_struct *task) +static inline void perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) { if (static_branch(&perf_sched_events)) - __perf_event_task_sched_in(task); + __perf_event_task_sched_in(prev, task); } -static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) +static inline void perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); - __perf_event_task_sched_out(task, next); + if (static_branch(&perf_sched_events)) + __perf_event_task_sched_out(prev, next); } extern void perf_event_mmap(struct vm_area_struct *vma); @@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event); extern void perf_event_task_tick(void); #else static inline void -perf_event_task_sched_in(struct task_struct *task) { } +perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) { } static inline void -perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next) { } +perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) { } static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 26f6ea4444e..b47771aa571 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -123,7 +123,7 @@ struct regulator_bulk_data { const char *supply; struct regulator *consumer; - /* Internal use */ + /* private: Internal use */ int ret; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7b996ed86d5..8bd383caa36 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); +extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); extern struct sk_buff *skb_copy(const struct sk_buff *skb, diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 12b2b18e50c..e16557a357e 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -231,6 +231,8 @@ enum LINUX_MIB_TCPDEFERACCEPTDROP, LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ + LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ + LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ __LINUX_MIB_MAX }; diff --git a/include/linux/swap.h b/include/linux/swap.h index 14d62490922..c71f84bb62e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -252,6 +252,12 @@ static inline void lru_cache_add_file(struct page 
*page) extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, int mode, int file); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap); +extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap, + struct zone *zone, + unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 342dcf13d03..a6326ef8ade 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -288,6 +288,35 @@ enum p9_perm_t { P9_DMSETVTX = 0x00010000, }; +/* 9p2000.L open flags */ +#define P9_DOTL_RDONLY 00000000 +#define P9_DOTL_WRONLY 00000001 +#define P9_DOTL_RDWR 00000002 +#define P9_DOTL_NOACCESS 00000003 +#define P9_DOTL_CREATE 00000100 +#define P9_DOTL_EXCL 00000200 +#define P9_DOTL_NOCTTY 00000400 +#define P9_DOTL_TRUNC 00001000 +#define P9_DOTL_APPEND 00002000 +#define P9_DOTL_NONBLOCK 00004000 +#define P9_DOTL_DSYNC 00010000 +#define P9_DOTL_FASYNC 00020000 +#define P9_DOTL_DIRECT 00040000 +#define P9_DOTL_LARGEFILE 00100000 +#define P9_DOTL_DIRECTORY 00200000 +#define P9_DOTL_NOFOLLOW 00400000 +#define P9_DOTL_NOATIME 01000000 +#define P9_DOTL_CLOEXEC 02000000 +#define P9_DOTL_SYNC 04000000 + +/* 9p2000.L at flags */ +#define P9_DOTL_AT_REMOVEDIR 0x200 + +/* 9p2000.L lock type */ +#define P9_LOCK_TYPE_RDLCK 0 +#define P9_LOCK_TYPE_WRLCK 1 +#define P9_LOCK_TYPE_UNLCK 2 + /** * enum p9_qid_t - QID types * @P9_QTDIR: directory diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 408ae4882d2..401d73bd151 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support { * by default for perm_addr. In this case, the mask should be set to * all-zeroes. In this case it is assumed that the device can handle * the same number of arbitrary MAC addresses. 
+ * @registered: protects ->resume and ->suspend sysfs callbacks against + * unregister hardware * @debugfsdir: debugfs directory used for this wiphy, will be renamed * automatically on wiphy renames * @dev: (virtual) struct device for this wiphy diff --git a/include/net/flow.h b/include/net/flow.h index 78113daadd6..a09447749e2 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -7,6 +7,7 @@ #ifndef _NET_FLOW_H #define _NET_FLOW_H +#include <linux/socket.h> #include <linux/in6.h> #include <linux/atomic.h> @@ -68,7 +69,7 @@ struct flowi4 { #define fl4_ipsec_spi uli.spi #define fl4_mh_type uli.mht.type #define fl4_gre_key uli.gre_key -}; +} __attribute__((__aligned__(BITS_PER_LONG/8))); static inline void flowi4_init_output(struct flowi4 *fl4, int oif, __u32 mark, __u8 tos, __u8 scope, @@ -112,7 +113,7 @@ struct flowi6 { #define fl6_ipsec_spi uli.spi #define fl6_mh_type uli.mht.type #define fl6_gre_key uli.gre_key -}; +} __attribute__((__aligned__(BITS_PER_LONG/8))); struct flowidn { struct flowi_common __fl_common; @@ -127,7 +128,7 @@ struct flowidn { union flowi_uli uli; #define fld_sport uli.ports.sport #define fld_dport uli.ports.dport -}; +} __attribute__((__aligned__(BITS_PER_LONG/8))); struct flowi { union { @@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) return container_of(fldn, struct flowi, u.dn); } +typedef unsigned long flow_compare_t; + +static inline size_t flow_key_size(u16 family) +{ + switch (family) { + case AF_INET: + BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); + return sizeof(struct flowi4) / sizeof(flow_compare_t); + case AF_INET6: + BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); + return sizeof(struct flowi6) / sizeof(flow_compare_t); + case AF_DECnet: + BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); + return sizeof(struct flowidn) / sizeof(flow_compare_t); + } + return 0; +} + #define FLOW_DIR_IN 0 #define FLOW_DIR_OUT 1 #define FLOW_DIR_FWD 2 diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 99e6e19b57c..4c0766e201e 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog; */ struct listen_sock { u8 max_qlen_log; - /* 3 bytes hole, try to use */ + u8 synflood_warned; + /* 2 bytes hole, try to use */ int qlen; int qlen_young; int clock_hand; diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 6506458ccd3..712b3bebeda 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -109,6 +109,7 @@ typedef enum { SCTP_CMD_SEND_MSG, /* Send the whole use message */ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ + SCTP_CMD_SET_ASOC, /* Restore association context */ SCTP_CMD_LAST } sctp_verb_t; diff --git a/include/net/tcp.h b/include/net/tcp.h index 149a415d1e0..acc620a4a45 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags); extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt); +#ifdef CONFIG_SYN_COOKIES extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss); +#else +static inline __u32 cookie_v4_init_sequence(struct sock *sk, + struct sk_buff *skb, + __u16 *mss) +{ + return 0; +} +#endif extern __u32 cookie_init_timestamp(struct request_sock *req); extern bool 
cookie_check_timestamp(struct tcp_options_received *opt, bool *); /* From net/ipv6/syncookies.c */ extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); +#ifdef CONFIG_SYN_COOKIES extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss); - +#else +static inline __u32 cookie_v6_init_sequence(struct sock *sk, + struct sk_buff *skb, + __u16 *mss) +{ + return 0; +} +#endif /* tcp_output.c */ extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, @@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); extern int tcp_send_synack(struct sock *); +extern int tcp_syn_flood_action(struct sock *sk, + const struct sk_buff *skb, + const char *proto); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk); diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 5271a741c3a..498433dd067 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk, struct sk_buff *skb); extern int datagram_send_ctl(struct net *net, + struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, diff --git a/kernel/events/core.c b/kernel/events/core.c index b8785e26ee1..0f857782d06 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode) local_irq_restore(flags); } -static inline void perf_cgroup_sched_out(struct task_struct *task) +static inline void perf_cgroup_sched_out(struct task_struct *task, + struct task_struct *next) { - perf_cgroup_switch(task, PERF_CGROUP_SWOUT); + struct perf_cgroup *cgrp1; + struct perf_cgroup *cgrp2 = NULL; + + /* + * we come here when we know perf_cgroup_events > 0 + */ + cgrp1 = perf_cgroup_from_task(task); + + /* + * next is NULL when called from perf_event_enable_on_exec() + * that will systematically cause a cgroup_switch() + */ + if (next) + cgrp2 = perf_cgroup_from_task(next); + + /* + * only schedule out current cgroup events if we know + * that we are switching to a different cgroup. Otherwise, + * do no touch the cgroup events. + */ + if (cgrp1 != cgrp2) + perf_cgroup_switch(task, PERF_CGROUP_SWOUT); } -static inline void perf_cgroup_sched_in(struct task_struct *task) +static inline void perf_cgroup_sched_in(struct task_struct *prev, + struct task_struct *task) { - perf_cgroup_switch(task, PERF_CGROUP_SWIN); + struct perf_cgroup *cgrp1; + struct perf_cgroup *cgrp2 = NULL; + + /* + * we come here when we know perf_cgroup_events > 0 + */ + cgrp1 = perf_cgroup_from_task(task); + + /* prev can never be NULL */ + cgrp2 = perf_cgroup_from_task(prev); + + /* + * only need to schedule in cgroup events if we are changing + * cgroup during ctxsw. Cgroup events were not scheduled + * out of ctxsw out if that was not the case. 
+ */ + if (cgrp1 != cgrp2) + perf_cgroup_switch(task, PERF_CGROUP_SWIN); } static inline int perf_cgroup_connect(int fd, struct perf_event *event, @@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) { } -static inline void perf_cgroup_sched_out(struct task_struct *task) +static inline void perf_cgroup_sched_out(struct task_struct *task, + struct task_struct *next) { } -static inline void perf_cgroup_sched_in(struct task_struct *task) +static inline void perf_cgroup_sched_in(struct task_struct *prev, + struct task_struct *task) { } @@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task, * cgroup event are system-wide mode only */ if (atomic_read(&__get_cpu_var(perf_cgroup_events))) - perf_cgroup_sched_out(task); + perf_cgroup_sched_out(task, next); } static void task_ctx_sched_out(struct perf_event_context *ctx) @@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, * accessing the event control register. If a NMI hits, then it will * keep the event running. */ -void __perf_event_task_sched_in(struct task_struct *task) +void __perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) { struct perf_event_context *ctx; int ctxn; @@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task) * cgroup event are system-wide mode only */ if (atomic_read(&__get_cpu_var(perf_cgroup_events))) - perf_cgroup_sched_in(task); + perf_cgroup_sched_in(prev, task); } static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) @@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) * ctxswin cgroup events which are already scheduled * in. */ - perf_cgroup_sched_out(current); + perf_cgroup_sched_out(current, NULL); raw_spin_lock(&ctx->lock); task_ctx_sched_out(ctx); @@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event) } static void calc_timer_values(struct perf_event *event, - u64 *running, - u64 *enabled) + u64 *enabled, + u64 *running) { u64 now, ctx_time; diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index d5a3009da71..dc5114b4c16 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc) desc->depth = 1; if (desc->irq_data.chip->irq_shutdown) desc->irq_data.chip->irq_shutdown(&desc->irq_data); - if (desc->irq_data.chip->irq_disable) + else if (desc->irq_data.chip->irq_disable) desc->irq_data.chip->irq_disable(&desc->irq_data); else desc->irq_data.chip->irq_mask(&desc->irq_data); diff --git a/kernel/sched.c b/kernel/sched.c index ccacdbdecf4..ec5f472bc5b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_disable(); #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ - perf_event_task_sched_in(current); + perf_event_task_sched_in(prev, current); #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ @@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq) } /* - * schedule() is the main scheduler function. + * __schedule() is the main scheduler function. 
*/ -asmlinkage void __sched schedule(void) +static void __sched __schedule(void) { struct task_struct *prev, *next; unsigned long *switch_count; @@ -4322,16 +4322,6 @@ need_resched: if (to_wakeup) try_to_wake_up_local(to_wakeup); } - - /* - * If we are going to sleep and we have plugged IO - * queued, make sure to submit it to avoid deadlocks. - */ - if (blk_needs_flush_plug(prev)) { - raw_spin_unlock(&rq->lock); - blk_schedule_flush_plug(prev); - raw_spin_lock(&rq->lock); - } } switch_count = &prev->nvcsw; } @@ -4369,6 +4359,26 @@ need_resched: if (need_resched()) goto need_resched; } + +static inline void sched_submit_work(struct task_struct *tsk) +{ + if (!tsk->state) + return; + /* + * If we are going to sleep and we have plugged IO queued, + * make sure to submit it to avoid deadlocks. + */ + if (blk_needs_flush_plug(tsk)) + blk_schedule_flush_plug(tsk); +} + +asmlinkage void schedule(void) +{ + struct task_struct *tsk = current; + + sched_submit_work(tsk); + __schedule(); +} EXPORT_SYMBOL(schedule); #ifdef CONFIG_MUTEX_SPIN_ON_OWNER @@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void) do { add_preempt_count_notrace(PREEMPT_ACTIVE); - schedule(); + __schedule(); sub_preempt_count_notrace(PREEMPT_ACTIVE); /* @@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void) do { add_preempt_count(PREEMPT_ACTIVE); local_irq_enable(); - schedule(); + __schedule(); local_irq_disable(); sub_preempt_count(PREEMPT_ACTIVE); @@ -5588,7 +5598,7 @@ static inline int should_resched(void) static void __cond_resched(void) { add_preempt_count(PREEMPT_ACTIVE); - schedule(); + __schedule(); sub_preempt_count(PREEMPT_ACTIVE); } @@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map) struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); if (sd && (sd->flags & SD_OVERLAP)) free_sched_groups(sd->groups, 0); + kfree(*per_cpu_ptr(sdd->sd, j)); kfree(*per_cpu_ptr(sdd->sg, j)); kfree(*per_cpu_ptr(sdd->sgp, j)); } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index e19ce1454ee..e66046456f4 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = { .cmd = TASKSTATS_CMD_GET, .doit = taskstats_user_cmd, .policy = taskstats_cmd_get_policy, + .flags = GENL_ADMIN_PERM, }; static struct genl_ops cgroupstats_ops = { diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 59f369f98a0..ea5e1a928d5 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer) static void alarm_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) { + memset(cur_setting, 0, sizeof(struct itimerspec)); + cur_setting->it_interval = ktime_to_timespec(timr->it.alarmtimer.period); cur_setting->it_value = @@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, if (!rtcdev) return -ENOTSUPP; - /* Save old values */ - old_setting->it_interval = - ktime_to_timespec(timr->it.alarmtimer.period); - old_setting->it_value = - ktime_to_timespec(timr->it.alarmtimer.node.expires); + /* + * XXX HACK! Currently we can DOS a system if the interval + * period on alarmtimers is too small. Cap the interval here + * to 100us and solve this properly in a future patch! 
-jstultz + */ + if ((new_setting->it_interval.tv_sec == 0) && + (new_setting->it_interval.tv_nsec < 100000)) + new_setting->it_interval.tv_nsec = 100000; + + if (old_setting) + alarm_timer_get(timr, old_setting); /* If the timer was already set, cancel it */ alarm_cancel(&timr->it.alarmtimer); diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 24dc60d9fa1..5bbfac85866 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) #define KB 1024 #define MB (1024*KB) +#define KB_MASK (~(KB-1)) /* * fill in extended accounting fields */ @@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; mmput(mm); } - stats->read_char = p->ioac.rchar; - stats->write_char = p->ioac.wchar; - stats->read_syscalls = p->ioac.syscr; - stats->write_syscalls = p->ioac.syscw; + stats->read_char = p->ioac.rchar & KB_MASK; + stats->write_char = p->ioac.wchar & KB_MASK; + stats->read_syscalls = p->ioac.syscr & KB_MASK; + stats->write_syscalls = p->ioac.syscw & KB_MASK; #ifdef CONFIG_TASK_IO_ACCOUNTING - stats->read_bytes = p->ioac.read_bytes; - stats->write_bytes = p->ioac.write_bytes; - stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes; + stats->read_bytes = p->ioac.read_bytes & KB_MASK; + stats->write_bytes = p->ioac.write_bytes & KB_MASK; + stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK; #else stats->read_bytes = 0; stats->write_bytes = 0; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 25fb1b0e53f..1783aabc612 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2412,8 +2412,13 @@ reflush: for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + bool drained; - if (!cwq->nr_active && list_empty(&cwq->delayed_works)) + spin_lock_irq(&cwq->gcwq->lock); + drained = !cwq->nr_active && list_empty(&cwq->delayed_works); + spin_unlock_irq(&cwq->gcwq->lock); + + if (drained) continue; if (++flush_cnt == 10 || diff --git a/lib/sha1.c b/lib/sha1.c index f33271dd00c..1de509a159c 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitops.h> +#include <linux/cryptohash.h> #include <asm/unaligned.h> /* diff --git a/mm/filemap.c b/mm/filemap.c index 645a080ba4d..7771871fa35 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -827,13 +827,14 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, { unsigned int i; unsigned int ret; - unsigned int nr_found; + unsigned int nr_found, nr_skip; rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, (void ***)pages, NULL, start, nr_pages); ret = 0; + nr_skip = 0; for (i = 0; i < nr_found; i++) { struct page *page; repeat: @@ -856,6 +857,7 @@ repeat: * here as an exceptional entry: so skip over it - * we only reach this from invalidate_mapping_pages(). */ + nr_skip++; continue; } @@ -876,7 +878,7 @@ repeat: * If all entries were removed before we could secure them, * try again, because callers stop trying once 0 is returned. 
*/ - if (unlikely(!ret && nr_found)) + if (unlikely(!ret && nr_found > nr_skip)) goto restart; rcu_read_unlock(); return ret; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ebd1e86bef1..3508777837c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -204,50 +204,6 @@ struct mem_cgroup_eventfd_list { static void mem_cgroup_threshold(struct mem_cgroup *mem); static void mem_cgroup_oom_notify(struct mem_cgroup *mem); -enum { - SCAN_BY_LIMIT, - SCAN_BY_SYSTEM, - NR_SCAN_CONTEXT, - SCAN_BY_SHRINK, /* not recorded now */ -}; - -enum { - SCAN, - SCAN_ANON, - SCAN_FILE, - ROTATE, - ROTATE_ANON, - ROTATE_FILE, - FREED, - FREED_ANON, - FREED_FILE, - ELAPSED, - NR_SCANSTATS, -}; - -struct scanstat { - spinlock_t lock; - unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; - unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; -}; - -const char *scanstat_string[NR_SCANSTATS] = { - "scanned_pages", - "scanned_anon_pages", - "scanned_file_pages", - "rotated_pages", - "rotated_anon_pages", - "rotated_file_pages", - "freed_pages", - "freed_anon_pages", - "freed_file_pages", - "elapsed_ns", -}; -#define SCANSTAT_WORD_LIMIT "_by_limit" -#define SCANSTAT_WORD_SYSTEM "_by_system" -#define SCANSTAT_WORD_HIERARCHY "_under_hierarchy" - - /* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -313,8 +269,7 @@ struct mem_cgroup { /* For oom notifier event fd */ struct list_head oom_notify; - /* For recording LRU-scan statistics */ - struct scanstat scanstat; + /* * Should we move charges of a task when a task is moved into this * mem_cgroup ? And what type of charges should we move ? @@ -1678,44 +1633,6 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) } #endif -static void __mem_cgroup_record_scanstat(unsigned long *stats, - struct memcg_scanrecord *rec) -{ - - stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; - stats[SCAN_ANON] += rec->nr_scanned[0]; - stats[SCAN_FILE] += rec->nr_scanned[1]; - - stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; - stats[ROTATE_ANON] += rec->nr_rotated[0]; - stats[ROTATE_FILE] += rec->nr_rotated[1]; - - stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; - stats[FREED_ANON] += rec->nr_freed[0]; - stats[FREED_FILE] += rec->nr_freed[1]; - - stats[ELAPSED] += rec->elapsed; -} - -static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) -{ - struct mem_cgroup *mem; - int context = rec->context; - - if (context >= NR_SCAN_CONTEXT) - return; - - mem = rec->mem; - spin_lock(&mem->scanstat.lock); - __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); - spin_unlock(&mem->scanstat.lock); - - mem = rec->root; - spin_lock(&mem->scanstat.lock); - __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); - spin_unlock(&mem->scanstat.lock); -} - /* * Scan the hierarchy if needed to reclaim memory. 
We remember the last child * we reclaimed from, so that we don't end up penalizing one child extensively @@ -1740,9 +1657,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; - struct memcg_scanrecord rec; unsigned long excess; - unsigned long scanned; + unsigned long nr_scanned; excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; @@ -1750,15 +1666,6 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, if (!check_soft && !shrink && root_mem->memsw_is_minimum) noswap = true; - if (shrink) - rec.context = SCAN_BY_SHRINK; - else if (check_soft) - rec.context = SCAN_BY_SYSTEM; - else - rec.context = SCAN_BY_LIMIT; - - rec.root = root_mem; - while (1) { victim = mem_cgroup_select_victim(root_mem); if (victim == root_mem) { @@ -1799,23 +1706,14 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, css_put(&victim->css); continue; } - rec.mem = victim; - rec.nr_scanned[0] = 0; - rec.nr_scanned[1] = 0; - rec.nr_rotated[0] = 0; - rec.nr_rotated[1] = 0; - rec.nr_freed[0] = 0; - rec.nr_freed[1] = 0; - rec.elapsed = 0; /* we use swappiness of local cgroup */ if (check_soft) { ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, - noswap, zone, &rec, &scanned); - *total_scanned += scanned; + noswap, zone, &nr_scanned); + *total_scanned += nr_scanned; } else ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, - noswap, &rec); - mem_cgroup_record_scanstat(&rec); + noswap); css_put(&victim->css); /* * At shrinking usage, we can't check we should stop here or @@ -3854,18 +3752,14 @@ try_to_free: /* try to free all pages in this cgroup */ shrink = 1; while (nr_retries && mem->res.usage > 0) { - struct memcg_scanrecord rec; int progress; if (signal_pending(current)) { ret = -EINTR; goto out; } - rec.context = SCAN_BY_SHRINK; - rec.mem = mem; - rec.root = mem; progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, - false, &rec); + false); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -4709,54 +4603,6 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file) } #endif /* CONFIG_NUMA */ -static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, - struct cftype *cft, - struct cgroup_map_cb *cb) -{ - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); - char string[64]; - int i; - - for (i = 0; i < NR_SCANSTATS; i++) { - strcpy(string, scanstat_string[i]); - strcat(string, SCANSTAT_WORD_LIMIT); - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]); - } - - for (i = 0; i < NR_SCANSTATS; i++) { - strcpy(string, scanstat_string[i]); - strcat(string, SCANSTAT_WORD_SYSTEM); - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]); - } - - for (i = 0; i < NR_SCANSTATS; i++) { - strcpy(string, scanstat_string[i]); - strcat(string, SCANSTAT_WORD_LIMIT); - strcat(string, SCANSTAT_WORD_HIERARCHY); - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); - } - for (i = 0; i < NR_SCANSTATS; i++) { - strcpy(string, scanstat_string[i]); - strcat(string, SCANSTAT_WORD_SYSTEM); - strcat(string, SCANSTAT_WORD_HIERARCHY); - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); - } - return 0; -} - -static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, - unsigned int event) -{ - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); - - spin_lock(&mem->scanstat.lock); - 
memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); - memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); - spin_unlock(&mem->scanstat.lock); - return 0; -} - - static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", @@ -4827,11 +4673,6 @@ static struct cftype mem_cgroup_files[] = { .mode = S_IRUGO, }, #endif - { - .name = "vmscan_stat", - .read_map = mem_cgroup_vmscan_stat_read, - .trigger = mem_cgroup_reset_vmscan_stat, - }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -5095,7 +4936,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) atomic_set(&mem->refcnt, 1); mem->move_charge_at_immigrate = 0; mutex_init(&mem->thresholds_lock); - spin_lock_init(&mem->scanstat.lock); return &mem->css; free_out: __mem_cgroup_free(mem); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 8b57173c1dd..9c51f9f58ca 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -636,7 +636,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; - pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -649,9 +648,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); - pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, new_pol); + vma->anon_vma, vma->vm_file, vma->vm_pgoff, + new_pol); if (prev) { vma = prev; next = vma->vm_next; @@ -1412,7 +1411,9 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy, err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { - err = copy_from_user(bm, nm, alloc_size); + unsigned long copy_size; + copy_size = min_t(unsigned long, sizeof(bm), alloc_size); + err = copy_from_user(bm, nm, copy_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); diff --git a/mm/slub.c b/mm/slub.c index 9f662d70eb4..7c54fe83a90 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, */ if (unlikely(!prior)) { remove_full(s, page); - add_partial(n, page, 0); + add_partial(n, page, 1); stat(s, FREE_ADD_PARTIAL); } } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7ef0903058e..5016f19e166 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2140,6 +2140,14 @@ struct vm_struct *alloc_vm_area(size_t size) return NULL; } + /* + * If the allocated address space is passed to a hypercall + * before being used then we cannot rely on a page fault to + * trigger an update of the page tables. So sync all the page + * tables here. + */ + vmalloc_sync_all(); + return area; } EXPORT_SYMBOL_GPL(alloc_vm_area); diff --git a/mm/vmscan.c b/mm/vmscan.c index b7719ec10dc..b55699cd906 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -105,7 +105,6 @@ struct scan_control { /* Which cgroup do we reclaim from */ struct mem_cgroup *mem_cgroup; - struct memcg_scanrecord *memcg_record; /* * Nodemask of nodes allowed by the caller. 
If NULL, all nodes @@ -1349,8 +1348,6 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc, int file = is_file_lru(lru); int numpages = hpage_nr_pages(page); reclaim_stat->recent_rotated[file] += numpages; - if (!scanning_global_lru(sc)) - sc->memcg_record->nr_rotated[file] += numpages; } if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); @@ -1394,10 +1391,6 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone, reclaim_stat->recent_scanned[0] += *nr_anon; reclaim_stat->recent_scanned[1] += *nr_file; - if (!scanning_global_lru(sc)) { - sc->memcg_record->nr_scanned[0] += *nr_anon; - sc->memcg_record->nr_scanned[1] += *nr_file; - } } /* @@ -1511,9 +1504,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, nr_reclaimed += shrink_page_list(&page_list, zone, sc); } - if (!scanning_global_lru(sc)) - sc->memcg_record->nr_freed[file] += nr_reclaimed; - local_irq_disable(); if (current_is_kswapd()) __count_vm_events(KSWAPD_STEAL, nr_reclaimed); @@ -1613,8 +1603,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, } reclaim_stat->recent_scanned[file] += nr_taken; - if (!scanning_global_lru(sc)) - sc->memcg_record->nr_scanned[file] += nr_taken; __count_zone_vm_events(PGREFILL, zone, pgscanned); if (file) @@ -1666,8 +1654,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * get_scan_ratio. */ reclaim_stat->recent_rotated[file] += nr_rotated; - if (!scanning_global_lru(sc)) - sc->memcg_record->nr_rotated[file] += nr_rotated; move_active_pages_to_lru(zone, &l_active, LRU_ACTIVE + file * LRU_FILE); @@ -1808,23 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, u64 fraction[2], denominator; enum lru_list l; int noswap = 0; - int force_scan = 0; + bool force_scan = false; unsigned long nr_force_scan[2]; - - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); - file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); - - if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { - /* kswapd does zone balancing and need to scan this zone */ - if (scanning_global_lru(sc) && current_is_kswapd()) - force_scan = 1; - /* memcg may have small limit and need to avoid priority drop */ - if (!scanning_global_lru(sc)) - force_scan = 1; - } + /* kswapd does zone balancing and needs to scan this zone */ + if (scanning_global_lru(sc) && current_is_kswapd()) + force_scan = true; + /* memcg may have small limit and need to avoid priority drop */ + if (!scanning_global_lru(sc)) + force_scan = true; /* If we have no swap space, do not bother scanning anon pages. 
*/ if (!sc->may_swap || (nr_swap_pages <= 0)) { @@ -1837,6 +1815,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, goto out; } + anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); + file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); + if (scanning_global_lru(sc)) { free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, @@ -2268,10 +2251,9 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap, - struct zone *zone, - struct memcg_scanrecord *rec, - unsigned long *scanned) + gfp_t gfp_mask, bool noswap, + struct zone *zone, + unsigned long *nr_scanned) { struct scan_control sc = { .nr_scanned = 0, @@ -2281,9 +2263,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, .may_swap = !noswap, .order = 0, .mem_cgroup = mem, - .memcg_record = rec, }; - ktime_t start, end; sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); @@ -2292,7 +2272,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, sc.may_writepage, sc.gfp_mask); - start = ktime_get(); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. @@ -2301,25 +2280,19 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, * the priority and make it zero. */ shrink_zone(0, zone, &sc); - end = ktime_get(); - - if (rec) - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); - *scanned = sc.nr_scanned; trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); + *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, gfp_t gfp_mask, - bool noswap, - struct memcg_scanrecord *rec) + bool noswap) { struct zonelist *zonelist; unsigned long nr_reclaimed; - ktime_t start, end; int nid; struct scan_control sc = { .may_writepage = !laptop_mode, @@ -2328,7 +2301,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .nr_to_reclaim = SWAP_CLUSTER_MAX, .order = 0, .mem_cgroup = mem_cont, - .memcg_record = rec, .nodemask = NULL, /* we don't care the placement */ .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), @@ -2337,7 +2309,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .gfp_mask = sc.gfp_mask, }; - start = ktime_get(); /* * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't * take care of from where we get pages. 
So the node where we start the @@ -2352,9 +2323,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, sc.gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); - end = ktime_get(); - if (rec) - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); diff --git a/mm/vmstat.c b/mm/vmstat.c index 20c18b7694b..d52b13d28e8 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, } #endif -#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA) #ifdef CONFIG_ZONE_DMA #define TEXT_FOR_DMA(xx) xx "_dma", #else @@ -788,7 +788,7 @@ const char * const vmstat_text[] = { #endif /* CONFIG_VM_EVENTS_COUNTERS */ }; -#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ #ifdef CONFIG_PROC_FS diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 175b5135bdc..e317583fcc7 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) { int in, out, inp, outp; struct virtio_chan *chan = client->trans; - char *rdata = (char *)req->rc+sizeof(struct p9_fcall); unsigned long flags; size_t pdata_off = 0; struct trans_rpage_info *rpinfo = NULL; @@ -346,7 +345,8 @@ req_retry_pinned: * Arrange in such a way that server places header in the * alloced memory and payload onto the user buffer. */ - inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); + inp = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, 11); /* * Running executables in the filesystem may result in * a read request with kernel buffer as opposed to user buffer. @@ -366,8 +366,8 @@ req_retry_pinned: } in += inp; } else { - in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, - req->rc->capacity); + in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, + req->rc->sdata, req->rc->capacity); } err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); @@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = { .close = p9_virtio_close, .request = p9_virtio_request, .cancel = p9_virtio_cancel, - .maxsize = PAGE_SIZE*VIRTQUEUE_NUM, + + /* + * We leave one entry for input and one entry for response + * headers. We also skip one more entry to accomodate, address + * that are not at page boundary, that can result in an extra + * page in zero copy. 
+ */ + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), .pref = P9_TRANS_PREF_PAYLOAD_SEP, .def = 0, .owner = THIS_MODULE, diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index ba6f73eb06c..a9aff9c7d02 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -4,7 +4,7 @@ menuconfig BRIDGE_NF_EBTABLES tristate "Ethernet Bridge tables (ebtables) support" - depends on BRIDGE && BRIDGE_NETFILTER + depends on BRIDGE && NETFILTER select NETFILTER_XTABLES help ebtables is a general, extensible frame/packet identification diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 7c2fa0a0814..7f9ac0742d1 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) caifdevs = caif_device_list(dev_net(dev)); BUG_ON(!caifdevs); - caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); + caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); if (!caifd) return NULL; caifd->pcpu_refcnt = alloc_percpu(int); + if (!caifd->pcpu_refcnt) { + kfree(caifd); + return NULL; + } caifd->netdev = dev; dev_hold(dev); return caifd; diff --git a/net/can/af_can.c b/net/can/af_can.c index 8ce926d3b2c..9b0c32a2690 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -857,7 +857,7 @@ static __exit void can_exit(void) struct net_device *dev; if (stats_timer) - del_timer(&can_stattimer); + del_timer_sync(&can_stattimer); can_remove_proc(); diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c index d5f2d97ac05..1f4cb30a42c 100644 --- a/net/ceph/msgpool.c +++ b/net/ceph/msgpool.c @@ -7,27 +7,37 @@ #include <linux/ceph/msgpool.h> -static void *alloc_fn(gfp_t gfp_mask, void *arg) +static void *msgpool_alloc(gfp_t gfp_mask, void *arg) { struct ceph_msgpool *pool = arg; - void *p; + struct ceph_msg *msg; - p = ceph_msg_new(0, pool->front_len, gfp_mask); - if (!p) - pr_err("msgpool %s alloc failed\n", pool->name); - return p; + msg = ceph_msg_new(0, pool->front_len, gfp_mask); + if (!msg) { + dout("msgpool_alloc %s failed\n", pool->name); + } else { + dout("msgpool_alloc %s %p\n", pool->name, msg); + msg->pool = pool; + } + return msg; } -static void free_fn(void *element, void *arg) +static void msgpool_free(void *element, void *arg) { - ceph_msg_put(element); + struct ceph_msgpool *pool = arg; + struct ceph_msg *msg = element; + + dout("msgpool_release %s %p\n", pool->name, msg); + msg->pool = NULL; + ceph_msg_put(msg); } int ceph_msgpool_init(struct ceph_msgpool *pool, int front_len, int size, bool blocking, const char *name) { + dout("msgpool %s init\n", name); pool->front_len = front_len; - pool->pool = mempool_create(size, alloc_fn, free_fn, pool); + pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); if (!pool->pool) return -ENOMEM; pool->name = name; @@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool, void ceph_msgpool_destroy(struct ceph_msgpool *pool) { + dout("msgpool %s destroy\n", pool->name); mempool_destroy(pool->pool); } struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len) { + struct ceph_msg *msg; + if (front_len > pool->front_len) { - pr_err("msgpool_get pool %s need front %d, pool size is %d\n", + dout("msgpool_get %s need front %d, pool size is %d\n", pool->name, front_len, pool->front_len); WARN_ON(1); @@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, return ceph_msg_new(0, front_len, GFP_NOFS); } - return mempool_alloc(pool->pool, GFP_NOFS); + msg = mempool_alloc(pool->pool, GFP_NOFS); + 
dout("msgpool_get %s %p\n", pool->name, msg); + return msg; } void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) { + dout("msgpool_put %s %p\n", pool->name, msg); + /* reset msg front_len; user may have changed it */ msg->front.iov_len = pool->front_len; msg->hdr.front_len = cpu_to_le32(pool->front_len); kref_init(&msg->kref); /* retake single ref */ + mempool_free(msg, pool->pool); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ce310eee708..16836a7df7a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) put_osd(osd); } +static void remove_all_osds(struct ceph_osd_client *osdc) +{ + dout("__remove_old_osds %p\n", osdc); + mutex_lock(&osdc->request_mutex); + while (!RB_EMPTY_ROOT(&osdc->osds)) { + struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), + struct ceph_osd, o_node); + __remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); +} + static void __move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd) { @@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd) list_del_init(&osd->o_osd_lru); } -static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) +static void remove_old_osds(struct ceph_osd_client *osdc) { struct ceph_osd *osd, *nosd; dout("__remove_old_osds %p\n", osdc); mutex_lock(&osdc->request_mutex); list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { - if (!remove_all && time_before(jiffies, osd->lru_ttl)) + if (time_before(jiffies, osd->lru_ttl)) break; __remove_osd(osdc, osd); } @@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) struct rb_node *parent = NULL; struct ceph_osd *osd = NULL; + dout("__insert_osd %p osd%d\n", new, new->o_osd); while (*p) { parent = *p; osd = rb_entry(parent, struct ceph_osd, o_node); @@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work) dout("osds timeout\n"); down_read(&osdc->map_sem); - remove_old_osds(osdc, 0); + remove_old_osds(osdc); up_read(&osdc->map_sem); schedule_delayed_work(&osdc->osds_timeout_work, @@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = NULL; } - remove_old_osds(osdc, 1); - WARN_ON(!RB_EMPTY_ROOT(&osdc->osds)); + remove_all_osds(osdc); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); ceph_msgpool_destroy(&osdc->msgpool_op_reply); diff --git a/net/core/dev.c b/net/core/dev.c index 17d67b579be..b10ff0a7185 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev, */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + skb_orphan(skb); nf_reset(skb); diff --git a/net/core/flow.c b/net/core/flow.c index bf32c33cad3..555a456efb0 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -30,6 +30,7 @@ struct flow_cache_entry { struct hlist_node hlist; struct list_head gc_list; } u; + struct net *net; u16 family; u8 dir; u32 genid; @@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc, static u32 flow_hash_code(struct flow_cache *fc, struct flow_cache_percpu *fcp, - const struct flowi *key) + const struct flowi *key, + size_t keysize) { const u32 *k = 
(const u32 *) key; + const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); - return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) + return jhash2(k, length, fcp->hash_rnd) & (flow_cache_hash_size(fc) - 1); } -typedef unsigned long flow_compare_t; - /* I hear what you're saying, use memcmp. But memcmp cannot make - * important assumptions that we can here, such as alignment and - * constant size. + * important assumptions that we can here, such as alignment. */ -static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) +static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, + size_t keysize) { const flow_compare_t *k1, *k1_lim, *k2; - const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); - - BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); k1 = (const flow_compare_t *) key1; - k1_lim = k1 + n_elem; + k1_lim = k1 + keysize; k2 = (const flow_compare_t *) key2; @@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, struct flow_cache_entry *fle, *tfle; struct hlist_node *entry; struct flow_cache_object *flo; + size_t keysize; unsigned int hash; local_bh_disable(); @@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, fle = NULL; flo = NULL; + + keysize = flow_key_size(family); + if (!keysize) + goto nocache; + /* Packet really early in init? Making flow_cache_init a * pre-smp initcall would solve this. --RR */ if (!fcp->hash_table) @@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, if (fcp->hash_rnd_recalc) flow_new_hash_rnd(fc, fcp); - hash = flow_hash_code(fc, fcp, key); + hash = flow_hash_code(fc, fcp, key, keysize); hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { - if (tfle->family == family && + if (tfle->net == net && + tfle->family == family && tfle->dir == dir && - flow_key_compare(key, &tfle->key) == 0) { + flow_key_compare(key, &tfle->key, keysize) == 0) { fle = tfle; break; } @@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); if (fle) { + fle->net = net; fle->family = family; fle->dir = dir; - memcpy(&fle->key, key, sizeof(*key)); + memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); fle->object = NULL; hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); fcp->hash_count++; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 27002dffe7e..387703f56fc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) } EXPORT_SYMBOL_GPL(skb_morph); -/* skb frags copy userspace buffers to kernel */ -static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) +/* skb_copy_ubufs - copy userspace skb frags buffers to kernel + * @skb: the skb to modify + * @gfp_mask: allocation priority + * + * This must be called on SKBTX_DEV_ZEROCOPY skb. + * It will copy all frags into kernel and drop the reference + * to userspace pages. + * + * If this function is called from an interrupt gfp_mask() must be + * %GFP_ATOMIC. + * + * Returns 0 on success or a negative error code on failure + * to allocate kernel memory to copy to. 
+ */ +int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { int i; int num_frags = skb_shinfo(skb)->nr_frags; @@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) skb_shinfo(skb)->frags[i - 1].page = head; head = (struct page *)head->private; } + + skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; return 0; } @@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) return NULL; - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } n = skb + 1; @@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) n = NULL; goto out; } - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; @@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) goto nofrags; - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 27997d35ebd..a2468363978 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; dev->tx_queue_len = 1000; /* Ethernet wants good queues */ dev->flags = IFF_BROADCAST|IFF_MULTICAST; - dev->priv_flags = IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_TX_SKB_SHARING; memset(dev->broadcast, 0xFF, ETH_ALEN); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1b745d412cf..dd2b9478ddd 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; if (addr->sin_family != AF_INET) { + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) + * only if s_addr is INADDR_ANY. 
+ */ err = -EAFNOSUPPORT; - goto out; + if (addr->sin_family != AF_UNSPEC || + addr->sin_addr.s_addr != htonl(INADDR_ANY)) + goto out; } chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 33e2c35b74b..80106d89d54 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = { }; /* Release a nexthop info record */ +static void free_fib_info_rcu(struct rcu_head *head) +{ + struct fib_info *fi = container_of(head, struct fib_info, rcu); + + if (fi->fib_metrics != (u32 *) dst_default_metrics) + kfree(fi->fib_metrics); + kfree(fi); +} void free_fib_info(struct fib_info *fi) { @@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi) } endfor_nexthops(fi); fib_info_cnt--; release_net(fi->fib_net); - kfree_rcu(fi, rcu); + call_rcu(&fi->rcu, free_fib_info_rcu); } void fib_release_info(struct fib_info *fi) diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 5c9b9d96391..e59aabd0eae 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) return skb; nlmsg_failure: + kfree_skb(skb); *errp = -EINVAL; printk(KERN_ERR "ip_queue: error creating packet message\n"); return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) { struct nf_queue_entry *entry; - if (vmsg->value > NF_MAX_VERDICT) + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) return -EINVAL; entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg, break; case IPQM_VERDICT: - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) - status = -EINVAL; - else - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; + status = ipq_set_verdict(&pmsg->msg.verdict, + len - sizeof(*pmsg)); + break; default: status = -EINVAL; } diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b14ec7d03b6..4bfad5da94f 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), + SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), + SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ea0d2183df4..21fab3edb92 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, return 0; /* ...Then it's D-SACK, and must reside below snd_una completely */ - if (!after(end_seq, tp->snd_una)) + if (after(end_seq, tp->snd_una)) return 0; if (!before(start_seq, tp->undo_marker)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1c12b8ec849..c34f0151394 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } -static void syn_flood_warning(const struct sk_buff *skb) +/* + * Return 1 if a syncookie should be sent + */ +int tcp_syn_flood_action(struct sock *sk, + const struct sk_buff *skb, + const char *proto) { - const char *msg; + const char *msg = "Dropping request"; + int want_cookie = 0; + 
struct listen_sock *lopt; + + #ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) + if (sysctl_tcp_syncookies) { msg = "Sending cookies"; - else + want_cookie = 1; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); + } else #endif - msg = "Dropping request"; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); - pr_info("TCP: Possible SYN flooding on port %d. %s.\n", - ntohs(tcp_hdr(skb)->dest), msg); + lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; + if (!lopt->synflood_warned) { + lopt->synflood_warned = 1; + pr_info("%s: Possible SYN flooding on port %d. %s. " + " Check SNMP counters.\n", + proto, ntohs(tcp_hdr(skb)->dest), msg); + } + return want_cookie; } +EXPORT_SYMBOL(tcp_syn_flood_action); /* * Save and compile IPv4 options into the request_sock if needed. @@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; -#ifdef CONFIG_SYN_COOKIES int want_cookie = 0; -#else -#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ -#endif /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * evidently real one. */ if (inet_csk_reqsk_queue_is_full(sk) && !isn) { - if (net_ratelimit()) - syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) { - want_cookie = 1; - } else -#endif - goto drop; + want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); + if (!want_cookie) + goto drop; } /* Accept backlog is full. If we have already queued enough @@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES want_cookie = 0; /* not our kind of cookie */ -#endif tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 9ef1831746e..b46e9f88ce3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) return 0; } -int datagram_send_ctl(struct net *net, +int datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, int *hlimit, int *tclass, int *dontfrag) @@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, + if (!inet_sk(sk)->transparent && + !ipv6_chk_addr(net, &src_info->ipi6_addr, strict ? 
dev : NULL, 0)) err = -EINVAL; else diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f3caf1b8d57..54303945019 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo } static struct ip6_flowlabel * -fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, - int optlen, int *err_p) +fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, + char __user *optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; @@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, msg.msg_control = (void*)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, &junk, &junk); if (err) goto done; @@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) return -EINVAL; - fl = fl_create(net, &freq, optval, optlen, &err); + fl = fl_create(net, sk, &freq, optval, optlen, &err); if (fl == NULL) return err; sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 147ede38ab4..2fbda5fc4cc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -475,7 +475,7 @@ sticky_done: msg.msg_controllen = optlen; msg.msg_control = (void*)(opt+1); - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); if (retv) goto done; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 24939486328..e63c3972a73 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) return skb; nlmsg_failure: + kfree_skb(skb); *errp = -EINVAL; printk(KERN_ERR "ip6_queue: error creating packet message\n"); return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) { struct nf_queue_entry *entry; - if (vmsg->value > NF_MAX_VERDICT) + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) return -EINVAL; entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg, break; case IPQM_VERDICT: - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) - status = -EINVAL; - else - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; + status = ipq_set_verdict(&pmsg->msg.verdict, + len - sizeof(*pmsg)); + break; default: status = -EINVAL; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6a79f3081bd..343852e5c70 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, - &tclass, &dontfrag); + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9e69eb0ec6d..1250f902067 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) struct inet_peer *peer; u32 *p = NULL; + if 
(!(rt->dst.flags & DST_HOST)) + return NULL; + if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); @@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst) struct inet6_dev *idev = rt->rt6i_idev; struct inet_peer *peer = rt->rt6i_peer; + if (!(rt->dst.flags & DST_HOST)) + dst_destroy_metrics_generic(dst); + if (idev != NULL) { rt->rt6i_idev = NULL; in6_dev_put(idev); @@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, ipv6_addr_copy(&rt->rt6i_gateway, daddr); } - rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->dst.flags |= DST_HOST; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { @@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct rt6_info *rt = ip6_rt_copy(ort, daddr); if (rt) { - rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->dst.flags |= DST_HOST; dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); } return rt; @@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, neigh = NULL; } - rt->rt6i_idev = idev; + rt->dst.flags |= DST_HOST; + rt->dst.output = ip6_output; dst_set_neighbour(&rt->dst, neigh); atomic_set(&rt->dst.__refcnt, 1); - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); - rt->dst.output = ip6_output; + + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); + rt->rt6i_dst.plen = 128; + rt->rt6i_idev = idev; spin_lock_bh(&icmp6_dst_lock); rt->dst.next = icmp6_dst_gc_list; @@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg) if (rt->rt6i_dst.plen == 128) rt->dst.flags |= DST_HOST; + if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { + u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); + if (!metrics) { + err = -ENOMEM; + goto out; + } + dst_init_metrics(&rt->dst, metrics, 0); + } #ifdef CONFIG_IPV6_SUBTREES ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); rt->rt6i_src.plen = cfg->fc_src_len; @@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, if (on_link) nrt->rt6i_flags &= ~RTF_GATEWAY; - nrt->rt6i_dst.plen = 128; - nrt->dst.flags |= DST_HOST; - ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); @@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, if (rt) { rt->dst.input = ort->dst.input; rt->dst.output = ort->dst.output; + rt->dst.flags |= DST_HOST; ipv6_addr_copy(&rt->rt6i_dst.addr, dest); - rt->rt6i_dst.plen = ort->rt6i_dst.plen; + rt->rt6i_dst.plen = 128; dst_copy_metrics(&rt->dst, &ort->dst); rt->dst.error = ort->dst.error; rt->rt6i_idev = ort->rt6i_idev; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d1fb63f4aeb..3c9fa618b69 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, return tcp_v6_send_synack(sk, req, rvp); } -static inline void syn_flood_warning(struct sk_buff *skb) -{ -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) - printk(KERN_INFO - "TCPv6: Possible SYN flooding on port %d. " - "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); - else -#endif - printk(KERN_INFO - "TCPv6: Possible SYN flooding on port %d. 
" - "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); -} - static void tcp_v6_reqsk_destructor(struct request_sock *req) { kfree_skb(inet6_rsk(req)->pktopts); @@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; -#ifdef CONFIG_SYN_COOKIES int want_cookie = 0; -#else -#define want_cookie 0 -#endif if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); @@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop; if (inet_csk_reqsk_queue_is_full(sk) && !isn) { - if (net_ratelimit()) - syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) - want_cookie = 1; - else -#endif - goto drop; + want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); + if (!want_cookie) + goto drop; } if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) @@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES want_cookie = 0; /* not our kind of cookie */ -#endif tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 29213b51c49..bb95e8e1c6f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1090,8 +1090,8 @@ do_udp_sendmsg: memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, - &tclass, &dontfrag); + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index d0b70dadf73..2615ffc8e78 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -40,9 +40,9 @@ extern int sysctl_slot_timeout; extern int sysctl_fast_poll_increase; extern char sysctl_devname[]; extern int sysctl_max_baud_rate; -extern int sysctl_min_tx_turn_time; -extern int sysctl_max_tx_data_size; -extern int sysctl_max_tx_window; +extern unsigned int sysctl_min_tx_turn_time; +extern unsigned int sysctl_max_tx_data_size; +extern unsigned int sysctl_max_tx_window; extern int sysctl_max_noreply_time; extern int sysctl_warn_noreply_time; extern int sysctl_lap_keepalive_time; diff --git a/net/irda/qos.c b/net/irda/qos.c index 1b51bcf4239..4369f7f41bc 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12; * Default is 10us which means using the unmodified value given by the * peer except if it's 0 (0 is likely a bug in the other stack). */ -unsigned sysctl_min_tx_turn_time = 10; +unsigned int sysctl_min_tx_turn_time = 10; /* * Maximum data size to be used in transmission in payload of LAP frame. * There is a bit of confusion in the IrDA spec : @@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10; * bytes frames or all negotiated frame sizes, but you can use the sysctl * to play with this value anyway. * Jean II */ -unsigned sysctl_max_tx_data_size = 2042; +unsigned int sysctl_max_tx_data_size = 2042; /* * Maximum transmit window, i.e. number of LAP frames between turn-around. * This allow to override what the peer told us. Some peers are buggy and * don't always support what they tell us. 
* Jean II */ -unsigned sysctl_max_tx_window = 7; +unsigned int sysctl_max_tx_window = 7; static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 3db78b696c5..21070e9bc8d 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) BUG_ON(!sdata->bss); atomic_dec(&sdata->bss->num_sta_ps); - __sta_info_clear_tim_bit(sdata->bss, sta); + sta_info_clear_tim_bit(sta); } local->num_sta--; diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 2fd4565144d..31d56b23b9e 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb, break; case PPTP_WAN_ERROR_NOTIFY: + case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 37bf94394be..8235b86b4e8 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb, if (opsize < 2) /* "silly options" */ return; if (opsize > length) - break; /* don't parse partial options */ + return; /* don't parse partial options */ if (opcode == TCPOPT_SACK_PERM && opsize == TCPOLEN_SACK_PERM) @@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, BUG_ON(ptr == NULL); /* Fast path for timestamp-only option */ - if (length == TCPOLEN_TSTAMP_ALIGNED*4 + if (length == TCPOLEN_TSTAMP_ALIGNED && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) @@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, if (opsize < 2) /* "silly options" */ return; if (opsize > length) - break; /* don't parse partial options */ + return; /* don't parse partial options */ if (opcode == TCPOPT_SACK && opsize >= (TCPOLEN_SACK_BASE diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 00bd475eab4..a80b0cb03f1 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[]) return NULL; vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); - verdict = ntohl(vhdr->verdict); - if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) + verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; + if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN) return NULL; return vhdr; } diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index 76a083184d8..ed0db15ab00 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_rateest_match_info *info = par->matchinfo; struct xt_rateest *est1, *est2; - int ret = false; + int ret = -EINVAL; if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | XT_RATEEST_MATCH_REL)) != 1) @@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) if (!est1) goto err1; + est2 = NULL; if (info->flags & XT_RATEEST_MATCH_REL) { est2 = xt_rateest_lookup(info->name2); if (!est2) goto err2; - } else - est2 = NULL; - + } info->est1 = est1; info->est2 = est2; @@ -116,7 +115,7 @@ static int 
xt_rateest_mt_checkentry(const struct xt_mtchk_param *par) err2: xt_rateest_put(est1); err1: - return -EINVAL; + return ret; } static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par) diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index be4505ee67a..b01427924f8 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, struct rsvp_filter *f, **fp; struct rsvp_session *s, **sp; struct tc_rsvp_pinfo *pinfo = NULL; - struct nlattr *opt = tca[TCA_OPTIONS-1]; + struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_RSVP_MAX + 1]; struct tcf_exts e; unsigned int h1, h2; @@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, if (err < 0) return err; - err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); + err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map); if (err < 0) return err; @@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, if (f->handle != handle && handle) goto errout2; - if (tb[TCA_RSVP_CLASSID-1]) { - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); + if (tb[TCA_RSVP_CLASSID]) { + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); tcf_bind_filter(tp, &f->res, base); } @@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, err = -EINVAL; if (handle) goto errout2; - if (tb[TCA_RSVP_DST-1] == NULL) + if (tb[TCA_RSVP_DST] == NULL) goto errout2; err = -ENOBUFS; @@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, goto errout2; h2 = 16; - if (tb[TCA_RSVP_SRC-1]) { - memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); + if (tb[TCA_RSVP_SRC]) { + memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); h2 = hash_src(f->src); } - if (tb[TCA_RSVP_PINFO-1]) { - pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); + if (tb[TCA_RSVP_PINFO]) { + pinfo = nla_data(tb[TCA_RSVP_PINFO]); f->spi = pinfo->spi; f->tunnelhdr = pinfo->tunnelhdr; } - if (tb[TCA_RSVP_CLASSID-1]) - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); + if (tb[TCA_RSVP_CLASSID]) + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); - dst = nla_data(tb[TCA_RSVP_DST-1]); + dst = nla_data(tb[TCA_RSVP_DST]); h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? 
pinfo->tunnelid : 0); err = -ENOMEM; @@ -642,8 +642,7 @@ nla_put_failure: return -1; } -static struct tcf_proto_ops RSVP_OPS = { - .next = NULL, +static struct tcf_proto_ops RSVP_OPS __read_mostly = { .kind = RSVP_ID, .classify = rsvp_classify, .init = rsvp_init, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 167c880cf8d..76388b083f2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, case SCTP_CMD_PURGE_ASCONF_QUEUE: sctp_asconf_queue_teardown(asoc); break; + + case SCTP_CMD_SET_ASOC: + asoc = cmd->obj.asoc; + break; + default: pr_warn("Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 49b847b00f9..a0f31e6c1c6 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); + /* Restore association pointer to provide SCTP command interpreter + * with a valid context in case it needs to manipulate + * the queues */ + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, + SCTP_ASOC((struct sctp_association *)asoc)); + return retval; nomem: diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 86d0caf91b3..62e90b862a0 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream, snd_pcm_uframes_t avail = 0; long wait_time, tout; + init_waitqueue_entry(&wait, current); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&runtime->tsleep, &wait); + if (runtime->no_period_wakeup) wait_time = MAX_SCHEDULE_TIMEOUT; else { @@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream, } wait_time = msecs_to_jiffies(wait_time * 1000); } - init_waitqueue_entry(&wait, current); - add_wait_queue(&runtime->tsleep, &wait); + for (;;) { if (signal_pending(current)) { err = -ERESTARTSYS; break; } + + /* + * We need to check if space became available already + * (and thus the wakeup happened already) first to close + * the race of space already having become available. + * This check must happen after being added to the waitqueue + * and having the current state be INTERRUPTIBLE.
+ */ + if (is_playback) + avail = snd_pcm_playback_avail(runtime); + else + avail = snd_pcm_capture_avail(runtime); + if (avail >= runtime->twake) + break; snd_pcm_stream_unlock_irq(substream); - tout = schedule_timeout_interruptible(wait_time); + + tout = schedule_timeout(wait_time); + snd_pcm_stream_lock_irq(substream); + set_current_state(TASK_INTERRUPTIBLE); switch (runtime->status->state) { case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; @@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream, err = -EIO; break; } - if (is_playback) - avail = snd_pcm_playback_avail(runtime); - else - avail = snd_pcm_capture_avail(runtime); - if (avail >= runtime->twake) - break; } _endloop: + set_current_state(TASK_RUNNING); remove_wait_queue(&runtime->tsleep, &wait); *availp = avail; return err; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3e7850c238c..f3aefef3721 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux, return -1; } recursive++; - for (i = 0; i < nums; i++) + for (i = 0; i < nums; i++) { + unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i])); + if (type == AC_WID_PIN || type == AC_WID_AUD_OUT) + continue; if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0) return i; + } return -1; } EXPORT_SYMBOL_HDA(snd_hda_get_conn_index); diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index d6c93d92b55..c45f3e69bcf 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name, int index, unsigned int pval, int dir, struct snd_kcontrol **kctlp) { - char tmp[32]; + char tmp[44]; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT); knew.private_value = pval; diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c index a118a0fb9d8..5956584ea3a 100644 --- a/sound/soc/blackfin/bf5xx-ad193x.c +++ b/sound/soc/blackfin/bf5xx-ad193x.c @@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = { .cpu_dai_name = "bfin-tdm.0", .codec_dai_name ="ad193x-hifi", .platform_name = "bfin-tdm-pcm-audio", - .codec_name = "ad193x.5", + .codec_name = "spi0.5", .ops = &bf5xx_ad193x_ops, }, { @@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = { .cpu_dai_name = "bfin-tdm.1", .codec_dai_name ="ad193x-hifi", .platform_name = "bfin-tdm-pcm-audio", - .codec_name = "ad193x.5", + .codec_name = "spi0.5", .ops = &bf5xx_ad193x_ops, }, }; diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c index fd0dc46afc3..5c6c2457386 100644 --- a/sound/soc/fsl/mpc5200_dma.c +++ b/sound/soc/fsl/mpc5200_dma.c @@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = { .pcm_free = &psc_dma_free, }; -static int mpc5200_hpcd_probe(struct of_device *op) +static int mpc5200_hpcd_probe(struct platform_device *op) { phys_addr_t fifo; struct psc_dma *psc_dma; @@ -487,7 +487,7 @@ out_unmap: return ret; } -static int mpc5200_hpcd_remove(struct of_device *op) +static int mpc5200_hpcd_remove(struct platform_device *op) { struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); @@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match); static struct platform_driver mpc5200_hpcd_of_driver = { .probe = mpc5200_hpcd_probe, .remove = mpc5200_hpcd_remove, - .dev = { + .driver = { .owner = THIS_MODULE, .name = 
"mpc5200-pcm-audio", .of_match_table = mpc5200_hpcd_match, diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c index 309c59e6fb6..7945625e0e0 100644 --- a/sound/soc/imx/imx-pcm-fiq.c +++ b/sound/soc/imx/imx-pcm-fiq.c @@ -240,7 +240,6 @@ static int ssi_irq = 0; static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) { - struct snd_card *card = rtd->card->snd_card; struct snd_soc_dai *dai = rtd->cpu_dai; struct snd_pcm *pcm = rtd->pcm; int ret; diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 8f16cd37c2a..d0bcf3fcea0 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev) if (!priv->mem) { dev_err(&pdev->dev, "request_mem_region failed\n"); err = -EBUSY; - goto error_alloc; + goto err_alloc; } priv->io = ioremap(priv->mem->start, SZ_16K); diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c index d9f8aded51f..20b7f3b003a 100644 --- a/sound/soc/soc-cache.c +++ b/sound/soc/soc-cache.c @@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec) rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); for (i = 0; i < rbnode->blklen; ++i) { regtmp = rbnode->base_reg + i; - WARN_ON(codec->writable_register && - codec->writable_register(codec, regtmp)); val = snd_soc_rbtree_get_register(rbnode, i); def = snd_soc_get_cache_val(codec->reg_def_copy, i, rbnode->word_size); if (val == def) continue; + WARN_ON(!snd_soc_codec_writable_register(codec, regtmp)); + codec->cache_bypass = 1; ret = snd_soc_write(codec, regtmp, val); codec->cache_bypass = 0; @@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec) lzo_blocks = codec->reg_cache; for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { - WARN_ON(codec->writable_register && - codec->writable_register(codec, i)); + WARN_ON(!snd_soc_codec_writable_register(codec, i)); ret = snd_soc_cache_read(codec, i, &val); if (ret) return ret; @@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) codec_drv = codec->driver; for (i = 0; i < codec_drv->reg_cache_size; ++i) { - WARN_ON(codec->writable_register && - codec->writable_register(codec, i)); ret = snd_soc_cache_read(codec, i, &val); if (ret) return ret; @@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) if (snd_soc_get_cache_val(codec->reg_def_copy, i, codec_drv->reg_word_size) == val) continue; + + WARN_ON(!snd_soc_codec_writable_register(codec, i)); + ret = snd_soc_write(codec, i, val); if (ret) return ret; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b085d8e8757..d2ef014af21 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1633,7 +1633,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec, if (codec->readable_register) return codec->readable_register(codec, reg); else - return 0; + return 1; } EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); @@ -1651,7 +1651,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec, if (codec->writable_register) return codec->writable_register(codec, reg); else - return 0; + return 1; } EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7e15914b363..d67c637557a 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); /** * snd_soc_dapm_free - free 
dapm resources - * @card: SoC device + * @dapm: DAPM context * * Free all dapm widgets and resources. */ diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index 38b00131b2f..fa31d9c2abd 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) snd_soc_dapm_sync(dapm); - snd_jack_report(jack->jack, status); + snd_jack_report(jack->jack, jack->status); out: mutex_unlock(&codec->mutex);
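Note on the net/9p/trans_virtio.c change above: .maxsize drops from PAGE_SIZE*VIRTQUEUE_NUM to PAGE_SIZE * (VIRTQUEUE_NUM - 3) because two scatter-gather entries are reserved for the request and response headers, and one more for the extra page a zero-copy payload needs when the user buffer does not start on a page boundary. A minimal standalone sketch of that arithmetic (a hedged illustration only, assuming a 4 KiB page and a 128-entry virtqueue; not the driver code):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define VIRTQUEUE_NUM 128UL   /* assumed ring size for illustration */

/* Number of pages (i.e. scatter-gather entries) a buffer spans when it
 * starts 'offset' bytes into its first page. */
static unsigned long nr_sg_entries(unsigned long offset, unsigned long len)
{
	return (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	unsigned long maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3);

	/* Page-aligned payload: fits in VIRTQUEUE_NUM - 3 entries. */
	printf("aligned payload:   %lu entries\n", nr_sg_entries(0, maxsize));
	/* Unaligned payload: spills into one extra entry, which together with
	 * the two header entries still fits the VIRTQUEUE_NUM-entry ring. */
	printf("unaligned payload: %lu entries\n", nr_sg_entries(100, maxsize));
	return 0;
}

Under these assumptions the sketch prints 125 and 126: an unaligned payload plus two header entries exactly fills the 128-entry ring, which is why three entries are held back when sizing maxsize.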
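Note on the ip_queue/ip6_queue/nfnetlink_queue changes above: a verdict received from userspace is now masked down to the verdict bits before the range check, and NF_STOLEN is rejected outright, since a stolen packet would never be freed by the queue handler. A minimal standalone sketch of that validation (a hedged illustration only; the constant values are assumed to mirror the uapi netfilter verdicts of this era, and this is not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

enum { NF_DROP, NF_ACCEPT, NF_STOLEN, NF_QUEUE, NF_REPEAT, NF_STOP };
#define NF_MAX_VERDICT  NF_STOP
#define NF_VERDICT_MASK 0x000000ff  /* assumed: low bits carry the verdict */

static bool verdict_ok(unsigned int raw)
{
	unsigned int verdict = raw & NF_VERDICT_MASK;

	/* Reject out-of-range verdicts and NF_STOLEN, which would leak the
	 * queued packet because nothing would ever free it. */
	return verdict <= NF_MAX_VERDICT && verdict != NF_STOLEN;
}

int main(void)
{
	printf("NF_ACCEPT:            %d\n", verdict_ok(NF_ACCEPT));
	printf("NF_STOLEN:            %d\n", verdict_ok(NF_STOLEN));
	printf("high bits | NF_DROP:  %d\n", verdict_ok(0xffff0000u | NF_DROP));
	return 0;
}

Masking first keeps verdicts that legitimately carry extra data in their high bits from being rejected, while the explicit NF_STOLEN check closes the leak.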