1629 files changed, 26615 insertions, 12782 deletions
diff --git a/Documentation/ABI/testing/sysfs-ibft b/Documentation/ABI/testing/sysfs-ibft index c2b7d1154be..cac3930bdb0 100644 --- a/Documentation/ABI/testing/sysfs-ibft +++ b/Documentation/ABI/testing/sysfs-ibft @@ -20,4 +20,4 @@ Date: November 2007 Contact: Konrad Rzeszutek <ketuzsezr@darnok.org> Description: The /sys/firmware/ibft/ethernetX directory will contain files that expose the iSCSI Boot Firmware Table NIC data. - This can this can the IP address, MAC, and gateway of the NIC. + Usually this contains the IP address, MAC, and gateway of the NIC. diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile index df2962d9e11..8bf7c619129 100644 --- a/Documentation/DocBook/media/Makefile +++ b/Documentation/DocBook/media/Makefile @@ -25,7 +25,7 @@ GENFILES := $(addprefix $(MEDIA_OBJ_DIR)/, $(MEDIA_TEMP)) PHONY += cleanmediadocs cleanmediadocs: - -@rm `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null + -@rm -f `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null $(obj)/media_api.xml: $(GENFILES) FORCE diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml index 07ffc76553b..0a2debfa68f 100644 --- a/Documentation/DocBook/media/v4l/compat.xml +++ b/Documentation/DocBook/media/v4l/compat.xml @@ -2566,6 +2566,10 @@ fields changed from _s32 to _u32. <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;. </para> </listitem> + </orderedlist> + </section> + + <section> <title>V4L2 in Linux 3.18</title> <orderedlist> <listitem> diff --git a/Documentation/HOWTO b/Documentation/HOWTO index 57cf5efb044..93aa8604630 100644 --- a/Documentation/HOWTO +++ b/Documentation/HOWTO @@ -324,7 +324,6 @@ tree, they need to be integration-tested. For this purpose, a special testing repository exists into which virtually all subsystem trees are pulled on an almost daily basis: http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git - http://linux.f-seidel.de/linux-next/pmwiki/ This way, the -next kernel gives a summary outlook onto what will be expected to go into the mainline kernel at the next merge period. diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index 482c74947de..1fa1caa198e 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches @@ -483,12 +483,10 @@ have been included in the discussion 14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes: -If this patch fixes a problem reported by somebody else, consider adding a -Reported-by: tag to credit the reporter for their contribution. Please -note that this tag should not be added without the reporter's permission, -especially if the problem was not reported in a public forum. That said, -if we diligently credit our bug reporters, they will, hopefully, be -inspired to help us again in the future. +The Reported-by tag gives credit to people who find bugs and report them and it +hopefully inspires them to help us again in the future. Please note that if +the bug was reported in private, then ask for permission first before using the +Reported-by tag. A Tested-by: tag indicates that the patch has been successfully tested (in some environment) by the person named. 
This tag informs maintainers that diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt index 344e85cc732..d7273a5f645 100644 --- a/Documentation/arm64/memory.txt +++ b/Documentation/arm64/memory.txt @@ -17,7 +17,7 @@ User addresses have bits 63:48 set to 0 while the kernel addresses have the same bits set to 1. TTBRx selection is given by bit 63 of the virtual address. The swapper_pg_dir contains only kernel (global) mappings while the user pgd contains only user (non-global) mappings. -The swapper_pgd_dir address is written to TTBR1 and never written to +The swapper_pg_dir address is written to TTBR1 and never written to TTBR0. diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process index 2e0617936e8..c24e156a611 100644 --- a/Documentation/development-process/2.Process +++ b/Documentation/development-process/2.Process @@ -289,10 +289,6 @@ lists when they are assembled; they can be downloaded from: http://www.kernel.org/pub/linux/kernel/next/ -Some information about linux-next has been gathered at: - - http://linux.f-seidel.de/linux-next/pmwiki/ - Linux-next has become an integral part of the kernel development process; all patches merged during a given merge window should really have found their way into linux-next some time before the merge window opens. diff --git a/Documentation/development-process/8.Conclusion b/Documentation/development-process/8.Conclusion index 1990ab4b494..caef69022e9 100644 --- a/Documentation/development-process/8.Conclusion +++ b/Documentation/development-process/8.Conclusion @@ -22,10 +22,6 @@ Beyond that, a valuable resource for kernel developers is: http://kernelnewbies.org/ -Information about the linux-next tree gathers at: - - http://linux.f-seidel.de/linux-next/pmwiki/ - And, of course, one should not forget http://kernel.org/, the definitive location for kernel release information. diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt index 1e6111333fa..80ae87a0784 100644 --- a/Documentation/devicetree/bindings/ata/sata_rcar.txt +++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt @@ -3,8 +3,10 @@ Required properties: - compatible : should contain one of the following: - "renesas,sata-r8a7779" for R-Car H1 - - "renesas,sata-r8a7790" for R-Car H2 - - "renesas,sata-r8a7791" for R-Car M2 + - "renesas,sata-r8a7790-es1" for R-Car H2 ES1 + - "renesas,sata-r8a7790" for R-Car H2 other than ES1 + - "renesas,sata-r8a7791" for R-Car M2-W + - "renesas,sata-r8a7793" for R-Car M2-N - reg : address and length of the SATA registers; - interrupts : must consist of one interrupt specifier. diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt index ce6a1a07202..8a3c4082989 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt @@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents. Example: interrupts-extended = <&intc1 5 1>, <&intc2 1 0>; -A device node may contain either "interrupts" or "interrupts-extended", but not -both. If both properties are present, then the operating system should log an -error and use only the data in "interrupts". 
- 2) Interrupt controller nodes ----------------------------- diff --git a/Documentation/devicetree/bindings/mailbox/mailbox.txt b/Documentation/devicetree/bindings/mailbox/mailbox.txt new file mode 100644 index 00000000000..1a2cd3d266d --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/mailbox.txt @@ -0,0 +1,38 @@ +* Generic Mailbox Controller and client driver bindings + +Generic binding to provide a way for Mailbox controller drivers to +assign appropriate mailbox channel to client drivers. + +* Mailbox Controller + +Required property: +- #mbox-cells: Must be at least 1. Number of cells in a mailbox + specifier. + +Example: + mailbox: mailbox { + ... + #mbox-cells = <1>; + }; + + +* Mailbox Client + +Required property: +- mboxes: List of phandle and mailbox channel specifiers. + +Optional property: +- mbox-names: List of identifier strings for each mailbox channel + required by the client. The use of this property + is discouraged in favor of using index in list of + 'mboxes' while requesting a mailbox. Instead the + platforms may define channel indices, in DT headers, + to something legible. + +Example: + pwr_cntrl: power { + ... + mbox-names = "pwr-ctrl", "rpc"; + mboxes = <&mailbox 0 + &mailbox 1>; + }; diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt index 0f8487b8882..e77e167593d 100644 --- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt +++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt @@ -11,3 +11,5 @@ Optional properties: are supported on the device. Valid value for SMSC LAN91c111 are 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning 16-bit access only. +- power-gpios: GPIO to control the PWRDWN pin +- reset-gpios: GPIO to control the RESET pin diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt index 41aeed38926..f8fbe9af7b2 100644 --- a/Documentation/devicetree/bindings/pci/pci.txt +++ b/Documentation/devicetree/bindings/pci/pci.txt @@ -7,3 +7,14 @@ And for the interrupt mapping part: Open Firmware Recommended Practice: Interrupt Mapping http://www.openfirmware.org/1275/practice/imap/imap0_9d.pdf + +Additionally to the properties specified in the above standards a host bridge +driver implementation may support the following properties: + +- linux,pci-domain: + If present this property assigns a fixed PCI domain number to a host bridge, + otherwise an unstable (across boots) unique number will be assigned. + It is required to either not set this property at all or set it for all + host bridges in the system, otherwise potentially conflicting domain numbers + may be assigned to root buses behind different host bridges. The domain + number for each host bridge in the system must be unique. diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt index a186181c402..51b943cc977 100644 --- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -TZ1090-PDC's pin configuration nodes act as a container for an abitrary number +TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number of subnodes. 
Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt index 4b27c99f7f9..49d0e605094 100644 --- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -TZ1090's pin configuration nodes act as a container for an abitrary number of +TZ1090's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt index daa76895606..ac4da9fe07b 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Lantiq's pin configuration nodes act as a container for an abitrary number of +Lantiq's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those group(s), and two pin configuration parameters: diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt index b5469db1d7a..e89b4677567 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Lantiq's pin configuration nodes act as a container for an abitrary number of +Lantiq's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those group(s), and two pin configuration parameters: diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt index 61e73cde9ae..3c8ce28baad 100644 --- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt +++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt @@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". 
-Tegra's pin configuration nodes act as a container for an abitrary number of +Tegra's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt index c596a6ad328..5f55be59d91 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt @@ -13,7 +13,7 @@ Optional properties: Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices. -SiRFprimaII's pinmux nodes act as a container for an abitrary number of subnodes. +SiRFprimaII's pinmux nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a group of pins. Required subnode-properties: diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt index b4480d5c3ac..45861559694 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt @@ -32,7 +32,7 @@ Required properties: Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices. -SPEAr's pinmux nodes act as a container for an abitrary number of subnodes. Each +SPEAr's pinmux nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents muxing for a pin, a group, or a list of pins or groups. diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt index 2fb90b37aa0..a7bde64798c 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt index ffafa1990a3..c4ea61ac56f 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt @@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -The pin configuration nodes act as a container for an abitrary number of +The pin configuration nodes act as a container for an arbitrary number of subnodes. 
Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt index e33e4dcdce7..6e88e91feb1 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt index 93b7de91b9f..eb8d8aa41f2 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt @@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -The pin configuration nodes act as a container for an abitrary number of +The pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt index d2ea80dc43e..e4d6a9d20f7 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt @@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the common pinctrl bindings used by client devices, including the meaning of the phrase "pin configuration node". -Qualcomm's pin configuration nodes act as a container for an abitrary number of +Qualcomm's pin configuration nodes act as a container for an arbitrary number of subnodes. Each of these subnodes represents some desired configuration for a pin, a group, or a list of pins or groups. This configuration can include the mux function to select on those pin(s)/group(s), and various pin configuration diff --git a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt index 0bda229a617..3899d6a557c 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt @@ -1,5 +1,20 @@ Freescale FlexTimer Module (FTM) PWM controller +The same FTM PWM device can have a different endianness on different SoCs. 
The +device tree provides a property to describing this so that an operating system +device driver can handle all variants of the device. Refer to the table below +for the endianness of the FTM PWM block as integrated into the existing SoCs: + + SoC | FTM-PWM endianness + --------+------------------- + Vybrid | LE + LS1 | BE + LS2 | LE + +Please see ../regmap/regmap.txt for more detail about how to specify endian +modes in device tree. + + Required properties: - compatible: Should be "fsl,vf610-ftm-pwm". - reg: Physical base address and length of the controller's registers @@ -16,7 +31,8 @@ Required properties: - pinctrl-names: Must contain a "default" entry. - pinctrl-NNN: One property must exist for each entry in pinctrl-names. See pinctrl/pinctrl-bindings.txt for details of the property values. - +- big-endian: Boolean property, required if the FTM PWM registers use a big- + endian rather than little-endian layout. Example: @@ -32,4 +48,5 @@ pwm0: pwm@40038000 { <&clks VF610_CLK_FTM0_EXT_FIX_EN>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pwm0_1>; + big-endian; }; diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt index d47d15a6a29..b8be3d09ee2 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt @@ -7,8 +7,8 @@ Required properties: "rockchip,vop-pwm": found integrated in VOP on RK3288 SoC - reg: physical base address and length of the controller's registers - clocks: phandle and clock specifier of the PWM reference clock - - #pwm-cells: should be 2. See pwm.txt in this directory for a - description of the cell format. + - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.txt in this directory + for a description of the cell format. Example: diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt index 955df60a118..d556dcb8816 100644 --- a/Documentation/devicetree/bindings/sound/sgtl5000.txt +++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt @@ -7,10 +7,20 @@ Required properties: - clocks : the clock provider of SYS_MCLK +- VDDA-supply : the regulator provider of VDDA + +- VDDIO-supply: the regulator provider of VDDIO + +Optional properties: + +- VDDD-supply : the regulator provider of VDDD + Example: codec: sgtl5000@0a { compatible = "fsl,sgtl5000"; reg = <0x0a>; clocks = <&clks 150>; + VDDA-supply = <®_3p3v>; + VDDIO-supply = <®_3p3v>; }; diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt index 042a0273b8b..b7ba01ad142 100644 --- a/Documentation/devicetree/bindings/submitting-patches.txt +++ b/Documentation/devicetree/bindings/submitting-patches.txt @@ -12,6 +12,9 @@ I. For patch submitters devicetree@vger.kernel.org + 3) The Documentation/ portion of the patch should come in the series before + the code implementing the binding. + II. 
For kernel maintainers 1) If you aren't comfortable reviewing a given binding, reply to it and ask diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt index 1f0f67234a9..3c67bd50aa1 100644 --- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt @@ -1,7 +1,10 @@ * Temperature Monitor (TEMPMON) on Freescale i.MX SoCs Required properties: -- compatible : "fsl,imx6q-thermal" +- compatible : "fsl,imx6q-tempmon" for i.MX6Q, "fsl,imx6sx-tempmon" for i.MX6SX. + i.MX6SX has two more IRQs than i.MX6Q, one is IRQ_LOW and the other is IRQ_PANIC, + when temperature is below than low threshold, IRQ_LOW will be triggered, when temperature + is higher than panic threshold, system will auto reboot by SRC module. - fsl,tempmon : phandle pointer to system controller that contains TEMPMON control registers, e.g. ANATOP on imx6q. - fsl,tempmon-data : phandle pointer to fuse controller that contains TEMPMON diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt index 0ef00be44b0..43404b19793 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt @@ -7,7 +7,10 @@ Required properties: - "renesas,thermal-r8a73a4" (R-Mobile AP6) - "renesas,thermal-r8a7779" (R-Car H1) - "renesas,thermal-r8a7790" (R-Car H2) - - "renesas,thermal-r8a7791" (R-Car M2) + - "renesas,thermal-r8a7791" (R-Car M2-W) + - "renesas,thermal-r8a7792" (R-Car V2H) + - "renesas,thermal-r8a7793" (R-Car M2-N) + - "renesas,thermal-r8a7794" (R-Car E2) - reg : Address range of the thermal registers. The 1st reg will be recognized as common register if it has "interrupts". diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 723999d7374..a344ec2713a 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -34,6 +34,7 @@ chipidea Chipidea, Inc chrp Common Hardware Reference Platform chunghwa Chunghwa Picture Tubes Ltd. cirrus Cirrus Logic, Inc. +cnm Chips&Media, Inc. cortina Cortina Systems, Inc. crystalfontz Crystalfontz America, Inc. dallas Maxim Integrated Products (formerly Dallas Semiconductor) @@ -92,6 +93,7 @@ maxim Maxim Integrated Products mediatek MediaTek Inc. micrel Micrel Inc. microchip Microchip Technology Inc. +micron Micron Technology Inc. mitsubishi Mitsubishi Electric Corporation mosaixtech Mosaix Technologies, Inc. moxa Moxa @@ -127,6 +129,7 @@ renesas Renesas Electronics Corporation ricoh Ricoh Co. Ltd. rockchip Fuzhou Rockchip Electronics Co., Ltd samsung Samsung Semiconductor +sandisk Sandisk Corporation sbs Smart Battery System schindler Schindler seagate Seagate Technology PLC @@ -138,7 +141,7 @@ silergy Silergy Corp. sirf SiRF Technology, Inc. sitronix Sitronix Technology Corporation smsc Standard Microsystems Corporation -snps Synopsys, Inc. +snps Synopsys, Inc. solidrun SolidRun sony Sony Corporation spansion Spansion Inc. 
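For the sgtl5000 binding above, the new VDDA-supply and VDDIO-supply properties are consumed through the standard regulator consumer API. A minimal sketch of that lookup follows; it is not the actual sgtl5000 driver, and the function name is made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Illustrative only: obtain the supplies named by "VDDA-supply" and
 * "VDDIO-supply" in the codec node, then enable them. */
static int example_codec_enable_supplies(struct device *dev)
{
	struct regulator *vdda, *vddio;
	int ret;

	vdda = devm_regulator_get(dev, "VDDA");		/* matches VDDA-supply */
	if (IS_ERR(vdda))
		return PTR_ERR(vdda);

	vddio = devm_regulator_get(dev, "VDDIO");	/* matches VDDIO-supply */
	if (IS_ERR(vddio))
		return PTR_ERR(vddio);

	ret = regulator_enable(vdda);
	if (ret)
		return ret;

	return regulator_enable(vddio);
}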
diff --git a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt new file mode 100644 index 00000000000..c3a36ee4555 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt @@ -0,0 +1,24 @@ +Zynq Watchdog Device Tree Bindings +------------------------------------------- + +Required properties: +- compatible : Should be "cdns,wdt-r1p2". +- clocks : This is pclk (APB clock). +- interrupts : This is wd_irq - watchdog timeout interrupt. +- interrupt-parent : Must be core interrupt controller. + +Optional properties +- reset-on-timeout : If this property exists, then a reset is done + when watchdog times out. +- timeout-sec : Watchdog timeout value (in seconds). + +Example: + watchdog@f8005000 { + compatible = "cdns,wdt-r1p2"; + clocks = <&clkc 45>; + interrupt-parent = <&intc>; + interrupts = <0 9 1>; + reg = <0xf8005000 0x1000>; + reset-on-timeout; + timeout-sec = <10>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt index e52ba2da868..8dab6fd024a 100644 --- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt @@ -7,7 +7,8 @@ Required properties: Optional property: - big-endian: If present the watchdog device's registers are implemented - in big endian mode, otherwise in little mode. + in big endian mode, otherwise in native mode(same with CPU), for more + detail please see: Documentation/devicetree/bindings/regmap/regmap.txt. Examples: diff --git a/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt new file mode 100644 index 00000000000..9200fc2d508 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt @@ -0,0 +1,13 @@ +Meson SoCs Watchdog timer + +Required properties: + +- compatible : should be "amlogic,meson6-wdt" +- reg : Specifies base physical address and size of the registers. 
+ +Example: + +wdt: watchdog@c1109900 { + compatible = "amlogic,meson6-wdt"; + reg = <0xc1109900 0x8>; +}; diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt new file mode 100644 index 00000000000..4726924d034 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt @@ -0,0 +1,24 @@ +Qualcomm Krait Processor Sub-system (KPSS) Watchdog +--------------------------------------------------- + +Required properties : +- compatible : shall contain only one of the following: + + "qcom,kpss-wdt-msm8960" + "qcom,kpss-wdt-apq8064" + "qcom,kpss-wdt-ipq8064" + +- reg : shall contain base register location and length +- clocks : shall contain the input clock + +Optional properties : +- timeout-sec : shall contain the default watchdog timeout in seconds, + if unset, the default timeout is 30 seconds + +Example: + watchdog@208a038 { + compatible = "qcom,kpss-wdt-ipq8064"; + reg = <0x0208a038 0x40>; + clocks = <&sleep_clk>; + timeout-sec = <10>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt index cfff37511aa..8f3d96af81d 100644 --- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt @@ -9,6 +9,7 @@ Required properties: (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs (b) "samsung,exynos5250-wdt" for Exynos5250 (c) "samsung,exynos5420-wdt" for Exynos5420 + (c) "samsung,exynos7-wdt" for Exynos7 - reg : base physical address of the controller and length of memory mapped region. diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 94d93b1f8b5..b30753cbf43 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -67,6 +67,7 @@ prototypes: struct file *, unsigned open_flag, umode_t create_mode, int *opened); int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*dentry_open)(struct dentry *, struct file *, const struct cred *); locking rules: all may block @@ -96,6 +97,7 @@ fiemap: no update_time: no atomic_open: yes tmpfile: no +dentry_open: no Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on victim. diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt new file mode 100644 index 00000000000..a27c950ece6 --- /dev/null +++ b/Documentation/filesystems/overlayfs.txt @@ -0,0 +1,198 @@ +Written by: Neil Brown <neilb@suse.de> + +Overlay Filesystem +================== + +This document describes a prototype for a new approach to providing +overlay-filesystem functionality in Linux (sometimes referred to as +union-filesystems). An overlay-filesystem tries to present a +filesystem which is the result over overlaying one filesystem on top +of the other. + +The result will inevitably fail to look exactly like a normal +filesystem for various technical reasons. The expectation is that +many use cases will be able to ignore these differences. + +This approach is 'hybrid' because the objects that appear in the +filesystem do not all appear to belong to that filesystem. In many +cases an object accessed in the union will be indistinguishable +from accessing the corresponding object from the original filesystem. +This is most obvious from the 'st_dev' field returned by stat(2). 
+ +While directories will report an st_dev from the overlay-filesystem, +all non-directory objects will report an st_dev from the lower or +upper filesystem that is providing the object. Similarly st_ino will +only be unique when combined with st_dev, and both of these can change +over the lifetime of a non-directory object. Many applications and +tools ignore these values and will not be affected. + +Upper and Lower +--------------- + +An overlay filesystem combines two filesystems - an 'upper' filesystem +and a 'lower' filesystem. When a name exists in both filesystems, the +object in the 'upper' filesystem is visible while the object in the +'lower' filesystem is either hidden or, in the case of directories, +merged with the 'upper' object. + +It would be more correct to refer to an upper and lower 'directory +tree' rather than 'filesystem' as it is quite possible for both +directory trees to be in the same filesystem and there is no +requirement that the root of a filesystem be given for either upper or +lower. + +The lower filesystem can be any filesystem supported by Linux and does +not need to be writable. The lower filesystem can even be another +overlayfs. The upper filesystem will normally be writable and if it +is it must support the creation of trusted.* extended attributes, and +must provide valid d_type in readdir responses, so NFS is not suitable. + +A read-only overlay of two read-only filesystems may use any +filesystem type. + +Directories +----------- + +Overlaying mainly involves directories. If a given name appears in both +upper and lower filesystems and refers to a non-directory in either, +then the lower object is hidden - the name refers only to the upper +object. + +Where both upper and lower objects are directories, a merged directory +is formed. + +At mount time, the two directories given as mount options "lowerdir" and +"upperdir" are combined into a merged directory: + + mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\ +workdir=/work /merged + +The "workdir" needs to be an empty directory on the same filesystem +as upperdir. + +Then whenever a lookup is requested in such a merged directory, the +lookup is performed in each actual directory and the combined result +is cached in the dentry belonging to the overlay filesystem. If both +actual lookups find directories, both are stored and a merged +directory is created, otherwise only one is stored: the upper if it +exists, else the lower. + +Only the lists of names from directories are merged. Other content +such as metadata and extended attributes are reported for the upper +directory only. These attributes of the lower directory are hidden. + +whiteouts and opaque directories +-------------------------------- + +In order to support rm and rmdir without changing the lower +filesystem, an overlay filesystem needs to record in the upper filesystem +that files have been removed. This is done using whiteouts and opaque +directories (non-directories are always opaque). + +A whiteout is created as a character device with 0/0 device number. +When a whiteout is found in the upper level of a merged directory, any +matching name in the lower level is ignored, and the whiteout itself +is also hidden. + +A directory is made opaque by setting the xattr "trusted.overlay.opaque" +to "y". Where the upper filesystem contains an opaque directory, any +directory in the lower filesystem with the same name is ignored. 
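The whiteout and opaque-directory representation described above maps onto ordinary objects in the upper filesystem, which overlayfs creates itself during rm/rmdir. Purely to make the on-disk form concrete, here is a userspace sketch (hypothetical paths; trusted.* xattrs normally require CAP_SYS_ADMIN):

#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/xattr.h>

/* Illustrative only -- overlayfs performs the equivalent internally. */
static int show_upper_layer_markers(void)
{
	/* A whiteout: character device with device number 0/0. */
	if (mknod("/upper/dir/removed-name", S_IFCHR | 0000, makedev(0, 0)))
		return -1;

	/* An opaque directory: xattr trusted.overlay.opaque set to "y". */
	if (setxattr("/upper/dir/subdir", "trusted.overlay.opaque", "y", 1, 0))
		return -1;

	return 0;
}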
+ +readdir +------- + +When a 'readdir' request is made on a merged directory, the upper and +lower directories are each read and the name lists merged in the +obvious way (upper is read first, then lower - entries that already +exist are not re-added). This merged name list is cached in the +'struct file' and so remains as long as the file is kept open. If the +directory is opened and read by two processes at the same time, they +will each have separate caches. A seekdir to the start of the +directory (offset 0) followed by a readdir will cause the cache to be +discarded and rebuilt. + +This means that changes to the merged directory do not appear while a +directory is being read. This is unlikely to be noticed by many +programs. + +seek offsets are assigned sequentially when the directories are read. +Thus if + - read part of a directory + - remember an offset, and close the directory + - re-open the directory some time later + - seek to the remembered offset + +there may be little correlation between the old and new locations in +the list of filenames, particularly if anything has changed in the +directory. + +Readdir on directories that are not merged is simply handled by the +underlying directory (upper or lower). + + +Non-directories +--------------- + +Objects that are not directories (files, symlinks, device-special +files etc.) are presented either from the upper or lower filesystem as +appropriate. When a file in the lower filesystem is accessed in a way +the requires write-access, such as opening for write access, changing +some metadata etc., the file is first copied from the lower filesystem +to the upper filesystem (copy_up). Note that creating a hard-link +also requires copy_up, though of course creation of a symlink does +not. + +The copy_up may turn out to be unnecessary, for example if the file is +opened for read-write but the data is not modified. + +The copy_up process first makes sure that the containing directory +exists in the upper filesystem - creating it and any parents as +necessary. It then creates the object with the same metadata (owner, +mode, mtime, symlink-target etc.) and then if the object is a file, the +data is copied from the lower to the upper filesystem. Finally any +extended attributes are copied up. + +Once the copy_up is complete, the overlay filesystem simply +provides direct access to the newly created file in the upper +filesystem - future operations on the file are barely noticed by the +overlay filesystem (though an operation on the name of the file such as +rename or unlink will of course be noticed and handled). + + +Non-standard behavior +--------------------- + +The copy_up operation essentially creates a new, identical file and +moves it over to the old name. The new file may be on a different +filesystem, so both st_dev and st_ino of the file may change. + +Any open files referring to this inode will access the old data and +metadata. Similarly any file locks obtained before copy_up will not +apply to the copied up file. + +On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and +fsetxattr(2) will fail with EROFS. + +If a file with multiple hard links is copied up, then this will +"break" the link. Changes will not be propagated to other names +referring to the same inode. + +Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory +object in overlayfs will not contain valid absolute paths, only +relative paths leading up to the filesystem's root. This will be +fixed in the future. 
+ +Some operations are not atomic, for example a crash during copy_up or +rename will leave the filesystem in an inconsistent state. This will +be addressed in the future. + +Changes to underlying filesystems +--------------------------------- + +Offline changes, when the overlay is not mounted, are allowed to either +the upper or the lower trees. + +Changes to the underlying filesystems while part of a mounted overlay +filesystem are not allowed. If the underlying filesystem is changed, +the behavior of the overlay is undefined, though it will not result in +a crash or deadlock. diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index fceff7c00a3..20bf204426c 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -364,6 +364,7 @@ struct inode_operations { int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, umode_t create_mode, int *opened); int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*dentry_open)(struct dentry *, struct file *, const struct cred *); }; Again, all methods are called without any locks being held, unless @@ -696,6 +697,12 @@ struct address_space_operations { but instead uses bmap to find out where the blocks in the file are and uses those addresses directly. + dentry_open: *WARNING: probably going away soon, do not use!* This is an + alternative to f_op->open(), the difference is that this method may open + a file not necessarily originating from the same filesystem as the one + i_op->open() was called on. It may be useful for stacking filesystems + which want to allow native I/O directly on underlying files. + invalidatepage: If a page has PagePrivate set, then invalidatepage will be called when part or all of the page is to be removed diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt index e1ae127ed09..1ec0db7879d 100644 --- a/Documentation/input/elantech.txt +++ b/Documentation/input/elantech.txt @@ -38,22 +38,38 @@ Contents 7.2.1 Status packet 7.2.2 Head packet 7.2.3 Motion packet + 8. Trackpoint (for Hardware version 3 and 4) + 8.1 Registers + 8.2 Native relative mode 6 byte packet format + 8.2.1 Status Packet 1. Introduction ~~~~~~~~~~~~ -Currently the Linux Elantech touchpad driver is aware of two different -hardware versions unimaginatively called version 1 and version 2. Version 1 -is found in "older" laptops and uses 4 bytes per packet. Version 2 seems to -be introduced with the EeePC and uses 6 bytes per packet, and provides -additional features such as position of two fingers, and width of the touch. +Currently the Linux Elantech touchpad driver is aware of four different +hardware versions unimaginatively called version 1,version 2, version 3 +and version 4. Version 1 is found in "older" laptops and uses 4 bytes per +packet. Version 2 seems to be introduced with the EeePC and uses 6 bytes +per packet, and provides additional features such as position of two fingers, +and width of the touch. Hardware version 3 uses 6 bytes per packet (and +for 2 fingers the concatenation of two 6 bytes packets) and allows tracking +of up to 3 fingers. Hardware version 4 uses 6 bytes per packet, and can +combine a status packet with multiple head or motion packets. Hardware version +4 allows tracking up to 5 fingers. + +Some Hardware version 3 and version 4 also have a trackpoint which uses a +separate packet format. It is also 6 bytes per packet. 
The driver tries to support both hardware versions and should be compatible with the Xorg Synaptics touchpad driver and its graphical configuration utilities. +Note that a mouse button is also associated with either the touchpad or the +trackpoint when a trackpoint is available. Disabling the Touchpad in xorg +(TouchPadOff=0) will also disable the buttons associated with the touchpad. + Additionally the operation of the touchpad can be altered by adjusting the contents of some of its internal registers. These registers are represented by the driver as sysfs entries under /sys/bus/serio/drivers/psmouse/serio? @@ -78,7 +94,7 @@ completeness sake. 2. Extra knobs ~~~~~~~~~~~ -Currently the Linux Elantech touchpad driver provides two extra knobs under +Currently the Linux Elantech touchpad driver provides three extra knobs under /sys/bus/serio/drivers/psmouse/serio? for the user. * debug @@ -112,6 +128,20 @@ Currently the Linux Elantech touchpad driver provides two extra knobs under data consistency checking can be done. For now checking is disabled by default. Currently even turning it on will do nothing. +* crc_enabled + + Sets crc_enabled to 0/1. The name "crc_enabled" is the official name of + this integrity check, even though it is not an actual cyclic redundancy + check. + + Depending on the state of crc_enabled, certain basic data integrity + verification is done by the driver on hardware version 3 and 4. The + driver will reject any packet that appears corrupted. Using this knob, + The state of crc_enabled can be altered with this knob. + + Reading the crc_enabled value will show the active value. Echoing + "0" or "1" to this file will set the state to "0" or "1". + ///////////////////////////////////////////////////////////////////////////// 3. Differentiating hardware versions @@ -746,3 +776,42 @@ byte 5: byte 0 ~ 2 for one finger byte 3 ~ 5 for another + + +8. Trackpoint (for Hardware version 3 and 4) + ========================================= +8.1 Registers + ~~~~~~~~~ +No special registers have been identified. + +8.2 Native relative mode 6 byte packet format + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +8.2.1 Status Packet + ~~~~~~~~~~~~~ + +byte 0: + bit 7 6 5 4 3 2 1 0 + 0 0 sx sy 0 M R L +byte 1: + bit 7 6 5 4 3 2 1 0 + ~sx 0 0 0 0 0 0 0 +byte 2: + bit 7 6 5 4 3 2 1 0 + ~sy 0 0 0 0 0 0 0 +byte 3: + bit 7 6 5 4 3 2 1 0 + 0 0 ~sy ~sx 0 1 1 0 +byte 4: + bit 7 6 5 4 3 2 1 0 + x7 x6 x5 x4 x3 x2 x1 x0 +byte 5: + bit 7 6 5 4 3 2 1 0 + y7 y6 y5 y4 y3 y2 y1 y0 + + + x and y are written in two's complement spread + over 9 bits with sx/sy the relative top bit and + x7..x0 and y7..y0 the lower bits. + ~sx is the inverse of sx, ~sy is the inverse of sy. + The sign of y is opposite to what the input driver + expects for a relative movement diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 7dbe5ec9d9c..479f33204a3 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1015,10 +1015,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Format: {"off" | "on" | "skip[mbr]"} efi= [EFI] - Format: { "old_map" } + Format: { "old_map", "nochunk", "noruntime" } old_map [X86-64]: switch to the old ioremap-based EFI runtime services mapping. 32-bit still uses this one by default. + nochunk: disable reading files in "chunks" in the EFI + boot stub, as chunking can cause problems with some + firmware implementations. 
+ noruntime : disable EFI runtime services support efi_no_storage_paranoia [EFI; X86] Using this parameter you can use more than 50% of @@ -1260,7 +1264,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. i8042.noloop [HW] Disable the AUX Loopback command while probing for the AUX port i8042.nomux [HW] Don't check presence of an active multiplexing - controller. Default: true. + controller i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX controllers i8042.notimeout [HW] Ignore timeout condition signalled by controller @@ -1303,6 +1307,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted. .cdrom .chs .ignore_cable are additional options See Documentation/ide/ide.txt. + ide-generic.probe-mask= [HW] (E)IDE subsystem + Format: <int> + Probe mask for legacy ISA IDE ports. Depending on + platform up to 6 ports are supported, enabled by + setting corresponding bits in the mask to 1. The + default value is 0x0, which has a special meaning. + On systems that have PCI, it triggers scanning the + PCI bus for the first and the second port, which + are then probed. On systems without PCI the value + of 0x0 enables probing the two first ports as if it + was 0x3. + ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem Claim all unknown PCI IDE storage controllers. @@ -1583,6 +1599,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. kmemleak= [KNL] Boot-time kmemleak enable/disable Valid arguments: on, off Default: on + Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, + the default is off. kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode Valid arguments: 0, 1, 2 @@ -2232,7 +2250,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. nodsp [SH] Disable hardware DSP at boot time. - noefi [X86] Disable EFI runtime services support. + noefi Disable EFI runtime services support. noexec [IA-64] @@ -3465,6 +3483,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. e.g. base its process migration decisions on it. Default is on. + topology_updates= [KNL, PPC, NUMA] + Format: {off} + Specify if the kernel should ignore (off) + topology updates sent by the hypervisor to this + LPAR. + tp720= [HW,PS2] tpm_suspend_pcr=[HW,TPM] @@ -3597,7 +3621,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. usb-storage.delay_use= [UMS] The delay in seconds before a new device is - scanned for Logical Units (default 5). + scanned for Logical Units (default 1). usb-storage.quirks= [UMS] A list of quirks entries to supplement or diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt index f4f033c8d85..45e777f4e41 100644 --- a/Documentation/kmemleak.txt +++ b/Documentation/kmemleak.txt @@ -62,6 +62,10 @@ Memory may be allocated or freed before kmemleak is initialised and these actions are stored in an early log buffer. The size of this buffer is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option. +If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF are enabled, the kmemleak is +disabled by default. Passing "kmemleak=on" on the kernel command +line enables the function. + Basic Algorithm --------------- diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt new file mode 100644 index 00000000000..60f43ff629a --- /dev/null +++ b/Documentation/mailbox.txt @@ -0,0 +1,122 @@ + The Common Mailbox Framework + Jassi Brar <jaswinder.singh@linaro.org> + + This document aims to help developers write client and controller +drivers for the API. 
But before we start, let us note that the +client (especially) and controller drivers are likely going to be +very platform specific because the remote firmware is likely to be +proprietary and implement non-standard protocol. So even if two +platforms employ, say, PL320 controller, the client drivers can't +be shared across them. Even the PL320 driver might need to accommodate +some platform specific quirks. So the API is meant mainly to avoid +similar copies of code written for each platform. Having said that, +nothing prevents the remote f/w to also be Linux based and use the +same api there. However none of that helps us locally because we only +ever deal at client's protocol level. + Some of the choices made during implementation are the result of this +peculiarity of this "common" framework. + + + + Part 1 - Controller Driver (See include/linux/mailbox_controller.h) + + Allocate mbox_controller and the array of mbox_chan. +Populate mbox_chan_ops, except peek_data() all are mandatory. +The controller driver might know a message has been consumed +by the remote by getting an IRQ or polling some hardware flag +or it can never know (the client knows by way of the protocol). +The method in order of preference is IRQ -> Poll -> None, which +the controller driver should set via 'txdone_irq' or 'txdone_poll' +or neither. + + + Part 2 - Client Driver (See include/linux/mailbox_client.h) + + The client might want to operate in blocking mode (synchronously +send a message through before returning) or non-blocking/async mode (submit +a message and a callback function to the API and return immediately). + + +struct demo_client { + struct mbox_client cl; + struct mbox_chan *mbox; + struct completion c; + bool async; + /* ... */ +}; + +/* + * This is the handler for data received from remote. The behaviour is purely + * dependent upon the protocol. This is just an example. 
+ */ +static void message_from_remote(struct mbox_client *cl, void *mssg) +{ + struct demo_client *dc = container_of(mbox_client, + struct demo_client, cl); + if (dc->aysnc) { + if (is_an_ack(mssg)) { + /* An ACK to our last sample sent */ + return; /* Or do something else here */ + } else { /* A new message from remote */ + queue_req(mssg); + } + } else { + /* Remote f/w sends only ACK packets on this channel */ + return; + } +} + +static void sample_sent(struct mbox_client *cl, void *mssg, int r) +{ + struct demo_client *dc = container_of(mbox_client, + struct demo_client, cl); + complete(&dc->c); +} + +static void client_demo(struct platform_device *pdev) +{ + struct demo_client *dc_sync, *dc_async; + /* The controller already knows async_pkt and sync_pkt */ + struct async_pkt ap; + struct sync_pkt sp; + + dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL); + dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL); + + /* Populate non-blocking mode client */ + dc_async->cl.dev = &pdev->dev; + dc_async->cl.rx_callback = message_from_remote; + dc_async->cl.tx_done = sample_sent; + dc_async->cl.tx_block = false; + dc_async->cl.tx_tout = 0; /* doesn't matter here */ + dc_async->cl.knows_txdone = false; /* depending upon protocol */ + dc_async->async = true; + init_completion(&dc_async->c); + + /* Populate blocking mode client */ + dc_sync->cl.dev = &pdev->dev; + dc_sync->cl.rx_callback = message_from_remote; + dc_sync->cl.tx_done = NULL; /* operate in blocking mode */ + dc_sync->cl.tx_block = true; + dc_sync->cl.tx_tout = 500; /* by half a second */ + dc_sync->cl.knows_txdone = false; /* depending upon protocol */ + dc_sync->async = false; + + /* ASync mailbox is listed second in 'mboxes' property */ + dc_async->mbox = mbox_request_channel(&dc_async->cl, 1); + /* Populate data packet */ + /* ap.xxx = 123; etc */ + /* Send async message to remote */ + mbox_send_message(dc_async->mbox, &ap); + + /* Sync mailbox is listed first in 'mboxes' property */ + dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0); + /* Populate data packet */ + /* sp.abc = 123; etc */ + /* Send message to remote in blocking mode */ + mbox_send_message(dc_sync->mbox, &sp); + /* At this point 'sp' has been sent */ + + /* Now wait for async chan to be done */ + wait_for_completion(&dc_async->c); +} diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 0307e2875f2..a476b08a43e 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN 0 - disabled 1 - enabled +fwmark_reflect - BOOLEAN + Controls the fwmark of kernel-generated IPv4 reply packets that are not + associated with a socket for example, TCP RSTs or ICMP echo replies). + If unset, these packets have a fwmark of zero. If set, they have the + fwmark of the packet they are replying to. + Default: 0 + route/max_size - INTEGER Maximum number of routes allowed in the kernel. Increase this when using large numbers of interfaces and/or routes. @@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN proxy_ndp - BOOLEAN Do proxy ndp. +fwmark_reflect - BOOLEAN + Controls the fwmark of kernel-generated IPv6 reply packets that are not + associated with a socket for example, TCP RSTs or ICMPv6 echo replies). + If unset, these packets have a fwmark of zero. If set, they have the + fwmark of the packet they are replying to. + Default: 0 + conf/interface/*: Change special settings per interface. 
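For the fwmark_reflect sysctls above, the mark being reflected is whatever mark the incoming packet carried; marks are typically assigned by netfilter (e.g. the MARK target) or by the sending application via the SO_MARK socket option. A small, purely illustrative sketch of the latter (SO_MARK requires CAP_NET_ADMIN):

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open a TCP socket whose outgoing packets carry the given fwmark. */
static int open_marked_socket(unsigned int mark)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}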
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 412f45ca2d7..1d6d02d6ba5 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID: This option is implemented only for transmit timestamps. There, the timestamp is always looped along with a struct sock_extended_err. - The option modifies field ee_info to pass an id that is unique + The option modifies field ee_data to pass an id that is unique among all possibly concurrently outstanding timestamp requests for that socket. In practice, it is a monotonically increasing u32 (that wraps). diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt index a5da5c7e712..129f7c0e148 100644 --- a/Documentation/power/pm_qos_interface.txt +++ b/Documentation/power/pm_qos_interface.txt @@ -5,7 +5,8 @@ performance expectations by drivers, subsystems and user space applications on one of the parameters. Two different PM QoS frameworks are available: -1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput. +1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput, +memory_bandwidth. 2. the per-device PM QoS framework provides the API to manage the per-device latency constraints and PM QoS flags. @@ -13,6 +14,7 @@ Each parameters have defined units: * latency: usec * timeout: usec * throughput: kbs (kilo bit / sec) + * memory bandwidth: mbs (mega bit / sec) 1. PM QoS framework diff --git a/Documentation/prctl/Makefile b/Documentation/prctl/Makefile index 3e3232dcb2b..2948b7b124b 100644 --- a/Documentation/prctl/Makefile +++ b/Documentation/prctl/Makefile @@ -1,5 +1,5 @@ # List of programs to build -hostprogs-y := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test +hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test # Tell kbuild to always build the programs always := $(hostprogs-y) diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk new file mode 100644 index 00000000000..4ef2d975542 --- /dev/null +++ b/Documentation/ptp/testptp.mk @@ -0,0 +1,33 @@ +# PTP 1588 clock support - User space test program +# +# Copyright (C) 2010 OMICRON electronics GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ +CC = $(CROSS_COMPILE)gcc +INC = -I$(KBUILD_OUTPUT)/usr/include +CFLAGS = -Wall $(INC) +LDLIBS = -lrt +PROGS = testptp + +all: $(PROGS) + +testptp: testptp.o + +clean: + rm -f testptp.o + +distclean: clean + rm -f $(PROGS) diff --git a/Documentation/scsi/osd.txt b/Documentation/scsi/osd.txt index da162f7fd5f..5a9879bad07 100644 --- a/Documentation/scsi/osd.txt +++ b/Documentation/scsi/osd.txt @@ -184,8 +184,7 @@ Any problems, questions, bug reports, lonely OSD nights, please email: More up-to-date information can be found on: http://open-osd.org -Boaz Harrosh <bharrosh@panasas.com> -Benny Halevy <bhalevy@panasas.com> +Boaz Harrosh <ooo@electrozaur.com> References ========== diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt new file mode 100644 index 00000000000..5518465290b --- /dev/null +++ b/Documentation/target/tcmu-design.txt @@ -0,0 +1,378 @@ +Contents: + +1) TCM Userspace Design + a) Background + b) Benefits + c) Design constraints + d) Implementation overview + i. Mailbox + ii. Command ring + iii. Data Area + e) Device discovery + f) Device events + g) Other contingencies +2) Writing a user pass-through handler + a) Discovering and configuring TCMU uio devices + b) Waiting for events on the device(s) + c) Managing the command ring +3) Command filtering and pass_level +4) A final note + + +TCM Userspace Design +-------------------- + +TCM is another name for LIO, an in-kernel iSCSI target (server). +Existing TCM targets run in the kernel. TCMU (TCM in Userspace) +allows userspace programs to be written which act as iSCSI targets. +This document describes the design. + +The existing kernel provides modules for different SCSI transport +protocols. TCM also modularizes the data storage. There are existing +modules for file, block device, RAM or using another SCSI device as +storage. These are called "backstores" or "storage engines". These +built-in modules are implemented entirely as kernel code. + +Background: + +In addition to modularizing the transport protocol used for carrying +SCSI commands ("fabrics"), the Linux kernel target, LIO, also modularizes +the actual data storage as well. These are referred to as "backstores" +or "storage engines". The target comes with backstores that allow a +file, a block device, RAM, or another SCSI device to be used for the +local storage needed for the exported SCSI LUN. Like the rest of LIO, +these are implemented entirely as kernel code. + +These backstores cover the most common use cases, but not all. One new +use case that other non-kernel target solutions, such as tgt, are able +to support is using Gluster's GLFS or Ceph's RBD as a backstore. The +target then serves as a translator, allowing initiators to store data +in these non-traditional networked storage systems, while still only +using standard protocols themselves. + +If the target is a userspace process, supporting these is easy. tgt, +for example, needs only a small adapter module for each, because the +modules just use the available userspace libraries for RBD and GLFS. + +Adding support for these backstores in LIO is considerably more +difficult, because LIO is entirely kernel code. Instead of undertaking +the significant work to port the GLFS or RBD APIs and protocols to the +kernel, another approach is to create a userspace pass-through +backstore for LIO, "TCMU". + + +Benefits: + +In addition to allowing relatively easy support for RBD and GLFS, TCMU +will also allow easier development of new backstores. 
TCMU combines +with the LIO loopback fabric to become something similar to FUSE +(Filesystem in Userspace), but at the SCSI layer instead of the +filesystem layer. A SUSE, if you will. + +The disadvantage is there are more distinct components to configure, and +potentially to malfunction. This is unavoidable, but hopefully not +fatal if we're careful to keep things as simple as possible. + +Design constraints: + +- Good performance: high throughput, low latency +- Cleanly handle if userspace: + 1) never attaches + 2) hangs + 3) dies + 4) misbehaves +- Allow future flexibility in user & kernel implementations +- Be reasonably memory-efficient +- Simple to configure & run +- Simple to write a userspace backend + + +Implementation overview: + +The core of the TCMU interface is a memory region that is shared +between kernel and userspace. Within this region is: a control area +(mailbox); a lockless producer/consumer circular buffer for commands +to be passed up, and status returned; and an in/out data buffer area. + +TCMU uses the pre-existing UIO subsystem. UIO allows device driver +development in userspace, and this is conceptually very close to the +TCMU use case, except instead of a physical device, TCMU implements a +memory-mapped layout designed for SCSI commands. Using UIO also +benefits TCMU by handling device introspection (e.g. a way for +userspace to determine how large the shared region is) and signaling +mechanisms in both directions. + +There are no embedded pointers in the memory region. Everything is +expressed as an offset from the region's starting address. This allows +the ring to still work if the user process dies and is restarted with +the region mapped at a different virtual address. + +See target_core_user.h for the struct definitions. + +The Mailbox: + +The mailbox is always at the start of the shared memory region, and +contains a version, details about the starting offset and size of the +command ring, and head and tail pointers to be used by the kernel and +userspace (respectively) to put commands on the ring, and indicate +when the commands are completed. + +version - 1 (userspace should abort if otherwise) +flags - none yet defined. +cmdr_off - The offset of the start of the command ring from the start +of the memory region, to account for the mailbox size. +cmdr_size - The size of the command ring. This does *not* need to be a +power of two. +cmd_head - Modified by the kernel to indicate when a command has been +placed on the ring. +cmd_tail - Modified by userspace to indicate when it has completed +processing of a command. + +The Command Ring: + +Commands are placed on the ring by the kernel incrementing +mailbox.cmd_head by the size of the command, modulo cmdr_size, and +then signaling userspace via uio_event_notify(). Once the command is +completed, userspace updates mailbox.cmd_tail in the same way and +signals the kernel via a 4-byte write(). When cmd_head equals +cmd_tail, the ring is empty -- no commands are currently waiting to be +processed by userspace. + +TCMU commands start with a common header containing "len_op", a 32-bit +value that stores the length, as well as the opcode in the lowest +unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and +TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it +should skip ahead by the bytes in "length". (The kernel inserts PAD +entries to ensure each CMD entry fits contigously into the circular +buffer.) 
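Before looking at how a CMD entry is processed, here is a small user-side
sketch of the ring arithmetic just described. Field and opcode names
(cmd_head, cmd_tail, cmdr_size, len_op, TCMU_OP_PAD, TCMU_OP_CMD) follow this
document; the numeric values and TCMU_OP_MASK below are assumptions for
illustration only, since the authoritative accessors live in
target_core_user.h.

#include <stdint.h>

#define TCMU_OP_PAD     0       /* assumed value, see target_core_user.h */
#define TCMU_OP_CMD     1       /* assumed value, see target_core_user.h */
#define TCMU_OP_MASK    0x7     /* assumption: opcode kept in the low bits */

/* len_op packs the entry length and, in its lowest (unused) bits, the op */
static inline uint32_t hdr_get_op(uint32_t len_op)
{
        return len_op & TCMU_OP_MASK;
}

static inline uint32_t hdr_get_len(uint32_t len_op)
{
        return len_op & ~TCMU_OP_MASK;
}

/* Ring is empty when the kernel's cmd_head equals userspace's cmd_tail */
static inline int ring_empty(uint32_t cmd_head, uint32_t cmd_tail)
{
        return cmd_head == cmd_tail;
}

/* Userspace retires an entry (CMD or PAD alike) by advancing cmd_tail by
 * the entry length, modulo cmdr_size, then doing the 4-byte write() */
static inline uint32_t ring_advance(uint32_t cmd_tail, uint32_t entry_len,
                                    uint32_t cmdr_size)
{
        return (cmd_tail + entry_len) % cmdr_size;
}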
+ +When userspace handles a CMD, it finds the SCSI CDB (Command Data +Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the +start of the overall shared memory region, not the entry. The data +in/out buffers are accessible via tht req.iov[] array. Note that +each iov.iov_base is also an offset from the start of the region. + +TCMU currently does not support BIDI operations. + +When completing a command, userspace sets rsp.scsi_status, and +rsp.sense_buffer if necessary. Userspace then increments +mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the +kernel via the UIO method, a 4-byte write to the file descriptor. + +The Data Area: + +This is shared-memory space after the command ring. The organization +of this area is not defined in the TCMU interface, and userspace +should access only the parts referenced by pending iovs. + + +Device Discovery: + +Other devices may be using UIO besides TCMU. Unrelated user processes +may also be handling different sets of TCMU devices. TCMU userspace +processes must find their devices by scanning sysfs +class/uio/uio*/name. For TCMU devices, these names will be of the +format: + +tcm-user/<hba_num>/<device_name>/<subtype>/<path> + +where "tcm-user" is common for all TCMU-backed UIO devices. <hba_num> +and <device_name> allow userspace to find the device's path in the +kernel target's configfs tree. Assuming the usual mount point, it is +found at: + +/sys/kernel/config/target/core/user_<hba_num>/<device_name> + +This location contains attributes such as "hw_block_size", that +userspace needs to know for correct operation. + +<subtype> will be a userspace-process-unique string to identify the +TCMU device as expecting to be backed by a certain handler, and <path> +will be an additional handler-specific string for the user process to +configure the device, if needed. The name cannot contain ':', due to +LIO limitations. + +For all devices so discovered, the user handler opens /dev/uioX and +calls mmap(): + +mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0) + +where size must be equal to the value read from +/sys/class/uio/uioX/maps/map0/size. + + +Device Events: + +If a new device is added or removed, a notification will be broadcast +over netlink, using a generic netlink family name of "TCM-USER" and a +multicast group named "config". This will include the UIO name as +described in the previous section, as well as the UIO minor +number. This should allow userspace to identify both the UIO device and +the LIO device, so that after determining the device is supported +(based on subtype) it can take the appropriate action. + + +Other contingencies: + +Userspace handler process never attaches: + +- TCMU will post commands, and then abort them after a timeout period + (30 seconds.) + +Userspace handler process is killed: + +- It is still possible to restart and re-connect to TCMU + devices. Command ring is preserved. However, after the timeout period, + the kernel will abort pending tasks. + +Userspace handler process hangs: + +- The kernel will abort pending tasks after a timeout period. + +Userspace handler process is malicious: + +- The process can trivially break the handling of devices it controls, + but should not be able to access kernel memory outside its shared + memory areas. 
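The next section walks through a full example handler. As a smaller warm-up,
here is a hedged sketch of splitting the tcm-user/<hba_num>/<device_name>/
<subtype>/<path> name format from the Device Discovery section above, which
is the check every handler performs before claiming a device. The format
string comes from this document; "my_subtype" is a hypothetical
handler-chosen identifier, the buffer is modified in place, and error
handling is minimal.

#include <stdio.h>
#include <string.h>

static int parse_tcmu_name(char *name, const char *my_subtype)
{
        char *hba, *dev, *subtype, *path;

        if (strncmp(name, "tcm-user/", 9))
                return -1;              /* not a TCMU-backed uio device */

        hba = name + 9;
        dev = strchr(hba, '/');
        if (!dev)
                return -1;
        *dev++ = '\0';

        subtype = strchr(dev, '/');
        if (!subtype)
                return -1;
        *subtype++ = '\0';

        path = strchr(subtype, '/');
        if (path)
                *path++ = '\0';         /* handler-specific config string */

        if (strcmp(subtype, my_subtype))
                return -1;              /* belongs to some other handler */

        /* hba and dev locate the device under
         * /sys/kernel/config/target/core/user_<hba_num>/<device_name> */
        printf("hba %s dev %s cfg %s\n", hba, dev, path ? path : "");
        return 0;
}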
+
+
+Writing a user pass-through handler (with example code)
+--------------------------------------------------------
+
+A user process handling a TCMU device must support the following:
+
+a) Discovering and configuring TCMU uio devices
+b) Waiting for events on the device(s)
+c) Managing the command ring: Parsing operations and commands,
+   performing work as needed, setting response fields (scsi_status and
+   possibly sense_buffer), updating cmd_tail, and notifying the kernel
+   that work has been finished
+
+First, consider instead writing a plugin for tcmu-runner. tcmu-runner
+implements all of this, and provides a higher-level API for plugin
+authors.
+
+TCMU is designed so that multiple unrelated processes can manage TCMU
+devices separately. All handlers should make sure to only open their
+devices, based upon a known subtype string.
+
+a) Discovering and configuring TCMU UIO devices:
+
+(error checking omitted for brevity)
+
+int fd, dev_fd, ret;
+char buf[256];
+unsigned long long map_len;
+void *map;
+
+fd = open("/sys/class/uio/uio0/name", O_RDONLY);
+ret = read(fd, buf, sizeof(buf));
+close(fd);
+buf[ret-1] = '\0'; /* null-terminate and chop off the \n */
+
+/* we only want uio devices whose name is a format we expect */
+if (strncmp(buf, "tcm-user", 8))
+        exit(-1);
+
+/* Further checking for subtype also needed here */
+
+fd = open("/sys/class/uio/uio0/maps/map0/size", O_RDONLY);
+ret = read(fd, buf, sizeof(buf));
+close(fd);
+buf[ret-1] = '\0'; /* null-terminate and chop off the \n */
+
+map_len = strtoull(buf, NULL, 0);
+
+dev_fd = open("/dev/uio0", O_RDWR);
+map = mmap(NULL, map_len, PROT_READ|PROT_WRITE, MAP_SHARED, dev_fd, 0);
+
+
+b) Waiting for events on the device(s)
+
+while (1) {
+        char buf[4];
+
+        int ret = read(dev_fd, buf, 4); /* will block */
+
+        handle_device_events(dev_fd, map);
+}
+
+
+c) Managing the command ring
+
+#include <linux/target_core_user.h>
+
+int handle_device_events(int fd, void *map)
+{
+        struct tcmu_mailbox *mb = map;
+        struct tcmu_cmd_entry *ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
+        int did_some_work = 0;
+
+        /* Process events from cmd ring until we catch up with cmd_head */
+        while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
+
+                if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+                        uint8_t *cdb = (void *)mb + ent->req.cdb_off;
+                        bool success = true;
+
+                        /* Handle command here. */
+                        printf("SCSI opcode: 0x%x\n", cdb[0]);
+
+                        /* Set response fields */
+                        if (success)
+                                ent->rsp.scsi_status = SCSI_NO_SENSE;
+                        else {
+                                /* Also fill in rsp->sense_buffer here */
+                                ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
+                        }
+                }
+                else {
+                        /* Do nothing for PAD entries */
+                }
+
+                /* update cmd_tail */
+                mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(&ent->hdr)) % mb->cmdr_size;
+                ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
+                did_some_work = 1;
+        }
+
+        /* Notify the kernel that work has been finished */
+        if (did_some_work) {
+                uint32_t buf = 0;
+
+                write(fd, &buf, 4);
+        }
+
+        return 0;
+}
+
+
+Command filtering and pass_level
+--------------------------------
+
+TCMU supports a "pass_level" option with valid values of 0 or 1.  When
+the value is 0 (the default), nearly all SCSI commands received for
+the device are passed through to the handler.  This allows maximum
+flexibility but increases the amount of code required by the handler,
+to support all mandatory SCSI commands.  If pass_level is set to 1,
+then only IO-related commands are presented, and the rest are handled
+by LIO's in-kernel command emulation.
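The full list of level-1 commands follows this sketch. As an illustration of
the dispatch a handler ends up writing in that mode, here is a hedged,
minimal opcode switch: the opcode values are the standard SCSI READ(10),
WRITE(10) and SYNCHRONIZE CACHE(10) codes, and do_read()/do_write()/do_flush()
are hypothetical stand-ins for a backstore's own helpers, not anything
provided by TCMU.

#include <stdint.h>
#include <stdbool.h>

#define OP_READ_10              0x28
#define OP_WRITE_10             0x2a
#define OP_SYNC_CACHE_10        0x35

/* Hypothetical backstore callbacks -- placeholders for real IO paths */
static bool do_read(uint8_t *cdb)  { (void)cdb; return true; }
static bool do_write(uint8_t *cdb) { (void)cdb; return true; }
static bool do_flush(uint8_t *cdb) { (void)cdb; return true; }

/* Returns true on success; the caller sets rsp.scsi_status accordingly,
 * exactly as in the handle_device_events() example above. */
static bool handle_io_cdb(uint8_t *cdb)
{
        switch (cdb[0]) {
        case OP_READ_10:
                return do_read(cdb);
        case OP_WRITE_10:
                return do_write(cdb);
        case OP_SYNC_CACHE_10:
                return do_flush(cdb);
        default:
                /* other variants (6/12/16-byte CDBs, WRITE_SAME, UNMAP, ...)
                 * from the list below would be handled here as well */
                return false;
        }
}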
The commands presented at level +1 include all versions of: + +READ +WRITE +WRITE_VERIFY +XDWRITEREAD +WRITE_SAME +COMPARE_AND_WRITE +SYNCHRONIZE_CACHE +UNMAP + + +A final note +------------ + +Please be careful to return codes as defined by the SCSI +specifications. These are different than some values defined in the +scsi/scsi.h include file. For example, CHECK CONDITION's status code +is 2, not 1. diff --git a/Documentation/vDSO/Makefile b/Documentation/vDSO/Makefile index 2b99e57207c..ee075c3d212 100644 --- a/Documentation/vDSO/Makefile +++ b/Documentation/vDSO/Makefile @@ -10,3 +10,6 @@ always := $(hostprogs-y) HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99 HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib +ifeq ($(CONFIG_X86_32),y) +HOSTLOADLIBES_vdso_standalone_test_x86 += -lgcc_s +endif diff --git a/Documentation/vDSO/vdso_standalone_test_x86.c b/Documentation/vDSO/vdso_standalone_test_x86.c index d46240265c5..93b0ebf8cc3 100644 --- a/Documentation/vDSO/vdso_standalone_test_x86.c +++ b/Documentation/vDSO/vdso_standalone_test_x86.c @@ -63,7 +63,7 @@ static inline void linux_exit(int code) x86_syscall3(__NR_exit, code, 0, 0); } -void to_base10(char *lastdig, uint64_t n) +void to_base10(char *lastdig, time_t n) { while (n) { *lastdig = (n % 10) + '0'; diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt index eeb11a28e4f..e5a940e3d30 100644 --- a/Documentation/video4linux/vivid.txt +++ b/Documentation/video4linux/vivid.txt @@ -221,12 +221,11 @@ ccs_out_mode: specify the allowed video output crop/compose/scaling combination key, not quality. multiplanar: select whether each device instance supports multi-planar formats, - and thus the V4L2 multi-planar API. By default the first device instance - is single-planar, the second multi-planar, and it keeps alternating. + and thus the V4L2 multi-planar API. By default device instances are + single-planar. This module option can override that for each instance. Values are: - 0: use alternating single and multi-planar devices. 1: this is a single-planar instance. 2: this is a multi-planar instance. @@ -975,9 +974,8 @@ is set, then the alpha component is only used for the color red and set to 0 otherwise. The driver has to be configured to support the multiplanar formats. By default -the first driver instance is single-planar, the second is multi-planar, and it -keeps alternating. This can be changed by setting the multiplanar module option, -see section 1 for more details on that option. +the driver instances are single-planar. This can be changed by setting the +multiplanar module option, see section 1 for more details on that option. If the driver instance is using the multiplanar formats/API, then the first single planar format (YUYV) and the multiplanar NV16M and NV61M formats the @@ -1021,7 +1019,7 @@ the output overlay for the video output, turn on video looping and capture to see the blended framebuffer overlay that's being written to by the second instance. 
This setup would require the following commands: - $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1 multiplanar=1,1 + $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1 $ v4l2-ctl -d1 --find-fb /dev/fb1 is the framebuffer associated with base address 0x12800000 $ sudo v4l2-ctl -d2 --set-fbuf fb=1 diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt index bdd4bb97fff..b64e0af9cc5 100644 --- a/Documentation/vm/hugetlbpage.txt +++ b/Documentation/vm/hugetlbpage.txt @@ -274,7 +274,7 @@ This command mounts a (pseudo) filesystem of type hugetlbfs on the directory /mnt/huge. Any files created on /mnt/huge uses huge pages. The uid and gid options sets the owner and group of the root of the file system. By default the uid and gid of the current process are taken. The mode option sets the -mode of root of file system to value & 0777. This value is given in octal. +mode of root of file system to value & 01777. This value is given in octal. By default the value 0755 is picked. The size option sets the maximum value of memory (huge pages) allowed for that filesystem (/mnt/huge). The size is rounded down to HPAGE_SIZE. The option nr_inodes sets the maximum number of diff --git a/MAINTAINERS b/MAINTAINERS index a20df9bf8ab..0ff630de8a6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1543,6 +1543,7 @@ F: arch/arm/mach-pxa/include/mach/z2.h ARM/ZYNQ ARCHITECTURE M: Michal Simek <michal.simek@xilinx.com> +R: Sören Brinkmann <soren.brinkmann@xilinx.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://wiki.xilinx.com T: git git://git.xilinx.com/linux-xlnx.git @@ -1749,6 +1750,13 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com> S: Supported F: drivers/spi/spi-atmel.* +ATMEL SSC DRIVER +M: Bo Shen <voice.shen@atmel.com> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/misc/atmel-ssc.c +F: include/linux/atmel-ssc.h + ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS M: Nicolas Ferre <nicolas.ferre@atmel.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2064,8 +2072,9 @@ F: drivers/clocksource/bcm_kona_timer.c BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren <swarren@wwwdotorg.org> +M: Lee Jones <lee@kernel.org> L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) -T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git S: Maintained N: bcm2835 @@ -2735,6 +2744,13 @@ W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb3/ +CXGB3 ISCSI DRIVER (CXGB3I) +M: Karen Xie <kxie@chelsio.com> +L: linux-scsi@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/scsi/cxgbi/cxgb3i + CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise <swise@chelsio.com> L: linux-rdma@vger.kernel.org @@ -2749,6 +2765,13 @@ W: http://www.chelsio.com S: Supported F: drivers/net/ethernet/chelsio/cxgb4/ +CXGB4 ISCSI DRIVER (CXGB4I) +M: Karen Xie <kxie@chelsio.com> +L: linux-scsi@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/scsi/cxgbi/cxgb4i + CXGB4 IWARP RNIC DRIVER (IW_CXGB4) M: Steve Wise <swise@chelsio.com> L: linux-rdma@vger.kernel.org @@ -4305,8 +4328,10 @@ F: Documentation/blockdev/cpqarray.txt F: drivers/block/cpqarray.* HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa) -M: "Stephen M. 
Cameron" <scameron@beardog.cce.hp.com> +M: Don Brace <don.brace@pmcs.com> L: iss_storagedev@hp.com +L: storagedev@pmcs.com +L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/hpsa.txt F: drivers/scsi/hpsa*.[ch] @@ -4314,8 +4339,10 @@ F: include/linux/cciss*.h F: include/uapi/linux/cciss*.h HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) -M: Mike Miller <mike.miller@hp.com> +M: Don Brace <don.brace@pmcs.com> L: iss_storagedev@hp.com +L: storagedev@pmcs.com +L: linux-scsi@vger.kernel.org S: Supported F: Documentation/blockdev/cciss.txt F: drivers/block/cciss* @@ -4601,7 +4628,7 @@ S: Supported F: drivers/crypto/nx/ IBM Power 842 compression accelerator -M: Nathan Fontenot <nfont@linux.vnet.ibm.com> +M: Dan Streetman <ddstreet@us.ibm.com> S: Supported F: drivers/crypto/nx/nx-842.c F: include/linux/nx842.h @@ -4703,6 +4730,7 @@ L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/ F: drivers/staging/iio/ +F: include/linux/iio/ IKANOS/ADI EAGLE ADSL USB DRIVER M: Matthieu Castet <castet.matthieu@free.fr> @@ -5834,6 +5862,14 @@ S: Maintained F: drivers/net/macvlan.c F: include/linux/if_macvlan.h +MAILBOX API +M: Jassi Brar <jassisinghbrar@gmail.com> +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/mailbox/ +F: include/linux/mailbox_client.h +F: include/linux/mailbox_controller.h + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk <mtk.manpages@gmail.com> W: http://www.kernel.org/doc/man-pages @@ -6575,6 +6611,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git S: Maintained F: arch/arm/*omap*/ F: drivers/i2c/busses/i2c-omap.c +F: drivers/irqchip/irq-omap-intc.c +F: drivers/mfd/*omap*.c +F: drivers/mfd/menelaus.c +F: drivers/mfd/palmas.c +F: drivers/mfd/tps65217.c +F: drivers/mfd/tps65218.c +F: drivers/mfd/tps65910.c +F: drivers/mfd/twl-core.[ch] +F: drivers/mfd/twl4030*.c +F: drivers/mfd/twl6030*.c +F: drivers/mfd/twl6040*.c +F: drivers/regulator/palmas-regulator*.c +F: drivers/regulator/pbias-regulator.c +F: drivers/regulator/tps65217-regulator.c +F: drivers/regulator/tps65218-regulator.c +F: drivers/regulator/tps65910-regulator.c +F: drivers/regulator/twl-regulator.c F: include/linux/i2c-omap.h OMAP DEVICE TREE SUPPORT @@ -6585,6 +6638,9 @@ L: devicetree@vger.kernel.org S: Maintained F: arch/arm/boot/dts/*omap* F: arch/arm/boot/dts/*am3* +F: arch/arm/boot/dts/*am4* +F: arch/arm/boot/dts/*am5* +F: arch/arm/boot/dts/*dra7* OMAP CLOCK FRAMEWORK SUPPORT M: Paul Walmsley <paul@pwsan.com> @@ -6822,7 +6878,7 @@ S: Orphan F: drivers/net/wireless/orinoco/ OSD LIBRARY and FILESYSTEM -M: Boaz Harrosh <bharrosh@panasas.com> +M: Boaz Harrosh <ooo@electrozaur.com> M: Benny Halevy <bhalevy@primarydata.com> L: osd-dev@open-osd.org W: http://open-osd.org @@ -6832,6 +6888,14 @@ F: drivers/scsi/osd/ F: include/scsi/osd_* F: fs/exofs/ +OVERLAY FILESYSTEM +M: Miklos Szeredi <miklos@szeredi.hu> +L: linux-unionfs@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git +S: Supported +F: fs/overlayfs/ +F: Documentation/filesystems/overlayfs.txt + P54 WIRELESS DRIVER M: Christian Lamparter <chunkeey@googlemail.com> L: linux-wireless@vger.kernel.org @@ -7153,6 +7217,7 @@ F: drivers/crypto/picoxcell* PIN CONTROL SUBSYSTEM M: Linus Walleij <linus.walleij@linaro.org> +L: linux-gpio@vger.kernel.org S: Maintained F: drivers/pinctrl/ F: include/linux/pinctrl/ @@ -8457,7 +8522,6 @@ F: arch/arm/mach-s3c24xx/bast-irq.c TI DAVINCI MACHINE SUPPORT M: Sekhar Nori <nsekhar@ti.com> M: Kevin Hilman 
<khilman@deeprootsystems.com> -L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) T: git git://gitorious.org/linux-davinci/linux-davinci.git Q: http://patchwork.kernel.org/project/linux-davinci/list/ S: Supported @@ -8467,7 +8531,6 @@ F: drivers/i2c/busses/i2c-davinci.c TI DAVINCI SERIES MEDIA DRIVER M: Lad, Prabhakar <prabhakar.csengg@gmail.com> L: linux-media@vger.kernel.org -L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) W: http://linuxtv.org/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git @@ -9584,7 +9647,6 @@ F: drivers/staging/unisys/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER M: Vinayak Holikatti <vinholikatti@gmail.com> -M: Santosh Y <santoshsy@gmail.com> L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/ufs.txt @@ -9678,11 +9740,6 @@ S: Maintained F: Documentation/hid/hiddev.txt F: drivers/hid/usbhid/ -USB/IP DRIVERS -L: linux-usb@vger.kernel.org -S: Orphan -F: drivers/staging/usbip/ - USB ISP116X DRIVER M: Olav Kongas <ok@artecdesign.ee> L: linux-usb@vger.kernel.org @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = -rc1 -NAME = Shuffling Zombie Juror +EXTRAVERSION = -rc7 +NAME = Diseased Newt # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -297,7 +297,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ HOSTCC = gcc HOSTCXX = g++ -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer +HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89 HOSTCXXFLAGS = -O2 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) @@ -401,7 +401,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Werror-implicit-function-declaration \ - -Wno-format-security + -Wno-format-security \ + -std=gnu89 KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 9596b0ab108..fe44b249460 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -9,6 +9,7 @@ config ARC def_bool y select BUILDTIME_EXTABLE_SORT + select COMMON_CLK select CLONE_BACKWARDS # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev select DEVTMPFS if !INITRAMFS_SOURCE="" @@ -73,9 +74,6 @@ config STACKTRACE_SUPPORT config HAVE_LATENCYTOP_SUPPORT def_bool y -config NO_DMA - def_bool n - source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -354,7 +352,7 @@ config ARC_CURR_IN_REG kernel mode. This saves memory access for each such access -config ARC_MISALIGN_ACCESS +config ARC_EMUL_UNALIGNED bool "Emulate unaligned memory access (userspace only)" select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_ARCH_UNALIGN_ALLOW diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 8c0b1aa56f7..10bc3d4e8a4 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -25,7 +25,6 @@ ifdef CONFIG_ARC_CURR_IN_REG LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h endif -upto_gcc42 := $(call cc-ifversion, -le, 0402, y) upto_gcc44 := $(call cc-ifversion, -le, 0404, y) atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y) @@ -60,25 +59,11 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB # --build-id w/o "-marclinux". 
Default arc-elf32-ld is OK ldflags-$(upto_gcc44) += -marclinux -ARC_LIBGCC := -mA7 -cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16 - ifndef CONFIG_ARC_HAS_HW_MPY cflags-y += -mno-mpy - -# newlib for ARC700 assumes MPY to be always present, which is generally true -# However, if someone really doesn't want MPY, we need to use the 600 ver -# which coupled with -mno-mpy will use mpy emulation -# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments, -# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted - - ifeq ($(upto_gcc42),y) - ARC_LIBGCC := -marc600 - cflags-y += -multcost=30 - endif endif -LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name) +LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel KBUILD_CFLAGS_MODULE += -mlong-calls diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts index 6b57475967a..757e0c62c4f 100644 --- a/arch/arc/boot/dts/angel4.dts +++ b/arch/arc/boot/dts/angel4.dts @@ -24,11 +24,6 @@ serial0 = &arcuart0; }; - memory { - device_type = "memory"; - reg = <0x00000000 0x10000000>; /* 256M */ - }; - fpga { compatible = "simple-bus"; #address-cells = <1>; diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts index 4f31b2eb5cd..cfaedd9c61c 100644 --- a/arch/arc/boot/dts/nsimosci.dts +++ b/arch/arc/boot/dts/nsimosci.dts @@ -20,18 +20,13 @@ /* this is for console on PGU */ /* bootargs = "console=tty0 consoleblank=0"; */ /* this is for console on serial */ - bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug"; + bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; }; aliases { serial0 = &uart0; }; - memory { - device_type = "memory"; - reg = <0x80000000 0x10000000>; /* 256M */ - }; - fpga { compatible = "simple-bus"; #address-cells = <1>; diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig index e283aa58693..ef4d3bc7b6c 100644 --- a/arch/arc/configs/fpga_defconfig +++ b/arch/arc/configs/fpga_defconfig @@ -23,7 +23,6 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_FPGA_LEGACY=y -CONFIG_ARC_BOARD_ML509=y # CONFIG_ARC_HAS_RTSC is not set CONFIG_ARC_BUILTIN_DTB_NAME="angel4" CONFIG_PREEMPT=y diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig index 5276a52f6a2..49c93011ab9 100644 --- a/arch/arc/configs/fpga_noramfs_defconfig +++ b/arch/arc/configs/fpga_noramfs_defconfig @@ -20,7 +20,6 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_FPGA_LEGACY=y -CONFIG_ARC_BOARD_ML509=y # CONFIG_ARC_HAS_RTSC is not set CONFIG_ARC_BUILTIN_DTB_NAME="angel4" CONFIG_PREEMPT=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index c01ba35a4ef..278dacf2a3f 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -21,7 +21,6 @@ CONFIG_MODULES=y # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARC_PLAT_FPGA_LEGACY=y -CONFIG_ARC_BOARD_ML509=y # CONFIG_ARC_IDE is not set # CONFIG_ARCTANGENT_EMAC is not set # CONFIG_ARC_HAS_RTSC is not set diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 372466b371b..be33db8a2ee 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ 
-9,19 +9,16 @@ #ifndef _ASM_ARC_ARCREGS_H #define _ASM_ARC_ARCREGS_H -#ifdef __KERNEL__ - /* Build Configuration Registers */ #define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */ #define ARC_REG_CRC_BCR 0x62 -#define ARC_REG_DVFB_BCR 0x64 -#define ARC_REG_EXTARITH_BCR 0x65 #define ARC_REG_VECBASE_BCR 0x68 #define ARC_REG_PERIBASE_BCR 0x69 -#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */ -#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */ +#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */ +#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */ #define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ #define ARC_REG_TIMERS_BCR 0x75 +#define ARC_REG_AP_BCR 0x76 #define ARC_REG_ICCM_BCR 0x78 #define ARC_REG_XY_MEM_BCR 0x79 #define ARC_REG_MAC_BCR 0x7a @@ -31,6 +28,9 @@ #define ARC_REG_MIXMAX_BCR 0x7e #define ARC_REG_BARREL_BCR 0x7f #define ARC_REG_D_UNCACH_BCR 0x6A +#define ARC_REG_BPU_BCR 0xc0 +#define ARC_REG_ISA_CFG_BCR 0xc1 +#define ARC_REG_SMART_BCR 0xFF /* status32 Bits Positions */ #define STATUS_AE_BIT 5 /* Exception active */ @@ -191,14 +191,6 @@ #define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10)) #define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10) -#ifdef CONFIG_ARC_FPU_SAVE_RESTORE -/* These DPFP regs need to be saved/restored across ctx-sw */ -struct arc_fpu { - struct { - unsigned int l, h; - } aux_dpfp[2]; -}; -#endif /* *************************************************************** @@ -212,27 +204,19 @@ struct bcr_identity { #endif }; -#define EXTN_SWAP_VALID 0x1 -#define EXTN_NORM_VALID 0x2 -#define EXTN_MINMAX_VALID 0x2 -#define EXTN_BARREL_VALID 0x2 - -struct bcr_extn { +struct bcr_isa { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2, - norm:2, swap:1; + unsigned int pad1:23, atomic1:1, ver:8; #else - unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2, - crc:1, pad:20; + unsigned int ver:8, atomic1:1, pad1:23; #endif }; -/* DSP Options Ref Manual */ -struct bcr_extn_mac_mul { +struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int pad:16, type:8, ver:8; + unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; #else - unsigned int ver:8, type:8, pad:16; + unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8; #endif }; @@ -251,6 +235,7 @@ struct bcr_perip { unsigned int pad:8, sz:8, pad2:8, start:8; #endif }; + struct bcr_iccm { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int base:16, pad:5, sz:3, ver:8; @@ -277,8 +262,8 @@ struct bcr_dccm { #endif }; -/* Both SP and DP FPU BCRs have same format */ -struct bcr_fp { +/* ARCompact: Both SP and DP FPU BCRs have same format */ +struct bcr_fp_arcompact { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int fast:1, ver:8; #else @@ -286,6 +271,30 @@ struct bcr_fp { #endif }; +struct bcr_timer { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad2:15, rtsc:1, pad1:6, t1:1, t0:1, ver:8; +#else + unsigned int ver:8, t0:1, t1:1, pad1:6, rtsc:1, pad2:15; +#endif +}; + +struct bcr_bpu_arcompact { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8; +#else + unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19; +#endif +}; + +struct bcr_generic { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:24, ver:8; +#else + unsigned int ver:8, pad:24; +#endif +}; + /* ******************************************************************* * Generic structures to hold build configuration used at runtime @@ -299,6 +308,10 @@ struct cpuinfo_arc_cache { unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, 
pad:6; }; +struct cpuinfo_arc_bpu { + unsigned int ver, full, num_cache, num_pred; +}; + struct cpuinfo_arc_ccm { unsigned int base_addr, sz; }; @@ -306,21 +319,25 @@ struct cpuinfo_arc_ccm { struct cpuinfo_arc { struct cpuinfo_arc_cache icache, dcache; struct cpuinfo_arc_mmu mmu; + struct cpuinfo_arc_bpu bpu; struct bcr_identity core; - unsigned int timers; + struct bcr_isa isa; + struct bcr_timer timers; unsigned int vec_base; unsigned int uncached_base; struct cpuinfo_arc_ccm iccm, dccm; - struct bcr_extn extn; + struct { + unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, + fpu_sp:1, fpu_dp:1, pad2:6, + debug:1, ap:1, smart:1, rtt:1, pad3:4, + pad4:8; + } extn; + struct bcr_mpy extn_mpy; struct bcr_extn_xymem extn_xymem; - struct bcr_extn_mac_mul extn_mac_mul; - struct bcr_fp fp, dpfp; }; extern struct cpuinfo_arc cpuinfo_arc700[]; #endif /* __ASEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _ASM_ARC_ARCREGS_H */ diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 173f303a868..067551b6920 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -9,8 +9,6 @@ #ifndef _ASM_ARC_ATOMIC_H #define _ASM_ARC_ATOMIC_H -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -170,5 +168,3 @@ ATOMIC_OP(and, &=, and) #endif #endif - -#endif diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index ebc0cf3164d..1a5bf07eefe 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -13,8 +13,6 @@ #error only <linux/bitops.h> can be included directly #endif -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -508,6 +506,4 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word) #endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h index 5b18e94c667..ea022d47896 100644 --- a/arch/arc/include/asm/bug.h +++ b/arch/arc/include/asm/bug.h @@ -21,10 +21,9 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address); void die(const char *str, struct pt_regs *regs, unsigned long address); -#define BUG() do { \ - dump_stack(); \ - pr_warn("Kernel BUG in %s: %s: %d!\n", \ - __FILE__, __func__, __LINE__); \ +#define BUG() do { \ + pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + dump_stack(); \ } while (0) #define HAVE_ARCH_BUG diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index b3c750979aa..7861255da32 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -20,7 +20,7 @@ #define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1)) /* - * ARC700 doesn't cache any access in top 256M. + * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF) * Ideal for wiring memory mapped peripherals as we don't need to do * explicit uncached accesses (LD.di/ST.di) hence more portable drivers */ diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h index 87b918585c4..c2453ee6280 100644 --- a/arch/arc/include/asm/current.h +++ b/arch/arc/include/asm/current.h @@ -12,8 +12,6 @@ #ifndef _ASM_ARC_CURRENT_H #define _ASM_ARC_CURRENT_H -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ #ifdef CONFIG_ARC_CURR_IN_REG @@ -27,6 +25,4 @@ register struct task_struct *curr_arc asm("r25"); #endif /* ! 
__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _ASM_ARC_CURRENT_H */ diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index 587df8236e8..742816f1b21 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -15,8 +15,6 @@ * -Conditionally disable interrupts (if they are not enabled, don't disable) */ -#ifdef __KERNEL__ - #include <asm/arcregs.h> /* status32 Reg bits related to Interrupt Handling */ @@ -169,6 +167,4 @@ static inline int arch_irqs_disabled(void) #endif /* __ASSEMBLY__ */ -#endif /* KERNEL */ - #endif diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index b65fca7ffeb..fea93163413 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h @@ -19,7 +19,7 @@ * register API yet */ #undef DBG_MAX_REG_NUM -#define GDB_MAX_REGS 39 +#define GDB_MAX_REGS 87 #define BREAK_INSTR_SIZE 2 #define CACHE_FLUSH_IS_SAFE 1 @@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void) extern void kgdb_trap(struct pt_regs *regs); -enum arc700_linux_regnums { +/* This is the numbering of registers according to the GDB. See GDB's + * arc-tdep.h for details. + * + * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */ +enum arc_linux_regnums { _R0 = 0, _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13, _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24, _R25, _R26, - _BTA = 27, - _LP_START = 28, - _LP_END = 29, - _LP_COUNT = 30, - _STATUS32 = 31, - _BLINK = 32, - _FP = 33, - __SP = 34, - _EFA = 35, - _RET = 36, - _ORIG_R8 = 37, - _STOP_PC = 38 + _FP = 27, + __SP = 28, + _R30 = 30, + _BLINK = 31, + _LP_COUNT = 60, + _STOP_PC = 64, + _RET = 64, + _LP_START = 65, + _LP_END = 66, + _STATUS32 = 67, + _ECR = 76, + _BTA = 82, }; #else diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 82588f3ba77..210fe97464c 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -14,12 +14,19 @@ #ifndef __ASM_ARC_PROCESSOR_H #define __ASM_ARC_PROCESSOR_H -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ #include <asm/ptrace.h> +#ifdef CONFIG_ARC_FPU_SAVE_RESTORE +/* These DPFP regs need to be saved/restored across ctx-sw */ +struct arc_fpu { + struct { + unsigned int l, h; + } aux_dpfp[2]; +}; +#endif + /* Arch specific stuff which needs to be saved per task. 
* However these items are not so important so as to earn a place in * struct thread_info @@ -128,6 +135,4 @@ extern unsigned int get_wchan(struct task_struct *p); */ #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) -#endif /* __KERNEL__ */ - #endif /* __ASM_ARC_PROCESSOR_H */ diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h index e10f8cef56a..6e3ef5ba4f7 100644 --- a/arch/arc/include/asm/setup.h +++ b/arch/arc/include/asm/setup.h @@ -29,7 +29,6 @@ struct cpuinfo_data { }; extern int root_mountflags, end_mem; -extern int running_on_hw; void setup_processor(void); void __init setup_arch_memory(void); diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index 5d06eee43ea..3845b9e94f6 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h @@ -59,7 +59,15 @@ struct plat_smp_ops { /* TBD: stop exporting it for direct population by platform */ extern struct plat_smp_ops plat_smp_ops; -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ + +static inline void smp_init_cpus(void) {} +static inline const char *arc_platform_smp_cpuinfo(void) +{ + return ""; +} + +#endif /* !CONFIG_SMP */ /* * ARC700 doesn't support atomic Read-Modify-Write ops. diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h index 87676c8f141..95822b550a1 100644 --- a/arch/arc/include/asm/string.h +++ b/arch/arc/include/asm/string.h @@ -17,8 +17,6 @@ #include <linux/types.h> -#ifdef __KERNEL__ - #define __HAVE_ARCH_MEMSET #define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCMP @@ -36,5 +34,4 @@ extern char *strcpy(char *dest, const char *src); extern int strcmp(const char *cs, const char *ct); extern __kernel_size_t strlen(const char *); -#endif /* __KERNEL__ */ #endif /* _ASM_ARC_STRING_H */ diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h index dd785befe7f..e56f9fcc558 100644 --- a/arch/arc/include/asm/syscalls.h +++ b/arch/arc/include/asm/syscalls.h @@ -9,8 +9,6 @@ #ifndef _ASM_ARC_SYSCALLS_H #define _ASM_ARC_SYSCALLS_H 1 -#ifdef __KERNEL__ - #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/types.h> @@ -22,6 +20,4 @@ int sys_arc_gettls(void); #include <asm-generic/syscalls.h> -#endif /* __KERNEL__ */ - #endif diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 45be2167201..02bc5ec0fb2 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -16,8 +16,6 @@ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H -#ifdef __KERNEL__ - #include <asm/page.h> #ifdef CONFIG_16KSTACKS @@ -114,6 +112,4 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) * syscall, so all that reamins to be tested is _TIF_WORK_MASK */ -#endif /* __KERNEL__ */ - #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h index 3e5f071bc00..6da6b4edaed 100644 --- a/arch/arc/include/asm/unaligned.h +++ b/arch/arc/include/asm/unaligned.h @@ -14,7 +14,7 @@ #include <asm-generic/unaligned.h> #include <asm/ptrace.h> -#ifdef CONFIG_ARC_MISALIGN_ACCESS +#ifdef CONFIG_ARC_EMUL_UNALIGNED int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs); #else diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile index 8004b4fa646..113f2033da9 100644 --- a/arch/arc/kernel/Makefile +++ b/arch/arc/kernel/Makefile @@ -16,7 +16,7 @@ obj-$(CONFIG_MODULES) += arcksyms.o module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o 
obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o +obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index b8a549c4f54..3b7cd4864ba 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -15,7 +15,7 @@ #include <linux/uaccess.h> #include <asm/disasm.h> -#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \ +#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_EMUL_UNALIGNED) || \ defined(CONFIG_KPROBES) /* disasm_instr: Analyses instruction at addr, stores @@ -535,4 +535,4 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs, return instr.is_branch; } -#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */ +#endif /* CONFIG_KGDB || CONFIG_ARC_EMUL_UNALIGNED || CONFIG_KPROBES */ diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 4d2481bd8b9..b0e8666fdcc 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -91,16 +91,6 @@ stext: st r0, [@uboot_tag] st r2, [@uboot_arg] - ; Identify if running on ISS vs Silicon - ; IDENTITY Reg [ 3 2 1 0 ] - ; (chip-id) ^^^^^ ==> 0xffff for ISS - lr r0, [identity] - lsr r3, r0, 16 - cmp r3, 0xffff - mov.z r4, 0 - mov.nz r4, 1 - st r4, [@running_on_hw] - ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index a2ff5c5d145..ecf6a786937 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c @@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, return -1; } -unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) -{ - return instruction_pointer(regs); -} - int kgdb_arch_init(void) { single_step_data.armed = 0; diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index b9a5685a990..ae1c485cbc6 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -244,25 +244,23 @@ static int arc_pmu_device_probe(struct platform_device *pdev) pr_err("This core does not have performance counters!\n"); return -ENODEV; } + BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS); - arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), - GFP_KERNEL); + READ_BCR(ARC_REG_CC_BUILD, cc_bcr); + if (!cc_bcr.v) { + pr_err("Performance counters exist, but no countable conditions?\n"); + return -ENODEV; + } + + arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); if (!arc_pmu) return -ENOMEM; arc_pmu->n_counters = pct_bcr.c; - BUG_ON(arc_pmu->n_counters > ARC_PMU_MAX_HWEVENTS); - arc_pmu->counter_size = 32 + (pct_bcr.s << 4); - pr_info("ARC PMU found with %d counters of size %d bits\n", - arc_pmu->n_counters, arc_pmu->counter_size); - - READ_BCR(ARC_REG_CC_BUILD, cc_bcr); - - if (!cc_bcr.v) - pr_err("Strange! 
Performance counters exist, but no countable conditions?\n"); - pr_info("ARC PMU has %d countable conditions\n", cc_bcr.c); + pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n", + arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c); cc_name.str[8] = 0; for (i = 0; i < PERF_COUNT_HW_MAX; i++) diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 119dddb752b..252bf603db9 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -13,7 +13,9 @@ #include <linux/console.h> #include <linux/module.h> #include <linux/cpu.h> +#include <linux/clk-provider.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <linux/cache.h> #include <asm/sections.h> #include <asm/arcregs.h> @@ -24,11 +26,10 @@ #include <asm/unwind.h> #include <asm/clk.h> #include <asm/mach_desc.h> +#include <asm/smp.h> #define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x)) -int running_on_hw = 1; /* vs. on ISS */ - /* Part of U-boot ABI: see head.S */ int __initdata uboot_tag; char __initdata *uboot_arg; @@ -42,26 +43,26 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; static void read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; + struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); + READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); - cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR); + READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); cpu->uncached_base = uncached_space.start << 24; - cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR); - cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR); - cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR); - cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR); - cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR); - READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul); + READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); - cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR); - cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR); + cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */ + cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */ + cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ + cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; + cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ /* Note that we read the CCM BCRs independent of kernel config * This is to catch the cases where user doesn't know that @@ -95,43 +96,76 @@ static void read_arc_build_cfg_regs(void) read_decode_mmu_bcr(); read_decode_cache_bcr(); - READ_BCR(ARC_REG_FP_BCR, cpu->fp); - READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp); + { + struct bcr_fp_arcompact sp, dp; + struct bcr_bpu_arcompact bpu; + + READ_BCR(ARC_REG_FP_BCR, sp); + READ_BCR(ARC_REG_DPFP_BCR, dp); + cpu->extn.fpu_sp = sp.ver ? 1 : 0; + cpu->extn.fpu_dp = dp.ver ? 1 : 0; + + READ_BCR(ARC_REG_BPU_BCR, bpu); + cpu->bpu.ver = bpu.ver; + cpu->bpu.full = bpu.fam ? 1 : 0; + if (bpu.ent) { + cpu->bpu.num_cache = 256 << (bpu.ent - 1); + cpu->bpu.num_pred = 256 << (bpu.ent - 1); + } + } + + READ_BCR(ARC_REG_AP_BCR, bcr); + cpu->extn.ap = bcr.ver ? 1 : 0; + + READ_BCR(ARC_REG_SMART_BCR, bcr); + cpu->extn.smart = bcr.ver ? 
1 : 0; + + cpu->extn.debug = cpu->extn.ap | cpu->extn.smart; } static const struct cpuinfo_data arc_cpu_tbl[] = { - { {0x10, "ARCTangent A5"}, 0x1F}, { {0x20, "ARC 600" }, 0x2F}, { {0x30, "ARC 700" }, 0x33}, { {0x34, "ARC 700 R4.10"}, 0x34}, + { {0x35, "ARC 700 R4.11"}, 0x35}, { {0x00, NULL } } }; +#define IS_AVAIL1(v, str) ((v) ? str : "") +#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") +#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) + static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) { - int n = 0; struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; struct bcr_identity *core = &cpu->core; const struct cpuinfo_data *tbl; - int be = 0; -#ifdef CONFIG_CPU_BIG_ENDIAN - be = 1; -#endif + char *isa_nm; + int i, be, atomic; + int n = 0; + FIX_PTR(cpu); + { + isa_nm = "ARCompact"; + be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + + atomic = cpu->isa.atomic1; + if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ + atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); + } + n += scnprintf(buf + n, len - n, - "\nARC IDENTITY\t: Family [%#02x]" - " Cpu-id [%#02x] Chip-id [%#4x]\n", - core->family, core->cpu_id, - core->chip_id); + "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", + core->family, core->cpu_id, core->chip_id); for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { if ((core->family >= tbl->info.id) && (core->family <= tbl->up_range)) { n += scnprintf(buf + n, len - n, - "processor\t: %s %s\n", - tbl->info.str, - be ? "[Big Endian]" : ""); + "processor [%d]\t: %s (%s ISA) %s\n", + cpu_id, tbl->info.str, isa_nm, + IS_AVAIL1(be, "[Big-Endian]")); break; } } @@ -143,34 +177,35 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) (unsigned int)(arc_get_core_freq() / 1000000), (unsigned int)(arc_get_core_freq() / 10000) % 100); - n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n", - (cpu->timers & 0x200) ? "TIMER1" : "", - (cpu->timers & 0x100) ? 
"TIMER0" : ""); + n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", + IS_AVAIL1(cpu->timers.t0, "Timer0 "), + IS_AVAIL1(cpu->timers.t1, "Timer1 "), + IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC)); - n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n", - cpu->vec_base); + n += i = scnprintf(buf + n, len - n, "%s%s", + IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC)); - n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n", - cpu->uncached_base); + if (i) + n += scnprintf(buf + n, len - n, "\n\t\t: "); - return buf; -} + n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", + IS_AVAIL1(cpu->extn_mpy.ver, "mpy "), + IS_AVAIL1(cpu->extn.norm, "norm "), + IS_AVAIL1(cpu->extn.barrel, "barrel-shift "), + IS_AVAIL1(cpu->extn.swap, "swap "), + IS_AVAIL1(cpu->extn.minmax, "minmax "), + IS_AVAIL1(cpu->extn.crc, "crc "), + IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE)); -static const struct id_to_str mul_type_nm[] = { - { 0x0, "N/A"}, - { 0x1, "32x32 (spl Result Reg)" }, - { 0x2, "32x32 (ANY Result Reg)" } -}; + if (cpu->bpu.ver) + n += scnprintf(buf + n, len - n, + "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n", + IS_AVAIL1(cpu->bpu.full, "full"), + IS_AVAIL1(!cpu->bpu.full, "partial"), + cpu->bpu.num_cache, cpu->bpu.num_pred); -static const struct id_to_str mac_mul_nm[] = { - {0x0, "N/A"}, - {0x1, "N/A"}, - {0x2, "Dual 16 x 16"}, - {0x3, "N/A"}, - {0x4, "32x16"}, - {0x5, "N/A"}, - {0x6, "Dual 16x16 and 32x16"} -}; + return buf; +} static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) { @@ -178,67 +213,46 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; FIX_PTR(cpu); -#define IS_AVAIL1(var, str) ((var) ? str : "") -#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "") -#define IS_USED(cfg) (IS_ENABLED(cfg) ? "(in-use)" : "(not used)") n += scnprintf(buf + n, len - n, - "Extn [700-Base]\t: %s %s %s %s %s %s\n", - IS_AVAIL2(cpu->extn.norm, "norm,"), - IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"), - IS_AVAIL1(cpu->extn.swap, "swap,"), - IS_AVAIL2(cpu->extn.minmax, "minmax,"), - IS_AVAIL1(cpu->extn.crc, "crc,"), - IS_AVAIL2(cpu->extn.ext_arith, "ext-arith")); - - n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s", - mul_type_nm[cpu->extn.mul].str); - - n += scnprintf(buf + n, len - n, " MAC MPY: %s\n", - mac_mul_nm[cpu->extn_mac_mul.type].str); - - if (cpu->core.family == 0x34) { - n += scnprintf(buf + n, len - n, - "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", - IS_USED(CONFIG_ARC_HAS_LLSC), - IS_USED(CONFIG_ARC_HAS_SWAPE), - IS_USED(CONFIG_ARC_HAS_RTSC)); - } - - n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s", - !(cpu->dccm.sz || cpu->iccm.sz) ? 
"N/A" : ""); - - if (cpu->dccm.sz) - n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ", - cpu->dccm.base_addr, TO_KB(cpu->dccm.sz)); - - if (cpu->iccm.sz) - n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB", + "Vector Table\t: %#x\nUncached Base\t: %#x\n", + cpu->vec_base, cpu->uncached_base); + + if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) + n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n", + IS_AVAIL1(cpu->extn.fpu_sp, "SP "), + IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); + + if (cpu->extn.debug) + n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", + IS_AVAIL1(cpu->extn.ap, "ActionPoint "), + IS_AVAIL1(cpu->extn.smart, "smaRT "), + IS_AVAIL1(cpu->extn.rtt, "RTT ")); + + if (cpu->dccm.sz || cpu->iccm.sz) + n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", + cpu->dccm.base_addr, TO_KB(cpu->dccm.sz), cpu->iccm.base_addr, TO_KB(cpu->iccm.sz)); - n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s", - !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : ""); - - if (cpu->fp.ver) - n += scnprintf(buf + n, len - n, "SP [v%d] %s", - cpu->fp.ver, cpu->fp.fast ? "(fast)" : ""); - - if (cpu->dpfp.ver) - n += scnprintf(buf + n, len - n, "DP [v%d] %s", - cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : ""); - - n += scnprintf(buf + n, len - n, "\n"); - n += scnprintf(buf + n, len - n, "OS ABI [v3]\t: no-legacy-syscalls\n"); return buf; } -static void arc_chk_ccms(void) +static void arc_chk_core_config(void) { -#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + int fpu_enabled; + + if (!cpu->timers.t0) + panic("Timer0 is not present!\n"); + + if (!cpu->timers.t1) + panic("Timer1 is not present!\n"); + + if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc) + panic("RTSC is not present\n"); #ifdef CONFIG_ARC_HAS_DCCM /* @@ -256,33 +270,20 @@ static void arc_chk_ccms(void) if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) panic("Linux built with incorrect ICCM Size\n"); #endif -#endif -} -/* - * Ensure that FP hardware and kernel config match - * -If hardware contains DPFP, kernel needs to save/restore FPU state - * across context switches - * -If hardware lacks DPFP, but kernel configured to save FPU state then - * kernel trying to access non-existant DPFP regs will crash - * - * We only check for Dbl precision Floating Point, because only DPFP - * hardware has dedicated regs which need to be saved/restored on ctx-sw - * (Single Precision uses core regs), thus kernel is kind of oblivious to it - */ -static void arc_chk_fpu(void) -{ - struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + /* + * FP hardware/software config sanity + * -If hardware contains DPFP, kernel needs to save/restore FPU state + * -If not, it will crash trying to save/restore the non-existant regs + * + * (only DPDP checked since SP has no arch visible regs) + */ + fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE); - if (cpu->dpfp.ver) { -#ifndef CONFIG_ARC_FPU_SAVE_RESTORE - pr_warn("DPFP support broken in this kernel...\n"); -#endif - } else { -#ifdef CONFIG_ARC_FPU_SAVE_RESTORE - panic("H/w lacks DPFP support, apps won't work\n"); -#endif - } + if (cpu->extn.fpu_dp && !fpu_enabled) + pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); + else if (!cpu->extn.fpu_dp && fpu_enabled) + panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); } /* @@ -303,15 +304,11 @@ void setup_processor(void) arc_mmu_init(); arc_cache_init(); - arc_chk_ccms(); printk(arc_extn_mumbojumbo(cpu_id, str, 
sizeof(str))); - -#ifdef CONFIG_SMP printk(arc_platform_smp_cpuinfo()); -#endif - arc_chk_fpu(); + arc_chk_core_config(); } static inline int is_kernel(unsigned long addr) @@ -360,11 +357,7 @@ void __init setup_arch(char **cmdline_p) machine_desc->init_early(); setup_processor(); - -#ifdef CONFIG_SMP smp_init_cpus(); -#endif - setup_arch_memory(); /* copy flat DT out of .init and then unflatten it */ @@ -385,7 +378,13 @@ void __init setup_arch(char **cmdline_p) static int __init customize_machine(void) { - /* Add platform devices */ + of_clk_init(NULL); + /* + * Traverses flattened DeviceTree - registering platform devices + * (if any) complete with their resources + */ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + if (machine_desc->init_machine) machine_desc->init_machine(); @@ -419,19 +418,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); - seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n", + seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100); seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE)); - seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE)); - seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE)); - -#ifdef CONFIG_SMP seq_printf(m, arc_platform_smp_cpuinfo()); -#endif free_page((unsigned long)str); done: diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index dcd317c47d0..d01df0c517a 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -101,7 +101,7 @@ void __weak arc_platform_smp_wait_to_boot(int cpu) const char *arc_platform_smp_cpuinfo(void) { - return plat_smp_ops.info; + return plat_smp_ops.info ? : ""; } /* diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 9e1142729fd..8c3a3e02ba9 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -530,16 +530,9 @@ EXPORT_SYMBOL(dma_cache_wback); */ void flush_icache_range(unsigned long kstart, unsigned long kend) { - unsigned int tot_sz, off, sz; - unsigned long phy, pfn; + unsigned int tot_sz; - /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */ - - /* This is not the right API for user virtual address */ - if (kstart < TASK_SIZE) { - BUG_ON("Flush icache range for user virtual addr space"); - return; - } + WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__); /* Shortcut for bigger flush ranges. * Here we don't care if this was kernel virtual or phy addr @@ -572,6 +565,9 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) * straddles across 2 virtual pages and hence need for loop */ while (tot_sz > 0) { + unsigned int off, sz; + unsigned long phy, pfn; + off = kstart % PAGE_SIZE; pfn = vmalloc_to_pfn((void *)kstart); phy = (pfn << PAGE_SHIFT) + off; diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index e1acf0ce564..7f47d2a56f4 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -609,14 +609,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) int n = 0; struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; - n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ", - p_mmu->ver, TO_KB(p_mmu->pg_sz)); - n += scnprintf(buf + n, len - n, - "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", + "MMU [v%x]\t: %dk PAGE, JTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n", + p_mmu->ver, TO_KB(p_mmu->pg_sz), p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, p_mmu->u_dtlb, p_mmu->u_itlb, - IS_ENABLED(CONFIG_ARC_MMU_SASID) ? 
"SASID" : ""); + IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : ""); return buf; } diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig index b9f34cf55ac..217593a7075 100644 --- a/arch/arc/plat-arcfpga/Kconfig +++ b/arch/arc/plat-arcfpga/Kconfig @@ -8,7 +8,7 @@ menuconfig ARC_PLAT_FPGA_LEGACY bool "\"Legacy\" ARC FPGA dev Boards" - select ISS_SMP_EXTN if SMP + select ARC_HAS_COH_CACHES if SMP help Support for ARC development boards, provided by Synopsys. These are based on FPGA or ISS. e.g. @@ -18,17 +18,6 @@ menuconfig ARC_PLAT_FPGA_LEGACY if ARC_PLAT_FPGA_LEGACY -config ARC_BOARD_ANGEL4 - bool "ARC Angel4" - default y - help - ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based) - -config ARC_BOARD_ML509 - bool "ML509" - help - ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based) - config ISS_SMP_EXTN bool "ARC SMP Extensions (ISS Models only)" default n diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h deleted file mode 100644 index 2c9dea690ac..00000000000 --- a/arch/arc/plat-arcfpga/include/plat/irq.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * vineetg: Feb 2009 - * -For AA4 board, IRQ assignments to peripherals - */ - -#ifndef __PLAT_IRQ_H -#define __PLAT_IRQ_H - -#define UART0_IRQ 5 -#define UART1_IRQ 10 -#define UART2_IRQ 11 - -#define IDE_IRQ 13 -#define PCI_IRQ 14 -#define PS2_IRQ 15 - -#ifdef CONFIG_SMP -#define IDU_INTERRUPT_0 16 -#endif - -#endif diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h deleted file mode 100644 index 5c78e6135a1..00000000000 --- a/arch/arc/plat-arcfpga/include/plat/memmap.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * vineetg: Feb 2009 - * -For AA4 board, System Memory Map for Peripherals etc - */ - -#ifndef __PLAT_MEMMAP_H -#define __PLAT_MEMMAP_H - -#define UART0_BASE 0xC0FC1000 -#define UART1_BASE 0xC0FC1100 - -#define IDE_CONTROLLER_BASE 0xC0FC9000 - -#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000 - -#define PGU_BASEADDR 0xC0FC8000 -#define VLCK_ADDR 0xC0FCF028 - -#define BVCI_LAT_UNIT_BASE 0xC0FED000 - -#define PS2_BASE_ADDR 0xC0FCC000 - -#endif diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c index 1038949a99a..afc88254acc 100644 --- a/arch/arc/plat-arcfpga/platform.c +++ b/arch/arc/plat-arcfpga/platform.c @@ -8,37 +8,9 @@ * published by the Free Software Foundation. 
*/ -#include <linux/types.h> #include <linux/init.h> -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/console.h> -#include <linux/of_platform.h> -#include <asm/setup.h> -#include <asm/clk.h> #include <asm/mach_desc.h> -#include <plat/memmap.h> #include <plat/smp.h> -#include <plat/irq.h> - -static void __init plat_fpga_early_init(void) -{ - pr_info("[plat-arcfpga]: registering early dev resources\n"); - -#ifdef CONFIG_ISS_SMP_EXTN - iss_model_init_early_smp(); -#endif -} - -static void __init plat_fpga_populate_dev(void) -{ - /* - * Traverses flattened DeviceTree - registering platform devices - * (if any) complete with their resources - */ - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); -} /*----------------------- Machine Descriptions ------------------------------ * @@ -48,41 +20,26 @@ static void __init plat_fpga_populate_dev(void) * callback set, by matching the DT compatible name. */ -static const char *aa4_compat[] __initconst = { +static const char *legacy_fpga_compat[] __initconst = { "snps,arc-angel4", - NULL, -}; - -MACHINE_START(ANGEL4, "angel4") - .dt_compat = aa4_compat, - .init_early = plat_fpga_early_init, - .init_machine = plat_fpga_populate_dev, -#ifdef CONFIG_ISS_SMP_EXTN - .init_smp = iss_model_init_smp, -#endif -MACHINE_END - -static const char *ml509_compat[] __initconst = { "snps,arc-ml509", NULL, }; -MACHINE_START(ML509, "ml509") - .dt_compat = ml509_compat, - .init_early = plat_fpga_early_init, - .init_machine = plat_fpga_populate_dev, -#ifdef CONFIG_SMP +MACHINE_START(LEGACY_FPGA, "legacy_fpga") + .dt_compat = legacy_fpga_compat, +#ifdef CONFIG_ISS_SMP_EXTN + .init_early = iss_model_init_early_smp, .init_smp = iss_model_init_smp, #endif MACHINE_END -static const char *nsimosci_compat[] __initconst = { +static const char *simulation_compat[] __initconst = { + "snps,nsim", "snps,nsimosci", NULL, }; -MACHINE_START(NSIMOSCI, "nsimosci") - .dt_compat = nsimosci_compat, - .init_early = NULL, - .init_machine = plat_fpga_populate_dev, +MACHINE_START(SIMULATION, "simulation") + .dt_compat = simulation_compat, MACHINE_END diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c index 92bad912207..64797ba3bbe 100644 --- a/arch/arc/plat-arcfpga/smp.c +++ b/arch/arc/plat-arcfpga/smp.c @@ -13,9 +13,10 @@ #include <linux/smp.h> #include <linux/irq.h> -#include <plat/irq.h> #include <plat/smp.h> +#define IDU_INTERRUPT_0 16 + static char smp_cpuinfo_buf[128]; /* diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig index 6994c188dc8..d14b3d3c5df 100644 --- a/arch/arc/plat-tb10x/Kconfig +++ b/arch/arc/plat-tb10x/Kconfig @@ -18,7 +18,6 @@ menuconfig ARC_PLAT_TB10X bool "Abilis TB10x" - select COMMON_CLK select PINCTRL select PINCTRL_TB10X select PINMUX diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c index 06cb3092946..da0ac0960a4 100644 --- a/arch/arc/plat-tb10x/tb10x.c +++ b/arch/arc/plat-tb10x/tb10x.c @@ -19,21 +19,9 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - #include <linux/init.h> -#include <linux/of_platform.h> -#include <linux/clk-provider.h> -#include <linux/pinctrl/consumer.h> - #include <asm/mach_desc.h> - -static void __init tb10x_platform_init(void) -{ - of_clk_init(NULL); - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); -} - static const char *tb10x_compat[] __initdata = { "abilis,arc-tb10x", NULL, @@ -41,5 +29,4 @@ static const char *tb10x_compat[] __initdata = { MACHINE_START(TB10x, 
"tb10x") .dt_compat = tb10x_compat, - .init_machine = tb10x_platform_init, MACHINE_END diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 03dc4c1a873..d8f6a2ec3d4 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -1187,7 +1187,7 @@ config DEBUG_UART_VIRT default 0xf1c28000 if DEBUG_SUNXI_UART0 default 0xf1c28400 if DEBUG_SUNXI_UART1 default 0xf1f02800 if DEBUG_SUNXI_R_UART - default 0xf2100000 if DEBUG_PXA_UART1 + default 0xf6200000 if DEBUG_PXA_UART1 default 0xf4090000 if ARCH_LPC32XX default 0xf4200000 if ARCH_GEMINI default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \ diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 413fd94b530..68be9017593 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -397,8 +397,7 @@ dtb_check_done: add sp, sp, r6 #endif - tst r4, #1 - bleq cache_clean_flush + bl cache_clean_flush adr r0, BSYM(restart) add r0, r0, r6 @@ -1047,6 +1046,8 @@ cache_clean_flush: b call_cache_fn __armv4_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #1 mov r3, #0 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache @@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush: mov pc, lr __fa526_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache mcr p15, 0, r1, c7, c5, 0 @ flush I cache @@ -1072,13 +1075,16 @@ __fa526_cache_flush: __armv6_mmu_cache_flush: mov r1, #0 - mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D + tst r4, #1 + mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB - mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified + mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv7_mmu_cache_flush: + tst r4, #1 + bne iflush mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) mov r10, #0 @@ -1139,6 +1145,8 @@ iflush: mov pc, lr __armv5tej_mmu_cache_flush: + tst r4, #1 + movne pc, lr 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache bne 1b mcr p15, 0, r0, c7, c5, 0 @ flush I cache @@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush: mov pc, lr __armv4_mmu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #64*1024 @ default: 32K dcache size (*2) mov r11, #32 @ default: 32 byte line size mrc p15, 0, r3, c0, c0, 1 @ read cache type @@ -1179,6 +1189,8 @@ no_cache_id: __armv3_mmu_cache_flush: __armv3_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index e2156a583de..c4b968f0feb 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -489,7 +489,7 @@ reg = <0x00060000 0x00020000>; }; partition@4 { - label = "NAND.u-boot-spl"; + label = "NAND.u-boot-spl-os"; reg = <0x00080000 0x00040000>; }; partition@5 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index e7ac47fa661..a521ac0a7d5 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -291,8 +291,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdcdc3"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts 
index 859ff3d620e..87aa4f3b8b3 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -363,8 +363,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdds_ddr"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index ac3e4859935..f7e9bba10bd 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -358,8 +358,8 @@ dcdc3: regulator-dcdc3 { compatible = "ti,tps65218-dcdc3"; regulator-name = "vdcdc3"; - regulator-min-microvolt = <1350000>; - regulator-max-microvolt = <1350000>; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index d68b3c4862b..51416c7d062 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -122,9 +122,10 @@ interrupts-extended = <&pmc AT91_PMC_LOCKB>; clocks = <&main>; reg = <1>; - atmel,clk-input-range = <1000000 5000000>; + atmel,clk-input-range = <1000000 32000000>; #atmel,pll-clk-output-range-cells = <4>; - atmel,pll-clk-output-ranges = <70000000 130000000 1 1>; + atmel,pll-clk-output-ranges = <80000000 200000000 0 1>, + <190000000 240000000 2 1>; }; mck: masterck { diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts index e51fcef884a..60429ad1c5d 100644 --- a/arch/arm/boot/dts/exynos5250-snow.dts +++ b/arch/arm/boot/dts/exynos5250-snow.dts @@ -624,4 +624,8 @@ num-cs = <1>; }; +&usbdrd_dwc3 { + dr_mode = "host"; +}; + #include "cros-ec-keyboard.dtsi" diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index f21b9aa00fb..d55c1a2eb79 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -555,7 +555,7 @@ #size-cells = <1>; ranges; - dwc3 { + usbdrd_dwc3: dwc3 { compatible = "synopsys,dwc3"; reg = <0x12000000 0x10000>; interrupts = <0 72 0>; diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 09664fcf5af..0e13b4b10a9 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -193,7 +193,6 @@ i2c0: i2c@80058000 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; - clock-frequency = <400000>; status = "okay"; sgtl5000: codec@0a { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 739fcf29c64..bc82a12d4c2 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -668,6 +668,8 @@ bank-width = <2>; pinctrl-names = "default"; pinctrl-0 = <ðernet_pins>; + power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; /* gpio86 */ + reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* gpio164 */ gpmc,device-width = <2>; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi index d46c213a17a..eed697a6bd6 100644 --- a/arch/arm/boot/dts/r8a7740.dtsi +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -433,7 +433,7 @@ clocks = <&cpg_clocks R8A7740_CLK_S>, <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>, <&cpg_clocks R8A7740_CLK_B>, - <&sub_clk>, <&sub_clk>, + <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>, <&cpg_clocks R8A7740_CLK_B>; #clock-cells = <1>; renesas,clock-indices = < diff --git 
a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index d0e17733dc1..e20affe156c 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -666,9 +666,9 @@ #clock-cells = <0>; clock-output-names = "sd2"; }; - sd3_clk: sd3_clk@e615007c { + sd3_clk: sd3_clk@e615026c { compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock"; - reg = <0 0xe615007c 0 4>; + reg = <0 0xe615026c 0 4>; clocks = <&pll1_div2_clk>; #clock-cells = <0>; clock-output-names = "sd3"; diff --git a/arch/arm/boot/dts/sama5d31.dtsi b/arch/arm/boot/dts/sama5d31.dtsi index 7997dc9863e..883878b3297 100644 --- a/arch/arm/boot/dts/sama5d31.dtsi +++ b/arch/arm/boot/dts/sama5d31.dtsi @@ -12,5 +12,5 @@ #include "sama5d3_uart.dtsi" / { - compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d33.dtsi b/arch/arm/boot/dts/sama5d33.dtsi index 39f832253ca..4b4434aca35 100644 --- a/arch/arm/boot/dts/sama5d33.dtsi +++ b/arch/arm/boot/dts/sama5d33.dtsi @@ -10,5 +10,5 @@ #include "sama5d3_gmac.dtsi" / { - compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d34.dtsi b/arch/arm/boot/dts/sama5d34.dtsi index 89cda2c0da3..aa01573fdee 100644 --- a/arch/arm/boot/dts/sama5d34.dtsi +++ b/arch/arm/boot/dts/sama5d34.dtsi @@ -12,5 +12,5 @@ #include "sama5d3_mci2.dtsi" / { - compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d35.dtsi b/arch/arm/boot/dts/sama5d35.dtsi index d20cd71b5f0..16c39f4c96a 100644 --- a/arch/arm/boot/dts/sama5d35.dtsi +++ b/arch/arm/boot/dts/sama5d35.dtsi @@ -14,5 +14,5 @@ #include "sama5d3_tcb1.dtsi" / { - compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi index db58cad6acd..e85139ef40a 100644 --- a/arch/arm/boot/dts/sama5d36.dtsi +++ b/arch/arm/boot/dts/sama5d36.dtsi @@ -16,5 +16,5 @@ #include "sama5d3_uart.dtsi" / { - compatible = "atmel,samad36", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d36", "atmel,sama5d3", "atmel,sama5"; }; diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi index 962dc28dc37..cfcd200b0c1 100644 --- a/arch/arm/boot/dts/sama5d3xcm.dtsi +++ b/arch/arm/boot/dts/sama5d3xcm.dtsi @@ -8,7 +8,7 @@ */ / { - compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5"; + compatible = "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5"; chosen { bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs"; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 45fce2cf6fe..4472fd92685 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -547,7 +547,7 @@ status = "disabled"; }; - gpio@ff708000 { + gpio0: gpio@ff708000 { #address-cells = <1>; #size-cells = <0>; compatible = "snps,dw-apb-gpio"; @@ -555,7 +555,7 @@ clocks = <&per_base_clk>; status = "disabled"; - gpio0: gpio-controller@0 { + porta: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; @@ -567,7 +567,7 @@ }; }; - gpio@ff709000 { + gpio1: gpio@ff709000 { #address-cells = <1>; #size-cells = <0>; compatible = "snps,dw-apb-gpio"; @@ -575,7 +575,7 @@ clocks = 
<&per_base_clk>; status = "disabled"; - gpio1: gpio-controller@0 { + portb: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; @@ -587,7 +587,7 @@ }; }; - gpio@ff70a000 { + gpio2: gpio@ff70a000 { #address-cells = <1>; #size-cells = <0>; compatible = "snps,dw-apb-gpio"; @@ -595,7 +595,7 @@ clocks = <&per_base_clk>; status = "disabled"; - gpio2: gpio-controller@0 { + portc: gpio-controller@0 { compatible = "snps,dw-apb-gpio-port"; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/socfpga_arria5.dtsi b/arch/arm/boot/dts/socfpga_arria5.dtsi index 03e8268ae21..1907cc60045 100644 --- a/arch/arm/boot/dts/socfpga_arria5.dtsi +++ b/arch/arm/boot/dts/socfpga_arria5.dtsi @@ -29,7 +29,7 @@ }; }; - dwmmc0@ff704000 { + mmc0: dwmmc0@ff704000 { num-slots = <1>; broken-cd; bus-width = <4>; diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts index 27d551c384d..ccaf41742fc 100644 --- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts +++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts @@ -37,6 +37,13 @@ */ ethernet0 = &gmac1; }; + + regulator_3_3v: 3-3-v-regulator { + compatible = "regulator-fixed"; + regulator-name = "3.3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; }; &gmac1 { @@ -68,6 +75,11 @@ }; }; +&mmc0 { + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; +}; + &usb1 { status = "okay"; }; diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts index d7296a5f750..258865da8f6 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts @@ -37,6 +37,13 @@ */ ethernet0 = &gmac1; }; + + regulator_3_3v: 3-3-v-regulator { + compatible = "regulator-fixed"; + regulator-name = "3.3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; }; &gmac1 { @@ -53,6 +60,10 @@ rxc-skew-ps = <2000>; }; +&gpio1 { + status = "okay"; +}; + &i2c0 { status = "okay"; @@ -69,7 +80,9 @@ }; &mmc0 { - cd-gpios = <&gpio1 18 0>; + cd-gpios = <&portb 18 0>; + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; }; &usb1 { diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts index d26f155f5fd..16ea6f5f2ab 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts @@ -37,6 +37,13 @@ */ ethernet0 = &gmac1; }; + + regulator_3_3v: vcc3p3-regulator { + compatible = "regulator-fixed"; + regulator-name = "VCC3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; }; &gmac1 { @@ -53,6 +60,11 @@ rxc-skew-ps = <2000>; }; +&mmc0 { + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; +}; + &usb1 { status = "okay"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 543f895d18d..2e652e2339e 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -361,6 +361,10 @@ clocks = <&ahb1_gates 6>; resets = <&ahb1_rst 6>; #dma-cells = <1>; + + /* DMA controller requires AHB1 clocked from PLL6 */ + assigned-clocks = <&ahb1_mux>; + assigned-clock-parents = <&pll6>; }; mmc0: mmc@01c0f000 { diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts index 5c21d216515..8b7aa0dcdc6 100644 --- a/arch/arm/boot/dts/tegra114-dalmore.dts +++ b/arch/arm/boot/dts/tegra114-dalmore.dts @@ -15,6 +15,7 @@ aliases { rtc0 = 
"/i2c@7000d000/tps65913@58"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts index c7c6825f11f..38acf78d781 100644 --- a/arch/arm/boot/dts/tegra114-roth.dts +++ b/arch/arm/boot/dts/tegra114-roth.dts @@ -15,6 +15,10 @@ linux,initrd-end = <0x82800000>; }; + aliases { + serial0 = &uartd; + }; + firmware { trusted-foundations { compatible = "tlm,trusted-foundations"; @@ -916,8 +920,6 @@ regulator-name = "vddio-sdmmc3"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; - regulator-always-on; - regulator-boot-on; }; ldousb { @@ -962,7 +964,7 @@ sdhci@78000400 { status = "okay"; bus-width = <4>; - vmmc-supply = <&vddio_sdmmc3>; + vqmmc-supply = <&vddio_sdmmc3>; cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>; power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>; }; @@ -971,7 +973,6 @@ sdhci@78000600 { status = "okay"; bus-width = <8>; - vmmc-supply = <&vdd_1v8>; non-removable; }; diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts index 96366214563..f91c2c9b2f9 100644 --- a/arch/arm/boot/dts/tegra114-tn7.dts +++ b/arch/arm/boot/dts/tegra114-tn7.dts @@ -15,6 +15,10 @@ linux,initrd-end = <0x82800000>; }; + aliases { + serial0 = &uartd; + }; + firmware { trusted-foundations { compatible = "tlm,trusted-foundations"; @@ -240,7 +244,6 @@ sdhci@78000600 { status = "okay"; bus-width = <8>; - vmmc-supply = <&vdd_1v8>; non-removable; }; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 2ca9c1807f7..222f3b3f4dd 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -9,13 +9,6 @@ compatible = "nvidia,tegra114"; interrupt-parent = <&gic>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - }; - host1x@50000000 { compatible = "nvidia,tegra114-host1x", "simple-bus"; reg = <0x50000000 0x00028000>; diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index 029c9a02154..51b373ff106 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts index 7d0784ce4c7..53181d31024 100644 --- a/arch/arm/boot/dts/tegra124-nyan-big.dts +++ b/arch/arm/boot/dts/tegra124-nyan-big.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts index 13008858e96..5c3f7813360 100644 --- a/arch/arm/boot/dts/tegra124-venice2.dts +++ b/arch/arm/boot/dts/tegra124-venice2.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@0,7000d000/pmic@40"; rtc1 = "/rtc@0,7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index 478c555ebd9..df2b06b2998 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -286,7 +286,7 @@ * the APB DMA based serial driver, the comptible is * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart". 
*/ - serial@0,70006000 { + uarta: serial@0,70006000 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006000 0x0 0x40>; reg-shift = <2>; @@ -299,7 +299,7 @@ status = "disabled"; }; - serial@0,70006040 { + uartb: serial@0,70006040 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006040 0x0 0x40>; reg-shift = <2>; @@ -312,7 +312,7 @@ status = "disabled"; }; - serial@0,70006200 { + uartc: serial@0,70006200 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006200 0x0 0x40>; reg-shift = <2>; @@ -325,7 +325,7 @@ status = "disabled"; }; - serial@0,70006300 { + uartd: serial@0,70006300 { compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; reg = <0x0 0x70006300 0x0 0x40>; reg-shift = <2>; diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts index a37279af687..b926a07b944 100644 --- a/arch/arm/boot/dts/tegra20-harmony.dts +++ b/arch/arm/boot/dts/tegra20-harmony.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts index 8cfb83f42e1..1dd7d7bfdfc 100644 --- a/arch/arm/boot/dts/tegra20-iris-512.dts +++ b/arch/arm/boot/dts/tegra20-iris-512.dts @@ -6,6 +6,11 @@ model = "Toradex Colibri T20 512MB on Iris"; compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20"; + aliases { + serial0 = &uarta; + serial1 = &uartd; + }; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts index 1b7c56b33ac..9b87526ab0b 100644 --- a/arch/arm/boot/dts/tegra20-medcom-wide.dts +++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts @@ -6,6 +6,10 @@ model = "Avionic Design Medcom-Wide board"; compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20"; + aliases { + serial0 = &uartd; + }; + pwm@7000a000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts index d4438e30de4..ed7e1009326 100644 --- a/arch/arm/boot/dts/tegra20-paz00.dts +++ b/arch/arm/boot/dts/tegra20-paz00.dts @@ -10,6 +10,8 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartc; }; memory { diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index a1d4bf9895d..ea282c7c0ca 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi index 80e7d386ce3..13d4e618527 100644 --- a/arch/arm/boot/dts/tegra20-tamonten.dtsi +++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi @@ -7,6 +7,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index 5ad87979ab1..d99af4ef9c6 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000c500/rtc@56"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts index ca8484cccdd..04c58e9ca49 100644 --- 
a/arch/arm/boot/dts/tegra20-ventana.dts +++ b/arch/arm/boot/dts/tegra20-ventana.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/tps6586x@34"; rtc1 = "/rtc@7000e000"; + serial0 = &uartd; }; memory { diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index 1843725785c..340d81108df 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -10,6 +10,7 @@ aliases { rtc0 = "/i2c@7000d000/max8907@3c"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 3b374c49d04..8acf5d85c99 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -9,14 +9,6 @@ compatible = "nvidia,tegra20"; interrupt-parent = <&intc>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - serial4 = &uarte; - }; - host1x@50000000 { compatible = "nvidia,tegra20-host1x", "simple-bus"; reg = <0x50000000 0x00024000>; diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts index 45d40f02458..6236bdecb48 100644 --- a/arch/arm/boot/dts/tegra30-apalis-eval.dts +++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts @@ -11,6 +11,10 @@ rtc0 = "/i2c@7000c000/rtc@68"; rtc1 = "/i2c@7000d000/tps65911@2d"; rtc2 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartb; + serial2 = &uartc; + serial3 = &uartd; }; pcie-controller@00003000 { diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts index cee8f2246fd..6b157eeabcc 100644 --- a/arch/arm/boot/dts/tegra30-beaver.dts +++ b/arch/arm/boot/dts/tegra30-beaver.dts @@ -9,6 +9,7 @@ aliases { rtc0 = "/i2c@7000d000/tps65911@2d"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; }; memory { diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index 20637954624..a1b682ea01b 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -30,6 +30,8 @@ aliases { rtc0 = "/i2c@7000d000/tps65911@2d"; rtc1 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartc; }; memory { diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts index 7793abd5bef..4d3ddc58564 100644 --- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts @@ -10,6 +10,9 @@ rtc0 = "/i2c@7000c000/rtc@68"; rtc1 = "/i2c@7000d000/tps65911@2d"; rtc2 = "/rtc@7000e000"; + serial0 = &uarta; + serial1 = &uartb; + serial2 = &uartd; }; host1x@50000000 { diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index aa6ccea13d3..b270b9e3d45 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -9,14 +9,6 @@ compatible = "nvidia,tegra30"; interrupt-parent = <&intc>; - aliases { - serial0 = &uarta; - serial1 = &uartb; - serial2 = &uartc; - serial3 = &uartd; - serial4 = &uarte; - }; - pcie-controller@00003000 { compatible = "nvidia,tegra30-pcie"; device_type = "pci"; diff --git a/arch/arm/boot/dts/vf610-cosmic.dts b/arch/arm/boot/dts/vf610-cosmic.dts index 3fd1b74e121..de1b453c293 100644 --- a/arch/arm/boot/dts/vf610-cosmic.dts +++ b/arch/arm/boot/dts/vf610-cosmic.dts @@ -33,6 +33,13 @@ }; +&esdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc1>; + bus-width = <4>; + status = "okay"; +}; + &fec1 { phy-mode = "rmii"; pinctrl-names = "default"; @@ -42,6 +49,18 @@ &iomuxc { vf610-cosmic { + pinctrl_esdhc1: esdhc1grp { + 
fsl,pins = < + VF610_PAD_PTA24__ESDHC1_CLK 0x31ef + VF610_PAD_PTA25__ESDHC1_CMD 0x31ef + VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef + VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef + VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef + VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef + VF610_PAD_PTB28__GPIO_98 0x219d + >; + }; + pinctrl_fec1: fec1grp { fsl,pins = < VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2 diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi index 24036c44044..ce2ef5bec4f 100644 --- a/arch/arm/boot/dts/zynq-7000.dtsi +++ b/arch/arm/boot/dts/zynq-7000.dtsi @@ -30,7 +30,6 @@ /* kHz uV */ 666667 1000000 333334 1000000 - 222223 1000000 >; }; @@ -65,7 +64,7 @@ interrupt-parent = <&intc>; ranges; - adc@f8007100 { + adc: adc@f8007100 { compatible = "xlnx,zynq-xadc-1.00.a"; reg = <0xf8007100 0x20>; interrupts = <0 7 4>; @@ -137,7 +136,7 @@ <0xF8F00100 0x100>; }; - L2: cache-controller { + L2: cache-controller@f8f02000 { compatible = "arm,pl310-cache"; reg = <0xF8F02000 0x1000>; arm,data-latency = <3 2 2>; @@ -146,10 +145,10 @@ cache-level = <2>; }; - memory-controller@f8006000 { + mc: memory-controller@f8006000 { compatible = "xlnx,zynq-ddrc-a05"; reg = <0xf8006000 0x1000>; - } ; + }; uart0: serial@e0000000 { compatible = "xlnx,xuartps", "cdns,uart-r1p8"; @@ -195,7 +194,7 @@ gem0: ethernet@e000b000 { compatible = "cdns,gem"; - reg = <0xe000b000 0x4000>; + reg = <0xe000b000 0x1000>; status = "disabled"; interrupts = <0 22 4>; clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>; @@ -206,7 +205,7 @@ gem1: ethernet@e000c000 { compatible = "cdns,gem"; - reg = <0xe000c000 0x4000>; + reg = <0xe000c000 0x1000>; status = "disabled"; interrupts = <0 45 4>; clocks = <&clkc 31>, <&clkc 31>, <&clkc 14>; @@ -315,5 +314,16 @@ reg = <0xf8f00600 0x20>; clocks = <&clkc 4>; }; + + watchdog0: watchdog@f8005000 { + clocks = <&clkc 45>; + compatible = "xlnx,zynq-wdt-r1p2"; + device_type = "watchdog"; + interrupt-parent = <&intc>; + interrupts = <0 9 1>; + reg = <0xf8005000 0x1000>; + reset = <0>; + timeout-sec = <10>; + }; }; }; diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts index e1f51ca127f..0429bbd89fb 100644 --- a/arch/arm/boot/dts/zynq-parallella.dts +++ b/arch/arm/boot/dts/zynq-parallella.dts @@ -34,6 +34,10 @@ }; }; +&clkc { + fclk-enable = <0xf>; +}; + &gem0 { status = "okay"; phy-mode = "rgmii-id"; diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index d86771abbf5..72041f002b7 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c @@ -26,6 +26,7 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/edma.h> +#include <linux/dma-mapping.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_dma.h> @@ -1623,6 +1624,11 @@ static int edma_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct device *dev = &pdev->dev; int ret; + struct platform_device_info edma_dev_info = { + .name = "edma-dma-engine", + .dma_mask = DMA_BIT_MASK(32), + .parent = &pdev->dev, + }; if (node) { /* Check if this is a second instance registered */ @@ -1793,6 +1799,9 @@ static int edma_probe(struct platform_device *pdev) edma_write_array(j, EDMA_QRAE, i, 0x0); } arch_num_cc++; + + edma_dev_info.id = j; + platform_device_register_full(&edma_dev_info); } return 0; diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 72058b8a6f4..e21ef830a48 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y 
CONFIG_MMC_DW_EXYNOS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_MAX77802=y CONFIG_RTC_DRV_S5M=y CONFIG_RTC_DRV_S3C=y CONFIG_DMADEVICES=y CONFIG_PL330_DMA=y CONFIG_COMMON_CLK_MAX77686=y +CONFIG_COMMON_CLK_MAX77802=y CONFIG_COMMON_CLK_S2MPS11=y CONFIG_EXYNOS_IOMMU=y CONFIG_IIO=y diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index e688741c89a..e6b0007355f 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y # CONFIG_HW_RANDOM is not set CONFIG_I2C_CHARDEV=y CONFIG_I2C_IMX=y +CONFIG_SPI=y CONFIG_SPI_IMX=y CONFIG_SPI_SPIDEV=y CONFIG_GPIO_SYSFS=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 8fca6e276b6..6790f1b3f3a 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_ALGOPCF=m CONFIG_I2C_ALGOPCA=m CONFIG_I2C_IMX=y +CONFIG_SPI=y CONFIG_SPI_IMX=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_MC9S08DZ60=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 491b7d5523b..9d7a32f93fc 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_EXYNOS5=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_S3C2410=y CONFIG_I2C_SIRF=y CONFIG_I2C_TEGRA=y CONFIG_I2C_ST=y @@ -235,6 +236,7 @@ CONFIG_SPI_TEGRA20_SLINK=y CONFIG_SPI_XILINX=y CONFIG_PINCTRL_AS3722=y CONFIG_PINCTRL_PALMAS=y +CONFIG_PINCTRL_APQ8084=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_GPIO_DWAPB=y @@ -261,6 +263,7 @@ CONFIG_WATCHDOG=y CONFIG_XILINX_WATCHDOG=y CONFIG_ORION_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y +CONFIG_MESON_WATCHDOG=y CONFIG_MFD_AS3722=y CONFIG_MFD_BCM590XX=y CONFIG_MFD_CROS_EC=y @@ -353,6 +356,7 @@ CONFIG_MMC_MVSDIO=y CONFIG_MMC_SUNXI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_EXYNOS=y +CONFIG_MMC_DW_ROCKCHIP=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -409,6 +413,7 @@ CONFIG_NVEC_POWER=y CONFIG_NVEC_PAZ00=y CONFIG_QCOM_GSBI=y CONFIG_COMMON_CLK_QCOM=y +CONFIG_APQ_MMCC_8084=y CONFIG_MSM_GCC_8660=y CONFIG_MSM_MMCC_8960=y CONFIG_MSM_MMCC_8974=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 16e719c268d..b3f86670d2e 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -86,7 +86,6 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y # CONFIG_INET_LRO is not set -CONFIG_IPV6=y CONFIG_NETFILTER=y CONFIG_CAN=m CONFIG_CAN_C_CAN=m @@ -112,6 +111,7 @@ CONFIG_MTD_OOPS=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_ECC_BCH=y CONFIG_MTD_NAND_OMAP2=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y @@ -317,7 +317,7 @@ CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_QUOTA=y CONFIG_QFMT_V2=y -CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS4_FS=m CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index d7a5855a5db..a2956c3112f 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -1,5 +1,6 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_FHANDLE=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -11,23 +12,17 @@ CONFIG_PROFILING=y CONFIG_OPROFILE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_HOTPLUG=y # CONFIG_LBDAF is not 
set # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_CFQ is not set CONFIG_ARCH_SOCFPGA=y -CONFIG_MACH_SOCFPGA_CYCLONE5=y CONFIG_ARM_THUMBEE=y -# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set -# CONFIG_CACHE_L2X0 is not set -CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_AEABI=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="" CONFIG_VFP=y CONFIG_NEON=y CONFIG_NET=y @@ -41,38 +36,30 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y +CONFIG_IPV6=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_VLAN_8021Q=y +CONFIG_VLAN_8021Q_GVRP=y CONFIG_CAN=y -CONFIG_CAN_RAW=y -CONFIG_CAN_BCM=y -CONFIG_CAN_GW=y -CONFIG_CAN_DEV=y -CONFIG_CAN_CALC_BITTIMING=y CONFIG_CAN_C_CAN=y CONFIG_CAN_C_CAN_PLATFORM=y CONFIG_CAN_DEBUG_DEVICES=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y -CONFIG_PROC_DEVICETREE=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_SRAM=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y CONFIG_STMMAC_ETH=y +CONFIG_DWMAC_SOCFPGA=y CONFIG_MICREL_PHY=y -# CONFIG_STMMAC_PHY_ID_ZERO_WORKAROUND is not set CONFIG_INPUT_EVDEV=y -CONFIG_DWMAC_SOCFPGA=y -CONFIG_PPS=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_PTP_1588_CLOCK=y -CONFIG_VLAN_8021Q=y -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_GARP=y -CONFIG_IPV6=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_AMBAKMI=y CONFIG_LEGACY_PTY_COUNT=16 @@ -81,45 +68,43 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 CONFIG_SERIAL_8250_DW=y +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_DWAPB=y -# CONFIG_RTC_HCTOSYS is not set +CONFIG_PMBUS=y +CONFIG_SENSORS_LTC2978=y +CONFIG_SENSORS_LTC2978_REGULATOR=y CONFIG_WATCHDOG=y CONFIG_DW_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_USB=y +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_MMC=y +CONFIG_MMC_DW=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT3_FS=y -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -# CONFIG_DNOTIFY is not set -# CONFIG_INOTIFY_USER is not set -CONFIG_FHANDLE=y +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y CONFIG_NTFS_RW=y CONFIG_TMPFS=y -CONFIG_JFFS2_FS=y +CONFIG_CONFIGFS_FS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y CONFIG_ENABLE_DEFAULT_TRACERS=y CONFIG_DEBUG_USER=y CONFIG_XZ_DEC=y -CONFIG_I2C=y -CONFIG_I2C_DESIGNWARE_CORE=y -CONFIG_I2C_DESIGNWARE_PLATFORM=y -CONFIG_I2C_CHARDEV=y -CONFIG_MMC=y -CONFIG_MMC_DW=y -CONFIG_PM=y -CONFIG_SUSPEND=y -CONFIG_MMC_UNSAFE_RESUME=y -CONFIG_USB=y -CONFIG_USB_DWC2=y -CONFIG_USB_DWC2_HOST=y -CONFIG_USB_DWC2_PLATFORM=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 84704531310..f7ac0379850 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -76,6 +76,7 @@ CONFIG_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MFD_AXP20X=y CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index fc44d3761f9..ce73ab63541 100644 --- 
a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -44,16 +44,6 @@ struct cpu_context_save { __u32 extra[2]; /* Xscale 'acc' register, etc */ }; -struct arm_restart_block { - union { - /* For user cache flushing */ - struct { - unsigned long start; - unsigned long end; - } cache; - }; -}; - /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. @@ -79,7 +69,6 @@ struct thread_info { unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; - struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 3aaa75cae90..705bb762067 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -412,6 +412,7 @@ #define __NR_seccomp (__NR_SYSCALL_BASE+383) #define __NR_getrandom (__NR_SYSCALL_BASE+384) #define __NR_memfd_create (__NR_SYSCALL_BASE+385) +#define __NR_bpf (__NR_SYSCALL_BASE+386) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 713e807621d..2d2d6087b9b 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -10,6 +10,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include <linux/compiler.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> @@ -39,10 +40,19 @@ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c * (http://gcc.gnu.org/PR8896) and incorrect structure * initialisation in fs/jffs2/erase.c + * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854 + * miscompiles find_get_entry(), and can result in EXT3 and EXT4 + * filesystem corruption (possibly other FS too). */ +#ifdef __GNUC__ #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) #error Your compiler is too buggy; it is known to miscompile kernels. -#error Known good compilers: 3.3 +#error Known good compilers: 3.3, 4.x +#endif +#if GCC_VERSION >= 40800 && GCC_VERSION < 40803 +#error Your compiler is too buggy; it is known to miscompile kernels +#error and result in filesystem corruption and oopses. 
+#endif #endif int main(void) diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 9f899d8fdcc..e51833f8cc3 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -395,6 +395,7 @@ CALL(sys_seccomp) CALL(sys_getrandom) /* 385 */ CALL(sys_memfd_create) + CALL(sys_bpf) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0c8b10801d3..9f5d81881eb 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs) return regs->ARM_r0; } -static long do_cache_op_restart(struct restart_block *); - static inline int __do_cache_op(unsigned long start, unsigned long end) { @@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end) do { unsigned long chunk = min(PAGE_SIZE, end - start); - if (signal_pending(current)) { - struct thread_info *ti = current_thread_info(); - - ti->restart_block = (struct restart_block) { - .fn = do_cache_op_restart, - }; - - ti->arm_restart_block = (struct arm_restart_block) { - { - .cache = { - .start = start, - .end = end, - }, - }, - }; - - return -ERESTART_RESTARTBLOCK; - } + if (fatal_signal_pending(current)) + return 0; ret = flush_cache_user_range(start, start + chunk); if (ret) @@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end) return 0; } -static long do_cache_op_restart(struct restart_block *unused) -{ - struct arm_restart_block *restart_block; - - restart_block = ¤t_thread_info()->arm_restart_block; - return __do_cache_op(restart_block->cache.start, - restart_block->cache.end); -} - static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 57a403a5c22..8664ff17cbb 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pgd = pgdp + pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); - unmap_puds(kvm, pgd, addr, next); + if (!pgd_none(*pgd)) + unmap_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } @@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) return kvm_vcpu_dabt_iswrite(vcpu); } +static bool kvm_is_device_pfn(unsigned long pfn) +{ + return !pfn_valid(pfn); +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) @@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (is_error_pfn(pfn)) return -EFAULT; - if (kvm_is_mmio_pfn(pfn)) + if (kvm_is_device_pfn(pfn)) mem_type = PAGE_S2_DEVICE; spin_lock(&kvm->mmu_lock); diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 8c35ae4ff17..07a09570175 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -20,7 +20,7 @@ #include <linux/input.h> #include <linux/io.h> #include <linux/irqchip.h> -#include <linux/mailbox.h> +#include <linux/pl320-ipc.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 1412daf4a71..4e79da7c5e3 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -50,8 +50,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", }; static const char *ssi_sels[] = { "pll3_pfd2_508m", 
"pll3_pfd3_454m", "pll4_audio_div", }; static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", }; -static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", }; -static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", }; +static const char *eim_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", }; +static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", }; static const char *vdo_axi_sels[] = { "axi", "ahb", }; static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", }; static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div", @@ -302,8 +302,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); - clk[IMX6QDL_CLK_EMI_SEL] = imx_clk_fixup_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_EMI_SLOW_SEL] = imx_clk_fixup_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels), imx_cscmr1_fixup); + clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup); + clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup); clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); @@ -354,8 +354,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); - clk[IMX6QDL_CLK_EMI_PODF] = imx_clk_fixup_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); - clk[IMX6QDL_CLK_EMI_SLOW_PODF] = imx_clk_fixup_divider("emi_slow_podf", "emi_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup); + clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); + clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup); clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); @@ -456,7 +456,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); clk[IMX6QDL_CLK_USDHC3] = 
imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clk[IMX6QDL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); - clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "emi_slow_podf", base + 0x80, 10); + clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14); clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c index a1781847505..40963725459 100644 --- a/arch/arm/mach-imx/clk-vf610.c +++ b/arch/arm/mach-imx/clk-vf610.c @@ -58,8 +58,14 @@ #define PFD_PLL1_BASE (anatop_base + 0x2b0) #define PFD_PLL2_BASE (anatop_base + 0x100) #define PFD_PLL3_BASE (anatop_base + 0xf0) +#define PLL1_CTRL (anatop_base + 0x270) +#define PLL2_CTRL (anatop_base + 0x30) #define PLL3_CTRL (anatop_base + 0x10) +#define PLL4_CTRL (anatop_base + 0x70) +#define PLL5_CTRL (anatop_base + 0xe0) +#define PLL6_CTRL (anatop_base + 0xa0) #define PLL7_CTRL (anatop_base + 0x20) +#define ANA_MISC1 (anatop_base + 0x160) static void __iomem *anatop_base; static void __iomem *ccm_base; @@ -67,25 +73,34 @@ static void __iomem *ccm_base; /* sources for multiplexer clocks, this is used multiple times */ static const char *fast_sels[] = { "firc", "fxosc", }; static const char *slow_sels[] = { "sirc_32k", "sxosc", }; -static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", }; -static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", }; -static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", }; +static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", }; +static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", }; +static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", }; +static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", }; +static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", }; +static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", }; +static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", }; +static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; +static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; +static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; +static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", }; static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", }; static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", }; static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", }; -static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; -static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; +static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", }; +static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", }; static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", }; -static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", 
"pll2_pfd4", "pll1_pfd4", }; -static const char *esdhc_sels[] = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", }; -static const char *dcu_sels[] = { "pll1_pfd2", "pll3_main", }; +static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", }; +static const char *esdhc_sels[] = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", }; +static const char *dcu_sels[] = { "pll1_pfd2", "pll3_usb_otg", }; static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", }; -static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", }; +static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", }; /* FTM counter clock source, not module clock */ static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", }; static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", }; -static struct clk_div_table pll4_main_div_table[] = { + +static struct clk_div_table pll4_audio_div_table[] = { { .val = 0, .div = 1 }, { .val = 1, .div = 2 }, { .val = 2, .div = 6 }, @@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0); clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0); + /* Clock source from external clock via LVDs PAD */ + clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0); + clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2); np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); @@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels)); clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels)); - clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1); - clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0); - clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1); - clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2); - clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3); - - clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1); - clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0); - clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1); - clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2); - clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3); - - clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1); - clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0); - clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1); - clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2); - clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3); - - clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1); - /* Enet pll: fixed 50Mhz */ - clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6); - /* pll6: default 960Mhz */ - clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1); - /* pll7: USB1 PLL at 480MHz */ - clk[VF610_CLK_PLL7_MAIN] = 
imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2); + clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + + clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); + clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); + clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1); + clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f); + clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3); + clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV, "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f); + clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1); + + clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + + /* Do not bypass PLLs initially */ + clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]); + clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]); + clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]); + clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]); + clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]); + clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]); + clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]); + + clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13); + clk[VF610_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", PLL2_CTRL, 13); + clk[VF610_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", PLL3_CTRL, 13); + clk[VF610_CLK_PLL4_AUDIO] = 
imx_clk_gate("pll4_audio", "pll4_bypass", PLL4_CTRL, 13); + clk[VF610_CLK_PLL5_ENET] = imx_clk_gate("pll5_enet", "pll5_bypass", PLL5_CTRL, 13); + clk[VF610_CLK_PLL6_VIDEO] = imx_clk_gate("pll6_video", "pll6_bypass", PLL6_CTRL, 13); + clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13); + + clk[VF610_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10)); + + clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0); + clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1); + clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2); + clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3); + + clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0); + clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1); + clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2); + clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3); + + clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0); + clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1); + clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2); + clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3); clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5); clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5); @@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3); clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2); - clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1); - clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock); - clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1); + clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1); + clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock); + clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1); - clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6); - clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6); + clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6); + clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6); clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4)); clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4)); @@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1); clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4)); - clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10); - clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 
20); + clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10); + clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_enet", 1, 20); clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4); clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7); clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24); diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h index 559c69a4773..7d11979da03 100644 --- a/arch/arm/mach-ixp4xx/include/mach/io.h +++ b/arch/arm/mach-ixp4xx/include/mach/io.h @@ -76,7 +76,7 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p) u32 n, byte_enables, data; if (!is_pci_memory(addr)) { - __raw_writeb(value, addr); + __raw_writeb(value, p); return; } @@ -141,7 +141,7 @@ static inline unsigned char __indirect_readb(const volatile void __iomem *p) u32 n, byte_enables, data; if (!is_pci_memory(addr)) - return __raw_readb(addr); + return __raw_readb(p); n = addr % 4; byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL; diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index 6478626e3ff..d0d39f150fa 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c @@ -188,7 +188,7 @@ static void __init thermal_quirk(void) static void __init mvebu_dt_init(void) { - if (of_machine_is_compatible("plathome,openblocks-ax3-4")) + if (of_machine_is_compatible("marvell,armadaxp")) i2c_quirk(); if (of_machine_is_compatible("marvell,a375-db")) { external_abort_quirk(); diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 2bdc3233abe..044b51185fc 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -400,6 +400,8 @@ int __init coherency_init(void) type == COHERENCY_FABRIC_TYPE_ARMADA_380) armada_375_380_coherency_init(np); + of_node_put(np); + return 0; } diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index d22c30d3ccf..8c58b71c272 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -917,6 +917,10 @@ static int __init omap_device_late_idle(struct device *dev, void *data) static int __init omap_device_late_init(void) { bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); + + WARN(!of_have_populated_dt(), + "legacy booting deprecated, please update to boot with .dts\n"); + return 0; } omap_late_initcall_sync(omap_device_late_init); diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index c95346c9482..cec9d6c6442 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void) platform_device_register(&omap3_rom_rng_device); } - - /* Only on some development boards */ - gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset"); } static void __init omap3_tao3530_legacy_init(void) diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h index bbf9df37ad4..d28fe291233 100644 --- a/arch/arm/mach-pxa/include/mach/addr-map.h +++ b/arch/arm/mach-pxa/include/mach/addr-map.h @@ -39,6 +39,11 @@ #define DMEMC_SIZE 0x00100000 /* + * Reserved space for low level debug virtual addresses within + * 0xf6200000..0xf6201000 + */ + +/* * Internal Memory Controller (PXA27x and later) */ #define IMEMC_PHYS 0x58000000 diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c 
b/arch/arm/mach-shmobile/clock-r8a7740.c index 0794f0426e7..19df9cb3049 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -455,7 +455,7 @@ enum { MSTP128, MSTP127, MSTP125, MSTP116, MSTP111, MSTP100, MSTP117, - MSTP230, + MSTP230, MSTP229, MSTP222, MSTP218, MSTP217, MSTP216, MSTP214, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, @@ -474,11 +474,12 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */ [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ - [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ + [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */ [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */ [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */ + [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */ [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */ [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ @@ -575,6 +576,10 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]), + CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]), + CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]), CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]), diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c index 126ddafad52..f6226520059 100644 --- a/arch/arm/mach-shmobile/clock-r8a7790.c +++ b/arch/arm/mach-shmobile/clock-r8a7790.c @@ -68,7 +68,7 @@ #define SDCKCR 0xE6150074 #define SD2CKCR 0xE6150078 -#define SD3CKCR 0xE615007C +#define SD3CKCR 0xE615026C #define MMC0CKCR 0xE6150240 #define MMC1CKCR 0xE6150244 #define SSPCKCR 0xE6150248 diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index b7bd8e50966..328657d011d 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -26,6 +26,7 @@ #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/input.h> +#include <linux/i2c/i2c-sh_mobile.h> #include <linux/io.h> #include <linux/serial_sci.h> #include <linux/sh_dma.h> @@ -192,11 +193,18 @@ static struct resource i2c4_resources[] = { }, }; +static struct i2c_sh_mobile_platform_data i2c_platform_data = { + .clks_per_count = 2, +}; + static struct platform_device i2c0_device = { .name = "i2c-sh_mobile", .id = 0, .resource = i2c0_resources, .num_resources = ARRAY_SIZE(i2c0_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c1_device = { @@ -204,6 +212,9 @@ static struct platform_device i2c1_device = { .id = 1, .resource = i2c1_resources, .num_resources = ARRAY_SIZE(i2c1_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c2_device = { @@ -211,6 +222,9 @@ static struct platform_device 
i2c2_device = { .id = 2, .resource = i2c2_resources, .num_resources = ARRAY_SIZE(i2c2_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c3_device = { @@ -218,6 +232,9 @@ static struct platform_device i2c3_device = { .id = 3, .resource = i2c3_resources, .num_resources = ARRAY_SIZE(i2c3_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static struct platform_device i2c4_device = { @@ -225,6 +242,9 @@ static struct platform_device i2c4_device = { .id = 4, .resource = i2c4_resources, .num_resources = ARRAY_SIZE(i2c4_resources), + .dev = { + .platform_data = &i2c_platform_data, + }, }; static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = { diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 572b8f719ff..60c443dadb5 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -40,7 +40,7 @@ extern void __iomem *rst_manager_base_addr; extern struct smp_operations socfpga_smp_ops; extern char secondary_trampoline, secondary_trampoline_end; -extern unsigned long cpu1start_addr; +extern unsigned long socfpga_cpu1start_addr; #define SOCFPGA_SCU_VIRT_BASE 0xfffec000 diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S index 95c115d8b5e..f65ea0af4af 100644 --- a/arch/arm/mach-socfpga/headsmp.S +++ b/arch/arm/mach-socfpga/headsmp.S @@ -9,21 +9,26 @@ */ #include <linux/linkage.h> #include <linux/init.h> +#include <asm/memory.h> .arch armv7-a ENTRY(secondary_trampoline) - movw r2, #:lower16:cpu1start_addr - movt r2, #:upper16:cpu1start_addr - - /* The socfpga VT cannot handle a 0xC0000000 page offset when loading - the cpu1start_addr, we bit clear it. Tested on HW and VT. */ - bic r2, r2, #0x40000000 - - ldr r0, [r2] - ldr r1, [r0] - bx r1 + /* CPU1 will always fetch from 0x0 when it is brought out of reset. + * Thus, we can just subtract the PAGE_OFFSET to get the physical + * address of &cpu1start_addr. This would not work for platforms + * where the physical memory does not start at 0x0. + */ + adr r0, 1f + ldmia r0, {r1, r2} + sub r2, r2, #PAGE_OFFSET + ldr r3, [r2] + ldr r4, [r3] + bx r4 + .align +1: .long . 
+ .long socfpga_cpu1start_addr ENTRY(secondary_trampoline_end) ENTRY(socfpga_secondary_startup) diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c index 5356a72bc8c..16ca97b039f 100644 --- a/arch/arm/mach-socfpga/platsmp.c +++ b/arch/arm/mach-socfpga/platsmp.c @@ -33,11 +33,11 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) { int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; - if (cpu1start_addr) { + if (socfpga_cpu1start_addr) { memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); __raw_writel(virt_to_phys(socfpga_secondary_startup), - (sys_manager_base_addr + (cpu1start_addr & 0x000000ff))); + (sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff))); flush_cache_all(); smp_wmb(); diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index adbf38314ca..383d61e138a 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -29,7 +29,7 @@ void __iomem *socfpga_scu_base_addr = ((void __iomem *)(SOCFPGA_SCU_VIRT_BASE)); void __iomem *sys_manager_base_addr; void __iomem *rst_manager_base_addr; -unsigned long cpu1start_addr; +unsigned long socfpga_cpu1start_addr; static struct map_desc scu_io_desc __initdata = { .virtual = SOCFPGA_SCU_VIRT_BASE, @@ -70,7 +70,7 @@ void __init socfpga_sysmgr_init(void) np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr"); if (of_property_read_u32(np, "cpu1-start-addr", - (u32 *) &cpu1start_addr)) + (u32 *) &socfpga_cpu1start_addr)) pr_err("SMP: Need cpu1-start-addr in device tree.\n"); sys_manager_base_addr = of_iomap(np, 0); diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c index da7be13aecc..ab95f5391a2 100644 --- a/arch/arm/mach-tegra/irq.c +++ b/arch/arm/mach-tegra/irq.c @@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg) static void tegra_mask(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR); } static void tegra_unmask(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET); } static void tegra_ack(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR); } static void tegra_eoi(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR); } static int tegra_retrigger(struct irq_data *d) { - if (d->irq < FIRST_LEGACY_IRQ) + if (d->hwirq < FIRST_LEGACY_IRQ) return 0; - tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET); + tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET); return 1; } @@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d) #ifdef CONFIG_PM_SLEEP static int tegra_set_wake(struct irq_data *d, unsigned int enable) { - u32 irq = d->irq; + u32 irq = d->hwirq; u32 index, mask; if (irq < FIRST_LEGACY_IRQ || diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index ae69809a9e4..7eb94e6fc37 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS config KUSER_HELPERS bool "Enable 
kuser helpers in vector page" if !NEED_KUSER_HELPERS + depends on MMU default y help Warning: disabling this option may break user programs. diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 55f9d6e0cc8..5e65ca8dea6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt; * @associativity: variable to return the calculated associativity in * @max_way_size: the maximum size in bytes for the cache ways */ -static void __init l2x0_cache_size_of_parse(const struct device_node *np, +static int __init l2x0_cache_size_of_parse(const struct device_node *np, u32 *aux_val, u32 *aux_mask, u32 *associativity, u32 max_way_size) @@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, of_property_read_u32(np, "cache-line-size", &line_size); if (!cache_size || !sets) - return; + return -ENODEV; /* All these l2 caches have the same line = block size actually */ if (!line_size) { @@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, if (way_size > max_way_size) { pr_err("L2C OF: set size %dKB is too large\n", way_size); - return; + return -EINVAL; } pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", @@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, if (way_size_bits < 1 || way_size_bits > 6) { pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", way_size); - return; + return -EINVAL; } mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; @@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np, *aux_val &= ~mask; *aux_val |= val; *aux_mask &= ~mask; + + return 0; } static void __init l2x0_of_parse(const struct device_node *np, @@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np, u32 dirty = 0; u32 val = 0, mask = 0; u32 assoc; + int ret; of_property_read_u32(np, "arm,tag-latency", &tag); if (tag) { @@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np, val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; } - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); + if (ret) + return; + if (assoc > 8) { pr_err("l2x0 of: cache setting yield too high associativity\n"); pr_err("l2x0 of: %d calculated, max 8\n", assoc); @@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np, u32 tag[3] = { 0, 0, 0 }; u32 filter[2] = { 0, 0 }; u32 assoc; + int ret; of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); if (tag[0] && tag[1] && tag[2]) @@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np, l2x0_base + L310_ADDR_FILTER_START); } - l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); + ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); + if (ret) + return; + switch (assoc) { case 16: *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; @@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np, *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; break; default: - pr_err("PL310 OF: cache setting yield illegal associativity\n"); - pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc); + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", + assoc); break; } } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c245d903927..e8907117861 100644 --- 
a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, { return dma_common_pages_remap(pages, size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); - return NULL; } /* diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 45aeaaca905..e17ed00828d 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn) { unsigned long vaddr; int idx, type; + struct page *page = pfn_to_page(pfn); pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 92bba32d923..9481f85c56e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -559,10 +559,10 @@ void __init mem_init(void) #ifdef CONFIG_MODULES " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" #endif - " .text : 0x%p" " - 0x%p" " (%4d kB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" - " .data : 0x%p" " - 0x%p" " (%4d kB)\n" - " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", + " .text : 0x%p" " - 0x%p" " (%4td kB)\n" + " .init : 0x%p" " - 0x%p" " (%4td kB)\n" + " .data : 0x%p" " - 0x%p" " (%4td kB)\n" + " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b3a947863ac..22ac2a6fbfe 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -270,7 +270,6 @@ __v7_pj4b_setup: /* Auxiliary Debug Modes Control 1 Register */ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ -#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ /* Auxiliary Debug Modes Control 2 Register */ @@ -293,7 +292,6 @@ __v7_pj4b_setup: /* Auxiliary Debug Modes Control 1 Register */ mrc p15, 1, r0, c15, c1, 1 orr r0, r0, #PJ4B_CLEAN_LINE - orr r0, r0, #PJ4B_BCK_OFF_STREX orr r0, r0, #PJ4B_INTER_PARITY bic r0, r0, #PJ4B_STATIC_BP mcr p15, 1, r0, c15, c1, 1 diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 23259f104c6..afa2b3c4df4 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend) mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID - mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs @@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume) mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID mcr p15, 0, r1, c2, c0, 0 @ translation table base addr - mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_xscale_do_resume) diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index b61a3bcc2fa..e048f6198d6 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #define orion_gpio_dbg_show NULL #endif +static void orion_gpio_unmask_irq(struct irq_data *d) +{ + struct irq_chip_generic *gc = 
irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 reg_val; + u32 mask = d->mask; + + irq_gc_lock(gc); + reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val |= mask; + irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_gc_unlock(gc); +} + +static void orion_gpio_mask_irq(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = d->mask; + u32 reg_val; + + irq_gc_lock(gc); + reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask); + reg_val &= ~mask; + irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask); + irq_gc_unlock(gc); +} + void __init orion_gpio_init(struct device_node *np, int gpio_base, int ngpio, void __iomem *base, int mask_offset, @@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np, ct = gc->chip_types; ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->chip.irq_mask = orion_gpio_mask_irq; + ct->chip.irq_unmask = orion_gpio_unmask_irq; ct->chip.irq_set_type = gpio_irq_set_type; ct->chip.name = ochip->chip.label; @@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np, ct->regs.ack = GPIO_EDGE_CAUSE_OFF; ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ct->chip.irq_ack = irq_gc_ack_clr_bit; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->chip.irq_mask = orion_gpio_mask_irq; + ct->chip.irq_unmask = orion_gpio_unmask_irq; ct->chip.irq_set_type = gpio_irq_set_type; ct->handler = handle_edge_irq; ct->chip.name = ochip->chip.label; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ac9afde76de..9532f8d5857 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1,5 +1,6 @@ config ARM64 def_bool y + select ARCH_BINFMT_ELF_RANDOMIZE_PIE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_SG_CHAIN select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST @@ -232,7 +233,7 @@ config ARM64_VA_BITS_42 config ARM64_VA_BITS_48 bool "48-bit" - depends on BROKEN + depends on !ARM_SMMU endchoice diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi index 295c72d52a1..f1ad9c2ab2e 100644 --- a/arch/arm64/boot/dts/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm-storm.dtsi @@ -599,7 +599,7 @@ compatible = "apm,xgene-enet"; status = "disabled"; reg = <0x0 0x17020000 0x0 0xd100>, - <0x0 0X17030000 0x0 0X400>, + <0x0 0X17030000 0x0 0Xc300>, <0x0 0X10000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; interrupts = <0x0 0x3c 0x4>; @@ -624,9 +624,9 @@ sgenet0: ethernet@1f210000 { compatible = "apm,xgene-enet"; status = "disabled"; - reg = <0x0 0x1f210000 0x0 0x10000>, - <0x0 0x1f200000 0x0 0X10000>, - <0x0 0x1B000000 0x0 0X20000>; + reg = <0x0 0x1f210000 0x0 0xd100>, + <0x0 0x1f200000 0x0 0Xc300>, + <0x0 0x1B000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; interrupts = <0x0 0xA0 0x4>; dma-coherent; @@ -639,7 +639,7 @@ compatible = "apm,xgene-enet"; status = "disabled"; reg = <0x0 0x1f610000 0x0 0xd100>, - <0x0 0x1f600000 0x0 0X400>, + <0x0 0x1f600000 0x0 0Xc300>, <0x0 0x18000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; interrupts = <0x0 0x60 0x4>; diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi index ac2cb241802..c46cbb29f3c 100644 --- 
a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi @@ -22,7 +22,7 @@ bank-width = <4>; }; - vram@2,00000000 { + v2m_video_ram: vram@2,00000000 { compatible = "arm,vexpress-vram"; reg = <2 0x00000000 0x00800000>; }; @@ -179,9 +179,42 @@ clcd@1f0000 { compatible = "arm,pl111", "arm,primecell"; reg = <0x1f0000 0x1000>; + interrupt-names = "combined"; interrupts = <14>; clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>; clock-names = "clcdclk", "apb_pclk"; + arm,pl11x,framebuffer = <0x18000000 0x00180000>; + memory-region = <&v2m_video_ram>; + max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */ + + port { + v2m_clcd_pads: endpoint { + remote-endpoint = <&v2m_clcd_panel>; + arm,pl11x,tft-r0g0b0-pads = <0 8 16>; + }; + }; + + panel { + compatible = "panel-dpi"; + + port { + v2m_clcd_panel: endpoint { + remote-endpoint = <&v2m_clcd_pads>; + }; + }; + + panel-timing { + clock-frequency = <63500127>; + hactive = <1024>; + hback-porch = <152>; + hfront-porch = <48>; + hsync-len = <104>; + vactive = <768>; + vback-porch = <23>; + vfront-porch = <3>; + vsync-len = <4>; + }; + }; }; virtio_block@0130000 { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 9cd37de9aa8..dd301be89ec 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -35,6 +35,9 @@ CONFIG_MODULE_UNLOAD=y CONFIG_ARCH_THUNDER=y CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_XGENE=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCI_XGENE=y CONFIG_SMP=y CONFIG_PREEMPT=y CONFIG_KSM=y @@ -52,6 +55,7 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set +CONFIG_BPF_JIT=y # CONFIG_WIRELESS is not set CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y @@ -65,19 +69,21 @@ CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y CONFIG_AHCI_XGENE=y -CONFIG_PHY_XGENE=y CONFIG_PATA_PLATFORM=y CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_TUN=y CONFIG_VIRTIO_NET=y +CONFIG_NET_XGENE=y CONFIG_SMC91X=y CONFIG_SMSC911X=y -CONFIG_NET_XGENE=y # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y @@ -86,22 +92,40 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_VIRTIO_CONSOLE=y # CONFIG_HW_RANDOM is not set +# CONFIG_HMC_DRV is not set +CONFIG_SPI=y +CONFIG_SPI_PL022=y +CONFIG_GPIO_PL061=y +CONFIG_GPIO_XGENE=y # CONFIG_HWMON is not set CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_FB=y +CONFIG_FB_ARMCLCD=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y +CONFIG_USB_ULPI=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SPI=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_XGENE=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y # CONFIG_IOMMU_SUPPORT is not set +CONFIG_PHY_XGENE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 253e33bc94f..56de5aadede 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -37,8 +37,8 @@ typedef s32 compat_ssize_t; typedef s32 compat_time_t; 
typedef s32 compat_clock_t; typedef s32 compat_pid_t; -typedef u32 __compat_uid_t; -typedef u32 __compat_gid_t; +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; typedef u16 __compat_uid16_t; typedef u16 __compat_gid16_t; typedef u32 __compat_uid32_t; diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 01d3aab64b7..1f65be39313 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -126,7 +126,7 @@ typedef struct user_fpsimd_state elf_fpregset_t; * that it will "exec", and that there is sufficient room for the brk. */ extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_64 / 3)) +#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) /* * When the program starts, a1 contains a pointer to a function to be @@ -169,7 +169,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define COMPAT_ELF_PLATFORM ("v8l") #endif -#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3)) +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. */ #define COMPAT_ELF_NGREG 18 diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h index 8e24ef3f7c8..b4f6b19a8a6 100644 --- a/arch/arm64/include/asm/irq_work.h +++ b/arch/arm64/include/asm/irq_work.h @@ -1,6 +1,8 @@ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H +#ifdef CONFIG_SMP + #include <asm/smp.h> static inline bool arch_irq_work_has_interrupt(void) @@ -8,4 +10,13 @@ static inline bool arch_irq_work_has_interrupt(void) return !!__smp_cross_call; } +#else + +static inline bool arch_irq_work_has_interrupt(void) +{ + return false; +} + +#endif + #endif /* __ASM_IRQ_WORK_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ccc7087d3c4..a62cd077457 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x) * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET +#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index da1f06b535e..9dfdac4a74a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -792,3 +792,5 @@ __SYSCALL(__NR_renameat2, sys_renameat2) __SYSCALL(__NR_getrandom, sys_getrandom) #define __NR_memfd_create 385 __SYSCALL(__NR_memfd_create, sys_memfd_create) +#define __NR_bpf 386 +__SYSCALL(__NR_bpf, sys_bpf) diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 619b1dd7bcd..d18a4494096 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -54,18 +54,17 @@ ENTRY(efi_stub_entry) b.eq efi_load_fail /* - * efi_entry() will have relocated the kernel image if necessary - * and we return here with device tree address in x0 and the kernel - * entry point stored at *image_addr. Save those values in registers - * which are callee preserved. + * efi_entry() will have copied the kernel image if necessary and we + * return here with device tree address in x0 and the kernel entry + * point stored at *image_addr. Save those values in registers which + * are callee preserved. 
*/ mov x20, x0 // DTB address ldr x0, [sp, #16] // relocated _text address mov x21, x0 /* - * Flush dcache covering current runtime addresses - * of kernel text/data. Then flush all of icache. + * Calculate size of the kernel Image (same for original and copy). */ adrp x1, _text add x1, x1, #:lo12:_text @@ -73,9 +72,24 @@ ENTRY(efi_stub_entry) add x2, x2, #:lo12:_edata sub x1, x2, x1 + /* + * Flush the copied Image to the PoC, and ensure it is not shadowed by + * stale icache entries from before relocation. + */ bl __flush_dcache_area ic ialluis + /* + * Ensure that the rest of this function (in the original Image) is + * visible when the caches are disabled. The I-cache can't have stale + * entries for the VA range of the current image, so no maintenance is + * necessary. + */ + adr x0, efi_stub_entry + adr x1, efi_stub_entry_end + sub x1, x1, x0 + bl __flush_dcache_area + /* Turn off Dcache and MMU */ mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 @@ -105,4 +119,5 @@ efi_load_fail: ldp x29, x30, [sp], #32 ret +efi_stub_entry_end: ENDPROC(efi_stub_entry) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 03aaa99e1ea..95c49ebc660 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -89,7 +89,8 @@ static int __init uefi_init(void) */ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { pr_err("System table signature incorrect\n"); - return -EINVAL; + retval = -EINVAL; + goto out; } if ((efi.systab->hdr.revision >> 16) < 2) pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", @@ -103,6 +104,7 @@ static int __init uefi_init(void) for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) vendor[i] = c16[i]; vendor[i] = '\0'; + early_memunmap(c16, sizeof(vendor)); } pr_info("EFI v%u.%.02u by %s\n", @@ -113,29 +115,11 @@ static int __init uefi_init(void) if (retval == 0) set_bit(EFI_CONFIG_TABLES, &efi.flags); - early_memunmap(c16, sizeof(vendor)); +out: early_memunmap(efi.systab, sizeof(efi_system_table_t)); - return retval; } -static __initdata char memory_type_name[][32] = { - {"Reserved"}, - {"Loader Code"}, - {"Loader Data"}, - {"Boot Code"}, - {"Boot Data"}, - {"Runtime Code"}, - {"Runtime Data"}, - {"Conventional Memory"}, - {"Unusable Memory"}, - {"ACPI Reclaim Memory"}, - {"ACPI Memory NVS"}, - {"Memory Mapped I/O"}, - {"MMIO Port Space"}, - {"PAL Code"}, -}; - /* * Return true for RAM regions we want to permanently reserve. 
*/ @@ -166,10 +150,13 @@ static __init void reserve_regions(void) paddr = md->phys_addr; npages = md->num_pages; - if (uefi_debug) - pr_info(" 0x%012llx-0x%012llx [%s]", + if (uefi_debug) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s", paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, - memory_type_name[md->type]); + efi_md_typeattr_format(buf, sizeof(buf), md)); + } memrange_efi_to_native(&paddr, &npages); size = npages << PAGE_SHIFT; @@ -393,11 +380,16 @@ static int __init arm64_enter_virtual_mode(void) return -1; } - pr_info("Remapping and enabling EFI services.\n"); - - /* replace early memmap mapping with permanent mapping */ mapsize = memmap.map_end - memmap.map; early_memunmap(memmap.map, mapsize); + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return -1; + } + + pr_info("Remapping and enabling EFI services.\n"); + /* replace early memmap mapping with permanent mapping */ memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, mapsize); memmap.map_end = memmap.map + mapsize; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index e007714ded0..8cd27fedc8b 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) * which ends with "dsb; isb" pair guaranteeing global * visibility. */ - atomic_set(&pp->cpu_count, -1); + /* Notify other processors with an additional increment. */ + atomic_inc(&pp->cpu_count); } else { - while (atomic_read(&pp->cpu_count) != -1) + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) cpu_relax(); isb(); } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c3065dbc4fa..fde9923af85 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -378,8 +378,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) { return randomize_base(mm->brk); } - -unsigned long randomize_et_dyn(unsigned long base) -{ - return randomize_base(base); -} diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 866c1c82186..663da771580 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -528,7 +528,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) if (WARN_ON_ONCE(!index)) return -EINVAL; - if (state->type == PSCI_POWER_STATE_TYPE_STANDBY) + if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY) ret = psci_ops.cpu_suspend(state[index - 1], 0); else ret = __cpu_suspend(index, psci_suspend_finisher); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4cc3b719208..3d7c2df8994 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, + + /* ICC_SRE_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), + trap_raz_wi }, + /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, @@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, + + /* ICC_SRE */ + { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, + { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 
}; diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 6e0ed93d51f..c17967fdf5f 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 ) sub x1, x1, #2 4: adds x1, x1, #1 b.mi 5f - strb wzr, [x0] +USER(9f, strb wzr, [x0] ) 5: mov x0, #0 ret ENDPROC(__clear_user) diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index fa324bd5a5c..4a07630a661 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache); static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; #if CONFIG_ARM64_PGTABLE_LEVELS > 2 -static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; +static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; #endif #if CONFIG_ARM64_PGTABLE_LEVELS > 3 -static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; +static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; #endif static inline pud_t * __init early_ioremap_pud(unsigned long addr) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6894ef3e623..f4f8b500f74 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys, + unsigned long end, phys_addr_t phys, int map_io) { pud_t *pud; @@ -297,11 +297,15 @@ static void __init map_mem(void) * create_mapping requires puds, pmds and ptes to be allocated from * memory addressable from the initial direct kernel mapping. * - * The initial direct kernel mapping, located at swapper_pg_dir, - * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be - * aligned to 2MB as per Documentation/arm64/booting.txt). + * The initial direct kernel mapping, located at swapper_pg_dir, gives + * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from + * PHYS_OFFSET (which must be aligned to 2MB as per + * Documentation/arm64/booting.txt). */ - limit = PHYS_OFFSET + PUD_SIZE; + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) + limit = PHYS_OFFSET + PMD_SIZE; + else + limit = PHYS_OFFSET + PUD_SIZE; memblock_set_current_limit(limit); /* map all the memory banks */ diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 62c6101df26..6682b361d3a 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -30,12 +30,14 @@ #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) +static struct kmem_cache *pgd_cache; + pgd_t *pgd_alloc(struct mm_struct *mm) { if (PGD_SIZE == PAGE_SIZE) return (pgd_t *)get_zeroed_page(GFP_KERNEL); else - return kzalloc(PGD_SIZE, GFP_KERNEL); + return kmem_cache_zalloc(pgd_cache, GFP_KERNEL); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) @@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) if (PGD_SIZE == PAGE_SIZE) free_page((unsigned long)pgd); else - kfree(pgd); + kmem_cache_free(pgd_cache, pgd); +} + +static int __init pgd_cache_init(void) +{ + /* + * Naturally aligned pgds required by the architecture. 
+ */ + if (PGD_SIZE != PAGE_SIZE) + pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE, + SLAB_PANIC, NULL); + return 0; } +core_initcall(pgd_cache_init); diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index 2134f7e6c28..de0a81a539a 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -144,8 +144,12 @@ /* Data-processing (2 source) */ /* Rd = Rn OP Rm */ -#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \ - A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV) +#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \ + A64_VARIANT(sf), AARCH64_INSN_DATA2_##type) +#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV) +#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV) +#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV) +#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV) /* Data-processing (3 source) */ /* Rd = Ra + Rn * Rm */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 7ae33545535..41f1e3e2ea2 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -19,12 +19,13 @@ #define pr_fmt(fmt) "bpf_jit: " fmt #include <linux/filter.h> -#include <linux/moduleloader.h> #include <linux/printk.h> #include <linux/skbuff.h> #include <linux/slab.h> + #include <asm/byteorder.h> #include <asm/cacheflush.h> +#include <asm/debug-monitors.h> #include "bpf_jit.h" @@ -119,6 +120,14 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from, return to - from; } +static void jit_fill_hole(void *area, unsigned int size) +{ + u32 *ptr; + /* We are guaranteed to have aligned memory. */ + for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) + *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT); +} + static inline int epilogue_offset(const struct jit_ctx *ctx) { int to = ctx->offset[ctx->prog->len - 1]; @@ -196,6 +205,12 @@ static void build_epilogue(struct jit_ctx *ctx) emit(A64_RET(A64_LR), ctx); } +/* JITs an eBPF instruction. + * Returns: + * 0 - successfully JITed an 8-byte eBPF instruction. + * >0 - successfully JITed a 16-byte eBPF instruction. + * <0 - failed to JIT. + */ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) { const u8 code = insn->code; @@ -252,6 +267,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit(A64_MUL(is64, tmp, tmp, src), ctx); emit(A64_SUB(is64, dst, dst, tmp), ctx); break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(A64_LSLV(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(A64_LSRV(is64, dst, dst, src), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(A64_ASRV(is64, dst, dst, src), ctx); + break; /* dst = -dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: @@ -443,6 +470,27 @@ emit_cond_jmp: emit(A64_B(jmp_offset), ctx); break; + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + const struct bpf_insn insn1 = insn[1]; + u64 imm64; + + if (insn1.code != 0 || insn1.src_reg != 0 || + insn1.dst_reg != 0 || insn1.off != 0) { + /* Note: verifier in BPF core must catch invalid + * instructions. 
+ */ + pr_err_once("Invalid BPF_LD_IMM64 instruction\n"); + return -EINVAL; + } + + imm64 = (u64)insn1.imm << 32 | imm; + emit_a64_mov_i64(dst, imm64, ctx); + + return 1; + } + /* LDX: dst = *(size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_H: @@ -594,6 +642,10 @@ static int build_body(struct jit_ctx *ctx) ctx->offset[i] = ctx->idx; ret = build_insn(insn, ctx); + if (ret > 0) { + i++; + continue; + } if (ret) return ret; } @@ -613,8 +665,10 @@ void bpf_jit_compile(struct bpf_prog *prog) void bpf_int_jit_compile(struct bpf_prog *prog) { + struct bpf_binary_header *header; struct jit_ctx ctx; int image_size; + u8 *image_ptr; if (!bpf_jit_enable) return; @@ -636,23 +690,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog) goto out; build_prologue(&ctx); - build_epilogue(&ctx); /* Now we know the actual image size. */ image_size = sizeof(u32) * ctx.idx; - ctx.image = module_alloc(image_size); - if (unlikely(ctx.image == NULL)) + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) goto out; /* 2. Now, the actual pass. */ + ctx.image = (u32 *)image_ptr; ctx.idx = 0; + build_prologue(&ctx); ctx.body_offset = ctx.idx; if (build_body(&ctx)) { - module_free(NULL, ctx.image); + bpf_jit_binary_free(header); goto out; } @@ -663,17 +719,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog) bpf_jit_dump(prog->len, image_size, 2, ctx.image); bpf_flush_icache(ctx.image, ctx.image + ctx.idx); - prog->bpf_func = (void *)ctx.image; - prog->jited = 1; + set_memory_ro((unsigned long)header, header->pages); + prog->bpf_func = (void *)ctx.image; + prog->jited = true; out: kfree(ctx.offset); } void bpf_jit_free(struct bpf_prog *prog) { - if (prog->jited) - module_free(NULL, prog->bpf_func); + unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK; + struct bpf_binary_header *header = (void *)addr; + + if (!prog->jited) + goto free_filter; + + set_memory_rw(addr, header->pages); + bpf_jit_binary_free(header); - kfree(prog); +free_filter: + bpf_prog_unlock_free(prog); } diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 741b99c1a0b..c52d7540dc0 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -568,6 +568,7 @@ efi_init (void) { const char *unit; unsigned long size; + char buf[64]; md = p; size = md->num_pages << EFI_PAGE_SHIFT; @@ -586,9 +587,10 @@ efi_init (void) unit = "KB"; } - printk("mem%02d: type=%2u, attr=0x%016lx, " + printk("mem%02d: %s " "range=[0x%016lx-0x%016lx) (%4lu%s)\n", - i, md->type, md->attribute, md->phys_addr, + i, efi_md_typeattr_format(buf, sizeof(buf), md), + md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); } } diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index ec6b9acb6be..dbe46f43884 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, for (i = 0; i < npages; i++) { pfn = gfn_to_pfn(kvm, base_gfn + i); - if (!kvm_is_mmio_pfn(pfn)) { + if (!kvm_is_reserved_pfn(pfn)) { kvm_set_pmt_entry(kvm, base_gfn + i, pfn << PAGE_SHIFT, _PAGE_AR_RWX | _PAGE_MA_WB); diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 4ef7a54813e..75e75d7b170 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 354 +#define NR_syscalls 355 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h 
b/arch/m68k/include/uapi/asm/unistd.h index b419c6b7ac3..2c1bec9a14b 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -359,5 +359,6 @@ #define __NR_renameat2 351 #define __NR_getrandom 352 #define __NR_memfd_create 353 +#define __NR_bpf 354 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 05b46c2b08b..2ca219e184c 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -374,4 +374,5 @@ ENTRY(sys_call_table) .long sys_renameat2 .long sys_getrandom .long sys_memfd_create + .long sys_bpf diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 6feded3b0c4..a7736fa0580 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -129,6 +129,10 @@ endmenu menu "Kernel features" +config NR_CPUS + int + default "1" + config ADVANCED_OPTIONS bool "Prompt for advanced kernel configuration options" help diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index ea4b233647c..0a53362d554 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,6 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 387 +#define __NR_syscalls 388 #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index 1c2380bf8fe..c712677f8a2 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -402,5 +402,6 @@ #define __NR_seccomp 384 #define __NR_getrandom 385 #define __NR_memfd_create 386 +#define __NR_bpf 387 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index de59ee1d701..0166e890486 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -387,3 +387,4 @@ ENTRY(sys_call_table) .long sys_seccomp .long sys_getrandom /* 385 */ .long sys_memfd_create + .long sys_bpf diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 9037914f698..b30e41c0c03 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -660,8 +660,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, res = &hose->mem_resources[memno++]; break; } - if (res != NULL) - of_pci_range_to_resource(&range, dev, res); + if (res != NULL) { + res->name = dev->full_name; + res->flags = range.flags; + res->start = range.cpu_addr; + res->end = range.cpu_addr + range.size - 1; + res->parent = res->child = res->sibling = NULL; + } } /* If there's an ISA hole and the pci_mem_offset is -not- matching diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ad6badb6be7..9536ef912f5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2066,6 +2066,7 @@ config MIPS_CPS support is unavailable. 
config MIPS_CPS_PM + depends on MIPS_CPS select MIPS_CPC bool @@ -2100,9 +2101,17 @@ config 64BIT_PHYS_ADDR config ARCH_PHYS_ADDR_T_64BIT def_bool 64BIT_PHYS_ADDR +choice + prompt "SmartMIPS or microMIPS ASE support" + +config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS + bool "None" + help + Select this if you want neither microMIPS nor SmartMIPS support + config CPU_HAS_SMARTMIPS depends on SYS_SUPPORTS_SMARTMIPS - bool "Support for the SmartMIPS ASE" + bool "SmartMIPS" help SmartMIPS is a extension of the MIPS32 architecture aimed at increased security at both hardware and software level for @@ -2114,11 +2123,13 @@ config CPU_HAS_SMARTMIPS config CPU_MICROMIPS depends on SYS_SUPPORTS_MICROMIPS - bool "Build kernel using microMIPS ISA" + bool "microMIPS" help When this option is enabled the kernel will be built using the microMIPS ISA +endchoice + config CPU_HAS_MSA bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)" depends on CPU_SUPPORTS_MSA diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 23cb94806fb..58076472bdd 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -93,6 +93,15 @@ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib KBUILD_AFLAGS_MODULE += -mlong-calls KBUILD_CFLAGS_MODULE += -mlong-calls +# +# pass -msoft-float to GAS if it supports it. However on newer binutils +# (specifically newer than 2.24.51.20140728) we then also need to explicitly +# set ".set hardfloat" in all files which manipulate floating point registers. +# +ifneq ($(call as-option,-Wa$(comma)-msoft-float,),) + cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float +endif + cflags-y += -ffreestanding # diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c index 4d661a1d2da..9423f5aed28 100644 --- a/arch/mips/ath79/mach-db120.c +++ b/arch/mips/ath79/mach-db120.c @@ -113,7 +113,7 @@ static void __init db120_pci_init(u8 *eeprom) ath79_register_pci(); } #else -static inline void db120_pci_init(void) {} +static inline void db120_pci_init(u8 *eeprom) {} #endif /* CONFIG_PCI */ static void __init db120_setup(void) diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 74173404967..2bc4aa95944 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -809,6 +809,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = { .irq_set_type = octeon_irq_ciu_gpio_set_type, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif .flags = IRQCHIP_SET_TYPE_MASKED, }; @@ -823,6 +824,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = { .irq_set_type = octeon_irq_ciu_gpio_set_type, #ifdef CONFIG_SMP .irq_set_affinity = octeon_irq_ciu_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, #endif .flags = IRQCHIP_SET_TYPE_MASKED, }; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 38f4c32e281..5ebdb32d9a2 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -806,15 +806,6 @@ void __init prom_init(void) #endif } - if (octeon_is_simulation()) { - /* - * The simulator uses a mtdram device pre filled with - * the filesystem. Also specify the calibration delay - * to avoid calculating it every time. 
- */ - strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824"); - } - mips_hpt_frequency = octeon_get_clock_rate(); octeon_init_cvmcount(); diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h index e38c2811d4e..cdac7b3eeaf 100644 --- a/arch/mips/include/asm/asmmacro-32.h +++ b/arch/mips/include/asm/asmmacro-32.h @@ -13,6 +13,8 @@ #include <asm/mipsregs.h> .macro fpu_save_single thread tmp=t0 + .set push + SET_HARDFLOAT cfc1 \tmp, fcr31 swc1 $f0, THREAD_FPR0_LS64(\thread) swc1 $f1, THREAD_FPR1_LS64(\thread) @@ -47,9 +49,12 @@ swc1 $f30, THREAD_FPR30_LS64(\thread) swc1 $f31, THREAD_FPR31_LS64(\thread) sw \tmp, THREAD_FCR31(\thread) + .set pop .endm .macro fpu_restore_single thread tmp=t0 + .set push + SET_HARDFLOAT lw \tmp, THREAD_FCR31(\thread) lwc1 $f0, THREAD_FPR0_LS64(\thread) lwc1 $f1, THREAD_FPR1_LS64(\thread) @@ -84,6 +89,7 @@ lwc1 $f30, THREAD_FPR30_LS64(\thread) lwc1 $f31, THREAD_FPR31_LS64(\thread) ctc1 \tmp, fcr31 + .set pop .endm .macro cpu_save_nonscratch thread diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index cd9a98bc8f6..6caf8766b80 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -57,6 +57,8 @@ #endif /* CONFIG_CPU_MIPSR2 */ .macro fpu_save_16even thread tmp=t0 + .set push + SET_HARDFLOAT cfc1 \tmp, fcr31 sdc1 $f0, THREAD_FPR0_LS64(\thread) sdc1 $f2, THREAD_FPR2_LS64(\thread) @@ -75,11 +77,13 @@ sdc1 $f28, THREAD_FPR28_LS64(\thread) sdc1 $f30, THREAD_FPR30_LS64(\thread) sw \tmp, THREAD_FCR31(\thread) + .set pop .endm .macro fpu_save_16odd thread .set push .set mips64r2 + SET_HARDFLOAT sdc1 $f1, THREAD_FPR1_LS64(\thread) sdc1 $f3, THREAD_FPR3_LS64(\thread) sdc1 $f5, THREAD_FPR5_LS64(\thread) @@ -110,6 +114,8 @@ .endm .macro fpu_restore_16even thread tmp=t0 + .set push + SET_HARDFLOAT lw \tmp, THREAD_FCR31(\thread) ldc1 $f0, THREAD_FPR0_LS64(\thread) ldc1 $f2, THREAD_FPR2_LS64(\thread) @@ -133,6 +139,7 @@ .macro fpu_restore_16odd thread .set push .set mips64r2 + SET_HARDFLOAT ldc1 $f1, THREAD_FPR1_LS64(\thread) ldc1 $f3, THREAD_FPR3_LS64(\thread) ldc1 $f5, THREAD_FPR5_LS64(\thread) @@ -277,6 +284,7 @@ .macro cfcmsa rd, cs .set push .set noat + SET_HARDFLOAT .insn .word CFC_MSA_INSN | (\cs << 11) move \rd, $1 @@ -286,6 +294,7 @@ .macro ctcmsa cd, rs .set push .set noat + SET_HARDFLOAT move $1, \rs .word CTC_MSA_INSN | (\cd << 6) .set pop @@ -294,6 +303,7 @@ .macro ld_d wd, off, base .set push .set noat + SET_HARDFLOAT add $1, \base, \off .word LDD_MSA_INSN | (\wd << 6) .set pop @@ -302,6 +312,7 @@ .macro st_d wd, off, base .set push .set noat + SET_HARDFLOAT add $1, \base, \off .word STD_MSA_INSN | (\wd << 6) .set pop @@ -310,6 +321,7 @@ .macro copy_u_w rd, ws, n .set push .set noat + SET_HARDFLOAT .insn .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) /* move triggers an assembler bug... */ @@ -320,6 +332,7 @@ .macro copy_u_d rd, ws, n .set push .set noat + SET_HARDFLOAT .insn .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) /* move triggers an assembler bug... */ @@ -330,6 +343,7 @@ .macro insert_w wd, n, rs .set push .set noat + SET_HARDFLOAT /* move triggers an assembler bug... */ or $1, \rs, zero .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) @@ -339,6 +353,7 @@ .macro insert_d wd, n, rs .set push .set noat + SET_HARDFLOAT /* move triggers an assembler bug... 
*/ or $1, \rs, zero .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) @@ -381,6 +396,7 @@ st_d 31, THREAD_FPR31, \thread .set push .set noat + SET_HARDFLOAT cfcmsa $1, MSA_CSR sw $1, THREAD_MSA_CSR(\thread) .set pop @@ -389,6 +405,7 @@ .macro msa_restore_all thread .set push .set noat + SET_HARDFLOAT lw $1, THREAD_MSA_CSR(\thread) ctcmsa MSA_CSR, $1 .set pop @@ -441,6 +458,7 @@ .macro msa_init_all_upper .set push .set noat + SET_HARDFLOAT not $1, zero msa_init_upper 0 .set pop diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 51f80bd36fc..63b3468ede4 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h @@ -37,15 +37,15 @@ extern void nlm_cop2_restore(struct nlm_cop2_state *); #define cop2_present 1 #define cop2_lazy_restore 1 -#define cop2_save(r) do { (r); } while (0) -#define cop2_restore(r) do { (r); } while (0) +#define cop2_save(r) do { (void)(r); } while (0) +#define cop2_restore(r) do { (void)(r); } while (0) #else #define cop2_present 0 #define cop2_lazy_restore 0 -#define cop2_save(r) do { (r); } while (0) -#define cop2_restore(r) do { (r); } while (0) +#define cop2_save(r) do { (void)(r); } while (0) +#define cop2_restore(r) do { (void)(r); } while (0) #endif enum cu2_ops { diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h index 429481f9028..f184ba08853 100644 --- a/arch/mips/include/asm/fpregdef.h +++ b/arch/mips/include/asm/fpregdef.h @@ -14,6 +14,20 @@ #include <asm/sgidefs.h> +/* + * starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing + * hardfloat and softfloat object files. The kernel build uses soft-float by + * default, so we also need to pass -msoft-float along to GAS if it supports it. + * But this in turn causes assembler errors in files which access hardfloat + * registers. We detect if GAS supports "-msoft-float" in the Makefile and + * explicitly put ".set hardfloat" where floating point registers are touched. 
+ */ +#ifdef GAS_HAS_SET_HARDFLOAT +#define SET_HARDFLOAT .set hardfloat +#else +#define SET_HARDFLOAT +#endif + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 4d0aeda6839..dd562414cd5 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -145,8 +145,8 @@ static inline void lose_fpu(int save) if (is_msa_enabled()) { if (save) { save_msa(current); - asm volatile("cfc1 %0, $31" - : "=r"(current->thread.fpu.fcr31)); + current->thread.fpu.fcr31 = + read_32bit_cp1_register(CP1_STATUS); } disable_msa(); clear_thread_flag(TIF_USEDMSA); diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index 992aaba603b..b463f2aa5a6 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h @@ -24,7 +24,7 @@ do { \ asm volatile ( \ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \ " li %[tmp_err], 0\n" \ - "2:\n" \ + "2: .insn\n" \ \ ".section .fixup, \"ax\"\n" \ "3: li %[tmp_err], 1\n" \ @@ -46,7 +46,7 @@ do { \ asm volatile ( \ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\ " li %[tmp_err], 0\n" \ - "2:\n" \ + "2: .insn\n" \ \ ".section .fixup, \"ax\"\n" \ "3: li %[tmp_err], 1\n" \ diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index d9f932de80e..1c967abd545 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -8,19 +8,12 @@ extern void (*cpu_wait)(void); extern void r4k_wait(void); extern asmlinkage void __r4k_wait(void); extern void r4k_wait_irqoff(void); -extern void __pastwait(void); static inline int using_rollback_handler(void) { return cpu_wait == r4k_wait; } -static inline int address_is_in_r4k_wait_irqoff(unsigned long addr) -{ - return addr >= (unsigned long)r4k_wait_irqoff && - addr < (unsigned long)__pastwait; -} - extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e194f957ca8..fdbff44e548 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -20,9 +20,15 @@ #define WORD_INSN ".word" #endif +#ifdef CONFIG_CPU_MICROMIPS +#define NOP_INSN "nop32" +#else +#define NOP_INSN "nop" +#endif + static __always_inline bool arch_static_branch(struct static_key *key) { - asm_volatile_goto("1:\tnop\n\t" + asm_volatile_goto("1:\t" NOP_INSN "\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index 7d28f95b051..6d69332f21e 100644 --- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -41,10 +41,8 @@ #define cpu_has_mcheck 0 #define cpu_has_mdmx 0 #define cpu_has_mips16 0 -#define cpu_has_mips32r1 0 #define cpu_has_mips32r2 0 #define cpu_has_mips3d 0 -#define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 #define cpu_has_mipsmt 0 #define cpu_has_prefetch 0 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index cf3b580c3df..22a135ac91d 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -661,6 +661,8 @@ #define MIPS_CONF6_SYND (_ULCAST_(1) << 13) /* proAptiv FTLB on/off bit */ #define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) +/* FTLB probability bits */ +#define MIPS_CONF6_FTLBP_SHIFT (16) #define MIPS_CONF7_WII (_ULCAST_(1) << 31) @@ -1324,7 
+1326,7 @@ do { \ /* * Macros to access the floating point coprocessor control registers */ -#define read_32bit_cp1_register(source) \ +#define _read_32bit_cp1_register(source, gas_hardfloat) \ ({ \ int __res; \ \ @@ -1334,12 +1336,21 @@ do { \ " # gas fails to assemble cfc1 for some archs, \n" \ " # like Octeon. \n" \ " .set mips1 \n" \ + " "STR(gas_hardfloat)" \n" \ " cfc1 %0,"STR(source)" \n" \ " .set pop \n" \ : "=r" (__res)); \ __res; \ }) +#ifdef GAS_HAS_SET_HARDFLOAT +#define read_32bit_cp1_register(source) \ + _read_32bit_cp1_register(source, .set hardfloat) +#else +#define read_32bit_cp1_register(source) \ + _read_32bit_cp1_register(source, ) +#endif + #ifdef HAVE_AS_DSP #define rddsp(mask) \ ({ \ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 4520adc8699..cd6e0afc683 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr) */ static inline void protected_writeback_dcache_line(unsigned long addr) { +#ifdef CONFIG_EVA + protected_cachee_op(Hit_Writeback_Inv_D, addr); +#else protected_cache_op(Hit_Writeback_Inv_D, addr); +#endif } static inline void protected_writeback_scache_line(unsigned long addr) diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index a1095109023..22a5624e2fd 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -301,7 +301,8 @@ do { \ __get_kernel_common((x), size, __gu_ptr); \ else \ __get_user_common((x), size, __gu_ptr); \ - } \ + } else \ + (x) = 0; \ \ __gu_err; \ }) @@ -316,6 +317,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -630,6 +632,7 @@ do { \ " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ + " move %1, $0 \n" \ " j 2b \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ @@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void); "jal\t" #destination "\n\t" #endif -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -#define DADDI_SCRATCH "$0" -#else +#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ + defined(CONFIG_CPU_HAS_PREFETCH)) #define DADDI_SCRATCH "$3" +#else +#define DADDI_SCRATCH "$0" #endif extern size_t __copy_user(void *__to, const void *__from, size_t __n); @@ -1418,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n) } /* - * strlen_user: - Get the size of a string in user space. + * strnlen_user: - Get the size of a string in user space. * @str: The string to measure. * * Context: User context only. This function may sleep. @@ -1427,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n) * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. - * - * If there is a limit on the length of a valid string, you may wish to - * consider using strnlen_user() instead. + * If the string is too long, returns a value greater than @n. */ static inline long strnlen_user(const char __user *s, long n) { diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index bbcfb8ba810..91a3d197ede 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h @@ -9,6 +9,8 @@ #ifndef _UAPI_ASM_PTRACE_H #define _UAPI_ASM_PTRACE_H +#include <linux/types.h> + /* 0 - 31 are integer registers, 32 - 63 are fp registers. 
*/ #define FPR_BASE 32 #define PC 64 diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index fdb4923777d..d001bb1ad17 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -375,16 +375,17 @@ #define __NR_seccomp (__NR_Linux + 352) #define __NR_getrandom (__NR_Linux + 353) #define __NR_memfd_create (__NR_Linux + 354) +#define __NR_bpf (__NR_Linux + 355) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 354 +#define __NR_Linux_syscalls 355 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 354 +#define __NR_O32_Linux_syscalls 355 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -707,16 +708,17 @@ #define __NR_seccomp (__NR_Linux + 312) #define __NR_getrandom (__NR_Linux + 313) #define __NR_memfd_create (__NR_Linux + 314) +#define __NR_bpf (__NR_Linux + 315) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 314 +#define __NR_Linux_syscalls 315 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 314 +#define __NR_64_Linux_syscalls 315 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1043,15 +1045,16 @@ #define __NR_seccomp (__NR_Linux + 316) #define __NR_getrandom (__NR_Linux + 317) #define __NR_memfd_create (__NR_Linux + 318) +#define __NR_bpf (__NR_Linux + 319) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 318 +#define __NR_Linux_syscalls 319 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 318 +#define __NR_N32_Linux_syscalls 319 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index 290c23b5167..86495072a92 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) .set pop - .previous /*********************************************************************** * CPU1 warm restart vector (used for second and subsequent boots). @@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01) jr ra END(bmips_enable_xks01) - - .previous diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 7b2df224f04..4d7d99d601c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -144,7 +144,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, case mm_bc1t_op: preempt_disable(); if (is_fpu_owner()) - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + fcr31 = read_32bit_cp1_register(CP1_STATUS); else fcr31 = current->thread.fpu.fcr31; preempt_enable(); @@ -562,11 +562,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case cop1_op: preempt_disable(); if (is_fpu_owner()) - asm volatile( - ".set push\n" - "\t.set mips1\n" - "\tcfc1\t%0,$31\n" - "\t.set pop" : "=r" (fcr31)); + fcr31 = read_32bit_cp1_register(CP1_STATUS); else fcr31 = current->thread.fpu.fcr31; preempt_enable(); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index e6e97d2a5c9..0384b05ab5a 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -229,6 +229,7 @@ LEAF(mips_cps_core_init) nop .set push + .set mips32r2 .set mt /* Only allow 1 TC per VPE to execute... 
*/ @@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes) nop .set push + .set mips32r2 .set mt 1: /* Enter VPE configuration state */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 94c4a0c0a57..dc49cf30c2d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) static char unknown_isa[] = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; +static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) +{ + + unsigned int probability = c->tlbsize / c->tlbsizevtlb; + + /* + * 0 = All TLBWR instructions go to FTLB + * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the + * FTLB and 1 goes to the VTLB. + * 2 = 7:1: As above with 7:1 ratio. + * 3 = 3:1: As above with 3:1 ratio. + * + * Use the linear midpoint as the probability threshold. + */ + if (probability >= 12) + return 1; + else if (probability >= 6) + return 2; + else + /* + * So FTLB is less than 4 times bigger than VTLB. + * A 3:1 ratio can still be useful though. + */ + return 3; +} + static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) { unsigned int config6; @@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) case CPU_P5600: /* proAptiv & related cores use Config6 to enable the FTLB */ config6 = read_c0_config6(); + /* Clear the old probability value */ + config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); if (enable) /* Enable FTLB */ - write_c0_config6(config6 | MIPS_CONF6_FTLBEN); + write_c0_config6(config6 | + (calculate_ftlb_probability(c) + << MIPS_CONF6_FTLBP_SHIFT) + | MIPS_CONF6_FTLBEN); else /* Disable FTLB */ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); @@ -757,31 +788,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON2F: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; } - set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index ac35e12cb1f..a5e26dd9059 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -358,6 +358,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 + SET_HARDFLOAT cfc1 a1, fcr31 li a2, ~(0x3f << 12) and a2, a1 diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 09ce4598075..0b9082b6b68 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -68,9 +68,6 @@ void r4k_wait_irqoff(void) " wait \n" " .set pop \n"); local_irq_enable(); - __asm__( - " .globl __pastwait \n" - "__pastwait: \n"); } /* diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 
6001610cfe5..dda800e9e73 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -18,31 +18,53 @@ #ifdef HAVE_JUMP_LABEL -#define J_RANGE_MASK ((1ul << 28) - 1) +/* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: + * + * - the ISA bit of the target, either 0 or 1 respectively, + * + * - the amount the jump target address is shifted right to fit in the + * immediate field of the machine instruction, either 2 or 1, + * + * - the mask determining the size of the jump region relative to the + * delay-slot instruction, either 256MB or 128MB, + * + * - the jump target alignment, either 4 or 2 bytes. + */ +#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) +#define J_RANGE_SHIFT (2 - J_ISA_BIT) +#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) +#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { + union mips_instruction *insn_p; union mips_instruction insn; - union mips_instruction *insn_p = - (union mips_instruction *)(unsigned long)e->code; - /* Jump only works within a 256MB aligned region. */ - BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + insn_p = (union mips_instruction *)msk_isa16_mode(e->code); + + /* Jump only works within an aligned region its delay slot is in. */ + BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have 4 byte alignment. */ - BUG_ON((e->target & 3) != 0); + /* Target must have the right alignment and ISA must be preserved. */ + BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_ENABLE) { - insn.j_format.opcode = j_op; - insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; + insn.j_format.target = e->target >> J_RANGE_SHIFT; } else { insn.word = 0; /* nop */ } get_online_cpus(); mutex_lock(&text_mutex); - *insn_p = insn; + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { + insn_p->halfword[0] = insn.word >> 16; + insn_p->halfword[1] = insn.word; + } else + *insn_p = insn; flush_icache_range((unsigned long)insn_p, (unsigned long)insn_p + sizeof(*insn_p)); diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index f31063dbdae..5ce3b746ced 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -28,6 +28,8 @@ .set mips1 /* Save floating point context */ LEAF(_save_fp_context) + .set push + SET_HARDFLOAT li v0, 0 # assume success cfc1 t1,fcr31 EX(swc1 $f0,(SC_FPREGS+0)(a0)) @@ -65,6 +67,7 @@ LEAF(_save_fp_context) EX(sw t1,(SC_FPC_CSR)(a0)) cfc1 t0,$0 # implementation/version jr ra + .set pop .set nomacro EX(sw t0,(SC_FPC_EIR)(a0)) .set macro @@ -80,6 +83,8 @@ LEAF(_save_fp_context) * stack frame which might have been changed by the user. 
*/ LEAF(_restore_fp_context) + .set push + SET_HARDFLOAT li v0, 0 # assume success EX(lw t0,(SC_FPC_CSR)(a0)) EX(lwc1 $f0,(SC_FPREGS+0)(a0)) @@ -116,6 +121,7 @@ LEAF(_restore_fp_context) EX(lwc1 $f31,(SC_FPREGS+248)(a0)) jr ra ctc1 t0,fcr31 + .set pop END(_restore_fp_context) .set reorder diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 20b7b040e76..435ea652f5f 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -120,6 +120,9 @@ LEAF(_restore_fp) #define FPU_DEFAULT 0x00000000 + .set push + SET_HARDFLOAT + LEAF(_init_fpu) mfc0 t0, CP0_STATUS li t1, ST0_CU1 @@ -165,3 +168,5 @@ LEAF(_init_fpu) mtc1 t0, $f31 jr ra END(_init_fpu) + + .set pop diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 8352523568e..6c160c67984 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -19,8 +19,12 @@ #include <asm/asm-offsets.h> #include <asm/regdef.h> +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ +#undef fp + .macro EX insn, reg, src .set push + SET_HARDFLOAT .set nomacro .ex\@: \insn \reg, \src .set pop @@ -33,12 +37,17 @@ .set arch=r4000 LEAF(_save_fp_context) + .set push + SET_HARDFLOAT cfc1 t1, fcr31 + .set pop #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) .set push + SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 - .set mips64r2 + .set mips32r2 + .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 @@ -64,6 +73,8 @@ LEAF(_save_fp_context) 1: .set pop #endif + .set push + SET_HARDFLOAT /* Store the 16 even double precision registers */ EX sdc1 $f0, SC_FPREGS+0(a0) EX sdc1 $f2, SC_FPREGS+16(a0) @@ -84,11 +95,14 @@ LEAF(_save_fp_context) EX sw t1, SC_FPC_CSR(a0) jr ra li v0, 0 # success + .set pop END(_save_fp_context) #ifdef CONFIG_MIPS32_COMPAT /* Save 32-bit process floating point context */ LEAF(_save_fp_context32) + .set push + SET_HARDFLOAT cfc1 t1, fcr31 mfc0 t0, CP0_STATUS @@ -134,6 +148,7 @@ LEAF(_save_fp_context32) EX sw t1, SC32_FPC_CSR(a0) cfc1 t0, $0 # implementation/version EX sw t0, SC32_FPC_EIR(a0) + .set pop jr ra li v0, 0 # success @@ -150,8 +165,10 @@ LEAF(_restore_fp_context) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) .set push + SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 - .set mips64r2 + .set mips32r2 + .set fp=64 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 @@ -175,6 +192,8 @@ LEAF(_restore_fp_context) EX ldc1 $f31, SC_FPREGS+248(a0) 1: .set pop #endif + .set push + SET_HARDFLOAT EX ldc1 $f0, SC_FPREGS+0(a0) EX ldc1 $f2, SC_FPREGS+16(a0) EX ldc1 $f4, SC_FPREGS+32(a0) @@ -192,6 +211,7 @@ LEAF(_restore_fp_context) EX ldc1 $f28, SC_FPREGS+224(a0) EX ldc1 $f30, SC_FPREGS+240(a0) ctc1 t1, fcr31 + .set pop jr ra li v0, 0 # success END(_restore_fp_context) @@ -199,6 +219,8 @@ LEAF(_restore_fp_context) #ifdef CONFIG_MIPS32_COMPAT LEAF(_restore_fp_context32) /* Restore an o32 sigcontext. 
*/ + .set push + SET_HARDFLOAT EX lw t1, SC32_FPC_CSR(a0) mfc0 t0, CP0_STATUS @@ -242,6 +264,7 @@ LEAF(_restore_fp_context32) ctc1 t1, fcr31 jr ra li v0, 0 # success + .set pop END(_restore_fp_context32) #endif diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 4c4ec181242..64591e67187 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -22,6 +22,9 @@ #include <asm/asmmacro.h> +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ +#undef fp + /* * Offset to the current process status flags, the first 32 bytes of the * stack are not used. @@ -65,8 +68,12 @@ bgtz a3, 1f /* Save 128b MSA vector context + scalar FP control & status. */ + .set push + SET_HARDFLOAT cfc1 t1, fcr31 msa_save_all a0 + .set pop /* SET_HARDFLOAT */ + sw t1, THREAD_FCR31(a0) b 2f @@ -161,6 +168,9 @@ LEAF(_init_msa_upper) #define FPU_DEFAULT 0x00000000 + .set push + SET_HARDFLOAT + LEAF(_init_fpu) mfc0 t0, CP0_STATUS li t1, ST0_CU1 @@ -232,7 +242,8 @@ LEAF(_init_fpu) #ifdef CONFIG_CPU_MIPS32_R2 .set push - .set mips64r2 + .set mips32r2 + .set fp=64 sll t0, t0, 5 # is Status.FR set? bgez t0, 1f # no: skip setting upper 32b @@ -291,3 +302,5 @@ LEAF(_init_fpu) #endif jr ra END(_init_fpu) + + .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S index da0fbe46d83..47077380c15 100644 --- a/arch/mips/kernel/r6000_fpu.S +++ b/arch/mips/kernel/r6000_fpu.S @@ -18,6 +18,9 @@ .set noreorder .set mips2 + .set push + SET_HARDFLOAT + /* Save floating point context */ LEAF(_save_fp_context) mfc0 t0,CP0_STATUS @@ -85,3 +88,5 @@ 1: jr ra nop END(_restore_fp_context) + + .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 31b1b763cb2..c5c4fd54d79 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep) int ret = 0; if (index >= RTLX_CHANNELS) { - pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); + pr_debug("rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { - pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); + pr_debug("rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 744cd10ba59..00cad1005a1 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -579,3 +579,4 @@ EXPORT(sys_call_table) PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create + PTR sys_bpf /* 4355 */ diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 002b1bc09c3..5251565e344 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -434,4 +434,5 @@ EXPORT(sys_call_table) PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create + PTR sys_bpf /* 5315 */ .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index ca6cbbe9805..77e74398b82 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -427,4 +427,5 @@ EXPORT(sysn32_call_table) PTR sys_seccomp PTR sys_getrandom PTR sys_memfd_create + PTR sys_bpf .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 9e10d11fbb8..6f8db9f728e 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -564,4 +564,5 @@ EXPORT(sys32_call_table) PTR 
sys_seccomp PTR sys_getrandom PTR sys_memfd_create + PTR sys_bpf /* 4355 */ .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index b3b8f0d9d4a..f3b635f86c3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -485,7 +485,7 @@ static void __init bootmem_init(void) * NOTE: historically plat_mem_setup did the entire platform initialization. * This was rather impractical because it meant plat_mem_setup had to * get away without any kind of memory allocator. To keep old code from - * breaking plat_setup was just renamed to plat_setup and a second platform + * breaking plat_setup was just renamed to plat_mem_setup and a second platform * initialization hook for anything else was introduced. */ @@ -493,7 +493,7 @@ static int usermem __initdata; static int __init early_parse_mem(char *p) { - unsigned long start, size; + phys_t start, size; /* * If a user specifies memory size, we @@ -683,7 +683,8 @@ static void __init arch_mem_init(char **cmdline_p) dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); /* Tell bootmem about cma reserved memblock section */ for_each_memblock(reserved, reg) - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + if (reg->size != 0) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); } static void __init resource_init(void) diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 1d57605e461..16f1e4f2bf3 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -658,13 +658,13 @@ static int signal_setup(void) save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; } else { - save_fp_context = copy_fp_from_sigcontext; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; } #endif /* CONFIG_SMP */ #else - save_fp_context = copy_fp_from_sigcontext;; - restore_fp_context = copy_fp_to_sigcontext; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; #endif return 0; diff --git a/arch/mips/lasat/Kconfig b/arch/mips/lasat/Kconfig index 1d2ee8a9be1..8776d0a3427 100644 --- a/arch/mips/lasat/Kconfig +++ b/arch/mips/lasat/Kconfig @@ -4,7 +4,7 @@ config PICVUE config PICVUE_PROC tristate "PICVUE LCD display driver /proc interface" - depends on PICVUE + depends on PICVUE && PROC_FS config DS1603 bool "DS1603 RTC driver" diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c17ef80cf65..5d3238af9b5 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -503,6 +503,7 @@ STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra + nop .if __memcpy == 1 END(memcpy) .set __memcpy, 0 diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 91615c2ef0c..1ef365ab3cd 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -34,7 +34,7 @@ static void dump_tlb(int first, int last) entrylo0 = read_c0_entrylo0(); /* Unused entries have a virtual address of KSEG0. */ - if ((entryhi & 0xffffe000) != 0x80000000 + if ((entryhi & 0xfffff000) != 0x80000000 && (entryhi & 0xfc0) == asid) { /* * Only print entries in use @@ -43,7 +43,7 @@ static void dump_tlb(int first, int last) printk("va=%08lx asid=%08lx" " [pa=%06lx n=%d d=%d v=%d g=%d]", - (entryhi & 0xffffe000), + (entryhi & 0xfffff000), entryhi & 0xfc0, entrylo0 & PAGE_MASK, (entrylo0 & (1 << 11)) ? 
1 : 0, diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index f3af6995e2a..7d12c0dded3 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -40,9 +40,11 @@ FEXPORT(__strnlen_\func\()_nocheck_asm) .else EX(lbe, t0, (v0), .Lfault\@) .endif - PTR_ADDIU v0, 1 + .set noreorder bnez t0, 1b -1: PTR_SUBU v0, a0 +1: PTR_ADDIU v0, 1 + .set reorder + PTR_SUBU v0, a0 jr ra END(__strnlen_\func\()_asm) diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile index 0bb9cc9dc62..d87e03330b2 100644 --- a/arch/mips/loongson/common/Makefile +++ b/arch/mips/loongson/common/Makefile @@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o # Serial port support # obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_SERIAL_8250) += serial.o +loongson-serial-$(CONFIG_SERIAL_8250) := serial.o +obj-y += $(loongson-serial-m) $(loongson-serial-y) obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o obj-$(CONFIG_LOONGSON_MC146818) += rtc.o diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c index a217061beee..462e34d46b4 100644 --- a/arch/mips/loongson/lemote-2f/clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) { + unsigned int rate_khz = rate / 1000; struct cpufreq_frequency_table *pos; int ret = 0; int regval; @@ -107,9 +108,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) propagate_rate(clk); cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table) - if (rate == pos->frequency) + if (rate_khz == pos->frequency) break; - if (rate != pos->frequency) + if (rate_khz != pos->frequency) return -ENOTSUPP; clk->rate = rate; diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c index 37ed184398c..42323bcc5d2 100644 --- a/arch/mips/loongson/loongson-3/numa.c +++ b/arch/mips/loongson/loongson-3/numa.c @@ -33,6 +33,7 @@ static struct node_data prealloc__node_data[MAX_NUMNODES]; unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; +EXPORT_SYMBOL(__node_distances); struct node_data *__node_data[MAX_NUMNODES]; EXPORT_SYMBOL(__node_data); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 7a4727795a7..cac529a405b 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -584,11 +584,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, if (insn.i_format.rs == bc_op) { preempt_disable(); if (is_fpu_owner()) - asm volatile( - ".set push\n" - "\t.set mips1\n" - "\tcfc1\t%0,$31\n" - "\t.set pop" : "=r" (fcr31)); + fcr31 = read_32bit_cp1_register(CP1_STATUS); else fcr31 = current->thread.fpu.fcr31; preempt_enable(); @@ -1023,7 +1019,7 @@ emul: goto emul; case cop1x_op: - if (cpu_has_mips_4_5 || cpu_has_mips64) + if (cpu_has_mips_4_5 || cpu_has_mips64 || cpu_has_mips32r2) /* its one of ours */ goto emul; @@ -1068,7 +1064,7 @@ emul: break; case cop1x_op: - if (!cpu_has_mips_4_5 && !cpu_has_mips64) + if (!cpu_has_mips_4_5 && !cpu_has_mips64 && !cpu_has_mips32r2) return SIGILL; sig = fpux_emu(xcp, ctx, ir, fault_addr); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index fa6ebd4bc9e..c3917e251f5 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); + htw_stop(); pid = read_c0_entryhi() & ASID_MASK; address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); @@ 
-346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) tlb_write_indexed(); } tlbw_use_hazard(); + htw_start(); flush_itlb_vm(vma); local_irq_restore(flags); } @@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ + htw_stop(); old_ctx = read_c0_entryhi(); old_pagemask = read_c0_pagemask(); wired = read_c0_wired(); @@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, write_c0_entryhi(old_ctx); write_c0_pagemask(old_pagemask); + htw_start(); out: local_irq_restore(flags); return ret; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index a08dd53a1cc..e3328a96e80 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1062,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) struct mips_huge_tlb_info { int huge_pte; int restore_scratch; + bool need_reload_pte; }; static struct mips_huge_tlb_info @@ -1076,6 +1077,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, rv.huge_pte = scratch; rv.restore_scratch = 0; + rv.need_reload_pte = false; if (check_for_high_segbits) { UASM_i_MFC0(p, tmp, C0_BADVADDR); @@ -1264,6 +1266,7 @@ static void build_r4000_tlb_refill_handler(void) } else { htlb_info.huge_pte = K0; htlb_info.restore_scratch = 0; + htlb_info.need_reload_pte = true; vmalloc_mode = refill_noscratch; /* * create the plain linear handler @@ -1300,7 +1303,8 @@ static void build_r4000_tlb_refill_handler(void) } #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT uasm_l_tlb_huge_update(&l, p); - UASM_i_LW(&p, K0, 0, K1); + if (htlb_info.need_reload_pte) + UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); build_huge_update_entries(&p, htlb_info.huge_pte, K1); build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, htlb_info.restore_scratch); @@ -1868,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, uasm_l_smp_pgtable_change(l, *p); #endif iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ - if (!m4kc_tlbp_war()) + if (!m4kc_tlbp_war()) { build_tlb_probe_entry(p); + if (cpu_has_htw) { + /* race condition happens, leaving */ + uasm_i_ehb(p); + uasm_i_mfc0(p, wr.r3, C0_INDEX); + uasm_il_bltz(p, r, wr.r3, label_leave); + uasm_i_nop(p); + } + } return wr; } diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index b9510ea8db5..6510ace272d 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -5,8 +5,9 @@ # Copyright (C) 2008 Wind River Systems, Inc. 
# written by Ralf Baechle <ralf@linux-mips.org> # -obj-y := malta-amon.o malta-display.o malta-init.o \ +obj-y := malta-display.o malta-init.o \ malta-int.o malta-memory.o malta-platform.o \ malta-reset.o malta-setup.o malta-time.o +obj-$(CONFIG_MIPS_CMP) += malta-amon.o obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile index febf4334545..2ae49e99eb6 100644 --- a/arch/mips/mti-sead3/Makefile +++ b/arch/mips/mti-sead3/Makefile @@ -14,7 +14,6 @@ obj-y := sead3-lcd.o sead3-display.o sead3-init.o \ sead3-setup.o sead3-time.o obj-y += sead3-i2c-dev.o sead3-i2c.o \ - sead3-pic32-i2c-drv.o sead3-pic32-bus.o \ leds-sead3.o sead3-leds.o obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o diff --git a/arch/mips/mti-sead3/sead3-i2c.c b/arch/mips/mti-sead3/sead3-i2c.c index f70d5fc58ef..795ae83894e 100644 --- a/arch/mips/mti-sead3/sead3-i2c.c +++ b/arch/mips/mti-sead3/sead3-i2c.c @@ -5,10 +5,8 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ -#include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <irq.h> struct resource sead3_i2c_resources[] = { { @@ -30,8 +28,4 @@ static int __init sead3_i2c_init(void) return platform_device_register(&sead3_i2c_device); } -module_init(sead3_i2c_init); - -MODULE_AUTHOR("Chris Dearman <chris@mips.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("I2C probe driver for SEAD3"); +device_initcall(sead3_i2c_init); diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c index 20102a6d414..c427c577818 100644 --- a/arch/mips/mti-sead3/sead3-leds.c +++ b/arch/mips/mti-sead3/sead3-leds.c @@ -5,7 +5,7 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/leds.h> #include <linux/platform_device.h> @@ -76,8 +76,4 @@ static int __init led_init(void) return platform_device_register(&fled_device); } -module_init(led_init); - -MODULE_AUTHOR("Chris Dearman <chris@mips.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("LED probe driver for SEAD-3"); +device_initcall(led_init); diff --git a/arch/mips/mti-sead3/sead3-pic32-bus.c b/arch/mips/mti-sead3/sead3-pic32-bus.c deleted file mode 100644 index 3b12aa5a7c8..00000000000 --- a/arch/mips/mti-sead3/sead3-pic32-bus.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - */ -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/spinlock.h> -#include <linux/io.h> -#include <linux/errno.h> - -#define PIC32_NULL 0x00 -#define PIC32_RD 0x01 -#define PIC32_SYSRD 0x02 -#define PIC32_WR 0x10 -#define PIC32_SYSWR 0x20 -#define PIC32_IRQ_CLR 0x40 -#define PIC32_STATUS 0x80 - -#define DELAY() udelay(100) /* FIXME: needed? 
*/ - -/* spinlock to ensure atomic access to PIC32 */ -static DEFINE_SPINLOCK(pic32_bus_lock); - -/* FIXME: io_remap these */ -static void __iomem *bus_xfer = (void __iomem *)0xbf000600; -static void __iomem *bus_status = (void __iomem *)0xbf000060; - -static inline unsigned int ioready(void) -{ - return readl(bus_status) & 1; -} - -static inline void wait_ioready(void) -{ - do { } while (!ioready()); -} - -static inline void wait_ioclear(void) -{ - do { } while (ioready()); -} - -static inline void check_ioclear(void) -{ - if (ioready()) { - pr_debug("ioclear: initially busy\n"); - do { - (void) readl(bus_xfer); - DELAY(); - } while (ioready()); - pr_debug("ioclear: cleared busy\n"); - } -} - -u32 pic32_bus_readl(u32 reg) -{ - unsigned long flags; - u32 status, val; - - spin_lock_irqsave(&pic32_bus_lock, flags); - - check_ioclear(); - - writel((PIC32_RD << 24) | (reg & 0x00ffffff), bus_xfer); - DELAY(); - wait_ioready(); - status = readl(bus_xfer); - DELAY(); - val = readl(bus_xfer); - wait_ioclear(); - - pr_debug("pic32_bus_readl: *%x -> %x (status=%x)\n", reg, val, status); - - spin_unlock_irqrestore(&pic32_bus_lock, flags); - - return val; -} - -void pic32_bus_writel(u32 val, u32 reg) -{ - unsigned long flags; - u32 status; - - spin_lock_irqsave(&pic32_bus_lock, flags); - - check_ioclear(); - - writel((PIC32_WR << 24) | (reg & 0x00ffffff), bus_xfer); - DELAY(); - writel(val, bus_xfer); - DELAY(); - wait_ioready(); - status = readl(bus_xfer); - wait_ioclear(); - - pr_debug("pic32_bus_writel: *%x <- %x (status=%x)\n", reg, val, status); - - spin_unlock_irqrestore(&pic32_bus_lock, flags); -} diff --git a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c deleted file mode 100644 index 80fe194cfa5..00000000000 --- a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c +++ /dev/null @@ -1,423 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- */ -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/platform_device.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/i2c.h> -#include <linux/slab.h> - -#define PIC32_I2CxCON 0x0000 -#define PIC32_I2CxCONCLR 0x0004 -#define PIC32_I2CxCONSET 0x0008 -#define PIC32_I2CxCONINV 0x000C -#define I2CCON_ON (1<<15) -#define I2CCON_FRZ (1<<14) -#define I2CCON_SIDL (1<<13) -#define I2CCON_SCLREL (1<<12) -#define I2CCON_STRICT (1<<11) -#define I2CCON_A10M (1<<10) -#define I2CCON_DISSLW (1<<9) -#define I2CCON_SMEN (1<<8) -#define I2CCON_GCEN (1<<7) -#define I2CCON_STREN (1<<6) -#define I2CCON_ACKDT (1<<5) -#define I2CCON_ACKEN (1<<4) -#define I2CCON_RCEN (1<<3) -#define I2CCON_PEN (1<<2) -#define I2CCON_RSEN (1<<1) -#define I2CCON_SEN (1<<0) - -#define PIC32_I2CxSTAT 0x0010 -#define PIC32_I2CxSTATCLR 0x0014 -#define PIC32_I2CxSTATSET 0x0018 -#define PIC32_I2CxSTATINV 0x001C -#define I2CSTAT_ACKSTAT (1<<15) -#define I2CSTAT_TRSTAT (1<<14) -#define I2CSTAT_BCL (1<<10) -#define I2CSTAT_GCSTAT (1<<9) -#define I2CSTAT_ADD10 (1<<8) -#define I2CSTAT_IWCOL (1<<7) -#define I2CSTAT_I2COV (1<<6) -#define I2CSTAT_DA (1<<5) -#define I2CSTAT_P (1<<4) -#define I2CSTAT_S (1<<3) -#define I2CSTAT_RW (1<<2) -#define I2CSTAT_RBF (1<<1) -#define I2CSTAT_TBF (1<<0) - -#define PIC32_I2CxADD 0x0020 -#define PIC32_I2CxADDCLR 0x0024 -#define PIC32_I2CxADDSET 0x0028 -#define PIC32_I2CxADDINV 0x002C -#define PIC32_I2CxMSK 0x0030 -#define PIC32_I2CxMSKCLR 0x0034 -#define PIC32_I2CxMSKSET 0x0038 -#define PIC32_I2CxMSKINV 0x003C -#define PIC32_I2CxBRG 0x0040 -#define PIC32_I2CxBRGCLR 0x0044 -#define PIC32_I2CxBRGSET 0x0048 -#define PIC32_I2CxBRGINV 0x004C -#define PIC32_I2CxTRN 0x0050 -#define PIC32_I2CxTRNCLR 0x0054 -#define PIC32_I2CxTRNSET 0x0058 -#define PIC32_I2CxTRNINV 0x005C -#define PIC32_I2CxRCV 0x0060 - -struct i2c_platform_data { - u32 base; - struct i2c_adapter adap; - u32 xfer_timeout; - u32 ack_timeout; - u32 ctl_timeout; -}; - -extern u32 pic32_bus_readl(u32 reg); -extern void pic32_bus_writel(u32 val, u32 reg); - -static inline void -StartI2C(struct i2c_platform_data *adap) -{ - pr_debug("StartI2C\n"); - pic32_bus_writel(I2CCON_SEN, adap->base + PIC32_I2CxCONSET); -} - -static inline void -StopI2C(struct i2c_platform_data *adap) -{ - pr_debug("StopI2C\n"); - pic32_bus_writel(I2CCON_PEN, adap->base + PIC32_I2CxCONSET); -} - -static inline void -AckI2C(struct i2c_platform_data *adap) -{ - pr_debug("AckI2C\n"); - pic32_bus_writel(I2CCON_ACKDT, adap->base + PIC32_I2CxCONCLR); - pic32_bus_writel(I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET); -} - -static inline void -NotAckI2C(struct i2c_platform_data *adap) -{ - pr_debug("NakI2C\n"); - pic32_bus_writel(I2CCON_ACKDT, adap->base + PIC32_I2CxCONSET); - pic32_bus_writel(I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET); -} - -static inline int -IdleI2C(struct i2c_platform_data *adap) -{ - int i; - - pr_debug("IdleI2C\n"); - for (i = 0; i < adap->ctl_timeout; i++) { - if (((pic32_bus_readl(adap->base + PIC32_I2CxCON) & - (I2CCON_ACKEN | I2CCON_RCEN | I2CCON_PEN | I2CCON_RSEN | - I2CCON_SEN)) == 0) && - ((pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & - (I2CSTAT_TRSTAT)) == 0)) - return 0; - udelay(1); - } - return -ETIMEDOUT; -} - -static inline u32 -MasterWriteI2C(struct i2c_platform_data *adap, u32 byte) -{ - pr_debug("MasterWriteI2C\n"); - - pic32_bus_writel(byte, adap->base + PIC32_I2CxTRN); - - return pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & I2CSTAT_IWCOL; -} - -static 
inline u32 -MasterReadI2C(struct i2c_platform_data *adap) -{ - pr_debug("MasterReadI2C\n"); - - pic32_bus_writel(I2CCON_RCEN, adap->base + PIC32_I2CxCONSET); - - while (pic32_bus_readl(adap->base + PIC32_I2CxCON) & I2CCON_RCEN) - ; - - pic32_bus_writel(I2CSTAT_I2COV, adap->base + PIC32_I2CxSTATCLR); - - return pic32_bus_readl(adap->base + PIC32_I2CxRCV); -} - -static int -do_address(struct i2c_platform_data *adap, unsigned int addr, int rd) -{ - pr_debug("doaddress\n"); - - IdleI2C(adap); - StartI2C(adap); - IdleI2C(adap); - - addr <<= 1; - if (rd) - addr |= 1; - - if (MasterWriteI2C(adap, addr)) - return -EIO; - IdleI2C(adap); - if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & I2CSTAT_ACKSTAT) - return -EIO; - return 0; -} - -static int -i2c_read(struct i2c_platform_data *adap, unsigned char *buf, - unsigned int len) -{ - int i; - u32 data; - - pr_debug("i2c_read\n"); - - i = 0; - while (i < len) { - data = MasterReadI2C(adap); - buf[i++] = data; - if (i < len) - AckI2C(adap); - else - NotAckI2C(adap); - } - - StopI2C(adap); - IdleI2C(adap); - return 0; -} - -static int -i2c_write(struct i2c_platform_data *adap, unsigned char *buf, - unsigned int len) -{ - int i; - u32 data; - - pr_debug("i2c_write\n"); - - i = 0; - while (i < len) { - data = buf[i]; - if (MasterWriteI2C(adap, data)) - return -EIO; - IdleI2C(adap); - if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & - I2CSTAT_ACKSTAT) - return -EIO; - i++; - } - - StopI2C(adap); - IdleI2C(adap); - return 0; -} - -static int -platform_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) -{ - struct i2c_platform_data *adap = i2c_adap->algo_data; - struct i2c_msg *p; - int i, err = 0; - - pr_debug("platform_xfer\n"); - for (i = 0; i < num; i++) { -#define __BUFSIZE 80 - int ii; - static char buf[__BUFSIZE]; - char *b = buf; - - p = &msgs[i]; - b += sprintf(buf, " [%d bytes]", p->len); - if ((p->flags & I2C_M_RD) == 0) { - for (ii = 0; ii < p->len; ii++) { - if (b < &buf[__BUFSIZE-4]) { - b += sprintf(b, " %02x", p->buf[ii]); - } else { - strcat(b, "..."); - break; - } - } - } - pr_debug("xfer%d: DevAddr: %04x Op:%s Data:%s\n", i, p->addr, - (p->flags & I2C_M_RD) ? "Rd" : "Wr", buf); - } - - - for (i = 0; !err && i < num; i++) { - p = &msgs[i]; - err = do_address(adap, p->addr, p->flags & I2C_M_RD); - if (err || !p->len) - continue; - if (p->flags & I2C_M_RD) - err = i2c_read(adap, p->buf, p->len); - else - err = i2c_write(adap, p->buf, p->len); - } - - /* Return the number of messages processed, or the error code. 
*/ - if (err == 0) - err = num; - - return err; -} - -static u32 -platform_func(struct i2c_adapter *adap) -{ - pr_debug("platform_algo\n"); - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -} - -static const struct i2c_algorithm platform_algo = { - .master_xfer = platform_xfer, - .functionality = platform_func, -}; - -static void i2c_platform_setup(struct i2c_platform_data *priv) -{ - pr_debug("i2c_platform_setup\n"); - - pic32_bus_writel(500, priv->base + PIC32_I2CxBRG); - pic32_bus_writel(I2CCON_ON, priv->base + PIC32_I2CxCONCLR); - pic32_bus_writel(I2CCON_ON, priv->base + PIC32_I2CxCONSET); - pic32_bus_writel((I2CSTAT_BCL | I2CSTAT_IWCOL), - (priv->base + PIC32_I2CxSTATCLR)); -} - -static void i2c_platform_disable(struct i2c_platform_data *priv) -{ - pr_debug("i2c_platform_disable\n"); -} - -static int i2c_platform_probe(struct platform_device *pdev) -{ - struct i2c_platform_data *priv; - struct resource *r; - int ret; - - pr_debug("i2c_platform_probe\n"); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENODEV; - - priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_platform_data), - GFP_KERNEL); - if (!priv) - return -ENOMEM; - - /* FIXME: need to allocate resource in PIC32 space */ -#if 0 - priv->base = bus_request_region(r->start, resource_size(r), - pdev->name); -#else - priv->base = r->start; -#endif - if (!priv->base) - return -EBUSY; - - priv->xfer_timeout = 200; - priv->ack_timeout = 200; - priv->ctl_timeout = 200; - - priv->adap.nr = pdev->id; - priv->adap.algo = &platform_algo; - priv->adap.algo_data = priv; - priv->adap.dev.parent = &pdev->dev; - strlcpy(priv->adap.name, "PIC32 I2C", sizeof(priv->adap.name)); - - i2c_platform_setup(priv); - - ret = i2c_add_numbered_adapter(&priv->adap); - if (ret) { - i2c_platform_disable(priv); - return ret; - } - - platform_set_drvdata(pdev, priv); - return 0; -} - -static int i2c_platform_remove(struct platform_device *pdev) -{ - struct i2c_platform_data *priv = platform_get_drvdata(pdev); - - pr_debug("i2c_platform_remove\n"); - platform_set_drvdata(pdev, NULL); - i2c_del_adapter(&priv->adap); - i2c_platform_disable(priv); - return 0; -} - -#ifdef CONFIG_PM -static int -i2c_platform_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct i2c_platform_data *priv = platform_get_drvdata(pdev); - - dev_dbg(&pdev->dev, "i2c_platform_disable\n"); - i2c_platform_disable(priv); - - return 0; -} - -static int -i2c_platform_resume(struct platform_device *pdev) -{ - struct i2c_platform_data *priv = platform_get_drvdata(pdev); - - dev_dbg(&pdev->dev, "i2c_platform_setup\n"); - i2c_platform_setup(priv); - - return 0; -} -#else -#define i2c_platform_suspend NULL -#define i2c_platform_resume NULL -#endif - -static struct platform_driver i2c_platform_driver = { - .driver = { - .name = "i2c_pic32", - .owner = THIS_MODULE, - }, - .probe = i2c_platform_probe, - .remove = i2c_platform_remove, - .suspend = i2c_platform_suspend, - .resume = i2c_platform_resume, -}; - -static int __init -i2c_platform_init(void) -{ - pr_debug("i2c_platform_init\n"); - return platform_driver_register(&i2c_platform_driver); -} - -static void __exit -i2c_platform_exit(void) -{ - pr_debug("i2c_platform_exit\n"); - platform_driver_unregister(&i2c_platform_driver); -} - -MODULE_AUTHOR("Chris Dearman, MIPS Technologies INC."); -MODULE_DESCRIPTION("PIC32 I2C driver"); -MODULE_LICENSE("GPL"); - -module_init(i2c_platform_init); -module_exit(i2c_platform_exit); diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile index 
be358a8050c..6b43af0a34d 100644 --- a/arch/mips/netlogic/xlp/Makefile +++ b/arch/mips/netlogic/xlp/Makefile @@ -1,6 +1,10 @@ obj-y += setup.o nlm_hal.o cop2-ex.o dt.o obj-$(CONFIG_SMP) += wakeup.o -obj-$(CONFIG_USB) += usb-init.o -obj-$(CONFIG_USB) += usb-init-xlp2.o -obj-$(CONFIG_SATA_AHCI) += ahci-init.o -obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o +ifdef CONFIG_USB +obj-y += usb-init.o +obj-y += usb-init-xlp2.o +endif +ifdef CONFIG_SATA_AHCI +obj-y += ahci-init.o +obj-y += ahci-init-xlp2.o +endif diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 6854ed5097d..83a1dfd8f0e 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, /* This marks the end of the previous function, which means we overran. */ break; - stack_size = (unsigned) stack_adjustment; + stack_size = (unsigned long) stack_adjustment; } else if (is_ra_save_ins(&ip)) { int ra_slot = ip.i_format.simmediate; if (ra_slot < 0) diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c index fa374fe3746..f7ac3edda1b 100644 --- a/arch/mips/pci/msi-xlp.c +++ b/arch/mips/pci/msi-xlp.c @@ -443,10 +443,8 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link, msg.data = 0xc00 | msixvec; ret = irq_set_msi_desc(xirq, desc); - if (ret < 0) { - destroy_irq(xirq); + if (ret < 0) return ret; - } write_msi_msg(xirq, &msg); return 0; diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 37fe8e7887e..d3ed15b2b2d 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -215,17 +215,12 @@ static int ltq_pci_probe(struct platform_device *pdev) pci_clear_flags(PCI_PROBE_ONLY); - res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res_cfg || !res_bridge) { - dev_err(&pdev->dev, "missing memory resources\n"); - return -EINVAL; - } - ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge); if (IS_ERR(ltq_pci_membase)) return PTR_ERR(ltq_pci_membase); + res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg); if (IS_ERR(ltq_pci_mapped_cfg)) return PTR_ERR(ltq_pci_mapped_cfg); diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c index f914c753de2..8d53d7a2ed4 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq.c +++ b/arch/mips/pmcs-msp71xx/msp_irq.c @@ -16,6 +16,7 @@ #include <linux/time.h> #include <asm/irq_cpu.h> +#include <asm/setup.h> #include <msp_int.h> diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c index b8df2f7b332..1207ec4dfb7 100644 --- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c +++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c @@ -131,11 +131,11 @@ static int msp_cic_irq_set_affinity(struct irq_data *d, int cpu; unsigned long flags; unsigned int mtflags; - unsigned long imask = (1 << (irq - MSP_CIC_INTBASE)); + unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE)); volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG; /* timer balancing should be disabled in kernel code */ - BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER); + BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER); LOCK_CORE(flags, mtflags); /* enable if any of each VPE's TCs require this IRQ */ diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index a95c00f5fb9..a304bcc37e4 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c 
+++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) } unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; +EXPORT_SYMBOL(__node_distances); static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) { diff --git a/arch/mips/sibyte/Makefile b/arch/mips/sibyte/Makefile index c8ed2c807e6..455c40d6d62 100644 --- a/arch/mips/sibyte/Makefile +++ b/arch/mips/sibyte/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_SIBYTE_RHONE) += swarm/ obj-$(CONFIG_SIBYTE_SENTOSA) += swarm/ obj-$(CONFIG_SIBYTE_SWARM) += swarm/ obj-$(CONFIG_SIBYTE_BIGSUR) += swarm/ +obj-$(CONFIG_SIBYTE_LITTLESUR) += swarm/ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4006964d8e1..a5cb070b54b 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -9,6 +9,8 @@ #include <asm/errno.h> #include <asm-generic/uaccess-unaligned.h> +#include <linux/bug.h> + #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -28,11 +30,6 @@ * that put_user is the same as __put_user, etc. */ -extern int __get_kernel_bad(void); -extern int __get_user_bad(void); -extern int __put_kernel_bad(void); -extern int __put_user_bad(void); - static inline long access_ok(int type, const void __user * addr, unsigned long size) { @@ -43,8 +40,8 @@ static inline long access_ok(int type, const void __user * addr, #define get_user __get_user #if !defined(CONFIG_64BIT) -#define LDD_KERNEL(ptr) __get_kernel_bad(); -#define LDD_USER(ptr) __get_user_bad(); +#define LDD_KERNEL(ptr) BUILD_BUG() +#define LDD_USER(ptr) BUILD_BUG() #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) #define STD_USER(x, ptr) __put_user_asm64(x,ptr) #define ASM_WORD_INSN ".word\t" @@ -94,7 +91,7 @@ struct exception_data { case 2: __get_kernel_asm("ldh",ptr); break; \ case 4: __get_kernel_asm("ldw",ptr); break; \ case 8: LDD_KERNEL(ptr); break; \ - default: __get_kernel_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ else { \ @@ -103,7 +100,7 @@ struct exception_data { case 2: __get_user_asm("ldh",ptr); break; \ case 4: __get_user_asm("ldw",ptr); break; \ case 8: LDD_USER(ptr); break; \ - default: __get_user_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ \ @@ -136,7 +133,7 @@ struct exception_data { case 2: __put_kernel_asm("sth",__x,ptr); break; \ case 4: __put_kernel_asm("stw",__x,ptr); break; \ case 8: STD_KERNEL(__x,ptr); break; \ - default: __put_kernel_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ else { \ @@ -145,7 +142,7 @@ struct exception_data { case 2: __put_user_asm("sth",__x,ptr); break; \ case 4: __put_user_asm("stw",__x,ptr); break; \ case 8: STD_USER(__x,ptr); break; \ - default: __put_user_bad(); break; \ + default: BUILD_BUG(); break; \ } \ } \ \ diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h index 75196b415d3..e0a23c7bdd4 100644 --- a/arch/parisc/include/uapi/asm/bitsperlong.h +++ b/arch/parisc/include/uapi/asm/bitsperlong.h @@ -1,13 +1,7 @@ #ifndef __ASM_PARISC_BITSPERLONG_H #define __ASM_PARISC_BITSPERLONG_H -/* - * using CONFIG_* outside of __KERNEL__ is wrong, - * __LP64__ was also removed from headers, so what - * is the right approach on parisc? 
- * -arnd - */ -#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) +#if defined(__LP64__) #define __BITS_PER_LONG 64 #define SHIFT_PER_LONG 6 #else diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h index fe88f264941..34213898391 100644 --- a/arch/parisc/include/uapi/asm/msgbuf.h +++ b/arch/parisc/include/uapi/asm/msgbuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_MSGBUF_H #define _PARISC_MSGBUF_H +#include <asm/bitsperlong.h> + /* * The msqid64_ds structure for parisc architecture, copied from sparc. * Note extra padding because this structure is passed back and forth @@ -13,15 +15,15 @@ struct msqid64_ds { struct ipc64_perm msg_perm; -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t msg_stime; /* last msgsnd time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad3; #endif __kernel_time_t msg_ctime; /* last change time */ diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h index 1e59ffd3bd1..f01d89e30d7 100644 --- a/arch/parisc/include/uapi/asm/sembuf.h +++ b/arch/parisc/include/uapi/asm/sembuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_SEMBUF_H #define _PARISC_SEMBUF_H +#include <asm/bitsperlong.h> + /* * The semid64_ds structure for parisc architecture. * Note extra padding because this structure is passed back and forth @@ -13,11 +15,11 @@ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t sem_otime; /* last semop time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t sem_ctime; /* last change time */ diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h index 0a3eada1863..8496c38560c 100644 --- a/arch/parisc/include/uapi/asm/shmbuf.h +++ b/arch/parisc/include/uapi/asm/shmbuf.h @@ -1,6 +1,8 @@ #ifndef _PARISC_SHMBUF_H #define _PARISC_SHMBUF_H +#include <asm/bitsperlong.h> + /* * The shmid64_ds structure for parisc architecture. * Note extra padding because this structure is passed back and forth @@ -13,19 +15,19 @@ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad1; #endif __kernel_time_t shm_atime; /* last attach time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad2; #endif __kernel_time_t shm_dtime; /* last detach time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad3; #endif __kernel_time_t shm_ctime; /* last change time */ -#ifndef CONFIG_64BIT +#if __BITS_PER_LONG != 64 unsigned int __pad4; #endif size_t shm_segsz; /* size of segment (bytes) */ @@ -36,23 +38,16 @@ struct shmid64_ds { unsigned int __unused2; }; -#ifdef CONFIG_64BIT -/* The 'unsigned int' (formerly 'unsigned long') data types below will - * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on - * a wide kernel, but if some of these values are meant to contain pointers - * they may need to be 'long long' instead. 
-PB XXX FIXME - */ -#endif struct shminfo64 { - unsigned int shmmax; - unsigned int shmmin; - unsigned int shmmni; - unsigned int shmseg; - unsigned int shmall; - unsigned int __unused1; - unsigned int __unused2; - unsigned int __unused3; - unsigned int __unused4; + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; }; #endif /* _PARISC_SHMBUF_H */ diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index 10df7079f4c..e26043b73f5 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -85,7 +85,7 @@ struct siginfo; /* Type of a signal handler. */ -#ifdef CONFIG_64BIT +#if defined(__LP64__) /* function pointers on 64-bit parisc are pointers to little structs and the * compiler doesn't support code which changes or tests the address of * the function in the little struct. This is really ugly -PB diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 8667f18be23..5f5c0373de6 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -833,8 +833,9 @@ #define __NR_seccomp (__NR_Linux + 338) #define __NR_getrandom (__NR_Linux + 339) #define __NR_memfd_create (__NR_Linux + 340) +#define __NR_bpf (__NR_Linux + 341) -#define __NR_Linux_syscalls (__NR_memfd_create + 1) +#define __NR_Linux_syscalls (__NR_bpf + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index b563d9c8268..fe4f0b89bf8 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -286,11 +286,11 @@ ENTRY_COMP(msgsnd) ENTRY_COMP(msgrcv) ENTRY_SAME(msgget) /* 190 */ - ENTRY_SAME(msgctl) - ENTRY_SAME(shmat) + ENTRY_COMP(msgctl) + ENTRY_COMP(shmat) ENTRY_SAME(shmdt) ENTRY_SAME(shmget) - ENTRY_SAME(shmctl) /* 195 */ + ENTRY_COMP(shmctl) /* 195 */ ENTRY_SAME(ni_syscall) /* streams1 */ ENTRY_SAME(ni_syscall) /* streams2 */ ENTRY_SAME(lstat64) @@ -323,7 +323,7 @@ ENTRY_SAME(epoll_ctl) /* 225 */ ENTRY_SAME(epoll_wait) ENTRY_SAME(remap_file_pages) - ENTRY_SAME(semtimedop) + ENTRY_COMP(semtimedop) ENTRY_COMP(mq_open) ENTRY_SAME(mq_unlink) /* 230 */ ENTRY_COMP(mq_timedsend) @@ -436,6 +436,7 @@ ENTRY_SAME(seccomp) ENTRY_SAME(getrandom) ENTRY_SAME(memfd_create) /* 340 */ + ENTRY_SAME(bpf) /* Nothing yet */ diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 63392f4b29a..d2008887eb8 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig @@ -48,7 +48,6 @@ CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y -CONFIG_CMA=y CONFIG_PPC_64K_PAGES=y CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y @@ -138,6 +137,7 @@ CONFIG_NETCONSOLE=y CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m CONFIG_VIRTIO_NET=m +CONFIG_VHOST_NET=m CONFIG_VORTEX=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -303,4 +303,9 @@ CONFIG_CRYPTO_LZO=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM_BOOK3S_64=m +CONFIG_KVM_BOOK3S_64_HV=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 3b260efbfbf..ca07f9c2733 
100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -71,9 +71,10 @@ struct device_node; #define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ #define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ -#define EEH_PE_RESET (1 << 2) /* PE reset in progress */ +#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */ #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ +#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ struct eeh_pe { int type; /* PE type: PHB/Bus/Device */ diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index a6774560afe..493e72f64b3 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -70,39 +70,39 @@ #define CPU_UNKNOWN (~((u32)0)) /* Utility macros */ -#define SKIP_TO_NEXT_CPU(reg_entry) \ -({ \ - while (reg_entry->reg_id != REG_ID("CPUEND")) \ - reg_entry++; \ - reg_entry++; \ +#define SKIP_TO_NEXT_CPU(reg_entry) \ +({ \ + while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \ + reg_entry++; \ + reg_entry++; \ }) /* Kernel Dump section info */ struct fadump_section { - u32 request_flag; - u16 source_data_type; - u16 error_flags; - u64 source_address; - u64 source_len; - u64 bytes_dumped; - u64 destination_address; + __be32 request_flag; + __be16 source_data_type; + __be16 error_flags; + __be64 source_address; + __be64 source_len; + __be64 bytes_dumped; + __be64 destination_address; }; /* ibm,configure-kernel-dump header. */ struct fadump_section_header { - u32 dump_format_version; - u16 dump_num_sections; - u16 dump_status_flag; - u32 offset_first_dump_section; + __be32 dump_format_version; + __be16 dump_num_sections; + __be16 dump_status_flag; + __be32 offset_first_dump_section; /* Fields for disk dump option. */ - u32 dd_block_size; - u64 dd_block_offset; - u64 dd_num_blocks; - u32 dd_offset_disk_path; + __be32 dd_block_size; + __be64 dd_block_offset; + __be64 dd_num_blocks; + __be32 dd_offset_disk_path; /* Maximum time allowed to prevent an automatic dump-reboot. */ - u32 max_time_auto; + __be32 max_time_auto; }; /* @@ -174,15 +174,15 @@ static inline u64 str_to_u64(const char *str) /* Register save area header. */ struct fadump_reg_save_area_header { - u64 magic_number; - u32 version; - u32 num_cpu_offset; + __be64 magic_number; + __be32 version; + __be32 num_cpu_offset; }; /* Register entry. 
*/ struct fadump_reg_entry { - u64 reg_id; - u64 reg_value; + __be64 reg_id; + __be64 reg_value; }; /* fadump crash info structure */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 623f2971ce0..766b77d527a 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, void flush_dcache_icache_hugepage(struct page *page); -#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT) +#if defined(CONFIG_PPC_MM_SLICES) int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len); #else diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 4ca90a39d6d..725247beebe 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -159,8 +159,6 @@ struct pci_dn { int pci_ext_config_space; /* for pci devices */ - bool force_32bit_msi; - struct pci_dev *pcidev; /* back-pointer to the pci device */ #ifdef CONFIG_EEH struct eeh_dev *edev; /* eeh device */ diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 0bb23725b1e..8bf1b635171 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -34,7 +34,7 @@ do { \ (regs)->result = 0; \ (regs)->nip = __ip; \ - (regs)->gpr[1] = *(unsigned long *)__get_SP(); \ + (regs)->gpr[1] = current_stack_pointer(); \ asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \ } while (0) #endif diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index fe3f9488f32..c998279bd85 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1265,8 +1265,7 @@ static inline unsigned long mfvtb (void) #define proc_trap() asm volatile("trap") -#define __get_SP() ({unsigned long sp; \ - asm volatile("mr %0,1": "=r" (sp)); sp;}) +extern unsigned long current_stack_pointer(void); extern unsigned long scom970_read(unsigned int address); extern void scom970_write(unsigned int address, unsigned long value); diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 6fa2708da15..6240698fee9 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -19,7 +19,7 @@ /* ftrace syscalls requires exporting the sys_call_table */ #ifdef CONFIG_FTRACE_SYSCALLS -extern const unsigned long *sys_call_table; +extern const unsigned long sys_call_table[]; #endif /* CONFIG_FTRACE_SYSCALLS */ static inline long syscall_get_nr(struct task_struct *task, diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 7d8a6006880..ce9577d693b 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2) SYSCALL_SPU(seccomp) SYSCALL_SPU(getrandom) SYSCALL_SPU(memfd_create) +SYSCALL_SPU(bpf) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 4e9af3fd43e..e0da021caa0 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 361 +#define __NR_syscalls 362 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 0688fc06e18..f55351f2e66 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -383,5 +383,6 
@@ #define __NR_seccomp 358 #define __NR_getrandom 359 #define __NR_memfd_create 360 +#define __NR_bpf 361 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index adac9dc54ae..484b2d4462c 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -53,9 +53,16 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, #else struct page *page; int node = dev_to_node(dev); +#ifdef CONFIG_FSL_SOC u64 pfn = get_pfn_limit(dev); int zone; + /* + * This code should be OK on other platforms, but we have drivers that + * don't set coherent_dma_mask. As a workaround we just ifdef it. This + * whole routine needs some serious cleanup. + */ + zone = dma_pfn_limit_to_zone(pfn); if (zone < 0) { dev_err(dev, "%s: No suitable zone for pfn %#llx\n", @@ -73,6 +80,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, break; #endif }; +#endif /* CONFIG_FSL_SOC */ /* ignore region specifiers */ flag &= ~(__GFP_HIGHMEM); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index d543e4179c1..2248a1999c6 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -257,6 +257,13 @@ static void *eeh_dump_pe_log(void *data, void *flag) struct eeh_dev *edev, *tmp; size_t *plen = flag; + /* If the PE's config space is blocked, 0xFF's will be + * returned. It's pointless to collect the log in this + * case. + */ + if (pe->state & EEH_PE_CFG_BLOCKED) + return NULL; + eeh_pe_for_each_dev(pe, edev, tmp) *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen, EEH_PCI_REGS_LOG_LEN - *plen); @@ -673,18 +680,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat switch (state) { case pcie_deassert_reset: eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); break; case pcie_hot_reset: - eeh_pe_state_mark(pe, EEH_PE_RESET); + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); eeh_ops->reset(pe, EEH_RESET_HOT); break; case pcie_warm_reset: - eeh_pe_state_mark(pe, EEH_PE_RESET); + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); break; default: - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); return -EINVAL; }; @@ -1523,7 +1530,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option) switch (option) { case EEH_RESET_DEACTIVATE: ret = eeh_ops->reset(pe, option); - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); if (ret) break; @@ -1538,7 +1545,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option) */ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); - eeh_pe_state_mark(pe, EEH_PE_RESET); + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); ret = eeh_ops->reset(pe, option); break; default: diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 3fd514f8e4b..6535936bdf2 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -528,13 +528,13 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_report_error, &result); /* Issue reset */ - eeh_pe_state_mark(pe, EEH_PE_RESET); + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); ret = eeh_reset_pe(pe); if (ret) { - eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED); return ret; } - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); /* Unfreeze the PE */ ret = eeh_clear_pe_frozen_state(pe, true); @@ 
-601,10 +601,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) * config accesses. So we prefer to block them. However, controlled * PCI config accesses initiated from EEH itself are allowed. */ - eeh_pe_state_mark(pe, EEH_PE_RESET); + eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); rc = eeh_reset_pe(pe); if (rc) { - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); return rc; } @@ -613,7 +613,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) /* Restore PE */ eeh_ops->configure_bridge(pe); eeh_pe_restore_bars(pe); - eeh_pe_state_clear(pe, EEH_PE_RESET); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); /* Clear frozen state */ rc = eeh_clear_pe_frozen_state(pe, false); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 53dd0915e69..5a63e2b0f65 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -525,7 +525,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag) pe->state |= state; /* Offline PCI devices if applicable */ - if (state != EEH_PE_ISOLATED) + if (!(state & EEH_PE_ISOLATED)) return NULL; eeh_pe_for_each_dev(pe, edev, tmp) { @@ -534,6 +534,10 @@ static void *__eeh_pe_state_mark(void *data, void *flag) pdev->error_state = pci_channel_io_frozen; } + /* Block PCI config access if required */ + if (pe->state & EEH_PE_CFG_RESTRICTED) + pe->state |= EEH_PE_CFG_BLOCKED; + return NULL; } @@ -611,6 +615,10 @@ static void *__eeh_pe_state_clear(void *data, void *flag) pdev->error_state = pci_channel_io_normal; } + /* Unblock PCI config access if required */ + if (pe->state & EEH_PE_CFG_RESTRICTED) + pe->state &= ~EEH_PE_CFG_BLOCKED; + return NULL; } diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index f19b1e5cb06..1ceecdda810 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev, return -ENODEV; state = eeh_ops->get_state(edev->pe, NULL); - return sprintf(buf, "%0x08x %0x08x\n", + return sprintf(buf, "0x%08x 0x%08x\n", state, edev->pe->state); } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 5bbd1bc8c3b..0905c8da90f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite) 3: #endif bl save_nvgprs + /* + * Use a non volatile GPR to save and restore our thread_info flags + * across the call to restore_interrupts. + */ + mr r30,r4 bl restore_interrupts + mr r4,r30 addi r3,r1,STACK_FRAME_OVERHEAD bl do_notify_resume b ret_from_except diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 050f79a4a16..72e783ea068 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1270,11 +1270,6 @@ hmi_exception_early: addi r3,r1,STACK_FRAME_OVERHEAD bl hmi_exception_realmode /* Windup the stack. */ - /* Clear MSR_RI before setting SRR0 and SRR1. 
*/ - li r0,MSR_RI - mfmsr r9 /* get MSR value */ - andc r9,r9,r0 - mtmsrd r9,1 /* Clear MSR_RI */ /* Move original HSRR0 and HSRR1 into the respective regs */ ld r9,_MSR(r1) mtspr SPRN_HSRR1,r9 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 742694c1d85..26d091a1a54 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, const __be32 *sections; int i, num_sections; int size; - const int *token; + const __be32 *token; if (depth != 1 || strcmp(uname, "rtas") != 0) return 0; @@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, return 1; fw_dump.fadump_supported = 1; - fw_dump.ibm_configure_kernel_dump = *token; + fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token); /* * The 'ibm,kernel-dump' rtas node is present only if there is @@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, memset(fdm, 0, sizeof(struct fadump_mem_struct)); addr = addr & PAGE_MASK; - fdm->header.dump_format_version = 0x00000001; - fdm->header.dump_num_sections = 3; + fdm->header.dump_format_version = cpu_to_be32(0x00000001); + fdm->header.dump_num_sections = cpu_to_be16(3); fdm->header.dump_status_flag = 0; fdm->header.offset_first_dump_section = - (u32)offsetof(struct fadump_mem_struct, cpu_state_data); + cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data)); /* * Fields for disk dump option. @@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, /* Kernel dump sections */ /* cpu state data section. */ - fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG; - fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA; + fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); + fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA); fdm->cpu_state_data.source_address = 0; - fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size; - fdm->cpu_state_data.destination_address = addr; + fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size); + fdm->cpu_state_data.destination_address = cpu_to_be64(addr); addr += fw_dump.cpu_state_data_size; /* hpte region section */ - fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG; - fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION; + fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); + fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION); fdm->hpte_region.source_address = 0; - fdm->hpte_region.source_len = fw_dump.hpte_region_size; - fdm->hpte_region.destination_address = addr; + fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size); + fdm->hpte_region.destination_address = cpu_to_be64(addr); addr += fw_dump.hpte_region_size; /* RMA region section */ - fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG; - fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION; - fdm->rmr_region.source_address = RMA_START; - fdm->rmr_region.source_len = fw_dump.boot_memory_size; - fdm->rmr_region.destination_address = addr; + fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG); + fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION); + fdm->rmr_region.source_address = cpu_to_be64(RMA_START); + fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size); + fdm->rmr_region.destination_address = cpu_to_be64(addr); addr += fw_dump.boot_memory_size; return addr; @@ -272,7 +272,7 @@ int __init 
fadump_reserve_mem(void) * first kernel. */ if (fdm_active) - fw_dump.boot_memory_size = fdm_active->rmr_region.source_len; + fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len); else fw_dump.boot_memory_size = fadump_calculate_reserve_size(); @@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void) (unsigned long)(base >> 20)); fw_dump.fadumphdr_addr = - fdm_active->rmr_region.destination_address + - fdm_active->rmr_region.source_len; + be64_to_cpu(fdm_active->rmr_region.destination_address) + + be64_to_cpu(fdm_active->rmr_region.source_len); pr_debug("fadumphdr_addr = %p\n", (void *) fw_dump.fadumphdr_addr); } else { @@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs) { memset(regs, 0, sizeof(struct pt_regs)); - while (reg_entry->reg_id != REG_ID("CPUEND")) { - fadump_set_regval(regs, reg_entry->reg_id, - reg_entry->reg_value); + while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) { + fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id), + be64_to_cpu(reg_entry->reg_value)); reg_entry++; } reg_entry++; @@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) if (!fdm->cpu_state_data.bytes_dumped) return -EINVAL; - addr = fdm->cpu_state_data.destination_address; + addr = be64_to_cpu(fdm->cpu_state_data.destination_address); vaddr = __va(addr); reg_header = vaddr; - if (reg_header->magic_number != REGSAVE_AREA_MAGIC) { + if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) { printk(KERN_ERR "Unable to read register save area.\n"); return -ENOENT; } pr_debug("--------CPU State Data------------\n"); - pr_debug("Magic Number: %llx\n", reg_header->magic_number); - pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset); + pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number)); + pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset)); - vaddr += reg_header->num_cpu_offset; - num_cpus = *((u32 *)(vaddr)); + vaddr += be32_to_cpu(reg_header->num_cpu_offset); + num_cpus = be32_to_cpu(*((__be32 *)(vaddr))); pr_debug("NumCpus : %u\n", num_cpus); vaddr += sizeof(u32); reg_entry = (struct fadump_reg_entry *)vaddr; @@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) fdh = __va(fw_dump.fadumphdr_addr); for (i = 0; i < num_cpus; i++) { - if (reg_entry->reg_id != REG_ID("CPUSTRT")) { + if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) { printk(KERN_ERR "Unable to read CPU state data\n"); rc = -ENOENT; goto error_out; } /* Lower 4 bytes of reg_value contains logical cpu id */ - cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK; + cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK; if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { SKIP_TO_NEXT_CPU(reg_entry); continue; @@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active) return -EINVAL; /* Check if the dump data is valid. 
*/ - if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) || + if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) || (fdm_active->cpu_state_data.error_flags != 0) || (fdm_active->rmr_region.error_flags != 0)) { printk(KERN_ERR "Dump taken by platform is not valid\n"); @@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void) static inline unsigned long fadump_relocate(unsigned long paddr) { if (paddr > RMA_START && paddr < fw_dump.boot_memory_size) - return fdm.rmr_region.destination_address + paddr; + return be64_to_cpu(fdm.rmr_region.destination_address) + paddr; else return paddr; } @@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp) * to the specified destination_address. Hence set * the correct offset. */ - phdr->p_offset = fdm.rmr_region.destination_address; + phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address); } phdr->p_paddr = mbase; @@ -951,7 +951,7 @@ static void register_fadump(void) fadump_setup_crash_memory_ranges(); - addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len; + addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len); /* Initialize fadump crash info header. */ addr = init_fadump_header(addr); vaddr = __va(addr); @@ -1023,7 +1023,7 @@ void fadump_cleanup(void) /* Invalidate the registration only if dump is active. */ if (fw_dump.dump_active) { init_fadump_mem_struct(&fdm, - fdm_active->cpu_state_data.destination_address); + be64_to_cpu(fdm_active->cpu_state_data.destination_address)); fadump_invalidate_dump(&fdm); } } @@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void) return; } - destination_address = fdm_active->cpu_state_data.destination_address; + destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address); fadump_cleanup(); mutex_unlock(&fadump_mutex); @@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private) seq_printf(m, "CPU : [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", - fdm_ptr->cpu_state_data.destination_address, - fdm_ptr->cpu_state_data.destination_address + - fdm_ptr->cpu_state_data.source_len - 1, - fdm_ptr->cpu_state_data.source_len, - fdm_ptr->cpu_state_data.bytes_dumped); + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address), + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) + + be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1, + be64_to_cpu(fdm_ptr->cpu_state_data.source_len), + be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped)); seq_printf(m, "HPTE: [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", - fdm_ptr->hpte_region.destination_address, - fdm_ptr->hpte_region.destination_address + - fdm_ptr->hpte_region.source_len - 1, - fdm_ptr->hpte_region.source_len, - fdm_ptr->hpte_region.bytes_dumped); + be64_to_cpu(fdm_ptr->hpte_region.destination_address), + be64_to_cpu(fdm_ptr->hpte_region.destination_address) + + be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, + be64_to_cpu(fdm_ptr->hpte_region.source_len), + be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); seq_printf(m, "DUMP: [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", - fdm_ptr->rmr_region.destination_address, - fdm_ptr->rmr_region.destination_address + - fdm_ptr->rmr_region.source_len - 1, - fdm_ptr->rmr_region.source_len, - fdm_ptr->rmr_region.bytes_dumped); + be64_to_cpu(fdm_ptr->rmr_region.destination_address), + be64_to_cpu(fdm_ptr->rmr_region.destination_address) + + be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1, + 
be64_to_cpu(fdm_ptr->rmr_region.source_len), + be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); if (!fdm_active || (fw_dump.reserve_dump_area_start == - fdm_ptr->cpu_state_data.destination_address)) + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address))) goto out; /* Dump is active. Show reserved memory region. */ @@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private) " : [%#016llx-%#016llx] %#llx bytes, " "Dumped: %#llx\n", (unsigned long long)fw_dump.reserve_dump_area_start, - fdm_ptr->cpu_state_data.destination_address - 1, - fdm_ptr->cpu_state_data.destination_address - + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1, + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - fw_dump.reserve_dump_area_start, - fdm_ptr->cpu_state_data.destination_address - + be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - fw_dump.reserve_dump_area_start); out: if (fdm_active) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 8eb857f216c..c14383575fe 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -466,7 +466,7 @@ static inline void check_stack_overflow(void) #ifdef CONFIG_DEBUG_STACKOVERFLOW long sp; - sp = __get_SP() & (THREAD_SIZE-1); + sp = current_stack_pointer() & (THREAD_SIZE-1); /* check for stack overflow: is there less than 2KB free? */ if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 7ce26d45777..0d432194c01 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S @@ -114,3 +114,7 @@ _GLOBAL(longjmp) mtlr r0 mr r3,r4 blr + +_GLOBAL(current_stack_pointer) + PPC_LL r3,0(r1) + blr diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 155013da27e..b15194e2c5f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus) } EXPORT_SYMBOL(pcibus_to_node); #endif - -static void quirk_radeon_32bit_msi(struct pci_dev *dev) -{ - struct pci_dn *pdn = pci_get_pdn(dev); - - if (pdn) - pdn->force_32bit_msi = true; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi); diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index c4dfff6c271..202963ee013 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -41,3 +41,5 @@ EXPORT_SYMBOL(giveup_spe); #ifdef CONFIG_EPAPR_PARAVIRT EXPORT_SYMBOL(epapr_hypercall_start); #endif + +EXPORT_SYMBOL(current_stack_pointer); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index aa1df89c8b2..923cd2daba8 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1545,7 +1545,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) tsk = current; if (sp == 0) { if (tsk == current) - asm("mr %0,1" : "=r" (sp)); + sp = current_stack_pointer(); else sp = tsk->thread.ksp; } diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index c168337aef9..7c55b86206b 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -66,6 +66,11 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) return PCIBIOS_DEVICE_NOT_FOUND; if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; +#ifdef CONFIG_EEH + if (pdn->edev && pdn->edev->pe && + (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED)) + 
return PCIBIOS_SET_FAILED; +#endif addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; @@ -90,9 +95,6 @@ static int rtas_pci_read_config(struct pci_bus *bus, struct device_node *busdn, *dn; struct pci_dn *pdn; bool found = false; -#ifdef CONFIG_EEH - struct eeh_dev *edev; -#endif int ret; /* Search only direct children of the bus */ @@ -109,11 +111,6 @@ static int rtas_pci_read_config(struct pci_bus *bus, if (!found) return PCIBIOS_DEVICE_NOT_FOUND; -#ifdef CONFIG_EEH - edev = of_node_to_eeh_dev(dn); - if (edev && edev->pe && edev->pe->state & EEH_PE_RESET) - return PCIBIOS_DEVICE_NOT_FOUND; -#endif ret = rtas_read_config(pdn, where, size, val); if (*val == EEH_IO_ERROR_VALUE(size) && @@ -132,6 +129,11 @@ int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) return PCIBIOS_DEVICE_NOT_FOUND; if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; +#ifdef CONFIG_EEH + if (pdn->edev && pdn->edev->pe && + (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED)) + return PCIBIOS_SET_FAILED; +#endif addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; @@ -155,10 +157,6 @@ static int rtas_pci_write_config(struct pci_bus *bus, struct device_node *busdn, *dn; struct pci_dn *pdn; bool found = false; -#ifdef CONFIG_EEH - struct eeh_dev *edev; -#endif - int ret; /* Search only direct children of the bus */ busdn = pci_bus_to_OF_node(bus); @@ -173,14 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus, if (!found) return PCIBIOS_DEVICE_NOT_FOUND; -#ifdef CONFIG_EEH - edev = of_node_to_eeh_dev(dn); - if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET)) - return PCIBIOS_DEVICE_NOT_FOUND; -#endif - ret = rtas_write_config(pdn, where, size, val); - return ret; + return rtas_write_config(pdn, where, size, val); } static struct pci_ops rtas_pci_ops = { diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index cd07d79ad21..4f3cfe1b6a3 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -522,36 +522,36 @@ void __init setup_system(void) smp_release_cpus(); #endif - printk("Starting Linux PPC64 %s\n", init_utsname()->version); + pr_info("Starting Linux PPC64 %s\n", init_utsname()->version); - printk("-----------------------------------------------------\n"); - printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); - printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); + pr_info("-----------------------------------------------------\n"); + pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); + pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); if (ppc64_caches.dline_size != 0x80) - printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); + pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); if (ppc64_caches.iline_size != 0x80) - printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size); + pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size); - printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); - printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); - printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); - printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, + pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); + pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); + pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); + pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, cur_cpu_spec->cpu_user_features2); - printk("mmu_features = 0x%08x\n", 
cur_cpu_spec->mmu_features); - printk("firmware_features = 0x%016lx\n", powerpc_firmware_features); + pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); + pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features); #ifdef CONFIG_PPC_STD_MMU_64 if (htab_address) - printk("htab_address = 0x%p\n", htab_address); + pr_info("htab_address = 0x%p\n", htab_address); - printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); + pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask); #endif if (PHYSICAL_START > 0) - printk("physical_start = 0x%llx\n", + pr_info("physical_start = 0x%llx\n", (unsigned long long)PHYSICAL_START); - printk("-----------------------------------------------------\n"); + pr_info("-----------------------------------------------------\n"); DBG(" <- setup_system()\n"); } diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 3d30ef1038e..ea43a347a10 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -50,7 +50,7 @@ void save_stack_trace(struct stack_trace *trace) { unsigned long sp; - asm("mr %0,1" : "=r" (sp)); + sp = current_stack_pointer(); save_context_stack(trace, sp, current, 1); } diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S index 23eb9a9441b..c62be60c727 100644 --- a/arch/powerpc/kernel/vdso32/getcpu.S +++ b/arch/powerpc/kernel/vdso32/getcpu.S @@ -30,8 +30,8 @@ V_FUNCTION_BEGIN(__kernel_getcpu) .cfi_startproc mfspr r5,SPRN_SPRG_VDSO_READ - cmpdi cr0,r3,0 - cmpdi cr1,r4,0 + cmpwi cr0,r3,0 + cmpwi cr1,r4,0 clrlwi r6,r5,16 rlwinm r7,r5,16,31-15,31-0 beq cr0,1f diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 0f9939e693d..5a236f082c7 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) u64 vsid; int psize, ssize; - slb->esid = (ea & ESID_MASK) | SLB_ESID_V; - switch (REGION_ID(ea)) { case USER_REGION_ID: pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); @@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) vsid |= mmu_psize_defs[psize].sllp | ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); + slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V; slb->vsid = vsid; return 0; diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index cad68ff8eca..415a51b028b 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -103,7 +103,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM; /* * Check for command-line options that affect what MMU_init will do. */ -void MMU_setup(void) +void __init MMU_setup(void) { /* Check for nobats option (used in mapin_ram). */ if (strstr(boot_command_line, "nobats")) { diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 649666d5d1c..b9d1dfdbe5b 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -8,6 +8,8 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) "numa: " fmt + #include <linux/threads.h> #include <linux/bootmem.h> #include <linux/init.h> @@ -1153,6 +1155,22 @@ static int __init early_numa(char *p) } early_param("numa", early_numa); +static bool topology_updates_enabled = true; + +static int __init early_topology_updates(char *p) +{ + if (!p) + return 0; + + if (!strcmp(p, "off")) { + pr_info("Disabling topology updates\n"); + topology_updates_enabled = false; + } + + return 0; +} +early_param("topology_updates", early_topology_updates); + #ifdef CONFIG_MEMORY_HOTPLUG /* * Find the node associated with a hot added memory section for @@ -1442,8 +1460,11 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity) long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; u64 flags = 1; int hwcpu = get_hard_smp_processor_id(cpu); + int i; rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); + for (i = 0; i < 6; i++) + retbuf[i] = cpu_to_be64(retbuf[i]); vphn_unpack_associativity(retbuf, associativity); return rc; @@ -1488,11 +1509,14 @@ static int update_cpu_topology(void *data) cpu = smp_processor_id(); for (update = data; update; update = update->next) { + int new_nid = update->new_nid; if (cpu != update->cpu) continue; - unmap_cpu_from_node(update->cpu); - map_cpu_to_node(update->cpu, update->new_nid); + unmap_cpu_from_node(cpu); + map_cpu_to_node(cpu, new_nid); + set_cpu_numa_node(cpu, new_nid); + set_cpu_numa_mem(cpu, local_memory_node(new_nid)); vdso_getcpu_init(); } @@ -1539,6 +1563,9 @@ int arch_update_cpu_topology(void) struct device *dev; int weight, new_nid, i = 0; + if (!prrn_enabled && !vphn_enabled) + return 0; + weight = cpumask_weight(&cpu_associativity_changes_mask); if (!weight) return 0; @@ -1592,6 +1619,15 @@ int arch_update_cpu_topology(void) cpu = cpu_last_thread_sibling(cpu); } + pr_debug("Topology update for the following CPUs:\n"); + if (cpumask_weight(&updated_cpus)) { + for (ud = &updates[0]; ud; ud = ud->next) { + pr_debug("cpu %d moving from node %d " + "to %d\n", ud->cpu, + ud->old_nid, ud->new_nid); + } + } + /* * In cases where we have nothing to update (because the updates list * is too short or because the new topology is same as the old one), @@ -1800,8 +1836,12 @@ static const struct file_operations topology_ops = { static int topology_update_init(void) { - start_topology_update(); - proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops); + /* Do not poll for changes if disabled at boot */ + if (topology_updates_enabled) + start_topology_update(); + + if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops)) + return -ENOMEM; return 0; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 8d7bda94d19..ded0ea1afde 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, slice_convert(mm, mask, psize); } +#ifdef CONFIG_HUGETLB_PAGE /* * is_hugepage_only_range() is used by generic code to verify whether * a normal mmap mapping (non hugetlbfs) is valid on a given area. 
@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, #endif return !slice_check_fit(mask, available); } - +#endif diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 6c8710dd90c..dba34088da2 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags) return 0; } -static int h_24x7_event_idx(struct perf_event *event) -{ - return 0; -} - static struct pmu h_24x7_pmu = { .task_ctx_nr = perf_invalid_context, @@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = { .start = h_24x7_event_start, .stop = h_24x7_event_stop, .read = h_24x7_event_update, - .event_idx = h_24x7_event_idx, }; static int hv_24x7_init(void) diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 15fc76c9302..a051fe946c6 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event) return 0; } -static int h_gpci_event_idx(struct perf_event *event) -{ - return 0; -} - static struct pmu h_gpci_pmu = { .task_ctx_nr = perf_invalid_context, @@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = { .start = h_gpci_event_start, .stop = h_gpci_event_stop, .read = h_gpci_event_update, - .event_idx = h_gpci_event_idx, }; static int hv_gpci_init(void) diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 426814a2ede..eba9cb10619 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -373,7 +373,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe) * moving forward, we have to return operational * state during PE reset. */ - if (pe->state & EEH_PE_RESET) { + if (pe->state & EEH_PE_CFG_BLOCKED) { result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE | EEH_STATE_MMIO_ENABLED | diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 3e89cbf5588..1d19e7917d7 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -169,6 +169,26 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) } /* + * If the PE contains any one of the following adapters, the + * PCI config space can't be accessed when dumping EEH log. + * Otherwise, we will run into fenced PHB caused by shortage + * of outbound credits in the adapter. The PCI config access + * should be blocked until PE reset. MMIO access is dropped + * by hardware certainly. In order to drop PCI config requests, + * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which + * will be checked in the backend for PE state retrieval. If + * the PE becomes frozen for the first time and the flag has + * been set for the PE, we will set EEH_PE_CFG_BLOCKED for + * that PE to block its config space. + * + * Broadcom Austin 4-port NICs (14e4:1657) + * Broadcom Shiner 2-port 10G NICs (14e4:168e) + */ + if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) || + (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e)) + edev->pe->state |= EEH_PE_CFG_RESTRICTED; + + /* * Cache the PE primary bus, which can't be fetched when * full hotplug is in progress.
In that case, all child * PCI devices of the PE are expected to be removed prior @@ -383,6 +403,39 @@ static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func, return ret; } +static inline bool powernv_eeh_cfg_blocked(struct device_node *dn) +{ + struct eeh_dev *edev = of_node_to_eeh_dev(dn); + + if (!edev || !edev->pe) + return false; + + if (edev->pe->state & EEH_PE_CFG_BLOCKED) + return true; + + return false; +} + +static int powernv_eeh_read_config(struct device_node *dn, + int where, int size, u32 *val) +{ + if (powernv_eeh_cfg_blocked(dn)) { + *val = 0xFFFFFFFF; + return PCIBIOS_SET_FAILED; + } + + return pnv_pci_cfg_read(dn, where, size, val); +} + +static int powernv_eeh_write_config(struct device_node *dn, + int where, int size, u32 val) +{ + if (powernv_eeh_cfg_blocked(dn)) + return PCIBIOS_SET_FAILED; + + return pnv_pci_cfg_write(dn, where, size, val); +} + /** * powernv_eeh_next_error - Retrieve next EEH error to handle * @pe: Affected PE @@ -440,8 +493,8 @@ static struct eeh_ops powernv_eeh_ops = { .get_log = powernv_eeh_get_log, .configure_bridge = powernv_eeh_configure_bridge, .err_inject = powernv_eeh_err_inject, - .read_config = pnv_pci_cfg_read, - .write_config = pnv_pci_cfg_write, + .read_config = powernv_eeh_read_config, + .write_config = powernv_eeh_write_config, .next_error = powernv_eeh_next_error, .restore_config = powernv_eeh_restore_config }; diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 5e1ed1575aa..b322bfb5134 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) }; /* Print things out */ - if (hmi_evt->version != OpalHMIEvt_V1) { + if (hmi_evt->version < OpalHMIEvt_V1) { pr_err("HMI Interrupt, Unknown event version %d !\n", hmi_evt->version); return; diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index dd2c285ad17..e4169d68cb3 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, { struct lpc_debugfs_entry *lpc = filp->private_data; u32 data, pos, len, todo; - __be32 bedata; int rc; if (!access_ok(VERIFY_WRITE, ubuf, count)) @@ -214,18 +213,57 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, len = 2; } rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, - &bedata, len); + &data, len); if (rc) return -ENXIO; - data = be32_to_cpu(bedata); + + /* + * Now there is some trickery with the data returned by OPAL + * as it's the desired data right justified in a 32-bit BE + * word. + * + * This is a very bad interface and I'm to blame for it :-( + * + * So we can't just apply a 32-bit swap to what comes from OPAL, + * because user space expects the *bytes* to be in their proper + * respective positions (ie, LPC position). + * + * So what we really want to do here is to shift data right + * appropriately on a LE kernel. + * + * IE. 
If the LPC transaction has bytes B0, B1, B2 and B3 in that + * order, we have in memory written to by OPAL at the "data" + * pointer: + * + * Bytes: OPAL "data" LE "data" + * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0 + * 16-bit: B0 B1 0000B0B1 B1B00000 + * 8-bit: B0 000000B0 B0000000 + * + * So a BE kernel will have the leftmost of the above in the MSB + * and rightmost in the LSB and can just then "cast" the u32 "data" + * down to the appropriate quantity and write it. + * + * However, an LE kernel can't. It doesn't need to swap because a + * load from data followed by a store to user are going to preserve + * the byte ordering which is the wire byte order which is what the + * user wants, but in order to "crop" to the right size, we need to + * shift right first. + */ switch(len) { case 4: rc = __put_user((u32)data, (u32 __user *)ubuf); break; case 2: +#ifdef __LITTLE_ENDIAN__ + data >>= 16; +#endif rc = __put_user((u16)data, (u16 __user *)ubuf); break; default: +#ifdef __LITTLE_ENDIAN__ + data >>= 24; +#endif rc = __put_user((u8)data, (u8 __user *)ubuf); break; } @@ -265,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf, else if (todo > 1 && (pos & 1) == 0) len = 2; } + + /* + * Similarly to the read case, we have some trickery here but + * it's different to handle. We need to pass the value to OPAL in + * a register whose layout depends on the access size. We want + * to reproduce the memory layout of the user, however we aren't + * doing a load from user and a store to another memory location + * which would achieve that. Here we pass the value to OPAL via + * a register which is expected to contain the "BE" interpretation + * of the byte sequence. IE: for a 32-bit access, byte 0 should be + * in the MSB. So here we *do* need to byteswap on LE. + * + * User bytes: LE "data" OPAL "data" + * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3 + * 16-bit: B0 B1 0000B1B0 0000B0B1 + * 8-bit: B0 000000B0 000000B0 + */ switch(len) { case 4: rc = __get_user(data, (u32 __user *)ubuf); + data = cpu_to_be32(data); break; case 2: rc = __get_user(data, (u16 __user *)ubuf); + data = cpu_to_be16(data); break; default: rc = __get_user(data, (u8 __user *)ubuf); diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index e9e2450c1fd..feb549aa3ee 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \ */ #define OPAL_CALL(name, token) \ - _GLOBAL(name); \ + _GLOBAL_TOC(name); \ mflr r0; \ std r0,16(r1); \ li r0,token; \ diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index b642b0562f5..d019b081df9 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -194,6 +194,27 @@ static int __init opal_register_exception_handlers(void) * fwnmi area at 0x7000 to provide the glue space to OPAL */ glue = 0x7000; + + /* + * Check if we are running on newer firmware that exports + * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch + * the HMI interrupt and we catch it directly in Linux. + * + * For older firmware (i.e currently released POWER8 System Firmware + * as of today <= SV810_087), we fallback to old behavior and let OPAL + * patch the HMI vector and handle it inside OPAL firmware. + * + * For newer firmware (in development/yet to be released) we will + * start catching/handling HMI directly in Linux. 
+ */ + if (!opal_check_token(OPAL_HANDLE_HMI)) { + pr_info("opal: Old firmware detected, OPAL handles HMIs.\n"); + opal_register_exception_handler( + OPAL_HYPERVISOR_MAINTENANCE_HANDLER, + 0, glue); + glue += 128; + } + opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); #endif diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 468a0f23c7f..3ba435ec3dc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); - struct pci_dn *pdn = pci_get_pdn(dev); unsigned int xive_num = hwirq - phb->msi_base; __be32 data; int rc; @@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, return -ENXIO; /* Force 32-bit MSI on some broken devices */ - if (pdn && pdn->force_32bit_msi) + if (dev->no_64bit_msi) is_64 = 0; /* Assign XIVE to PE */ @@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, if (is_kdump_kernel()) { pr_info(" Issue PHB reset ...\n"); ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); - ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); + ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); } /* Configure M64 window */ diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b3ca77ddf36..4b20f2c6b3b 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(pdev); struct msi_desc *entry; struct msi_msg msg; int hwirq; @@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) return -ENODEV; - if (pdn && pdn->force_32bit_msi && !phb->msi32_support) + if (pdev->no_64bit_msi && !phb->msi32_support) return -ENODEV; list_for_each_entry(entry, &pdev->msi_list, list) { @@ -505,7 +504,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose, edev = of_node_to_eeh_dev(dn); if (edev) { if (edev->pe && - (edev->pe->state & EEH_PE_RESET)) + (edev->pe->state & EEH_PE_CFG_BLOCKED)) return false; if (edev->mode & EEH_DEV_REMOVED) diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index fdf01b660d5..c22bb1b4beb 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -25,11 +25,11 @@ #include <asm/rtas.h> struct cc_workarea { - u32 drc_index; - u32 zero; - u32 name_offset; - u32 prop_length; - u32 prop_offset; + __be32 drc_index; + __be32 zero; + __be32 name_offset; + __be32 prop_length; + __be32 prop_offset; }; void dlpar_free_cc_property(struct property *prop) @@ -49,11 +49,11 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) if (!prop) return NULL; - name = (char *)ccwa + ccwa->name_offset; + name = (char *)ccwa + be32_to_cpu(ccwa->name_offset); prop->name = kstrdup(name, GFP_KERNEL); - prop->length = ccwa->prop_length; - value = (char *)ccwa + ccwa->prop_offset; + prop->length = be32_to_cpu(ccwa->prop_length); + value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset); prop->value = kmemdup(value, prop->length, GFP_KERNEL); if (!prop->value) { 
dlpar_free_cc_property(prop); @@ -79,7 +79,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa, if (!dn) return NULL; - name = (char *)ccwa + ccwa->name_offset; + name = (char *)ccwa + be32_to_cpu(ccwa->name_offset); dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name); if (!dn->full_name) { kfree(dn); @@ -126,7 +126,7 @@ void dlpar_free_cc_nodes(struct device_node *dn) #define CALL_AGAIN -2 #define ERR_CFG_USE -9003 -struct device_node *dlpar_configure_connector(u32 drc_index, +struct device_node *dlpar_configure_connector(__be32 drc_index, struct device_node *parent) { struct device_node *dn; @@ -382,7 +382,7 @@ static int dlpar_online_cpu(struct device_node *dn) BUG_ON(get_cpu_current_state(cpu) != CPU_STATE_OFFLINE); cpu_maps_update_done(); - rc = cpu_up(cpu); + rc = device_online(get_cpu_device(cpu)); if (rc) goto out; cpu_maps_update_begin(); @@ -414,7 +414,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) if (!parent) return -ENODEV; - dn = dlpar_configure_connector(drc_index, parent); + dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); if (!dn) return -EINVAL; @@ -467,7 +467,7 @@ static int dlpar_offline_cpu(struct device_node *dn) if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); cpu_maps_update_done(); - rc = cpu_down(cpu); + rc = device_offline(get_cpu_device(cpu)); if (rc) goto out; cpu_maps_update_begin(); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index b174fa751d2..5c375f93c66 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -247,7 +247,7 @@ static int pseries_add_processor(struct device_node *np) unsigned int cpu; cpumask_var_t candidate_mask, tmp; int err = -ENOSPC, len, nthreads, i; - const u32 *intserv; + const __be32 *intserv; intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); if (!intserv) @@ -293,7 +293,7 @@ static int pseries_add_processor(struct device_node *np) for_each_cpu(cpu, tmp) { BUG_ON(cpu_present(cpu)); set_cpu_present(cpu, true); - set_hard_smp_processor_id(cpu, *intserv++); + set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++)); } err = 0; out_unlock: diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index de1ec54a2a5..e32e00976a9 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -30,7 +30,6 @@ #include <linux/mm.h> #include <linux/memblock.h> #include <linux/spinlock.h> -#include <linux/sched.h> /* for show_stack */ #include <linux/string.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -168,7 +167,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); printk("\ttce val = 0x%llx\n", tce ); - show_stack(current, (unsigned long *)__get_SP()); + dump_stack(); } tcenum++; @@ -257,7 +256,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\tnpages = 0x%llx\n", (u64)npages); printk("\ttce[0] val = 0x%llx\n", tcep[0]); - show_stack(current, (unsigned long *)__get_SP()); + dump_stack(); } return ret; } @@ -273,7 +272,7 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages printk("tce_free_pSeriesLP: plpar_tce_put failed. 
rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); - show_stack(current, (unsigned long *)__get_SP()); + dump_stack(); } tcenum++; @@ -292,7 +291,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n printk("\trc = %lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\tnpages = 0x%llx\n", (u64)npages); - show_stack(current, (unsigned long *)__get_SP()); + dump_stack(); } } @@ -307,7 +306,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc); printk("\tindex = 0x%llx\n", (u64)tbl->it_index); printk("\ttcenum = 0x%llx\n", (u64)tcenum); - show_stack(current, (unsigned long *)__get_SP()); + dump_stack(); } return tce_ret; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 8c509d5397c..f6880d2a40f 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -43,6 +43,7 @@ #include <asm/trace.h> #include <asm/firmware.h> #include <asm/plpar_wrappers.h> +#include <asm/fadump.h> #include "pseries.h" @@ -247,8 +248,17 @@ static void pSeries_lpar_hptab_clear(void) } #ifdef __LITTLE_ENDIAN__ - /* Reset exceptions to big endian */ - if (firmware_has_feature(FW_FEATURE_SET_MODE)) { + /* + * Reset exceptions to big endian. + * + * FIXME this is a hack for kexec, we need to reset the exception + * endian before starting the new kernel and this is a convenient place + * to do it. + * + * This is also called on boot when a fadump happens. In that case we + * must not change the exception endian mode. + */ + if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) { long rc; rc = pseries_big_endian_exceptions(); diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 8ab5add4ac8..8b909e94fd9 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) */ again: if (type == PCI_CAP_ID_MSI) { - if (pdn->force_32bit_msi) { + if (pdev->no_64bit_msi) { rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); if (rc < 0) { /* diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 361add62abf..1796c5438cc 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -56,7 +56,8 @@ extern void hvc_vio_init_early(void); /* Dynamic logical Partitioning/Mobility */ extern void dlpar_free_cc_nodes(struct device_node *); extern void dlpar_free_cc_property(struct property *); -extern struct device_node *dlpar_configure_connector(u32, struct device_node *); +extern struct device_node *dlpar_configure_connector(__be32, + struct device_node *); extern int dlpar_attach_node(struct device_node *); extern int dlpar_detach_node(struct device_node *); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index de40b48b460..da08ed08815 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, cascade_data->virq = virt_msir; msi->cascade_array[irq_index] = cascade_data; - ret = request_irq(virt_msir, fsl_msi_cascade, 0, + ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD, "fsl-msi-cascade", cascade_data); if (ret) { dev_err(&dev->dev, 
"failed to request_irq(%d), ret = %d\n", diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 0c75214b6f9..73b64c73505 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -145,59 +145,64 @@ void msi_bitmap_free(struct msi_bitmap *bmp) #ifdef CONFIG_MSI_BITMAP_SELFTEST -#define check(x) \ - if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); - static void __init test_basics(void) { struct msi_bitmap bmp; - int i, size = 512; + int rc, i, size = 512; /* Can't allocate a bitmap of 0 irqs */ - check(msi_bitmap_alloc(&bmp, 0, NULL) != 0); + WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0); /* of_node may be NULL */ - check(0 == msi_bitmap_alloc(&bmp, size, NULL)); + WARN_ON(msi_bitmap_alloc(&bmp, size, NULL)); /* Should all be free by default */ - check(0 == bitmap_find_free_region(bmp.bitmap, size, - get_count_order(size))); + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* With no node, there's no msi-available-ranges, so expect > 0 */ - check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0); /* Should all still be free */ - check(0 == bitmap_find_free_region(bmp.bitmap, size, - get_count_order(size))); + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* Check we can fill it up and then no more */ for (i = 0; i < size; i++) - check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); /* Should all be allocated */ - check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0); + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0); /* And if we free one we can then allocate another */ msi_bitmap_free_hwirqs(&bmp, size / 2, 1); - check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); + WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2); + + /* Free most of them for the alignment tests */ + msi_bitmap_free_hwirqs(&bmp, 3, size - 3); /* Check we get a naturally aligned offset */ - check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0); - check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 2); + WARN_ON(rc < 0 && rc % 2 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 4); + WARN_ON(rc < 0 && rc % 4 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 8); + WARN_ON(rc < 0 && rc % 8 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 9); + WARN_ON(rc < 0 && rc % 16 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 3); + WARN_ON(rc < 0 && rc % 4 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 7); + WARN_ON(rc < 0 && rc % 8 != 0); + rc = msi_bitmap_alloc_hwirqs(&bmp, 121); + WARN_ON(rc < 0 && rc % 128 != 0); msi_bitmap_free(&bmp); - /* Clients may check bitmap == NULL for "not-allocated" */ - check(bmp.bitmap == NULL); + /* Clients may WARN_ON bitmap == NULL for "not-allocated" */ + WARN_ON(bmp.bitmap != NULL); kfree(bmp.bitmap); } @@ -219,14 +224,13 @@ static void __init test_of_node(void) of_node_init(&of_node); of_node.full_name = node_name; - check(0 == msi_bitmap_alloc(&bmp, size, &of_node)); + 
WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node)); /* No msi-available-ranges, so expect > 0 */ - check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0); /* Should all still be free */ - check(0 == bitmap_find_free_region(bmp.bitmap, size, - get_count_order(size))); + WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size))); bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); /* Now create a fake msi-available-ranges property */ @@ -240,11 +244,11 @@ static void __init test_of_node(void) of_node.properties = ∝ /* msi-available-ranges, so expect == 0 */ - check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0); + WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp)); /* Check we got the expected result */ - check(0 == bitmap_parselist(expected_str, expected, size)); - check(bitmap_equal(expected, bmp.bitmap, size)); + WARN_ON(bitmap_parselist(expected_str, expected, size)); + WARN_ON(!bitmap_equal(expected, bmp.bitmap, size)); msi_bitmap_free(&bmp); kfree(bmp.bitmap); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index b988b5addf8..c8efbb37d6e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -293,10 +293,10 @@ static inline void disable_surveillance(void) args.token = rtas_token("set-indicator"); if (args.token == RTAS_UNKNOWN_SERVICE) return; - args.nargs = 3; - args.nret = 1; + args.nargs = cpu_to_be32(3); + args.nret = cpu_to_be32(1); args.rets = &args.args[3]; - args.args[0] = SURVEILLANCE_TOKEN; + args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); args.args[1] = 0; args.args[2] = 0; enter_rtas(__pa(&args)); diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 9d94fdd9f52..9432d0f202e 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y @@ -245,6 +244,7 @@ CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_NF_TABLES_ARP=m +CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -252,11 +252,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -270,6 +265,7 @@ CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_TABLES_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -286,9 +282,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m @@ -374,14 +367,13 @@ CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m @@ -427,7 +419,6 @@ CONFIG_VIRTIO_NET=m CONFIG_NLMON=m 
CONFIG_VHOST_NET=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -481,14 +472,14 @@ CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m +CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_XFS_DEBUG=y CONFIG_GFS2_FS=m CONFIG_OCFS2_FS=m -CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m CONFIG_FANOTIFY=y @@ -574,7 +565,6 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_RT_MUTEX_TESTER=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y @@ -600,8 +590,13 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LATENCYTOP=y CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_KPROBE_EVENT is not set +CONFIG_UPROBE_EVENT=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_KPROBES_SANITY_TEST=y @@ -609,7 +604,10 @@ CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y +CONFIG_TEST_STRING_HELPERS=y +CONFIG_TEST_KSTRTOX=y CONFIG_DMA_API_DEBUG=y +CONFIG_TEST_BPF=m # CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m @@ -673,12 +671,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y CONFIG_CORDIC=m CONFIG_CMM=m CONFIG_APPLDATA_BASE=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 90f514baa37..219dca6ea92 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y @@ -243,6 +242,7 @@ CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_NF_TABLES_ARP=m +CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -250,11 +250,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -268,6 +263,7 @@ CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_TABLES_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -284,9 +280,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m @@ -371,14 +364,13 @@ CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m 
CONFIG_SCSI_SRP_ATTRS=m CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m @@ -424,7 +416,6 @@ CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_VHOST_NET=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -478,13 +469,13 @@ CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m +CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_GFS2_FS=m CONFIG_OCFS2_FS=m -CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m CONFIG_FANOTIFY=y @@ -626,12 +617,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y CONFIG_CORDIC=m CONFIG_CMM=m CONFIG_APPLDATA_BASE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 13559d32af6..822c2f2e0c2 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -33,7 +33,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y @@ -241,6 +240,7 @@ CONFIG_NF_TABLES_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m CONFIG_NFT_CHAIN_NAT_IPV4=m CONFIG_NF_TABLES_ARP=m +CONFIG_NF_NAT_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -248,11 +248,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT_IPV4=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -266,6 +261,7 @@ CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NF_TABLES_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NF_NAT_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -282,9 +278,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m CONFIG_IP6_NF_RAW=m CONFIG_IP6_NF_SECURITY=m -CONFIG_NF_NAT_IPV6=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m @@ -369,14 +362,13 @@ CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SAS_LIBSAS=m CONFIG_SCSI_SRP_ATTRS=m CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m @@ -422,7 +414,6 @@ CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_VHOST_NET=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set @@ -476,13 +467,13 @@ CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m +CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y CONFIG_GFS2_FS=m CONFIG_OCFS2_FS=m -CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m CONFIG_FANOTIFY=y @@ -550,8 +541,11 @@ CONFIG_TIMER_STATS=y 
CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_KPROBE_EVENT is not set +CONFIG_UPROBE_EVENT=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y @@ -618,12 +612,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m CONFIG_CRC8=m -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y CONFIG_CORDIC=m CONFIG_CMM=m CONFIG_APPLDATA_BASE=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index e376789f2d8..9d63051ebec 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -22,8 +22,8 @@ CONFIG_HZ_100=y CONFIG_CRASH_DUMP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SECCOMP is not set -# CONFIG_IUCV is not set CONFIG_NET=y +# CONFIG_IUCV is not set CONFIG_ATM=y CONFIG_ATM_LANE=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -36,9 +36,9 @@ CONFIG_ENCLOSURE_SERVICES=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_ENCLOSURE=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SRP_ATTRS=y CONFIG_ZFCP=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set @@ -75,12 +75,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set # CONFIG_STRICT_DEVMEM is not set -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y # CONFIG_PFAULT is not set # CONFIG_S390_HYPFS_FS is not set # CONFIG_VIRTUALIZATION is not set diff --git a/arch/s390/defconfig b/arch/s390/defconfig index fab35a8efa4..785c5f24d6f 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -92,10 +92,10 @@ CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=y CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=y CONFIG_NETDEVICES=y @@ -164,14 +164,13 @@ CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 940ac49198d..4197c89c52d 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -286,7 +286,8 @@ #define __NR_seccomp 348 #define __NR_getrandom 349 #define __NR_memfd_create 350 -#define NR_syscalls 351 +#define __NR_bpf 351 +#define NR_syscalls 352 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index faf6caa510d..c4f7a3d655b 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -217,3 +217,4 @@ COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, 
unsigned int, flags) +COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 51d14fe5eb9..ca1cabb3a96 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -121,6 +121,8 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, { struct ftrace_graph_ent trace; + if (unlikely(ftrace_graph_is_dead())) + goto out; if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 08e761318c1..b878f12a959 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags) perf_pmu_enable(event->pmu); } -static int cpumsf_pmu_event_idx(struct perf_event *event) -{ - return event->hw.idx; -} - CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); @@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = { .stop = cpumsf_pmu_stop, .read = cpumsf_pmu_read, - .event_idx = cpumsf_pmu_event_idx, .attr_groups = cpumsf_pmu_attr_groups, }; diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 6fe886ac2db..9f7087fd58d 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -359,3 +359,4 @@ SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ +SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 956f4f7a591..f6b3cd056ec 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -5,13 +5,13 @@ * Author(s): Jan Willeke, */ -#include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/uprobes.h> #include <linux/compat.h> #include <linux/kdebug.h> #include <asm/switch_to.h> #include <asm/facility.h> +#include <asm/kprobes.h> #include <asm/dis.h> #include "entry.h" diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 48c2206a395..5eec9afbb5b 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -19,6 +19,7 @@ .type __kernel_clock_gettime,@function __kernel_clock_gettime: .cfi_startproc + ahi %r15,-16 basr %r5,0 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ chi %r2,__CLOCK_REALTIME_COARSE @@ -34,8 +35,8 @@ __kernel_clock_gettime: 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 1b - stcke 24(%r15) /* Store TOD clock */ - lm %r0,%r1,25(%r15) + stcke 0(%r15) /* Store TOD clock */ + lm %r0,%r1,1(%r15) s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f @@ -70,6 +71,7 @@ __kernel_clock_gettime: 8: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 + ahi %r15,16 br %r14 /* CLOCK_MONOTONIC_COARSE */ @@ -96,8 +98,8 @@ __kernel_clock_gettime: 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? 
loop */ jnz 11b - stcke 24(%r15) /* Store TOD clock */ - lm %r0,%r1,25(%r15) + stcke 0(%r15) /* Store TOD clock */ + lm %r0,%r1,1(%r15) s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f @@ -132,11 +134,13 @@ __kernel_clock_gettime: 17: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ lhi %r2,0 + ahi %r15,16 br %r14 /* Fallback to system call */ 19: lhi %r1,__NR_clock_gettime svc 0 + ahi %r15,16 br %r14 20: .long 1000000000 diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index 60def5f562d..719de6186b2 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S @@ -19,6 +19,7 @@ .type __kernel_gettimeofday,@function __kernel_gettimeofday: .cfi_startproc + ahi %r15,-16 basr %r5,0 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ 1: ltr %r3,%r3 /* check if tz is NULL */ @@ -29,30 +30,30 @@ __kernel_gettimeofday: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 1b - stcke 24(%r15) /* Store TOD clock */ - lm %r0,%r1,25(%r15) + stcke 0(%r15) /* Store TOD clock */ + lm %r0,%r1,1(%r15) s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ - st %r0,24(%r15) + st %r0,0(%r15) l %r0,__VDSO_TK_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f a %r0,__VDSO_TK_MULT(%r5) -4: al %r0,24(%r15) +4: al %r0,0(%r15) al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ al %r1,__VDSO_XTIME_NSEC+4(%r5) brc 12,5f ahi %r0,1 -5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) +5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5) cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ jne 1b l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ srdl %r0,0(%r4) /* >> tk->shift */ - l %r4,24(%r15) /* get tv_sec from stack */ + l %r4,0(%r15) /* get tv_sec from stack */ basr %r5,0 6: ltr %r0,%r0 jnz 7f @@ -71,6 +72,7 @@ __kernel_gettimeofday: 9: srl %r0,6 st %r0,4(%r2) /* store tv->tv_usec */ 10: slr %r2,%r2 + ahi %r15,16 br %r14 11: .long 1000000000 12: .long 274877907 diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 9d9761f8e11..7699e735ae2 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -19,6 +19,7 @@ .type __kernel_clock_gettime,@function __kernel_clock_gettime: .cfi_startproc + aghi %r15,-16 larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME_COARSE je 4f @@ -37,10 +38,10 @@ __kernel_clock_gettime: 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 0b - stcke 48(%r15) /* Store TOD clock */ + stcke 0(%r15) /* Store TOD clock */ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ lg %r0,__VDSO_WTOM_SEC(%r5) - lg %r1,49(%r15) + lg %r1,1(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_WTOM_NSEC(%r5) @@ -56,6 +57,7 @@ __kernel_clock_gettime: 2: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 + aghi %r15,16 br %r14 /* CLOCK_MONOTONIC_COARSE */ @@ -82,9 +84,9 @@ __kernel_clock_gettime: 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? 
loop */ jnz 5b - stcke 48(%r15) /* Store TOD clock */ + stcke 0(%r15) /* Store TOD clock */ lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ - lg %r1,49(%r15) + lg %r1,1(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ @@ -101,6 +103,7 @@ __kernel_clock_gettime: 7: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ lghi %r2,0 + aghi %r15,16 br %r14 /* CLOCK_THREAD_CPUTIME_ID for this thread */ @@ -134,11 +137,13 @@ __kernel_clock_gettime: slgr %r4,%r0 /* r4 = tv_nsec */ stg %r4,8(%r3) lghi %r2,0 + aghi %r15,16 br %r14 /* Fallback to system call */ 12: lghi %r1,__NR_clock_gettime svc 0 + aghi %r15,16 br %r14 13: .quad 1000000000 diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S index 7a344995a97..6ce46707663 100644 --- a/arch/s390/kernel/vdso64/gettimeofday.S +++ b/arch/s390/kernel/vdso64/gettimeofday.S @@ -19,6 +19,7 @@ .type __kernel_gettimeofday,@function __kernel_gettimeofday: .cfi_startproc + aghi %r15,-16 larl %r5,_vdso_data 0: ltgr %r3,%r3 /* check if tz is NULL */ je 1f @@ -28,8 +29,8 @@ __kernel_gettimeofday: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 0b - stcke 48(%r15) /* Store TOD clock */ - lg %r1,49(%r15) + stcke 0(%r15) /* Store TOD clock */ + lg %r1,1(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ @@ -50,6 +51,7 @@ __kernel_gettimeofday: srlg %r0,%r0,6 stg %r0,8(%r2) /* store tv->tv_usec */ 4: lghi %r2,0 + aghi %r15,16 br %r14 5: .quad 1000000000 .long 274877907 diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 416f2a323ba..7f0089d9a4a 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -66,7 +66,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) clock = S390_lowcore.last_update_clock; asm volatile( " stpt %0\n" /* Store current cpu timer value */ +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + " stckf %1" /* Store current tod clock value */ +#else " stck %1" /* Store current tod clock value */ +#endif : "=m" (S390_lowcore.last_update_timer), "=m" (S390_lowcore.last_update_clock)); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c index c5d64a09971..ae90e1ae360 100644 --- a/arch/s390/lib/probes.c +++ b/arch/s390/lib/probes.c @@ -4,7 +4,7 @@ * Copyright IBM Corp. 
2014 */ -#include <linux/kprobes.h> +#include <asm/kprobes.h> #include <asm/dis.h> int probe_is_prohibited_opcode(u16 *insn) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 296b61a4af5..1b79ca67392 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -656,7 +656,7 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr) } pgste_set_unlock(ptep, pgste); out_pte: - pte_unmap_unlock(*ptep, ptl); + pte_unmap_unlock(ptep, ptl); } EXPORT_SYMBOL_GPL(__gmap_zap); @@ -943,7 +943,7 @@ retry: } if (!(pte_val(*ptep) & _PAGE_INVALID) && (pte_val(*ptep) & _PAGE_PROTECT)) { - pte_unmap_unlock(*ptep, ptl); + pte_unmap_unlock(ptep, ptl); if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { up_read(&mm->mmap_sem); return -EFAULT; @@ -974,7 +974,7 @@ retry: pgste_val(new) |= PGSTE_UC_BIT; pgste_set_unlock(ptep, new); - pte_unmap_unlock(*ptep, ptl); + pte_unmap_unlock(ptep, ptl); up_read(&mm->mmap_sem); return 0; } diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index 9139d14b9c5..538c10db353 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = { }; static struct resource scif0_resources[] = { - DEFINE_RES_MEM(0xfffffe80, 0x100), + DEFINE_RES_MEM(0xfffffe80, 0x10), DEFINE_RES_IRQ(evt2irq(0x4e0)), }; @@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = { }; static struct resource scif1_resources[] = { - DEFINE_RES_MEM(0xa4000150, 0x100), + DEFINE_RES_MEM(0xa4000150, 0x10), DEFINE_RES_IRQ(evt2irq(0x900)), }; @@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = { }; static struct resource scif2_resources[] = { - DEFINE_RES_MEM(0xa4000140, 0x100), + DEFINE_RES_MEM(0xa4000140, 0x10), DEFINE_RES_IRQ(evt2irq(0x880)), }; diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 765c1776ec9..0e69b7e7a43 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -22,7 +22,7 @@ int atomic_add_return(int, atomic_t *); int atomic_cmpxchg(atomic_t *, int, int); -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +int atomic_xchg(atomic_t *, int); int __atomic_add_unless(atomic_t *, int, int); void atomic_set(atomic_t *, int); diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index 32c29a133f9..d38b52dca21 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -11,22 +11,14 @@ #ifndef __ARCH_SPARC_CMPXCHG__ #define __ARCH_SPARC_CMPXCHG__ -static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) -{ - __asm__ __volatile__("swap [%2], %0" - : "=&r" (val) - : "0" (val), "r" (m) - : "memory"); - return val; -} - +unsigned long __xchg_u32(volatile u32 *m, u32 new); void __xchg_called_with_bad_pointer(void); static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) { switch (size) { case 4: - return xchg_u32(ptr, x); + return __xchg_u32(ptr, x); } __xchg_called_with_bad_pointer(); return x; diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 5b1b52a04ad..7e064c68c5e 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, 
s, v, h) +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this + * routine can be a nop. + */ +} + extern struct dma_map_ops *dma_ops; extern struct dma_map_ops *leon_dma_ops; extern struct dma_map_ops pci32_dma_ops; diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h index f34682430fc..2e3a4add859 100644 --- a/arch/sparc/include/asm/oplib_64.h +++ b/arch/sparc/include/asm/oplib_64.h @@ -62,7 +62,8 @@ struct linux_mem_p1275 { /* You must call prom_init() before using any of the library services, * preferably as early as possible. Pass it the romvec pointer. */ -void prom_init(void *cif_handler, void *cif_stack); +void prom_init(void *cif_handler); +void prom_init_report(void); /* Boot argument acquisition, returns the boot command line string. */ char *prom_getbootargs(void); diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index f5fffd84d0d..29d64b1758e 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -48,6 +48,8 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int); #endif #ifdef CONFIG_SPARC64 +void __init start_early_boot(void); + /* unaligned_64.c */ int handle_ldf_stq(u32 insn, struct pt_regs *regs); void handle_ld_nf(u32 insn, struct pt_regs *regs); diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h index a34ad079487..4c7c12d69be 100644 --- a/arch/sparc/include/uapi/asm/swab.h +++ b/arch/sparc/include/uapi/asm/swab.h @@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr) { __u16 ret; - __asm__ __volatile__ ("lduha [%1] %2, %0" + __asm__ __volatile__ ("lduha [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab16p __arch_swab16p @@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr) { __u32 ret; - __asm__ __volatile__ ("lduwa [%1] %2, %0" + __asm__ __volatile__ ("lduwa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab32p __arch_swab32p @@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr) { __u64 ret; - __asm__ __volatile__ ("ldxa [%1] %2, %0" + __asm__ __volatile__ ("ldxa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab64p __arch_swab64p diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index c842a89b119..46d83842edd 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -414,8 +414,9 @@ #define __NR_seccomp 346 #define __NR_getrandom 347 #define __NR_memfd_create 348 +#define __NR_bpf 349 -#define NR_syscalls 349 +#define NR_syscalls 350 /* Bitmask values returned from kern_features system call. 
*/ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index ebaba6167dd..88d322b67fa 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -65,13 +65,10 @@ struct pause_patch_entry { extern struct pause_patch_entry __pause_3insn_patch, __pause_3insn_patch_end; -void __init per_cpu_patch(void); void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, struct sun4v_2insn_patch_entry *); -void __init sun4v_patch(void); -void __init boot_cpu_id_too_large(int cpu); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 4fdeb8040d4..3d61fcae7ee 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -672,14 +672,12 @@ tlb_fixup_done: sethi %hi(init_thread_union), %g6 or %g6, %lo(init_thread_union), %g6 ldx [%g6 + TI_TASK], %g4 - mov %sp, %l6 wr %g0, ASI_P, %asi mov 1, %g1 sllx %g1, THREAD_SHIFT, %g1 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 add %g6, %g1, %sp - mov 0, %fp /* Set per-cpu pointer initially to zero, this makes * the boot-cpu use the in-kernel-image per-cpu areas @@ -706,44 +704,14 @@ tlb_fixup_done: nop #endif - mov %l6, %o1 ! OpenPROM stack call prom_init mov %l7, %o0 ! OpenPROM cif handler - /* Initialize current_thread_info()->cpu as early as possible. - * In order to do that accurately we have to patch up the get_cpuid() - * assembler sequences. And that, in turn, requires that we know - * if we are on a Starfire box or not. While we're here, patch up - * the sun4v sequences as well. + /* To create a one-register-window buffer between the kernel's + * initial stack and the last stack frame we use from the firmware, + * do the rest of the boot from a C helper function. */ - call check_if_starfire - nop - call per_cpu_patch - nop - call sun4v_patch - nop - -#ifdef CONFIG_SMP - call hard_smp_processor_id - nop - cmp %o0, NR_CPUS - blu,pt %xcc, 1f - nop - call boot_cpu_id_too_large - nop - /* Not reached... */ - -1: -#else - mov 0, %o0 -#endif - sth %o0, [%g6 + TI_CPU] - - call prom_init_report - nop - - /* Off we go.... */ - call start_kernel + call start_early_boot nop /* Not reached... 
*/ diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S index b7ddcdd1dea..cdbfec299f2 100644 --- a/arch/sparc/kernel/hvtramp.S +++ b/arch/sparc/kernel/hvtramp.S @@ -109,7 +109,6 @@ hv_cpu_startup: sllx %g5, THREAD_SHIFT, %g5 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 add %g6, %g5, %sp - mov 0, %fp call init_irqwork_curcpu nop diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 8f76f23dac3..f9c6813c132 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) { unsigned long csr_reg, csr, csr_error_bits; irqreturn_t ret = IRQ_NONE; - u16 stat; + u32 stat; csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; csr = upa_readq(csr_reg); @@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) pbm->name); ret = IRQ_HANDLED; } - pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); + pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | @@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("%s: PCI bus error, PCI_STATUS[%04x]\n", pbm->name, stat); - pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); + pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); ret = IRQ_HANDLED; } return ret; diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index e629b837758..c38d19fc27b 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -30,6 +30,7 @@ #include <linux/cpu.h> #include <linux/initrd.h> #include <linux/module.h> +#include <linux/start_kernel.h> #include <asm/io.h> #include <asm/processor.h> @@ -162,7 +163,7 @@ char reboot_command[COMMAND_LINE_SIZE]; static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; -void __init per_cpu_patch(void) +static void __init per_cpu_patch(void) { struct cpuid_patch_entry *p; unsigned long ver; @@ -254,7 +255,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, } } -void __init sun4v_patch(void) +static void __init sun4v_patch(void) { extern void sun4v_hvapi_init(void); @@ -323,14 +324,25 @@ static void __init pause_patch(void) } } -#ifdef CONFIG_SMP -void __init boot_cpu_id_too_large(int cpu) +void __init start_early_boot(void) { - prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", - cpu, NR_CPUS); - prom_halt(); + int cpu; + + check_if_starfire(); + per_cpu_patch(); + sun4v_patch(); + + cpu = hard_smp_processor_id(); + if (cpu >= NR_CPUS) { + prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", + cpu, NR_CPUS); + prom_halt(); + } + current_thread_info()->cpu = cpu; + + prom_init_report(); + start_kernel(); } -#endif /* On Ultra, we support all of the v8 capabilities. 
*/ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 302c476413d..da6f1a7fc4d 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu) void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + irq_enter(); generic_smp_call_function_interrupt(); + irq_exit(); } void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + irq_enter(); generic_smp_call_function_single_interrupt(); + irq_exit(); } static void tsb_sync(void *info) diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 6a873c344bc..ad0cdf497b7 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -86,4 +86,4 @@ sys_call_table: /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr -/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create +/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index d9151b6490d..580cde9370c 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -87,7 +87,7 @@ sys_call_table32: /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create + .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf #endif /* CONFIG_COMPAT */ @@ -166,4 +166,4 @@ sys_call_table: /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr - .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create + .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index 737f8cbc7d5..88ede1d53b4 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -109,10 +109,13 @@ startup_continue: brnz,pn %g1, 1b nop - sethi %hi(p1275buf), %g2 - or %g2, %lo(p1275buf), %g2 - ldx [%g2 + 0x10], %l2 - add %l2, -(192 + 128), %sp + /* Get onto temporary stack which will be in the locked + * kernel image. 
+ */ + sethi %hi(tramp_stack), %g1 + or %g1, %lo(tramp_stack), %g1 + add %g1, TRAMP_STACK_SIZE, %g1 + sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp flushw /* Setup the loop variables: @@ -394,7 +397,6 @@ after_lock_tlb: sllx %g5, THREAD_SHIFT, %g5 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 add %g6, %g5, %sp - mov 0, %fp rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index a7c418ac26a..71cd65ab200 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -45,6 +45,19 @@ ATOMIC_OP(add, +=) #undef ATOMIC_OP +int atomic_xchg(atomic_t *v, int new) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + v->counter = new; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(atomic_xchg); + int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) return (unsigned long)prev; } EXPORT_SYMBOL(__cmpxchg_u32); + +unsigned long __xchg_u32(volatile u32 *ptr, u32 new) +{ + unsigned long flags; + u32 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + prev = *ptr; + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return (unsigned long)prev; +} +EXPORT_SYMBOL(__xchg_u32); diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 1aed0432c64..ae6ce383d4d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + unsigned long addr, len, end; + unsigned long next, flags; + pgd_t *pgdp; + int nr = 0; + + start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + + local_irq_save(flags); + pgdp = pgd_offset(mm, addr); + do { + pgd_t pgd = *pgdp; + + next = pgd_addr_end(addr, end); + if (pgd_none(pgd)) + break; + if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) + break; + } while (pgdp++, addr = next, addr != end); + local_irq_restore(flags); + + return nr; +} + int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S index 9c86b4b7d42..8050f381f51 100644 --- a/arch/sparc/prom/cif.S +++ b/arch/sparc/prom/cif.S @@ -11,11 +11,10 @@ .text .globl prom_cif_direct prom_cif_direct: + save %sp, -192, %sp sethi %hi(p1275buf), %o1 or %o1, %lo(p1275buf), %o1 - ldx [%o1 + 0x0010], %o2 ! prom_cif_stack - save %o2, -192, %sp - ldx [%i1 + 0x0008], %l2 ! prom_cif_handler + ldx [%o1 + 0x0008], %l2 ! prom_cif_handler mov %g4, %l0 mov %g5, %l1 mov %g6, %l3 diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c index d95db755828..110b0d78b86 100644 --- a/arch/sparc/prom/init_64.c +++ b/arch/sparc/prom/init_64.c @@ -26,13 +26,13 @@ phandle prom_chosen_node; * It gets passed the pointer to the PROM vector. 
*/ -extern void prom_cif_init(void *, void *); +extern void prom_cif_init(void *); -void __init prom_init(void *cif_handler, void *cif_stack) +void __init prom_init(void *cif_handler) { phandle node; - prom_cif_init(cif_handler, cif_stack); + prom_cif_init(cif_handler); prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || (s32)prom_chosen_node == -1) diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index b2340f008ae..545d8bb79b6 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c @@ -20,7 +20,6 @@ struct { long prom_callback; /* 0x00 */ void (*prom_cif_handler)(long *); /* 0x08 */ - unsigned long prom_cif_stack; /* 0x10 */ } p1275buf; extern void prom_world(int); @@ -52,5 +51,4 @@ void p1275_cmd_direct(unsigned long *args) void prom_cif_init(void *cif_handler, void *cif_stack) { p1275buf.prom_cif_handler = (void (*)(long *))cif_handler; - p1275buf.prom_cif_stack = (unsigned long)cif_stack; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f2327e88e07..41a503c1586 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -142,6 +142,10 @@ config INSTRUCTION_DECODER def_bool y depends on KPROBES || PERF_EVENTS || UPROBES +config PERF_EVENTS_INTEL_UNCORE + def_bool y + depends on PERF_EVENTS && CPU_SUP_INTEL && PCI + config OUTPUT_FORMAT string default "elf32-i386" if X86_32 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 704f58aa79c..be1e07d4b59 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -76,8 +76,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_LZ4) := lz4 +RUN_SIZE = $(shell objdump -h vmlinux | \ + perl $(srctree)/arch/x86/tools/calc_run_size.pl) quiet_cmd_mkpiggy = MKPIGGY $@ - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) + cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) targets += piggy.S $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index de8eebd6f67..1acf605a646 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -330,8 +330,10 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) size = pci->romsize + sizeof(*rom); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to alloc mem for rom\n"); return status; + } memset(rom, 0, sizeof(*rom)); @@ -344,14 +346,18 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, PCI_VENDOR_ID, 1, &(rom->vendor)); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to read rom->vendor\n"); goto free_struct; + } status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, PCI_DEVICE_ID, 1, &(rom->devid)); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to read rom->devid\n"); goto free_struct; + } status = efi_early->call(pci->get_location, pci, &(rom->segment), &(rom->bus), &(rom->device), &(rom->function)); @@ -432,8 +438,10 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) size = pci->romsize + sizeof(*rom); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + 
efi_printk(sys_table, "Failed to alloc mem for rom\n"); return status; + } rom->data.type = SETUP_PCI; rom->data.len = size - sizeof(struct setup_data); @@ -444,14 +452,18 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, PCI_VENDOR_ID, 1, &(rom->vendor)); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to read rom->vendor\n"); goto free_struct; + } status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, PCI_DEVICE_ID, 1, &(rom->devid)); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to read rom->devid\n"); goto free_struct; + } status = efi_early->call(pci->get_location, pci, &(rom->segment), &(rom->bus), &(rom->device), &(rom->function)); @@ -538,8 +550,10 @@ static void setup_efi_pci(struct boot_params *params) EFI_LOADER_DATA, size, (void **)&pci_handle); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk(sys_table, "Failed to alloc mem for pci_handle\n"); return; + } status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, @@ -1105,6 +1119,10 @@ struct boot_params *make_boot_params(struct efi_config *c) memset(sdt, 0, sizeof(*sdt)); + status = efi_parse_options(cmdline_ptr); + if (status != EFI_SUCCESS) + goto fail2; + status = handle_cmdline_files(sys_table, image, (char *)(unsigned long)hdr->cmd_line_ptr, "initrd=", hdr->initrd_addr_max, diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index cbed1407a5c..1d7fbbcc196 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -207,7 +207,8 @@ relocated: * Do the decompression, and jump to the new kernel.. */ /* push arguments for decompress_kernel: */ - pushl $z_output_len /* decompressed length */ + pushl $z_run_size /* size of kernel with .bss and .brk */ + pushl $z_output_len /* decompressed length, end of relocs */ leal z_extract_offset_negative(%ebx), %ebp pushl %ebp /* output address */ pushl $z_input_len /* input_len */ @@ -217,7 +218,7 @@ relocated: pushl %eax /* heap area */ pushl %esi /* real mode pointer */ call decompress_kernel /* returns kernel location in %eax */ - addl $24, %esp + addl $28, %esp /* * Jump to the decompressed kernel. diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2884e0c3e8a..6b1766c6c08 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -402,13 +402,16 @@ relocated: * Do the decompression, and jump to the new kernel.. 
*/ pushq %rsi /* Save the real mode argument */ + movq $z_run_size, %r9 /* size of kernel with .bss and .brk */ + pushq %r9 movq %rsi, %rdi /* real mode address */ leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ leaq input_data(%rip), %rdx /* input_data */ movl $z_input_len, %ecx /* input_len */ movq %rbp, %r8 /* output target address */ - movq $z_output_len, %r9 /* decompressed length */ + movq $z_output_len, %r9 /* decompressed length, end of relocs */ call decompress_kernel /* returns kernel location in %rax */ + popq %r9 popq %rsi /* diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 57ab74df7ee..30dd59a9f0b 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, unsigned char *input_data, unsigned long input_len, unsigned char *output, - unsigned long output_len) + unsigned long output_len, + unsigned long run_size) { real_mode = rmode; @@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; - output = choose_kernel_location(input_data, input_len, - output, output_len); + /* + * The memory hole needed for the kernel is the larger of either + * the entire decompressed kernel plus relocation table, or the + * entire decompressed kernel plus .bss and .brk sections. + */ + output = choose_kernel_location(input_data, input_len, output, + output_len > run_size ? output_len + : run_size); /* Validate memory location choices. */ if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index b669ab65bf6..d8222f21318 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -36,11 +36,13 @@ int main(int argc, char *argv[]) uint32_t olen; long ilen; unsigned long offs; + unsigned long run_size; FILE *f = NULL; int retval = 1; - if (argc < 2) { - fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); + if (argc < 3) { + fprintf(stderr, "Usage: %s compressed_file run_size\n", + argv[0]); goto bail; } @@ -74,6 +76,7 @@ int main(int argc, char *argv[]) offs += olen >> 12; /* Add 8 bytes for each 32K block */ offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ + run_size = atoi(argv[2]); printf(".section \".rodata..compressed\",\"a\",@progbits\n"); printf(".globl z_input_len\n"); @@ -85,6 +88,8 @@ int main(int argc, char *argv[]) /* z_extract_offset_negative allows simplification of head_32.S */ printf(".globl z_extract_offset_negative\n"); printf("z_extract_offset_negative = -0x%lx\n", offs); + printf(".globl z_run_size\n"); + printf("z_run_size = %lu\n", run_size); printf(".globl input_data, input_data_end\n"); printf("input_data:\n"); diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 8ffba18395c..ffe71228fc1 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -157,7 +157,7 @@ ENTRY(ia32_sysenter_target) * ourselves. To save a few cycles, we can check whether * NT was set instead of doing an unconditional popfq. 
*/ - testl $X86_EFLAGS_NT,EFLAGS(%rsp) /* saved EFLAGS match cpu */ + testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp) jnz sysenter_fix_flags sysenter_flags_fixed: diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 0ec241ede5a..9b11757975d 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -81,24 +81,23 @@ extern u64 asmlinkage efi_call(void *fp, ...); */ #define __efi_call_virt(f, args...) efi_call_virt(f, args) -extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, - u32 type, u64 attribute); +extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, + u32 type, u64 attribute); #endif /* CONFIG_X86_32 */ -extern int add_efi_memmap; extern struct efi_scratch efi_scratch; -extern void efi_set_executable(efi_memory_desc_t *md, bool executable); -extern int efi_memblock_x86_reserve_range(void); -extern void efi_call_phys_prelog(void); -extern void efi_call_phys_epilog(void); -extern void efi_unmap_memmap(void); -extern void efi_memory_uc(u64 addr, unsigned long size); +extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable); +extern int __init efi_memblock_x86_reserve_range(void); +extern void __init efi_call_phys_prolog(void); +extern void __init efi_call_phys_epilog(void); +extern void __init efi_unmap_memmap(void); +extern void __init efi_memory_uc(u64 addr, unsigned long size); extern void __init efi_map_region(efi_memory_desc_t *md); extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); -extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); -extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages); +extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); +extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages); extern void __init old_map_region(efi_memory_desc_t *md); extern void __init runtime_code_page_mkexec(void); extern void __init efi_runtime_mkexec(void); @@ -162,16 +161,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( extern bool efi_reboot_required(void); #else -/* - * IF EFI is not configured, have the EFI calls return -ENOSYS. 
- */ -#define efi_call0(_f) (-ENOSYS) -#define efi_call1(_f, _a1) (-ENOSYS) -#define efi_call2(_f, _a1, _a2) (-ENOSYS) -#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS) -#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) -#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) -#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} static inline bool efi_reboot_required(void) { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7d603a71ab3..6ed0c30d6a0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -989,6 +989,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); } +static inline u64 get_canonical(u64 la) +{ + return ((int64_t)la << 16) >> 16; +} + +static inline bool is_noncanonical_address(u64 la) +{ +#ifdef CONFIG_X86_64 + return get_canonical(la) != la; +#else + return false; +#endif +} + #define TSS_IOPB_BASE_OFFSET 0x66 #define TSS_BASE_SIZE 0x68 #define TSS_IOPB_SIZE (65536 / 8) @@ -1050,7 +1064,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, unsigned long address); void kvm_define_shared_msr(unsigned index, u32 msr); -void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); +int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index f48b17df422..3a52ee0e726 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -20,7 +20,6 @@ #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) -#define STACKFAULT_STACK 0 #define DOUBLEFAULT_STACK 1 #define NMI_STACK 0 #define DEBUG_STACK 0 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 678205195ae..75450b2c7be 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,11 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define DOUBLEFAULT_STACK 1 +#define NMI_STACK 2 +#define DEBUG_STACK 3 +#define MCE_STACK 4 +#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 7024c12f7bf..400873450e3 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -105,6 +105,7 @@ static __always_inline bool should_resched(void) # ifdef CONFIG_CONTEXT_TRACKING extern asmlinkage void ___preempt_schedule_context(void); # define __preempt_schedule_context() asm ("call ___preempt_schedule_context") + extern asmlinkage void preempt_schedule_context(void); # endif #endif diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 8cd27e08e23..8cd1cc3bc83 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -150,6 +150,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) } void cpu_disable_common(void); +void cpu_die_common(unsigned int cpu); void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); void 
native_smp_cpus_done(unsigned int max_cpus); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 854053889d4..547e344a6dc 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -141,7 +141,7 @@ struct thread_info { /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ - _TIF_USER_RETURN_NOTIFY) + _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index bc8352e7010..707adc6549d 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void); #ifdef CONFIG_TRACING asmlinkage void trace_page_fault(void); +#define trace_stack_segment stack_segment #define trace_divide_error divide_error #define trace_bounds bounds #define trace_invalid_op invalid_op diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index 0e79420376e..990a2fe1588 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -67,6 +67,7 @@ #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_INVEPT 50 #define EXIT_REASON_PREEMPTION_TIMER 52 +#define EXIT_REASON_INVVPID 53 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 #define EXIT_REASON_APIC_WRITE 56 @@ -114,6 +115,7 @@ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ { EXIT_REASON_INVD, "INVD" }, \ + { EXIT_REASON_INVVPID, "INVVPID" }, \ { EXIT_REASON_INVPCID, "INVPCID" } #endif /* _UAPIVMX_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b436fc735aa..a142e77693e 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger, /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) - return gsi; + return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC); trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; polarity = polarity == ACPI_ACTIVE_HIGH ? 
0 : 1; @@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) { - int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); + int irq; - if (irq >= 0) { + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + *irqp = gsi; + } else { + irq = mp_map_gsi_to_irq(gsi, + IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); + if (irq < 0) + return -1; *irqp = irq; - return 0; } - - return -1; + return 0; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 5972b108f15..b708738d016 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev) irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); - /* APB timer irqs are set up as mp_irqs, timer is edge type */ - __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge"); } /* Should be called with per cpu */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 00853b254ab..ba6cc041edb 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1297,7 +1297,7 @@ void setup_local_APIC(void) unsigned int value, queued; int i, j, acked = 0; unsigned long long tsc = 0, ntsc; - long long max_loops = cpu_khz; + long long max_loops = cpu_khz ? cpu_khz : 1000000; if (cpu_has_tsc) rdtscll(tsc); @@ -1383,7 +1383,7 @@ void setup_local_APIC(void) break; } if (queued) { - if (cpu_has_tsc) { + if (cpu_has_tsc && cpu_khz) { rdtscll(ntsc); max_loops = (cpu_khz << 10) - (ntsc - tsc); } else diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 01d5453b550..e27b49d7c92 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o endif obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o + +obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ + perf_event_intel_uncore_snb.o \ + perf_event_intel_uncore_snbep.o \ + perf_event_intel_uncore_nhmex.o endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4b4f78c9ba1..cfa9b5b2c27 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); static int __init x86_xsave_setup(char *s) { + if (strlen(s)) + return 0; setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); setup_clear_cpu_cap(X86_FEATURE_XSAVES); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1ef45627317..9cc6b6f25f4 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_F00F_BUG /* - * All current models of Pentium and Pentium with MMX technology CPUs + * All models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. Announce that the fault handler will be checking for it. + * The Quark is also family 5, but does not have the same bug. 
*/ clear_cpu_bug(c, X86_BUG_F00F); - if (!paravirt_enabled() && c->x86 == 5) { + if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) { static int f00f_workaround_enabled; set_cpu_bug(c, X86_BUG_F00F); diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 7aa1acc7978..06674473b0e 100644 --- a/arch/x86/kernel/cpu/microcode/amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c @@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size) * load_microcode_amd() to save equivalent cpu table and microcode patches in * kernel heap memory. */ -static void apply_ucode_in_initrd(void *ucode, size_t size) +static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch) { struct equiv_cpu_entry *eq; size_t *cont_sz; u32 *header; u8 *data, **cont; + u8 (*patch)[PATCH_MAX_SIZE]; u16 eq_id = 0; int offset, left; u32 rev, eax, ebx, ecx, edx; @@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); cont_sz = (size_t *)__pa_nodebug(&container_size); cont = (u8 **)__pa_nodebug(&container); + patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); #else new_rev = &ucode_new_rev; cont_sz = &container_size; cont = &container; + patch = &amd_ucode_patch; #endif data = ucode; @@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) rev = mc->hdr.patch_id; *new_rev = rev; - /* save ucode patch */ - memcpy(amd_ucode_patch, mc, - min_t(u32, header[1], PATCH_MAX_SIZE)); + if (save_patch) + memcpy(patch, mc, + min_t(u32, header[1], PATCH_MAX_SIZE)); } } @@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void) *data = cp.data; *size = cp.size; - apply_ucode_in_initrd(cp.data, cp.size); + apply_ucode_in_initrd(cp.data, cp.size, true); } #ifdef CONFIG_X86_32 @@ -263,7 +266,7 @@ void load_ucode_amd_ap(void) size_t *usize; void **ucode; - mc = (struct microcode_amd *)__pa(amd_ucode_patch); + mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch); if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { __apply_microcode_amd(mc); return; @@ -275,7 +278,7 @@ void load_ucode_amd_ap(void) if (!*ucode || !*usize) return; - apply_ucode_in_initrd(*ucode, *usize); + apply_ucode_in_initrd(*ucode, *usize, false); } static void __init collect_cpu_sig_on_bsp(void *arg) @@ -339,7 +342,7 @@ void load_ucode_amd_ap(void) * AP has a different equivalence ID than BSP, looks like * mixed-steppings silicon so go through the ucode blob anew. */ - apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); + apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false); } } #endif @@ -347,7 +350,9 @@ void load_ucode_amd_ap(void) int __init save_microcode_in_initrd_amd(void) { unsigned long cont; + int retval = 0; enum ucode_state ret; + u8 *cont_va; u32 eax; if (!container) @@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void) #ifdef CONFIG_X86_32 get_bsp_sig(); - cont = (unsigned long)container; + cont = (unsigned long)container; + cont_va = __va(container); #else /* * We need the physical address of the container for both bitness since * boot_params.hdr.ramdisk_image is a physical address. 
*/ - cont = __pa(container); + cont = __pa(container); + cont_va = container; #endif /* @@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void) if (relocated_ramdisk) container = (u8 *)(__va(relocated_ramdisk) + (cont - boot_params.hdr.ramdisk_image)); + else + container = cont_va; if (ucode_new_rev) pr_info("microcode: updated early to new patch_level=0x%08x\n", @@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void) ret = load_microcode_amd(eax, container, container_size); if (ret != UCODE_OK) - return -EINVAL; + retval = -EINVAL; /* * This will be freed any msec now, stash patches for the current @@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void) container = NULL; container_size = 0; - return 0; + return retval; } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index dd9d6190b08..2ce9051174e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -465,6 +465,14 @@ static void mc_bp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); + else if (!uci->mc) + /* + * We might resume and not have applied late microcode but still + * have a newer patch stashed from the early loader. We don't + * have it in uci->mc so we have to load it the same way we're + * applying patches early on the APs. + */ + load_ucode_ap(); } static struct syscore_ops mc_syscore_ops = { diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index 5f28a64e71e..2c017f242a7 100644 --- a/arch/x86/kernel/cpu/microcode/core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c @@ -124,7 +124,7 @@ void __init load_ucode_bsp(void) static bool check_loader_disabled_ap(void) { #ifdef CONFIG_X86_32 - return __pa_nodebug(dis_ucode_ldr); + return *((bool *)__pa_nodebug(&dis_ucode_ldr)); #else return dis_ucode_ldr; #endif diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1b8299dd3d9..143e5f5dc85 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -243,8 +243,9 @@ static bool check_hw_exists(void) msr_fail: printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); - printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR - "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); + printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n", + boot_cpu_has(X86_FEATURE_HYPERVISOR) ? 
KERN_INFO : KERN_ERR, + reg, val_new); return false; } @@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.type == PERF_TYPE_RAW) event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; - if (event->attr.sample_period && x86_pmu.limit_period) { - if (x86_pmu.limit_period(event, event->attr.sample_period) > - event->attr.sample_period) - return -EINVAL; - } - return x86_setup_perfctr(event); } @@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event) if (left > x86_pmu.max_period) left = x86_pmu.max_period; - if (x86_pmu.limit_period) - left = x86_pmu.limit_period(event, left); - per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index d98a34d435d..fc5eb390b36 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -445,7 +445,6 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; bool late_ack; - unsigned (*limit_period)(struct perf_event *event, unsigned l); /* * sysfs attrs diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a73947c53b6..944bf019b74 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = { EVENT_CONSTRAINT_END }; -static struct event_constraint intel_bdw_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ - FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ - INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ - EVENT_CONSTRAINT_END -}; - static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids }; -static __initconst const u64 hsw_hw_cache_event_ids - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(L1D ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x0, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(L1I ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(LL ) ] = { - [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ - [ C(RESULT_ACCESS) ] = 0x1b7, - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| - L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x1b7, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */ - /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x1b7, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(DTLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_WRITE) ] = { - [ 
C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, - [ C(ITLB) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ - [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, - [ C(BPU ) ] = { - [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ - [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, - }, - }, -}; - -static __initconst const u64 hsw_hw_cache_extra_regs - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = -{ - [ C(LL ) ] = { - [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */ - [ C(RESULT_ACCESS) ] = 0x2d5, - /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE| - L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x3fbc0202d5ull, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */ - /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */ - [ C(RESULT_MISS) ] = 0x3fbc020122ull, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = 0x0, - [ C(RESULT_MISS) ] = 0x0, - }, - }, -}; - static __initconst const u64 westmere_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) return c; } -/* - * Broadwell: - * The INST_RETIRED.ALL period always needs to have lowest - * 6bits cleared (BDM57). It shall not use a period smaller - * than 100 (BDM11). We combine the two to enforce - * a min-period of 128. 
- */ -static unsigned bdw_limit_period(struct perf_event *event, unsigned left) -{ - if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == - X86_CONFIG(.event=0xc0, .umask=0x01)) { - if (left < 128) - left = 128; - left &= ~0x3fu; - } - return left; -} - PMU_FORMAT_ATTR(event, "config:0-7" ); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); @@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void) case 69: /* 22nm Haswell ULT */ case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); intel_pmu_lbr_init_snb(); @@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void) pr_cont("Haswell events, "); break; - case 61: /* 14nm Broadwell Core-M */ - x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - - intel_pmu_lbr_init_snb(); - - x86_pmu.event_constraints = intel_bdw_event_constraints; - x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; - x86_pmu.extra_regs = intel_snbep_extra_regs; - x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - /* all extra regs are per-cpu when HT is on */ - x86_pmu.er_flags |= ERF_HAS_RSP_1; - x86_pmu.er_flags |= ERF_NO_HT_SHARING; - - x86_pmu.hw_config = hsw_hw_config; - x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; - x86_pmu.limit_period = bdw_limit_period; - pr_cont("Broadwell events, "); - break; - default: switch (x86_pmu.version) { case 1: diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index adf138eac85..f9ed429d6e4 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = { .attrs = snbep_uncore_qpi_formats_attr, }; -#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ - .init_box = snbep_uncore_msr_init_box, \ +#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ .disable_box = snbep_uncore_msr_disable_box, \ .enable_box = snbep_uncore_msr_enable_box, \ .disable_event = snbep_uncore_msr_disable_event, \ .enable_event = snbep_uncore_msr_enable_event, \ .read_counter = uncore_msr_read_counter +#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \ + .init_box = snbep_uncore_msr_init_box \ + static struct intel_uncore_ops snbep_uncore_msr_ops = { SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), }; @@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +/* + * Write SBOX Initialization register bit by bit to avoid spurious #GPs + */ +static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + + if (msr) { + u64 init = SNBEP_PMON_BOX_CTL_INT; + u64 flags = 0; + int i; + + for_each_set_bit(i, (unsigned long *)&init, 64) { + flags |= (1ULL << i); + wrmsrl(msr, flags); + } + } +} + +static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = { + __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), + .init_box = hswep_uncore_sbox_msr_init_box +}; + static 
struct attribute *hswep_uncore_sbox_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = { .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, .msr_offset = HSWEP_SBOX_MSR_OFFSET, - .ops = &snbep_uncore_msr_ops, + .ops = &hswep_uncore_sbox_msr_ops, .format_group = &hswep_uncore_sbox_format_group, }; @@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = { SNBEP_UNCORE_PCI_COMMON_INIT(), }; +static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; + +static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); + pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); + + return count; +} + static struct intel_uncore_ops hswep_uncore_irp_ops = { .init_box = snbep_uncore_pci_init_box, .disable_box = snbep_uncore_pci_disable_box, .enable_box = snbep_uncore_pci_enable_box, .disable_event = ivbep_uncore_irp_disable_event, .enable_event = ivbep_uncore_irp_enable_event, - .read_counter = ivbep_uncore_irp_read_counter, + .read_counter = hswep_uncore_irp_read_counter, }; static struct intel_uncore_type hswep_uncore_irp = { diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 1abcb50b48a..ff86f19b575 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = { [ DEBUG_STACK-1 ] = "#DB", [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", - [ STACKFAULT_STACK-1 ] = "#SS", [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index b553ed89e5f..344b63f18d1 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -447,15 +447,14 @@ sysenter_exit: sysenter_audit: testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) jnz syscall_trace_entry - addl $4,%esp - CFI_ADJUST_CFA_OFFSET -4 - movl %esi,4(%esp) /* 5th arg: 4th syscall arg */ - movl %edx,(%esp) /* 4th arg: 3rd syscall arg */ - /* %ecx already in %ecx 3rd arg: 2nd syscall arg */ - movl %ebx,%edx /* 2nd arg: 1st syscall arg */ - /* %eax already in %eax 1st arg: syscall number */ + /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ + movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ + /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ + pushl_cfi PT_ESI(%esp) /* a3: 5th arg */ + pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */ call __audit_syscall_entry - pushl_cfi %ebx + popl_cfi %ecx /* get that remapped edx off the stack */ + popl_cfi %ecx /* get that remapped esi off the stack */ movl PT_EAX(%esp),%eax /* reload syscall number */ jmp sysenter_do_call diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index df088bb03fb..c0226ab5410 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -828,9 +828,15 @@ ENTRY(native_iret) jnz native_irq_return_ldt #endif +.global native_irq_return_iret native_irq_return_iret: + /* + * This may fault. Non-paranoid faults on return to userspace are + * handled by fixup_bad_iret. These include #SS, #GP, and #NP. + * Double-faults due to espfix64 are handled in do_double_fault. + * Other faults here are fatal. 
+ */ iretq - _ASM_EXTABLE(native_irq_return_iret, bad_iret) #ifdef CONFIG_X86_ESPFIX64 native_irq_return_ldt: @@ -858,25 +864,6 @@ native_irq_return_ldt: jmp native_irq_return_iret #endif - .section .fixup,"ax" -bad_iret: - /* - * The iret traps when the %cs or %ss being restored is bogus. - * We've lost the original trap vector and error code. - * #GPF is the most likely one to get for an invalid selector. - * So pretend we completed the iret and took the #GPF in user mode. - * - * We are now running with the kernel GS after exception recovery. - * But error_entry expects us to have user GS to match the user %cs, - * so swap back. - */ - pushq $0 - - SWAPGS - jmp general_protection - - .previous - /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE @@ -922,37 +909,6 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) - /* - * If IRET takes a fault on the espfix stack, then we - * end up promoting it to a doublefault. In that case, - * modify the stack to make it look like we just entered - * the #GP handler from user space, similar to bad_iret. - */ -#ifdef CONFIG_X86_ESPFIX64 - ALIGN -__do_double_fault: - XCPT_FRAME 1 RDI+8 - movq RSP(%rdi),%rax /* Trap on the espfix stack? */ - sarq $PGDIR_SHIFT,%rax - cmpl $ESPFIX_PGD_ENTRY,%eax - jne do_double_fault /* No, just deliver the fault */ - cmpl $__KERNEL_CS,CS(%rdi) - jne do_double_fault - movq RIP(%rdi),%rax - cmpq $native_irq_return_iret,%rax - jne do_double_fault /* This shouldn't happen... */ - movq PER_CPU_VAR(kernel_stack),%rax - subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ - movq %rax,RSP(%rdi) - movq $0,(%rax) /* Missing (lost) #GP error code */ - movq $general_protection,RIP(%rdi) - retq - CFI_ENDPROC -END(__do_double_fault) -#else -# define __do_double_fault do_double_fault -#endif - /* * APIC interrupts. */ @@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0 idtentry bounds do_bounds has_error_code=0 idtentry invalid_op do_invalid_op has_error_code=0 idtentry device_not_available do_device_not_available has_error_code=0 -idtentry double_fault __do_double_fault has_error_code=1 paranoid=1 +idtentry double_fault do_double_fault has_error_code=1 paranoid=1 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 idtentry invalid_TSS do_invalid_TSS has_error_code=1 idtentry segment_not_present do_segment_not_present has_error_code=1 @@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1 +idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN idtentry xen_debug do_debug has_error_code=0 idtentry xen_int3 do_int3 has_error_code=0 @@ -1399,17 +1355,16 @@ error_sti: /* * There are two places in the kernel that can potentially fault with - * usergs. Handle them here. The exception handlers after iret run with - * kernel gs again, so don't set the user space flag. B stepping K8s - * sometimes report an truncated RIP for IRET exceptions returning to - * compat mode. Check for these here too. + * usergs. Handle them here. B stepping K8s sometimes report a + * truncated RIP for IRET exceptions returning to compat mode. Check + * for these here too. 
*/ error_kernelspace: CFI_REL_OFFSET rcx, RCX+8 incl %ebx leaq native_irq_return_iret(%rip),%rcx cmpq %rcx,RIP+8(%rsp) - je error_swapgs + je error_bad_iret movl %ecx,%eax /* zero extend */ cmpq %rax,RIP+8(%rsp) je bstep_iret @@ -1420,7 +1375,15 @@ error_kernelspace: bstep_iret: /* Fix truncated RIP */ movq %rcx,RIP+8(%rsp) - jmp error_swapgs + /* fall through */ + +error_bad_iret: + SWAPGS + mov %rsp,%rdi + call fixup_bad_iret + mov %rax,%rsp + decl %ebx /* Return to usergs */ + jmp error_sti CFI_ENDPROC END(error_entry) diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 8af817105e2..e7cc5370cd2 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); - irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, - i8259A_chip.name); + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); enable_irq(irq); } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 44f1ed42fdf..4de73ee7836 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector) void __init init_ISA_irqs(void) { struct irq_chip *chip = legacy_pic->chip; - const char *name = chip->name; int i; #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) @@ -79,7 +78,7 @@ void __init init_ISA_irqs(void) legacy_pic->init(0); for (i = 0; i < nr_legacy_irqs(); i++) - irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); + irq_set_chip_and_handler(i, chip, handle_level_irq); } void __init init_IRQ(void) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 749b0e42341..e510618b2e9 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch) */ if (work & _TIF_NOHZ) { user_exit(); - work &= ~TIF_NOHZ; + work &= ~_TIF_NOHZ; } #ifdef CONFIG_SECCOMP diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 235cfd39e0d..ab08aa2276f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p) setup_real_mode(); memblock_set_current_limit(get_max_mapped()); - dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * NOTE: On x86-32, only from this point on, fixmaps are ready for use. 
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * Reserve memory for crash kernel after SRAT is parsed so that it diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2d5200e5635..668d8f2a878 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); -static DEFINE_PER_CPU(struct completion, die_complete); - atomic_t init_deasserted; /* @@ -1305,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu) numa_remove_cpu(cpu); } +static DEFINE_PER_CPU(struct completion, die_complete); + void cpu_disable_common(void) { int cpu = smp_processor_id(); + init_completion(&per_cpu(die_complete, smp_processor_id())); + remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ @@ -1327,16 +1329,21 @@ int native_cpu_disable(void) return ret; clear_local_APIC(); - init_completion(&per_cpu(die_complete, smp_processor_id())); cpu_disable_common(); return 0; } +void cpu_die_common(unsigned int cpu) +{ + wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); +} + void native_cpu_die(unsigned int cpu) { /* We don't do anything here: idle task is faking death itself. */ - wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); + + cpu_die_common(cpu); /* They ack this in play_dead() by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 0d0e922fafc..de801f22128 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) -#ifdef CONFIG_X86_32 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) -#endif DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ -dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) -{ - enum ctx_state prev_state; - - prev_state = exception_enter(); - if (notify_die(DIE_TRAP, "stack segment", regs, error_code, - X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { - preempt_conditional_sti(regs); - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); - } - exception_exit(prev_state); -} - dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; +#ifdef CONFIG_X86_ESPFIX64 + extern unsigned char native_irq_return_iret[]; + + /* + * If IRET takes a non-IST fault on the espfix64 stack, then we + * end up promoting it to a doublefault. In that case, modify + * the stack to make it look like we just entered the #GP + * handler from user space, similar to bad_iret. + */ + if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + regs->cs == __KERNEL_CS && + regs->ip == (unsigned long)native_irq_return_iret) + { + struct pt_regs *normal_regs = task_pt_regs(current); + + /* Fake a #GP(0) from userspace. 
*/ + memmove(&normal_regs->ip, (void *)regs->sp, 5*8); + normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + regs->ip = (unsigned long)general_protection; + regs->sp = (unsigned long)&normal_regs->orig_ax; + return; + } +#endif + exception_enter(); /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs) return regs; } NOKPROBE_SYMBOL(sync_regs); + +struct bad_iret_stack { + void *error_entry_ret; + struct pt_regs regs; +}; + +asmlinkage __visible +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) +{ + /* + * This is called from entry_64.S early in handling a fault + * caused by a bad iret to user mode. To handle the fault + * correctly, we want move our stack frame to task_pt_regs + * and we want to pretend that the exception came from the + * iret target. + */ + struct bad_iret_stack *new_stack = + container_of(task_pt_regs(current), + struct bad_iret_stack, regs); + + /* Copy the IRET target to the new stack. */ + memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); + + /* Copy the remainder of the stack from the current stack. */ + memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); + + BUG_ON(!user_mode_vm(&new_stack->regs)); + return new_stack; +} #endif /* @@ -778,7 +815,7 @@ void __init trap_init(void) set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); set_intr_gate(X86_TRAP_TS, invalid_TSS); set_intr_gate(X86_TRAP_NP, segment_not_present); - set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); + set_intr_gate(X86_TRAP_SS, stack_segment); set_intr_gate(X86_TRAP_GP, general_protection); set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); set_intr_gate(X86_TRAP_MF, coprocessor_error); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index b6025f9e36c..b7e50bba3bb 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1166,14 +1166,17 @@ void __init tsc_init(void) x86_init.timers.tsc_pre_init(); - if (!cpu_has_tsc) + if (!cpu_has_tsc) { + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; + } tsc_khz = x86_platform.calibrate_tsc(); cpu_khz = tsc_khz; if (!tsc_khz) { mark_tsc_unstable("could not calculate TSC khz"); + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a46207a0583..9f8a2faf504 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } -static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) -{ - register_address_increment(ctxt, &ctxt->_eip, rel); -} - static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); @@ -569,6 +564,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt) return emulate_exception(ctxt, NM_VECTOR, 0, false); } +static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, + int cs_l) +{ + switch (ctxt->op_bytes) { + case 2: + ctxt->_eip = (u16)dst; + break; + case 4: + ctxt->_eip = (u32)dst; + break; +#ifdef CONFIG_X86_64 + case 8: + if ((cs_l && is_noncanonical_address(dst)) || + (!cs_l && (dst >> 32) != 0)) + return emulate_gp(ctxt, 0); + ctxt->_eip = dst; + break; +#endif + default: + WARN(1, "unsupported eip assignment size\n"); + } + return X86EMUL_CONTINUE; +} + +static inline int 
assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) +{ + return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64); +} + +static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) +{ + return assign_eip_near(ctxt, ctxt->_eip + rel); +} + static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; @@ -614,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) static int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, - unsigned size, bool write, bool fetch, + unsigned *max_size, unsigned size, + bool write, bool fetch, ulong *linear) { struct desc_struct desc; @@ -625,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, unsigned cpl; la = seg_base(ctxt, addr.seg) + addr.ea; + *max_size = 0; switch (ctxt->mode) { case X86EMUL_MODE_PROT64: if (((signed long)la << 16) >> 16 != la) return emulate_gp(ctxt, 0); + + *max_size = min_t(u64, ~0u, (1ull << 48) - la); + if (size > *max_size) + goto bad; break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, @@ -646,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && (ctxt->d & NoBigReal)) { /* la is between zero and 0xffff */ - if (la > 0xffff || (u32)(la + size - 1) > 0xffff) + if (la > 0xffff) goto bad; + *max_size = 0x10000 - la; } else if ((desc.type & 8) || !(desc.type & 4)) { /* expand-up segment */ - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) + if (addr.ea > lim) goto bad; + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); } else { /* expand-down segment */ - if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) + if (addr.ea <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) + if (addr.ea > lim) goto bad; + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); } + if (size > *max_size) + goto bad; cpl = ctxt->ops->cpl(ctxt); if (!(desc.type & 8)) { /* data segment */ @@ -684,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) - return emulate_ss(ctxt, sel); + return emulate_ss(ctxt, 0); else - return emulate_gp(ctxt, sel); + return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, @@ -694,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt, unsigned size, bool write, ulong *linear) { - return __linearize(ctxt, addr, size, write, false, linear); + unsigned max_size; + return __linearize(ctxt, addr, &max_size, size, write, false, linear); } @@ -719,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; - unsigned size; + unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; - size = 15UL ^ cur_size; - rc = __linearize(ctxt, addr, size, false, true, &linear); + /* + * We do not know exactly how many bytes will be needed, and + * __linearize is expensive, so fetch as much as possible. We + * just have to avoid going beyond the 15 byte limit, the end + * of the segment, or the end of the page. + * + * __linearize is called with size 0 so that it does not do any + * boundary check itself. Instead, we use max_size to check + * against op_size. 
+ */ + rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; + size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* @@ -739,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) * still, we must have hit the 15-byte boundary. */ if (unlikely(size < op_size)) - return X86EMUL_UNHANDLEABLE; + return emulate_gp(ctxt, 0); + rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) @@ -751,8 +803,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { - if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) - return __do_insn_fetch_bytes(ctxt, size); + unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; + + if (unlikely(done_size < size)) + return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } @@ -1416,7 +1470,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - u16 selector, int seg, u8 cpl, bool in_task_switch) + u16 selector, int seg, u8 cpl, + bool in_task_switch, + struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; @@ -1557,6 +1613,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); + if (desc) + *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); @@ -1566,7 +1624,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); - return __load_segment_descriptor(ctxt, selector, seg, cpl, false); + return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL); } static void write_register_operand(struct operand *op) @@ -1960,17 +2018,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned short sel; + unsigned short sel, old_sel; + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; + u8 cpl = ctxt->ops->cpl(ctxt); + + /* Assignment of RIP may only fail in 64-bit mode */ + if (ctxt->mode == X86EMUL_MODE_PROT64) + ops->get_segment(ctxt, &old_sel, &old_desc, NULL, + VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, + &new_desc); if (rc != X86EMUL_CONTINUE) return rc; - ctxt->_eip = 0; - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); - return X86EMUL_CONTINUE; + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); + if (rc != X86EMUL_CONTINUE) { + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); + /* assigning eip failed; restore the old cs */ + ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); + return rc; + } + return rc; } static int em_grp45(struct x86_emulate_ctxt *ctxt) @@ -1981,13 +2053,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt) case 2: /* call near abs */ { long int old_eip; old_eip = ctxt->_eip; - ctxt->_eip = ctxt->src.val; + rc = assign_eip_near(ctxt, ctxt->src.val); + if (rc != X86EMUL_CONTINUE) + break; ctxt->src.val = old_eip; rc = em_push(ctxt); break; } case 4: 
/* jmp abs */ - ctxt->_eip = ctxt->src.val; + rc = assign_eip_near(ctxt, ctxt->src.val); break; case 5: /* jmp far */ rc = em_jmp_far(ctxt); @@ -2022,30 +2096,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) static int em_ret(struct x86_emulate_ctxt *ctxt) { - ctxt->dst.type = OP_REG; - ctxt->dst.addr.reg = &ctxt->_eip; - ctxt->dst.bytes = ctxt->op_bytes; - return em_pop(ctxt); + int rc; + unsigned long eip; + + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) + return rc; + + return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned long cs; + unsigned long eip, cs; + u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; - rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); + if (ctxt->mode == X86EMUL_MODE_PROT64) + ops->get_segment(ctxt, &old_cs, &old_desc, NULL, + VCPU_SREG_CS); + + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - if (ctxt->op_bytes == 4) - ctxt->_eip = (u32)ctxt->_eip; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; - rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); + rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false, + &new_desc); + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_far(ctxt, eip, new_desc.l); + if (rc != X86EMUL_CONTINUE) { + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); + } return rc; } @@ -2306,7 +2397,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; - u64 msr_data; + u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; @@ -2322,6 +2413,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) else usermode = X86EMUL_MODE_PROT32; + rcx = reg_read(ctxt, VCPU_REGS_RCX); + rdx = reg_read(ctxt, VCPU_REGS_RDX); + cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); @@ -2339,6 +2433,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; + if (is_noncanonical_address(rcx) || + is_noncanonical_address(rdx)) + return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; @@ -2347,8 +2444,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); - ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); - *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); + ctxt->_eip = rdx; + *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } @@ -2466,19 +2563,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, * Now load segment descriptors. 
If fault happens at this stage * it is handled in a context of new task */ - ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; @@ -2603,25 +2705,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, * Now load segment descriptors. If fault happenes at this stage * it is handled in a context of new task */ - ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, + cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); + ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, + true, NULL); if (ret != X86EMUL_CONTINUE) return ret; @@ -2888,10 +2997,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt) static int em_call(struct x86_emulate_ctxt *ctxt) { + int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; - jmp_rel(ctxt, rel); + rc = jmp_rel(ctxt, rel); + if (rc != X86EMUL_CONTINUE) + return rc; return em_push(ctxt); } @@ -2900,34 +3012,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) u16 sel, old_cs; ulong old_eip; int rc; + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; + int cpl = ctxt->ops->cpl(ctxt); - old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); old_eip = ctxt->_eip; + ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) + rc = __load_segment_descriptor(ctxt, sel, 
VCPU_SREG_CS, cpl, false, + &new_desc); + if (rc != X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - ctxt->_eip = 0; - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); + if (rc != X86EMUL_CONTINUE) + goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) - return rc; + goto fail; ctxt->src.val = old_eip; - return em_push(ctxt); + rc = em_push(ctxt); + /* If we failed, we tainted the memory, but the very least we should + restore cs */ + if (rc != X86EMUL_CONTINUE) + goto fail; + return rc; +fail: + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); + return rc; + } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; + unsigned long eip; - ctxt->dst.type = OP_REG; - ctxt->dst.addr.reg = &ctxt->_eip; - ctxt->dst.bytes = ctxt->op_bytes; - rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); @@ -3254,20 +3382,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt) static int em_loop(struct x86_emulate_ctxt *ctxt) { + int rc = X86EMUL_CONTINUE; + register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); - return X86EMUL_CONTINUE; + return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { + int rc = X86EMUL_CONTINUE; + if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); - return X86EMUL_CONTINUE; + return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) @@ -3355,6 +3487,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_clflush(struct x86_emulate_ctxt *ctxt) +{ + /* emulating clflush regardless of cpuid */ + return X86EMUL_CONTINUE; +} + static bool valid_cr(int nr) { switch (nr) { @@ -3693,6 +3831,16 @@ static const struct opcode group11[] = { X7(D(Undefined)), }; +static const struct gprefix pfx_0f_ae_7 = { + I(SrcMem | ByteOp, em_clflush), N, N, N, +}; + +static const struct group_dual group15 = { { + N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7), +}, { + N, N, N, N, N, N, N, N, +} }; + static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; @@ -3901,10 +4049,11 @@ static const struct opcode twobyte_table[256] = { N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, - N, D(ImplicitOps | ModRM), N, N, + N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, - D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), + D(ImplicitOps | ModRM | SrcMem | NoAccess), + N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), @@ -3956,7 +4105,7 @@ static const struct opcode twobyte_table[256] = { F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), - D(ModRM), F(DstReg | SrcMem | ModRM, 
em_imul), + GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), @@ -4138,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, fetch_register_operand(op); break; case OpCL: + op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; @@ -4145,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, rc = decode_imm(ctxt, op, 1, true); break; case OpOne: + op->type = OP_IMM; op->bytes = 1; op->val = 1; break; @@ -4203,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: + op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: + op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: + op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: + op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: + op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: + op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: @@ -4473,10 +4630,10 @@ done_prefixes: /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); -done: if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea += ctxt->_eip; +done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } @@ -4726,7 +4883,7 @@ special_insn: break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; @@ -4756,7 +4913,7 @@ special_insn: break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ @@ -4881,13 +5038,11 @@ twobyte_insn: break; case 0x80 ... 0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; - case 0xae: /* clflush */ - break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 518d86471b7..298781d4cfb 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) return; timer = &pit->pit_state.timer; + mutex_lock(&pit->pit_state.lock); if (hrtimer_cancel(timer)) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + mutex_unlock(&pit->pit_state.lock); } static void destroy_pit_timer(struct kvm_pit *pit) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ac1c4de3a48..978f402006e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep) * kvm mmu, before reclaiming the page, we should * unmap it from mmu first. 
*/ - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn))); + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, - kvm_is_mmio_pfn(pfn)); + kvm_is_reserved_pfn(pfn)); if (host_writable) spte |= SPTE_HOST_WRITEABLE; @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, * PT_PAGE_TABLE_LEVEL and there would be no adjustment done * here. */ - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompound(pfn_to_page(pfn)) && !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 806d58e3c32..fd49c867b25 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -298,7 +298,7 @@ retry_walk: } #endif walker->max_level = walker->level; - ASSERT(!is_long_mode(vcpu) && is_pae(vcpu)); + ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); accessed_dirty = PT_GUEST_ACCESSED_MASK; pt_access = pte_access = ACC_ALL; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 65510f624df..7527cefc5a4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3251,7 +3251,7 @@ static int wrmsr_interception(struct vcpu_svm *svm) msr.host_initiated = false; svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; - if (svm_set_msr(&svm->vcpu, &msr)) { + if (kvm_set_msr(&svm->vcpu, &msr)) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(&svm->vcpu, 0); } else { @@ -3551,9 +3551,9 @@ static int handle_exit(struct kvm_vcpu *vcpu) if (exit_code >= ARRAY_SIZE(svm_exit_handlers) || !svm_exit_handlers[exit_code]) { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = exit_code; - return 0; + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code); + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; } return svm_exit_handlers[exit_code](svm); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0acac81f198..3e556c68351 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2659,12 +2659,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) default: msr = find_msr_entry(vmx, msr_index); if (msr) { + u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); - kvm_set_shared_msr(msr->index, msr->data, - msr->mask); + ret = kvm_set_shared_msr(msr->index, msr->data, + msr->mask); preempt_enable(); + if (ret) + msr->data = old_msr_data; } break; } @@ -4576,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write32(TPR_THRESHOLD, 0); } - kvm_vcpu_reload_apic_access_page(vcpu); + kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); if (vmx_vm_has_apicv(vcpu->kvm)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); @@ -5291,7 +5294,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu) msr.data = data; msr.index = ecx; msr.host_initiated = false; - if (vmx_set_msr(vcpu, &msr) != 0) { + if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; @@ -6423,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; + preempt_disable(); 
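Brief note on the paging_tmpl.h assertion a few hunks up: the old expression asserted "not long mode and PAE", which is false for perfectly legal guest states, while the invariant the walker needs is "long mode implies PAE". A quick truth table with the two predicates stubbed out (illustrative only, not kernel code):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int lm, pae;

	for (lm = 0; lm <= 1; lm++)
		for (pae = 0; pae <= 1; pae++) {
			bool old_cond = !lm && pae;     /* old ASSERT condition */
			bool new_cond = !(lm && !pae);  /* fixed ASSERT condition */
			printf("long_mode=%d pae=%d  old=%d  new=%d\n",
			       lm, pae, old_cond, new_cond);
		}
	return 0;
}

Only the long-mode-without-PAE combination, which cannot occur, should trip the assertion, and that is exactly what the fixed form expresses.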
+ vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { @@ -6446,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); + + preempt_enable(); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) @@ -6743,6 +6750,12 @@ static int handle_invept(struct kvm_vcpu *vcpu) return 1; } +static int handle_invvpid(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -6788,6 +6801,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, + [EXIT_REASON_INVVPID] = handle_invvpid, }; static const int kvm_vmx_max_exit_handlers = @@ -7023,7 +7037,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: - case EXIT_REASON_INVEPT: + case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! @@ -7164,10 +7178,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { - vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; - vcpu->run->hw.hardware_exit_reason = exit_reason; + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; } - return 0; } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 34c8f94331f..0033df32a74 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void) shared_msr_update(i, shared_msrs_global.msrs[i]); } -void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) +int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); + int err; if (((value ^ smsr->values[slot].curr) & mask) == 0) - return; + return 0; smsr->values[slot].curr = value; - wrmsrl(shared_msrs_global.msrs[slot], value); + err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); + if (err) + return 1; + if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } + return 0; } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); @@ -987,7 +992,6 @@ void kvm_enable_efer_bits(u64 mask) } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); - /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. 
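The kvm_set_msr() hunk that follows filters noncanonical values for the segment-base MSRs and canonicalizes the SYSENTER ones, for the same reason the SYSEXIT change above checks RCX/RDX. A minimal userspace sketch of the canonical test such checks are built on, assuming 48 implemented virtual-address bits (the helper names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* bits 63..47 must all copy bit 47, i.e. sign-extending from bit 47
 * must give the value back unchanged */
static uint64_t canonical_48(uint64_t la)
{
	return (uint64_t)(((int64_t)(la << 16)) >> 16);
}

static bool noncanonical_48(uint64_t la)
{
	return canonical_48(la) != la;
}

int main(void)
{
	uint64_t samples[] = {
		0x00007fffffffffffULL,	/* top of the canonical user range */
		0xffff800000000000ULL,	/* bottom of the canonical kernel range */
		0x0000800000000000ULL,	/* noncanonical */
	};
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("%#018llx is %s\n", (unsigned long long)samples[i],
		       noncanonical_48(samples[i]) ? "noncanonical" : "canonical");
	return 0;
}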
@@ -995,8 +999,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); */ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { + switch (msr->index) { + case MSR_FS_BASE: + case MSR_GS_BASE: + case MSR_KERNEL_GS_BASE: + case MSR_CSTAR: + case MSR_LSTAR: + if (is_noncanonical_address(msr->data)) + return 1; + break; + case MSR_IA32_SYSENTER_EIP: + case MSR_IA32_SYSENTER_ESP: + /* + * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if + * non-canonical address is written on Intel but not on + * AMD (which ignores the top 32-bits, because it does + * not implement 64-bit SYSENTER). + * + * 64-bit code should hence be able to write a non-canonical + * value on AMD. Making the address canonical ensures that + * vmentry does not fail on Intel after writing a non-canonical + * value, and that something deterministic happens if the guest + * invokes 64-bit SYSENTER. + */ + msr->data = get_canonical(msr->data); + } return kvm_x86_ops->set_msr(vcpu, msr); } +EXPORT_SYMBOL_GPL(kvm_set_msr); /* * Adapt set_msr() to msr_io()'s calling convention diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 7609e0e421e..1318f75d56e 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -41,9 +41,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst, while (((unsigned long)src & 6) && len >= 2) { __u16 val16; - *errp = __get_user(val16, (const __u16 __user *)src); - if (*errp) - return isum; + if (__get_user(val16, (const __u16 __user *)src)) + goto out_err; *(__u16 *)dst = val16; isum = (__force __wsum)add32_with_carry( diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 4cb8763868f..4e5dfec750f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1123,7 +1123,7 @@ void mark_rodata_ro(void) unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PFN_ALIGN(&__stop___ex_table); unsigned long rodata_end = PFN_ALIGN(&__end_rodata); - unsigned long all_end = PFN_ALIGN(&_end); + unsigned long all_end; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); @@ -1134,7 +1134,16 @@ void mark_rodata_ro(void) /* * The rodata/data/bss/brk section (but not the kernel text!) * should also be not-executable. + * + * We align all_end to PMD_SIZE because the existing mapping + * is a full PMD. If we would align _brk_end to PAGE_SIZE we + * split the PMD and the reminder between _brk_end and the end + * of the PMD will remain mapped executable. + * + * Any PMD which was setup after the one which covers _brk_end + * has been zapped already via cleanup_highmem(). 
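To make the comment above concrete: rounding _brk_end up to PMD_SIZE extends the NX range to the end of the 2 MiB page already mapped there, so no executable tail is left behind. Tiny arithmetic illustration with the usual roundup() definition (the _brk_end value is made up):

#include <stdio.h>

#define PMD_SIZE	(2UL * 1024 * 1024)
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long brk_end = 0x01e13000UL;	/* hypothetical _brk_end */

	printf("_brk_end       = %#lx\n", brk_end);
	printf("PMD-aligned up = %#lx\n", roundup(brk_end, PMD_SIZE));
	return 0;
}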
*/ + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index ae242a7c11c..36de293caf2 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr) psize = page_level_size(level); pmask = page_level_mask(level); offset = virt_addr & ~pmask; - phys_addr = pte_pfn(*pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; return (phys_addr | offset); } EXPORT_SYMBOL_GPL(slow_virt_to_phys); diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index f15103dff4b..d143d216d52 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c @@ -40,20 +40,40 @@ void __init efi_bgrt_init(void) if (ACPI_FAILURE(status)) return; - if (bgrt_tab->header.length < sizeof(*bgrt_tab)) + if (bgrt_tab->header.length < sizeof(*bgrt_tab)) { + pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n", + bgrt_tab->header.length, sizeof(*bgrt_tab)); return; - if (bgrt_tab->version != 1 || bgrt_tab->status != 1) + } + if (bgrt_tab->version != 1) { + pr_err("Ignoring BGRT: invalid version %u (expected 1)\n", + bgrt_tab->version); + return; + } + if (bgrt_tab->status != 1) { + pr_err("Ignoring BGRT: invalid status %u (expected 1)\n", + bgrt_tab->status); + return; + } + if (bgrt_tab->image_type != 0) { + pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n", + bgrt_tab->image_type); return; - if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) + } + if (!bgrt_tab->image_address) { + pr_err("Ignoring BGRT: null image address\n"); return; + } image = efi_lookup_mapped_addr(bgrt_tab->image_address); if (!image) { image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header)); ioremapped = true; - if (!image) + if (!image) { + pr_err("Ignoring BGRT: failed to map image header memory\n"); return; + } } memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); @@ -61,14 +81,18 @@ void __init efi_bgrt_init(void) early_iounmap(image, sizeof(bmp_header)); bgrt_image_size = bmp_header.size; - bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); - if (!bgrt_image) + bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); + if (!bgrt_image) { + pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", + bgrt_image_size); return; + } if (ioremapped) { image = early_memremap(bgrt_tab->image_address, bmp_header.size); if (!image) { + pr_err("Ignoring BGRT: failed to map image memory\n"); kfree(bgrt_image); bgrt_image = NULL; return; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 850da94fef3..dbc8627a5cd 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -70,17 +70,7 @@ static efi_config_table_type_t arch_tables[] __initdata = { u64 efi_setup; /* efi setup_data physical address */ -static bool disable_runtime __initdata = false; -static int __init setup_noefi(char *arg) -{ - disable_runtime = true; - return 0; -} -early_param("noefi", setup_noefi); - -int add_efi_memmap; -EXPORT_SYMBOL(add_efi_memmap); - +static int add_efi_memmap __initdata; static int __init setup_add_efi_memmap(char *arg) { add_efi_memmap = 1; @@ -96,7 +86,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map( { efi_status_t status; - efi_call_phys_prelog(); + efi_call_phys_prolog(); status = efi_call_phys(efi_phys.set_virtual_address_map, memory_map_size, 
descriptor_size, descriptor_version, virtual_map); @@ -210,9 +200,12 @@ static void __init print_efi_memmap(void) for (p = memmap.map, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) { + char buf[64]; + md = p; - pr_info("mem%02u: type=%u, attr=0x%llx, range=[0x%016llx-0x%016llx) (%lluMB)\n", - i, md->type, md->attribute, md->phys_addr, + pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n", + i, efi_md_typeattr_format(buf, sizeof(buf), md), + md->phys_addr, md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), (md->num_pages >> (20 - EFI_PAGE_SHIFT))); } @@ -344,9 +337,9 @@ static int __init efi_runtime_init32(void) } /* - * We will only need *early* access to the following two - * EFI runtime services before set_virtual_address_map - * is invoked. + * We will only need *early* access to the SetVirtualAddressMap + * EFI runtime service. All other runtime services will be called + * via the virtual mapping. */ efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) @@ -368,9 +361,9 @@ static int __init efi_runtime_init64(void) } /* - * We will only need *early* access to the following two - * EFI runtime services before set_virtual_address_map - * is invoked. + * We will only need *early* access to the SetVirtualAddressMap + * EFI runtime service. All other runtime services will be called + * via the virtual mapping. */ efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) @@ -492,7 +485,7 @@ void __init efi_init(void) if (!efi_runtime_supported()) pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); else { - if (disable_runtime || efi_runtime_init()) + if (efi_runtime_disabled() || efi_runtime_init()) return; } if (efi_memmap_init()) @@ -537,7 +530,7 @@ void __init runtime_code_page_mkexec(void) } } -void efi_memory_uc(u64 addr, unsigned long size) +void __init efi_memory_uc(u64 addr, unsigned long size) { unsigned long page_shift = 1UL << EFI_PAGE_SHIFT; u64 npages; @@ -732,6 +725,7 @@ static void __init kexec_enter_virtual_mode(void) */ if (!efi_is_native()) { efi_unmap_memmap(); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return; } @@ -805,6 +799,7 @@ static void __init __efi_enter_virtual_mode(void) new_memmap = efi_map_regions(&count, &pg_shift); if (!new_memmap) { pr_err("Error reallocating memory, EFI runtime non-functional!\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return; } @@ -812,8 +807,10 @@ static void __init __efi_enter_virtual_mode(void) BUG_ON(!efi.systab); - if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) + if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) { + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return; + } efi_sync_low_kernel_mappings(); efi_dump_pagetable(); @@ -938,14 +935,11 @@ u64 efi_mem_attributes(unsigned long phys_addr) return 0; } -static int __init parse_efi_cmdline(char *str) +static int __init arch_parse_efi_cmdline(char *str) { - if (*str == '=') - str++; - - if (!strncmp(str, "old_map", 7)) + if (parse_option_str(str, "old_map")) set_bit(EFI_OLD_MEMMAP, &efi.flags); return 0; } -early_param("efi", parse_efi_cmdline); +early_param("efi", arch_parse_efi_cmdline); diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 9ee3491e31f..40e7cda5293 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -33,7 +33,7 @@ /* * To make EFI call EFI runtime service in physical addressing mode we need - * prelog/epilog before/after the invocation to disable interrupt, to + * prolog/epilog before/after the invocation to 
disable interrupt, to * claim EFI runtime service handler exclusively and to duplicate a memory in * low memory space say 0 - 3G. */ @@ -41,11 +41,13 @@ static unsigned long efi_rt_eflags; void efi_sync_low_kernel_mappings(void) {} void __init efi_dump_pagetable(void) {} -int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) +int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) { return 0; } -void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) {} +void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) +{ +} void __init efi_map_region(efi_memory_desc_t *md) { @@ -55,7 +57,7 @@ void __init efi_map_region(efi_memory_desc_t *md) void __init efi_map_region_fixed(efi_memory_desc_t *md) {} void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} -void efi_call_phys_prelog(void) +void __init efi_call_phys_prolog(void) { struct desc_ptr gdt_descr; @@ -69,7 +71,7 @@ void efi_call_phys_prelog(void) load_gdt(&gdt_descr); } -void efi_call_phys_epilog(void) +void __init efi_call_phys_epilog(void) { struct desc_ptr gdt_descr; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 290d397e1dd..35aecb6042f 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -79,7 +79,7 @@ static void __init early_code_mapping_set_exec(int executable) } } -void __init efi_call_phys_prelog(void) +void __init efi_call_phys_prolog(void) { unsigned long vaddress; int pgd; @@ -139,7 +139,7 @@ void efi_sync_low_kernel_mappings(void) sizeof(pgd_t) * num_pgds); } -int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) +int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) { unsigned long text; struct page *page; @@ -192,7 +192,7 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) return 0; } -void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) +void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) { pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index fbe66e626c0..040192b50d0 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -27,13 +27,13 @@ ENTRY(efi_call_phys) * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found * the values of these registers are the same. And, the corresponding * GDT entries are identical. So I will do nothing about segment reg - * and GDT, but change GDT base register in prelog and epilog. + * and GDT, but change GDT base register in prolog and epilog. */ /* * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET. * But to make it smoothly switch from virtual mode to flat mode. - * The mapping of lower virtual memory has been created in prelog and + * The mapping of lower virtual memory has been created in prolog and * epilog. */ movl $1f, %edx diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h index 46aa25c8ce0..3c1c3866d82 100644 --- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h @@ -10,10 +10,9 @@ */ -/* __attribute__((weak)) makes these declarations overridable */ /* For every CPU addition a new get_<cpuname>_ops interface needs * to be added. 
*/ -extern void *get_penwell_ops(void) __attribute__((weak)); -extern void *get_cloverview_ops(void) __attribute__((weak)); -extern void *get_tangier_ops(void) __attribute__((weak)); +extern void *get_penwell_ops(void); +extern void *get_cloverview_ops(void); +extern void *get_tangier_ops(void); diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index 3c53a90fdb1..c14ad34776c 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c @@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table) mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; mp_save_irq(&mp_irq); + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC); } return 0; @@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; mp_save_irq(&mp_irq); + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC); } return 0; } diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl new file mode 100644 index 00000000000..23210baade2 --- /dev/null +++ b/arch/x86/tools/calc_run_size.pl @@ -0,0 +1,39 @@ +#!/usr/bin/perl +# +# Calculate the amount of space needed to run the kernel, including room for +# the .bss and .brk sections. +# +# Usage: +# objdump -h a.out | perl calc_run_size.pl +use strict; + +my $mem_size = 0; +my $file_offset = 0; + +my $sections=" *[0-9]+ \.(?:bss|brk) +"; +while (<>) { + if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { + my $size = hex($1); + my $offset = hex($2); + $mem_size += $size; + if ($file_offset == 0) { + $file_offset = $offset; + } elsif ($file_offset != $offset) { + # BFD linker shows the same file offset in ELF. + # Gold linker shows them as consecutive. + next if ($file_offset + $mem_size == $offset + $size); + + printf STDERR "file_offset: 0x%lx\n", $file_offset; + printf STDERR "mem_size: 0x%lx\n", $mem_size; + printf STDERR "offset: 0x%lx\n", $offset; + printf STDERR "size: 0x%lx\n", $size; + + die ".bss and .brk are non-contiguous\n"; + } + } +} + +if ($file_offset == 0) { + die "Never found .bss or .brk file offset\n"; +} +printf("%d\n", $mem_size + $file_offset); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 1a3f0445432..fac5e4f9607 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1636,9 +1636,6 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_raw_console_write("mapping kernel into physical memory\n"); xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); - /* Allocate and initialize top and mid mfn levels for p2m structure */ - xen_build_mfn_list_list(); - /* keep using Xen gdt for now; no urgent need to change it */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f62af7647ec..a8a1a3d08d4 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1217,10 +1217,13 @@ static void __init xen_pagetable_p2m_copy(void) static void __init xen_pagetable_init(void) { paging_init(); - xen_setup_shared_info(); #ifdef CONFIG_X86_64 xen_pagetable_p2m_copy(); #endif + /* Allocate and initialize top and mid mfn levels for p2m structure */ + xen_build_mfn_list_list(); + + xen_setup_shared_info(); xen_post_allocator_init(); } static void xen_write_cr2(unsigned long cr2) diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 9f5983b01ed..b456b048eca 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -163,6 +163,7 @@ #include <linux/hash.h> #include <linux/sched.h> #include 
<linux/seq_file.h> +#include <linux/bootmem.h> #include <asm/cache.h> #include <asm/setup.h> @@ -181,21 +182,20 @@ static void __init m2p_override_init(void); unsigned long xen_max_p2m_pfn __read_mostly; +static unsigned long *p2m_mid_missing_mfn; +static unsigned long *p2m_top_mfn; +static unsigned long **p2m_top_mfn_p; + /* Placeholders for holes in the address space */ static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE); -static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE); RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); -RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); /* For each I/O range remapped we may lose up to two leaf pages for the boundary * violations and three mid pages to cover up to 3GB. With @@ -272,11 +272,11 @@ static void p2m_init(unsigned long *p2m) * Build the parallel p2m_top_mfn and p2m_mid_mfn structures * * This is called both at boot time, and after resuming from suspend: - * - At boot time we're called very early, and must use extend_brk() + * - At boot time we're called rather early, and must use alloc_bootmem*() * to allocate memory. * * - After resume we're called from within stop_machine, but the mfn - * tree should alreay be completely allocated. + * tree should already be completely allocated. */ void __ref xen_build_mfn_list_list(void) { @@ -287,20 +287,17 @@ void __ref xen_build_mfn_list_list(void) /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { - p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); - p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity); - p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_p_init(p2m_top_mfn_p); - p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); - p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity); } for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { @@ -328,10 +325,9 @@ void __ref xen_build_mfn_list_list(void) /* * XXX boot-time only! We should never find * missing parts of the mfn tree after - * runtime. extend_brk() will BUG if we call - * it too late. + * runtime. 
*/ - mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); + mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_top_mfn_p[topidx] = mid_mfn_p; @@ -415,7 +411,6 @@ void __init xen_build_dynamic_phys_to_machine(void) m2p_override_init(); } #ifdef CONFIG_X86_64 -#include <linux/bootmem.h> unsigned long __init xen_revector_p2m_tree(void) { unsigned long va_start; @@ -477,7 +472,6 @@ unsigned long __init xen_revector_p2m_tree(void) copy_page(new, mid_p); p2m_top[topidx][mididx] = &mfn_list[pfn_free]; - p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]); pfn_free += P2M_PER_PAGE; @@ -538,12 +532,13 @@ static bool alloc_p2m(unsigned long pfn) unsigned topidx, mididx; unsigned long ***top_p, **mid; unsigned long *top_mfn_p, *mid_mfn; + unsigned long *p2m_orig; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); top_p = &p2m_top[topidx]; - mid = *top_p; + mid = ACCESS_ONCE(*top_p); if (mid == p2m_mid_missing) { /* Mid level is missing, allocate a new one */ @@ -558,7 +553,7 @@ static bool alloc_p2m(unsigned long pfn) } top_mfn_p = &p2m_top_mfn[topidx]; - mid_mfn = p2m_top_mfn_p[topidx]; + mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]); BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); @@ -566,6 +561,7 @@ static bool alloc_p2m(unsigned long pfn) /* Separately check the mid mfn level */ unsigned long missing_mfn; unsigned long mid_mfn_mfn; + unsigned long old_mfn; mid_mfn = alloc_p2m_page(); if (!mid_mfn) @@ -575,17 +571,19 @@ static bool alloc_p2m(unsigned long pfn) missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn); - if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) + old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn); + if (old_mfn != missing_mfn) { free_p2m_page(mid_mfn); - else + mid_mfn = mfn_to_virt(old_mfn); + } else { p2m_top_mfn_p[topidx] = mid_mfn; + } } - if (p2m_top[topidx][mididx] == p2m_identity || - p2m_top[topidx][mididx] == p2m_missing) { + p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]); + if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) { /* p2m leaf page is missing */ unsigned long *p2m; - unsigned long *p2m_orig = p2m_top[topidx][mididx]; p2m = alloc_p2m_page(); if (!p2m) @@ -606,7 +604,6 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) { unsigned topidx, mididx, idx; unsigned long *p2m; - unsigned long *mid_mfn_p; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); @@ -633,43 +630,21 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) p2m_top[topidx][mididx] = p2m; - /* For save/restore we need to MFN of the P2M saved */ - - mid_mfn_p = p2m_top_mfn_p[topidx]; - WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), - "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", - topidx, mididx); - mid_mfn_p[mididx] = virt_to_mfn(p2m); - return true; } static bool __init early_alloc_p2m_middle(unsigned long pfn) { unsigned topidx = p2m_top_index(pfn); - unsigned long *mid_mfn_p; unsigned long **mid; mid = p2m_top[topidx]; - mid_mfn_p = p2m_top_mfn_p[topidx]; if (mid == p2m_mid_missing) { mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid, p2m_missing); p2m_top[topidx] = mid; - - BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); - } - /* And the save/restore P2M tables.. 
*/ - if (mid_mfn_p == p2m_mid_missing_mfn) { - mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(mid_mfn_p, p2m_missing); - - p2m_top_mfn_p[topidx] = mid_mfn_p; - p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); - /* Note: we don't set mid_mfn_p[midix] here, - * look in early_alloc_p2m() */ } return true; } @@ -680,14 +655,13 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn) * replace the P2M leaf with a p2m_missing or p2m_identity. * Stick the old page in the new P2M tree location. */ -bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) +static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn) { unsigned topidx; unsigned mididx; unsigned ident_pfns; unsigned inv_pfns; unsigned long *p2m; - unsigned long *mid_mfn_p; unsigned idx; unsigned long pfn; @@ -733,11 +707,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ found: /* Found one, replace old with p2m_identity or p2m_missing */ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); - /* And the other for save/restore.. */ - mid_mfn_p = p2m_top_mfn_p[topidx]; - /* NOTE: Even if it is a p2m_identity it should still be point to - * a page filled with INVALID_P2M_ENTRY entries. */ - mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); /* Reset where we want to stick the old page in. */ topidx = p2m_top_index(set_pfn); @@ -752,8 +721,6 @@ found: p2m_init(p2m); p2m_top[topidx][mididx] = p2m; - mid_mfn_p = p2m_top_mfn_p[topidx]; - mid_mfn_p[mididx] = virt_to_mfn(p2m); return true; } @@ -763,7 +730,7 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) if (!early_alloc_p2m_middle(pfn)) return false; - if (early_can_reuse_p2m_middle(pfn, mfn)) + if (early_can_reuse_p2m_middle(pfn)) return __set_phys_to_machine(pfn, mfn); if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/)) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index af7216128d9..29834b3fd87 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -595,6 +595,7 @@ char * __init xen_memory_setup(void) rc = 0; } BUG_ON(rc); + BUG_ON(memmap.nr_entries == 0); /* * Xen won't allow a 1:1 mapping to be created to UNUSABLE diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 8650cdb5320..4c071aeb841 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -510,6 +510,9 @@ static void xen_cpu_die(unsigned int cpu) current->state = TASK_UNINTERRUPTIBLE; schedule_timeout(HZ/10); } + + cpu_die_common(cpu); + xen_smp_intr_free(cpu); xen_uninit_lock_cpu(cpu); xen_teardown_timer(cpu); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index a1d430b112b..f473d268d38 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void) cycle_t ret; preempt_disable_notrace(); - src = this_cpu_ptr(&xen_vcpu->time); + src = &__this_cpu_read(xen_vcpu)->time; ret = pvclock_clocksource_read(src); preempt_enable_notrace(); return ret; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 49c6c3d9444..81f57e8c8f1 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105 config XTENSA_PLATFORM_XTFPGA bool "XTFPGA" + select ETHOC if ETHERNET select SERIAL_CONSOLE - select ETHOC select XTENSA_CALIBRATE_CCOUNT help XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605). 
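Stepping back to the alloc_p2m() changes a few hunks up: the cmpxchg() there follows the usual install-or-adopt pattern, allocate a candidate, publish it with a single compare-and-swap, and if another CPU won the race, free the local copy and use the published one. A self-contained sketch of the same pattern in plain C11 (nothing here is Xen code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(long *) slot;	/* NULL plays the role of "missing" */

static long *install_or_adopt(void)
{
	long *mine = malloc(sizeof(*mine));
	long *expected = NULL;

	if (!mine)
		return NULL;
	*mine = 42;
	if (!atomic_compare_exchange_strong(&slot, &expected, mine)) {
		/* lost the race: 'expected' now holds the winner's pointer */
		free(mine);
		return expected;
	}
	return mine;
}

int main(void)
{
	long *p = install_or_adopt();

	if (p)
		printf("published value: %ld\n", *p);
	free(atomic_exchange(&slot, (long *)NULL));
	return 0;
}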
@@ -367,7 +367,7 @@ config BUILTIN_DTB config BLK_DEV_SIMDISK tristate "Host file-based simulated block device support" default n - depends on XTENSA_PLATFORM_ISS + depends on XTENSA_PLATFORM_ISS && BLOCK help Create block devices that map to files in the host file system. Device binding to host file may be changed at runtime via proc diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts new file mode 100644 index 00000000000..249822b99bd --- /dev/null +++ b/arch/xtensa/boot/dts/lx200mx.dts @@ -0,0 +1,16 @@ +/dts-v1/; +/include/ "xtfpga.dtsi" +/include/ "xtfpga-flash-16m.dtsi" + +/ { + compatible = "cdns,xtensa-lx200"; + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x06000000>; + }; + pic: pic { + compatible = "cdns,xtensa-mx"; + #interrupt-cells = <2>; + interrupt-controller; + }; +}; diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig new file mode 100644 index 00000000000..f4b7b3888da --- /dev/null +++ b/arch/xtensa/configs/generic_kc705_defconfig @@ -0,0 +1,131 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_XTENSA_VARIANT_DC233C=y +CONFIG_XTENSA_UNALIGNED_USER=y +CONFIG_PREEMPT=y +CONFIG_HIGHMEM=y +# CONFIG_PCI is not set +CONFIG_XTENSA_PLATFORM_XTFPGA=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug" +CONFIG_USE_OF=y +CONFIG_BUILTIN_DTB="kc705" +# CONFIG_COMPACTION is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_UBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_HW_RANDOM=y +# 
CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_NOWAYOUT=y +CONFIG_SOFT_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOCKUP_DETECTOR=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_STACKTRACE=y +CONFIG_RCU_TRACE=y +# CONFIG_FTRACE is not set +CONFIG_LD_NO_RELAX=y +# CONFIG_S32C1I_SELFTEST is not set +CONFIG_CRYPTO_ANSI_CPRNG=y diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig new file mode 100644 index 00000000000..22eeacba37c --- /dev/null +++ b/arch/xtensa/configs/smp_lx200_defconfig @@ -0,0 +1,135 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_XTENSA_VARIANT_CUSTOM=y +CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3" +CONFIG_XTENSA_UNALIGNED_USER=y +CONFIG_PREEMPT=y +CONFIG_HAVE_SMP=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set +# CONFIG_PCI is not set +CONFIG_XTENSA_PLATFORM_XTFPGA=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug" +CONFIG_USE_OF=y +CONFIG_BUILTIN_DTB="lx200mx" +# CONFIG_COMPACTION is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_UBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# 
CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_NOWAYOUT=y +CONFIG_SOFT_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_VM=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_STACKTRACE=y +CONFIG_RCU_TRACE=y +# CONFIG_FTRACE is not set +CONFIG_LD_NO_RELAX=y +# CONFIG_S32C1I_SELFTEST is not set +CONFIG_CRYPTO_ANSI_CPRNG=y diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index b2173e5da60..0383aed5912 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte) static inline pte_t pte_mkspecial(pte_t pte) { return pte; } +#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK)) + /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index 8883fc877c5..db5bb72e2f4 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1) #define __NR_pivot_root 175 __SYSCALL(175, sys_pivot_root, 2) #define __NR_umount 176 -__SYSCALL(176, sys_umount, 2) +__SYSCALL(176, sys_oldumount, 1) +#define __ARCH_WANT_SYS_OLDUMOUNT #define __NR_swapoff 177 __SYSCALL(177, sys_swapoff, 1) #define __NR_sync 178 @@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3) #define __NR_renameat2 336 __SYSCALL(336, sys_renameat2, 5) -#define __NR_syscall_count 337 +#define __NR_seccomp 337 +__SYSCALL(337, sys_seccomp, 3) +#define __NR_getrandom 338 +__SYSCALL(338, sys_getrandom, 3) +#define __NR_memfd_create 339 +__SYSCALL(339, sys_memfd_create, 2) + +#define __NR_syscall_count 340 /* * sysxtensa syscall handler diff --git a/block/blk-merge.c b/block/blk-merge.c index ba99351c0f5..89b97b5e088 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -97,18 +97,22 @@ void blk_recalc_rq_segments(struct request *rq) void blk_recount_segments(struct request_queue *q, struct bio *bio) { - bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, - &q->queue_flags); + unsigned short seg_cnt; + + /* estimate segment number by bi_vcnt for non-cloned bio */ + if (bio_flagged(bio, BIO_CLONED)) + seg_cnt = bio_segments(bio); + else + seg_cnt = bio->bi_vcnt; - if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) && - bio->bi_vcnt < queue_max_segments(q)) - bio->bi_phys_segments = bio->bi_vcnt; + if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && + (seg_cnt < queue_max_segments(q))) + bio->bi_phys_segments = seg_cnt; else { struct bio *nxt = bio->bi_next; bio->bi_next = NULL; - bio->bi_phys_segments = 
__blk_recalc_rq_segments(q, bio, - no_sg_merge); + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); bio->bi_next = nxt; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 68929bad9a6..1d016fc9a8b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) wake_up_all(&q->mq_freeze_wq); } -/* - * Guarantee no request is in use, so we can change any data structure of - * the queue afterward. - */ -void blk_mq_freeze_queue(struct request_queue *q) +static void blk_mq_freeze_queue_start(struct request_queue *q) { bool freeze; @@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q) percpu_ref_kill(&q->mq_usage_counter); blk_mq_run_queues(q, false); } +} + +static void blk_mq_freeze_queue_wait(struct request_queue *q) +{ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); } +/* + * Guarantee no request is in use, so we can change any data structure of + * the queue afterward. + */ +void blk_mq_freeze_queue(struct request_queue *q) +{ + blk_mq_freeze_queue_start(q); + blk_mq_freeze_queue_wait(q); +} + static void blk_mq_unfreeze_queue(struct request_queue *q) { bool wake; @@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q) /* Basically redo blk_mq_init_queue with queue frozen */ static void blk_mq_queue_reinit(struct request_queue *q) { - blk_mq_freeze_queue(q); + WARN_ON_ONCE(!q->mq_freeze_depth); blk_mq_sysfs_unregister(q); @@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q) blk_mq_map_swqueue(q); blk_mq_sysfs_register(q); - - blk_mq_unfreeze_queue(q); } static int blk_mq_queue_reinit_notify(struct notifier_block *nb, @@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, return NOTIFY_OK; mutex_lock(&all_q_mutex); + + /* + * We need to freeze and reinit all existing queues. Freezing + * involves synchronous wait for an RCU grace period and doing it + * one by one may take a long time. Start freezing all queues in + * one swoop and then wait for the completions so that freezing can + * take place in parallel. 
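The comment above is the heart of this blk-mq change: freezing a queue waits out an RCU grace period, so the rework kicks off every freeze first and only then waits, bounding the total stall by the slowest queue rather than the sum. A toy userspace analogue of start-everything-then-wait, with pthreads standing in for the per-queue grace periods (purely illustrative, not block-layer code):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 4

static void *slow_freeze(void *arg)
{
	(void)arg;
	sleep(1);		/* stand-in for one RCU grace period */
	return NULL;
}

int main(void)
{
	pthread_t tid[NQUEUES];
	int i;

	for (i = 0; i < NQUEUES; i++)	/* "freeze_start" every queue */
		pthread_create(&tid[i], NULL, slow_freeze, NULL);
	for (i = 0; i < NQUEUES; i++)	/* then "freeze_wait" on each */
		pthread_join(tid[i], NULL);
	printf("all %d queues frozen after roughly one wait, not %d\n",
	       NQUEUES, NQUEUES);
	return 0;
}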
+ */ + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_freeze_queue_start(q); + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_freeze_queue_wait(q); + list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_queue_reinit(q); + + list_for_each_entry(q, &all_q_list, all_q_node) + blk_mq_unfreeze_queue(q); + mutex_unlock(&all_q_mutex); return NOTIFY_OK; } diff --git a/block/elevator.c b/block/elevator.c index 24c28b659bb..afa3b037a17 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -229,7 +229,9 @@ int elevator_init(struct request_queue *q, char *name) } err = e->ops.elevator_init_fn(q, e); - return 0; + if (err) + elevator_put(e); + return err; } EXPORT_SYMBOL(elevator_init); diff --git a/block/ioprio.c b/block/ioprio.c index e50170ca7c3..31666c92b46 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -157,14 +157,16 @@ out: int ioprio_best(unsigned short aprio, unsigned short bprio) { - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + unsigned short aclass; + unsigned short bclass; - if (aclass == IOPRIO_CLASS_NONE) - aclass = IOPRIO_CLASS_BE; - if (bclass == IOPRIO_CLASS_NONE) - bclass = IOPRIO_CLASS_BE; + if (!ioprio_valid(aprio)) + aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); + if (!ioprio_valid(bprio)) + bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); + aclass = IOPRIO_PRIO_CLASS(aprio); + bclass = IOPRIO_PRIO_CLASS(bprio); if (aclass == bclass) return min(aprio, bprio); if (aclass > bclass) diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index abb2e65b24c..b0c2a616c8f 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto error; + goto error_free_buffer; } blk_rq_set_block_pc(rq); @@ -508,7 +508,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { err = DRIVER_ERROR << 24; - goto out; + goto error; } memset(sense, 0, sizeof(sense)); @@ -517,7 +517,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, blk_execute_rq(q, disk, rq, 0); -out: err = rq->errors & 0xff; /* only 8 bit SCSI status */ if (err) { if (rq->sense_len && rq->sense) { @@ -532,9 +531,11 @@ out: } error: + blk_put_request(rq); + +error_free_buffer: kfree(buffer); - if (rq) - blk_put_request(rq); + return err; } EXPORT_SYMBOL_GPL(sg_scsi_ioctl); diff --git a/crypto/cts.c b/crypto/cts.c index 042223f8e73..133f0874c95 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); /* 6. 
Decrypt En to create Pn-1 */ - memset(iv, 0, sizeof(iv)); + memzero_explicit(iv, sizeof(iv)); + sg_set_buf(&sgsrc[0], s + bsize, bsize); sg_set_buf(&sgdst[0], d, bsize); err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 42794803c48..7bb04743278 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data, src = data + done; } while (done + SHA1_BLOCK_SIZE <= len); - memset(temp, 0, sizeof(temp)); + memzero_explicit(temp, sizeof(temp)); partial = 0; } memcpy(sctx->buffer + partial, src, len - done); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 0bb55834469..65e7b76b057 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -211,10 +211,9 @@ static void sha256_transform(u32 *state, const u8 *input) /* clear any sensitive info... */ a = b = c = d = e = f = g = h = t1 = t2 = 0; - memset(W, 0, 64 * sizeof(u32)); + memzero_explicit(W, 64 * sizeof(u32)); } - static int sha224_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); @@ -317,7 +316,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) sha256_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); - memset(D, 0, SHA256_DIGEST_SIZE); + memzero_explicit(D, SHA256_DIGEST_SIZE); return 0; } diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 6dde57dc511..95db67197cd 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -239,7 +239,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash) sha512_final(desc, D); memcpy(hash, D, 48); - memset(D, 0, 64); + memzero_explicit(D, 64); return 0; } diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 87403556fd0..3c7af0d1ff7 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out) tgr192_final(desc, D); memcpy(out, D, TGR160_DIGEST_SIZE); - memset(D, 0, TGR192_DIGEST_SIZE); + memzero_explicit(D, TGR192_DIGEST_SIZE); return 0; } @@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out) tgr192_final(desc, D); memcpy(out, D, TGR128_DIGEST_SIZE); - memset(D, 0, TGR192_DIGEST_SIZE); + memzero_explicit(D, TGR192_DIGEST_SIZE); return 0; } diff --git a/crypto/vmac.c b/crypto/vmac.c index 2eb11a30c29..d84c24bd7ff 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out) } mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); memcpy(out, &mac, sizeof(vmac_t)); - memset(&mac, 0, sizeof(vmac_t)); + memzero_explicit(&mac, sizeof(vmac_t)); memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); ctx->partial_size = 0; return 0; diff --git a/crypto/wp512.c b/crypto/wp512.c index 180f1d6e03f..ec64e7762fb 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out) u8 D[64]; wp512_final(desc, D); - memcpy (out, D, WP384_DIGEST_SIZE); - memset (D, 0, WP512_DIGEST_SIZE); + memcpy(out, D, WP384_DIGEST_SIZE); + memzero_explicit(D, WP512_DIGEST_SIZE); return 0; } @@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out) u8 D[64]; wp512_final(desc, D); - memcpy (out, D, WP256_DIGEST_SIZE); - memset (D, 0, WP512_DIGEST_SIZE); + memcpy(out, D, WP256_DIGEST_SIZE); + memzero_explicit(D, WP512_DIGEST_SIZE); return 0; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index d0f3265fb85..b23fe37f67c 
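The crypto hunks above all share one idea: stack buffers that held key material or intermediate digests are now scrubbed with memzero_explicit() rather than memset(), since the compiler may legally elide a memset() of a variable that is about to go out of scope, whereas memzero_explicit() carries a barrier that keeps the clear in place. A minimal sketch of the idiom, using a hypothetical helper rather than any of the functions patched above:

#include <linux/string.h>
#include <linux/types.h>

static void handle_secret(const u8 *key)
{
	u8 tmp[64];

	memcpy(tmp, key, sizeof(tmp));		/* copy sensitive material to the stack */
	/* ... use tmp ... */
	memzero_explicit(tmp, sizeof(tmp));	/* cannot be elided by the compiler */
}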
100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -144,7 +144,7 @@ config ACPI_VIDEO config ACPI_FAN tristate "Fan" - select THERMAL + depends on THERMAL default y help This driver supports ACPI fan devices, allowing user-mode diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 505d4d79fe3..c3b2fcb729f 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -43,6 +43,7 @@ acpi-y += pci_root.o pci_link.o pci_irq.o acpi-y += acpi_lpss.o acpi-y += acpi_platform.o acpi-y += acpi_pnp.o +acpi-y += int340x_thermal.o acpi-y += power.o acpi-y += event.o acpi-y += sysfs.o diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 2bf9082f752..6ba8beb6b9d 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/dma-mapping.h> #include <linux/platform_device.h> #include "internal.h" @@ -102,6 +103,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.acpi_node.companion = adev; + pdevinfo.dma_mask = DMA_BIT_MASK(32); pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) dev_err(&adev->dev, "platform device creation failed: %ld\n", @@ -113,3 +115,4 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) kfree(resources); return pdev; } +EXPORT_SYMBOL_GPL(acpi_create_platform_device); diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 2ad2351a983..c318d3e2789 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -127,7 +127,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, - acpi_event_status * event_status); + acpi_event_status *event_status); acpi_status acpi_hw_disable_all_gpes(void); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 2747279fbe3..c00e7e41ad7 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -413,8 +413,8 @@ struct acpi_gpe_handler_info { acpi_gpe_handler address; /* Address of handler, if any */ void *context; /* Context to be passed to handler */ struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ - u8 original_flags; /* Original (pre-handler) GPE info */ - u8 originally_enabled; /* True if GPE was originally enabled */ + u8 original_flags; /* Original (pre-handler) GPE info */ + u8 originally_enabled; /* True if GPE was originally enabled */ }; /* Notify info for implicit notify, multiple device objects */ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index f14882788ee..1afe46e44da 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -49,6 +49,8 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count); /* * tbxfroot - Root pointer utilities */ +u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp); + acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index f3f83440844..3a0beeb86ba 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -117,6 +117,12 @@ struct asl_resource_node { struct asl_resource_node *next; }; +struct asl_resource_info { + union acpi_parse_object 
*descriptor_type_op; /* Resource descriptor parse node */ + union acpi_parse_object *mapping_op; /* Used for mapfile support */ + u32 current_byte_offset; /* Offset in resource template */ +}; + /* Macros used to generate AML resource length fields */ #define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header)) @@ -449,4 +455,32 @@ union aml_resource { u8 byte_item; }; +/* Interfaces used by both the disassembler and compiler */ + +void +mp_save_gpio_info(union acpi_parse_object *op, + union aml_resource *resource, + u32 pin_count, u16 *pin_list, char *device_name); + +void +mp_save_serial_info(union acpi_parse_object *op, + union aml_resource *resource, char *device_name); + +char *mp_get_hid_from_parse_tree(struct acpi_namespace_node *hid_node); + +char *mp_get_hid_via_namestring(char *device_name); + +char *mp_get_connection_info(union acpi_parse_object *op, + u32 pin_index, + struct acpi_namespace_node **target_node, + char **target_name); + +char *mp_get_parent_device_hid(union acpi_parse_object *op, + struct acpi_namespace_node **target_node, + char **parent_device_name); + +char *mp_get_ddn_value(char *device_name); + +char *mp_get_hid_value(struct acpi_namespace_node *device_node); + #endif diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index e4ba4dec86a..2095dfb72bc 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -100,13 +100,14 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) * * FUNCTION: acpi_ev_enable_gpe * - * PARAMETERS: gpe_event_info - GPE to enable + * PARAMETERS: gpe_event_info - GPE to enable * * RETURN: Status * * DESCRIPTION: Clear a GPE of stale events and enable it. * ******************************************************************************/ + acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; @@ -125,6 +126,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) } /* Clear the GPE (of stale events) */ + status = acpi_hw_clear_gpe(gpe_event_info); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); @@ -136,7 +138,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) return_ACPI_STATUS(status); } - /******************************************************************************* * * FUNCTION: acpi_ev_add_gpe_reference @@ -212,7 +213,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) if (ACPI_SUCCESS(status)) { status = acpi_hw_low_set_gpe(gpe_event_info, - ACPI_GPE_DISABLE); + ACPI_GPE_DISABLE); } if (ACPI_FAILURE(status)) { @@ -334,7 +335,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, * ******************************************************************************/ -u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) +u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) { acpi_status status; struct acpi_gpe_block_info *gpe_block; @@ -427,7 +428,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) /* Check if there is anything active at all in this register */ - enabled_status_byte = (u8) (status_reg & enable_reg); + enabled_status_byte = (u8)(status_reg & enable_reg); if (!enabled_status_byte) { /* No active GPEs in this register, move on */ @@ -450,7 +451,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) acpi_ev_gpe_dispatch(gpe_block-> node, &gpe_block-> - event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + 
gpe_register_info->base_gpe_number); + event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } } @@ -636,7 +637,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) * ******************************************************************************/ -acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) +acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info) { acpi_status status; @@ -666,9 +667,9 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) * * FUNCTION: acpi_ev_gpe_dispatch * - * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 - * gpe_event_info - Info for this GPE - * gpe_number - Number relative to the parent GPE block + * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 + * gpe_event_info - Info for this GPE + * gpe_number - Number relative to the parent GPE block * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * @@ -681,7 +682,7 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) u32 acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, - struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) + struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) { acpi_status status; u32 return_value; diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 49fc7effd96..7be92837987 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -424,6 +424,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, } /* Disable the GPE in case it's been enabled already. */ + (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); /* diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 11e5803b8b4..55a58f3ec8d 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -786,18 +786,26 @@ acpi_install_gpe_handler(acpi_handle gpe_device, handler->method_node = gpe_event_info->dispatch.method_node; handler->original_flags = (u8)(gpe_event_info->flags & (ACPI_GPE_XRUPT_TYPE_MASK | - ACPI_GPE_DISPATCH_MASK)); + ACPI_GPE_DISPATCH_MASK)); /* * If the GPE is associated with a method, it may have been enabled * automatically during initialization, in which case it has to be * disabled now to avoid spurious execution of the handler. 
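For context, acpi_install_gpe_handler() is the public entry point being changed here; a driver-side call looks roughly like the sketch below (the GPE number, handler and context are made up for illustration). The hunk extends the "disable first" logic to NOTIFY-dispatched GPEs and adds a warning if the requested trigger type disagrees with the one already recorded for the GPE.

static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
{
	/* ... service the event ... */
	return ACPI_INTERRUPT_HANDLED;
}

/* Somewhere in a driver's init path (hypothetical): if GPE 0x16 already had an
 * enabled _L16 method, it is disabled first so the handler is not run spuriously. */
acpi_status status = acpi_install_gpe_handler(NULL, 0x16,
					      ACPI_GPE_LEVEL_TRIGGERED,
					      my_gpe_handler, my_data);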
*/ - - if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) - && gpe_event_info->runtime_count) { - handler->originally_enabled = 1; + if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || + (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && + gpe_event_info->runtime_count) { + handler->originally_enabled = TRUE; (void)acpi_ev_remove_gpe_reference(gpe_event_info); + + /* Sanity check of original type against new type */ + + if (type != + (u32)(gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) { + ACPI_WARNING((AE_INFO, + "GPE type mismatch (level/edge)")); + } } /* Install the handler */ @@ -808,7 +816,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); - gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); + gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER); acpi_os_release_lock(acpi_gbl_gpe_lock, flags); @@ -893,7 +901,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, gpe_event_info->dispatch.method_node = handler->method_node; gpe_event_info->flags &= - ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); + ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); gpe_event_info->flags |= handler->original_flags; /* @@ -901,7 +909,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, * enabled, it should be enabled at this point to restore the * post-initialization configuration. */ - if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && + if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || + (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) { (void)acpi_ev_add_gpe_reference(gpe_event_info); } @@ -946,7 +955,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler) * handle is returned. * ******************************************************************************/ -acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) +acpi_status acpi_acquire_global_lock(u16 timeout, u32 *handle) { acpi_status status; diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index e286640ad4f..bb8cbf5961b 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -324,8 +324,9 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event) ******************************************************************************/ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) { - acpi_status status = AE_OK; - u32 value; + acpi_status status; + acpi_event_status local_event_status = 0; + u32 in_byte; ACPI_FUNCTION_TRACE(acpi_get_event_status); @@ -339,29 +340,40 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Get the status of the requested fixed event */ + /* Fixed event currently can be dispatched? */ + + if (acpi_gbl_fixed_event_handlers[event].handler) { + local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER; + } + + /* Fixed event currently enabled? */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. - enable_register_id, &value); - if (ACPI_FAILURE(status)) + enable_register_id, &in_byte); + if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); + } - *event_status = value; + if (in_byte) { + local_event_status |= ACPI_EVENT_FLAG_ENABLED; + } + + /* Fixed event currently active? */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
- status_register_id, &value); - if (ACPI_FAILURE(status)) + status_register_id, &in_byte); + if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); + } - if (value) - *event_status |= ACPI_EVENT_FLAG_SET; - - if (acpi_gbl_fixed_event_handlers[event].handler) - *event_status |= ACPI_EVENT_FLAG_HANDLE; + if (in_byte) { + local_event_status |= ACPI_EVENT_FLAG_SET; + } - return_ACPI_STATUS(status); + (*event_status) = local_event_status; + return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_get_event_status) diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 56710a03c9b..e889a5304ab 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -106,8 +106,8 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) * * FUNCTION: acpi_enable_gpe * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block * * RETURN: Status * @@ -115,7 +115,6 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) * hardware-enabled. * ******************************************************************************/ - acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) { acpi_status status = AE_BAD_PARAMETER; @@ -490,8 +489,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe) * * FUNCTION: acpi_get_gpe_status * - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 - * gpe_number - GPE level within the GPE block + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block * event_status - Where the current status of the event * will be returned * @@ -524,9 +523,6 @@ acpi_get_gpe_status(acpi_handle gpe_device, status = acpi_hw_get_gpe_status(gpe_event_info, event_status); - if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) - *event_status |= ACPI_EVENT_FLAG_HANDLE; - unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index ea62d40fd16..48ac7b7b59c 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -202,7 +202,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, - acpi_event_status * event_status) + acpi_event_status *event_status) { u32 in_byte; u32 register_bit; @@ -216,6 +216,13 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, return (AE_BAD_PARAMETER); } + /* GPE currently handled? 
*/ + + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != + ACPI_GPE_DISPATCH_NONE) { + local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER; + } + /* Get the info block for the entire GPE register */ gpe_register_info = gpe_event_info->register_info; diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 65ab8fed3d5..43a54af2b54 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -50,6 +50,36 @@ ACPI_MODULE_NAME("tbxfroot") /******************************************************************************* * + * FUNCTION: acpi_tb_get_rsdp_length + * + * PARAMETERS: rsdp - Pointer to RSDP + * + * RETURN: Table length + * + * DESCRIPTION: Get the length of the RSDP + * + ******************************************************************************/ +u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp) +{ + + if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) { + + /* BAD Signature */ + + return (0); + } + + /* "Length" field is available if table version >= 2 */ + + if (rsdp->revision >= 2) { + return (rsdp->length); + } else { + return (ACPI_RSDP_CHECKSUM_LENGTH); + } +} + +/******************************************************************************* + * * FUNCTION: acpi_tb_validate_rsdp * * PARAMETERS: rsdp - Pointer to unvalidated RSDP @@ -59,7 +89,8 @@ ACPI_MODULE_NAME("tbxfroot") * DESCRIPTION: Validate the RSDP (ptr) * ******************************************************************************/ -acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) + +acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp) { /* diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index ed122e17636..7556e7c4a05 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"), }, }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Vostro 3546", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 
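The event-status hunks above retire ACPI_EVENT_FLAG_HANDLE in favour of ACPI_EVENT_FLAG_HAS_HANDLER and have acpi_hw_get_gpe_status()/acpi_get_event_status() report it themselves. A caller-side sketch of how the new flag is consumed (hypothetical variable names; the scan.c and sysfs.c hunks further down perform the same check):

	acpi_event_status status;

	if (ACPI_SUCCESS(acpi_get_gpe_status(NULL, gpe_number, &status)) &&
	    (status & ACPI_EVENT_FLAG_HAS_HANDLER))
		pr_info("GPE 0x%02x can be dispatched to a handler or GPE method\n",
			gpe_number);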
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index bea6896be12..7db19316076 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -343,6 +343,7 @@ int acpi_device_update_power(struct acpi_device *device, int *state_p) return 0; } +EXPORT_SYMBOL_GPL(acpi_device_update_power); int acpi_bus_update_power(acpi_handle handle, int *state_p) { @@ -710,7 +711,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) return -ENODEV; } - return acpi_device_wakeup(adev, enable, ACPI_STATE_S0); + return acpi_device_wakeup(adev, ACPI_STATE_S0, enable); } EXPORT_SYMBOL(acpi_pm_device_run_wake); #endif /* CONFIG_PM_RUNTIME */ @@ -877,7 +878,7 @@ int acpi_dev_suspend_late(struct device *dev) return 0; target_state = acpi_target_system_state(); - wakeup = device_may_wakeup(dev); + wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); error = acpi_device_wakeup(adev, target_state, wakeup); if (wakeup && error) return error; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index cb6066c809e..5f9b74b9b71 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -126,14 +126,16 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ +static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ /* -------------------------------------------------------------------------- - Transaction Management - -------------------------------------------------------------------------- */ + * Transaction Management + * -------------------------------------------------------------------------- */ static inline u8 acpi_ec_read_status(struct acpi_ec *ec) { u8 x = inb(ec->command_addr); + pr_debug("EC_SC(R) = 0x%2.2x " "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n", x, @@ -148,6 +150,7 @@ static inline u8 acpi_ec_read_status(struct acpi_ec *ec) static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); + pr_debug("EC_DATA(R) = 0x%2.2x\n", x); return x; } @@ -164,10 +167,32 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) outb(data, ec->data_addr); } +#ifdef DEBUG +static const char *acpi_ec_cmd_string(u8 cmd) +{ + switch (cmd) { + case 0x80: + return "RD_EC"; + case 0x81: + return "WR_EC"; + case 0x82: + return "BE_EC"; + case 0x83: + return "BD_EC"; + case 0x84: + return "QR_EC"; + } + return "UNKNOWN"; +} +#else +#define acpi_ec_cmd_string(cmd) "UNDEF" +#endif + static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; + spin_lock_irqsave(&ec->lock, flags); if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; @@ -181,7 +206,8 @@ static bool advance_transaction(struct acpi_ec *ec) u8 status; bool wakeup = false; - pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); + pr_debug("===== %s (%d) =====\n", + in_interrupt() ? 
"IRQ" : "TASK", smp_processor_id()); status = acpi_ec_read_status(ec); t = ec->curr; if (!t) @@ -198,7 +224,8 @@ static bool advance_transaction(struct acpi_ec *ec) if (t->rlen == t->ri) { t->flags |= ACPI_EC_COMMAND_COMPLETE; if (t->command == ACPI_EC_COMMAND_QUERY) - pr_debug("hardware QR_EC completion\n"); + pr_debug("***** Command(%s) hardware completion *****\n", + acpi_ec_cmd_string(t->command)); wakeup = true; } } else @@ -210,18 +237,14 @@ static bool advance_transaction(struct acpi_ec *ec) } return wakeup; } else { - /* - * There is firmware refusing to respond QR_EC when SCI_EVT - * is not set, for which case, we complete the QR_EC - * without issuing it to the firmware. - * https://bugzilla.kernel.org/show_bug.cgi?id=86211 - */ - if (!(status & ACPI_EC_FLAG_SCI) && + if (EC_FLAGS_QUERY_HANDSHAKE && + !(status & ACPI_EC_FLAG_SCI) && (t->command == ACPI_EC_COMMAND_QUERY)) { t->flags |= ACPI_EC_COMMAND_POLL; t->rdata[t->ri++] = 0x00; t->flags |= ACPI_EC_COMMAND_COMPLETE; - pr_debug("software QR_EC completion\n"); + pr_debug("***** Command(%s) software completion *****\n", + acpi_ec_cmd_string(t->command)); wakeup = true; } else if ((status & ACPI_EC_FLAG_IBF) == 0) { acpi_ec_write_cmd(ec, t->command); @@ -264,6 +287,7 @@ static int ec_poll(struct acpi_ec *ec) { unsigned long flags; int repeat = 5; /* number of command restarts */ + while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); @@ -296,18 +320,25 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, { unsigned long tmp; int ret = 0; + if (EC_FLAGS_MSI) udelay(ACPI_EC_MSI_UDELAY); /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* following two actions should be kept atomic */ ec->curr = t; + pr_debug("***** Command(%s) started *****\n", + acpi_ec_cmd_string(t->command)); start_transaction(ec); + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { + clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + pr_debug("***** Event stopped *****\n"); + } spin_unlock_irqrestore(&ec->lock, tmp); ret = ec_poll(ec); spin_lock_irqsave(&ec->lock, tmp); - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + pr_debug("***** Command(%s) stopped *****\n", + acpi_ec_cmd_string(t->command)); ec->curr = NULL; spin_unlock_irqrestore(&ec->lock, tmp); return ret; @@ -317,6 +348,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; u32 glk; + if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; if (t->rdata) @@ -333,8 +365,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) goto unlock; } } - pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", - t->command, t->wdata ? t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { /* It has to be disabled, so that it doesn't trigger. 
*/ @@ -355,7 +385,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) t->irq_count); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } - pr_debug("transaction end\n"); if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -383,7 +412,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) acpi_ec_transaction(ec, &t) : 0; } -static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) +static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) { int result; u8 d; @@ -419,10 +448,9 @@ int ec_read(u8 addr, u8 *val) if (!err) { *val = temp_data; return 0; - } else - return err; + } + return err; } - EXPORT_SYMBOL(ec_read); int ec_write(u8 addr, u8 val) @@ -436,22 +464,21 @@ int ec_write(u8 addr, u8 val) return err; } - EXPORT_SYMBOL(ec_write); int ec_transaction(u8 command, - const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len) + const u8 *wdata, unsigned wdata_len, + u8 *rdata, unsigned rdata_len) { struct transaction t = {.command = command, .wdata = wdata, .rdata = rdata, .wlen = wdata_len, .rlen = rdata_len}; + if (!first_ec) return -ENODEV; return acpi_ec_transaction(first_ec, &t); } - EXPORT_SYMBOL(ec_transaction); /* Get the handle to the EC device */ @@ -461,7 +488,6 @@ acpi_handle ec_get_handle(void) return NULL; return first_ec->handle; } - EXPORT_SYMBOL(ec_get_handle); /* @@ -525,13 +551,14 @@ void acpi_ec_unblock_transactions_early(void) clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); } -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data) { int result; u8 d; struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, .wdata = NULL, .rdata = &d, .wlen = 0, .rlen = 1}; + if (!ec || !data) return -EINVAL; /* @@ -557,6 +584,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, { struct acpi_ec_query_handler *handler = kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL); + if (!handler) return -ENOMEM; @@ -569,12 +597,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, mutex_unlock(&ec->mutex); return 0; } - EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { struct acpi_ec_query_handler *handler, *tmp; + mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { if (query_bit == handler->query_bit) { @@ -584,20 +612,20 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) } mutex_unlock(&ec->mutex); } - EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); static void acpi_ec_run(void *cxt) { struct acpi_ec_query_handler *handler = cxt; + if (!handler) return; - pr_debug("start query execution\n"); + pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit); if (handler->func) handler->func(handler->data); else if (handler->handle) acpi_evaluate_object(handler->handle, NULL, NULL, NULL); - pr_debug("stop query execution\n"); + pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit); kfree(handler); } @@ -620,8 +648,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) if (!copy) return -ENOMEM; memcpy(copy, handler, sizeof(*copy)); - pr_debug("push query execution (0x%2x) on queue\n", - value); + pr_debug("##### Query(0x%02x) scheduled #####\n", + handler->query_bit); return acpi_os_execute((copy->func) ? 
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, acpi_ec_run, copy); @@ -633,6 +661,7 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) static void acpi_ec_gpe_query(void *ec_cxt) { struct acpi_ec *ec = ec_cxt; + if (!ec) return; mutex_lock(&ec->mutex); @@ -644,7 +673,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) { if (state & ACPI_EC_FLAG_SCI) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { - pr_debug("push gpe query to the queue\n"); + pr_debug("***** Event started *****\n"); return acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ec_gpe_query, ec); } @@ -667,8 +696,8 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, } /* -------------------------------------------------------------------------- - Address Space Management - -------------------------------------------------------------------------- */ + * Address Space Management + * -------------------------------------------------------------------------- */ static acpi_status acpi_ec_space_handler(u32 function, acpi_physical_address address, @@ -699,27 +728,26 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, switch (result) { case -EINVAL: return AE_BAD_PARAMETER; - break; case -ENODEV: return AE_NOT_FOUND; - break; case -ETIME: return AE_TIME; - break; default: return AE_OK; } } /* -------------------------------------------------------------------------- - Driver Interface - -------------------------------------------------------------------------- */ + * Driver Interface + * -------------------------------------------------------------------------- */ + static acpi_status ec_parse_io_ports(struct acpi_resource *resource, void *context); static struct acpi_ec *make_acpi_ec(void) { struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL); + if (!ec) return NULL; ec->flags = 1 << EC_FLAGS_QUERY_PENDING; @@ -742,9 +770,8 @@ acpi_ec_register_query_methods(acpi_handle handle, u32 level, status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); - if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) { + if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); - } return AE_OK; } @@ -753,7 +780,6 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) { acpi_status status; unsigned long long tmp = 0; - struct acpi_ec *ec = context; /* clear addr values, ec_parse_io_ports depend on it */ @@ -781,6 +807,7 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) static int ec_install_handlers(struct acpi_ec *ec) { acpi_status status; + if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) return 0; status = acpi_install_gpe_handler(NULL, ec->gpe, @@ -981,6 +1008,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) } /* + * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for + * which case, we complete the QR_EC without issuing it to the firmware. + * https://bugzilla.kernel.org/show_bug.cgi?id=86211 + */ +static int ec_flag_query_handshake(const struct dmi_system_id *id) +{ + pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); + EC_FLAGS_QUERY_HANDSHAKE = 1; + return 0; +} + +/* * On some hardware it is necessary to clear events accumulated by the EC during * sleep. These ECs stop reporting GPEs until they are manually polled, if too * many events are accumulated. (e.g. 
Samsung Series 5/9 notebooks) @@ -1054,6 +1093,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_clear_on_resume, "Samsung hardware", { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, + { + ec_flag_query_handshake, "Acer hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, {}, }; @@ -1078,7 +1120,8 @@ int __init acpi_ec_ecdt_probe(void) boot_ec->data_addr = ecdt_ptr->data.address; boot_ec->gpe = ecdt_ptr->gpe; boot_ec->handle = ACPI_ROOT_OBJECT; - acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); + acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, + &boot_ec->handle); /* Don't trust ECDT, which comes from ASUSTek */ if (!EC_FLAGS_VALIDATE_ECDT) goto install; @@ -1162,6 +1205,5 @@ static void __exit acpi_ec_exit(void) { acpi_bus_unregister_driver(&acpi_ec_driver); - return; } #endif /* 0 */ diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 5328b1090e0..caf9b76b7ef 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -30,22 +30,19 @@ #include <linux/uaccess.h> #include <linux/thermal.h> #include <linux/acpi.h> - -#define ACPI_FAN_CLASS "fan" -#define ACPI_FAN_FILE_STATE "state" - -#define _COMPONENT ACPI_FAN_COMPONENT -ACPI_MODULE_NAME("fan"); +#include <linux/platform_device.h> +#include <linux/sort.h> MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI Fan Driver"); MODULE_LICENSE("GPL"); -static int acpi_fan_add(struct acpi_device *device); -static int acpi_fan_remove(struct acpi_device *device); +static int acpi_fan_probe(struct platform_device *pdev); +static int acpi_fan_remove(struct platform_device *pdev); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, + {"INT3404", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); @@ -64,37 +61,100 @@ static struct dev_pm_ops acpi_fan_pm = { #define FAN_PM_OPS_PTR NULL #endif -static struct acpi_driver acpi_fan_driver = { - .name = "fan", - .class = ACPI_FAN_CLASS, - .ids = fan_device_ids, - .ops = { - .add = acpi_fan_add, - .remove = acpi_fan_remove, - }, - .drv.pm = FAN_PM_OPS_PTR, +struct acpi_fan_fps { + u64 control; + u64 trip_point; + u64 speed; + u64 noise_level; + u64 power; +}; + +struct acpi_fan_fif { + u64 revision; + u64 fine_grain_ctrl; + u64 step_size; + u64 low_speed_notification; +}; + +struct acpi_fan { + bool acpi4; + struct acpi_fan_fif fif; + struct acpi_fan_fps *fps; + int fps_count; + struct thermal_cooling_device *cdev; +}; + +static struct platform_driver acpi_fan_driver = { + .probe = acpi_fan_probe, + .remove = acpi_fan_remove, + .driver = { + .name = "acpi-fan", + .acpi_match_table = fan_device_ids, + .pm = FAN_PM_OPS_PTR, + }, }; /* thermal cooling device callbacks */ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { - /* ACPI fan device only support two states: ON/OFF */ - *state = 1; + struct acpi_device *device = cdev->devdata; + struct acpi_fan *fan = acpi_driver_data(device); + + if (fan->acpi4) + *state = fan->fps_count - 1; + else + *state = 1; return 0; } -static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long - *state) +static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_fan *fan = acpi_driver_data(device); + union acpi_object *obj; + acpi_status status; + int control, i; + + status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + dev_err(&device->dev, "Get fan state failed\n"); + 
return status; + } + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE || + obj->package.count != 3 || + obj->package.elements[1].type != ACPI_TYPE_INTEGER) { + dev_err(&device->dev, "Invalid _FST data\n"); + status = -EINVAL; + goto err; + } + + control = obj->package.elements[1].integer.value; + for (i = 0; i < fan->fps_count; i++) { + if (control == fan->fps[i].control) + break; + } + if (i == fan->fps_count) { + dev_dbg(&device->dev, "Invalid control value returned\n"); + status = -EINVAL; + goto err; + } + + *state = i; + +err: + kfree(obj); + return status; +} + +static int fan_get_state(struct acpi_device *device, unsigned long *state) { - struct acpi_device *device = cdev->devdata; int result; int acpi_state = ACPI_STATE_D0; - if (!device) - return -EINVAL; - - result = acpi_bus_update_power(device->handle, &acpi_state); + result = acpi_device_update_power(device, &acpi_state); if (result) return result; @@ -103,21 +163,57 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long return 0; } -static int -fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) +static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long + *state) { struct acpi_device *device = cdev->devdata; - int result; + struct acpi_fan *fan = acpi_driver_data(device); - if (!device || (state != 0 && state != 1)) + if (fan->acpi4) + return fan_get_state_acpi4(device, state); + else + return fan_get_state(device, state); +} + +static int fan_set_state(struct acpi_device *device, unsigned long state) +{ + if (state != 0 && state != 1) return -EINVAL; - result = acpi_bus_set_power(device->handle, - state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); + return acpi_device_set_power(device, + state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); +} - return result; +static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state) +{ + struct acpi_fan *fan = acpi_driver_data(device); + acpi_status status; + + if (state >= fan->fps_count) + return -EINVAL; + + status = acpi_execute_simple_method(device->handle, "_FSL", + fan->fps[state].control); + if (ACPI_FAILURE(status)) { + dev_dbg(&device->dev, "Failed to set state by _FSL\n"); + return status; + } + + return 0; } +static int +fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) +{ + struct acpi_device *device = cdev->devdata; + struct acpi_fan *fan = acpi_driver_data(device); + + if (fan->acpi4) + return fan_set_state_acpi4(device, state); + else + return fan_set_state(device, state); + } + static const struct thermal_cooling_device_ops fan_cooling_ops = { .get_max_state = fan_get_max_state, .get_cur_state = fan_get_cur_state, @@ -129,21 +225,125 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = { * -------------------------------------------------------------------------- */ -static int acpi_fan_add(struct acpi_device *device) +static bool acpi_fan_is_acpi4(struct acpi_device *device) { - int result = 0; - struct thermal_cooling_device *cdev; + return acpi_has_method(device->handle, "_FIF") && + acpi_has_method(device->handle, "_FPS") && + acpi_has_method(device->handle, "_FSL") && + acpi_has_method(device->handle, "_FST"); +} - if (!device) - return -EINVAL; +static int acpi_fan_get_fif(struct acpi_device *device) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_buffer format = { sizeof("NNNN"), "NNNN" }; + struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif 
}; + union acpi_object *obj; + acpi_status status; + + status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer); + if (ACPI_FAILURE(status)) + return status; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE) { + dev_err(&device->dev, "Invalid _FIF data\n"); + status = -EINVAL; + goto err; + } - strcpy(acpi_device_name(device), "Fan"); - strcpy(acpi_device_class(device), ACPI_FAN_CLASS); + status = acpi_extract_package(obj, &format, &fif); + if (ACPI_FAILURE(status)) { + dev_err(&device->dev, "Invalid _FIF element\n"); + status = -EINVAL; + } - result = acpi_bus_update_power(device->handle, NULL); - if (result) { - dev_err(&device->dev, "Setting initial power state\n"); - goto end; +err: + kfree(obj); + return status; +} + +static int acpi_fan_speed_cmp(const void *a, const void *b) +{ + const struct acpi_fan_fps *fps1 = a; + const struct acpi_fan_fps *fps2 = b; + return fps1->speed - fps2->speed; +} + +static int acpi_fan_get_fps(struct acpi_device *device) +{ + struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + int i; + + status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer); + if (ACPI_FAILURE(status)) + return status; + + obj = buffer.pointer; + if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) { + dev_err(&device->dev, "Invalid _FPS data\n"); + status = -EINVAL; + goto err; + } + + fan->fps_count = obj->package.count - 1; /* minus revision field */ + fan->fps = devm_kzalloc(&device->dev, + fan->fps_count * sizeof(struct acpi_fan_fps), + GFP_KERNEL); + if (!fan->fps) { + dev_err(&device->dev, "Not enough memory\n"); + status = -ENOMEM; + goto err; + } + for (i = 0; i < fan->fps_count; i++) { + struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; + struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] }; + status = acpi_extract_package(&obj->package.elements[i + 1], + &format, &fps); + if (ACPI_FAILURE(status)) { + dev_err(&device->dev, "Invalid _FPS element\n"); + break; + } + } + + /* sort the state array according to fan speed in increase order */ + sort(fan->fps, fan->fps_count, sizeof(*fan->fps), + acpi_fan_speed_cmp, NULL); + +err: + kfree(obj); + return status; +} + +static int acpi_fan_probe(struct platform_device *pdev) +{ + int result = 0; + struct thermal_cooling_device *cdev; + struct acpi_fan *fan; + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + + fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); + if (!fan) { + dev_err(&device->dev, "No memory for fan\n"); + return -ENOMEM; + } + device->driver_data = fan; + platform_set_drvdata(pdev, fan); + + if (acpi_fan_is_acpi4(device)) { + if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) + goto end; + fan->acpi4 = true; + } else { + result = acpi_device_update_power(device, NULL); + if (result) { + dev_err(&device->dev, "Setting initial power state\n"); + goto end; + } } cdev = thermal_cooling_device_register("Fan", device, @@ -153,44 +353,32 @@ static int acpi_fan_add(struct acpi_device *device) goto end; } - dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id); + dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); - device->driver_data = cdev; - result = sysfs_create_link(&device->dev.kobj, + fan->cdev = cdev; + result = sysfs_create_link(&pdev->dev.kobj, &cdev->device.kobj, "thermal_cooling"); if (result) - dev_err(&device->dev, "Failed to create sysfs link " - "'thermal_cooling'\n"); 
+ dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n"); result = sysfs_create_link(&cdev->device.kobj, - &device->dev.kobj, + &pdev->dev.kobj, "device"); if (result) - dev_err(&device->dev, "Failed to create sysfs link 'device'\n"); - - dev_info(&device->dev, "ACPI: %s [%s] (%s)\n", - acpi_device_name(device), acpi_device_bid(device), - !device->power.state ? "on" : "off"); + dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); end: return result; } -static int acpi_fan_remove(struct acpi_device *device) +static int acpi_fan_remove(struct platform_device *pdev) { - struct thermal_cooling_device *cdev; - - if (!device) - return -EINVAL; - - cdev = acpi_driver_data(device); - if (!cdev) - return -EINVAL; + struct acpi_fan *fan = platform_get_drvdata(pdev); - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); - sysfs_remove_link(&cdev->device.kobj, "device"); - thermal_cooling_device_unregister(cdev); + sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); + sysfs_remove_link(&fan->cdev->device.kobj, "device"); + thermal_cooling_device_unregister(fan->cdev); return 0; } @@ -198,10 +386,11 @@ static int acpi_fan_remove(struct acpi_device *device) #ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev) { - if (!dev) - return -EINVAL; + struct acpi_fan *fan = dev_get_drvdata(dev); + if (fan->acpi4) + return 0; - acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0); + acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0); return AE_OK; } @@ -209,11 +398,12 @@ static int acpi_fan_suspend(struct device *dev) static int acpi_fan_resume(struct device *dev) { int result; + struct acpi_fan *fan = dev_get_drvdata(dev); - if (!dev) - return -EINVAL; + if (fan->acpi4) + return 0; - result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); + result = acpi_device_update_power(ACPI_COMPANION(dev), NULL); if (result) dev_err(dev, "Error updating fan power state\n"); @@ -221,4 +411,4 @@ static int acpi_fan_resume(struct device *dev) } #endif -module_acpi_driver(acpi_fan_driver); +module_platform_driver(acpi_fan_driver); diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c new file mode 100644 index 00000000000..a27d31d1ba2 --- /dev/null +++ b/drivers/acpi/int340x_thermal.c @@ -0,0 +1,51 @@ +/* + * ACPI support for int340x thermal drivers + * + * Copyright (C) 2014, Intel Corporation + * Authors: Zhang Rui <rui.zhang@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/acpi.h> +#include <linux/module.h> + +#include "internal.h" + +#define DO_ENUMERATION 0x01 +static const struct acpi_device_id int340x_thermal_device_ids[] = { + {"INT3400", DO_ENUMERATION }, + {"INT3401"}, + {"INT3402"}, + {"INT3403"}, + {"INT3404"}, + {"INT3406"}, + {"INT3407"}, + {"INT3408"}, + {"INT3409"}, + {"INT340A"}, + {"INT340B"}, + {""}, +}; + +static int int340x_thermal_handler_attach(struct acpi_device *adev, + const struct acpi_device_id *id) +{ +#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) + if (id->driver_data == DO_ENUMERATION) + acpi_create_platform_device(adev); +#endif + return 1; +} + +static struct acpi_scan_handler int340x_thermal_handler = { + .ids = int340x_thermal_device_ids, + .attach = int340x_thermal_handler_attach, +}; + +void __init acpi_int340x_thermal_init(void) +{ + acpi_scan_add_handler(&int340x_thermal_handler); +} diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 4c5cf77e757..447f6d679b2 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -31,6 +31,7 @@ void acpi_pci_link_init(void); void acpi_processor_init(void); void acpi_platform_init(void); void acpi_pnp_init(void); +void acpi_int340x_thermal_init(void); int acpi_sysfs_init(void); void acpi_container_init(void); void acpi_memory_hotplug_init(void); @@ -103,8 +104,6 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state); int acpi_power_on_resources(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state); -int acpi_device_update_power(struct acpi_device *device, int *state_p); - int acpi_wakeup_device_init(void); #ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC @@ -168,13 +167,6 @@ static inline void suspend_nvs_restore(void) {} #endif /*-------------------------------------------------------------------------- - Platform bus support - -------------------------------------------------------------------------- */ -struct platform_device; - -struct platform_device *acpi_create_platform_device(struct acpi_device *adev); - -/*-------------------------------------------------------------------------- Video -------------------------------------------------------------------------- */ #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ae44d8654c8..0476e90b209 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -142,6 +142,53 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, } /* + * acpi_companion_match() - Can we match via ACPI companion device + * @dev: Device in question + * + * Check if the given device has an ACPI companion and if that companion has + * a valid list of PNP IDs, and if the device is the first (primary) physical + * device associated with it. + * + * If multiple physical devices are attached to a single ACPI companion, we need + * to be careful. The usage scenario for this kind of relationship is that all + * of the physical devices in question use resources provided by the ACPI + * companion. A typical case is an MFD device where all the sub-devices share + * the parent's ACPI companion. In such cases we can only allow the primary + * (first) physical device to be matched with the help of the companion's PNP + * IDs. 
+ * + * Additional physical devices sharing the ACPI companion can still use + * resources available from it but they will be matched normally using functions + * provided by their bus types (and analogously for their modalias). + */ +static bool acpi_companion_match(const struct device *dev) +{ + struct acpi_device *adev; + bool ret; + + adev = ACPI_COMPANION(dev); + if (!adev) + return false; + + if (list_empty(&adev->pnp.ids)) + return false; + + mutex_lock(&adev->physical_node_lock); + if (list_empty(&adev->physical_node_list)) { + ret = false; + } else { + const struct acpi_device_physical_node *node; + + node = list_first_entry(&adev->physical_node_list, + struct acpi_device_physical_node, node); + ret = node->dev == dev; + } + mutex_unlock(&adev->physical_node_lock); + + return ret; +} + +/* * Creates uevent modalias field for ACPI enumerated devices. * Because the other buses does not support ACPI HIDs & CIDs. * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get: @@ -149,20 +196,14 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, */ int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { - struct acpi_device *acpi_dev; int len; - acpi_dev = ACPI_COMPANION(dev); - if (!acpi_dev) - return -ENODEV; - - /* Fall back to bus specific way of modalias exporting */ - if (list_empty(&acpi_dev->pnp.ids)) + if (!acpi_companion_match(dev)) return -ENODEV; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; - len = create_modalias(acpi_dev, &env->buf[env->buflen - 1], + len = create_modalias(ACPI_COMPANION(dev), &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); if (len <= 0) return len; @@ -179,18 +220,12 @@ EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); */ int acpi_device_modalias(struct device *dev, char *buf, int size) { - struct acpi_device *acpi_dev; int len; - acpi_dev = ACPI_COMPANION(dev); - if (!acpi_dev) + if (!acpi_companion_match(dev)) return -ENODEV; - /* Fall back to bus specific way of modalias exporting */ - if (list_empty(&acpi_dev->pnp.ids)) - return -ENODEV; - - len = create_modalias(acpi_dev, buf, size -1); + len = create_modalias(ACPI_COMPANION(dev), buf, size -1); if (len <= 0) return len; buf[len++] = '\n'; @@ -853,6 +888,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, if (!ids || !handle || acpi_bus_get_device(handle, &adev)) return NULL; + if (!acpi_companion_match(dev)) + return NULL; + return __acpi_match_device(adev, ids); } EXPORT_SYMBOL_GPL(acpi_match_device); @@ -1470,7 +1508,7 @@ static void acpi_wakeup_gpe_init(struct acpi_device *device) if (ACPI_FAILURE(status)) return; - wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE); + wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HAS_HANDLER); } static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) @@ -2315,6 +2353,7 @@ int __init acpi_scan_init(void) acpi_container_init(); acpi_memory_hotplug_init(); acpi_pnp_init(); + acpi_int340x_thermal_init(); mutex_lock(&acpi_scan_lock); /* diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 38cb9782d4b..13e577c8020 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -537,7 +537,7 @@ static ssize_t counter_show(struct kobject *kobj, if (result) goto end; - if (!(status & ACPI_EVENT_FLAG_HANDLE)) + if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) size += sprintf(buf + size, " invalid"); else if (status & ACPI_EVENT_FLAG_ENABLED) size += sprintf(buf + size, " enabled"); @@ -581,7 +581,7 @@ static ssize_t 
counter_set(struct kobject *kobj, if (result) goto end; - if (!(status & ACPI_EVENT_FLAG_HANDLE)) { + if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) { printk(KERN_WARNING PREFIX "Can not change Invalid GPE/Fixed Event status\n"); return -EINVAL; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 112817e963e..d24fa1964eb 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -528,7 +528,6 @@ static void acpi_thermal_check(void *data) } /* sys I/F for generic thermal sysfs support */ -#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100) static int thermal_get_temp(struct thermal_zone_device *thermal, unsigned long *temp) @@ -543,7 +542,8 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, if (result) return result; - *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset); + *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature, + tz->kelvin_offset); return 0; } @@ -647,7 +647,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.critical.flags.valid) { if (!trip) { - *temp = KELVIN_TO_MILLICELSIUS( + *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->trips.critical.temperature, tz->kelvin_offset); return 0; @@ -657,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.hot.flags.valid) { if (!trip) { - *temp = KELVIN_TO_MILLICELSIUS( + *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->trips.hot.temperature, tz->kelvin_offset); return 0; @@ -667,7 +667,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.passive.flags.valid) { if (!trip) { - *temp = KELVIN_TO_MILLICELSIUS( + *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->trips.passive.temperature, tz->kelvin_offset); return 0; @@ -678,7 +678,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) { if (!trip) { - *temp = KELVIN_TO_MILLICELSIUS( + *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->trips.active[i].temperature, tz->kelvin_offset); return 0; @@ -694,7 +694,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal, struct acpi_thermal *tz = thermal->devdata; if (tz->trips.critical.flags.valid) { - *temperature = KELVIN_TO_MILLICELSIUS( + *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( tz->trips.critical.temperature, tz->kelvin_offset); return 0; @@ -714,8 +714,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal, if (type == THERMAL_TRIP_ACTIVE) { unsigned long trip_temp; - unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature, - tz->kelvin_offset); + unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET( + tz->temperature, tz->kelvin_offset); if (thermal_get_trip_temp(thermal, trip, &trip_temp)) return -EINVAL; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 834f35c4bf8..371ac12d25b 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -149,6 +149,21 @@ acpi_extract_package(union acpi_object *package, break; } break; + case ACPI_TYPE_LOCAL_REFERENCE: + switch (format_string[i]) { + case 'R': + size_required += sizeof(void *); + tail_offset += sizeof(void *); + break; + default: + printk(KERN_WARNING PREFIX "Invalid package element" + " [%d] got reference," + " expecting [%c]\n", + i, format_string[i]); + return AE_BAD_DATA; + break; + } + break; case ACPI_TYPE_PACKAGE: default: @@ -247,7 +262,18 @@ acpi_extract_package(union acpi_object *package, break; } 
break; - + case ACPI_TYPE_LOCAL_REFERENCE: + switch (format_string[i]) { + case 'R': + *(void **)head = + (void *)element->reference.handle; + head += sizeof(void *); + break; + default: + /* Should never get here */ + break; + } + break; case ACPI_TYPE_PACKAGE: /* TBD: handle nested packages... */ default: diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5f039f19106..e45f8378980 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -60,6 +60,7 @@ enum board_ids { /* board IDs by feature in alphabetical order */ board_ahci, board_ahci_ign_iferr, + board_ahci_nomsi, board_ahci_noncq, board_ahci_nosntf, board_ahci_yes_fbs, @@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_nomsi] = { + AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, [board_ahci_noncq] = { AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), .flags = AHCI_FLAG_COMMON, @@ -313,6 +321,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -475,10 +488,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ /* - * Samsung SSDs found on some macbooks. NCQ times out. - * https://bugzilla.kernel.org/show_bug.cgi?id=60731 + * Samsung SSDs found on some macbooks. NCQ times out if MSI is + * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 */ - { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, /* Enmotus */ { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, @@ -514,12 +527,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { - unsigned int force_port_map = 0; - unsigned int mask_port_map = 0; - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { dev_info(&pdev->dev, "JMB361 has only one port\n"); - force_port_map = 1; + hpriv->force_port_map = 1; } /* @@ -529,9 +539,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, */ if (hpriv->flags & AHCI_HFLAG_MV_PATA) { if (pdev->device == 0x6121) - mask_port_map = 0x3; + hpriv->mask_port_map = 0x3; else - mask_port_map = 0xf; + hpriv->mask_port_map = 0xf; dev_info(&pdev->dev, "Disabling your PATA port. 
Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 5eb61c9e63d..97683e45ab0 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap, } } -static void ahci_update_intr_status(struct ata_port *ap) +static void ahci_port_intr(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_port_priv *pp = ap->private_data; u32 status; status = readl(port_mmio + PORT_IRQ_STAT); writel(status, port_mmio + PORT_IRQ_STAT); - atomic_or(status, &pp->intr_status); + ahci_handle_port_interrupt(ap, port_mmio, status); } static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance) @@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance) return IRQ_HANDLED; } -irqreturn_t ahci_thread_fn(int irq, void *dev_instance) -{ - struct ata_host *host = dev_instance; - struct ahci_host_priv *hpriv = host->private_data; - u32 irq_masked = hpriv->port_map; - unsigned int i; - - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - - if (!(irq_masked & (1 << i))) - continue; - - ap = host->ports[i]; - if (ap) { - ahci_port_thread_fn(irq, ap); - VPRINTK("port %u\n", i); - } else { - VPRINTK("port %u (no irq)\n", i); - if (ata_ratelimit()) - dev_warn(host->dev, - "interrupt on disabled port %u\n", i); - } - } - - return IRQ_HANDLED; -} - static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance) { struct ata_port *ap = dev_instance; @@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance) irq_masked = irq_stat & hpriv->port_map; + spin_lock(&host->lock); + for (i = 0; i < host->n_ports; i++) { struct ata_port *ap; @@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance) ap = host->ports[i]; if (ap) { - ahci_update_intr_status(ap); + ahci_port_intr(ap); VPRINTK("port %u\n", i); } else { VPRINTK("port %u (no irq)\n", i); @@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance) */ writel(irq_stat, mmio + HOST_IRQ_STAT); + spin_unlock(&host->lock); + VPRINTK("EXIT\n"); - return handled ? IRQ_WAKE_THREAD : IRQ_NONE; + return IRQ_RETVAL(handled); } unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) @@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap) */ pp->intr_mask = DEF_PORT_IRQ; - spin_lock_init(&pp->lock); - ap->lock = &pp->lock; + /* + * Switch to per-port locking in case each port has its own MSI vector. 
+ */ + if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) { + spin_lock_init(&pp->lock); + ap->lock = &pp->lock; + } ap->private_data = pp; @@ -2482,31 +2462,6 @@ out_free_irqs: return rc; } -static int ahci_host_activate_single_irq(struct ata_host *host, int irq, - struct scsi_host_template *sht) -{ - int i, rc; - - rc = ata_host_start(host); - if (rc) - return rc; - - rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr, - ahci_thread_fn, IRQF_SHARED, - dev_driver_string(host->dev), host); - if (rc) - return rc; - - for (i = 0; i < host->n_ports; i++) - ata_port_desc(host->ports[i], "irq %d", irq); - - rc = ata_host_register(host, sht); - if (rc) - devm_free_irq(host->dev, irq, host); - - return rc; -} - /** * ahci_host_activate - start AHCI host, request IRQs and register it * @host: target ATA host @@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq, if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) rc = ahci_host_activate_multi_irqs(host, irq, sht); else - rc = ahci_host_activate_single_irq(host, irq, sht); + rc = ata_host_activate(host, irq, ahci_single_irq_intr, + IRQF_SHARED, sht); return rc; } EXPORT_SYMBOL_GPL(ahci_host_activate); diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 61eb6d77dac..ea1fbc1d4c5 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -146,6 +146,7 @@ enum sata_rcar_type { RCAR_GEN1_SATA, RCAR_GEN2_SATA, + RCAR_R8A7790_ES1_SATA, }; struct sata_rcar_priv { @@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host) ap->udma_mask = ATA_UDMA6; ap->flags |= ATA_FLAG_SATA; + if (priv->type == RCAR_R8A7790_ES1_SATA) + ap->flags |= ATA_FLAG_NO_DIPM; + ioaddr->cmd_addr = base + SDATA_REG; ioaddr->ctl_addr = base + SSDEVCON_REG; ioaddr->scr_addr = base + SCRSSTS_REG; @@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host) sata_rcar_gen1_phy_init(priv); break; case RCAR_GEN2_SATA: + case RCAR_R8A7790_ES1_SATA: sata_rcar_gen2_phy_init(priv); break; default: @@ -838,9 +843,17 @@ static struct of_device_id sata_rcar_match[] = { .data = (void *)RCAR_GEN2_SATA }, { + .compatible = "renesas,sata-r8a7790-es1", + .data = (void *)RCAR_R8A7790_ES1_SATA + }, + { .compatible = "renesas,sata-r8a7791", .data = (void *)RCAR_GEN2_SATA }, + { + .compatible = "renesas,sata-r8a7793", + .data = (void *)RCAR_GEN2_SATA + }, { }, }; MODULE_DEVICE_TABLE(of, sata_rcar_match); @@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = { { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ { "sata-r8a7779", RCAR_GEN1_SATA }, { "sata-r8a7790", RCAR_GEN2_SATA }, + { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA }, { "sata-r8a7791", RCAR_GEN2_SATA }, + { "sata-r8a7793", RCAR_GEN2_SATA }, { }, }; MODULE_DEVICE_TABLE(platform, sata_rcar_id_table); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 7652e8dc188..21b0bc6a9c9 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); if (!card->config_regs) { dev_warn(&dev->dev, "Failed to ioremap config registers\n"); + err = -ENOMEM; goto out_release_regions; } card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); if (!card->buffers) { dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); + err = -ENOMEM; goto out_unmap_config; } diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 61a33f4ba60..df04227d00c 100644 --- 
a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -171,20 +171,23 @@ config WANT_DEV_COREDUMP Drivers should "select" this option if they desire to use the device coredump mechanism. -config DISABLE_DEV_COREDUMP - bool "Disable device coredump" if EXPERT +config ALLOW_DEV_COREDUMP + bool "Allow device coredump" if EXPERT + default y help - Disable the device coredump mechanism despite drivers wanting to - use it; this allows for more sensitive systems or systems that - don't want to ever access the information to not have the code, - nor keep any data. + This option controls if the device coredump mechanism is available or + not; if disabled, the mechanism will be omitted even if drivers that + can use it are enabled. + Say 'N' for more sensitive systems or systems that don't want + to ever access the information to not have the code, nor keep any + data. - If unsure, say N. + If unsure, say Y. config DEV_COREDUMP bool default y if WANT_DEV_COREDUMP - depends on !DISABLE_DEV_COREDUMP + depends on ALLOW_DEV_COREDUMP config DEBUG_DRIVER bool "Driver Core verbose debug messages" diff --git a/drivers/base/core.c b/drivers/base/core.c index 14d162952c3..842d04707de 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) return &dir->kobj; } +static DEFINE_MUTEX(gdp_mutex); static struct kobject *get_device_parent(struct device *dev, struct device *parent) { if (dev->class) { - static DEFINE_MUTEX(gdp_mutex); struct kobject *kobj = NULL; struct kobject *parent_kobj; struct kobject *k; @@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) glue_dir->kset != &dev->class->p->glue_dirs) return; + mutex_lock(&gdp_mutex); kobject_put(glue_dir); + mutex_unlock(&gdp_mutex); } static void cleanup_device_parent(struct device *dev) diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 473ff489240..950fff9ce45 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, #undef pr_fmt #define pr_fmt(fmt) fmt -static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) +static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) { dev_set_cma_area(dev, rmem->priv); + return 0; } static void rmem_cma_device_release(struct reserved_mem *rmem, diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 40bc2f4072c..fb83d4acd40 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, struct device *dev = pdd->dev; int ret = 0; - if (gpd_data->need_restore) + if (gpd_data->need_restore > 0) return 0; + /* + * If the value of the need_restore flag is still unknown at this point, + * we trust that pm_genpd_poweroff() has verified that the device is + * already runtime PM suspended. 
+ */ + if (gpd_data->need_restore < 0) { + gpd_data->need_restore = 1; + return 0; + } + mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); @@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, mutex_lock(&genpd->lock); if (!ret) - gpd_data->need_restore = true; + gpd_data->need_restore = 1; return ret; } @@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - bool need_restore = gpd_data->need_restore; + int need_restore = gpd_data->need_restore; - gpd_data->need_restore = false; + gpd_data->need_restore = 0; mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); + + /* + * Call genpd_restore_dev() for recently added devices too (need_restore + * is negative then). + */ if (need_restore) genpd_restore_dev(genpd, dev); @@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; + struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); int ret; @@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); + + /* + * If we have an unknown state of the need_restore flag, it means none + * of the runtime PM callbacks has been invoked yet. Let's update the + * flag to reflect that the current state is active. + */ + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + if (gpd_data->need_restore < 0) + gpd_data->need_restore = 0; + genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; @@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, spin_unlock_irq(&dev->power.lock); if (genpd->attach_dev) - genpd->attach_dev(dev); + genpd->attach_dev(genpd, dev); mutex_lock(&gpd_data->lock); gpd_data->base.dev = dev; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; + gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; mutex_unlock(&gpd_data->lock); @@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; if (genpd->detach_dev) - genpd->detach_dev(dev); + genpd->detach_dev(genpd, dev); spin_lock_irq(&dev->power.lock); @@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val) psd = dev_to_psd(dev); if (psd && psd->domain_data) - to_gpd_data(psd->domain_data)->need_restore = val; + to_gpd_data(psd->domain_data)->need_restore = val ? 
1 : 0; spin_unlock_irqrestore(&dev->power.lock, flags); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 44973196d3f..9717d5f2013 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1266,6 +1266,8 @@ int dpm_suspend_late(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); + if (!error) + error = async_error; if (error) { suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 1e5ac0a7969..cd9161a8b3a 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -275,7 +275,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend, static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) }, - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, /* 0xa8d8 */ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, @@ -285,7 +285,8 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */ + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */ + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */ { 0, }, }; MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index d1656c2f70a..1000955ce09 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -132,7 +132,7 @@ static bool bcma_is_core_needed_early(u16 core_id) return false; } -#ifdef CONFIG_OF +#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS) static struct device_node *bcma_of_find_child_device(struct platform_device *parent, struct bcma_device *core) { diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 2671a3f02f0..8001e812018 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb) ret = setup_commands(nq); if (ret) - goto err_queue; + return ret; nullb->nr_queues++; } - return 0; -err_queue: - cleanup_queues(nullb); - return ret; } static int null_add_dev(void) @@ -507,7 +503,9 @@ static int null_add_dev(void) goto out_cleanup_queues; } blk_queue_make_request(nullb->q, null_queue_bio); - init_driver_queues(nullb); + rv = init_driver_queues(nullb); + if (rv) + goto out_cleanup_blk_queue; } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); if (!nullb->q) { @@ -516,7 +514,9 @@ static int null_add_dev(void) } blk_queue_prep_rq(nullb->q, null_rq_prep_fn); blk_queue_softirq_done(nullb->q, null_softirq_done_fn); - init_driver_queues(nullb); + rv = init_driver_queues(nullb); + if (rv) + goto out_cleanup_blk_queue; } nullb->q->queuedata = nullb; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0a54c588e43..27b71a0b72d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -342,7 +342,6 @@ struct rbd_device { struct list_head rq_queue; /* incoming rq queue */ spinlock_t lock; /* queue, flags, open_count */ - struct workqueue_struct *rq_wq; struct work_struct rq_work; struct rbd_image_header header; @@ -402,6 +401,8 @@ static struct kmem_cache *rbd_segment_name_cache; static int rbd_major; static 
DEFINE_IDA(rbd_dev_id_ida); +static struct workqueue_struct *rbd_wq; + /* * Default to false for now, as single-major requires >= 0.75 version of * userspace rbd utility. @@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q) } if (queued) - queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work); + queue_work(rbd_wq, &rbd_dev->rq_work); } /* @@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, page_count = (u32) calc_pages_for(offset, length); pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); if (IS_ERR(pages)) - ret = PTR_ERR(pages); + return PTR_ERR(pages); ret = -ENOMEM; obj_request = rbd_obj_request_create(object_name, offset, length, @@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); - rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - rbd_dev->disk->disk_name); - if (!rbd_dev->rq_wq) { - ret = -ENOMEM; - goto err_out_mapping; - } - ret = rbd_bus_add_dev(rbd_dev); if (ret) - goto err_out_workqueue; + goto err_out_mapping; /* Everything's ready. Announce the disk to the world. */ @@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) return ret; -err_out_workqueue: - destroy_workqueue(rbd_dev->rq_wq); - rbd_dev->rq_wq = NULL; err_out_mapping: rbd_dev_mapping_clear(rbd_dev); err_out_disk: @@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - destroy_workqueue(rbd_dev->rq_wq); rbd_free_disk(rbd_dev); clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); rbd_dev_mapping_clear(rbd_dev); @@ -5716,11 +5706,21 @@ static int __init rbd_init(void) if (rc) return rc; + /* + * The number of active work items is limited by the number of + * rbd devices, so leave @max_active at default. 
+ */ + rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0); + if (!rbd_wq) { + rc = -ENOMEM; + goto err_out_slab; + } + if (single_major) { rbd_major = register_blkdev(0, RBD_DRV_NAME); if (rbd_major < 0) { rc = rbd_major; - goto err_out_slab; + goto err_out_wq; } } @@ -5738,6 +5738,8 @@ static int __init rbd_init(void) err_out_blkdev: if (single_major) unregister_blkdev(rbd_major, RBD_DRV_NAME); +err_out_wq: + destroy_workqueue(rbd_wq); err_out_slab: rbd_slab_exit(); return rc; @@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void) rbd_sysfs_cleanup(); if (single_major) unregister_blkdev(rbd_major, RBD_DRV_NAME); + destroy_workqueue(rbd_wq); rbd_slab_exit(); } diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 756b8ec00f1..0ebadf93b6c 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -69,8 +69,6 @@ struct vdc_port { u8 vdisk_mtype; char disk_name[32]; - - struct vio_disk_vtoc label; }; static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) @@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port) if (comp.err) return comp.err; - err = generic_request(port, VD_OP_GET_VTOC, - &port->label, sizeof(port->label)); - if (err < 0) { - printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err); - return err; - } - if (vdc_version_supported(port, 1, 1)) { /* vdisk_size should be set during the handshake, if it wasn't * then the underlying disk is reserved by another system diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 0e63e8aa827..3920ee45aa5 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev, { u64 val = 0; struct zram *zram = dev_to_zram(dev); - struct zram_meta *meta = zram->meta; down_read(&zram->init_lock); - if (init_done(zram)) + if (init_done(zram)) { + struct zram_meta *meta = zram->meta; val = zs_get_total_pages(meta->mem_pool); + } up_read(&zram->init_lock); return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); @@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev, int err; unsigned long val; struct zram *zram = dev_to_zram(dev); - struct zram_meta *meta = zram->meta; err = kstrtoul(buf, 10, &val); if (err || val != 0) return -EINVAL; down_read(&zram->init_lock); - if (init_done(zram)) + if (init_done(zram)) { + struct zram_meta *meta = zram->meta; atomic_long_set(&zram->stats.max_used_pages, zs_get_total_pages(meta->mem_pool)); + } up_read(&zram->init_lock); return len; @@ -558,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, } if (page_zero_filled(uncmem)) { - kunmap_atomic(user_mem); + if (user_mem) + kunmap_atomic(user_mem); /* Free memory associated with this sector now. */ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index 6226aa08c36..bcf86f91800 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c @@ -25,18 +25,21 @@ #include <asm/vio.h> -static int pseries_rng_data_read(struct hwrng *rng, u32 *data) +static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { + u64 buffer[PLPAR_HCALL_BUFSIZE]; + size_t size = max < 8 ? 
max : 8; int rc; - rc = plpar_hcall(H_RANDOM, (unsigned long *)data); + rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); if (rc != H_SUCCESS) { pr_err_ratelimited("H_RANDOM call failed %d\n", rc); return -EIO; } + memcpy(data, buffer, size); /* The hypervisor interface returns 64 bits */ - return 8; + return size; } /** @@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) static struct hwrng pseries_rng = { .name = KBUILD_MODNAME, - .data_read = pseries_rng_data_read, + .read = pseries_rng_read, }; static int __init pseries_rng_probe(struct vio_dev *dev, diff --git a/drivers/char/random.c b/drivers/char/random.c index 82759cef904..04645c09fe5 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1106,7 +1106,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) __mix_pool_bytes(r, hash.w, sizeof(hash.w)); spin_unlock_irqrestore(&r->lock, flags); - memset(workspace, 0, sizeof(workspace)); + memzero_explicit(workspace, sizeof(workspace)); /* * In case the hash function has some recognizable output @@ -1118,7 +1118,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) hash.w[2] ^= rol32(hash.w[2], 16); memcpy(out, &hash, EXTRACT_SIZE); - memset(&hash, 0, sizeof(hash)); + memzero_explicit(&hash, sizeof(hash)); } /* @@ -1175,7 +1175,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, } /* Wipe data just returned from memory */ - memset(tmp, 0, sizeof(tmp)); + memzero_explicit(tmp, sizeof(tmp)); return ret; } @@ -1218,7 +1218,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, } /* Wipe data just returned from memory */ - memset(tmp, 0, sizeof(tmp)); + memzero_explicit(tmp, sizeof(tmp)); return ret; } diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 0102dc78860..a24891b9754 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd, static const struct file_operations raw_fops = { .read = new_sync_read, - .read_iter = generic_file_read_iter, + .read_iter = blkdev_read_iter, .write = new_sync_write, .write_iter = blkdev_write_iter, .fsync = blkdev_fsync, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index bfa640023e6..cf7a561fad7 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id) spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); - virtio_device_ready(portdev->vdev); - /* Fill the in_vq with buffers so the host can send us data. 
*/ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); if (!nr_added_bufs) { @@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev) spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + virtio_device_ready(portdev->vdev); + if (multiport) { unsigned int nr_added_bufs; diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 24b5b020753..a23ac0c724f 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, tmp = pmc_read(pmc, AT91_PMC_USB); usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; - return parent_rate / (usbdiv + 1); + + return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); } static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { unsigned long div; - unsigned long bestrate; - unsigned long tmp; + + if (!rate) + return -EINVAL; if (rate >= *parent_rate) return *parent_rate; - div = *parent_rate / rate; - if (div >= SAM9X5_USB_MAX_DIV) - return *parent_rate / (SAM9X5_USB_MAX_DIV + 1); - - bestrate = *parent_rate / div; - tmp = *parent_rate / (div + 1); - if (bestrate - rate > rate - tmp) - bestrate = tmp; + div = DIV_ROUND_CLOSEST(*parent_rate, rate); + if (div > SAM9X5_USB_MAX_DIV + 1) + div = SAM9X5_USB_MAX_DIV + 1; - return bestrate; + return DIV_ROUND_CLOSEST(*parent_rate, div); } static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) @@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, u32 tmp; struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); struct at91_pmc *pmc = usb->pmc; - unsigned long div = parent_rate / rate; + unsigned long div; + + if (!rate) + return -EINVAL; - if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV) + div = DIV_ROUND_CLOSEST(parent_rate, rate); + if (div > SAM9X5_USB_MAX_DIV + 1 || !div) return -EINVAL; tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; @@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, tmp_parent_rate = rate * usb->divisors[i]; tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); - tmprate = tmp_parent_rate / usb->divisors[i]; + tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]); if (tmprate < rate) tmpdiff = rate - tmprate; else @@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, struct at91_pmc *pmc = usb->pmc; unsigned long div; - if (!rate || parent_rate % rate) + if (!rate) return -EINVAL; - div = parent_rate / rate; + div = DIV_ROUND_CLOSEST(parent_rate, rate); for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { if (usb->divisors[i] == div) { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 18a9de29df0..c0a842b335c 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!rate) rate = 1; + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + bestdiv = readl(divider->reg) >> divider->shift; + bestdiv &= div_mask(divider); + bestdiv = _get_div(divider, bestdiv); + return bestdiv; + } + maxdiv = _get_maxdiv(divider); if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { @@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = { }; EXPORT_SYMBOL_GPL(clk_divider_ops); -const struct clk_ops clk_divider_ro_ops = { - 
.recalc_rate = clk_divider_recalc_rate, -}; -EXPORT_SYMBOL_GPL(clk_divider_ro_ops); - static struct clk *_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, @@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, } init.name = name; - if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) - init.ops = &clk_divider_ro_ops; - else - init.ops = &clk_divider_ops; + init.ops = &clk_divider_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c index b345cc791e5..88b9fe13fa4 100644 --- a/drivers/clk/pxa/clk-pxa27x.c +++ b/drivers/clk/pxa/clk-pxa27x.c @@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw, unsigned long ccsr = CCSR; osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); - a = cccr & CCCR_A_BIT; + a = cccr & (1 << CCCR_A_BIT); l = ccsr & CCSR_L_MASK; if (osc_forced || a) @@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw) unsigned long ccsr = CCSR; osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); - a = cccr & CCCR_A_BIT; + a = cccr & (1 << CCCR_A_BIT); if (osc_forced) return PXA_MEM_13Mhz; if (a) diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index dab988ab8cf..157139a5c1c 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = { [ESC1_CLK_SRC] = &esc1_clk_src.clkr, [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, - [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, + [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, [MAPLE_CLK_SRC] = &maple_clk_src.clkr, [VDP_CLK_SRC] = &vdp_clk_src.clkr, diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index 1e68bff481b..880a266f014 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, div->width = div_width; div->lock = lock; div->table = div_table; - div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) - ? 
&clk_divider_ro_ops - : &clk_divider_ops; + div_ops = &clk_divider_ops; } clk = clk_register_composite(NULL, name, parent_names, num_parents, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 2133f9d59d0..43005d4d334 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -660,11 +660,11 @@ static bool __init arch_timer_probed(int type, const struct of_device_id *matches) { struct device_node *dn; - bool probed = false; + bool probed = true; dn = of_find_matching_node(NULL, matches); - if (dn && of_device_is_available(dn) && (arch_timers_present & type)) - probed = true; + if (dn && of_device_is_available(dn) && !(arch_timers_present & type)) + probed = false; of_node_put(dn); return probed; diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index efb17c3ee12..f4a9c0058b4 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node) /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + sun4i_clockevent.cpumask = cpu_possible_mask; + sun4i_clockevent.irq = irq; + + clockevents_config_and_register(&sun4i_clockevent, rate, + TIMER_SYNC_TICKS, 0xffffffff); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); @@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node) /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); - - sun4i_clockevent.cpumask = cpu_possible_mask; - sun4i_clockevent.irq = irq; - - clockevents_config_and_register(&sun4i_clockevent, rate, - TIMER_SYNC_TICKS, 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", sun4i_timer_init); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 6bbb8b91344..f657c571b18 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -18,6 +18,7 @@ #include <linux/cpu.h> #include <linux/cpu_cooling.h> #include <linux/cpufreq.h> +#include <linux/cpufreq-dt.h> #include <linux/cpumask.h> #include <linux/err.h> #include <linux/module.h> @@ -146,8 +147,8 @@ try_again: goto try_again; } - dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n", - cpu, PTR_ERR(cpu_reg)); + dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n", + cpu, PTR_ERR(cpu_reg)); } cpu_clk = clk_get(cpu_dev, NULL); @@ -165,8 +166,8 @@ try_again: if (ret == -EPROBE_DEFER) dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu); else - dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret, - cpu); + dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu, + ret); } else { *cdev = cpu_dev; *creg = cpu_reg; @@ -178,6 +179,7 @@ try_again: static int cpufreq_init(struct cpufreq_policy *policy) { + struct cpufreq_dt_platform_data *pd; struct cpufreq_frequency_table *freq_table; struct thermal_cooling_device *cdev; struct device_node *np; @@ -185,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) struct device *cpu_dev; struct regulator *cpu_reg; struct clk *cpu_clk; + unsigned long min_uV = ~0, max_uV = 0; unsigned int transition_latency; int ret; @@ -204,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy) /* OPPs might be populated at runtime, don't check for error here */ of_init_opp_table(cpu_dev); - ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); - if (ret) { - 
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_put_node; - } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; - goto out_free_table; + goto out_put_node; } of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); @@ -222,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy) transition_latency = CPUFREQ_ETERNAL; if (!IS_ERR(cpu_reg)) { - struct dev_pm_opp *opp; - unsigned long min_uV, max_uV; - int i; + unsigned long opp_freq = 0; /* - * OPP is maintained in order of increasing frequency, and - * freq_table initialised from OPP is therefore sorted in the - * same order. + * Disable any OPPs where the connected regulator isn't able to + * provide the specified voltage and record minimum and maximum + * voltage levels. */ - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) - ; - rcu_read_lock(); - opp = dev_pm_opp_find_freq_exact(cpu_dev, - freq_table[0].frequency * 1000, true); - min_uV = dev_pm_opp_get_voltage(opp); - opp = dev_pm_opp_find_freq_exact(cpu_dev, - freq_table[i-1].frequency * 1000, true); - max_uV = dev_pm_opp_get_voltage(opp); - rcu_read_unlock(); + while (1) { + struct dev_pm_opp *opp; + unsigned long opp_uV, tol_uV; + + rcu_read_lock(); + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq); + if (IS_ERR(opp)) { + rcu_read_unlock(); + break; + } + opp_uV = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + + tol_uV = opp_uV * priv->voltage_tolerance / 100; + if (regulator_is_supported_voltage(cpu_reg, opp_uV, + opp_uV + tol_uV)) { + if (opp_uV < min_uV) + min_uV = opp_uV; + if (opp_uV > max_uV) + max_uV = opp_uV; + } else { + dev_pm_opp_disable(cpu_dev, opp_freq); + } + + opp_freq++; + } + ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); if (ret > 0) transition_latency += ret * 1000; } + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + pr_err("failed to init cpufreq table: %d\n", ret); + goto out_free_priv; + } + /* * For now, just loading the cooling device; * thermal DT code takes care of matching them. 
@@ -265,9 +283,18 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = priv; policy->clk = cpu_clk; - ret = cpufreq_generic_init(policy, freq_table, transition_latency); - if (ret) + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, + ret); goto out_cooling_unregister; + } + + policy->cpuinfo.transition_latency = transition_latency; + + pd = cpufreq_get_driver_data(); + if (!pd || !pd->independent_clocks) + cpumask_setall(policy->cpus); of_node_put(np); @@ -275,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy) out_cooling_unregister: cpufreq_cooling_unregister(priv->cdev); - kfree(priv); -out_free_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_free_priv: + kfree(priv); out_put_node: of_node_put(np); out_put_reg_clk: @@ -335,6 +362,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev) if (!IS_ERR(cpu_reg)) regulator_put(cpu_reg); + dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev); + ret = cpufreq_register_driver(&dt_cpufreq_driver); if (ret) dev_err(cpu_dev, "failed register driver: %d\n", ret); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 24bf76fba14..4473eba1d6b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq); show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); -show_one(scaling_cur_freq, cur); + +static ssize_t show_scaling_cur_freq( + struct cpufreq_policy *policy, char *buf) +{ + ssize_t ret; + + if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) + ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); + else + ret = sprintf(buf, "%u\n", policy->cur); + return ret; +} static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy); @@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, if (ret) goto err_out_kobj_put; } - if (has_target()) { - ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); - if (ret) - goto err_out_kobj_put; - } + + ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); + if (ret) + goto err_out_kobj_put; + if (cpufreq_driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) @@ -1011,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) read_unlock_irqrestore(&cpufreq_driver_lock, flags); - policy->governor = NULL; + if (policy) + policy->governor = NULL; return policy; } @@ -1731,6 +1743,21 @@ const char *cpufreq_get_current_driver(void) } EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); +/** + * cpufreq_get_driver_data - return current driver data + * + * Return the private data of the currently loaded cpufreq + * driver, or NULL if no cpufreq driver is loaded. 
+ */ +void *cpufreq_get_driver_data(void) +{ + if (cpufreq_driver) + return cpufreq_driver->driver_data; + + return NULL; +} +EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); + /********************************************************************* * NOTIFIER LISTS INTERFACE * *********************************************************************/ diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index ec399ad2f05..1608f7105c9 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -19,7 +19,7 @@ #include <linux/cpu.h> #include <linux/err.h> #include <linux/of.h> -#include <linux/mailbox.h> +#include <linux/pl320-ipc.h> #include <linux/platform_device.h> #define HB_CPUFREQ_CHANGE_NOTE 0x80000001 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0668b389c51..27bb6d3877e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y) return div_s64((int64_t)x << FRAC_BITS, y); } +static inline int ceiling_fp(int32_t x) +{ + int mask, ret; + + ret = fp_toint(x); + mask = (1 << FRAC_BITS) - 1; + if (x & mask) + ret += 1; + return ret; +} + struct sample { int32_t core_pct_busy; u64 aperf; @@ -64,6 +75,7 @@ struct pstate_data { int current_pstate; int min_pstate; int max_pstate; + int scaling; int turbo_pstate; }; @@ -113,6 +125,7 @@ struct pstate_funcs { int (*get_max)(void); int (*get_min)(void); int (*get_turbo)(void); + int (*get_scaling)(void); void (*set)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); }; @@ -138,6 +151,7 @@ struct perf_limits { static struct perf_limits limits = { .no_turbo = 0, + .turbo_disabled = 0, .max_perf_pct = 100, .max_perf = int_tofp(1), .min_perf_pct = 0, @@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void) } } +static inline void update_turbo_state(void) +{ + u64 misc_en; + struct cpudata *cpu; + + cpu = all_cpu_data[0]; + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + limits.turbo_disabled = + (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); +} + /************************** debugfs begin ************************/ static int pid_param_set(void *data, u64 val) { @@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void) return sprintf(buf, "%u\n", limits.object); \ } +static ssize_t show_no_turbo(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + ssize_t ret; + + update_turbo_state(); + if (limits.turbo_disabled) + ret = sprintf(buf, "%u\n", limits.turbo_disabled); + else + ret = sprintf(buf, "%u\n", limits.no_turbo); + + return ret; +} + static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, const char *buf, size_t count) { @@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - limits.no_turbo = clamp_t(int, input, 0 , 1); + + update_turbo_state(); if (limits.turbo_disabled) { pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); - limits.no_turbo = limits.turbo_disabled; + return -EPERM; } + limits.no_turbo = clamp_t(int, input, 0, 1); + return count; } @@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, return count; } -show_one(no_turbo, no_turbo); show_one(max_perf_pct, max_perf_pct); show_one(min_perf_pct, min_perf_pct); @@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, 
int pstate) cpudata->vid.ratio); vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); - vid = fp_toint(vid_fp); + vid = ceiling_fp(vid_fp); if (pstate > cpudata->pstate.max_pstate) vid = cpudata->vid.turbo; @@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) wrmsrl(MSR_IA32_PERF_CTL, val); } +#define BYT_BCLK_FREQS 5 +static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; + +static int byt_get_scaling(void) +{ + u64 value; + int i; + + rdmsrl(MSR_FSB_FREQ, value); + i = value & 0x3; + + BUG_ON(i > BYT_BCLK_FREQS); + + return byt_freq_table[i] * 100; +} + static void byt_get_vid(struct cpudata *cpudata) { u64 value; @@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void) return ret; } +static inline int core_get_scaling(void) +{ + return 100000; +} + static void core_set_pstate(struct cpudata *cpudata, int pstate) { u64 val; @@ -473,6 +536,7 @@ static struct cpu_defaults core_params = { .get_max = core_get_max_pstate, .get_min = core_get_min_pstate, .get_turbo = core_get_turbo_pstate, + .get_scaling = core_get_scaling, .set = core_set_pstate, }, }; @@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = { .get_min = byt_get_min_pstate, .get_turbo = byt_get_turbo_pstate, .set = byt_set_pstate, + .get_scaling = byt_get_scaling, .get_vid = byt_get_vid, }, }; @@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) int max_perf_adj; int min_perf; - if (limits.no_turbo) + if (limits.no_turbo || limits.turbo_disabled) max_perf = cpu->pstate.max_pstate; max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); @@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { int max_perf, min_perf; + update_turbo_state(); + intel_pstate_get_min_max(cpu, &min_perf, &max_perf); pstate = clamp_t(int, pstate, min_perf, max_perf); @@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) if (pstate == cpu->pstate.current_pstate) return; - trace_cpu_frequency(pstate * 100000, cpu->cpu); + trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); cpu->pstate.current_pstate = pstate; @@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) cpu->pstate.min_pstate = pstate_funcs.get_min(); cpu->pstate.max_pstate = pstate_funcs.get_max(); cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); + cpu->pstate.scaling = pstate_funcs.get_scaling(); if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); @@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu) core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); sample->freq = fp_toint( - mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); + mul_fp(int_tofp( + cpu->pstate.max_pstate * cpu->pstate.scaling / 100), + core_pct)); sample->core_pct_busy = (int32_t)core_pct; } @@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum) { struct cpudata *cpu; - all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); + if (!all_cpu_data[cpunum]) + all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), + GFP_KERNEL); if (!all_cpu_data[cpunum]) return -ENOMEM; @@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { limits.min_perf_pct = 100; limits.min_perf = int_tofp(1); + limits.max_policy_pct = 100; limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); - limits.no_turbo = limits.turbo_disabled; + limits.no_turbo = 0; 
return 0; } limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; @@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) del_timer_sync(&all_cpu_data[cpu_num]->timer); intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); - kfree(all_cpu_data[cpu_num]); - all_cpu_data[cpu_num] = NULL; } static int intel_pstate_cpu_init(struct cpufreq_policy *policy) { struct cpudata *cpu; int rc; - u64 misc_en; rc = intel_pstate_init_cpu(policy->cpu); if (rc) @@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) { - limits.turbo_disabled = 1; - limits.no_turbo = 1; - } if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; - policy->min = cpu->pstate.min_pstate * 100000; - policy->max = cpu->pstate.turbo_pstate * 100000; + policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; + policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; /* cpuinfo and default policy values */ - policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; + policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; + policy->cpuinfo.max_freq = + cpu->pstate.turbo_pstate * cpu->pstate.scaling; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); @@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_max = funcs->get_max; pstate_funcs.get_min = funcs->get_min; pstate_funcs.get_turbo = funcs->get_turbo; + pstate_funcs.get_scaling = funcs->get_scaling; pstate_funcs.set = funcs->set; pstate_funcs.get_vid = funcs->get_vid; } diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips index 0e70ee28a5c..4102be01d06 100644 --- a/drivers/cpuidle/Kconfig.mips +++ b/drivers/cpuidle/Kconfig.mips @@ -3,7 +3,7 @@ # config MIPS_CPS_CPUIDLE bool "CPU Idle driver for MIPS CPS platforms" - depends on CPU_IDLE + depends on CPU_IDLE && MIPS_CPS depends on SYS_SUPPORTS_MIPS_CPS select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT select GENERIC_CLOCKEVENTS_BROADCAST if SMP diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index a64be578dab..7d3a3497dd4 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -163,7 +163,8 @@ static int powernv_add_idle_states(void) int nr_idle_states = 1; /* Snooze */ int dt_idle_states; const __be32 *idle_state_flags; - u32 len_flags, flags; + const __be32 *idle_state_latency; + u32 len_flags, flags, latency_ns; int i; /* Currently we have snooze statically defined */ @@ -180,18 +181,32 @@ static int powernv_add_idle_states(void) return nr_idle_states; } + idle_state_latency = of_get_property(power_mgt, + "ibm,cpu-idle-state-latencies-ns", NULL); + if (!idle_state_latency) { + pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n"); + return nr_idle_states; + } + dt_idle_states = len_flags / sizeof(u32); for (i = 0; i < dt_idle_states; i++) { flags = be32_to_cpu(idle_state_flags[i]); + + /* Cpuidle accepts exit_latency in us and we estimate + * target residency to be 10x exit_latency + */ + latency_ns = be32_to_cpu(idle_state_latency[i]); if (flags & IDLE_USE_INST_NAP) { /* Add NAP state */ 
strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID; - powernv_states[nr_idle_states].exit_latency = 10; - powernv_states[nr_idle_states].target_residency = 100; + powernv_states[nr_idle_states].exit_latency = + ((unsigned int)latency_ns) / 1000; + powernv_states[nr_idle_states].target_residency = + ((unsigned int)latency_ns / 100); powernv_states[nr_idle_states].enter = &nap_loop; nr_idle_states++; } @@ -202,8 +217,10 @@ static int powernv_add_idle_states(void) strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP; - powernv_states[nr_idle_states].exit_latency = 300; - powernv_states[nr_idle_states].target_residency = 1000000; + powernv_states[nr_idle_states].exit_latency = + ((unsigned int)latency_ns) / 1000; + powernv_states[nr_idle_states].target_residency = + ((unsigned int)latency_ns / 100); powernv_states[nr_idle_states].enter = &fastsleep_loop; nr_idle_states++; } diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 871703c49d2..e1eaf4ff976 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, u32 *desc; struct split_key_result result; dma_addr_t dma_addr_in, dma_addr_out; - int ret = 0; + int ret = -ENOMEM; desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); - return -ENOMEM; + return ret; } - init_job_desc(desc, 0); - dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_addr_in)) { dev_err(jrdev, "unable to map key input memory\n"); - kfree(desc); - return -ENOMEM; + goto out_free; } + + dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dma_addr_out)) { + dev_err(jrdev, "unable to map key output memory\n"); + goto out_unmap_in; + } + + init_job_desc(desc, 0); append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ @@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_out)) { - dev_err(jrdev, "unable to map key output memory\n"); - kfree(desc); - return -ENOMEM; - } append_fifo_store(desc, dma_addr_out, split_key_len, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); @@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, DMA_FROM_DEVICE); +out_unmap_in: dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); - +out_free: kfree(desc); - return ret; } EXPORT_SYMBOL(gen_split_key); diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 9282381b03c..fe7b3f06f6e 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -198,8 +198,7 @@ struct adf_accel_dev { struct dentry *debugfs_dir; struct list_head list; struct module *owner; - uint8_t accel_id; - uint8_t numa_node; struct adf_accel_pci accel_pci_dev; + uint8_t 
accel_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 5f3fa45348b..9dd2cb72a4e 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); ring = &bank->rings[i]; if (hw_data->tx_rings_mask & (1 << i)) { - ring->inflights = kzalloc_node(sizeof(atomic_t), - GFP_KERNEL, - accel_dev->numa_node); + ring->inflights = + kzalloc_node(sizeof(atomic_t), + GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); if (!ring->inflights) goto err; } else { @@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) int i, ret; etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, - accel_dev->numa_node); + dev_to_node(&GET_DEV(accel_dev))); if (!etr_data) return -ENOMEM; num_banks = GET_MAX_BANKS(accel_dev); size = num_banks * sizeof(struct adf_etr_bank_data); - etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); + etr_data->banks = kzalloc_node(size, GFP_KERNEL, + dev_to_node(&GET_DEV(accel_dev))); if (!etr_data->banks) { ret = -ENOMEM; goto err_bank; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index f2e2f158cfb..9e9619cd4a7 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(!n)) return -EINVAL; - bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); + bufl = kmalloc_node(sz, GFP_ATOMIC, + dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!bufl)) return -ENOMEM; @@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, goto err; for_each_sg(assoc, sg, assoc_n, i) { + if (!sg->length) + continue; bufl->bufers[bufs].addr = dma_map_single(dev, sg_virt(sg), sg->length, @@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf *bufers; buflout = kmalloc_node(sz, GFP_ATOMIC, - inst->accel_dev->numa_node); + dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) goto err; bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 0d59bcb50de..828f2a686aa 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) list_for_each(itr, adf_devmgr_get_head()) { accel_dev = list_entry(itr, struct adf_accel_dev, list); - if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) + if ((node == dev_to_node(&GET_DEV(accel_dev)) || + dev_to_node(&GET_DEV(accel_dev)) < 0) + && adf_dev_started(accel_dev)) break; accel_dev = NULL; } if (!accel_dev) { - pr_err("QAT: Could not find device on give node\n"); + pr_err("QAT: Could not find device on node %d\n", node); accel_dev = adf_devmgr_get_first(); } if (!accel_dev || !adf_dev_started(accel_dev)) @@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) for (i = 0; i < num_inst; i++) { inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, - accel_dev->numa_node); + dev_to_node(&GET_DEV(accel_dev))); if (!inst) goto err; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c 
index 978d6c56639..53c491b59f0 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c @@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) uint64_t reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, - accel_dev->numa_node); + dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 0d0435a41be..948f66be262 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) kfree(accel_dev); } -static uint8_t adf_get_dev_node_id(struct pci_dev *pdev) -{ - unsigned int bus_per_cpu = 0; - struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1); - - if (!c->phys_proc_id) - return 0; - - bus_per_cpu = 256 / (c->phys_proc_id + 1); - - if (bus_per_cpu != 0) - return pdev->bus->number / bus_per_cpu; - return 0; -} - static int qat_dev_start(struct adf_accel_dev *accel_dev) { int cpus = num_online_cpus(); @@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *pmisc_bar_addr = NULL; char name[ADF_DEVICE_NAME_LENGTH]; unsigned int i, bar_nr; - uint8_t node; int ret; switch (ent->device) { @@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - node = adf_get_dev_node_id(pdev); - accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); if (!accel_dev) return -ENOMEM; - accel_dev->numa_node = node; INIT_LIST_HEAD(&accel_dev->crypto_list); /* Add accel device to accel table. 
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) accel_dev->owner = THIS_MODULE; /* Allocate and configure device configuration structure */ - hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); if (!hw_data) { ret = -ENOMEM; goto out_err; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c index 67ec61e5118..d96ee21b9b7 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) uint32_t msix_num_entries = hw_data->num_banks + 1; entries = kzalloc_node(msix_num_entries * sizeof(*entries), - GFP_KERNEL, accel_dev->numa_node); + GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!entries) return -ENOMEM; diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 123f578d6dd..4cfaaa5a49b 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1107,52 +1107,14 @@ bool edma_filter_fn(struct dma_chan *chan, void *param) } EXPORT_SYMBOL(edma_filter_fn); -static struct platform_device *pdev0, *pdev1; - -static const struct platform_device_info edma_dev_info0 = { - .name = "edma-dma-engine", - .id = 0, - .dma_mask = DMA_BIT_MASK(32), -}; - -static const struct platform_device_info edma_dev_info1 = { - .name = "edma-dma-engine", - .id = 1, - .dma_mask = DMA_BIT_MASK(32), -}; - static int edma_init(void) { - int ret = platform_driver_register(&edma_driver); - - if (ret == 0) { - pdev0 = platform_device_register_full(&edma_dev_info0); - if (IS_ERR(pdev0)) { - platform_driver_unregister(&edma_driver); - ret = PTR_ERR(pdev0); - goto out; - } - } - - if (!of_have_populated_dt() && EDMA_CTLRS == 2) { - pdev1 = platform_device_register_full(&edma_dev_info1); - if (IS_ERR(pdev1)) { - platform_driver_unregister(&edma_driver); - platform_device_unregister(pdev0); - ret = PTR_ERR(pdev1); - } - } - -out: - return ret; + return platform_driver_register(&edma_driver); } subsys_initcall(edma_init); static void __exit edma_exit(void) { - platform_device_unregister(pdev0); - if (pdev1) - platform_device_unregister(pdev1); platform_driver_unregister(&edma_driver); } module_exit(edma_exit); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4839bfa74a1..19a99743cf5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -271,7 +271,7 @@ struct pl330_config { #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ - unsigned int data_buf_dep:10; + unsigned int data_buf_dep:11; unsigned int num_chan:4; unsigned int num_peri:6; u32 peri_ns; @@ -2336,7 +2336,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) int burst_len; burst_len = pl330->pcfg.data_bus_width / 8; - burst_len *= pl330->pcfg.data_buf_dep; + burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan; burst_len >>= desc->rqcfg.brst_size; /* src/dst_burst_len can't be more than 16 */ @@ -2459,16 +2459,25 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, /* Select max possible burst size */ burst = pl330->pcfg.data_bus_width / 8; - while (burst > 1) { - if (!(len % burst)) - break; + /* + * Make sure we use a burst size that aligns with all the memcpy + * parameters because our DMA programming algorithm doesn't cope with + * transfers which straddle an entry in the DMA device's MFIFO. 
+ */ + while ((src | dst | len) & (burst - 1)) burst /= 2; - } desc->rqcfg.brst_size = 0; while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + /* + * If burst size is smaller than bus width then make sure we only + * transfer one at a time to avoid a burst straddling an MFIFO entry. + */ + if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) + desc->rqcfg.brst_len = 1; + desc->rqcfg.brst_len = get_burst_len(desc, len); desc->txd.flags = flags; @@ -2732,7 +2741,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&adev->dev, - "Loaded driver for PL330 DMAC-%d\n", adev->periphid); + "Loaded driver for PL330 DMAC-%x\n", adev->periphid); dev_info(&adev->dev, "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 3aa10b32825..91292f5513f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, readl(pchan->base + DMA_CHAN_CUR_PARA)); } -static inline int convert_burst(u32 maxburst, u8 *burst) +static inline s8 convert_burst(u32 maxburst) { switch (maxburst) { case 1: - *burst = 0; - break; + return 0; case 8: - *burst = 2; - break; + return 2; default: return -EINVAL; } - - return 0; } -static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) +static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) { if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) return -EINVAL; - *width = addr_width >> 1; - return 0; + return addr_width >> 1; } static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, @@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, struct dma_slave_config *config) { u8 src_width, dst_width, src_burst, dst_burst; - int ret; if (!config) return -EINVAL; - ret = convert_burst(config->src_maxburst, &src_burst); - if (ret) - return ret; + src_burst = convert_burst(config->src_maxburst); + if (src_burst) + return src_burst; - ret = convert_burst(config->dst_maxburst, &dst_burst); - if (ret) - return ret; + dst_burst = convert_burst(config->dst_maxburst); + if (dst_burst) + return dst_burst; - ret = convert_buswidth(config->src_addr_width, &src_width); - if (ret) - return ret; + src_width = convert_buswidth(config->src_addr_width); + if (src_width) + return src_width; - ret = convert_buswidth(config->dst_addr_width, &dst_width); - if (ret) - return ret; + dst_width = convert_buswidth(config->dst_addr_width); + if (dst_width) + return dst_width; lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | DMA_CHAN_CFG_SRC_WIDTH(src_width) | @@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( { struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); struct sun6i_vchan *vchan = to_sun6i_vchan(chan); - struct dma_slave_config *sconfig = &vchan->cfg; struct sun6i_dma_lli *v_lli; struct sun6i_desc *txd; dma_addr_t p_lli; - int ret; + s8 burst, width; dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %zu.
flags: 0x%08lx\n", @@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( goto err_txd_free; } - ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); - if (ret) - goto err_dma_free; + v_lli->src = src; + v_lli->dst = dest; + v_lli->len = len; + v_lli->para = NORMAL_WAIT; + burst = convert_burst(8); + width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE; + DMA_CHAN_CFG_SRC_LINEAR_MODE | + DMA_CHAN_CFG_SRC_BURST(burst) | + DMA_CHAN_CFG_SRC_WIDTH(width) | + DMA_CHAN_CFG_DST_BURST(burst) | + DMA_CHAN_CFG_DST_WIDTH(width); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); -err_dma_free: - dma_pool_free(sdev->pool, v_lli, p_lli); err_txd_free: kfree(txd); return NULL; @@ -915,6 +913,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; sdc->slave.device_control = sun6i_dma_control; sdc->slave.chancnt = NR_MAX_VCHANS; + sdc->slave.copy_align = 4; sdc->slave.dev = &pdev->dev; diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index df6575f1430..682288ced4a 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c @@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci) if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, pfn, offset, 0, csrow, -1, -1, mci->ctl_name, ""); diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 3cda79bc8b0..ece3aef16bb 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c @@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) static void process_ce_no_info(struct mem_ctl_info *mci) { edac_dbg(3, "\n"); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, "e7xxx CE log register overflow", ""); } diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index 022a70273ad..aa98b136f5d 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci, -1, -1, "i3000 UE", ""); } else if (log & I3200_ECCERRLOG_CE) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), -1, -1, - "i3000 UE", ""); + "i3000 CE", ""); } } } diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 3382f6344e4..4382343a7c6 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c @@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci, dimm->location[0], dimm->location[1], -1, "i82860 UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, info->eap, 0, info->derrsyn, dimm->location[0], dimm->location[1], -1, "i82860 CE", ""); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 5d997a33907..2a3973a7c44 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c 
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client, _IOC_SIZE(cmd) > sizeof(buffer)) return -ENOTTY; - if (_IOC_DIR(cmd) == _IOC_READ) - memset(&buffer, 0, _IOC_SIZE(cmd)); + memset(&buffer, 0, sizeof(buffer)); if (_IOC_DIR(cmd) & _IOC_WRITE) if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 64ecbb501c5..8590099ac14 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -41,6 +41,28 @@ struct efi __read_mostly efi = { }; EXPORT_SYMBOL(efi); +static bool disable_runtime; +static int __init setup_noefi(char *arg) +{ + disable_runtime = true; + return 0; +} +early_param("noefi", setup_noefi); + +bool efi_runtime_disabled(void) +{ + return disable_runtime; +} + +static int __init parse_efi_cmdline(char *str) +{ + if (parse_option_str(str, "noruntime")) + disable_runtime = true; + + return 0; +} +early_param("efi", parse_efi_cmdline); + static struct kobject *efi_kobj; static struct kobject *efivars_kobj; @@ -423,3 +445,60 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) return ret; } #endif /* CONFIG_EFI_PARAMS_FROM_FDT */ + +static __initdata char memory_type_name[][20] = { + "Reserved", + "Loader Code", + "Loader Data", + "Boot Code", + "Boot Data", + "Runtime Code", + "Runtime Data", + "Conventional Memory", + "Unusable Memory", + "ACPI Reclaim Memory", + "ACPI Memory NVS", + "Memory Mapped I/O", + "MMIO Port Space", + "PAL Code" +}; + +char * __init efi_md_typeattr_format(char *buf, size_t size, + const efi_memory_desc_t *md) +{ + char *pos; + int type_len; + u64 attr; + + pos = buf; + if (md->type >= ARRAY_SIZE(memory_type_name)) + type_len = snprintf(pos, size, "[type=%u", md->type); + else + type_len = snprintf(pos, size, "[%-*s", + (int)(sizeof(memory_type_name[0]) - 1), + memory_type_name[md->type]); + if (type_len >= size) + return buf; + + pos += type_len; + size -= type_len; + + attr = md->attribute; + if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT | + EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_WP | + EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_RUNTIME)) + snprintf(pos, size, "|attr=0x%016llx]", + (unsigned long long)attr); + else + snprintf(pos, size, "|%3s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", + attr & EFI_MEMORY_RUNTIME ? "RUN" : "", + attr & EFI_MEMORY_XP ? "XP" : "", + attr & EFI_MEMORY_RP ? "RP" : "", + attr & EFI_MEMORY_WP ? "WP" : "", + attr & EFI_MEMORY_UCE ? "UCE" : "", + attr & EFI_MEMORY_WB ? "WB" : "", + attr & EFI_MEMORY_WT ? "WT" : "", + attr & EFI_MEMORY_WC ? "WC" : "", + attr & EFI_MEMORY_UC ? "UC" : ""); + return buf; +} diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 480339b6b11..75ee05964cb 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -226,6 +226,10 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table, goto fail_free_image; } + status = efi_parse_options(cmdline_ptr); + if (status != EFI_SUCCESS) + pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n"); + /* * Unauthenticated device tree data is a security hazard, so * ignore 'dtb=' unless UEFI Secure Boot is disabled. 
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 32d5cca30f4..a920fec8fe8 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -15,8 +15,23 @@ #include "efistub.h" +/* + * Some firmware implementations have problems reading files in one go. + * A read chunk size of 1MB seems to work for most platforms. + * + * Unfortunately, reading files in chunks triggers *other* bugs on some + * platforms, so we provide a way to disable this workaround, which can + * be done by passing "efi=nochunk" on the EFI boot stub command line. + * + * If you experience issues with initrd images being corrupt it's worth + * trying efi=nochunk, but chunking is enabled by default because there + * are far more machines that require the workaround than those that + * break with it enabled. + */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) +static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; + struct file_info { efi_file_handle_t *handle; u64 size; @@ -281,6 +296,49 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, efi_call_early(free_pages, addr, nr_pages); } +/* + * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= + * option, e.g. efi=nochunk. + * + * It should be noted that efi= is parsed in two very different + * environments, first in the early boot environment of the EFI boot + * stub, and subsequently during the kernel boot. + */ +efi_status_t efi_parse_options(char *cmdline) +{ + char *str; + + /* + * If no EFI parameters were specified on the cmdline we've got + * nothing to do. + */ + str = strstr(cmdline, "efi="); + if (!str) + return EFI_SUCCESS; + + /* Skip ahead to first argument */ + str += strlen("efi="); + + /* + * Remember, because efi= is also used by the kernel we need to + * skip over arguments we don't understand. + */ + while (*str) { + if (!strncmp(str, "nochunk", 7)) { + str += strlen("nochunk"); + __chunk_size = -1UL; + } + + /* Group words together, delimited by "," */ + while (*str && *str != ',') + str++; + + if (*str == ',') + str++; + } + + return EFI_SUCCESS; +} /* * Check the cmdline for a LILO-style file= arguments. @@ -423,8 +481,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, size = files[j].size; while (size) { unsigned long chunksize; - if (size > EFI_READ_CHUNK_SIZE) - chunksize = EFI_READ_CHUNK_SIZE; + if (size > __chunk_size) + chunksize = __chunk_size; else chunksize = size; diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 10daa4bbb25..228bbf91046 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -14,11 +14,80 @@ * This file is released under the GPLv2. */ +#include <linux/bug.h> #include <linux/efi.h> -#include <linux/spinlock.h> /* spinlock_t */ +#include <linux/mutex.h> +#include <linux/spinlock.h> #include <asm/efi.h> /* + * According to section 7.1 of the UEFI spec, Runtime Services are not fully + * reentrant, and there are particular combinations of calls that need to be + * serialized. (source: UEFI Specification v2.4A) + * + * Table 31. 
Rules for Reentry Into Runtime Services + * +------------------------------------+-------------------------------+ + * | If previous call is busy in | Forbidden to call | + * +------------------------------------+-------------------------------+ + * | Any | SetVirtualAddressMap() | + * +------------------------------------+-------------------------------+ + * | ConvertPointer() | ConvertPointer() | + * +------------------------------------+-------------------------------+ + * | SetVariable() | ResetSystem() | + * | UpdateCapsule() | | + * | SetTime() | | + * | SetWakeupTime() | | + * | GetNextHighMonotonicCount() | | + * +------------------------------------+-------------------------------+ + * | GetVariable() | GetVariable() | + * | GetNextVariableName() | GetNextVariableName() | + * | SetVariable() | SetVariable() | + * | QueryVariableInfo() | QueryVariableInfo() | + * | UpdateCapsule() | UpdateCapsule() | + * | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() | + * | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() | + * +------------------------------------+-------------------------------+ + * | GetTime() | GetTime() | + * | SetTime() | SetTime() | + * | GetWakeupTime() | GetWakeupTime() | + * | SetWakeupTime() | SetWakeupTime() | + * +------------------------------------+-------------------------------+ + * + * Due to the fact that the EFI pstore may write to the variable store in + * interrupt context, we need to use a spinlock for at least the groups that + * contain SetVariable() and QueryVariableInfo(). That leaves little else, as + * none of the remaining functions are actually ever called at runtime. + * So let's just use a single spinlock to serialize all Runtime Services calls. + */ +static DEFINE_SPINLOCK(efi_runtime_lock); + +/* + * Some runtime services calls can be reentrant under NMI, even if the table + * above says they are not. (source: UEFI Specification v2.4A) + * + * Table 32. Functions that may be called after Machine Check, INIT and NMI + * +----------------------------+------------------------------------------+ + * | Function | Called after Machine Check, INIT and NMI | + * +----------------------------+------------------------------------------+ + * | GetTime() | Yes, even if previously busy. | + * | GetVariable() | Yes, even if previously busy | + * | GetNextVariableName() | Yes, even if previously busy | + * | QueryVariableInfo() | Yes, even if previously busy | + * | SetVariable() | Yes, even if previously busy | + * | UpdateCapsule() | Yes, even if previously busy | + * | QueryCapsuleCapabilities() | Yes, even if previously busy | + * | ResetSystem() | Yes, even if previously busy | + * +----------------------------+------------------------------------------+ + * + * In order to prevent deadlocks under NMI, the wrappers for these functions + * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi(). + * However, not all of the services listed are reachable through NMI code paths, + * so the the special handling as suggested by the UEFI spec is only implemented + * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI + * context through efi_pstore_write(). 
+ */ + +/* * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"), * the EFI specification requires that callers of the time related runtime * functions serialize with other CMOS accesses in the kernel, as the EFI time @@ -32,7 +101,9 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) efi_status_t status; spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&efi_runtime_lock); status = efi_call_virt(get_time, tm, tc); + spin_unlock(&efi_runtime_lock); spin_unlock_irqrestore(&rtc_lock, flags); return status; } @@ -43,7 +114,9 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm) efi_status_t status; spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&efi_runtime_lock); status = efi_call_virt(set_time, tm); + spin_unlock(&efi_runtime_lock); spin_unlock_irqrestore(&rtc_lock, flags); return status; } @@ -56,7 +129,9 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, efi_status_t status; spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&efi_runtime_lock); status = efi_call_virt(get_wakeup_time, enabled, pending, tm); + spin_unlock(&efi_runtime_lock); spin_unlock_irqrestore(&rtc_lock, flags); return status; } @@ -67,7 +142,9 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) efi_status_t status; spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&efi_runtime_lock); status = efi_call_virt(set_wakeup_time, enabled, tm); + spin_unlock(&efi_runtime_lock); spin_unlock_irqrestore(&rtc_lock, flags); return status; } @@ -78,14 +155,27 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name, unsigned long *data_size, void *data) { - return efi_call_virt(get_variable, name, vendor, attr, data_size, data); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(get_variable, name, vendor, attr, data_size, + data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) { - return efi_call_virt(get_next_variable, name_size, name, vendor); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(get_next_variable, name_size, name, vendor); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } static efi_status_t virt_efi_set_variable(efi_char16_t *name, @@ -94,24 +184,61 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, unsigned long data_size, void *data) { - return efi_call_virt(set_variable, name, vendor, attr, data_size, data); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(set_variable, name, vendor, attr, data_size, + data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } +static efi_status_t +virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data) +{ + unsigned long flags; + efi_status_t status; + + if (!spin_trylock_irqsave(&efi_runtime_lock, flags)) + return EFI_NOT_READY; + + status = efi_call_virt(set_variable, name, vendor, attr, data_size, + data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; +} + + static efi_status_t virt_efi_query_variable_info(u32 attr, u64 *storage_space, u64 *remaining_space, u64 *max_variable_size) { + unsigned long flags; + efi_status_t status; + if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 
return EFI_UNSUPPORTED; - return efi_call_virt(query_variable_info, attr, storage_space, - remaining_space, max_variable_size); + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(query_variable_info, attr, storage_space, + remaining_space, max_variable_size); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) { - return efi_call_virt(get_next_high_mono_count, count); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(get_next_high_mono_count, count); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } static void virt_efi_reset_system(int reset_type, @@ -119,17 +246,27 @@ static void virt_efi_reset_system(int reset_type, unsigned long data_size, efi_char16_t *data) { + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); __efi_call_virt(reset_system, reset_type, status, data_size, data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); } static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, unsigned long count, unsigned long sg_list) { + unsigned long flags; + efi_status_t status; + if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) return EFI_UNSUPPORTED; - return efi_call_virt(update_capsule, capsules, count, sg_list); + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(update_capsule, capsules, count, sg_list); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, @@ -137,11 +274,17 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, u64 *max_size, int *reset_type) { + unsigned long flags; + efi_status_t status; + if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) return EFI_UNSUPPORTED; - return efi_call_virt(query_capsule_caps, capsules, count, max_size, - reset_type); + spin_lock_irqsave(&efi_runtime_lock, flags); + status = efi_call_virt(query_capsule_caps, capsules, count, max_size, + reset_type); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } void efi_native_runtime_setup(void) @@ -153,6 +296,7 @@ void efi_native_runtime_setup(void) efi.get_variable = virt_efi_get_variable; efi.get_next_variable = virt_efi_get_next_variable; efi.set_variable = virt_efi_set_variable; + efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking; efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; efi.reset_system = virt_efi_reset_system; efi.query_variable_info = virt_efi_query_variable_info; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 5abe943e340..70a0fb10517 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -321,11 +321,11 @@ static unsigned long var_name_strnsize(efi_char16_t *variable_name, * Print a warning when duplicate EFI variables are encountered and * disable the sysfs workqueue since the firmware is buggy. 
*/ -static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, +static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, unsigned long len16) { size_t i, len8 = len16 / sizeof(efi_char16_t); - char *s8; + char *str8; /* * Disable the workqueue since the algorithm it uses for @@ -334,16 +334,16 @@ static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, */ efivar_wq_enabled = false; - s8 = kzalloc(len8, GFP_KERNEL); - if (!s8) + str8 = kzalloc(len8, GFP_KERNEL); + if (!str8) return; for (i = 0; i < len8; i++) - s8[i] = s16[i]; + str8[i] = str16[i]; printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", - s8, vendor_guid); - kfree(s8); + str8, vendor_guid); + kfree(str8); } /** @@ -595,6 +595,39 @@ int efivar_entry_set(struct efivar_entry *entry, u32 attributes, } EXPORT_SYMBOL_GPL(efivar_entry_set); +/* + * efivar_entry_set_nonblocking - call set_variable_nonblocking() + * + * This function is guaranteed to not block and is suitable for calling + * from crash/panic handlers. + * + * Crucially, this function will not block if it cannot acquire + * __efivars->lock. Instead, it returns -EBUSY. + */ +static int +efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor, + u32 attributes, unsigned long size, void *data) +{ + const struct efivar_operations *ops = __efivars->ops; + unsigned long flags; + efi_status_t status; + + if (!spin_trylock_irqsave(&__efivars->lock, flags)) + return -EBUSY; + + status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) { + spin_unlock_irqrestore(&__efivars->lock, flags); + return -ENOSPC; + } + + status = ops->set_variable_nonblocking(name, &vendor, attributes, + size, data); + + spin_unlock_irqrestore(&__efivars->lock, flags); + return efi_status_to_err(status); +} + /** * efivar_entry_set_safe - call set_variable() if enough space in firmware * @name: buffer containing the variable name @@ -622,6 +655,20 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, if (!ops->query_variable_store) return -ENOSYS; + /* + * If the EFI variable backend provides a non-blocking + * ->set_variable() operation and we're in a context where we + * cannot block, then we need to use it to avoid live-locks, + * since the implication is that the regular ->set_variable() + * will block. + * + * If no ->set_variable_nonblocking() is provided then + * ->set_variable() is assumed to be non-blocking. + */ + if (!block && ops->set_variable_nonblocking) + return efivar_entry_set_nonblocking(name, vendor, attributes, + size, data); + if (!block) { if (!spin_trylock_irqsave(&__efivars->lock, flags)) return -EBUSY; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 9a0cc09e665..e4a1490b42c 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -260,7 +260,7 @@ static void armada_drm_vblank_off(struct armada_crtc *dcrtc) * Tell the DRM core that vblank IRQs aren't going to happen for * a while. This cleans up any pending vblank events for us. */ - drm_vblank_off(dev, dcrtc->num); + drm_crtc_vblank_off(&dcrtc->crtc); /* Handle any pending flip event. 
*/ spin_lock_irq(&dev->event_lock); @@ -289,6 +289,8 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms) armada_drm_crtc_update(dcrtc); if (dpms_blanked(dpms)) armada_drm_vblank_off(dcrtc); + else + drm_crtc_vblank_on(&dcrtc->crtc); } } @@ -526,7 +528,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, /* Wait for pending flips to complete */ wait_event(dcrtc->frame_wait, !dcrtc->frame_work); - drm_vblank_pre_modeset(crtc->dev, dcrtc->num); + drm_crtc_vblank_off(crtc); crtc->mode = *adj; @@ -617,7 +619,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, armada_drm_crtc_update(dcrtc); - drm_vblank_post_modeset(crtc->dev, dcrtc->num); + drm_crtc_vblank_on(crtc); armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms)); return 0; @@ -945,18 +947,15 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc, armada_reg_queue_end(work->regs, i); /* - * Hold the old framebuffer for the work - DRM appears to drop our - * reference to the old framebuffer in drm_mode_page_flip_ioctl(). + * Ensure that we hold a reference on the new framebuffer. + * This has to match the behaviour in mode_set. */ - drm_framebuffer_reference(work->old_fb); + drm_framebuffer_reference(fb); ret = armada_drm_crtc_queue_frame_work(dcrtc, work); if (ret) { - /* - * Undo our reference above; DRM does not drop the reference - * to this object on error, so that's okay. - */ - drm_framebuffer_unreference(work->old_fb); + /* Undo our reference above */ + drm_framebuffer_unreference(fb); kfree(work); return ret; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index f672e6ad8af..908e5316eac 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -190,6 +190,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) if (ret) goto err_comp; + dev->irq_enabled = true; dev->vblank_disable_allowed = 1; ret = armada_fbdev_init(dev); @@ -331,7 +332,7 @@ static struct drm_driver armada_drm_driver = { .desc = "Armada SoC DRM", .date = "20120730", .driver_features = DRIVER_GEM | DRIVER_MODESET | - DRIVER_PRIME, + DRIVER_HAVE_IRQ | DRIVER_PRIME, .ioctls = armada_ioctls, .fops = &armada_drm_fops, }; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index e705335101a..c2a1cba1e98 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -32,6 +32,8 @@ static struct drm_driver driver; static const struct pci_device_id pciidlist[] = { { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, 0, 0 }, + { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN, + 0x0001, 0, 0, 0 }, {0,} }; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index cd50ece3160..6adb1e5cfb0 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1355,13 +1355,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master, void *data) { struct exynos_drm_display *display = dev_get_drvdata(dev); - struct exynos_dp_device *dp = display->ctx; - struct drm_encoder *encoder = dp->encoder; exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); - - exynos_dp_connector_destroy(&dp->connector); - encoder->funcs->destroy(encoder); } static const struct component_ops exynos_dp_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 8e38e9f8e54..45026e69322 100644 --- 
a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -71,13 +71,16 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) !atomic_read(&exynos_crtc->pending_flip), HZ/20)) atomic_set(&exynos_crtc->pending_flip, 0); - drm_vblank_off(crtc->dev, exynos_crtc->pipe); + drm_crtc_vblank_off(crtc); } if (manager->ops->dpms) manager->ops->dpms(manager, mode); exynos_crtc->dpms = mode; + + if (mode == DRM_MODE_DPMS_ON) + drm_crtc_vblank_on(crtc); } static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 96c87db388f..3dc678ed994 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -338,14 +338,10 @@ err_del_component: int exynos_dpi_remove(struct device *dev) { - struct drm_encoder *encoder = exynos_dpi_display.encoder; struct exynos_dpi *ctx = exynos_dpi_display.ctx; exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); - exynos_dpi_connector_destroy(&ctx->connector); - encoder->funcs->destroy(encoder); - if (ctx->panel) drm_panel_detach(ctx->panel); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 443a2069858..e5c4c6c8c96 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -87,16 +87,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) plane = exynos_plane_init(dev, possible_crtcs, DRM_PLANE_TYPE_OVERLAY); - if (IS_ERR(plane)) - goto err_mode_config_cleanup; - } - - /* init kms poll for handling hpd */ - drm_kms_helper_poll_init(dev); + if (!IS_ERR(plane)) + continue; - ret = drm_vblank_init(dev, MAX_CRTC); - if (ret) + ret = PTR_ERR(plane); goto err_mode_config_cleanup; + } /* setup possible_clones. */ exynos_drm_encoder_setup(dev); @@ -106,15 +102,16 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) /* Try to bind all sub drivers. */ ret = component_bind_all(dev->dev, dev); if (ret) - goto err_cleanup_vblank; + goto err_mode_config_cleanup; - /* Probe non kms sub drivers and virtual display driver. */ - ret = exynos_drm_device_subdrv_probe(dev); + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret) goto err_unbind_all; - /* force connectors detection */ - drm_helper_hpd_irq_event(dev); + /* Probe non kms sub drivers and virtual display driver. */ + ret = exynos_drm_device_subdrv_probe(dev); + if (ret) + goto err_cleanup_vblank; /* * enable drm irq mode. 
@@ -133,12 +130,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) */ dev->vblank_disable_allowed = true; + /* init kms poll for handling hpd */ + drm_kms_helper_poll_init(dev); + + /* force connectors detection */ + drm_helper_hpd_irq_event(dev); + return 0; -err_unbind_all: - component_unbind_all(dev->dev, dev); err_cleanup_vblank: drm_vblank_cleanup(dev); +err_unbind_all: + component_unbind_all(dev->dev, dev); err_mode_config_cleanup: drm_mode_config_cleanup(dev); drm_release_iommu_mapping(dev); @@ -155,8 +158,8 @@ static int exynos_drm_unload(struct drm_device *dev) exynos_drm_fbdev_fini(dev); drm_kms_helper_poll_fini(dev); - component_unbind_all(dev->dev, dev); drm_vblank_cleanup(dev); + component_unbind_all(dev->dev, dev); drm_mode_config_cleanup(dev); drm_release_iommu_mapping(dev); @@ -191,8 +194,12 @@ static int exynos_drm_resume(struct drm_device *dev) drm_modeset_lock_all(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->funcs->dpms) - connector->funcs->dpms(connector, connector->dpms); + if (connector->funcs->dpms) { + int dpms = connector->dpms; + + connector->dpms = DRM_MODE_DPMS_OFF; + connector->funcs->dpms(connector, dpms); + } } drm_modeset_unlock_all(dev); @@ -488,6 +495,12 @@ static struct component_match *exynos_drm_match_add(struct device *dev) mutex_lock(&drm_component_lock); + /* Do not retry to probe if there is no kms driver registered. */ + if (list_empty(&drm_component_list)) { + mutex_unlock(&drm_component_lock); + return ERR_PTR(-ENODEV); + } + list_for_each_entry(cdev, &drm_component_list, list) { /* * Add components to master only in case that crtc and @@ -578,10 +591,21 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) goto err_unregister_mixer_drv; #endif + match = exynos_drm_match_add(&pdev->dev); + if (IS_ERR(match)) { + ret = PTR_ERR(match); + goto err_unregister_hdmi_drv; + } + + ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops, + match); + if (ret < 0) + goto err_unregister_hdmi_drv; + #ifdef CONFIG_DRM_EXYNOS_G2D ret = platform_driver_register(&g2d_driver); if (ret < 0) - goto err_unregister_hdmi_drv; + goto err_del_component_master; #endif #ifdef CONFIG_DRM_EXYNOS_FIMC @@ -612,23 +636,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) goto err_unregister_ipp_drv; #endif - match = exynos_drm_match_add(&pdev->dev); - if (IS_ERR(match)) { - ret = PTR_ERR(match); - goto err_unregister_resources; - } - - ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops, - match); - if (ret < 0) - goto err_unregister_resources; - return ret; -err_unregister_resources: - #ifdef CONFIG_DRM_EXYNOS_IPP - exynos_platform_device_ipp_unregister(); err_unregister_ipp_drv: platform_driver_unregister(&ipp_driver); err_unregister_gsc_drv: @@ -651,9 +661,11 @@ err_unregister_g2d_drv: #ifdef CONFIG_DRM_EXYNOS_G2D platform_driver_unregister(&g2d_driver); -err_unregister_hdmi_drv: +err_del_component_master: #endif + component_master_del(&pdev->dev, &exynos_drm_ops); +err_unregister_hdmi_drv: #ifdef CONFIG_DRM_EXYNOS_HDMI platform_driver_unregister(&hdmi_driver); err_unregister_mixer_drv: @@ -734,6 +746,18 @@ static int exynos_drm_init(void) { int ret; + /* + * Register device object only in case of Exynos SoC. + * + * The code below temporarily resolves an infinite loop issue incurred + * by the Exynos drm driver when using a multi-platform kernel. + * It will be replaced with a more generic approach later.
+ */ + if (!of_machine_is_compatible("samsung,exynos3") && + !of_machine_is_compatible("samsung,exynos4") && + !of_machine_is_compatible("samsung,exynos5")) + return -ENODEV; + exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, NULL, 0); if (IS_ERR(exynos_drm_pdev)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 24741d8758e..acf7e9e39dc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1660,13 +1660,9 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data) { struct exynos_dsi *dsi = exynos_dsi_display.ctx; - struct drm_encoder *encoder = dsi->encoder; exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); - exynos_dsi_connector_destroy(&dsi->connector); - encoder->funcs->destroy(encoder); - mipi_dsi_host_unregister(&dsi->dsi_host); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index df7a77d3eff..6ff8599f6cb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -302,9 +302,12 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d) struct exynos_drm_subdrv *subdrv = &g2d->subdrv; kfree(g2d->cmdlist_node); - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, - g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + + if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) { + dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, + g2d->cmdlist_pool_virt, + g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + } } static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index d565207040a..50faf913e57 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -630,7 +630,6 @@ static int vidi_remove(struct platform_device *pdev) { struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); struct vidi_context *ctx = mgr->ctx; - struct drm_encoder *encoder = ctx->encoder; if (ctx->raw_edid != (struct edid *)fake_edid_info) { kfree(ctx->raw_edid); @@ -639,9 +638,6 @@ static int vidi_remove(struct platform_device *pdev) return -EINVAL; } - encoder->funcs->destroy(encoder); - drm_connector_cleanup(&ctx->connector); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 7910fb37d9b..563a19e62eb 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -2312,12 +2312,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) static void hdmi_unbind(struct device *dev, struct device *master, void *data) { - struct exynos_drm_display *display = get_hdmi_display(dev); - struct drm_encoder *encoder = display->encoder; - struct hdmi_context *hdata = display->ctx; - - hdmi_connector_destroy(&hdata->connector); - encoder->funcs->destroy(encoder); } static const struct component_ops hdmi_component_ops = { diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1403b01e821..318ade9bb5a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1670,15 +1670,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_regs; if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_kick_out_vgacon(dev_priv); + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga 
fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); goto out_gtt; } - ret = i915_kick_out_firmware_fb(dev_priv); + ret = i915_kick_out_vgacon(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + DRM_ERROR("failed to remove conflicting VGA console\n"); goto out_gtt; } } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 055d5e7fbf1..2318b4c7a8f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev) return i915_drm_freeze(drm_dev); } +static int i915_pm_freeze_late(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_i915_private *dev_priv = drm_dev->dev_private; + + return intel_suspend_complete(dev_priv); +} + static int i915_pm_thaw_early(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = { .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, .freeze = i915_pm_freeze, + .freeze_late = i915_pm_freeze_late, .thaw_early = i915_pm_thaw_early, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b672b843fd5..728938f0234 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + if (!USES_PPGTT(dev_priv->dev)) + /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, + * so RTL will always use the value corresponding to + * pat_sel = 000". + * So let's disable cache for GGTT to avoid screen corruptions. + * MOCS still can be used though. + * - System agent ggtt writes (i.e. cpu gtt mmaps) already work + * before this patch, i.e. the same uncached + snooping access + * like on gen6/7 seems to be in effect. + * - So this just fixes blitter/render access. Again it looks + * like it's not just uncached access, but uncached + snooping. + * So we can still hold onto all our assumptions wrt cpu + * clflushing on LLC machines. + */ + pat = GEN8_PPAT(0, GEN8_PPAT_UC); + /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b * write would work. */ I915_WRITE(GEN8_PRIVATE_PAT, pat); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 2cefb597df6..2b1eaa29ada 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -364,22 +364,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * has to also include the unfenced register the GPU uses * whilst executing a fenced command for an untiled object. 
*/ - - obj->map_and_fenceable = - !i915_gem_obj_ggtt_bound(obj) || - (i915_gem_obj_ggtt_offset(obj) + - obj->base.size <= dev_priv->gtt.mappable_end && - i915_gem_object_fence_ok(obj, args->tiling_mode)); - - /* Rebind if we need a change of alignment */ - if (!obj->map_and_fenceable) { - u32 unfenced_align = - i915_gem_get_gtt_alignment(dev, obj->base.size, - args->tiling_mode, - false); - if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) - ret = i915_gem_object_ggtt_unbind(obj); - } + if (obj->map_and_fenceable && + !i915_gem_object_fence_ok(obj, args->tiling_mode)) + ret = i915_gem_object_ggtt_unbind(obj); if (ret == 0) { obj->fence_dirty = diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3201986bf25..f66392b6e28 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1711,7 +1711,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 -static int ilk_port_to_hotplug_shift(enum port port) +static int pch_port_to_hotplug_shift(enum port port) { switch (port) { case PORT_A: @@ -1727,7 +1727,7 @@ static int ilk_port_to_hotplug_shift(enum port port) } } -static int g4x_port_to_hotplug_shift(enum port port) +static int i915_port_to_hotplug_shift(enum port port) { switch (port) { case PORT_A: @@ -1785,12 +1785,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, if (port && dev_priv->hpd_irq_port[port]) { bool long_hpd; - if (IS_G4X(dev)) { - dig_shift = g4x_port_to_hotplug_shift(port); - long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; - } else { - dig_shift = ilk_port_to_hotplug_shift(port); + if (HAS_PCH_SPLIT(dev)) { + dig_shift = pch_port_to_hotplug_shift(port); long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } else { + dig_shift = i915_port_to_hotplug_shift(port); + long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; } DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", @@ -3458,12 +3458,13 @@ static void gen8_irq_reset(struct drm_device *dev) void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) { unsigned long irqflags; + uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], - ~dev_priv->de_irq_mask[PIPE_B]); + ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], - ~dev_priv->de_irq_mask[PIPE_C]); + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 507370513f3..8bcdb981d54 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -#define DIV_ROUND_CLOSEST_ULL(ll, d) \ -({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) - static void intel_increase_pllclock(struct drm_device *dev, enum pipe pipe); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); @@ -4588,7 +4585,7 @@ static void vlv_update_cdclk(struct drm_device *dev) * BSpec erroneously claims we should aim for 4MHz, but * in fact 1MHz is the correct frequency. 
- I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq); + I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000)); } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -9411,6 +9408,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + if (i915_reset_in_progress(&dev_priv->gpu_error) || + crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + return true; + /* * The relevant registers don't exist on pre-ctg. * As the flip done interrupt doesn't trigger for mmio @@ -12357,27 +12358,36 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { - if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { + /* + * The DP_DETECTED bit is the latched state of the DDC + * SDA pin at boot. However since eDP doesn't require DDC + * (no way to plug in a DP->HDMI dongle) the DDC pins for + * eDP ports may have been muxed to an alternate function. + * Thus we can't rely on the DP_DETECTED bit alone to detect + * eDP ports. Consult the VBT as well as DP_DETECTED to + * detect eDP ports. + */ + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, PORT_B); - if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); - } + if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED || + intel_dp_is_edp(dev, PORT_B)) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); - if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, PORT_C); - if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); - } + if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED || + intel_dp_is_edp(dev, PORT_C)) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { - if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) { + if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, PORT_D); - if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); - } + /* eDP not supported on port D, so don't check VBT */ + if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); } intel_dsi_init(dev); @@ -12879,6 +12889,9 @@ static struct intel_quirk intel_quirks[] = { /* Acer C720 Chromebook (Core i3 4005U) */ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, + /* Apple Macbook 2,1 (Core 2 T7400) */ + { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, + /* Toshiba CB35 Chromebook (Celeron 2955U) */ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f6a3fdd5589..4bcd9175732 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2806,6 +2806,13 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset, ssize_t ret; int i; + /* + * Sometimes we just get the same incorrect byte repeated + * over the entire buffer. Doing just one throw away read + * initially seems to "solve" it.
+ */ + drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1); + for (i = 0; i < 3; i++) { ret = drm_dp_dpcd_read(aux, offset, buffer, size); if (ret == size) @@ -3724,9 +3731,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } } - /* Training Pattern 3 support */ + /* Training Pattern 3 support, both source and sink */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && - intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { + intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED && + (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) { intel_dp->use_tps3 = true; DRM_DEBUG_KMS("Displayport TPS3 supported\n"); } else @@ -4442,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) * vdd might still be enabled due to the delayed vdd off. * Make sure vdd is actually turned off here. */ + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); pps_lock(intel_dp); edp_panel_vdd_off_sync(intel_dp); pps_unlock(intel_dp); @@ -4491,6 +4500,18 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; + if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { + /* + * vdd off can generate a long pulse on eDP which + * would require vdd on to handle it, and thus we + * would end up in an endless cycle of + * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." + */ + DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", + port_name(intel_dig_port->port)); + return false; + } + DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", port_name(intel_dig_port->port), long_hpd ? "long" : "short"); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 07ce04683c3..ba715229a54 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -35,6 +35,9 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_dp_mst_helper.h> +#define DIV_ROUND_CLOSEST_ULL(ll, d) \ +({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) + /** * _wait_for - magic (register) wait macro * diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 18784470a76..41b3be21749 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val, source_val = clamp(source_val, source_min, source_max); /* avoid overflows */ - target_val = (uint64_t)(source_val - source_min) * - (target_max - target_min); - do_div(target_val, source_max - source_min); + target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) * + (target_max - target_min), source_max - source_min); target_val += target_min; return target_val; @@ -1099,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; + int min; WARN_ON(panel->backlight.max == 0); + /* + * XXX: If the vbt value is 255, it makes min equal to max, which leads + * to problems. There are such machines out there. Either our + * interpretation is wrong or the vbt has bogus data. Or both. Safeguard + * against this by letting the minimum be at most (arbitrarily chosen) + * 25% of the max.
+ */ + min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); + if (min != dev_priv->vbt.backlight.min_brightness) { + DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n", + dev_priv->vbt.backlight.min_brightness, min); + } + /* vbt value is a coefficient in range [0..255] */ - return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, - 0, panel->backlight.max); + return scale(min, 0, 255, 0, panel->backlight.max); } static int bdw_setup_backlight(struct intel_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c27b6140bfd..ad2fd605f76 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5469,11 +5469,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(_3D_CHICKEN, _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - /* WaSetupGtModeTdRowDispatch:snb */ - if (IS_SNB_GT1(dev)) - I915_WRITE(GEN6_GT_MODE, - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); - /* WaDisable_RenderCache_OperationalFlush:snb */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c index 552fdbd45eb..1d0e33fb5f6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c @@ -113,6 +113,8 @@ #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) #define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) +#include <subdev/fb.h> + /* * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's * the GPU itself that does context-switching, but it needs a special @@ -569,8 +571,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x407d08, 0x00010040); else if (device->chipset < 0xa0) gr_def(ctx, 0x407d08, 0x00390040); - else - gr_def(ctx, 0x407d08, 0x003d0040); + else { + if (nouveau_fb(device)->ram->type != NV_MEM_TYPE_GDDR5) + gr_def(ctx, 0x407d08, 0x003d0040); + else + gr_def(ctx, 0x407d08, 0x003c0040); + } gr_def(ctx, 0x407d0c, 0x00000022); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c index a16024a7477..fde42e4d1b5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c @@ -27,6 +27,20 @@ struct gk20a_fb_priv { }; static int +gk20a_fb_init(struct nouveau_object *object) +{ + struct gk20a_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ + return 0; +} + +static int gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) @@ -48,7 +62,7 @@ gk20a_fb_oclass = &(struct nouveau_fb_impl) { .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = gk20a_fb_ctor, .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, + .init = gk20a_fb_init, .fini = _nouveau_fb_fini, }, .memtype = nvc0_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 589dbb582da..fd3dbd59d73 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -400,15 +400,20 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, struct nouveau_channel **pchan) { struct nouveau_cli *cli = (void 
*)nvif_client(&device->base); + bool super; int ret; + /* hack until fencenv50 is fixed, and agp access relaxed */ + super = cli->base.super; + cli->base.super = true; + ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); if (ret) { NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); ret = nouveau_channel_dma(drm, device, handle, pchan); if (ret) { NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); - return ret; + goto done; } } @@ -416,8 +421,9 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, if (ret) { NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); - return ret; } - return 0; +done: + cli->base.super = super; + return ret; } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index ae873d1a8d4..eb8b36714fa 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -791,6 +791,22 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) } static int +nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push; + + push = evo_wait(mast, 8); + if (!push) + return -ENOMEM; + + evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1); + evo_data(push, usec); + evo_kick(push, mast); + return 0; +} + +static int nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) { struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); @@ -1104,14 +1120,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x00800000 | mode->clock); evo_data(push, (ilace == 2) ? 2 : 0); - evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8); + evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); evo_data(push, 0x00000000); evo_data(push, (vactive << 16) | hactive); evo_data(push, ( vsynce << 16) | hsynce); evo_data(push, (vblanke << 16) | hblanke); evo_data(push, (vblanks << 16) | hblanks); evo_data(push, (vblan2e << 16) | vblan2s); - evo_data(push, vblankus); + evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x00000311); @@ -1141,6 +1157,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, nv_connector = nouveau_crtc_connector_get(nv_crtc); nv50_crtc_set_dither(nv_crtc, false); nv50_crtc_set_scale(nv_crtc, false); + + /* G94 only accepts this after setting scale */ + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) + nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus); + nv50_crtc_set_color_vibrance(nv_crtc, false); nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false); return 0; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index af9e7854668..0d139626685 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -572,7 +572,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, struct qxl_framebuffer *qfb; struct qxl_bo *bo, *old_bo = NULL; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); - uint32_t width, height, base_offset; bool recreate_primary = false; int ret; int surf_id; @@ -602,9 +601,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, if (qcrtc->index == 0) recreate_primary = true; - width = mode->hdisplay; - height = mode->vdisplay; - base_offset = 0; + if (bo->surf.stride * bo->surf.height > qdev->vram_size) { + DRM_ERROR("Mode doesn't fit 
in vram size (vgamem)"); + return -EINVAL; + } ret = qxl_bo_reserve(bo, false); if (ret != 0) @@ -618,10 +618,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, if (recreate_primary) { qxl_io_destroy_primary(qdev); qxl_io_log(qdev, - "recreate primary: %dx%d (was %dx%d,%d,%d)\n", - width, height, bo->surf.width, - bo->surf.height, bo->surf.stride, bo->surf.format); - qxl_io_create_primary(qdev, base_offset, bo); + "recreate primary: %dx%d,%d,%d\n", + bo->surf.width, bo->surf.height, + bo->surf.stride, bo->surf.format); + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 15da7ef344a..ec1593a6a56 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1217,7 +1217,7 @@ free: return ret; } -int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) { int r; @@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) return r; } +int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +{ + int r; + mutex_lock(&ctx->scratch_mutex); + r = atom_execute_table_scratch_unlocked(ctx, index, params); + mutex_unlock(&ctx->scratch_mutex); + return r; +} + static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; static void atom_index_iio(struct atom_context *ctx, int base) diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index feba6b8d36b..6d014ddb6b7 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -125,6 +125,7 @@ struct card_info { struct atom_context { struct card_info *card; struct mutex mutex; + struct mutex scratch_mutex; void *bios; uint32_t cmd_table, data_table; uint16_t *iio; @@ -145,6 +146,7 @@ extern int atom_debug; struct atom_context *atom_parse(struct card_info *, void *); int atom_execute_table(struct atom_context *, int, uint32_t *); +int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *); int atom_asic_init(struct atom_context *); void atom_destroy(struct atom_context *); bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 95d5d4ab333..11ba9d21b89 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, memset(&args, 0, sizeof(args)); mutex_lock(&chan->mutex); + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); @@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, if (ASIC_IS_DCE4(rdev)) args.v2.ucHPD_ID = chan->rec.hpd; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); *ack = args.v1.ucReplyStatus; @@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, r = recv_bytes; done: + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); mutex_unlock(&chan->mutex); return r; diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 9c570fb15b8..4157780585a 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -48,6 +48,7 @@ 
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, memset(&args, 0, sizeof(args)); mutex_lock(&chan->mutex); + mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); base = (unsigned char *)rdev->mode_info.atom_context->scratch; @@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { @@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, radeon_atom_copy_swap(buf, base, num, false); done: + mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); mutex_unlock(&chan->mutex); return r; diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 300d971187c..0b2929de9f4 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "btcd.h" #include "r600_dpm.h" #include "cypress_dpm.h" @@ -1170,6 +1171,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = { 25000, 30000, RADEON_SCLK_UP } }; +void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, + u32 *max_clock) +{ + u32 i, clock = 0; + + if ((table == NULL) || (table->count == 0)) { + *max_clock = clock; + return; + } + + for (i = 0; i < table->count; i++) { + if (clock < table->entries[i].clk) + clock = table->entries[i].clk; + } + *max_clock = clock; +} + void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage) { diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h index 1a15e0e4195..3b6f12b7760 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.h +++ b/drivers/gpu/drm/radeon/btc_dpm.h @@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev, struct rv7xx_pl *pl); void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, u32 clock, u16 max_voltage, u16 *voltage); +void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, + u32 *max_clock); void btc_apply_voltage_delta_rules(struct radeon_device *rdev, u16 max_vddc, u16 max_vddci, u16 *vddc, u16 *vddci); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index f5c8c0445a9..11a55e9dad7 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "radeon_ucode.h" #include "cikd.h" #include "r600_dpm.h" diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 377afa504d2..89c01fa6dd8 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) /* init the CE partitions. 
CE only used for gfx on CIK */ radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); - radeon_ring_write(ring, 0xc000); - radeon_ring_write(ring, 0xc000); + radeon_ring_write(ring, 0x8000); + radeon_ring_write(ring, 0x8000); /* setup clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); @@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index c77dad1a457..d748963af08 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -611,16 +611,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev, { unsigned i; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + unsigned index; u32 tmp; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ring_lock(rdev, ring, 5); if (r) { @@ -628,14 +631,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev, return r; } radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); + radeon_ring_write(ring, lower_32_bits(gpu_addr)); + radeon_ring_write(ring, upper_32_bits(gpu_addr)); radeon_ring_write(ring, 1); /* number of DWs to follow */ radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -664,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -683,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) } ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; @@ -701,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 
47d31e91575..9aad0327e4d 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "evergreend.h" #include "r600_dpm.h" #include "cypress_dpm.h" diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index 950af153f30..2fe8cfc966d 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -32,7 +32,7 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { @@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); if (sad_count < 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; } /* program the speaker allocation */ diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index c0bbf68dbc2..f312edf4d50 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -155,7 +155,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 offset, tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; if (!dig || !dig->afmt || !dig->afmt->pin) @@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); - if (sad_count <= 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; + if (sad_count < 0) { + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; } /* program the speaker allocation */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a31f1ca40c6..85995b4e333 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); @@ -3005,7 +3009,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) u32 vgt_cache_invalidation; u32 hdp_host_path_cntl, tmp; u32 disabled_rb_mask; - int i, j, num_shader_engines, ps_thread_count; + int i, j, ps_thread_count; switch (rdev->family) { case CHIP_CYPRESS: @@ -3303,8 +3307,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.tile_config |= ((gb_addr_config & 0x30000000) >> 28) << 12; - num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; - if ((rdev->family >= CHIP_CEDAR) && (rdev->family 
<= CHIP_HEMLOCK)) { u32 efuse_straps_4; u32 efuse_straps_3; diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 2514d659b1b..53abd9b17a5 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -133,7 +133,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { @@ -149,9 +149,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) } sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); - if (sad_count <= 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; + if (sad_count < 0) { + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; } /* program the speaker allocation */ diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 1dd976f447f..9b42001295b 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2725,7 +2725,11 @@ int kv_dpm_init(struct radeon_device *rdev) pi->sram_end = SMC_RAM_END; - pi->enable_nb_dpm = true; + /* Enabling nb dpm on an asrock system prevents dpm from working */ + if (rdev->pdev->subsystem_vendor == 0x1849) + pi->enable_nb_dpm = false; + else + pi->enable_nb_dpm = true; pi->caps_power_containment = true; pi->caps_cac = true; @@ -2740,10 +2744,19 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - if (radeon_bapm == 0) + if (radeon_bapm == -1) { + /* There are stability issues reported on with + * bapm enabled on an asrock system. + */ + if (rdev->pdev->subsystem_vendor == 0x1849) + pi->bapm_enable = false; + else + pi->bapm_enable = true; + } else if (radeon_bapm == 0) { pi->bapm_enable = false; - else + } else { pi->bapm_enable = true; + } pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? 
*/ diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 715b181c624..6d2f16cf2c1 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "nid.h" #include "r600_dpm.h" #include "ni_dpm.h" diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 10f8be0ee17..b53b31a7b76 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) { diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 100189ec5fa..cf0df45d455 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev, { unsigned i; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + unsigned index; u32 tmp; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ring_lock(rdev, ring, 4); if (r) { @@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev, return r; } radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); + radeon_ring_write(ring, lower_32_bits(gpu_addr)); + radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -335,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; - tmp = 0xCAFEDEAD; - writel(tmp, ptr); + gpu_addr = rdev->wb.gpu_addr + index; r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -354,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) } ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; @@ -371,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c 
index 9c61b74ef44..b5c73df8e20 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "r600d.h" #include "r600_dpm.h" #include "atom.h" @@ -1255,7 +1256,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); pt = &ppt->power_tune_table; } else { ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f7c4b226a28..a9717b3fbf1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1133,6 +1133,8 @@ struct radeon_wb { #define R600_WB_EVENT_OFFSET 3072 #define CIK_WB_CP1_WPTR_OFFSET 3328 #define CIK_WB_CP2_WPTR_OFFSET 3584 +#define R600_WB_DMA_RING_TEST_OFFSET 3588 +#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592 /** * struct radeon_pm - power management datas diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 6a03624fada..63ccb8fa799 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -658,12 +658,10 @@ bool radeon_get_bios(struct radeon_device *rdev) r = igp_read_bios_from_vram(rdev); if (r == false) r = radeon_read_bios(rdev); - if (r == false) { + if (r == false) r = radeon_read_disabled_bios(rdev); - } - if (r == false) { + if (r == false) r = radeon_read_platform_bios(rdev); - } if (r == false || rdev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); rdev->bios = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 300c4b3d466..26baa9c05f6 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector) } if (!radeon_connector->edid) { + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + return; + if (rdev->is_atom_bios) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || @@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = radeon_best_single_encoder(connector); enum drm_connector_status ret = connector_status_disconnected; @@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; - + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. 
+ */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* check for edid as well */ @@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; + /* don't fetch the edid from the vbios if ddc fails and runpm is + * enabled so we report disconnected. + */ + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) + ret = connector_status_disconnected; } /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 1c893447d7c..a3e7aed7e68 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -450,7 +450,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo kfree(parser->track); kfree(parser->relocs); kfree(parser->relocs_ptr); - kfree(parser->vm_bos); + drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f41cc1538e4..995a8b1770d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -952,6 +952,7 @@ int radeon_atombios_init(struct radeon_device *rdev) } mutex_init(&rdev->mode_info.atom_context->mutex); + mutex_init(&rdev->mode_info.atom_context->scratch_mutex); radeon_atom_initialize_bios_scratch_regs(rdev->ddev); atom_allocate_fb_scratch(rdev->mode_info.atom_context); return 0; @@ -1130,7 +1131,7 @@ static void radeon_check_arguments(struct radeon_device *rdev) if (radeon_vm_block_size == -1) { /* Total bits covered by PD + PTs */ - unsigned bits = ilog2(radeon_vm_size) + 17; + unsigned bits = ilog2(radeon_vm_size) + 18; /* Make sure the PD is 4K in size up to 8GB address space. 
Above that split equal between PD and PTs */ diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 9a19e52cc65..6b670b0bc47 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; + /* disable native backlight control on older asics */ + else if (rdev->family < CHIP_R600) + use_bl = false; else use_bl = true; } diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 7784911d78e..00fc59762e0 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_AGP) return false; + /* + * Older chips have a HW limitation, they can only generate 40 bits + * of address for "64-bit" MSIs which breaks on some platforms, notably + * IBM POWER servers, so we limit them + */ + if (rdev->family < CHIP_BONAIRE) { + dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); + rdev->pdev->no_64bit_msi = 1; + } + /* force MSI on */ if (radeon_msi == 1) return true; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 3d17af34afa..2456f69efd2 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring } /* and then save the content of the ring */ - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); + *data = drm_malloc_ab(size, sizeof(uint32_t)); if (!*data) { mutex_unlock(&rdev->ring_lock); return 0; @@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, } radeon_ring_unlock_commit(rdev, ring, false); - kfree(data); + drm_free_large(data); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 4532cc76a0a..dfde266529e 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -132,8 +132,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_cs_reloc *list; unsigned i, idx; - list = kmalloc_array(vm->max_pde_used + 2, - sizeof(struct radeon_cs_reloc), GFP_KERNEL); + list = drm_malloc_ab(vm->max_pde_used + 2, + sizeof(struct radeon_cs_reloc)); if (!list) return NULL; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5f6db4629aa..9acb1c3c005 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev) u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; /* FIXME: implement full support */ + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3462b64369b..0a2d36e8110 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if 
(rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 02f7710de47..9031f4b6982 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "rs780d.h" #include "r600_dpm.h" #include "rs780_dpm.h" diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 8a477bf1fdb..c55d653aaf5 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode0 = NULL; struct drm_display_mode *mode1 = NULL; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index e7045b08571..6a5c233361e 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "rv6xxd.h" #include "r600_dpm.h" #include "rv6xx_dpm.h" diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 3c76e1dcdf0..755a8f96fe4 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -24,6 +24,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "rv770d.h" #include "r600_dpm.h" #include "rv770_dpm.h" diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index eeea5b6a177..7d5083dc4ac 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev) u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 9e4d5d7d348..676e6c2ba90 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "sid.h" #include "r600_dpm.h" #include "si_dpm.h" @@ -2916,6 +2917,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; if ((rdev->pm.dpm.new_active_crtc_count > 1) || @@ -2949,6 +2951,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } } + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + 
ps->performance_levels[i].mclk = max_mclk_vddc; + } + } + /* XXX validate the min clocks required for display */ if (disable_mclk_switching) { @@ -6231,7 +6256,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev, if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && index == 0) { /* XXX disable for A0 tahiti */ - si_pi->ulv.supported = true; + si_pi->ulv.supported = false; si_pi->ulv.pl = *pl; si_pi->ulv.one_pcie_lane_in_ulv = false; si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 3f0e8d7b8db..1f8a8833e1b 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "sumod.h" #include "r600_dpm.h" #include "cypress_dpm.h" diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 57f780053b3..b4ec5c4e796 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" +#include "radeon_asic.h" #include "trinityd.h" #include "r600_dpm.h" #include "trinity_dpm.h" diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 6553fd23868..054a79f143a 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -736,7 +736,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = { static void tegra_crtc_disable(struct drm_crtc *crtc) { - struct tegra_dc *dc = to_tegra_dc(crtc); struct drm_device *drm = crtc->dev; struct drm_plane *plane; @@ -752,7 +751,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) } } - drm_vblank_off(drm, dc->pipe); + drm_crtc_vblank_off(crtc); } static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, @@ -841,8 +840,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc, u32 value; int err; - drm_vblank_pre_modeset(crtc->dev, dc->pipe); - err = tegra_crtc_setup_clk(crtc, mode); if (err) { dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); @@ -896,6 +893,8 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) unsigned int syncpt; unsigned long value; + drm_crtc_vblank_off(crtc); + /* hardware initialization */ reset_control_deassert(dc->rst); usleep_range(10000, 20000); @@ -943,7 +942,7 @@ static void tegra_crtc_commit(struct drm_crtc *crtc) value = GENERAL_ACT_REQ | WIN_A_ACT_REQ; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); - drm_vblank_post_modeset(crtc->dev, dc->pipe); + drm_crtc_vblank_on(crtc); } static void tegra_crtc_load_lut(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8f5cec67c47..d395b0bef73 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -709,6 +709,7 @@ out: static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, + const struct ttm_place *place, bool interruptible, bool no_wait_gpu) { @@ -720,8 +721,21 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); list_for_each_entry(bo, &man->lru, lru) { ret = __ttm_bo_reserve(bo, false, true, false, NULL); - if (!ret) + if (!ret) { + if (place && (place->fpfn || place->lpfn)) { + /* Don't evict this BO if it's outside of the + * requested placement range + */ + if (place->fpfn >= (bo->mem.start + bo->mem.size) || + (place->lpfn && place->lpfn <= bo->mem.start)) { + __ttm_bo_unreserve(bo); + ret = -EBUSY; + continue; + } + } + break; + } } if (ret) { @@ 
-782,7 +796,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; if (mem->mm_node) break; - ret = ttm_mem_evict_first(bdev, mem_type, + ret = ttm_mem_evict_first(bdev, mem_type, place, interruptible, no_wait_gpu); if (unlikely(ret != 0)) return ret; @@ -994,9 +1008,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, for (i = 0; i < placement->num_placement; i++) { const struct ttm_place *heap = &placement->placement[i]; - if (mem->mm_node && heap->lpfn != 0 && + if (mem->mm_node && (mem->start < heap->fpfn || - mem->start + mem->num_pages > heap->lpfn)) + (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) continue; *new_flags = heap->flags; @@ -1007,9 +1021,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, for (i = 0; i < placement->num_busy_placement; i++) { const struct ttm_place *heap = &placement->busy_placement[i]; - if (mem->mm_node && heap->lpfn != 0 && + if (mem->mm_node && (mem->start < heap->fpfn || - mem->start + mem->num_pages > heap->lpfn)) + (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) continue; *new_flags = heap->flags; @@ -1233,7 +1247,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); while (!list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, false, false); + ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); if (ret) { if (allow_errors) { return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index bfeb4b1f2ac..21e9b7f8dad 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -246,7 +246,8 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, struct drm_hash_item *hash; int ret; - ret = drm_ht_find_item(&man->resources, user_key, &hash); + ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), + &hash); if (likely(ret != 0)) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7197af15731..25f3c250fd9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err0; } - if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) + /* + * Limit back buffer size to VRAM size. Remove this once + * screen targets are implemented. + */ + if (dev_priv->prim_bb_mem > dev_priv->vram_size) dev_priv->prim_bb_mem = dev_priv->vram_size; mutex_unlock(&dev_priv->hw_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index d2bc2b03d4c..941a7bc0b79 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -187,7 +187,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, * can do this since the caller in the drm core doesn't check anything * which is protected by any looks. 
*/ - drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock_crtc(crtc); drm_modeset_lock_all(dev_priv->dev); /* A lot of the code assumes this */ @@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, ret = 0; out: drm_modeset_unlock_all(dev_priv->dev); - drm_modeset_lock(&crtc->mutex, NULL); + drm_modeset_lock_crtc(crtc); return ret; } @@ -273,7 +273,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) * can do this since the caller in the drm core doesn't check anything * which is protected by any looks. */ - drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock_crtc(crtc); drm_modeset_lock_all(dev_priv->dev); vmw_cursor_update_position(dev_priv, shown, @@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) du->cursor_y + du->hotspot_y); drm_modeset_unlock_all(dev_priv->dev); - drm_modeset_lock(&crtc->mutex, NULL); + drm_modeset_lock_crtc(crtc); return 0; } @@ -1950,6 +1950,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; int i; + u32 assumed_bpp = 2; + + /* + * If using screen objects, then assume 32-bpp because that's what the + * SVGA device is assuming + */ + if (dev_priv->sou_priv) + assumed_bpp = 4; /* Add preferred mode */ { @@ -1960,8 +1968,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, mode->vdisplay = du->pref_height; vmw_guess_mode_timing(mode); - if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, - mode->vdisplay)) { + if (vmw_kms_validate_mode_vram(dev_priv, + mode->hdisplay * assumed_bpp, + mode->vdisplay)) { drm_mode_probed_add(connector, mode); } else { drm_mode_destroy(dev, mode); @@ -1983,7 +1992,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, bmode->vdisplay > max_height) continue; - if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, + if (!vmw_kms_validate_mode_vram(dev_priv, + bmode->hdisplay * assumed_bpp, bmode->vdisplay)) continue; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 73bd9e2e42b..3402033fa52 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev) hdev->hiddev_disconnect(hdev); if (hdev->claimed & HID_CLAIMED_HIDRAW) hidraw_disconnect(hdev); + hdev->claimed = 0; } EXPORT_SYMBOL_GPL(hid_disconnect); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 84c3cb15ccd..8bf61d295ff 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = { [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", + [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev", + [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext", + [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup", + [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup", + [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept", + [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel", }; static const char *relatives[REL_MAX + 1] = { diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index cd9c9e96cf0..7c863738e41 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -298,6 +298,9 @@ #define USB_VENDOR_ID_ELAN 0x04f3 #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f #define 
USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 2df7fddbd11..725f22ca47f 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -695,7 +695,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; case 0x5b: /* TransducerSerialNumber */ - set_bit(MSC_SERIAL, input->mscbit); + usage->type = EV_MSC; + usage->code = MSC_SERIAL; + bit = input->mscbit; + max = MSC_MAX; break; default: goto unknown; @@ -862,6 +865,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x28b: map_key_clear(KEY_FORWARDMAIL); break; case 0x28c: map_key_clear(KEY_SEND); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; + case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; + case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; + case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break; + case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; + case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; + default: goto ignore; } break; diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index f3cb5b0a434..552671ee7c5 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -71,6 +71,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index fcdbde4ec69..3057dfc7e3b 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, {} }; MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 6aac695b168..9b55e673b67 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id) if (ret) goto clock_dis; - data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, - client->name, - data, - g762_groups); + data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name, + data, g762_groups); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto clock_dis; diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c index d2bf2c97ae7..6a30eeea94b 100644 --- a/drivers/hwmon/ibmpowernv.c +++ b/drivers/hwmon/ibmpowernv.c @@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev) opal = of_find_node_by_path("/ibm,opal/sensors"); 
if (!opal) { - dev_err(&pdev->dev, "Opal node 'sensors' not found\n"); + dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n"); return -ENODEV; } @@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void) err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe); if (err) { - pr_err("Platfrom driver probe failed\n"); + if (err != -ENODEV) + pr_err("Platform driver probe failed (%d)\n", err); + goto exit_device_del; } diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c index c92229d321c..afc6b58eaa6 100644 --- a/drivers/hwmon/menf21bmc_hwmon.c +++ b/drivers/hwmon/menf21bmc_hwmon.c @@ -21,6 +21,7 @@ #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/err.h> #define DRV_NAME "menf21bmc_hwmon" diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 823c877a1ec..1991d9032c3 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev) static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); + unsigned long duty; + int ret; - if (ctx->pwm_value) - return pwm_enable(ctx->pwm); - return 0; + if (ctx->pwm_value == 0) + return 0; + + duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM); + ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); + if (ret) + return ret; + return pwm_enable(ctx->pwm); } #endif diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 65ef9664d5d..899bede81b3 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -12,11 +12,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. * ------------------------------------------------------------------------- */ /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 8b10f88b13d..580dbf05c14 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -12,11 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c index 34370090b75..270d84bfc2c 100644 --- a/drivers/i2c/algos/i2c-algo-pcf.c +++ b/drivers/i2c/algos/i2c-algo-pcf.c @@ -14,11 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. 
- * * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and * Frodo Looijaard <frodol@dds.nl>, and also from Martin Bailey * <mbailey@littlefeet-inc.com> diff --git a/drivers/i2c/algos/i2c-algo-pcf.h b/drivers/i2c/algos/i2c-algo-pcf.h index 1ec703ee788..262ee801975 100644 --- a/drivers/i2c/algos/i2c-algo-pcf.h +++ b/drivers/i2c/algos/i2c-algo-pcf.h @@ -12,12 +12,7 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. */ + GNU General Public License for more details. */ /* -------------------------------------------------------------------- */ /* With some changes from Frodo Looijaard <frodol@dds.nl> */ diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 451e305f797..4f2d7886828 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -14,10 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index 2fa21ce9682..45c5c488302 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -12,10 +12,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c index 41fc6837fb8..65e32405497 100644 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ b/drivers/i2c/busses/i2c-amd756-s4882.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index a16f7289135..6c7113d990f 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c @@ -15,10 +15,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ /* diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 917d54588d9..e05a672db3e 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } } - ret = wait_for_completion_io_timeout(&dev->cmd_complete, + ret = wait_for_completion_timeout(&dev->cmd_complete, dev->adapter.timeout); if (ret == 0) { dev_err(dev->dev, "controller timed out\n"); diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c index 8762458ca7d..6f8c0756e35 100644 --- a/drivers/i2c/busses/i2c-au1550.c +++ b/drivers/i2c/busses/i2c-au1550.c @@ -21,10 +21,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/delay.h> diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index f3b89a4698b..5bdbc71698d 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -23,10 +23,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 4d961471912..d15b7c9b921 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -17,10 +17,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 3c20e4bd6dd..edca99dbba2 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -18,10 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index d66b6cbc9ed..5a410ef17ab 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -18,10 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
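Editor's note on the i2c-at91 hunk above: wait_for_completion_io_timeout() is swapped for the plain wait_for_completion_timeout(); both follow the same return convention, which the driver's error path relies on. A hedged userspace sketch of that contract (the wait function here is a stand-in, not the kernel API):

#include <stdio.h>

/*
 * Stand-in for wait_for_completion_timeout(): returns 0 if the timeout
 * expired, otherwise a positive "time left" value when the completion
 * fired. The real kernel primitive sleeps; this stub only models the
 * return contract.
 */
static unsigned long fake_wait_for_completion_timeout(int completed,
						       unsigned long timeout)
{
	return completed ? timeout / 2 : 0;
}

static int do_transfer(int irq_arrived)
{
	unsigned long ret;

	ret = fake_wait_for_completion_timeout(irq_arrived, 100);
	if (ret == 0) {
		/* Same shape as the driver: zero means the controller timed out. */
		fprintf(stderr, "controller timed out\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("ok: %d\n", do_transfer(1));
	printf("timeout: %d\n", do_transfer(0));
	return 0;
}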
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index d31d313ab4f..acb40f95db7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -19,10 +19,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index a7431150acf..373dd4d4776 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -18,10 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index a44ea13d143..76e699f9ed9 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -9,10 +9,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c index 485497066ed..92e8c0ce162 100644 --- a/drivers/i2c/busses/i2c-elektor.c +++ b/drivers/i2c/busses/i2c-elektor.c @@ -12,11 +12,7 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ + GNU General Public License for more details. */ /* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c index 14d2b76de25..b7864cf42a7 100644 --- a/drivers/i2c/busses/i2c-hydra.c +++ b/drivers/i2c/busses/i2c-hydra.c @@ -15,10 +15,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 7cfc183b3d6..6ab4f1cb21f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -15,10 +15,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index c48e46af670..e9fb7cf7861 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -11,11 +11,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, - * USA. - * * Author: * Darius Augulis, Teltonika Inc. * diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h index 097e270955d..2d6929c2bd9 100644 --- a/drivers/i2c/busses/i2c-iop3xx.h +++ b/drivers/i2c/busses/i2c-iop3xx.h @@ -11,11 +11,7 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ + GNU General Public License for more details. */ /* ------------------------------------------------------------------------- */ diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index cf99dbf21fd..113293d275f 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -14,10 +14,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 3f6ecbfb9a5..f2b0ff01163 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -14,10 +14,6 @@ * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. 
* diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c index b170bdffb5d..88eda09e73c 100644 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index ee3a76c7ae9..70b3c915850 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -17,10 +17,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 0dffb0e62c3..26942c159de 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -22,10 +22,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c index 62f55fe624c..d1f625f923c 100644 --- a/drivers/i2c/busses/i2c-parport-light.c +++ b/drivers/i2c/busses/i2c-parport-light.c @@ -18,10 +18,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ------------------------------------------------------------------------ */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index a27aae2d675..a1fac5aa9ba 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ -18,10 +18,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ------------------------------------------------------------------------ */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h index e572f3aac0f..4e129453680 100644 --- a/drivers/i2c/busses/i2c-parport.h +++ b/drivers/i2c/busses/i2c-parport.h @@ -12,10 +12,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ------------------------------------------------------------------------ */ #define PORT_DATA 0 diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index 7a9dce43e11..df1dbc92a02 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index 323f061a316..e0eb4ca0102 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index a6f54ba27e2..67cbec6796a 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -11,10 +11,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 8564768fee3..177834e2d84 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -18,10 +18,6 @@ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 01e967763c2..60a53c169ed 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -14,10 +14,6 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ #include <linux/module.h> diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index e3b0337faeb..65244774bfa 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -14,10 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 8b5e79cb446..4855188747c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -14,10 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c index 0fe505d7abe..2b6219d86b0 100644 --- a/drivers/i2c/busses/i2c-sibyte.c +++ b/drivers/i2c/busses/i2c-sibyte.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index 964e5c6f84a..15ac8395dcd 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c index ac9bc33acef..7d58a40faf2 100644 --- a/drivers/i2c/busses/i2c-sis5595.c +++ b/drivers/i2c/busses/i2c-sis5595.c @@ -11,10 +11,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Note: we assume there can only be one SIS5595 with one SMBus interface */ diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index c6366733008..1e6805b5cef 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -10,10 +10,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 8dc2fc5f74f..44b90442607 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c @@ -10,10 +10,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c index 10855a0b7e7..4c7fc2d4701 100644 --- a/drivers/i2c/busses/i2c-taos-evm.c +++ b/drivers/i2c/busses/i2c-taos-evm.c @@ -13,10 +13,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c index f4a1ed75761..59b1d233ca7 100644 --- a/drivers/i2c/busses/i2c-via.c +++ b/drivers/i2c/busses/i2c-via.c @@ -12,10 +12,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 6841200b6e5..0ee2646f3b0 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -13,10 +13,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index ade9223912d..cc65ea0b818 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * * This code was implemented by Mocean Laboratories AB when porting linux * to the automotive development board Russellville. 
The copyright holder diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index ff3f5747e43..5153354b1a6 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -17,10 +17,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index f24cc64e2e8..90e32295930 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -10,11 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 2f90ac6a7f7..f43b4e11647 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -10,12 +10,7 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. */ + GNU General Public License for more details. */ /* ------------------------------------------------------------------------- */ /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>. @@ -670,6 +665,9 @@ static int i2c_device_remove(struct device *dev) status = driver->remove(client); } + if (dev->of_node) + irq_dispose_mapping(client->irq); + dev_pm_domain_detach(&client->dev, true); return status; } diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 18a8fd21d2c..17700bfddcf 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -10,11 +10,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. */ #include <linux/rwsem.h> diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 80b47e8ce03..71c7a3975b6 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -14,11 +14,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - MA 02110-1301 USA. */ /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. 
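Editor's note on the i2c-core hunk above: i2c_device_remove() now disposes the interrupt mapping, but only for devices instantiated from the device tree (dev->of_node set), since only the DT path created a mapping at registration time. A rough userspace sketch of that create/dispose pairing, with stand-ins for the kernel helpers:

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the kernel helpers; they only model create/dispose pairing. */
static int of_irq_get_fake(void)            { puts("map IRQ");    return 42; }
static void irq_dispose_mapping_fake(int v) { printf("unmap IRQ %d\n", v); }

struct fake_client {
	bool from_devicetree;	/* models dev->of_node being non-NULL */
	int irq;
};

static void fake_probe(struct fake_client *c)
{
	if (c->from_devicetree)
		c->irq = of_irq_get_fake();	/* mapping created only for DT devices */
}

static void fake_remove(struct fake_client *c)
{
	/* Mirror of the patch: dispose the mapping only if DT created one. */
	if (c->from_devicetree)
		irq_dispose_mapping_fake(c->irq);
}

int main(void)
{
	struct fake_client dt = { .from_devicetree = true };
	struct fake_client board = { .from_devicetree = false };

	fake_probe(&dt);
	fake_remove(&dt);	/* map + unmap */
	fake_probe(&board);
	fake_remove(&board);	/* neither happens */
	return 0;
}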
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index fc99f0d6b4a..9ebf9cb4ad7 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -13,11 +13,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. */ #include <linux/kernel.h> diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c index d241aa295d9..af2a94e1140 100644 --- a/drivers/i2c/i2c-stub.c +++ b/drivers/i2c/i2c-stub.c @@ -13,10 +13,6 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define DEBUG 1 diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 22c096ce39a..513bd6d1429 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -44,6 +44,9 @@ #define BMC150_ACCEL_REG_INT_STATUS_2 0x0B #define BMC150_ACCEL_ANY_MOTION_MASK 0x07 +#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0) +#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1) +#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2) #define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3) #define BMC150_ACCEL_REG_PMU_LPW 0x11 @@ -92,9 +95,9 @@ #define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF /* Slope duration in terms of number of samples */ -#define BMC150_ACCEL_DEF_SLOPE_DURATION 2 +#define BMC150_ACCEL_DEF_SLOPE_DURATION 1 /* in terms of multiples of g's/LSB, based on range */ -#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 5 +#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1 #define BMC150_ACCEL_REG_XOUT_L 0x02 @@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on) if (ret < 0) { dev_err(&data->client->dev, "Failed: bmc150_accel_set_power_state for %d\n", on); + if (on) + pm_runtime_put_noidle(&data->client->dev); + return ret; } @@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev, ret = bmc150_accel_setup_any_motion_interrupt(data, state); if (ret < 0) { + bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = { static const struct iio_event_spec bmc150_accel_event = { .type = IIO_EV_TYPE_ROC, - .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_EITHER, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_PERIOD) @@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig, else ret = bmc150_accel_setup_new_data_interrupt(data, state); if (ret < 0) { + bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private) else dir = IIO_EV_DIR_RISING; - if (ret & BMC150_ACCEL_ANY_MOTION_MASK) + if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, + 0, + IIO_MOD_X, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & 
BMC150_ACCEL_ANY_MOTION_BIT_Y) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, - IIO_MOD_X_OR_Y_OR_Z, + IIO_MOD_Y, IIO_EV_TYPE_ROC, - IIO_EV_DIR_EITHER), + dir), + data->timestamp); + if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, + 0, + IIO_MOD_Z, + IIO_EV_TYPE_ROC, + dir), data->timestamp); ack_intr_status: if (!data->dready_trigger_on) @@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct bmc150_accel_data *data = iio_priv(indio_dev); + int ret; dev_dbg(&data->client->dev, __func__); + ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); + if (ret < 0) + return -EAGAIN; - return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); + return 0; } static int bmc150_accel_runtime_resume(struct device *dev) diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 98909a9e284..320aa72c034 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index) return ret; } + ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 | + KXCJK1013_REG_CTRL1_BIT_GSEL1); ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); @@ -894,7 +896,7 @@ static const struct attribute_group kxcjk1013_attrs_group = { static const struct iio_event_spec kxcjk1013_event = { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_EITHER, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_PERIOD) diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index b58d6302521..d095efe1ba1 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev) static const struct mcb_device_id men_z188_ids[] = { { .device = 0xbc }, + { } }; MODULE_DEVICE_TABLE(mcb, men_z188_ids); diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c index 1665c8e4b62..e18bc678225 100644 --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c @@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) goto st_sensors_free_memory; } - for (i = 0; i < n * num_data_channels; i++) { + for (i = 0; i < n * byte_for_channel; i++) { if (i < n) buf[i] = rx_array[i]; else diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c index 1f967e0d688..d2fa526740c 100644 --- a/drivers/iio/gyro/bmg160.c +++ b/drivers/iio/gyro/bmg160.c @@ -67,6 +67,9 @@ #define BMG160_REG_INT_EN_0 0x15 #define BMG160_DATA_ENABLE_INT BIT(7) +#define BMG160_REG_INT_EN_1 0x16 +#define BMG160_INT1_BIT_OD BIT(1) + #define BMG160_REG_XOUT_L 0x02 #define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2)) @@ -82,6 +85,9 @@ #define BMG160_REG_INT_STATUS_2 0x0B #define BMG160_ANY_MOTION_MASK 0x07 +#define BMG160_ANY_MOTION_BIT_X BIT(0) +#define BMG160_ANY_MOTION_BIT_Y BIT(1) +#define BMG160_ANY_MOTION_BIT_Z BIT(2) #define BMG160_REG_TEMP 0x08 #define BMG160_TEMP_CENTER_VAL 23 @@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data) data->slope_thres = ret; /* Set default interrupt mode */ + ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1); + if (ret < 
0) { + dev_err(&data->client->dev, "Error reading reg_int_en_1\n"); + return ret; + } + ret &= ~BMG160_INT1_BIT_OD; + ret = i2c_smbus_write_byte_data(data->client, + BMG160_REG_INT_EN_1, ret); + if (ret < 0) { + dev_err(&data->client->dev, "Error writing reg_int_en_1\n"); + return ret; + } + ret = i2c_smbus_write_byte_data(data->client, BMG160_REG_INT_RST_LATCH, BMG160_INT_MODE_LATCH_INT | @@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on) if (ret < 0) { dev_err(&data->client->dev, "Failed: bmg160_set_power_state for %d\n", on); + if (on) + pm_runtime_put_noidle(&data->client->dev); + return ret; } #endif @@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev, ret = bmg160_setup_any_motion_interrupt(data, state); if (ret < 0) { + bmg160_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = { static const struct iio_event_spec bmg160_event = { .type = IIO_EV_TYPE_ROC, - .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_EITHER, .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE) }; @@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig, else ret = bmg160_setup_new_data_interrupt(data, state); if (ret < 0) { + bmg160_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } @@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private) else dir = IIO_EV_DIR_FALLING; - if (ret & BMG160_ANY_MOTION_MASK) + if (ret & BMG160_ANY_MOTION_BIT_X) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, 0, - IIO_MOD_X_OR_Y_OR_Z, + IIO_MOD_X, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & BMG160_ANY_MOTION_BIT_Y) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, + 0, + IIO_MOD_Y, + IIO_EV_TYPE_ROC, + dir), + data->timestamp); + if (ret & BMG160_ANY_MOTION_BIT_Z) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, + 0, + IIO_MOD_Z, IIO_EV_TYPE_ROC, dir), data->timestamp); @@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev) { struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); struct bmg160_data *data = iio_priv(indio_dev); + int ret; + + ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND); + if (ret < 0) { + dev_err(&data->client->dev, "set mode failed\n"); + return -EAGAIN; + } - return bmg160_set_mode(data, BMG160_MODE_SUSPEND); + return 0; } static int bmg160_runtime_resume(struct device *dev) diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c index a15006efa13..0763b863257 100644 --- a/drivers/iio/light/tsl4531.c +++ b/drivers/iio/light/tsl4531.c @@ -230,9 +230,12 @@ static int tsl4531_resume(struct device *dev) return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL, TSL4531_MODE_NORMAL); } -#endif static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume); +#define TSL4531_PM_OPS (&tsl4531_pm_ops) +#else +#define TSL4531_PM_OPS NULL +#endif static const struct i2c_device_id tsl4531_id[] = { { "tsl4531", 0 }, @@ -243,7 +246,7 @@ MODULE_DEVICE_TABLE(i2c, tsl4531_id); static struct i2c_driver tsl4531_driver = { .driver = { .name = TSL4531_DRV_NAME, - .pm = &tsl4531_pm_ops, + .pm = TSL4531_PM_OPS, .owner = THIS_MODULE, }, .probe = tsl4531_probe, diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 5e780ef206f..8349cc0fdf6 100644 --- a/drivers/iio/proximity/as3935.c +++ 
b/drivers/iio/proximity/as3935.c @@ -330,7 +330,7 @@ static int as3935_probe(struct spi_device *spi) return -EINVAL; } - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(st)); + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index bda5994ceb6..8b72cf392b3 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], &mflow->reg_id[i]); if (err) - goto err_free; + goto err_create_flow; i++; } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); if (err) - goto err_free; + goto err_create_flow; + i++; } return &mflow->ibflow; +err_create_flow: + while (i) { + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]); + i--; + } err_free: kfree(mflow); return ERR_PTR(err); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 0bea5776bcb..10641b7816f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* * FIXME: Use devattr.max_sge - 2 for max_send_sge as - * work-around for RDMA_READ.. + * work-around for RDMA_READs with ConnectX-2. + * + * Also, still make sure to have at least two SGEs for + * outgoing control PDU responses. */ - attr.cap.max_send_sge = device->dev_attr.max_sge - 2; + attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); isert_conn->max_sge = attr.cap.max_send_sge; attr.cap.max_recv_sge = 1; @@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device) struct isert_cq_desc *cq_desc; struct ib_device_attr *dev_attr; int ret = 0, i, j; + int max_rx_cqe, max_tx_cqe; dev_attr = &device->dev_attr; ret = isert_query_device(ib_dev, dev_attr); if (ret) return ret; + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); + /* asign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { @@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_rx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_RX_CQ_LEN, i); + max_rx_cqe, i); if (IS_ERR(device->dev_rx_cq[i])) { ret = PTR_ERR(device->dev_rx_cq[i]); device->dev_rx_cq[i] = NULL; @@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_TX_CQ_LEN, i); + max_tx_cqe, i); if (IS_ERR(device->dev_tx_cq[i])) { ret = PTR_ERR(device->dev_tx_cq[i]); device->dev_tx_cq[i] = NULL; @@ -803,14 +810,25 @@ wake_up: complete(&isert_conn->conn_wait); } -static void +static int isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) { - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + struct isert_conn *isert_conn; + + if (!cma_id->qp) { + struct isert_np *isert_np = cma_id->context; + + isert_np->np_cm_id = NULL; + return -1; + } + + isert_conn = (struct isert_conn *)cma_id->context; isert_conn->disconnect = disconnect; INIT_WORK(&isert_conn->conn_logout_work, 
isert_disconnect_work); schedule_work(&isert_conn->conn_logout_work); + + return 0; } static int @@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = isert_connect_request(cma_id, event); + if (ret) + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + event->event, ret); break; case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); @@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ disconnect = true; case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ - isert_disconnected_handler(cma_id, disconnect); + ret = isert_disconnected_handler(cma_id, disconnect); break; case RDMA_CM_EVENT_CONNECT_ERROR: default: @@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) break; } - if (ret != 0) { - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", - event->event, ret); - dump_stack(); - } - return ret; } @@ -2185,7 +2200,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2871,7 +2886,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, - &isert_cmd->tx_desc.send_wr, true); + &isert_cmd->tx_desc.send_wr, false); isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; wr->send_wr_num += 1; } @@ -3140,7 +3155,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) accept_wait: ret = down_interruptible(&isert_np->np_sem); - if (max_accept > 5) + if (ret || max_accept > 5) return -ENODEV; spin_lock_bh(&np->np_thread_lock); @@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = (struct isert_np *)np->np_context; - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->np_cm_id) + rdma_destroy_id(isert_np->np_cm_id); np->np_context = NULL; kfree(isert_np); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7206547c13c..dc829682701 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) if (!qp_init) goto out; +retry: ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->rq_size + srp_sq_size, 0); if (IS_ERR(ch->cq)) { @@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ch->qp = ib_create_qp(sdev->pd, qp_init); if (IS_ERR(ch->qp)) { ret = PTR_ERR(ch->qp); + if (ret == -ENOMEM) { + srp_sq_size /= 2; + if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { + ib_destroy_cq(ch->cq); + goto retry; + } + } printk(KERN_ERR "failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 2ed7905a068..fc55f0d15b7 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id } ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; - usb_fill_bulk_urb(xpad->bulk_out, udev, - usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), - xpad->bdata, 
XPAD_PKT_LEN, xpad_bulk_out, xpad); + if (usb_endpoint_is_bulk_out(ep_irq_in)) { + usb_fill_bulk_urb(xpad->bulk_out, udev, + usb_sndbulkpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad); + } else { + usb_fill_int_urb(xpad->bulk_out, udev, + usb_sndintpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad, 0); + } /* * Submit the int URB immediately rather than waiting for open diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c index 62abe2c1667..f8502bb2917 100644 --- a/drivers/input/keyboard/opencores-kbd.c +++ b/drivers/input/keyboard/opencores-kbd.c @@ -70,7 +70,7 @@ static int opencores_kbd_probe(struct platform_device *pdev) opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(opencores_kbd->addr)) - error = PTR_ERR(opencores_kbd->addr); + return PTR_ERR(opencores_kbd->addr); input->name = pdev->name; input->phys = "opencores-kbd/input0"; diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c index c6727dda68f..ef5e67fb567 100644 --- a/drivers/input/keyboard/stmpe-keypad.c +++ b/drivers/input/keyboard/stmpe-keypad.c @@ -86,7 +86,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = { .max_cols = 8, .max_rows = 12, .col_gpios = 0x0000ff, /* GPIO 0 - 7*/ - .row_gpios = 0x1fef00, /* GPIO 8-14, 16-20 */ + .row_gpios = 0x1f7f00, /* GPIO 8-14, 16-20 */ }, [STMPE2403] = { .auto_increment = true, diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 719410feb84..afed8e2b2f9 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1381,7 +1381,7 @@ static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev, pcu->ofn_reg_addr = value; mutex_unlock(&pcu->cmd_mutex); - return error ?: count; + return count; } static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR, diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c index 7b1fde93799..ef6a9d650d6 100644 --- a/drivers/input/misc/max77693-haptic.c +++ b/drivers/input/misc/max77693-haptic.c @@ -194,7 +194,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct max77693_haptic *haptic = input_get_drvdata(dev); - uint64_t period_mag_multi; + u64 period_mag_multi; haptic->magnitude = effect->u.rumble.strong_magnitude; if (!haptic->magnitude) @@ -205,8 +205,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data, * The formula to convert magnitude to pwm_duty as follows: * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF) */ - period_mag_multi = (int64_t)(haptic->pwm_dev->period * - haptic->magnitude); + period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude; haptic->pwm_duty = (unsigned int)(period_mag_multi >> MAX_MAGNITUDE_SHIFT); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 73560475356..e097f1ab427 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -55,7 +55,7 @@ static int soc_button_lookup_gpio(struct device *dev, int acpi_index) struct gpio_desc *desc; int gpio; - desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index); + desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index, GPIOD_ASIS); if (IS_ERR(desc)) return PTR_ERR(desc); diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index fb3b63b2f85..8400a1a34d8 100644 --- 
a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -85,6 +85,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, pwr); + device_init_wakeup(&pdev->dev, true); return 0; } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 2b0ae8cc8e5..d125a019383 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1156,7 +1156,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; - if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ + /* + * Check if we are dealing with a bare PS/2 packet, presumably from + * a device connected to the external PS/2 port. Because bare PS/2 + * protocol does not have enough constant bits to self-synchronize + * properly we only do this if the device is fully synchronized. + */ + if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { if (psmouse->pktcnt == 3) { alps_report_bare_ps2_packet(psmouse, psmouse->packet, true); @@ -1180,12 +1186,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) } /* Bytes 2 - pktsize should have 0 in the highest bit */ - if ((priv->proto_version < ALPS_PROTO_V5) && + if (priv->proto_version < ALPS_PROTO_V5 && psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); + + if (priv->proto_version == ALPS_PROTO_V3 && + psmouse->pktcnt == psmouse->pktsize) { + /* + * Some Dell boxes, such as Latitude E6440 or E7440 + * with closed lid, quite often smash last byte of + * otherwise valid packet with 0xff. Given that the + * next packet is very likely to be valid let's + * report PSMOUSE_FULL_PACKET but not process data, + * rather than reporting PSMOUSE_BAD_DATA and + * filling the logs. + */ + return PSMOUSE_FULL_PACKET; + } + return PSMOUSE_BAD_DATA; } @@ -2389,6 +2410,9 @@ int alps_init(struct psmouse *psmouse) /* We are having trouble resyncing ALPS touchpads so disable it for now */ psmouse->resync_time = 0; + /* Allow 2 invalid packets without resetting device */ + psmouse->resetafter = psmouse->pktsize * 2; + return 0; init_fail: diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 06fc6e76ffb..f2b97802640 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse, int x, y; u32 t; - if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev, - !tp_dev, - psmouse_fmt("Unexpected trackpoint message\n"))) { - if (etd->debug == 1) - elantech_packet_dump(psmouse); - return; - } - t = get_unaligned_le32(&packet[0]); switch (t & ~7U) { @@ -563,6 +555,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse) } else { input_report_key(dev, BTN_LEFT, packet[0] & 0x01); input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + input_report_key(dev, BTN_MIDDLE, packet[0] & 0x04); } input_mt_report_pointer_emulation(dev, true); @@ -792,6 +785,9 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) unsigned char packet_type = packet[3] & 0x03; bool sanity_check; + if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) + return PACKET_TRACKPOINT; + /* * Sanity check based on the constant bits of a packet. 
* The constant bits change depending on the value of @@ -877,10 +873,19 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) case 4: packet_type = elantech_packet_check_v4(psmouse); - if (packet_type == PACKET_UNKNOWN) + switch (packet_type) { + case PACKET_UNKNOWN: return PSMOUSE_BAD_DATA; - elantech_report_absolute_v4(psmouse, packet_type); + case PACKET_TRACKPOINT: + elantech_report_trackpoint(psmouse, packet_type); + break; + + default: + elantech_report_absolute_v4(psmouse, packet_type); + break; + } + break; } @@ -1120,6 +1125,22 @@ static void elantech_set_buttonpad_prop(struct psmouse *psmouse) } /* + * Some hw_version 4 models do have a middle button + */ +static const struct dmi_system_id elantech_dmi_has_middle_button[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Fujitsu H730 has a middle button */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), + }, + }, +#endif + { } +}; + +/* * Set the appropriate event bits for the input subsystem */ static int elantech_set_input_params(struct psmouse *psmouse) @@ -1138,6 +1159,8 @@ static int elantech_set_input_params(struct psmouse *psmouse) __clear_bit(EV_REL, dev->evbit); __set_bit(BTN_LEFT, dev->keybit); + if (dmi_check_system(elantech_dmi_has_middle_button)) + __set_bit(BTN_MIDDLE, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); __set_bit(BTN_TOUCH, dev->keybit); @@ -1299,6 +1322,7 @@ ELANTECH_INT_ATTR(reg_25, 0x25); ELANTECH_INT_ATTR(reg_26, 0x26); ELANTECH_INT_ATTR(debug, 0); ELANTECH_INT_ATTR(paritycheck, 0); +ELANTECH_INT_ATTR(crc_enabled, 0); static struct attribute *elantech_attrs[] = { &psmouse_attr_reg_07.dattr.attr, @@ -1313,6 +1337,7 @@ static struct attribute *elantech_attrs[] = { &psmouse_attr_reg_26.dattr.attr, &psmouse_attr_debug.dattr.attr, &psmouse_attr_paritycheck.dattr.attr, + &psmouse_attr_crc_enabled.dattr.attr, NULL }; @@ -1439,6 +1464,22 @@ static int elantech_reconnect(struct psmouse *psmouse) } /* + * Some hw_version 4 models do not work with crc_disabled + */ +static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Fujitsu H730 does not work with crc_enabled == 0 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), + }, + }, +#endif + { } +}; + +/* * Some hw_version 3 models go into error state when we try to set * bit 3 and/or bit 1 of r10. */ @@ -1513,7 +1554,8 @@ static int elantech_set_properties(struct elantech_data *etd) * The signatures of v3 and v4 packets change depending on the * value of this hardware flag. */ - etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); + etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 || + dmi_check_system(elantech_dmi_force_crc_enabled); /* Enable real hardware resolution on hw_version 3 ? 
*/ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table); diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 26994f6a2b2..95a3a6e2faf 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1536,16 +1536,9 @@ static int psmouse_reconnect(struct serio *serio) { struct psmouse *psmouse = serio_get_drvdata(serio); struct psmouse *parent = NULL; - struct serio_driver *drv = serio->drv; unsigned char type; int rc = -1; - if (!drv || !psmouse) { - psmouse_dbg(psmouse, - "reconnect request, but serio is disconnected, ignoring...\n"); - return -1; - } - mutex_lock(&psmouse_mutex); if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 9031a0a28ea..f9472920d98 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -135,14 +135,18 @@ static const struct min_max_quirk min_max_pnpid_table[] = { 1232, 5710, 1156, 4696 }, { - (const char * const []){"LEN0034", "LEN0036", "LEN2002", - "LEN2004", NULL}, + (const char * const []){"LEN0034", "LEN0036", "LEN0039", + "LEN2002", "LEN2004", NULL}, 1024, 5112, 2024, 4832 }, { (const char * const []){"LEN2001", NULL}, 1024, 5022, 2508, 4832 }, + { + (const char * const []){"LEN2006", NULL}, + 1264, 5675, 1171, 4688 + }, { } }; @@ -163,6 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0036", /* T440 */ "LEN0037", "LEN0038", + "LEN0039", /* T440s */ "LEN0041", "LEN0042", /* Yoga */ "LEN0045", diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c index 38298232124..abd494411e6 100644 --- a/drivers/input/mouse/vsxxxaa.c +++ b/drivers/input/mouse/vsxxxaa.c @@ -128,7 +128,7 @@ static void vsxxxaa_drop_bytes(struct vsxxxaa *mouse, int num) if (num >= mouse->count) { mouse->count = 0; } else { - memmove(mouse->buf, mouse->buf + num - 1, BUFLEN - num); + memmove(mouse->buf, mouse->buf + num, BUFLEN - num); mouse->count -= num; } } diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c index cce69d6b958..58781c8a8ae 100644 --- a/drivers/input/serio/altera_ps2.c +++ b/drivers/input/serio/altera_ps2.c @@ -37,7 +37,7 @@ static irqreturn_t altera_ps2_rxint(int irq, void *dev_id) { struct ps2if *ps2if = dev_id; unsigned int status; - int handled = IRQ_NONE; + irqreturn_t handled = IRQ_NONE; while ((status = readl(ps2if->base)) & 0xffff0000) { serio_interrupt(ps2if->io, status & 0xff, 0); @@ -74,7 +74,7 @@ static void altera_ps2_close(struct serio *io) { struct ps2if *ps2if = io->port_data; - writel(0, ps2if->base); /* disable rx irq */ + writel(0, ps2if->base + 4); /* disable rx irq */ } /* diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a0bcbb64d06..faeeb137246 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -207,17 +207,282 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { }; /* - * Some laptops do implement active multiplexing mode correctly; - * unfortunately they are in minority. + * Some Fujitsu notebooks are having trouble with touchpads if + * active multiplexing mode is activated. Luckily they don't have + * external PS/2 ports so we can safely disable it. + * ... apparently some Toshibas don't like MUX mode either and + * die horrible death on reboot. 
*/ -static const struct dmi_system_id __initconst i8042_dmi_mux_table[] = { +static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + { + /* Fujitsu Lifebook P7010/P7010D */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), + }, + }, + { + /* Fujitsu Lifebook P7010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), + }, + }, + { + /* Fujitsu Lifebook P5020D */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), + }, + }, + { + /* Fujitsu Lifebook S2000 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), + }, + }, + { + /* Fujitsu Lifebook S6230 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), + }, + }, + { + /* Fujitsu T70H */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), + }, + }, + { + /* Fujitsu-Siemens Lifebook T3010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), + }, + }, + { + /* Fujitsu-Siemens Lifebook E4010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), + }, + }, + { + /* Fujitsu-Siemens Amilo Pro 2010 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), + }, + }, + { + /* Fujitsu-Siemens Amilo Pro 2030 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), + }, + }, + { + /* + * No data is coming from the touchscreen unless KBC + * is in legacy mode. + */ + /* Panasonic CF-29 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), + DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), + }, + }, + { + /* + * HP Pavilion DV4017EA - + * errors on MUX ports are reported without raising AUXDATA + * causing "spurious NAK" messages. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), + }, + }, + { + /* + * HP Pavilion ZT1000 - + * like DV4017EA does not raise AUXERR for errors on MUX ports. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"), + }, + }, + { + /* + * HP Pavilion DV4270ca - + * like DV4017EA does not raise AUXERR for errors on MUX ports. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), + }, + }, + { + /* Sharp Actius MM20 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), + DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), + }, + }, + { + /* Sony Vaio FS-115b */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), + }, + }, + { + /* + * Sony Vaio FZ-240E - + * reset and GET ID commands issued via KBD port are + * sometimes being delivered to AUX3. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), + }, + }, { /* - * Panasonic CF-18 needs to be in MUX mode since the - * touchscreen is on serio3 and it also has touchpad. + * Most (all?) VAIOs do not have external PS/2 ports nor + * they implement active multiplexing properly, and + * MUX discovery usually messes up keyboard/touchpad. */ .matches = { - DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VAIO"), + }, + }, + { + /* Amoi M636/A737 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), + }, + }, + { + /* Lenovo 3000 n100 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + }, + }, + { + /* Acer Aspire 5710 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), + }, + }, + { + /* Gericom Bellagio */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), + DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), + }, + }, + { + /* IBM 2656 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), + DMI_MATCH(DMI_PRODUCT_NAME, "2656"), + }, + }, + { + /* Dell XPS M1530 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), + }, + }, + { + /* Compal HEL80I */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), + }, + }, + { + /* Dell Vostro 1510 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), + }, + }, + { + /* Acer Aspire 5536 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + }, + }, + { + /* Dell Vostro V13 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + }, + }, + { + /* Newer HP Pavilion dv4 models */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + }, + }, + { + /* Asus X450LCP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + }, + }, + { + /* Avatar AVIU-145A6 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), 
}, }, { } @@ -364,6 +629,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { }, }, { + /* Fujitsu A544 laptop */ + /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), + }, + }, + { + /* Fujitsu AH544 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + }, + }, + { /* Fujitsu U574 laptop */ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ .matches = { @@ -740,8 +1021,8 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_noloop_table)) i8042_noloop = true; - if (dmi_check_system(i8042_dmi_mux_table)) - i8042_nomux = false; + if (dmi_check_system(i8042_dmi_nomux_table)) + i8042_nomux = true; if (dmi_check_system(i8042_dmi_notimeout_table)) i8042_notimeout = true; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 9a97c2b1092..f5a98af3b32 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -39,7 +39,7 @@ static bool i8042_noaux; module_param_named(noaux, i8042_noaux, bool, 0); MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); -static bool i8042_nomux = true; +static bool i8042_nomux; module_param_named(nomux, i8042_nomux, bool, 0); MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index d0ef91fc87d..b1ae7799596 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -70,11 +70,11 @@ * Documentation/input/input-programming.txt for more details. */ -static int abs_x[3] = {350, 3900, 5}; +static int abs_x[3] = {150, 4000, 5}; module_param_array(abs_x, int, NULL, 0); MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz"); -static int abs_y[3] = {320, 3750, 40}; +static int abs_y[3] = {200, 4000, 40}; module_param_array(abs_y, int, NULL, 0); MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz"); diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 3e238cd049e..6a2e168c3ab 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -43,6 +43,7 @@ #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF +#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid) #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) #define ARMADA_375_PPI_CAUSE (0x10) @@ -406,19 +407,29 @@ static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_get_chip(irq); - unsigned long irqmap, irqn; + unsigned long irqmap, irqn, irqsrc, cpuid; unsigned int cascade_irq; chained_irq_enter(chip, desc); irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE); - - if (irqmap & BIT(0)) { - armada_370_xp_handle_msi_irq(NULL, true); - irqmap &= ~BIT(0); - } + cpuid = cpu_logical_map(smp_processor_id()); for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { + irqsrc = readl_relaxed(main_int_base + + ARMADA_370_XP_INT_SOURCE_CTL(irqn)); + + /* Check if the interrupt is not masked on current CPU. + * Test IRQ (0-1) and FIQ (8-9) mask bits. 
+ */ + if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid))) + continue; + + if (irqn == 1) { + armada_370_xp_handle_msi_irq(NULL, true); + continue; + } + cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn); generic_handle_irq(cascade_irq); } diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 6ae3cdee068..cc4f9d80122 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, } ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, - handle_level_irq, 0, 0, - IRQCHIP_SKIP_SET_WAKE); + handle_fasteoi_irq, + IRQ_NOREQUEST | IRQ_NOPROBE | + IRQ_NOAUTOEN, 0, 0); if (ret) goto err_domain_remove; @@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, gc->unused = 0; gc->wake_enabled = ~0; gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; - gc->chip_types[0].handler = handle_fasteoi_irq; gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index b9f4fb808e4..5fb38a2ac22 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn, int parent_irq; parent_irq = irq_of_parse_and_map(dn, irq); - if (parent_irq < 0) { + if (!parent_irq) { pr_err("failed to map interrupt %d\n", irq); - return parent_irq; + return -EINVAL; } data->irq_map_mask |= be32_to_cpup(map_mask + irq); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index c15c840987d..14691a4cb84 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, __raw_writel(0xffffffff, data->base + CPU_CLEAR); data->parent_irq = irq_of_parse_and_map(np, 0); - if (data->parent_irq < 0) { + if (!data->parent_irq) { pr_err("failed to find parent interrupt\n"); - ret = data->parent_irq; + ret = -EINVAL; goto out_unmap; } diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index aa29198fca3..7440c58b8e6 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -9,26 +9,21 @@ * published by the Free Software Foundation. 
*/ -#include <linux/module.h> -#include <linux/kernel.h> +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/err.h> #include <linux/init.h> +#include <linux/kernel.h> +#include <linux/leds.h> #include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/device.h> #include <linux/timer.h> -#include <linux/err.h> -#include <linux/ctype.h> -#include <linux/leds.h> #include "leds.h" static struct class *leds_class; -static void led_update_brightness(struct led_classdev *led_cdev) -{ - if (led_cdev->brightness_get) - led_cdev->brightness = led_cdev->brightness_get(led_cdev); -} - static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -59,14 +54,14 @@ static ssize_t brightness_store(struct device *dev, } static DEVICE_ATTR_RW(brightness); -static ssize_t led_max_brightness_show(struct device *dev, +static ssize_t max_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", led_cdev->max_brightness); } -static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL); +static DEVICE_ATTR_RO(max_brightness); #ifdef CONFIG_LEDS_TRIGGERS static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 71b40d3bf77..aaa8eba9099 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -12,10 +12,11 @@ */ #include <linux/kernel.h> +#include <linux/leds.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/rwsem.h> -#include <linux/leds.h> #include "leds.h" DECLARE_RWSEM(leds_list_lock); @@ -126,3 +127,19 @@ void led_set_brightness(struct led_classdev *led_cdev, __led_set_brightness(led_cdev, brightness); } EXPORT_SYMBOL(led_set_brightness); + +int led_update_brightness(struct led_classdev *led_cdev) +{ + int ret = 0; + + if (led_cdev->brightness_get) { + ret = led_cdev->brightness_get(led_cdev); + if (ret >= 0) { + led_cdev->brightness = ret; + return 0; + } + } + + return ret; +} +EXPORT_SYMBOL(led_update_brightness); diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c index 1c4ed5510f3..75717ba68ae 100644 --- a/drivers/leds/leds-gpio-register.c +++ b/drivers/leds/leds-gpio-register.c @@ -7,9 +7,9 @@ * Free Software Foundation. */ #include <linux/err.h> +#include <linux/leds.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/leds.h> /** * gpio_led_register_device - register a gpio-led device @@ -28,6 +28,9 @@ struct platform_device *__init gpio_led_register_device( struct platform_device *ret; struct gpio_led_platform_data _pdata = *pdata; + if (!pdata->num_leds) + return ERR_PTR(-EINVAL); + _pdata.leds = kmemdup(pdata->leds, pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL); if (!_pdata.leds) diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 57ff20fecf5..b4518c8751c 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -10,17 +10,17 @@ * published by the Free Software Foundation. 
* */ -#include <linux/kernel.h> -#include <linux/platform_device.h> +#include <linux/err.h> #include <linux/gpio.h> +#include <linux/kernel.h> #include <linux/leds.h> +#include <linux/module.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_gpio.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/workqueue.h> -#include <linux/module.h> -#include <linux/err.h> struct gpio_led_data { struct led_classdev cdev; @@ -36,7 +36,7 @@ struct gpio_led_data { static void gpio_led_work(struct work_struct *work) { - struct gpio_led_data *led_dat = + struct gpio_led_data *led_dat = container_of(work, struct gpio_led_data, work); if (led_dat->blinking) { @@ -235,14 +235,12 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev) } #endif /* CONFIG_OF_GPIO */ - static int gpio_led_probe(struct platform_device *pdev) { struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev); struct gpio_leds_priv *priv; int i, ret = 0; - if (pdata && pdata->num_leds) { priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(pdata->num_leds), diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c index 8e1abdcd4c9..53144fb9616 100644 --- a/drivers/leds/leds-lp3944.c +++ b/drivers/leds/leds-lp3944.c @@ -335,7 +335,8 @@ static int lp3944_configure(struct i2c_client *client, } /* to expose the default value to userspace */ - led->ldev.brightness = led->status; + led->ldev.brightness = + (enum led_brightness) led->status; /* Set the default led status */ err = lp3944_led_set(led, led->status); diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c index 35812e3a37f..c86c4182647 100644 --- a/drivers/leds/trigger/ledtrig-gpio.c +++ b/drivers/leds/trigger/ledtrig-gpio.c @@ -48,7 +48,7 @@ static void gpio_trig_work(struct work_struct *work) if (!gpio_data->gpio) return; - tmp = gpio_get_value(gpio_data->gpio); + tmp = gpio_get_value_cansleep(gpio_data->gpio); if (gpio_data->inverted) tmp = !tmp; diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 6d184dbcaca..94ed7cefb14 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -1,3 +1,7 @@ +# Generic MAILBOX API + +obj-$(CONFIG_MAILBOX) += mailbox.o + obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c new file mode 100644 index 00000000000..afcb430508e --- /dev/null +++ b/drivers/mailbox/mailbox.c @@ -0,0 +1,465 @@ +/* + * Mailbox: Common code for Mailbox controllers and users + * + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar <jassisinghbrar@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/bitops.h> +#include <linux/mailbox_client.h> +#include <linux/mailbox_controller.h> + +#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ +#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ +#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */ + +static LIST_HEAD(mbox_cons); +static DEFINE_MUTEX(con_mutex); + +static int add_to_rbuf(struct mbox_chan *chan, void *mssg) +{ + int idx; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + /* See if there is any space left */ + if (chan->msg_count == MBOX_TX_QUEUE_LEN) { + spin_unlock_irqrestore(&chan->lock, flags); + return -ENOBUFS; + } + + idx = chan->msg_free; + chan->msg_data[idx] = mssg; + chan->msg_count++; + + if (idx == MBOX_TX_QUEUE_LEN - 1) + chan->msg_free = 0; + else + chan->msg_free++; + + spin_unlock_irqrestore(&chan->lock, flags); + + return idx; +} + +static void msg_submit(struct mbox_chan *chan) +{ + unsigned count, idx; + unsigned long flags; + void *data; + int err; + + spin_lock_irqsave(&chan->lock, flags); + + if (!chan->msg_count || chan->active_req) + goto exit; + + count = chan->msg_count; + idx = chan->msg_free; + if (idx >= count) + idx -= count; + else + idx += MBOX_TX_QUEUE_LEN - count; + + data = chan->msg_data[idx]; + + /* Try to submit a message to the MBOX controller */ + err = chan->mbox->ops->send_data(chan, data); + if (!err) { + chan->active_req = data; + chan->msg_count--; + } +exit: + spin_unlock_irqrestore(&chan->lock, flags); +} + +static void tx_tick(struct mbox_chan *chan, int r) +{ + unsigned long flags; + void *mssg; + + spin_lock_irqsave(&chan->lock, flags); + mssg = chan->active_req; + chan->active_req = NULL; + spin_unlock_irqrestore(&chan->lock, flags); + + /* Submit next message */ + msg_submit(chan); + + /* Notify the client */ + if (mssg && chan->cl->tx_done) + chan->cl->tx_done(chan->cl, mssg, r); + + if (chan->cl->tx_block) + complete(&chan->tx_complete); +} + +static void poll_txdone(unsigned long data) +{ + struct mbox_controller *mbox = (struct mbox_controller *)data; + bool txdone, resched = false; + int i; + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + if (chan->active_req && chan->cl) { + resched = true; + txdone = chan->mbox->ops->last_tx_done(chan); + if (txdone) + tx_tick(chan, 0); + } + } + + if (resched) + mod_timer(&mbox->poll, jiffies + + msecs_to_jiffies(mbox->txpoll_period)); +} + +/** + * mbox_chan_received_data - A way for controller driver to push data + * received from remote to the upper layer. + * @chan: Pointer to the mailbox channel on which RX happened. + * @mssg: Client specific message typecasted as void * + * + * After startup and before shutdown any data received on the chan + * is passed on to the API via atomic mbox_chan_received_data(). + * The controller should ACK the RX only after this call returns. + */ +void mbox_chan_received_data(struct mbox_chan *chan, void *mssg) +{ + /* No buffering the received data */ + if (chan->cl->rx_callback) + chan->cl->rx_callback(chan->cl, mssg); +} +EXPORT_SYMBOL_GPL(mbox_chan_received_data); + +/** + * mbox_chan_txdone - A way for controller driver to notify the + * framework that the last TX has completed. + * @chan: Pointer to the mailbox chan on which TX happened. 
+ * @r: Status of last TX - OK or ERROR + * + * The controller that has IRQ for TX ACK calls this atomic API + * to tick the TX state machine. It works only if txdone_irq + * is set by the controller. + */ +void mbox_chan_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) { + dev_err(chan->mbox->dev, + "Controller can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_chan_txdone); + +/** + * mbox_client_txdone - The way for a client to run the TX state machine. + * @chan: Mailbox channel assigned to this client. + * @r: Success status of last transmission. + * + * The client/protocol had received some 'ACK' packet and it notifies + * the API that the last packet was sent successfully. This only works + * if the controller can't sense TX-Done. + */ +void mbox_client_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) { + dev_err(chan->mbox->dev, "Client can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_client_txdone); + +/** + * mbox_client_peek_data - A way for client driver to pull data + * received from remote by the controller. + * @chan: Mailbox channel assigned to this client. + * + * A poke to controller driver for any received data. + * The data is actually passed onto client via the + * mbox_chan_received_data() + * The call can be made from atomic context, so the controller's + * implementation of peek_data() must not sleep. + * + * Return: True, if controller has, and is going to push after this, + * some data. + * False, if controller doesn't have any data to be read. + */ +bool mbox_client_peek_data(struct mbox_chan *chan) +{ + if (chan->mbox->ops->peek_data) + return chan->mbox->ops->peek_data(chan); + + return false; +} +EXPORT_SYMBOL_GPL(mbox_client_peek_data); + +/** + * mbox_send_message - For client to submit a message to be + * sent to the remote. + * @chan: Mailbox channel assigned to this client. + * @mssg: Client specific message typecasted. + * + * For client to submit data to the controller destined for a remote + * processor. If the client had set 'tx_block', the call will return + * either when the remote receives the data or when 'tx_tout' millisecs + * run out. + * In non-blocking mode, the requests are buffered by the API and a + * non-negative token is returned for each queued request. If the request + * is not queued, a negative token is returned. Upon failure or successful + * TX, the API calls 'tx_done' from atomic context, from which the client + * could submit yet another request. + * The pointer to message should be preserved until it is sent + * over the chan, i.e, tx_done() is made. + * This function could be called from atomic context as it simply + * queues the data and returns a token against the request. + * + * Return: Non-negative integer for successful submission (non-blocking mode) + * or transmission over chan (blocking mode). + * Negative value denotes failure. 
+ */ +int mbox_send_message(struct mbox_chan *chan, void *mssg) +{ + int t; + + if (!chan || !chan->cl) + return -EINVAL; + + t = add_to_rbuf(chan, mssg); + if (t < 0) { + dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n"); + return t; + } + + msg_submit(chan); + + if (chan->txdone_method == TXDONE_BY_POLL) + poll_txdone((unsigned long)chan->mbox); + + if (chan->cl->tx_block && chan->active_req) { + unsigned long wait; + int ret; + + if (!chan->cl->tx_tout) /* wait forever */ + wait = msecs_to_jiffies(3600000); + else + wait = msecs_to_jiffies(chan->cl->tx_tout); + + ret = wait_for_completion_timeout(&chan->tx_complete, wait); + if (ret == 0) { + t = -EIO; + tx_tick(chan, -EIO); + } + } + + return t; +} +EXPORT_SYMBOL_GPL(mbox_send_message); + +/** + * mbox_request_channel - Request a mailbox channel. + * @cl: Identity of the client requesting the channel. + * @index: Index of mailbox specifier in 'mboxes' property. + * + * The Client specifies its requirements and capabilities while asking for + * a mailbox channel. It can't be called from atomic context. + * The channel is exclusively allocated and can't be used by another + * client before the owner calls mbox_free_channel. + * After assignment, any packet received on this channel will be + * handed over to the client via the 'rx_callback'. + * The framework holds reference to the client, so the mbox_client + * structure shouldn't be modified until the mbox_free_channel returns. + * + * Return: Pointer to the channel assigned to the client if successful. + * ERR_PTR for request failure. + */ +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) +{ + struct device *dev = cl->dev; + struct mbox_controller *mbox; + struct of_phandle_args spec; + struct mbox_chan *chan; + unsigned long flags; + int ret; + + if (!dev || !dev->of_node) { + pr_debug("%s: No owner device node\n", __func__); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&con_mutex); + + if (of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", index, &spec)) { + dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-ENODEV); + } + + chan = NULL; + list_for_each_entry(mbox, &mbox_cons, node) + if (mbox->dev->of_node == spec.np) { + chan = mbox->of_xlate(mbox, &spec); + break; + } + + of_node_put(spec.np); + + if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) { + dev_dbg(dev, "%s: mailbox not free\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-EBUSY); + } + + spin_lock_irqsave(&chan->lock, flags); + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method |= TXDONE_BY_ACK; + + spin_unlock_irqrestore(&chan->lock, flags); + + ret = chan->mbox->ops->startup(chan); + if (ret) { + dev_err(dev, "Unable to startup the chan (%d)\n", ret); + mbox_free_channel(chan); + chan = ERR_PTR(ret); + } + + mutex_unlock(&con_mutex); + return chan; +} +EXPORT_SYMBOL_GPL(mbox_request_channel); + +/** + * mbox_free_channel - The client relinquishes control of a mailbox + * channel by this call. + * @chan: The mailbox channel to be freed. 
+ */ +void mbox_free_channel(struct mbox_chan *chan) +{ + unsigned long flags; + + if (!chan || !chan->cl) + return; + + chan->mbox->ops->shutdown(chan); + + /* The queued TX requests are simply aborted, no callbacks are made */ + spin_lock_irqsave(&chan->lock, flags); + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) + chan->txdone_method = TXDONE_BY_POLL; + + module_put(chan->mbox->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flags); +} +EXPORT_SYMBOL_GPL(mbox_free_channel); + +static struct mbox_chan * +of_mbox_index_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + int ind = sp->args[0]; + + if (ind >= mbox->num_chans) + return NULL; + + return &mbox->chans[ind]; +} + +/** + * mbox_controller_register - Register the mailbox controller + * @mbox: Pointer to the mailbox controller. + * + * The controller driver registers its communication channels + */ +int mbox_controller_register(struct mbox_controller *mbox) +{ + int i, txdone; + + /* Sanity check */ + if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans) + return -EINVAL; + + if (mbox->txdone_irq) + txdone = TXDONE_BY_IRQ; + else if (mbox->txdone_poll) + txdone = TXDONE_BY_POLL; + else /* It has to be ACK then */ + txdone = TXDONE_BY_ACK; + + if (txdone == TXDONE_BY_POLL) { + mbox->poll.function = &poll_txdone; + mbox->poll.data = (unsigned long)mbox; + init_timer(&mbox->poll); + } + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + chan->cl = NULL; + chan->mbox = mbox; + chan->txdone_method = txdone; + spin_lock_init(&chan->lock); + } + + if (!mbox->of_xlate) + mbox->of_xlate = of_mbox_index_xlate; + + mutex_lock(&con_mutex); + list_add_tail(&mbox->node, &mbox_cons); + mutex_unlock(&con_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mbox_controller_register); + +/** + * mbox_controller_unregister - Unregister the mailbox controller + * @mbox: Pointer to the mailbox controller. + */ +void mbox_controller_unregister(struct mbox_controller *mbox) +{ + int i; + + if (!mbox) + return; + + mutex_lock(&con_mutex); + + list_del(&mbox->node); + + for (i = 0; i < mbox->num_chans; i++) + mbox_free_channel(&mbox->chans[i]); + + if (mbox->txdone_poll) + del_timer_sync(&mbox->poll); + + mutex_unlock(&con_mutex); +} +EXPORT_SYMBOL_GPL(mbox_controller_unregister); diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index d873cbae2fb..f3755e0aa93 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c @@ -26,7 +26,7 @@ #include <linux/device.h> #include <linux/amba/bus.h> -#include <linux/mailbox.h> +#include <linux/pl320-ipc.h> #define IPCMxSOURCE(m) ((m) * 0x40) #define IPCMxDSET(m) (((m) * 0x40) + 0x004) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 825ca1f8763..afe79719ea3 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c) /* * Test if the buffer is unused and too old, and commit it. - * At if noio is set, we must not do any I/O because we hold - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to - * different bufio client. + * And if GFP_NOFS is used, we must not do any I/O because we hold + * dm_bufio_clients_lock and we would risk deadlock if the I/O gets + * rerouted to different bufio client. 
*/ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, unsigned long max_jiffies) @@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, if (jiffies - b->last_accessed < max_jiffies) return 0; - if (!(gfp & __GFP_IO)) { + if (!(gfp & __GFP_FS)) { if (test_bit(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) @@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) unsigned long freed; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (sc->gfp_mask & __GFP_IO) + if (sc->gfp_mask & __GFP_FS) dm_bufio_lock(c); else if (!dm_bufio_trylock(c)) return SHRINK_STOP; @@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) unsigned long count; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (sc->gfp_mask & __GFP_IO) + if (sc->gfp_mask & __GFP_FS) dm_bufio_lock(c); else if (!dm_bufio_trylock(c)) return 0; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 4857fa4a548..07c0fa0fa28 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -789,8 +789,7 @@ struct dm_raid_superblock { __le32 layout; __le32 stripe_sectors; - __u8 pad[452]; /* Round struct to 512 bytes. */ - /* Always set to 0 when writing. */ + /* Remainder of a logical block is zero-filled when writing (see super_sync()). */ } __packed; static int read_disk_sb(struct md_rdev *rdev, int size) @@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) test_bit(Faulty, &(rs->dev[i].rdev.flags))) failed_devices |= (1ULL << i); - memset(sb, 0, sizeof(*sb)); + memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); sb->magic = cpu_to_le32(DM_RAID_MAGIC); sb->features = cpu_to_le32(0); /* No features yet */ @@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) uint64_t events_sb, events_refsb; rdev->sb_start = 0; - rdev->sb_size = sizeof(*sb); + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); + if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { + DMERR("superblock size of a logical block is no longer valid"); + return -EINVAL; + } ret = read_disk_sb(rdev, rdev->sb_size); if (ret) @@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs) raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); for (i = 0; i < rs->md.raid_disks; i++) { - struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev); + struct request_queue *q; + + if (!rs->dev[i].rdev.bdev) + continue; + q = bdev_get_queue(rs->dev[i].rdev.bdev); if (!q || !blk_queue_discard(q)) return; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index d1600d2aa2e..f8b37d4c05d 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) sc->stripes_shift = __ffs(stripes); r = dm_set_target_max_io_len(ti, chunk_size); - if (r) + if (r) { + kfree(sc); return r; + } ti->num_flush_bios = stripes; ti->num_discard_bios = stripes; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 4843801173f..0f86d802b53 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } + /* + * We must hold the virtual cell before doing the lookup, otherwise + * there's a race with discard. 
+ */ + build_virtual_key(tc->td, block, &key); + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) + return DM_MAPIO_SUBMITTED; + r = dm_thin_find_block(td, block, 0, &result); /* @@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) * shared flag will be set in their case. */ thin_defer_bio(tc, bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; } - build_virtual_key(tc->td, block, &key); - if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) - return DM_MAPIO_SUBMITTED; - build_data_key(tc->td, result.block, &key); if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { cell_defer_no_holder_no_free(tc, &cell1); @@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) * of doing so. */ handle_unserviceable_bio(tc->pool, bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; } /* fall through */ @@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) * provide the hint to load the metadata into cache. */ thin_defer_bio(tc, bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; default: @@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) * pool is switched to fail-io mode. */ bio_io_error(bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; } } diff --git a/drivers/md/md.c b/drivers/md/md.c index 4dfa15da9cb..9233c71138f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5121,6 +5121,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) printk("md: %s still in use.\n",mdname(mddev)); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } err = -EBUSY; @@ -5135,6 +5136,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) mddev->ro = 1; set_disk_ro(mddev->gendisk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_state); err = 0; } @@ -5178,6 +5181,7 @@ static int do_md_stop(struct mddev *mddev, int mode, mutex_unlock(&mddev->open_mutex); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } return -EBUSY; diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index 37d367bb9aa..bf2b80d5c47 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -42,6 +42,12 @@ struct btree_node { } __packed; +/* + * Locks a block using the btree node validator. 
+ */ +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, + struct dm_block **result); + void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, struct dm_btree_value_type *vt); diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index cf9fd676ae4..1b5e13ec7f9 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = { /*----------------------------------------------------------------*/ -static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, struct dm_block **result) { return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 416060c2570..200ac12a1d4 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key); * FIXME: We shouldn't use a recursive algorithm when we have limited stack * space. Also this only works for single level trees. */ -static int walk_node(struct ro_spine *s, dm_block_t block, +static int walk_node(struct dm_btree_info *info, dm_block_t block, int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { int r; unsigned i, nr; + struct dm_block *node; struct btree_node *n; uint64_t keys; - r = ro_step(s, block); - n = ro_node(s); + r = bn_read_lock(info, block, &node); + if (r) + return r; + + n = dm_block_data(node); nr = le32_to_cpu(n->header.nr_entries); for (i = 0; i < nr; i++) { if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { - r = walk_node(s, value64(n, i), fn, context); + r = walk_node(info, value64(n, i), fn, context); if (r) goto out; } else { @@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block, } out: - ro_pop(s); + dm_tm_unlock(info->tm, node); return r; } @@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root, int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { - int r; - struct ro_spine spine; - BUG_ON(info->levels > 1); - - init_ro_spine(&spine, info); - r = walk_node(&spine, root, fn, context); - exit_ro_spine(&spine); - - return r; + return walk_node(info, root, fn, context); } EXPORT_SYMBOL_GPL(dm_btree_walk); diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c index 97afee672d0..4418119cf70 100644 --- a/drivers/media/common/saa7146/saa7146_core.c +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -364,6 +364,9 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent goto out; } + /* create a nice device name */ + sprintf(dev->name, "saa7146 (%d)", saa7146_num); + DEB_EE("pci:%p\n", pci); err = pci_enable_device(pci); @@ -438,9 +441,6 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent /* the rest + print status message */ - /* create a nice device name */ - sprintf(dev->name, "saa7146 (%d)", saa7146_num); - pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index b8579ee68bd..2cf30576bf3 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -962,6 +962,11 
@@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe) case SYS_ATSC: c->modulation = VSB_8; break; + case SYS_ISDBS: + c->symbol_rate = 28860000; + c->rolloff = ROLLOFF_35; + c->bandwidth_hz = c->symbol_rate / 100 * 135; + break; default: c->modulation = QAM_AUTO; break; @@ -2072,6 +2077,7 @@ static int dtv_set_frontend(struct dvb_frontend *fe) break; case SYS_DVBS: case SYS_TURBO: + case SYS_ISDBS: rolloff = 135; break; case SYS_DVBS2: diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c index 335daeff91b..9d0d0347758 100644 --- a/drivers/media/dvb-frontends/ds3000.c +++ b/drivers/media/dvb-frontends/ds3000.c @@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config, memcpy(&state->frontend.ops, &ds3000_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; + + /* + * Some devices like T480 starts with voltage on. Be sure + * to turn voltage off during init, as this can otherwise + * interfere with Unicable SCR systems. + */ + ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF); return &state->frontend; error3: diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c index 9b684d5c8f9..15bf4318cb7 100644 --- a/drivers/media/dvb-frontends/sp2.c +++ b/drivers/media/dvb-frontends/sp2.c @@ -266,7 +266,7 @@ int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221, return s->status; } -int sp2_init(struct sp2 *s) +static int sp2_init(struct sp2 *s) { int ret = 0; u8 buf; @@ -348,7 +348,7 @@ err: return ret; } -int sp2_exit(struct i2c_client *client) +static int sp2_exit(struct i2c_client *client) { struct sp2 *s; diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c index d9905fb52f8..b35d65c9cc0 100644 --- a/drivers/media/dvb-frontends/tc90522.c +++ b/drivers/media/dvb-frontends/tc90522.c @@ -216,32 +216,30 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe) c->delivery_system = SYS_ISDBS; layers = 0; - ret = reg_read(state, 0xe8, val, 3); + ret = reg_read(state, 0xe6, val, 5); if (ret == 0) { - int slots; u8 v; + c->stream_id = val[0] << 8 | val[1]; + /* high/single layer */ - v = (val[0] & 0x70) >> 4; + v = (val[2] & 0x70) >> 4; c->modulation = (v == 7) ? PSK_8 : QPSK; c->fec_inner = fec_conv_sat[v]; c->layer[0].fec = c->fec_inner; c->layer[0].modulation = c->modulation; - c->layer[0].segment_count = val[1] & 0x3f; /* slots */ + c->layer[0].segment_count = val[3] & 0x3f; /* slots */ /* low layer */ - v = (val[0] & 0x07); + v = (val[2] & 0x07); c->layer[1].fec = fec_conv_sat[v]; if (v == 0) /* no low layer */ c->layer[1].segment_count = 0; else - c->layer[1].segment_count = val[2] & 0x3f; /* slots */ + c->layer[1].segment_count = val[4] & 0x3f; /* slots */ /* actually, BPSK if v==1, but not defined in fe_modulation_t */ c->layer[1].modulation = QPSK; layers = (v > 0) ? 
2 : 1; - - slots = c->layer[0].segment_count + c->layer[1].segment_count; - c->symbol_rate = 28860000 * slots / 48; } /* statistics */ @@ -363,7 +361,7 @@ static int tc90522t_get_frontend(struct dvb_frontend *fe) u8 v; c->isdbt_partial_reception = val[0] & 0x01; - c->isdbt_sb_mode = (val[0] & 0xc0) == 0x01; + c->isdbt_sb_mode = (val[0] & 0xc0) == 0x40; /* layer A */ v = (val[2] & 0x78) >> 3; diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index 13734b8c791..4cb90317ff4 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -1600,6 +1600,7 @@ static int dvb_register(struct cx23885_tsport *port) break; /* attach tuner */ + memset(&m88ts2022_config, 0, sizeof(m88ts2022_config)); m88ts2022_config.fe = fe0->dvb.frontend; m88ts2022_config.clock = 27000000; memset(&info, 0, sizeof(struct i2c_board_info)); @@ -1635,6 +1636,7 @@ static int dvb_register(struct cx23885_tsport *port) /* port c - terrestrial/cable */ case 2: /* attach frontend */ + memset(&si2168_config, 0, sizeof(si2168_config)); si2168_config.i2c_adapter = &adapter; si2168_config.fe = &fe0->dvb.frontend; si2168_config.ts_mode = SI2168_TS_SERIAL; @@ -1654,6 +1656,7 @@ static int dvb_register(struct cx23885_tsport *port) port->i2c_client_demod = client_demod; /* attach tuner */ + memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = fe0->dvb.frontend; memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "si2157", I2C_NAME_SIZE); diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig index 5425ba1e320..95d5d520204 100644 --- a/drivers/media/pci/tw68/Kconfig +++ b/drivers/media/pci/tw68/Kconfig @@ -1,7 +1,6 @@ config VIDEO_TW68 tristate "Techwell tw68x Video For Linux" depends on VIDEO_DEV && PCI && VIDEO_V4L2 - select I2C_ALGOBIT select VIDEOBUF2_DMA_SG ---help--- Support for Techwell tw68xx based frame grabber boards. 
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c index a6fb48cf7aa..63f0b64057c 100644 --- a/drivers/media/pci/tw68/tw68-core.c +++ b/drivers/media/pci/tw68/tw68-core.c @@ -306,7 +306,7 @@ static int tw68_initdev(struct pci_dev *pci_dev, /* get irq */ err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq, - IRQF_SHARED | IRQF_DISABLED, dev->name, dev); + IRQF_SHARED, dev->name, dev); if (err < 0) { pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index bee9074ebc1..3aac88f1d54 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -166,7 +166,7 @@ config VIDEO_MEM2MEM_DEINTERLACE config VIDEO_SAMSUNG_S5P_G2D tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver" depends on VIDEO_DEV && VIDEO_V4L2 - depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST + depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on HAS_DMA select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV @@ -178,7 +178,7 @@ config VIDEO_SAMSUNG_S5P_G2D config VIDEO_SAMSUNG_S5P_JPEG tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver" depends on VIDEO_DEV && VIDEO_V4L2 - depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST + depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on HAS_DMA select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV @@ -189,7 +189,7 @@ config VIDEO_SAMSUNG_S5P_JPEG config VIDEO_SAMSUNG_S5P_MFC tristate "Samsung S5P MFC Video Codec" depends on VIDEO_DEV && VIDEO_V4L2 - depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST + depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on HAS_DMA select VIDEOBUF2_DMA_CONTIG default n diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig index 77c95123774..b7b2e472240 100644 --- a/drivers/media/platform/exynos4-is/Kconfig +++ b/drivers/media/platform/exynos4-is/Kconfig @@ -2,7 +2,7 @@ config VIDEO_SAMSUNG_EXYNOS4_IS bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API - depends on (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) + depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on OF && COMMON_CLK help Say Y here to enable camera host interface devices for diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index b70fd996d79..aee92d908e4 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -832,6 +832,7 @@ err: return -ENXIO; } +#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP) static int fimc_m2m_suspend(struct fimc_dev *fimc) { unsigned long flags; @@ -870,6 +871,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc) return 0; } +#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */ static const struct of_device_id fimc_of_match[]; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index e525a7c8d88..6fcc7f072ac 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -893,7 +893,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, unsigned long buffer, unsigned long size, struct s5p_jpeg_ctx *ctx) { - int c, components, notfound; + int c, components = 0, notfound; unsigned int height, width, word, subsampling = 0; long length; struct s5p_jpeg_buffer jpeg_buffer; @@ -2632,6 +2632,7 @@ static int s5p_jpeg_remove(struct 
platform_device *pdev) return 0; } +#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP) static int s5p_jpeg_runtime_suspend(struct device *dev) { struct s5p_jpeg *jpeg = dev_get_drvdata(dev); @@ -2681,7 +2682,9 @@ static int s5p_jpeg_runtime_resume(struct device *dev) return 0; } +#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM_SLEEP static int s5p_jpeg_suspend(struct device *dev) { if (pm_runtime_suspended(dev)) @@ -2697,6 +2700,7 @@ static int s5p_jpeg_resume(struct device *dev) return s5p_jpeg_runtime_resume(dev); } +#endif static const struct dev_pm_ops s5p_jpeg_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume) diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig index a9d56f8936b..beb180e71ba 100644 --- a/drivers/media/platform/s5p-tv/Kconfig +++ b/drivers/media/platform/s5p-tv/Kconfig @@ -9,7 +9,7 @@ config VIDEO_SAMSUNG_S5P_TV bool "Samsung TV driver for S5P platform" depends on PM_RUNTIME - depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST + depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST default n ---help--- Say Y here to enable selecting the TV output devices for diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig index d71139a2ae0..c3090932f06 100644 --- a/drivers/media/platform/vivid/Kconfig +++ b/drivers/media/platform/vivid/Kconfig @@ -1,8 +1,11 @@ config VIDEO_VIVID tristate "Virtual Video Test Driver" - depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 + depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB select FONT_SUPPORT select FONT_8x16 + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT select VIDEOBUF2_VMALLOC default n ---help--- diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index 2c61a62ab48..686c3c2ad05 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -100,11 +100,9 @@ MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n" "\t\t bit 0=crop, 1=compose, 2=scale,\n" "\t\t -1=user-controlled (default)"); -static unsigned multiplanar[VIVID_MAX_DEVS]; +static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 }; module_param_array(multiplanar, uint, NULL, 0444); -MODULE_PARM_DESC(multiplanar, " 0 (default) is alternating single and multiplanar devices,\n" - "\t\t 1 is single planar devices,\n" - "\t\t 2 is multiplanar devices"); +MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device."); /* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */ static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d }; @@ -669,10 +667,7 @@ static int __init vivid_create_instance(int inst) /* start detecting feature set */ /* do we use single- or multi-planar? */ - if (multiplanar[inst] == 0) - dev->multiplanar = inst & 1; - else - dev->multiplanar = multiplanar[inst] > 1; + dev->multiplanar = multiplanar[inst] > 1; v4l2_info(&dev->v4l2_dev, "using %splanar format API\n", dev->multiplanar ? 
"multi" : "single "); diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c index 0c6fa53fa64..cbcd6250e7b 100644 --- a/drivers/media/platform/vivid/vivid-tpg.c +++ b/drivers/media/platform/vivid/vivid-tpg.c @@ -136,7 +136,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) tpg->black_line[plane] = vzalloc(max_w * pixelsz); if (!tpg->black_line[plane]) return -ENOMEM; - tpg->random_line[plane] = vzalloc(max_w * pixelsz); + tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz); if (!tpg->random_line[plane]) return -ENOMEM; } diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 6f28f6e02ea..704397f3c10 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -1256,7 +1256,7 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name) fmerr("Unable to read firmware(%s) content\n", fw_name); return ret; } - fmdbg("Firmware(%s) length : %d bytes\n", fw_name, fw_entry->size); + fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size); fw_data = (void *)fw_entry->data; fw_len = fw_entry->size; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index b8837dd39bb..65f80b8b9f7 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1678,7 +1678,8 @@ static void imon_incoming_packet(struct imon_context *ictx, if (press_type == 0) rc_keyup(ictx->rdev); else { - if (ictx->rc_type == RC_BIT_RC6_MCE) + if (ictx->rc_type == RC_BIT_RC6_MCE || + ictx->rc_type == RC_BIT_OTHER) rc_keydown(ictx->rdev, ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER, ictx->rc_scancode, ictx->rc_toggle); diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c index 08bbd4f508c..b0df62961c1 100644 --- a/drivers/media/rc/ir-hix5hd2.c +++ b/drivers/media/rc/ir-hix5hd2.c @@ -297,7 +297,7 @@ static int hix5hd2_ir_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int hix5hd2_ir_suspend(struct device *dev) { struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev); diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c index 2ef763928ca..84fa6e9b59a 100644 --- a/drivers/media/rc/ir-rc5-decoder.c +++ b/drivers/media/rc/ir-rc5-decoder.c @@ -53,7 +53,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) u32 scancode; enum rc_type protocol; - if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X))) + if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ))) return 0; if (!is_timing_event(ev)) { diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index e8fff2add26..b732ac6a26d 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -262,7 +262,6 @@ int ir_raw_event_register(struct rc_dev *dev) return -ENOMEM; dev->raw->dev = dev; - dev->enabled_protocols = ~0; dev->change_protocol = change_protocol; rc = kfifo_alloc(&dev->raw->kfifo, sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE, diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index a7991c7d010..8d3b74c5a71 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1421,6 +1421,8 @@ int rc_register_device(struct rc_dev *dev) if (dev->change_protocol) { u64 rc_type = (1 << rc_map->rc_type); + if (dev->driver_type == RC_DRIVER_IR_RAW) + rc_type |= RC_BIT_LIRC; rc = dev->change_protocol(dev, &rc_type); if (rc < 0) goto out_raw; diff --git 
a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index e44c8aba607..803a0e63d47 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -1333,9 +1333,9 @@ static int xc5000_release(struct dvb_frontend *fe) if (priv) { cancel_delayed_work(&priv->timer_sleep); - hybrid_tuner_release_state(priv); if (priv->firmware) release_firmware(priv->firmware); + hybrid_tuner_release_state(priv); } mutex_unlock(&xc5000_list_mutex); diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 00758c83eec..1896ab218b1 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -193,8 +193,8 @@ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val, return af9035_wr_regs(d, reg, &val, 1); } -static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, - void *platform_data, struct i2c_adapter *adapter) +static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type, + u8 addr, void *platform_data, struct i2c_adapter *adapter) { int ret, num; struct state *state = d_to_priv(d); @@ -221,7 +221,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, goto err; } - request_module(board_info.type); + request_module("%s", board_info.type); /* register I2C device */ client = i2c_new_device(adapter, &board_info); diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c index d3c5f230e97..ae917c042a5 100644 --- a/drivers/media/usb/dvb-usb-v2/anysee.c +++ b/drivers/media/usb/dvb-usb-v2/anysee.c @@ -630,8 +630,8 @@ error: return ret; } -static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, - void *platform_data) +static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type, + u8 addr, void *platform_data) { int ret, num; struct anysee_state *state = d_to_priv(d); @@ -659,7 +659,7 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, goto err; } - request_module(board_info.type); + request_module("%s", board_info.type); /* register I2C device */ client = i2c_new_device(adapter, &board_info); diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index b5e52fe7957..901cf2b952d 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(em28xx_audio_analog_set); int em28xx_audio_setup(struct em28xx *dev) { int vid1, vid2, feat, cfg; - u32 vid; + u32 vid = 0; u8 i2s_samplerates; if (dev->chip_id == CHIP_ID_EM2870 || diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index 581f6dad4ca..23f8f6afa2e 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c @@ -712,8 +712,10 @@ static int em28xx_ir_init(struct em28xx *dev) em28xx_info("Registering input extension\n"); ir = kzalloc(sizeof(*ir), GFP_KERNEL); + if (!ir) + return -ENOMEM; rc = rc_allocate_device(); - if (!ir || !rc) + if (!rc) goto error; /* record handles to ourself */ diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c index 328b5ba47a0..fd1fa412e09 100644 --- a/drivers/media/usb/hackrf/hackrf.c +++ b/drivers/media/usb/hackrf/hackrf.c @@ -932,7 +932,7 @@ static int hackrf_set_bandwidth(struct hackrf_dev *dev) dev->bandwidth->val = bandwidth; dev->bandwidth->cur.val = bandwidth; - dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq); + 
dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth); u16tmp = 0; u16tmp |= ((bandwidth >> 0) & 0xff) << 0; diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index 68bc9615660..9bfa041e331 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file) if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); + return 0; } mutex_unlock(&usbvision->v4l2_lock); @@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file) if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); + return err_code; } mutex_unlock(&usbvision->v4l2_lock); diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 60a8e2c3631..378ae02e593 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -318,7 +318,6 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream, stream->ctrl = probe; stream->cur_format = format; stream->cur_frame = frame; - stream->frame_size = fmt->fmt.pix.sizeimage; done: mutex_unlock(&stream->mutex); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 9ace520bb07..df81b9c4faf 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -1143,7 +1143,7 @@ static int uvc_video_encode_data(struct uvc_streaming *stream, static void uvc_video_validate_buffer(const struct uvc_streaming *stream, struct uvc_buffer *buf) { - if (stream->frame_size != buf->bytesused && + if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused && !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED)) buf->error = 1; } diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 6f676c29ec0..864ada74036 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -457,7 +457,6 @@ struct uvc_streaming { struct uvc_format *def_format; struct uvc_format *cur_format; struct uvc_frame *cur_frame; - size_t frame_size; /* Protect access to ctrl, cur_format, cur_frame and hardware video * probe control. diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index bf80f0f7dfb..e02353e340d 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, /* Try to remap memory */ size = vma->vm_end - vma->vm_start; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* the "vm_pgoff" is just used in v4l2 to find the + * corresponding buffer data structure which is allocated + * earlier and it does not mean the offset from the physical + * buffer start address as usual. So set it to 0 to pass + * the sanity check in vm_iomap_memory(). + */ + vma->vm_pgoff = 0; + retval = vm_iomap_memory(vma, mem->dma_handle, size); if (retval) { dev_err(q->dev, "mmap: remap failed with error %d. 
", diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c index cf008f45968..711773e8e64 100644 --- a/drivers/mfd/max77693.c +++ b/drivers/mfd/max77693.c @@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c, goto err_irq_charger; } - ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, + ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_FALLING, 0, &max77693_muic_irq_chip, @@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c, goto err_irq_muic; } + /* Unmask interrupts from all blocks in interrupt source register */ + ret = regmap_update_bits(max77693->regmap, + MAX77693_PMIC_REG_INTSRC_MASK, + SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL); + if (ret < 0) { + dev_err(max77693->dev, + "Could not unmask interrupts in INTSRC: %d\n", + ret); + goto err_intsrc; + } + pm_runtime_set_active(max77693->dev); ret = mfd_add_devices(max77693->dev, -1, max77693_devs, @@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c, err_mfd: mfd_remove_devices(max77693->dev); +err_intsrc: regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); err_irq_muic: regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index f2643c221d3..30f7ca89a0e 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work) mutex_unlock(&pcr->pcr_mutex); } +#ifdef CONFIG_PM static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) { if (pcr->ops->turn_off_led) @@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) if (pcr->ops->force_power_down) pcr->ops->force_power_down(pcr, pm_state); } +#endif static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) { diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h index 2d045f26f19..bee0abf8204 100644 --- a/drivers/mfd/stmpe.h +++ b/drivers/mfd/stmpe.h @@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe); #define STMPE24XX_REG_CHIP_ID 0x80 #define STMPE24XX_REG_IEGPIOR_LSB 0x18 #define STMPE24XX_REG_ISGPIOR_MSB 0x19 -#define STMPE24XX_REG_GPMR_LSB 0xA5 +#define STMPE24XX_REG_GPMR_LSB 0xA4 #define STMPE24XX_REG_GPSR_LSB 0x85 #define STMPE24XX_REG_GPCR_LSB 0x88 #define STMPE24XX_REG_GPDR_LSB 0x8B diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index cf92a6d1c53..50f9091bcd3 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b; #define PWR_DEVSLP BIT(1) #define PWR_DEVOFF BIT(0) +/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */ +#define STARTON_SWBUG BIT(7) /* Start on watchdog */ +#define STARTON_VBUS BIT(5) /* Start on VBUS */ +#define STARTON_VBAT BIT(4) /* Start on battery insert */ +#define STARTON_RTC BIT(3) /* Start on RTC */ +#define STARTON_USB BIT(2) /* Start on USB host */ +#define STARTON_CHG BIT(1) /* Start on charger */ +#define STARTON_PWON BIT(0) /* Start on PWRON button */ + #define SEQ_OFFSYNC (1 << 0) #define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) @@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata) return 0; } +static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues) +{ + u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION, + TWL4030_PM_MASTER_CFG_P2_TRANSITION, + TWL4030_PM_MASTER_CFG_P3_TRANSITION, }; + u8 val; + int i, err; + + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 
TWL4030_PM_MASTER_KEY_CFG1, + TWL4030_PM_MASTER_PROTECT_KEY); + if (err) + goto relock; + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, + TWL4030_PM_MASTER_KEY_CFG2, + TWL4030_PM_MASTER_PROTECT_KEY); + if (err) + goto relock; + + for (i = 0; i < sizeof(regs); i++) { + err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, + &val, regs[i]); + if (err) + break; + val = (~bitmask & val) | (bitmask & bitvalues); + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, + val, regs[i]); + if (err) + break; + } + + if (err) + pr_err("TWL4030 Register access failed: %i\n", err); + +relock: + return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0, + TWL4030_PM_MASTER_PROTECT_KEY); +} + /* * In master mode, start the power off sequence. * After a successful execution, TWL shuts down the power to the SoC @@ -615,6 +662,11 @@ void twl4030_power_off(void) { int err; + /* Disable start on charger or VBUS as it can break poweroff */ + err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0); + if (err) + pr_err("TWL4030 Unable to configure start-up\n"); + err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF, TWL4030_PM_MASTER_P1_SW_EVENTS); if (err) diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c index e00f5340ed8..3c2b8f9e3c8 100644 --- a/drivers/mfd/viperboard.c +++ b/drivers/mfd/viperboard.c @@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface, version >> 8, version & 0xff, vb->usb_dev->bus->busnum, vb->usb_dev->devnum); - ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs, - ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL); + ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO, + vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0, + NULL); if (ret != 0) { dev_err(&interface->dev, "Failed to add mfd devices to core."); goto error; diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index 69506ebd4d0..c99e896604e 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c @@ -21,60 +21,64 @@ #include "cxl.h" -static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group, - bool sec_hash, - struct cxl_sste *secondary_group, - unsigned int *lru) +static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) { - unsigned int i, entry; - struct cxl_sste *sste, *group = primary_group; - - for (i = 0; i < 2; i++) { - for (entry = 0; entry < 8; entry++) { - sste = group + entry; - if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) - return sste; - } - if (!sec_hash) - break; - group = secondary_group; + return ((sste->vsid_data == cpu_to_be64(slb->vsid)) && + (sste->esid_data == cpu_to_be64(slb->esid))); +} + +/* + * This finds a free SSTE for the given SLB, or returns NULL if it's already in + * the segment table. 
+ */ +static struct cxl_sste* find_free_sste(struct cxl_context *ctx, + struct copro_slb *slb) +{ + struct cxl_sste *primary, *sste, *ret = NULL; + unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ + unsigned int entry; + unsigned int hash; + + if (slb->vsid & SLB_VSID_B_1T) + hash = (slb->esid >> SID_SHIFT_1T) & mask; + else /* 256M */ + hash = (slb->esid >> SID_SHIFT) & mask; + + primary = ctx->sstp + (hash << 3); + + for (entry = 0, sste = primary; entry < 8; entry++, sste++) { + if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) + ret = sste; + if (sste_matches(sste, slb)) + return NULL; } + if (ret) + return ret; + /* Nothing free, select an entry to cast out */ - if (sec_hash && (*lru & 0x8)) - sste = secondary_group + (*lru & 0x7); - else - sste = primary_group + (*lru & 0x7); - *lru = (*lru + 1) & 0xf; + ret = primary + ctx->sst_lru; + ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; - return sste; + return ret; } static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) { /* mask is the group index, we search primary and secondary here. */ - unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */ - bool sec_hash = 1; struct cxl_sste *sste; - unsigned int hash; unsigned long flags; - - sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC); - - if (slb->vsid & SLB_VSID_B_1T) - hash = (slb->esid >> SID_SHIFT_1T) & mask; - else /* 256M */ - hash = (slb->esid >> SID_SHIFT) & mask; - spin_lock_irqsave(&ctx->sste_lock, flags); - sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash, - ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru); + sste = find_free_sste(ctx, slb); + if (!sste) + goto out_unlock; pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", sste - ctx->sstp, slb->vsid, slb->esid); sste->vsid_data = cpu_to_be64(slb->vsid); sste->esid_data = cpu_to_be64(slb->esid); +out_unlock: spin_unlock_irqrestore(&ctx->sste_lock, flags); } diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 623286a7711..d47532e8f4f 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ctx->elem->haurp = 0; /* disable */ ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); - sr = CXL_PSL_SR_An_SC; + sr = 0; if (ctx->master) sr |= CXL_PSL_SR_An_MP; if (mfspr(SPRN_LPCR) & LPCR_TC) @@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) u64 sr; int rc; - sr = CXL_PSL_SR_An_SC; + sr = 0; set_endian(sr); if (ctx->master) sr |= CXL_PSL_SR_An_MP; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 03c53b72a2d..270d58a4c43 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -311,7 +311,8 @@ int mmc_of_parse(struct mmc_host *host) struct device_node *np; u32 bus_width; int len, ret; - bool cap_invert, gpio_invert; + bool cd_cap_invert, cd_gpio_invert = false; + bool ro_cap_invert, ro_gpio_invert = false; if (!host->parent || !host->parent->of_node) return 0; @@ -359,16 +360,13 @@ int mmc_of_parse(struct mmc_host *host) if (of_find_property(np, "non-removable", &len)) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - if (of_property_read_bool(np, "cd-inverted")) - cap_invert = true; - else - cap_invert = false; + cd_cap_invert = of_property_read_bool(np, "cd-inverted"); if (of_find_property(np, "broken-cd", &len)) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, true, - 0, &gpio_invert); + 0, &cd_gpio_invert); if (ret) { if (ret 
== -EPROBE_DEFER) return ret; @@ -391,17 +389,14 @@ int mmc_of_parse(struct mmc_host *host) * both inverted, the end result is that the CD line is * not inverted. */ - if (cap_invert ^ gpio_invert) + if (cd_cap_invert ^ cd_gpio_invert) host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } /* Parse Write Protection */ - if (of_property_read_bool(np, "wp-inverted")) - cap_invert = true; - else - cap_invert = false; + ro_cap_invert = of_property_read_bool(np, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &gpio_invert); + ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (ret) { if (ret == -EPROBE_DEFER) goto out; @@ -414,7 +409,7 @@ int mmc_of_parse(struct mmc_host *host) dev_info(host->parent, "Got WP GPIO\n"); /* See the comment on CD inversion above */ - if (cap_invert ^ gpio_invert) + if (ro_cap_invert ^ ro_gpio_invert) host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; if (of_find_property(np, "cap-sd-highspeed", &len)) diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index a7543ba3e19..3096f3ded3a 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -2590,6 +2590,8 @@ static void cfi_intelext_resume(struct mtd_info *mtd) /* Go to known state. Chip may have been power cycled */ if (chip->state == FL_PM_SUSPENDED) { + /* Refresh LH28F640BF Partition Config. Register */ + fixup_LH28F640BF(mtd); map_write(map, CMD(0xFF), cfi->chips[i].start); chip->oldstate = chip->state = FL_READY; wake_up(&chip->wq); diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index dcda6287228..ed827cf894e 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -193,10 +193,10 @@ static int m25p_probe(struct spi_device *spi) { struct mtd_part_parser_data ppdata; struct flash_platform_data *data; - const struct spi_device_id *id = NULL; struct m25p *flash; struct spi_nor *nor; enum read_mode mode = SPI_NOR_NORMAL; + char *flash_name = NULL; int ret; data = dev_get_platdata(&spi->dev); @@ -236,13 +236,11 @@ static int m25p_probe(struct spi_device *spi) * If that's the case, respect "type" and ignore a "name". */ if (data && data->type) - id = spi_nor_match_id(data->type); + flash_name = data->type; + else + flash_name = spi->modalias; - /* If we didn't get name from platform, simply use "modalias". */ - if (!id) - id = spi_get_device_id(spi); - - ret = spi_nor_scan(nor, id, mode); + ret = spi_nor_scan(nor, flash_name, mode); if (ret) return ret; @@ -263,12 +261,62 @@ static int m25p_remove(struct spi_device *spi) } +/* + * XXX This needs to be kept in sync with spi_nor_ids. We can't share + * it with spi-nor, because if this is built as a module then modpost + * won't be able to read it and add appropriate aliases. 
+ */ +static const struct spi_device_id m25p_ids[] = { + {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"}, + {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"}, + {"at26df321"}, {"at45db081d"}, + {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"}, + {"en25q64"}, {"en25qh128"}, {"en25qh256"}, + {"f25l32pa"}, + {"mr25h256"}, {"mr25h10"}, + {"gd25q32"}, {"gd25q64"}, + {"160s33b"}, {"320s33b"}, {"640s33b"}, + {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"}, + {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"}, + {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"}, + {"mx66l1g55g"}, + {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"}, + {"n25q512a"}, {"n25q512ax3"}, {"n25q00"}, + {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"}, + {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"}, + {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"}, + {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"}, + {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"}, + {"s25fl016k"}, {"s25fl064k"}, + {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"}, + {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"}, + {"sst25wf040"}, + {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"}, + {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"}, + {"m25p128"}, {"n25q032"}, + {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, + {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, + {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, + {"m45pe10"}, {"m45pe80"}, {"m45pe16"}, + {"m25pe20"}, {"m25pe80"}, {"m25pe16"}, + {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"}, + {"m25px64"}, + {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"}, + {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, + {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"}, + {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"}, + {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"}, + { }, +}; +MODULE_DEVICE_TABLE(spi, m25p_ids); + + static struct spi_driver m25p80_driver = { .driver = { .name = "m25p80", .owner = THIS_MODULE, }, - .id_table = spi_nor_ids, + .id_table = m25p_ids, .probe = m25p_probe, .remove = m25p_remove, diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c index b4f61c7fc16..058531044ce 100644 --- a/drivers/mtd/nand/omap_elm.c +++ b/drivers/mtd/nand/omap_elm.c @@ -115,7 +115,7 @@ int elm_config(struct device *dev, enum bch_ecc bch_type, if (!info) { dev_err(dev, "Unable to configure elm - device not probed?\n"); - return -ENODEV; + return -EPROBE_DEFER; } /* ELM cannot detect ECC errors for chunks > 1KB */ if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) { diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 8d659a2888d..d5269a26c83 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -881,7 +881,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) /* iterate the subnodes. 
*/ for_each_available_child_of_node(dev->of_node, np) { - const struct spi_device_id *id; char modalias[40]; /* skip the holes */ @@ -909,10 +908,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) if (of_modalias_node(np, modalias, sizeof(modalias)) < 0) goto map_failed; - id = spi_nor_match_id(modalias); - if (!id) - goto map_failed; - ret = of_property_read_u32(np, "spi-max-frequency", &q->clk_rate); if (ret < 0) @@ -921,7 +916,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) /* set the chip address for READID */ fsl_qspi_set_base_addr(q, nor); - ret = spi_nor_scan(nor, id, SPI_NOR_QUAD); + ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD); if (ret) goto map_failed; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index ae16aa2f688..c51ee52386a 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -28,6 +28,8 @@ #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) +static const struct spi_device_id *spi_nor_match_id(const char *name); + /* * Read the status register, returning its value in the location * Return the status register value. @@ -473,7 +475,7 @@ struct flash_info { * more nor chips. This current list focusses on newer chips, which * have been converging on command sets which including JEDEC ID. */ -const struct spi_device_id spi_nor_ids[] = { +static const struct spi_device_id spi_nor_ids[] = { /* Atmel -- some are (confusingly) marketed as "DataFlash" */ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, @@ -637,7 +639,6 @@ const struct spi_device_id spi_nor_ids[] = { { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, { }, }; -EXPORT_SYMBOL_GPL(spi_nor_ids); static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor) { @@ -911,9 +912,9 @@ static int spi_nor_check(struct spi_nor *nor) return 0; } -int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, - enum read_mode mode) +int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) { + const struct spi_device_id *id = NULL; struct flash_info *info; struct device *dev = nor->dev; struct mtd_info *mtd = nor->mtd; @@ -925,6 +926,10 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, if (ret) return ret; + id = spi_nor_match_id(name); + if (!id) + return -ENOENT; + info = (void *)id->driver_data; if (info->jedec_id) { @@ -1113,7 +1118,7 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, } EXPORT_SYMBOL_GPL(spi_nor_scan); -const struct spi_device_id *spi_nor_match_id(char *name) +static const struct spi_device_id *spi_nor_match_id(const char *name) { const struct spi_device_id *id = spi_nor_ids; @@ -1124,7 +1129,6 @@ const struct spi_device_id *spi_nor_match_id(char *name) } return NULL; } -EXPORT_SYMBOL_GPL(spi_nor_match_id); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 4706386b7d3..f9009be3f30 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -135,6 +135,7 @@ config MACVLAN config MACVTAP tristate "MAC-VLAN based tap driver" depends on MACVLAN + depends on INET help This adds a specialized tap character device driver that is based on the MAC-VLAN network interface, called macvtap. 
A macvtap device @@ -200,6 +201,7 @@ config RIONET_RX_SIZE config TUN tristate "Universal TUN/TAP device driver support" + depends on INET select CRC32 ---help--- TUN/TAP provides packet reception and transmission for user space diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9ac06cfe6b..a5115fb7cf3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) bond_update_slave_arr(bond, NULL); - } else if (do_failover) { + } + if (do_failover) { block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 02492d241e4..2cfe5012e4e 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, long rate; u64 v64; - /* Use CIA recommended sample points */ + /* Use CiA recommended sample points */ if (bt->sample_point) { sampl_pt = bt->sample_point; } else { @@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { - kfree_skb(priv->echo_skb[idx]); + dev_kfree_skb_any(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; } } diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index fca5482c09a..04f20dd3900 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,4 +1,5 @@ config CAN_M_CAN + depends on HAS_IOMEM tristate "Bosch M_CAN devices" ---help--- Say Y here if you want to support for Bosch M_CAN controller. diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 10d571eaed8..d7bc462aafd 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -105,14 +105,36 @@ enum m_can_mram_cfg { MRAM_CFG_NUM, }; +/* Fast Bit Timing & Prescaler Register (FBTP) */ +#define FBTR_FBRP_MASK 0x1f +#define FBTR_FBRP_SHIFT 16 +#define FBTR_FTSEG1_SHIFT 8 +#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) +#define FBTR_FTSEG2_SHIFT 4 +#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) +#define FBTR_FSJW_SHIFT 0 +#define FBTR_FSJW_MASK 0x3 + /* Test Register (TEST) */ #define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ -#define CCCR_TEST BIT(7) -#define CCCR_MON BIT(5) -#define CCCR_CCE BIT(1) -#define CCCR_INIT BIT(0) +#define CCCR_TEST BIT(7) +#define CCCR_CMR_MASK 0x3 +#define CCCR_CMR_SHIFT 10 +#define CCCR_CMR_CANFD 0x1 +#define CCCR_CMR_CANFD_BRS 0x2 +#define CCCR_CMR_CAN 0x3 +#define CCCR_CME_MASK 0x3 +#define CCCR_CME_SHIFT 8 +#define CCCR_CME_CAN 0 +#define CCCR_CME_CANFD 0x1 +#define CCCR_CME_CANFD_BRS 0x2 +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) +#define CCCR_CANFD 0x10 /* Bit Timing & Prescaler Register (BTP) */ #define BTR_BRP_MASK 0x3ff @@ -204,6 +226,7 @@ enum m_can_mram_cfg { /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ #define M_CAN_RXESC_8BYTES 0x0 +#define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ #define TXBC_NDTB_OFF 16 @@ -211,6 +234,7 @@ enum m_can_mram_cfg { /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 +#define TXESC_TBDS_64BYTES 0x7 /* Tx Event FIFO Con.guration (TXEFC) */ #define TXEFC_EFS_OFF 16 @@ -219,11 +243,11 @@ enum m_can_mram_cfg { /* Message RAM Configuration (in bytes) */ 
#define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 -#define RXF0_ELEMENT_SIZE 16 -#define RXF1_ELEMENT_SIZE 16 +#define RXF0_ELEMENT_SIZE 72 +#define RXF1_ELEMENT_SIZE 72 #define RXB_ELEMENT_SIZE 16 #define TXE_ELEMENT_SIZE 8 -#define TXB_ELEMENT_SIZE 16 +#define TXB_ELEMENT_SIZE 72 /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 @@ -231,11 +255,17 @@ enum m_can_mram_cfg { #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) /* Rx Buffer Element */ +/* R0 */ #define RX_BUF_ESI BIT(31) #define RX_BUF_XTD BIT(30) #define RX_BUF_RTR BIT(29) +/* R1 */ +#define RX_BUF_ANMF BIT(31) +#define RX_BUF_EDL BIT(21) +#define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ +/* R0 */ #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) @@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, if (enable) { /* enable m_can configuration */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { @@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) m_can_write(priv, M_CAN_ILE, 0x0); } -static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, - u32 rxfs) +static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { + struct net_device_stats *stats = &dev->stats; struct m_can_priv *priv = netdev_priv(dev); - u32 id, fgi; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id, fgi, dlc; + int i; /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + if (dlc & RX_BUF_EDL) + skb = alloc_canfd_skb(dev, &cf); + else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dlc & RX_BUF_EDL) + cf->len = can_dlc2len((dlc >> 16) & 0x0F); + else + cf->len = get_can_dlc((dlc >> 16) & 0x0F); + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_RTR) { + if (id & RX_BUF_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(dev, "ESI Error\n"); + } + + if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); - *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(0)); - *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(1)); + if (dlc & RX_BUF_BRS) + cf->flags |= CANFD_BRS; + + for (i = 0; i < cf->len; i += 4) + *(u32 *)(cf->data + i) = + m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ m_can_write(priv, M_CAN_RXF0A, fgi); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + + netif_receive_skb(skb); } static int m_can_do_rx_poll(struct net_device *dev, int quota) { struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *frame; u32 pkts = 0; u32 rxfs; @@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) if (rxfs & RXFS_RFL) netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - skb = alloc_can_skb(dev, &frame); - if (!skb) { - stats->rx_dropped++; - return pkts; - } - - m_can_read_fifo(dev, frame, rxfs); - - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - - netif_receive_skb(skb); + 
m_can_read_fifo(dev, rxfs); quota--; pkts++; @@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev, return 1; } +static int __m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_priv *priv = netdev_priv(dev); + unsigned int ecr; + + ecr = m_can_read(priv, M_CAN_ECR); + bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; + bec->txerr = ecr & ECR_TEC_MASK; + + return 0; +} + static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); - unsigned int ecr; int err; err = clk_prepare_enable(priv->hclk); @@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, return err; } - ecr = m_can_read(priv, M_CAN_ECR); - bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = ecr & ECR_TEC_MASK; + __m_can_get_berr_counter(dev, bec); clk_disable_unprepare(priv->cclk); clk_disable_unprepare(priv->hclk); @@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev, if (unlikely(!skb)) return 0; - m_can_get_berr_counter(dev, &bec); + __m_can_get_berr_counter(dev, &bec); switch (new_state) { case CAN_STATE_ERROR_ACTIVE: @@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr) if ((psr & PSR_EP) && (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if ((psr & PSR_BO) && (priv->can.state != CAN_STATE_BUS_OFF)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error bus off state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); } @@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_BEU) + if (irqstatus & IR_ELO) netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); @@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const m_can_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev) reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); m_can_write(priv, M_CAN_BTP, reg_btp); - netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | + (tseg1 << FBTR_FTSEG1_SHIFT) | + (tseg2 << FBTR_FTSEG2_SHIFT); + m_can_write(priv, M_CAN_FBTP, reg_btp); + } return 0; } @@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev) 
m_can_config_endisable(priv, true); - /* RX Buffer/FIFO Element Size 8 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + /* RX Buffer/FIFO Element Size 64 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); @@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | priv->mcfg[MRAM_TXB].off); - /* only support 8 bytes firstly */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + /* support 64 bytes payload */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | priv->mcfg[MRAM_TXE].off); @@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev) RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_TEST | CCCR_MON); + cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; @@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev) test |= TEST_LBCK; } + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); @@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void) priv->dev = dev; priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING; + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD; return dev; } @@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); - struct can_frame *cf = (struct can_frame *)skb->data; - u32 id; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 id, cccr; + int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, /* message ram configuration */ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); + + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + can_put_echo_skb(skb, dev, 0); + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; + else + cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; + } else { + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + } + m_can_write(priv, M_CAN_CCCR, cccr); + } + /* enable first TX buffer to start transfer */ m_can_write(priv, M_CAN_TXBTIE, 0x1); m_can_write(priv, M_CAN_TXBAR, 0x1); @@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static int register_m_can_dev(struct net_device 
*dev) @@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev, struct resource *res; void __iomem *addr; u32 out_val[MRAM_CFG_LEN]; - int ret; + int i, start, end, ret; /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); @@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev, priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = priv->mcfg[MRAM_SIDF].off; + end = priv->mcfg[MRAM_TXB].off + + priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + for (i = start; i < end; i += 4) + writel(0x0, priv->mram_base + i); + return 0; } diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 1abe133d159..9718248e55f 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 8ff3424d514..15c00faeec6 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, struct net_device *dev; struct sja1000_priv *priv; struct kvaser_pci *board; - int err, init_step; + int err; dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); if (dev == NULL) @@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, if (channel == 0) { board->xilinx_ver = ioread8(board->res_addr + XILINX_VERINT) >> 4; - init_step = 2; /* Assert PTADR# - we're in passive mode so the other bits are not important */ @@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; - init_step = 4; - dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", priv->reg_base, board->conf_addr, dev->irq); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 00f2534dde7..29d3f0938eb 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb) if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; - u8 msg_count, again, start; + u8 msg_count, start; msg_count = ibuf[0] & ~0x80; - again = ibuf[0] & 0x80; start = CPC_HEADER_SIZE; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index b7c9e8b1146..c063a54ab8d 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) { struct esd_tx_urb_context *context = urb->context; struct esd_usb2_net_priv *priv; - struct esd_usb2 *dev; struct net_device *netdev; size_t size = sizeof(struct esd_usb2_msg); @@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) priv = context->priv; netdev = priv->netdev; - dev = priv->usb2; /* free up our allocated buffer */ usb_free_coherent(urb->dev, size, @@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) } } unlink_all_urbs(dev); + 
kfree(dev); } } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 04b0f84612f..009acc8641f 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5e8b5609c06..8a998e3884c 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 err, reg_msr, reg_sr_mask; + u32 reg_msr, reg_sr_mask; + int err; unsigned long timeout; /* Check if it is in reset mode */ @@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, + .ndo_change_mtu = can_change_mtu, }; /** diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b9625968daa..4f4c2a7888e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; + core_writel(priv, reg, CORE_WATCHDOG_CTRL); + + do { + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + if (!(reg & SOFTWARE_RESET)) + break; + + usleep_range(1000, 2000); + } while (timeout-- > 0); + + if (timeout == 0) + return -ETIMEDOUT; + + return 0; +} + static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; @@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); - return -ENODEV; + ret = -ENOMEM; + goto out_unmap; } base++; } + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("unable to software reset switch: %d\n", ret); + goto out_unmap; + } + /* Disable all interrupts and request them */ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); @@ -484,7 +514,8 @@ out_free_irq0: out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - iounmap(*base); + if (*base) + iounmap(*base); base++; } return ret; @@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) return 0; } -static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 1000; - u32 reg; - - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; - core_writel(priv, reg, CORE_WATCHDOG_CTRL); - - do { - reg = core_readl(priv, CORE_WATCHDOG_CTRL); - if (!(reg & SOFTWARE_RESET)) - break; - - usleep_range(1000, 2000); - } while (timeout-- > 0); - - if (timeout == 0) - return -ETIMEDOUT; - - return 0; -} - static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = ds_to_priv(ds); diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 1020a7af67c..78d8e876f3a 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -395,7 +395,7 
@@ static int mv88e6171_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6171_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_DSA, + .tag_protocol = DSA_TAG_PROTO_EDSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6171_probe, .setup = mv88e6171_setup, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 29554992215..2349ea97025 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int rxcsum, rxvlan, rxvlan_filter; + netdev_features_t rxcsum, rxvlan, rxvlan_filter; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; @@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) struct skb_shared_hwtstamps *hwtstamps; unsigned int incomplete, error, context_next, context; unsigned int len, put_len, max_len; - int received = 0; + unsigned int received = 0; + int packet_count = 0; DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); @@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) rdata = XGBE_GET_DESC_DATA(ring, ring->cur); packet = &ring->packet_data; - while (received < budget) { + while (packet_count < budget) { DBGPR(" cur = %d\n", ring->cur); /* First time in loop see if we need to restore state */ @@ -1662,7 +1663,7 @@ read_again: if (packet->errors) DBGPR("Error in received packet\n"); dev_kfree_skb(skb); - continue; + goto next_packet; } if (!context) { @@ -1677,7 +1678,7 @@ read_again: } dev_kfree_skb(skb); - continue; + goto next_packet; } memcpy(skb_tail_pointer(skb), rdata->skb->data, put_len); @@ -1694,7 +1695,7 @@ read_again: /* Stray Context Descriptor? 
*/ if (!skb) - continue; + goto next_packet; /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; @@ -1705,7 +1706,7 @@ read_again: if (skb->len > max_len) { DBGPR("packet length exceeds configured MTU\n"); dev_kfree_skb(skb); - continue; + goto next_packet; } #ifdef XGMAC_ENABLE_RX_PKT_DUMP @@ -1739,6 +1740,9 @@ read_again: netdev->last_rx = jiffies; napi_gro_receive(&pdata->napi, skb); + +next_packet: + packet_count++; } /* Check if we need to save state before leaving */ @@ -1752,9 +1756,9 @@ read_again: rdata->state.error = error; } - DBGPR("<--xgbe_rx_poll: received = %d\n", received); + DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); - return received; + return packet_count; } static int xgbe_poll(struct napi_struct *napi, int budget) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 63ea1941e97..7ba83ffb08a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) +{ + if (!ioread32(p->ring_csr_addr + CLKEN_ADDR)) + return false; + + if (ioread32(p->ring_csr_addr + SRST_ADDR)) + return false; + + return true; +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { u32 val; + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); @@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata) val |= SCAN_AUTO_INCR; MGMT_CLOCK_SEL_SET(&val, 1); xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + + return 0; } static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 38558584080..ec45f3256f0 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -104,6 +104,9 @@ enum xgene_enet_rm { #define BLOCK_ETH_MAC_OFFSET 0x0000 #define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 +#define CLKEN_ADDR 0xc208 +#define SRST_ADDR 0xc200 + #define MAC_ADDR_REG_OFFSET 0x00 #define MAC_COMMAND_REG_OFFSET 0x04 #define MAC_WRITE_REG_OFFSET 0x08 @@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); extern struct xgene_mac_ops xgene_gmac_ops; extern struct xgene_port_ops xgene_gport_ops; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 3c208cc6f6b..12366969618 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_desc_ring *buf_pool = NULL; - u8 cpu_bufnum = 0, eth_bufnum = 0; - u8 bp_bufnum = 0x20; - u16 ring_id, ring_num = 0; + u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM; + u8 bp_bufnum = START_BP_BUFNUM; + u16 ring_id, ring_num = START_RING_NUM; int 
ret; /* allocate rx descriptor ring */ @@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) u16 dst_ring_num; int ret; - pdata->port_ops->reset(pdata); + ret = pdata->port_ops->reset(pdata); + if (ret) + return ret; ret = xgene_enet_create_desc_rings(ndev); if (ret) { @@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev) return ret; err: + unregister_netdev(ndev); free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 874e5a01161..f9958fae6ff 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -38,6 +38,9 @@ #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 +#define START_ETH_BUFNUM 2 +#define START_BP_BUFNUM 0x22 +#define START_RING_NUM 8 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -83,7 +86,7 @@ struct xgene_mac_ops { }; struct xgene_port_ops { - void (*reset)(struct xgene_enet_pdata *pdata); + int (*reset)(struct xgene_enet_pdata *pdata); void (*cle_bypass)(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id); void (*shutdown)(struct xgene_enet_pdata *pdata); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index e6d24c21019..f5d4f68c288 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; u32 data; - int i; + int i = 0; xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); - for (i = 0; i < 10 && data != ~0U ; i++) { + do { usleep_range(100, 110); data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); - } - - if (data != ~0U) { - netdev_err(ndev, "Failed to release memory from shutdown\n"); - return -ENODEV; - } + if (data == ~0U) + return 0; + } while (++i < 10); - return 0; + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) @@ -313,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) xgene_sgmac_rxtx(p, TX_EN, false); } -static void xgene_enet_reset(struct xgene_enet_pdata *p) +static int xgene_enet_reset(struct xgene_enet_pdata *p) { + if (!xgene_ring_mgr_init(p)) + return -ENODEV; + clk_prepare_enable(p->clk); clk_disable_unprepare(p->clk); clk_prepare_enable(p->clk); xgene_enet_ecc_init(p); xgene_enet_config_ring_if_assoc(p); + + return 0; } static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 67d07206b3c..a18a9d1f114 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); } -static void xgene_enet_reset(struct xgene_enet_pdata *pdata) +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); xgene_enet_ecc_init(pdata); xgene_enet_config_ring_if_assoc(pdata); + + return 0; } static 
void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 9ae36979bde..531bb7c5753 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* We just need one DMA descriptor which is DMA-able, since writing to * the port will allocate a new descriptor in its internal linked-list */ - p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, + GFP_KERNEL); if (!p) { netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); return -ENOMEM; @@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, if (!(reg & TDMA_DISABLED)) netdev_warn(priv->netdev, "TDMA not stopped!\n"); + /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could + * fail, so by checking this pointer we know whether the TX ring was + * fully initialized or not. + */ + if (!ring->cbs) + return; + napi_disable(&ring->napi); netif_napi_del(&ring->napi); @@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, ring->cbs = NULL; if (ring->desc_dma) { - dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma); + dma_free_coherent(kdev, sizeof(struct dma_desc), + ring->desc_cpu, ring->desc_dma); ring->desc_dma = 0; } ring->size = 0; @@ -1397,6 +1406,9 @@ static void bcm_sysport_netif_start(struct net_device *dev) /* Enable NAPI */ napi_enable(&priv->napi); + /* Enable RX interrupt and TX ring full interrupt */ + intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); + phy_start(priv->phydev); /* Enable TX interrupts for the 32 TXQs */ @@ -1499,9 +1511,6 @@ static int bcm_sysport_open(struct net_device *dev) if (ret) goto out_free_rx_ring; - /* Enable RX interrupt and TX ring full interrupt */ - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - /* Turn on TDMA */ ret = tdma_enable_set(priv, 1); if (ret) @@ -1858,6 +1867,8 @@ static int bcm_sysport_resume(struct device *d) if (!netif_running(dev)) return 0; + umac_reset(priv); + /* We may have been suspended and never received a WOL event that * would turn off MPD detection, take care of that now */ @@ -1885,9 +1896,6 @@ static int bcm_sysport_resume(struct device *d) netif_device_attach(dev); - /* Enable RX interrupt and TX ring full interrupt */ - intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - /* RX pipe enable */ topctrl_writel(priv, 0, RX_FLUSH_CNTL); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 23f23c97c2a..f05fab65d78 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, if (l5_cid >= MAX_CM_SK_TBL_SZ) break; - rcu_read_lock(); if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { rc = -ENODEV; - rcu_read_unlock(); break; } csk = &cp->csk_tbl[l5_cid]; @@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, } } csk_put(csk); - rcu_read_unlock(); rc = 0; } } @@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); mutex_lock(&cnic_lock); - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if 
(rcu_access_pointer(cp->ulp_ops[ulp_type])) { RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); cnic_put(dev); } else { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fdc9ec09e45..da1a2500c91 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } + /* Re-configure the port multiplexer towards the PHY device */ + bcmgenet_mii_config(priv->dev, false); + + phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, + priv->phy_interface); + bcmgenet_netif_start(dev); return 0; @@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev) bcmgenet_netif_stop(dev); + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(priv->phydev); + /* Disable MAC receive */ umac_enable_set(priv, CMD_RX_EN, false); @@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); + bcmgenet_mii_config(priv->dev, false); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbf524ea3b1..31b2da5f9b8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_mii_reset(struct net_device *dev); +void bcmgenet_mii_setup(struct net_device *dev); /* Wake-on-LAN routines */ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9ff799a9f80..933cd7e7cd3 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ -static void bcmgenet_mii_setup(struct net_device *dev) +void bcmgenet_mii_setup(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); } -int bcmgenet_mii_config(struct net_device *dev) +int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev) return -EINVAL; } - dev_info(kdev, "configuring instance for %s\n", phy_name); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); return 0; } @@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. 
*/ - ret = bcmgenet_mii_config(dev); + ret = bcmgenet_mii_config(dev, true); if (ret) { phy_disconnect(priv->phydev); return ret; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index dbb41c1923e..77f8f836cbb 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp) if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + if (tnapi->prodring.rx_std && + tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 8edf0f5bd67..4fe33606f37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -60,6 +60,43 @@ void cxgb4_dcb_version_init(struct net_device *dev) dcb->dcb_version = FW_PORT_DCB_VER_AUTO; } +static void cxgb4_dcb_cleanup_apps(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + struct port_dcb_info *dcb = &pi->dcb; + struct dcb_app app; + int i, err; + + /* zero priority implies remove */ + app.priority = 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + /* Check if app list is exhausted */ + if (!dcb->app_priority[i].protocolid) + break; + + app.protocol = dcb->app_priority[i].protocolid; + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.priority = dcb->app_priority[i].user_prio_map; + app.selector = dcb->app_priority[i].sel_field + 1; + err = dcb_ieee_delapp(dev, &app); + } else { + app.selector = !!(dcb->app_priority[i].sel_field); + err = dcb_setapp(dev, &app); + } + + if (err) { + dev_err(adap->pdev_dev, + "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n", + dcb_ver_array[dcb->dcb_version], app.selector, + app.protocol, -err); + break; + } + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -80,14 +117,17 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, /* we're going to use Host DCB */ dcb->state = CXGB4_DCB_STATE_HOST; dcb->supported = CXGB4_DCBX_HOST_SUPPORT; - dcb->enabled = 1; break; } case CXGB4_DCB_INPUT_FW_ENABLED: { /* we're going to use Firmware DCB */ dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; - dcb->supported = CXGB4_DCBX_FW_SUPPORT; + dcb->supported = DCB_CAP_DCBX_LLD_MANAGED; + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + else + dcb->supported |= DCB_CAP_DCBX_VER_CEE; break; } @@ -145,6 +185,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, * state. We need to reset back to a ground state * of incomplete. */ + cxgb4_dcb_cleanup_apps(dev); cxgb4_dcb_state_init(dev); dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; dcb->supported = CXGB4_DCBX_FW_SUPPORT; @@ -349,6 +390,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled) { struct port_info *pi = netdev2pinfo(dev); + /* If DCBx is host-managed, dcb is enabled by outside lldp agents */ + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) { + pi->dcb.enabled = enabled; + return 0; + } + /* Firmware doesn't provide any mechanism to control the DCB state. 
*/ if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) @@ -394,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc, *up_tc_map = (1 << tc); /* prio_type is link strict */ - *prio_type = 0x2; + if (*pgid != 0xF) + *prio_type = 0x2; } static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 1); } @@ -409,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { - return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0); + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 0); } static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, @@ -419,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, struct fw_port_cmd pcmd; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = pi->adapter; + int fw_tc = 7 - tc; u32 _pgid; int err; @@ -437,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, } _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); - _pgid &= ~(0xF << (tc * 4)); - _pgid |= pgid << (tc * 4); + _pgid &= ~(0xF << (fw_tc * 4)); + _pgid |= pgid << (fw_tc * 4); pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); @@ -551,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else - *pfccfg = (pi->dcb.pfcen >> priority) & 1; + *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1; } /* Enable/disable Priority Pause Frames for the specified Traffic Class @@ -576,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; if (pfccfg) - pcmd.u.dcb.pfc.pfcen |= (1 << priority); + pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority)); else - pcmd.u.dcb.pfc.pfcen &= (~(1 << priority)); + pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority))); err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); if (err != FW_PORT_DCB_CFG_SUCCESS) { @@ -833,11 +886,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* Return whether IEEE Data Center Bridging has been negotiated. 
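Aside on the cxgb4 DCB hunks above: they hinge on the firmware laying out traffic classes and priorities in reverse order — TC 0 occupies the most-significant nibble of the 32-bit PGID word, and the PFC enable byte likewise keeps priority 0 at bit 7, hence the recurring "7 - tc" / "7 - priority" arithmetic. A minimal standalone sketch of that index math follows; the helper names and constants are illustrative, not the driver's own.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: 8 traffic classes, each with a 4-bit page-group id
 * packed into one 32-bit word.  The firmware stores TC 0 in the most
 * significant nibble, so host TC 'tc' maps to firmware slot (7 - tc).
 */
static uint32_t pgid_set(uint32_t pgid_word, int tc, uint32_t pg)
{
	int fw_tc = 7 - tc;                  /* TC 0 lives at the MSB nibble */

	pgid_word &= ~(0xFu << (fw_tc * 4)); /* clear the old 4-bit field   */
	pgid_word |= (pg & 0xFu) << (fw_tc * 4);
	return pgid_word;
}

static uint32_t pgid_get(uint32_t pgid_word, int tc)
{
	return (pgid_word >> ((7 - tc) * 4)) & 0xFu;
}

/* PFC enable bits use the same reversal: priority 0 is bit 7. */
static uint8_t pfcen_set(uint8_t pfcen, int priority, int enable)
{
	if (enable)
		return pfcen | (1u << (7 - priority));
	return pfcen & ~(1u << (7 - priority));
}

int main(void)
{
	uint32_t pgid = 0;

	pgid = pgid_set(pgid, 0, 0x3);       /* host TC 0 -> firmware nibble 7 */
	pgid = pgid_set(pgid, 7, 0xA);       /* host TC 7 -> firmware nibble 0 */
	printf("pgid word = 0x%08x, tc0=%u tc7=%u\n",
	       pgid, pgid_get(pgid, 0), pgid_get(pgid, 7));
	printf("pfcen with prio 1 enabled = 0x%02x\n", pfcen_set(0, 1, 1));
	return 0;
}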
*/ -static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) +static inline int +cxgb4_ieee_negotiation_complete(struct net_device *dev, + enum cxgb4_dcb_fw_msgs dcb_subtype) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; + return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -850,7 +908,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) { int prio; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -872,7 +930,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) { int ret; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -1024,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); for (i = 0; i < CXGB4_MAX_PRIORITY; i++) - pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF; + pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF; INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3f60070f251..8520d5529df 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev) #ifdef CONFIG_CHELSIO_T4_DCB struct port_info *pi = netdev_priv(dev); - return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); #else return 0; #endif @@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); + spin_lock_init(&adapter->win0_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 5e1b314e11a..39f2b13e66c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_conm_ctrl; + u32 sge_control, sge_control2, sge_conm_ctrl; + unsigned int ingpadboundary, ingpackboundary; int ret, egress_threshold; /* @@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap) sge_control = t4_read_reg(adap, SGE_CONTROL); s->pktshift = PKTSHIFT_GET(sge_control); s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; - s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + - X_INGPADBOUNDARY_SHIFT); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. 
+ */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adap->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } if (adap->flags & USING_SOFT_PARAMS) ret = t4_sge_init_soft(adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a9d9d74e4f0..163a2a14948 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF6(sge_hps) | HOSTPAGESIZEPF7(sge_hps)); - t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY_MASK | - EGRSTATUSPAGESIZE_MASK, - INGPADBOUNDARY(fl_align_log - 5) | - EGRSTATUSPAGESIZE(stat_len != 64)); - + if (is_t4(adap->params.chip)) { + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(fl_align_log - 5) | + EGRSTATUSPAGESIZE(stat_len != 64)); + } else { + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + * + * T5 has a different interpretation of the "0" value for the + * Packing Boundary. This corresponds to 16 bytes instead of + * the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes. + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY_MASK | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | + EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(fl_align_log - + INGPACKBOUNDARY_SHIFT_X)); + } /* * Adjust various SGE Free List Host Buffer Sizes. 
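For readers following the SGE changes above: T4 derives the free-list alignment from a single padding-boundary field, while T5 adds a separate packing-boundary field whose encoded value 0 means 16 bytes rather than the otherwise-expected 32, and the effective alignment is the maximum of the two. The standalone sketch below reproduces that arithmetic with simplified field layouts; the shift and mask values are stand-ins, not the real SGE_CONTROL/SGE_CONTROL2 encodings.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the register field layout -- illustrative only. */
#define PADBOUNDARY_SHIFT   4      /* where the 3-bit pad-boundary field sits  */
#define PADBOUNDARY_MASK    0x7u
#define PACKBOUNDARY_SHIFT  16     /* where the 3-bit pack-boundary field sits */
#define PACKBOUNDARY_MASK   0x7u

#define X_INGPADBOUNDARY_SHIFT   5 /* encoded value n means 1 << (n + 5) bytes */
#define INGPACKBOUNDARY_SHIFT_X  5 /* same rule for packing, except 0 == 16B   */

static unsigned int fl_align(uint32_t sge_control, uint32_t sge_control2,
			     int is_t4)
{
	unsigned int pad, pack;

	pad = 1u << (((sge_control >> PADBOUNDARY_SHIFT) & PADBOUNDARY_MASK) +
		     X_INGPADBOUNDARY_SHIFT);
	if (is_t4)
		return pad;             /* T4: single field decides alignment */

	pack = (sge_control2 >> PACKBOUNDARY_SHIFT) & PACKBOUNDARY_MASK;
	if (pack == 0)
		pack = 16;              /* T5 quirk: encoded 0 means 16 bytes */
	else
		pack = 1u << (pack + INGPACKBOUNDARY_SHIFT_X);

	return pad > pack ? pad : pack; /* effective alignment is the max */
}

int main(void)
{
	/* pad field = 0 (32 bytes), pack field = 1 (64 bytes) */
	uint32_t ctl  = 0u << PADBOUNDARY_SHIFT;
	uint32_t ctl2 = 1u << PACKBOUNDARY_SHIFT;

	printf("T4 alignment: %u bytes\n", fl_align(ctl, 0, 1));
	printf("T5 alignment: %u bytes\n", fl_align(ctl, ctl2, 0));
	return 0;
}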
* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index a1024db5dc1..8d2de1006b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -95,6 +95,7 @@ #define X_INGPADBOUNDARY_SHIFT 5 #define SGE_CONTROL 0x1008 +#define SGE_CONTROL2_A 0x1124 #define DCASYSTYPE 0x00080000U #define RXPKTCPLMODE_MASK 0x00040000U #define RXPKTCPLMODE_SHIFT 18 @@ -106,6 +107,7 @@ #define PKTSHIFT_SHIFT 10 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_32B_X 0 #define INGPCIEBOUNDARY_MASK 0x00000380U #define INGPCIEBOUNDARY_SHIFT 7 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) @@ -114,6 +116,14 @@ #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ >> INGPADBOUNDARY_SHIFT) +#define INGPACKBOUNDARY_16B_X 0 +#define INGPACKBOUNDARY_SHIFT_X 5 + +#define INGPACKBOUNDARY_S 16 +#define INGPACKBOUNDARY_M 0x7U +#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) +#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ + & INGPACKBOUNDARY_M) #define EGRPCIEBOUNDARY_MASK 0x0000000eU #define EGRPCIEBOUNDARY_SHIFT 1 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 68eaa9c88c7..3d06e77d712 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -299,6 +299,14 @@ struct sge { u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + /* Decoded Adapter Parameters. + */ + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ + /* * Reverse maps from Absolute Queue IDs to associated queue pointers. * The absolute Queue IDs are in a compact range which start at a diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index bfa398d9182..0b42bddaf28 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0x480d), /* T480-cr */ CH_DEVICE(0x480e), /* T440-lp-cr */ CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), - CH_DEVICE(0x4880), + CH_DEVICE(0x4881), + CH_DEVICE(0x4882), + CH_DEVICE(0x4883), + CH_DEVICE(0x4884), + CH_DEVICE(0x4885), + CH_DEVICE(0x4886), + CH_DEVICE(0x4887), + CH_DEVICE(0x4888), CH_DEVICE(0x5801), /* T520-cr */ CH_DEVICE(0x5802), /* T522-cr */ CH_DEVICE(0x5803), /* T540-cr */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 85036e6b42c..fdd078d7d82 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -51,14 +51,6 @@ #include "../cxgb4/t4_msg.h" /* - * Decoded Adapter Parameters. 
- */ -static u32 FL_PG_ORDER; /* large page allocation size */ -static u32 STAT_LEN; /* length of status page at ring end */ -static u32 PKTSHIFT; /* padding between CPL and packet data */ -static u32 FL_ALIGN; /* response queue message alignment */ - -/* * Constants ... */ enum { @@ -102,12 +94,6 @@ enum { MAX_TIMER_TX_RECLAIM = 100, /* - * An FL with <= FL_STARVE_THRES buffers is starving and a periodic - * timer will attempt to refill it. - */ - FL_STARVE_THRES = 4, - - /* * Suspend an Ethernet TX queue with fewer available descriptors than * this. We always want to have room for a maximum sized packet: * inline immediate data + MAX_SKB_FRAGS. This is the same as @@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) /** * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter * @fl: the Free List * * Tests specified Free List to see whether the number of buffers * available to the hardware has falled below our "starvation" * threshold. */ -static inline bool fl_starving(const struct sge_fl *fl) +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) { - return fl->avail - fl->pend_cred <= FL_STARVE_THRES; + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; } /** @@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter, /** * get_buf_size - return the size of an RX Free List buffer. + * @adapter: pointer to the associated adapter * @sdesc: pointer to the software buffer descriptor */ -static inline int get_buf_size(const struct rx_sw_desc *sdesc) +static inline int get_buf_size(const struct adapter *adapter, + const struct rx_sw_desc *sdesc) { - return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) - ? (PAGE_SIZE << FL_PG_ORDER) - : PAGE_SIZE; + const struct sge *s = &adapter->sge; + + return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); } /** @@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); put_page(sdesc->page); sdesc->page = NULL; if (++fl->cidx == fl->size) @@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), - get_buf_size(sdesc), PCI_DMA_FROMDEVICE); + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); sdesc->page = NULL; if (++fl->cidx == fl->size) fl->cidx = 0; @@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz) static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, int n, gfp_t gfp) { + struct sge *s = &adapter->sge; struct page *page; dma_addr_t dma_addr; unsigned int cred = fl->avail; @@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, * If we don't support large pages, drop directly into the small page * allocation code. 
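The cxgb4vf refill path being reworked above keeps one free list serving two buffer sizes: an ordinary page and a compound "large" page of order fl_pg_order, with the choice recorded as a flag bit (RX_LARGE_BUF) in the buffer's DMA address. A minimal user-space sketch of that bookkeeping, assuming 4 KB pages and using illustrative names:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SHIFT 12                     /* assume 4 KB pages          */
#define PAGE_SIZE       (1u << PAGE_SIZE_SHIFT)
#define RX_LARGE_BUF    0x1u                   /* flag stashed in addr bit 0 */

struct rx_sw_desc {                            /* illustrative software desc */
	uint64_t dma_addr;                     /* address plus flag bit      */
};

/* Buffer size depends only on the flag recorded when the list was filled. */
static unsigned int get_buf_size(const struct rx_sw_desc *d,
				 unsigned int fl_pg_order)
{
	if (fl_pg_order > 0 && (d->dma_addr & RX_LARGE_BUF))
		return PAGE_SIZE << fl_pg_order;   /* compound "large" page  */
	return PAGE_SIZE;                          /* ordinary single page   */
}

int main(void)
{
	struct rx_sw_desc small = { .dma_addr = 0x10000000 };
	struct rx_sw_desc large = { .dma_addr = 0x20000000 | RX_LARGE_BUF };
	unsigned int fl_pg_order = 2;              /* e.g. 16 KB large buffers */

	printf("small buffer: %u bytes\n", get_buf_size(&small, fl_pg_order));
	printf("large buffer: %u bytes\n", get_buf_size(&large, fl_pg_order));
	return 0;
}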
*/ - if (FL_PG_ORDER == 0) + if (s->fl_pg_order == 0) goto alloc_small_pages; while (n) { page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - FL_PG_ORDER); + s->fl_pg_order); if (unlikely(!page)) { /* * We've failed inour attempt to allocate a "large @@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, fl->large_alloc_failed++; break; } - poison_buf(page, PAGE_SIZE << FL_PG_ORDER); + poison_buf(page, PAGE_SIZE << s->fl_pg_order); dma_addr = dma_map_page(adapter->pdev_dev, page, 0, - PAGE_SIZE << FL_PG_ORDER, + PAGE_SIZE << s->fl_pg_order, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { /* @@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, * because DMA mapping resources are typically * critical resources once they become scarse. */ - __free_pages(page, FL_PG_ORDER); + __free_pages(page, s->fl_pg_order); goto out; } dma_addr |= RX_LARGE_BUF; @@ -693,7 +689,7 @@ out: fl->pend_cred += cred; ring_fl_db(adapter, fl); - if (unlikely(fl_starving(fl))) { + if (unlikely(fl_starving(adapter, fl))) { smp_wmb(); set_bit(fl->cntxt_id, adapter->sge.starving_fl); } @@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl) static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, const struct cpl_rx_pkt *pkt) { + struct adapter *adapter = rxq->rspq.adapter; + struct sge *s = &adapter->sge; int ret; struct sk_buff *skb; @@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, return; } - copy_frags(skb, gl, PKTSHIFT); - skb->len = gl->tot_len - PKTSHIFT; + copy_frags(skb, gl, s->pktshift); + skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, bool csum_ok = pkt->csum_calc && !pkt->err_vec && (rspq->netdev->features & NETIF_F_RXCSUM); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; /* * If this is a good TCP packet and we have Generic Receive Offload @@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, rxq->stats.rx_drops++; return 0; } - __skb_pull(skb, PKTSHIFT); + __skb_pull(skb, s->pktshift); skb->protocol = eth_type_trans(skb, rspq->netdev); skb_record_rx_queue(skb, rspq->idx); rxq->stats.pkts++; @@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq) static int process_responses(struct sge_rspq *rspq, int budget) { struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; int budget_left = budget; while (likely(budget_left)) { @@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) BUG_ON(frag >= MAX_SKB_FRAGS); BUG_ON(rxq->fl.avail == 0); sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; - bufsz = get_buf_size(sdesc); + bufsz = get_buf_size(adapter, sdesc); fp->page = sdesc->page; fp->offset = rspq->offset; fp->size = min(bufsz, len); @@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) */ ret = rspq->handler(rspq, rspq->cur_desc, &gl); if (likely(ret == 0)) - rspq->offset += ALIGN(fp->size, FL_ALIGN); + rspq->offset += ALIGN(fp->size, s->fl_align); else restore_rx_bufs(&gl, &rxq->fl, frag); } else if (likely(rsp_type == RSP_TYPE_CPL)) { @@ -1963,7 +1965,7 @@ 
static void sge_rx_timer_cb(unsigned long data) * schedule napi but the FL is no longer starving. * No biggie. */ - if (fl_starving(fl)) { + if (fl_starving(adapter, fl)) { struct sge_eth_rxq *rxq; rxq = container_of(fl, struct sge_eth_rxq, fl); @@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd) { + struct sge *s = &adapter->sge; struct port_info *pi = netdev_priv(dev); struct fw_iq_cmd cmd, rpl; int ret, iqandst, flsz = 0; @@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, fl->size = roundup(fl->size, FL_PER_EQ_UNIT); fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), - &fl->addr, &fl->sdesc, STAT_LEN); + &fl->addr, &fl->sdesc, s->stat_len); if (!fl->desc) { ret = -ENOMEM; goto err; @@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * free list ring) in Egress Queue Units. */ flsz = (fl->size / FL_PER_EQ_UNIT + - STAT_LEN / EQ_UNIT); + s->stat_len / EQ_UNIT); /* * Fill in all the relevant firmware Ingress Queue Command @@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *devq, unsigned int iqid) { + struct sge *s = &adapter->sge; int ret, nentries; struct fw_eq_eth_cmd cmd, rpl; struct port_info *pi = netdev_priv(dev); @@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, * Calculate the size of the hardware TX Queue (including the Status * Page on the end of the TX Queue) in units of TX Descriptors. */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); /* * Allocate the hardware ring for the TX ring (with space for its @@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); if (!txq->q.desc) return -ENOMEM; @@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, */ static void free_txq(struct adapter *adapter, struct sge_txq *tq) { + struct sge *s = &adapter->sge; + dma_free_coherent(adapter->pdev_dev, - tq->size * sizeof(*tq->desc) + STAT_LEN, + tq->size * sizeof(*tq->desc) + s->stat_len, tq->desc, tq->phys_addr); tq->cntxt_id = 0; tq->sdesc = NULL; @@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq) static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl) { + struct sge *s = &adapter->sge; unsigned int flid = fl ? 
fl->cntxt_id : 0xffff; t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, @@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, if (fl) { free_rx_bufs(adapter, fl, fl->avail); dma_free_coherent(adapter->pdev_dev, - fl->size * sizeof(*fl->desc) + STAT_LEN, + fl->size * sizeof(*fl->desc) + s->stat_len, fl->desc, fl->addr); kfree(fl->sdesc); fl->sdesc = NULL; @@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; + unsigned int ingpadboundary, ingpackboundary; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter) * Now translate the adapter parameters into our internal forms. */ if (fl1) - FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; - STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) - ? 128 : 64); - PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); - FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + - SGE_INGPADBOUNDARY_SHIFT); + s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) + ? 128 : 64); + s->pktshift = PKTSHIFT_GET(sge_params->sge_control); + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + */ + ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + + X_INGPADBOUNDARY_SHIFT); + if (is_t4(adapter->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + /* A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + s->fl_starve_thres + = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; /* * Set up tasklet timers. diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 95df61dcb4c..4b6a6d14d86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -134,11 +134,13 @@ struct dev_params { */ struct sge_params { u32 sge_control; /* padding, boundaries, lengths, etc. 
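The comment above explains why the VF driver now derives its free-list starvation threshold from the SGE's egress congestion threshold instead of a fixed constant: the congestion threshold is expressed in units of two free-list pointers, so the software threshold must be strictly greater than twice that value or the queue can deadlock against the hardware. A small standalone sketch of that check, with illustrative names:

#include <stdio.h>

struct fl_state {               /* illustrative free-list bookkeeping  */
	unsigned int avail;     /* buffers the hardware can see        */
	unsigned int pend_cred; /* credits not yet pushed to hardware  */
};

/* Congestion threshold is in units of 2 free-list pointers, so the
 * starvation threshold must exceed twice that value.
 */
static unsigned int starve_thres(unsigned int egress_congestion_thres)
{
	return egress_congestion_thres * 2 + 1;
}

static int fl_starving(const struct fl_state *fl, unsigned int thres)
{
	return fl->avail - fl->pend_cred <= thres;
}

int main(void)
{
	unsigned int thres = starve_thres(32);          /* e.g. threshold 32 */
	struct fl_state ok      = { .avail = 200, .pend_cred = 10 };
	struct fl_state starved = { .avail = 70,  .pend_cred = 10 };

	printf("threshold = %u\n", thres);
	printf("healthy list starving? %d\n", fl_starving(&ok, thres));
	printf("starved list starving? %d\n", fl_starving(&starved, thres));
	return 0;
}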
*/ + u32 sge_control2; /* T5: more of the same */ u32 sge_host_page_size; /* RDMA page sizes */ u32 sge_queues_per_page; /* RDMA queues/page */ u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_congestion_control; /* congestion thresholds, etc. */ u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ u32 sge_timer_value_2_and_3; u32 sge_timer_value_4_and_5; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e984fdc48ba..1e896b92323 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter) sge_params->sge_timer_value_2_and_3 = vals[5]; sge_params->sge_timer_value_4_and_5 = vals[6]; + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately with the Padding Boundary in SGE_CONTROL and and Packing + * Boundary in SGE_CONTROL2. So for T5 and later we need to grab + * SGE_CONTROL in order to determine how ingress packet data will be + * laid out in Packed Buffer Mode. Unfortunately, older versions of + * the firmware won't let us retrieve SGE_CONTROL2 so if we get a + * failure grabbing it we throw an error since we can't figure out the + * right value. + */ + if (!is_t4(adapter->params.chip)) { + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter->pdev_dev, + "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_control2 = vals[0]; + } + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); - v = t4vf_query_params(adapter, 1, params, vals); + params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); + v = t4vf_query_params(adapter, 2, params, vals); if (v) return v; sge_params->sge_ingress_rx_threshold = vals[0]; + sge_params->sge_congestion_control = vals[1]; return 0; } diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 69dfd3c9e52..0be6850be8a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) int i; enic_rfs_timer_stop(enic); - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); enic->rfs_h.free = 0; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; @@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) kfree(n); } } - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); } struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) @@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data) bool res; int j; - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { struct hlist_head *hhead; struct hlist_node *tmp; @@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data) } } } - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); } @@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const 
struct sk_buff *skb, return -EPROTONOSUPPORT; tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; - spin_lock(&enic->rfs_h.lock); + spin_lock_bh(&enic->rfs_h.lock); n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); if (n) { /* entry already present */ @@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } ret_unlock: - spin_unlock(&enic->rfs_h.lock); + spin_unlock_bh(&enic->rfs_h.lock); return res; } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 929bfe70080..73cf1653a4a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) struct vnic_rq_buf *buf = rq->to_use; if (buf->os_buf) { - buf = buf->next; - rq->to_use = buf; - rq->ring.desc_avail--; - if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { - /* Adding write memory barrier prevents compiler and/or - * CPU reordering, thus avoiding descriptor posting - * before descriptor is initialized. Otherwise, hardware - * can read stale descriptor fields. - */ - wmb(); - iowrite32(buf->index, &rq->ctrl->posted_index); - } + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); return 0; } @@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, enic->rq_truncated_pkts++; } + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; return; } @@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Buffer overflow */ + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); dev_kfree_skb_any(skb); + buf->os_buf = NULL; } } @@ -1674,13 +1670,13 @@ static int enic_stop(struct net_device *netdev) enic_dev_disable(enic); - local_bh_disable(); for (i = 0; i < enic->rq_count; i++) { napi_disable(&enic->napi[i]); + local_bh_disable(); while (!enic_poll_lock_napi(&enic->rq[i])) mdelay(1); + local_bh_enable(); } - local_bh_enable(); netif_carrier_off(netdev); netif_tx_disable(netdev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9a18e7930b3..597c463e384 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; @@ -4421,6 +4426,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); } + +static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops be_netdev_ops = { @@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_BE2NET_VXLAN .ndo_add_vxlan_port = be_add_vxlan_port, .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_gso_check = be_gso_check, #endif }; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 81b96cf8757..3dca494797b 100644 --- 
a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len) return bufaddr; } +static void swap_buffer2(void *dst_buf, void *src_buf, int len) +{ + int i; + unsigned int *src = src_buf; + unsigned int *dst = dst_buf; + + for (i = 0; i < len; i += 4, src++, dst++) + *dst = swab32p(src); +} + static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff } static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, - struct bufdesc *bdp, u32 length) + struct bufdesc *bdp, u32 length, bool swap) { struct fec_enet_private *fep = netdev_priv(ndev); struct sk_buff *new_skb; @@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); - memcpy(new_skb->data, (*skb)->data, length); + if (!swap) + memcpy(new_skb->data, (*skb)->data, length); + else + swap_buffer2(new_skb->data, (*skb)->data, length); *skb = new_skb; return true; @@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) u16 vlan_tag; int index = 0; bool is_copybreak; + bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME; #ifdef CONFIG_M532x flush_cache_all(); @@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) * include that when passing upstream as it messes up * bridging applications. */ - is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, + need_swap); if (!is_copybreak) { skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (unlikely(!skb_new)) { @@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, pkt_len - 4); data = skb->data; - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + if (!is_copybreak && need_swap) swap_buffer(data, pkt_len); /* Extract the enhanced buffer descriptor */ @@ -1581,7 +1596,8 @@ fec_enet_interrupt(int irq, void *dev_id) complete(&fep->mdio_done); } - fec_ptp_check_pps_event(fep); + if (fep->ptp_clock) + fec_ptp_check_pps_event(fep); return ret; } @@ -3342,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_device_detach(ndev); netif_tx_unlock_bh(ndev); fec_stop(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); } rtnl_unlock(); - fec_enet_clk_enable(ndev, false); - pinctrl_pm_select_sleep_state(&fep->pdev->dev); - if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -3366,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev) return ret; } - pinctrl_pm_select_default_state(&fep->pdev->dev); - ret = fec_enet_clk_enable(ndev, true); - if (ret) - goto failed_clk; - rtnl_lock(); if (netif_running(ndev)) { + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) { + rtnl_unlock(); + goto failed_clk; + } fec_restart(ndev); netif_tx_lock_bh(ndev); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 3d4e08be170..b34214e2df5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ 
b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -341,6 +341,9 @@ static void restart(struct net_device *dev) FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ } + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + /* * Enable interrupts we wish to service. */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index f30411f0701..7a184e8816a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -355,6 +355,9 @@ static void restart(struct net_device *dev) if (fep->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 5f6aded512f..24f3986cfae 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SG); - netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ed5f1c15fb0..c3a7f4a4b77 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; - u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; @@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; - u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> + u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a21b14495eb..487cd9c4ac0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) /* igb_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. 
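One of the i40e fixes above is a one-character class of bug worth calling out: extracting a register field requires masking with the field's MASK constant and then shifting by its SHIFT constant; masking with the SHIFT value instead silently yields garbage. A standalone sketch with made-up field positions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: an 8-bit "event" field at bits 8..15. */
#define EVENT_SHIFT 8
#define EVENT_MASK  (0xFFu << EVENT_SHIFT)

static uint8_t event_wrong(uint32_t reg)
{
	/* Bug pattern: AND with the shift count instead of the mask. */
	return (reg & EVENT_SHIFT) >> EVENT_SHIFT;
}

static uint8_t event_right(uint32_t reg)
{
	/* Correct: isolate the field with MASK, then shift it down. */
	return (reg & EVENT_MASK) >> EVENT_SHIFT;
}

int main(void)
{
	uint32_t reg = 0x0003A500;   /* event field holds 0xA5 */

	printf("wrong: 0x%02x\n", event_wrong(reg)); /* field is lost        */
	printf("right: 0x%02x\n", event_right(reg)); /* prints 0xa5 as expected */
	return 0;
}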
*/ - kfree_rcu(q_vector, rcu); + if (q_vector) + kfree_rcu(q_vector, rcu); } /** @@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter) adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; for (i = 0; i < adapter->num_q_vectors; i++) { - napi_synchronize(&(adapter->q_vector[i]->napi)); - napi_disable(&(adapter->q_vector[i]->napi)); + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } } @@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); } void igb_unmap_and_free_tx_resource(struct igb_ring *ring, @@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); } /** @@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); } /** @@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); } /** @@ -6537,6 +6544,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, if (unlikely(page_to_nid(page) != numa_node_id())) return false; + if (unlikely(page->pfmemalloc)) + return false; + #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely(page_count(page) != 1)) @@ -6603,7 +6613,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + if (likely((page_to_nid(page) == numa_node_id()) && + !page->pfmemalloc)) return true; /* this page cannot be reused so discard it */ @@ -7400,6 +7411,8 @@ static int igb_resume(struct device *dev) pci_restore_state(pdev); pci_save_state(pdev); + if (!pci_device_is_present(pdev)) + return -ENODEV; err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3ce4a258f94..0ae038b9af9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev, if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link(hw, advertised, true); if (err) { e_info(probe, "setup link failed with code %d\n", err); hw->mac.ops.setup_link(hw, old, true); } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } else { /* in this case we currently only support 10Gb/FULL */ u32 speed = ethtool_cmd_speed(ecmd); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fec5212d433..cc51554c9e9 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev) * if SR-IOV and VMDQ are disabled - otherwise ensure * that hardware VLAN filters remain enabled. */ - if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | - IXGBE_FLAG_SRIOV_ENABLED))) + if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED)) vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); } else { if (netdev->flags & IFF_ALLMULTI) { @@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) IXGBE_CB(skb)->page_released = false; } dev_kfree_skb(skb); + rx_buffer->skb = NULL; } - rx_buffer->skb = NULL; if (rx_buffer->dma) dma_unmap_page(dev, rx_buffer->dma, ixgbe_rx_pg_size(rx_ring), @@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) { reg = 0; @@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; + bool disable_dev = false; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -8369,13 +8375,14 @@ err_sw_init: iounmap(adapter->io_addr); kfree(adapter->mac_table); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: - if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; + bool disable_dev; ixgbe_dbg_adapter_exit(adapter); @@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index d47b19f27c3..28b81ae09b5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { - s32 status; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; @@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); - - return status; + return 0; } /** diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index b151a949f35..d44560d1d26 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ 
b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) int tx_index; struct tx_desc *desc; u32 cmd_sts; - struct sk_buff *skb; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; @@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) reclaimed++; txq->tx_desc_count--; - skb = NULL; - if (cmd_sts & TX_LAST_DESC) - skb = __skb_dequeue(&txq->tx_skb); + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, + desc->byte_cnt, DMA_TO_DEVICE); + + if (cmd_sts & TX_ENABLE_INTERRUPT) { + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); + + if (!WARN_ON(!skb)) + dev_kfree_skb(skb); + } if (cmd_sts & ERROR_SUMMARY) { netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, - desc->byte_cnt, DMA_TO_DEVICE); - dev_kfree_skb(skb); } __netif_tx_unlock_bh(nq); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index ece83f10152..fdf3e382e46 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, { struct mvpp2_prs_entry *pe; int tid_aux, tid; + int ret = 0; pe = mvpp2_prs_vlan_find(priv, tpid, ai); @@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, break; } - if (tid <= tid_aux) - return -EINVAL; + if (tid <= tid_aux) { + ret = -EINVAL; + goto error; + } memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* Get first free double vlan ai number */ @@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned int port_map) { struct mvpp2_prs_entry *pe; - int tid_aux, tid, ai; + int tid_aux, tid, ai, ret = 0; pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); @@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; + if (ai < 0) { + ret = ai; + goto error; + } /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; @@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, break; } - if (tid >= tid_aux) - return -ERANGE; + if (tid >= tid_aux) { + ret = -ERANGE; + goto error; + } memset(pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); @@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); +error: kfree(pe); - return 0; + return ret; } /* IPv4 header parsing for fragmentation and L4 offset */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f3032fec8fc..4d69e382b4e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); #ifdef 
CONFIG_MLX4_EN_VXLAN - if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); #endif priv->port_up = true; @@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); out: - if (ret) + if (ret) { en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + return; + } + + /* set offloads */ + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) int ret; struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); + /* unset offloads */ + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2342,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); } + +static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops mlx4_netdev_ops = { @@ -2373,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, #endif }; @@ -2403,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -2568,13 +2592,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) dev->priv_flags |= IFF_UNICAST_FLT; - if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL; - } - mdev->pndev[port] = dev; netif_carrier_off(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 34c13787854..454d9fea640 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) * whether LSO is used */ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | - MLX4_WQE_CTRL_TCP_UDP_CSUM); + if (!skb->encapsulation) + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + else + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); 
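The mlx4 TX change just above distinguishes encapsulated (VXLAN) frames from plain ones when requesting hardware checksumming: for a tunnelled packet only the outer IP checksum is requested in the send descriptor, while a plain packet gets both IP and TCP/UDP checksum offload. A reduced standalone sketch of that selection; the flag values here are placeholders, not the real WQE control bits.

#include <stdint.h>
#include <stdio.h>

/* Placeholder descriptor flags -- not the real MLX4 WQE control bits. */
#define CTRL_IP_CSUM       0x1u
#define CTRL_TCP_UDP_CSUM  0x2u

/* Pick checksum-offload flags for a packet that asked for partial csum. */
static uint32_t csum_flags(int encapsulated)
{
	if (!encapsulated)
		return CTRL_IP_CSUM | CTRL_TCP_UDP_CSUM; /* plain frame      */
	return CTRL_IP_CSUM;    /* tunnelled frame: only the outer IP header */
}

int main(void)
{
	printf("plain frame flags: 0x%x\n", csum_flags(0));
	printf("vxlan frame flags: 0x%x\n", csum_flags(1));
	return 0;
}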
ring->tx_csum++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index a49c9d11d8a..49290a40590 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, pr_cont("\n"); } } + synchronize_irq(eq->irq); mlx4_mtt_cleanup(dev, &eq->mtt); for (i = 0; i < npages; ++i) diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ca0f98c9510..872843179f4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, cur->ib.dst_gid_msk); break; + case MLX4_NET_TRANS_RULE_ID_VXLAN: + len += snprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + break; case MLX4_NET_TRANS_RULE_ID_IPV6: break; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 5d2498dcf53..cd5cf6d957c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE: - count = get_param_l(&in_param); + count = get_param_l(&in_param) & 0xffffff; align = get_param_h(&in_param); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ed53291468f..ad2c96a02a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); eq->eqn = out.eq_number; + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, eq->name, eq); if (err) goto err_eq; - eq->irqn = vecidx; - eq->dev = dev; - eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = mlx5_debug_eq_add(dev, eq); if (err) goto err_irq; @@ -420,6 +419,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); + synchronize_irq(table->msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3d8e8e489b2..71b10b21079 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev, dev->profile = &profile[prof_sel]; dev->event = mlx5_core_event; + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); err = mlx5_dev_init(dev, pdev); if (err) { dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); goto out; } - INIT_LIST_HEAD(&priv->ctx_list); - spin_lock_init(&priv->ctx_lock); err = mlx5_register_device(dev); if (err) { dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 0b2a1ccd276..613037584d0 100644 --- 
a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work) if (test_bit(__NX_RESETTING, &adapter->state)) goto reschedule; - if (test_bit(__NX_DEV_UP, &adapter->state)) { + if (test_bit(__NX_DEV_UP, &adapter->state) && + !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { if (!adapter->has_link_events) { netxen_nic_handle_phy_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f5e29f7bdae..a913b3ad2f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } + +static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops qlcnic_netdev_ops = { @@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { #ifdef CONFIG_QLCNIC_VXLAN .ndo_add_vxlan_port = qlcnic_add_vxlan_port, .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_gso_check = qlcnic_gso_check, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index f3a47147937..9a49f42ac2b 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -5,7 +5,6 @@ config NET_VENDOR_QUALCOMM bool "Qualcomm devices" default y - depends on SPI_MASTER && OF_GPIO ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM config QCA7000 tristate "Qualcomm Atheros QCA7000 support" - depends on SPI_MASTER && OF_GPIO + depends on SPI_MASTER && OF ---help--- This SPI protocol driver supports the Qualcomm Atheros QCA7000. 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 002d4cdc319..a77f05ce832 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx) EFX_MAX_CHANNELS, resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); - BUG_ON(efx->max_channels == 0); + if (WARN_ON(efx->max_channels == 0)) + return -EIO; nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index ee84a90e371..aaf2987512b 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) unsigned short dma_flags; int i = 0; - EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); - if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); @@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); - EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); - rc = tso_start(&state, efx, skb); if (rc) goto mem_err; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 5e94d00b96b..6cc3cf6f17c 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -81,6 +81,7 @@ static const char version[] = #include <linux/workqueue.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = { {}, }; MODULE_DEVICE_TABLE(of, smc91x_match); + +/** + * of_try_set_control_gpio - configure a gpio if it exists + */ +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + struct gpio_desc *gpio = *desc; + int res; + + gpio = devm_gpiod_get_index(dev, name, index); + if (IS_ERR(gpio)) { + if (PTR_ERR(gpio) == -ENOENT) { + *desc = NULL; + return 0; + } + + return PTR_ERR(gpio); + } + res = gpiod_direction_output(gpio, !value); + if (res) { + dev_err(dev, "unable to toggle gpio %s: %i\n", name, res); + devm_gpiod_put(dev, gpio); + gpio = NULL; + return res; + } + if (nsdelay) + usleep_range(nsdelay, 2 * nsdelay); + gpiod_set_value_cansleep(gpio, value); + *desc = gpio; + + return 0; +} #endif /* @@ -2207,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res, *ires; + struct resource *res; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; + unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2237,6 +2274,28 @@ static int smc_drv_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; u32 val; + /* Optional pwrdwn GPIO configured? */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, + "power", 0, 0, 100); + if (ret) + return ret; + + /* + * Optional reset GPIO configured? Minimum 100 ns reset needed + * according to LAN91C96 datasheet page 14. 
+ */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, + "reset", 0, 0, 100); + if (ret) + return ret; + + /* + * Need to wait for optional EEPROM to load, max 750 us according + * to LAN91C96 datasheet page 55. + */ + if (lp->reset_gpio) + usleep_range(750, 1000); + /* Combination of IO widths supported, default to 16-bit */ if (!of_property_read_u32(np, "reg-io-width", &val)) { if (val & 1) @@ -2279,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { ret = -ENODEV; goto out_release_io; } - - ndev->irq = ires->start; - - if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) - irq_flags = ires->flags & IRQF_TRIGGER_MASK; + /* + * If this platform does not specify any special irqflags, or if + * the resource supplies a trigger, override the irqflags with + * the trigger flags from the resource. + */ + irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) + irq_flags = irq_resflags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 47dce918eb0..2a38dacbbd2 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -298,6 +298,9 @@ struct smc_local { struct sk_buff *pending_tx_skb; struct tasklet_struct tx_task; + struct gpio_desc *power_gpio; + struct gpio_desc *reset_gpio; + /* version/revision of the SMC91x chip */ int version; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index affb29da353..77ed74561e5 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) spin_unlock(&pdata->mac_lock); } +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + /* If the internal PHY is in General Power-Down mode, all, except the + * management interface, is powered-down and stays in that condition as + * long as Phy register bit 0.11 is HIGH. + * + * In that case, clear the bit 0.11, so the PHY powers up and we can + * access to the phy registers. + */ + rc = phy_read(pdata->phy_dev, MII_BMCR); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* If the PHY general power-down bit is not set is not necessary to + * disable the general power down-mode. + */ + if (rc & BMCR_PDOWN) { + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + usleep_range(1000, 1500); + } + + return 0; +} + static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { int rc = 0; @@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) return rc; } - /* - * If energy is detected the PHY is already awake so is not necessary - * to disable the energy detect power-down mode. 
- */ - if ((rc & MII_LAN83C185_EDPWRDOWN) && - !(rc & MII_LAN83C185_ENERGYON)) { + /* Only disable if energy detect mode is already enabled */ + if (rc & MII_LAN83C185_EDPWRDOWN) { /* Disable energy detect mode for this SMSC Transceivers */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc & (~MII_LAN83C185_EDPWRDOWN)); @@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); + /* Allow PHY to wakeup */ + mdelay(2); } return 0; @@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) /* Only enable if energy detect mode is already disabled */ if (!(rc & MII_LAN83C185_EDPWRDOWN)) { - mdelay(100); /* Enable energy detect mode for this SMSC Transceivers */ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); @@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; } - - mdelay(1); } return 0; } @@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) int ret; /* + * Make sure to power-up the PHY chip before doing a reset, otherwise + * the reset fails. + */ + ret = smsc911x_phy_general_power_up(pdata); + if (ret) { + SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); + return ret; + } + + /* * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that * are initialized in a Energy Detect Power-Down mode that prevents * the MAC chip to be software reseted. So we have to wakeup the PHY diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f77a46c7e2..18c46bb0f3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) bool stmmac_eee_init(struct stmmac_priv *priv) { char *phy_bus_name = priv->plat->phy_bus_name; + unsigned long flags; bool ret = false; /* Using PCS we cannot dial with the phy registers at this stage @@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disable own timers. */ + spin_lock_irqsave(&priv->lock, flags); if (priv->eee_active) { pr_debug("stmmac: disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); @@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) tx_lpi_timer); } priv->eee_active = 0; + spin_unlock_irqrestore(&priv->lock, flags); goto out; } /* Activate the EEE and start timers */ + spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; init_timer(&priv->eee_ctrl_timer); @@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* Set HW EEE according to the speed */ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); - pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); - ret = true; + spin_unlock_irqrestore(&priv->lock, flags); + + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); } out: return ret; @@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); + spin_unlock_irqrestore(&priv->lock, flags); + /* At this stage, it could be needed to setup the EEE or adjust some * MAC related HW registers. 
*/ priv->eee_enabled = stmmac_eee_init(priv); - - spin_unlock_irqrestore(&priv->lock, flags); } /** @@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) } static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, - int i) + int i, gfp_t flags) { struct sk_buff *skb; skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, - GFP_KERNEL); + flags); if (!skb) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); return -ENOMEM; @@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) * and allocates the socket buffers. It suppors the chained and ring * modes. */ -static int init_dma_desc_rings(struct net_device *dev) +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) { int i; struct stmmac_priv *priv = netdev_priv(dev); @@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev) else p = priv->dma_rx + i; - ret = stmmac_init_rx_buffers(priv, p, i); + ret = stmmac_init_rx_buffers(priv, p, i, flags); if (ret) goto err_init_rx_buffers; @@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); int ret; - ret = init_dma_desc_rings(dev); - if (ret < 0) { - pr_err("%s: DMA descriptors initialization failed\n", __func__); - return ret; - } /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev) } priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - priv->eee_enabled = stmmac_eee_init(priv); - - stmmac_init_tx_coalesce(priv); - if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { priv->rx_riwt = MAX_DMA_RIWT; priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); @@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev) goto dma_desc_error; } + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto init_error; + } + ret = stmmac_hw_setup(dev); if (ret < 0) { pr_err("%s: Hw setup failed\n", __func__); goto init_error; } + stmmac_init_tx_coalesce(priv); + if (priv->phydev) phy_start(priv->phydev); @@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nopaged_len = skb_headlen(skb); unsigned int enh_desc = priv->plat->enh_desc; + spin_lock(&priv->tx_lock); + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { + spin_unlock(&priv->tx_lock); if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); /* This is a hard error, log it. 
*/ @@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - spin_lock(&priv->tx_lock); - if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); @@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; dma_map_err: + spin_unlock(&priv->tx_lock); dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); priv->dev->stats.tx_dropped++; @@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - spin_lock(&priv->lock); priv->hw->mac->set_filter(priv->hw, dev); - spin_unlock(&priv->lock); } /** @@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev) stmmac_set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable_unprepare(priv->stmmac_clk); + clk_disable(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); @@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk prevously disabled */ - clk_prepare_enable(priv->stmmac_clk); + clk_enable(priv->stmmac_clk); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); @@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev) netif_device_attach(ndev); + init_dma_desc_rings(ndev, GFP_ATOMIC); stmmac_hw_setup(ndev); + stmmac_init_tx_coalesce(priv); napi_enable(&priv->napi); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 655a23bbc45..e17a970eaf2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg; static void stmmac_default_data(void) { memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); + plat_dat.bus_id = 1; plat_dat.phy_addr = 0; plat_dat.interface = PHY_INTERFACE_MODE_GMII; @@ -47,6 +48,12 @@ static void stmmac_default_data(void) dma_cfg.pbl = 32; dma_cfg.burst_len = DMA_AXI_BLEN_256; plat_dat.dma_cfg = &dma_cfg; + + /* Set default value for multicast hash bins */ + plat_dat.multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat.unicast_filter_entries = 1; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index db56fa7ce8f..5b0da398621 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - /* * Currently only the properties needed on SPEAr600 * are provided. 
All other properties should be added @@ -270,6 +264,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(addr); plat_dat = dev_get_platdata(&pdev->dev); + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + if (pdev->dev.of_node) { if (!plat_dat) plat_dat = devm_kzalloc(&pdev->dev, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 72c8525d545..9c014803b03 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp) HMD(("init rxring, ")); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; + u32 mapping; skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { @@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp) /* Because we reserve afterwards. */ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(hp->dma_dev, mapping)) { + dev_kfree_skb_any(skb); + hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); + continue; + } hme_write_rxd(hp, &hb->happy_meal_rxd[i], (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), - dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(skb, RX_OFFSET); } @@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb = hp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; + u32 mapping; /* Now refill the entry, if we can. */ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); @@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) drops++; goto drop_it; } + skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, new_skb->data, + RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + dev_kfree_skb_any(new_skb); + drops++; + goto drop_it; + } + dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); hp->rx_skbs[elem] = new_skb; - skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), - dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, - DMA_FROM_DEVICE)); + mapping); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. 
*/ @@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } +static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, + u32 first_len, u32 first_entry, u32 entry) +{ + struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; + + dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); + + first_entry = NEXT_TX(first_entry); + while (first_entry != entry) { + struct happy_meal_txd *this = &txbase[first_entry]; + u32 addr, len; + + addr = hme_read_desc32(hp, &this->tx_addr); + len = hme_read_desc32(hp, &this->tx_flags); + len &= TXFLAG_SIZE; + dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); + } +} + static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb->len; mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) + goto out_dma_error; tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (tx_flags | (len & TXFLAG_SIZE)), @@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, first_len = skb_headlen(skb); first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) + goto out_dma_error; entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { @@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + unmap_partial_tx_skb(hp, first_mapping, first_len, + first_entry, entry); + goto out_dma_error; + } this_txflags = tx_flags; if (frag == skb_shinfo(skb)->nr_frags - 1) this_txflags |= TXFLAG_EOP; @@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return NETDEV_TX_OK; + +out_dma_error: + hp->tx_skbs[hp->tx_new] = NULL; + spin_unlock_irq(&hp->happy_lock); + + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 952e1e4764b..c560f9aeb55 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,9 +129,9 @@ do { \ #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 -#define CPSW_FIFO_NORMAL_MODE (0 << 15) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) #define CPSW_INTPACEEN (0x3f << 16) #define CPSW_INTPRESCALE_MASK (0x7FF << 0) @@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) if (enable) { unsigned long timeout = jiffies + HZ; - /* Disable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, 
ALE_P0_UNI_FLOOD, 1); dev_dbg(&ndev->dev, "promiscuity enabled\n"); } else { - /* Flood All Unicast Packets to Host port */ + /* Don't Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); - /* Enable Learn for all ports */ - for (i = 0; i < priv->data.slaves; i++) { + /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, @@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); return; } else { /* Disable promiscuous mode */ cpsw_set_promiscious(ndev, false); } + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); @@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) const int port = priv->host_port; u32 reg; int i; + int unreg_mcast_mask; reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : CPSW2_PORT_VLAN; @@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) for (i = 0; i < priv->data.slaves; i++) slave_write(priv->slaves + i, vlan, reg); + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - (ALE_PORT_1 | ALE_PORT_2) << port); + unreg_mcast_mask << port); } static void cpsw_init_host_port(struct cpsw_priv *priv) @@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { int ret; + int unreg_mcast_mask; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; ret = cpsw_ale_add_vlan(priv->ale, vid, ALE_ALL_PORTS << priv->host_port, 0, ALE_ALL_PORTS << priv->host_port, - (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); + unreg_mcast_mask << priv->host_port); if (ret != 0) return ret; @@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); - return -EINVAL; + goto no_phy_slave; } mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); @@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); + slave_data->phy_if = of_get_phy_mode(slave_node); + if (slave_data->phy_if < 0) { + dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", + i); + return slave_data->phy_if; + } + +no_phy_slave: mac_addr = of_get_mac_address(slave_node); if (mac_addr) { memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); @@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, return ret; } } - - slave_data->phy_if = of_get_phy_mode(slave_node); - if (slave_data->phy_if < 0) { - dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", - i); - return slave_data->phy_if; - } - if (data->dual_emac) { if (of_property_read_u32(slave_node, 
"dual_emac_res_vlan", &prop)) { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0579b2243bb..097ebe7077a 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) return 0; } +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + int unreg_mcast = 0; + + /* Only bother doing the work if the setting is actually changing */ + if (ale->allmulti == allmulti) + return; + + /* Remember the new setting to check against next time */ + ale->allmulti = allmulti; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + if (allmulti) + unreg_mcast |= 1; + else + unreg_mcast &= ~1; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_write(ale, idx, ale_entry); + } +} + struct ale_control_info { const char *name; int offset, port_offset; @@ -756,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale) { if (!ale) return -EINVAL; - cpsw_ale_stop(ale); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); kfree(ale); return 0; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 31cf43cab42..c0d4127aa54 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -27,6 +27,7 @@ struct cpsw_ale { struct cpsw_ale_params params; struct timer_list timer; unsigned long ageout; + int allmulti; }; enum cpsw_ale_control { @@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index ab92f67da03..4a4388b813a 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, switch (ptp_class & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9e17d1a91e7..78ec33f5100 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -550,6 +550,7 @@ do_lso: do_send: /* Start filling in the page buffers with the rndis hdr */ rndis_msg->msg_len += rndis_msg_size; + packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, skb, &packet->page_buf[0]); diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c index 9ce854f4391..6cbc56ad9ff 100644 --- a/drivers/net/ieee802154/fakehard.c +++ b/drivers/net/ieee802154/fakehard.c @@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) err = wpan_phy_register(phy); if (err) - goto out; + goto err_phy_reg; err = register_netdev(dev); - if (err < 0) - goto 
out; + if (err) + goto err_netdev_reg; dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); return 0; -out: - unregister_netdev(dev); +err_netdev_reg: + wpan_phy_unregister(phy); +err_phy_reg: + free_netdev(dev); + wpan_phy_free(phy); return err; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 29b3bb41078..bfb0b6ec8c5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w) struct sk_buff *skb; struct sk_buff_head list; - skb_queue_head_init(&list); + __skb_queue_head_init(&list); spin_lock_bh(&port->bc_queue.lock); skb_queue_splice_tail_init(&port->bc_queue, &list); @@ -1082,9 +1082,15 @@ static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get_rtnl(dev); - cancel_work_sync(&port->bc_work); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); + + /* After this point, no packet can schedule bc_work anymore, + * but we need to cancel it and purge left skbs if any. + */ + cancel_work_sync(&port->bc_work); + __skb_queue_purge(&port->bc_queue); + kfree_rcu(port, rcu); } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 65e2892342b..880cc090dc4 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -16,6 +16,7 @@ #include <linux/idr.h> #include <linux/fs.h> +#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> @@ -65,7 +66,7 @@ static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) @@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb, gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: + pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", + current->comm); gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; default: return -EINVAL; @@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) @@ -626,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; vnet_hdr->csum_start = skb_checksum_start_offset(skb); + if (vlan_tx_tag_present(skb)) + vnet_hdr->csum_start += VLAN_HLEN; vnet_hdr->csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; @@ -950,9 +955,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -963,7 +965,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. 
*/ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1064,7 +1066,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) + TUN_F_TSO_ECN)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2954052706e..e22e602beef 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; @@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type) switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_IPV4: - offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; break; case PTP_CLASS_IPV6: offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index bd37e45c89c..225c033b08f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -50,10 +50,15 @@ #define MII_M1011_PHY_SCR 0x10 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 +#define MII_M1145_PHY_EXT_SR 0x1b #define MII_M1145_PHY_EXT_CR 0x14 #define MII_M1145_RGMII_RX_DELAY 0x0080 #define MII_M1145_RGMII_TX_DELAY 0x0002 +#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1145_HWCFG_MODE_MASK 0xf +#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 + #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c @@ -676,6 +681,20 @@ static int m88e1145_config_init(struct phy_device *phydev) } } + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); + if (temp < 0) + return temp; + + temp &= ~MII_M1145_HWCFG_MODE_MASK; + temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK; + temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; + + err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp); + if (err < 0) + return err; + } + err = marvell_of_reg_init(phydev); if (err < 0) return err; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1dfffdc9dfc..767cd110f49 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *mii_data = if_mii(ifr); u16 val = mii_data->val_in; + bool change_autoneg = false; switch (cmd) { case SIOCGMIIPHY: @@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mii_data->phy_id == phydev->addr) { switch (mii_data->reg_num) { case MII_BMCR: - if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) + if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) { + if (phydev->autoneg == AUTONEG_ENABLE) + change_autoneg = true; phydev->autoneg = AUTONEG_DISABLE; - else + if (val & BMCR_FULLDPLX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + if (val & BMCR_SPEED1000) + phydev->speed = SPEED_1000; + else if (val & BMCR_SPEED100) + phydev->speed = SPEED_100; + else phydev->speed = SPEED_10; + } + else { + if (phydev->autoneg == AUTONEG_DISABLE) + change_autoneg = true; phydev->autoneg = 
AUTONEG_ENABLE; - if (!phydev->autoneg && (val & BMCR_FULLDPLX)) - phydev->duplex = DUPLEX_FULL; - else - phydev->duplex = DUPLEX_HALF; - if (!phydev->autoneg && (val & BMCR_SPEED1000)) - phydev->speed = SPEED_1000; - else if (!phydev->autoneg && - (val & BMCR_SPEED100)) - phydev->speed = SPEED_100; + } break; case MII_ADVERTISE: - phydev->advertising = val; + phydev->advertising = mii_adv_to_ethtool_adv_t(val); + change_autoneg = true; break; default: /* do nothing */ @@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mii_data->reg_num == MII_BMCR && val & BMCR_RESET) return phy_init_hw(phydev); + + if (change_autoneg) + return phy_start_aneg(phydev); + return 0; case SIOCSHWTSTAMP: diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 68c3a3f4e0a..794a4732936 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { + struct bpf_prog *pass_filter = NULL; struct sock_fprog_kern fprog = { .len = err, .filter = code, }; - ppp_lock(ppp); - if (ppp->pass_filter) { - bpf_prog_destroy(ppp->pass_filter); - ppp->pass_filter = NULL; + err = 0; + if (fprog.filter) + err = bpf_prog_create(&pass_filter, &fprog); + if (!err) { + ppp_lock(ppp); + if (ppp->pass_filter) + bpf_prog_destroy(ppp->pass_filter); + ppp->pass_filter = pass_filter; + ppp_unlock(ppp); } - if (fprog.filter != NULL) - err = bpf_prog_create(&ppp->pass_filter, - &fprog); - else - err = 0; kfree(code); - ppp_unlock(ppp); } break; } @@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = get_filter(argp, &code); if (err >= 0) { + struct bpf_prog *active_filter = NULL; struct sock_fprog_kern fprog = { .len = err, .filter = code, }; - ppp_lock(ppp); - if (ppp->active_filter) { - bpf_prog_destroy(ppp->active_filter); - ppp->active_filter = NULL; + err = 0; + if (fprog.filter) + err = bpf_prog_create(&active_filter, &fprog); + if (!err) { + ppp_lock(ppp); + if (ppp->active_filter) + bpf_prog_destroy(ppp->active_filter); + ppp->active_filter = active_filter; + ppp_unlock(ppp); } - if (fprog.filter != NULL) - err = bpf_prog_create(&ppp->active_filter, - &fprog); - else - err = 0; kfree(code); - ppp_unlock(ppp); } break; } diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1aff970be33..1dc628ffce2 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; - sp.sa_family = AF_PPPOX; + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); + + sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_PPTP; sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 186ce541c65..9dd3746994a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -65,6 +65,7 @@ #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> +#include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> @@ -174,7 +175,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6|NETIF_F_UFO) + NETIF_F_TSO6) int vnet_hdr_sz; int sndbuf; @@ -1139,6 +1140,8 @@ static ssize_t 
tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } + skb_reset_network_header(skb); + if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1149,8 +1152,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; case VIRTIO_NET_HDR_GSO_UDP: + { + static bool warned; + + if (!warned) { + warned = true; + netdev_warn(tun->dev, + "%s: using disabled UFO feature; please fix this program\n", + current->comm); + } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; + } default: tun->dev->stats.rx_frame_errors++; kfree_skb(skb); @@ -1179,7 +1194,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } - skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); rxhash = skb_get_hash(skb); @@ -1221,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; int vlan_offset = 0, copied; + int vlan_hlen = 0; + int vnet_hdr_sz = 0; + + if (vlan_tx_tag_present(skb)) + vlan_hlen = VLAN_HLEN; + + if (tun->flags & TUN_VNET_HDR) + vnet_hdr_sz = tun->vnet_hdr_sz; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) return -EINVAL; - if (len < skb->len) { + if (len < skb->len + vlan_hlen + vnet_hdr_sz) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } @@ -1236,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, total += sizeof(pi); } - if (tun->flags & TUN_VNET_HDR) { + if (vnet_hdr_sz) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ - if ((len -= tun->vnet_hdr_sz) < 0) + if ((len -= vnet_hdr_sz) < 0) return -EINVAL; if (skb_is_gso(skb)) { @@ -1251,8 +1273,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1272,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (skb->ip_summed == CHECKSUM_PARTIAL) { gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - gso.csum_start = skb_checksum_start_offset(skb); + gso.csum_start = skb_checksum_start_offset(skb) + + vlan_hlen; gso.csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; @@ -1281,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, sizeof(gso)))) return -EFAULT; - total += tun->vnet_hdr_sz; + total += vnet_hdr_sz; } copied = total; - total += skb->len; - if (!vlan_tx_tag_present(skb)) { - len = min_t(int, skb->len, len); - } else { + len = min_t(int, skb->len + vlan_hlen, len); + total += skb->len + vlan_hlen; + if (vlan_hlen) { int copy, ret; struct { __be16 h_vlan_proto; @@ -1299,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - len = min_t(int, skb->len + VLAN_HLEN, len); - total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); @@ -1762,11 +1780,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= 
NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } - - if (arg & TUN_F_UFO) { - features |= NETIF_F_UFO; - arg &= ~TUN_F_UFO; - } } /* This gives the user a way to test for new features in future by diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 2c05f6cdb12..816d511e34d 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) return ret; } - ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); - if (ret < 0) - return ret; - - msleep(150); - - ret = asix_sw_reset(dev, AX_SWRESET_CLEAR); - if (ret < 0) - return ret; - - msleep(150); - - ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE); + ax88772_reset(dev); /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index be427572103..e6338c16081 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) { struct usbnet *dev = netdev_priv(net); struct sockaddr *addr = p; + int ret; if (netif_running(net)) return -EBUSY; @@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); /* Set the MAC address */ - return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, + ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN, net->dev_addr); + if (ret < 0) + return ret; + + return 0; } static const struct net_device_ops ax88179_netdev_ops = { diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 2a32d9167d3..d3920b54a92 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -67,6 +67,35 @@ static const u8 mbm_guid[16] = { 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, }; +static void usbnet_cdc_update_filter(struct usbnet *dev) +{ + struct cdc_state *info = (void *) &dev->data; + struct usb_interface *intf = info->control; + + u16 cdc_filter = + USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | + USB_CDC_PACKET_TYPE_BROADCAST; + + if (dev->net->flags & IFF_PROMISC) + cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS; + + /* FIXME cdc-ether has some multicast code too, though it complains + * in routine cases. info->ether describes the multicast support. + * Implement that here, manipulating the cdc filter as needed. + */ + + usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + USB_CDC_SET_ETHERNET_PACKET_FILTER, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, + cdc_filter, + intf->cur_altsetting->desc.bInterfaceNumber, + NULL, + 0, + USB_CTRL_SET_TIMEOUT + ); +} + /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. * all pure cdc, except for certain firmware workarounds, and knowing @@ -347,16 +376,8 @@ next_desc: * don't do reset all the way. So the packet filter should * be set to a sane initial value. 
*/ - usb_control_msg(dev->udev, - usb_sndctrlpipe(dev->udev, 0), - USB_CDC_SET_ETHERNET_PACKET_FILTER, - USB_TYPE_CLASS | USB_RECIP_INTERFACE, - USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST, - intf->cur_altsetting->desc.bInterfaceNumber, - NULL, - 0, - USB_CTRL_SET_TIMEOUT - ); + usbnet_cdc_update_filter(dev); + return 0; bad_desc: @@ -468,10 +489,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) return status; } - /* FIXME cdc-ether has some multicast code too, though it complains - * in routine cases. info->ether describes the multicast support. - * Implement that here, manipulating the cdc filter as needed. - */ return 0; } EXPORT_SYMBOL_GPL(usbnet_cdc_bind); @@ -482,6 +499,7 @@ static const struct driver_info cdc_info = { .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, + .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; @@ -491,6 +509,7 @@ static const struct driver_info wwan_info = { .bind = usbnet_cdc_bind, .unbind = usbnet_cdc_unbind, .status = usbnet_cdc_status, + .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, }; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 22756db53dc..b8a82b86f90 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -780,6 +780,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e3d84c322e4..c6554c7a814 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1162,6 +1162,9 @@ static void intr_callback(struct urb *urb) case -ESHUTDOWN: netif_device_detach(tp->netdev); case -ENOENT: + case -EPROTO: + netif_info(tp, intr, tp->netdev, + "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); @@ -2891,6 +2894,9 @@ static int rtl8152_open(struct net_device *netdev) if (res) goto out; + /* set speed to 0 to avoid autoresume try to submit rx */ + tp->speed = 0; + res = usb_autopm_get_interface(tp->intf); if (res < 0) { free_all_mem(tp); @@ -2904,6 +2910,8 @@ static int rtl8152_open(struct net_device *netdev) clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + + /* disable the tx/rx, if the workqueue has enabled them. */ if (tp->speed & LINK_STATUS) tp->rtl_ops.disable(tp); } @@ -2955,10 +2963,7 @@ static int rtl8152_close(struct net_device *netdev) * be disable when autoresume occurs, because the * netif_running() would be false. 
*/ - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - rtl_runtime_suspend_enable(tp, false); - clear_bit(SELECTIVE_SUSPEND, &tp->flags); - } + rtl_runtime_suspend_enable(tp, false); tasklet_disable(&tp->tl); tp->rtl_ops.down(tp); @@ -3205,7 +3210,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) netif_device_detach(netdev); } - if (netif_running(netdev)) { + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); tasklet_disable(&tp->tl); @@ -3253,6 +3258,8 @@ static int rtl8152_resume(struct usb_interface *intf) set_bit(WORK_ENABLE, &tp->flags); } usb_submit_urb(tp->intr_urb, GFP_KERNEL); + } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + clear_bit(SELECTIVE_SUSPEND, &tp->flags); } mutex_unlock(&tp->control); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 20615bbd693..3a6770a65d7 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev) clear_bit(EVENT_LINK_CHANGE, &dev->flags); } +static void usbnet_set_rx_mode(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); +} + +static void __handle_set_rx_mode(struct usbnet *dev) +{ + if (dev->driver_info->set_rx_mode) + (dev->driver_info->set_rx_mode)(dev); + + clear_bit(EVENT_SET_RX_MODE, &dev->flags); +} + /* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, @@ -1157,6 +1172,10 @@ skip_reset: if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) __handle_link_change(dev); + if (test_bit (EVENT_SET_RX_MODE, &dev->flags)) + __handle_set_rx_mode(dev); + + if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); } @@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_change_mtu = usbnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d75256bd1a6..b0bc8ead47d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: + { + static bool warned; + + if (!warned) { + warned = true; + netdev_warn(dev, + "host using disabled UFO feature; please fix it\n"); + } skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; + } case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; @@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) @@ -1666,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = { }; #endif +static bool virtnet_fail_on_feature(struct virtio_device *vdev, + unsigned int fbit, + const char *fname, const char *dname) +{ + if (!virtio_has_feature(vdev, fbit)) + return false; + + dev_err(&vdev->dev, "device advertises feature %s but not %s", 
+ fname, dname); + + return true; +} + +#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ + virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) + +static bool virtnet_validate_features(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && + (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, + "VIRTIO_NET_F_CTRL_VQ"))) { + return false; + } + + return true; +} + static int virtnet_probe(struct virtio_device *vdev) { int i, err; @@ -1673,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev) struct virtnet_info *vi; u16 max_queue_pairs; + if (!virtnet_validate_features(vdev)) + return -EINVAL; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, @@ -1705,7 +1749,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -1715,11 +1759,9 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; - if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) - dev->hw_features |= NETIF_F_UFO; if (gso) - dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) @@ -1757,8 +1799,7 @@ static int virtnet_probe(struct virtio_device *vdev) /* If we can receive ANY GSO packets, we must allocate large ones. */ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) @@ -1952,9 +1993,9 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ca309820d39..be4649a49c5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -67,12 +67,6 @@ #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ -/* VXLAN protocol header */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - /* UDP port for VXLAN traffic. * The IANA assigned port is 4789, but the Linux default is 8472 * for compatibility with early adopters. 
@@ -275,13 +269,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); } -/* Find VXLAN socket based on network namespace and UDP port */ -static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) +/* Find VXLAN socket based on network namespace, address family and UDP port */ +static struct vxlan_sock *vxlan_find_sock(struct net *net, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { - if (inet_sk(vs->sock->sk)->inet_sport == port) + if (inet_sk(vs->sock->sk)->inet_sport == port && + inet_sk(vs->sock->sk)->sk.sk_family == family) return vs; } return NULL; @@ -300,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) } /* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; - vs = vxlan_find_sock(net, port); + vs = vxlan_find_sock(net, family, port); if (!vs) return NULL; @@ -621,6 +618,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); int err = -ENOSYS; + udp_tunnel_gro_complete(skb, nhoff); + eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); type = eh->h_proto; @@ -1771,7 +1770,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *dst_vxlan; ip_rt_put(rt); - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); + dst_vxlan = vxlan_find_vni(vxlan->net, vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1825,7 +1825,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *dst_vxlan; dst_release(ndst); - dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); + dst_vxlan = vxlan_find_vni(vxlan->net, vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1985,13 +1986,15 @@ static int vxlan_init(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_sock *vs; + bool ipv6 = vxlan->flags & VXLAN_F_IPV6; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); + vs = vxlan_find_sock(vxlan->net, ipv6 ? 
AF_INET6 : AF_INET, + vxlan->dst_port); if (vs) { /* If we have a socket with same port already, reuse it */ atomic_inc(&vs->refcnt); @@ -2303,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (ipv6) { udp_conf.family = AF_INET6; udp_conf.use_udp6_tx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); udp_conf.use_udp6_rx_checksums = - !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); + !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); } else { udp_conf.family = AF_INET; udp_conf.local_ip.s_addr = INADDR_ANY; @@ -2382,6 +2385,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; + bool ipv6 = flags & VXLAN_F_IPV6; vs = vxlan_socket_create(net, port, rcv, data, flags); if (!IS_ERR(vs)) @@ -2391,7 +2395,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, return vs; spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(net, port); + vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); if (vs) { if (vs->rcv == rcv) atomic_inc(&vs->refcnt); @@ -2550,7 +2554,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; - if (vxlan_find_vni(net, vni, vxlan->dst_port)) { + if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, + vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; } diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index e5ba6faf328..86907e5ba6c 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -80,6 +80,7 @@ struct reg_dmn_pair_mapping { struct ath_regulatory { char alpha2[2]; + enum nl80211_dfs_regions region; u16 country_code; u16 max_power_level; u16 current_rd; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 697c4ae90af..1e8ea5e4d4c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) ah->enabled_cals |= TX_CL_CAL; else ah->enabled_cals &= ~TX_CL_CAL; + + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { + if (ah->is_clk_25mhz) { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); + } else { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); + } + udelay(100); + } } static void ar9003_hw_prog_ini(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index c6dd7f1fed6..33b0c7aef2e 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -368,11 +368,11 @@ void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, { struct ath_regulatory *reg = ath9k_hw_regulatory(ah); - if (reg->power_limit != new_txpow) { + if (reg->power_limit != new_txpow) ath9k_hw_set_txpowerlimit(ah, new_txpow, false); - /* read back in case value is clamped */ - *txpower = reg->max_power_level; - } + + /* read back in case value is clamped */ + *txpower = reg->max_power_level; } EXPORT_SYMBOL(ath9k_cmn_update_txpow); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 46f20a309b5..5c45e787814 100644 --- 
a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -455,7 +455,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf, "%2d %2x %1x %2x %2x\n", i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), - val[2] & (0x7 << (i * 3)) >> (i * 3), + (val[2] & (0x7 << (i * 3))) >> (i * 3), (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8be4b145339..2ad605760e2 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, udelay(RTC_PLL_SETTLE_DELAY); REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); - - if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { - if (ah->is_clk_25mhz) { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); - } else { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); - } - udelay(100); - } } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 156a944134d..3bd03049498 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -734,6 +734,32 @@ static const struct ieee80211_iface_combination if_comb[] = { #endif }; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT +static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_is_chanctx_enabled()) + return; + + hw->flags |= IEEE80211_HW_QUEUE_CONTROL; + hw->queues = ATH9K_NUM_TX_QUEUES; + hw->offchannel_tx_hw_queue = hw->queues - 1; + hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS); + hw->wiphy->iface_combinations = if_comb_multi; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi); + hw->wiphy->max_scan_ssids = 255; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->max_remain_on_channel_duration = 10000; + hw->chanctx_data_size = sizeof(void *); + hw->extra_beacon_tailroom = + sizeof(struct ieee80211_p2p_noa_attr) + 9; + + ath_dbg(common, CHAN_CTX, "Use channel contexts\n"); +} +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) { struct ath_hw *ah = sc->sc_ah; @@ -746,7 +772,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE | - IEEE80211_HW_QUEUE_CONTROL | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; if (ath9k_ps_enable) @@ -781,24 +806,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } -#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT - - if (ath9k_is_chanctx_enabled()) { - hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS); - hw->wiphy->iface_combinations = if_comb_multi; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi); - hw->wiphy->max_scan_ssids = 255; - hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; - hw->wiphy->max_remain_on_channel_duration = 10000; - hw->chanctx_data_size = sizeof(void *); - hw->extra_beacon_tailroom = - sizeof(struct ieee80211_p2p_noa_attr) + 9; - - ath_dbg(common, 
CHAN_CTX, "Use channel contexts\n"); - } - -#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -808,12 +815,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - /* allow 4 queues per channel context + - * 1 cab queue + 1 offchannel tx queue - */ - hw->queues = ATH9K_NUM_TX_QUEUES; - /* last queue for offchannel */ - hw->offchannel_tx_hw_queue = hw->queues - 1; + hw->queues = 4; hw->max_rates = 4; hw->max_listen_interval = 10; hw->max_rate_tries = 10; @@ -837,6 +839,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &common->sbands[IEEE80211_BAND_5GHZ]; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + ath9k_set_mcc_capab(sc, hw); +#endif ath9k_init_wow(hw); ath9k_cmn_reload_chainmask(ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6f6a974f7fd..4f18a6be0c7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, struct ath_vif *avp; /* - * Pick the MAC address of the first interface as the new hardware - * MAC address. The hardware will use it together with the BSSID mask - * when matching addresses. + * The hardware will use primary station addr together with the + * BSSID mask when matching addresses. */ memset(iter_data, 0, sizeof(*iter_data)); memset(&iter_data->mask, 0xff, ETH_ALEN); @@ -1162,6 +1161,9 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, { int i; + if (!ath9k_is_chanctx_enabled()) + return; + for (i = 0; i < IEEE80211_NUM_ACS; i++) vif->hw_queue[i] = i; @@ -1202,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, list_add_tail(&avp->list, &avp->chanctx->vifs); } + ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_assign_hw_queues(hw, vif); an->sc = sc; @@ -1271,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, ath_tx_node_cleanup(sc, &avp->mcast_node); + ath9k_calculate_summary_state(sc, avp->chanctx); + mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 493a183d0aa..d6e54a3c88f 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -169,7 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, if (txq->stopped && txq->pending_frames < sc->tx.txq_max_pending[q]) { - ieee80211_wake_queue(sc->hw, info->hw_queue); + if (ath9k_is_chanctx_enabled()) + ieee80211_wake_queue(sc->hw, info->hw_queue); + else + ieee80211_wake_queue(sc->hw, q); txq->stopped = false; } } @@ -2247,7 +2250,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, fi->txq = q; if (++txq->pending_frames > sc->tx.txq_max_pending[q] && !txq->stopped) { - ieee80211_stop_queue(sc->hw, info->hw_queue); + if (ath9k_is_chanctx_enabled()) + ieee80211_stop_queue(sc->hw, info->hw_queue); + else + ieee80211_stop_queue(sc->hw, q); txq->stopped = true; } } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 415393dfb6f..06ea6cc9e30 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -515,6 +515,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy, if (!request) return; + reg->region = request->dfs_region; switch 
(request->initiator) { case NL80211_REGDOM_SET_BY_CORE: /* @@ -779,6 +780,19 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, return SD_NO_CTL; } + if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) { + switch (reg->region) { + case NL80211_DFS_FCC: + return CTL_FCC; + case NL80211_DFS_ETSI: + return CTL_ETSI; + case NL80211_DFS_JP: + return CTL_MKK; + default: + break; + } + } + switch (band) { case IEEE80211_BAND_2GHZ: return reg->regpair->reg_2ghz_ctl; diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 1dfc682a805..ee27b06074e 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) { - assert_mac_suspended(dev); - dev->phy.ops->phy_write(dev, destreg, - dev->phy.ops->phy_read(dev, srcreg)); + b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg)); } void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index f55f625fd06..d20d4e6f391 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -670,7 +670,6 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci, struct brcmf_sdio_dev *sdiodev) { int i; - uint fw_len, nv_len; char end; for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) { @@ -684,25 +683,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci, return -ENODEV; } - fw_len = sizeof(sdiodev->fw_name) - 1; - nv_len = sizeof(sdiodev->nvram_name) - 1; /* check if firmware path is provided by module parameter */ if (brcmf_firmware_path[0] != '\0') { - strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len); - strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len); - fw_len -= strlen(sdiodev->fw_name); - nv_len -= strlen(sdiodev->nvram_name); + strlcpy(sdiodev->fw_name, brcmf_firmware_path, + sizeof(sdiodev->fw_name)); + strlcpy(sdiodev->nvram_name, brcmf_firmware_path, + sizeof(sdiodev->nvram_name)); end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; if (end != '/') { - strncat(sdiodev->fw_name, "/", fw_len); - strncat(sdiodev->nvram_name, "/", nv_len); - fw_len--; - nv_len--; + strlcat(sdiodev->fw_name, "/", + sizeof(sdiodev->fw_name)); + strlcat(sdiodev->nvram_name, "/", + sizeof(sdiodev->nvram_name)); } } - strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len); - strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len); + strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin, + sizeof(sdiodev->fw_name)); + strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, + sizeof(sdiodev->nvram_name)); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c index f05f5270fec..927bffd5be6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c @@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) return; irq = irq_of_parse_and_map(np, 0); - if (irq < 0) { - brcmf_err("interrupt could not be mapped: err=%d\n", irq); + if (!irq) { + brcmf_err("interrupt could not be mapped\n"); devm_kfree(dev, sdiodev->pdata); return; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 8c0632ec9f7..16fef338201 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c 
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -19,10 +19,10 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/delay.h> -#include <linux/unaligned/access_ok.h> #include <linux/interrupt.h> #include <linux/bcma/bcma.h> #include <linux/sched.h> +#include <asm/unaligned.h> #include <soc.h> #include <chipcommon.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index dc135915470..875d1142c8b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd, goto finalize; } - if (!brcmf_usb_ioctl_resp_wait(devinfo)) + if (!brcmf_usb_ioctl_resp_wait(devinfo)) { + usb_kill_urb(devinfo->ctl_urb); ret = -ETIMEDOUT; - else + } else { memcpy(buffer, tmpbuf, buflen); + } finalize: kfree(tmpbuf); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 28fa25b509d..39b45c038a9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, primary_offset = ch->center_freq1 - ch->chan->center_freq; switch (ch->width) { case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: ch_inf.bw = BRCMU_CHAN_BW_20; WARN_ON(primary_offset != 0); break; @@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, ch_inf.sb = BRCMU_CHAN_SB_LU; } break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: default: WARN_ON_ONCE(1); } @@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, case IEEE80211_BAND_5GHZ: ch_inf.band = BRCMU_CHAN_BAND_5G; break; + case IEEE80211_BAND_60GHZ: default: WARN_ON_ONCE(1); } diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 2364a3c09b9..cae692ff101 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 scd_queues; mutex_lock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto done; } - /* - * mac80211 will not push any more frames for transmit - * until the flush is completed - */ - if (drop) { - IWL_DEBUG_MAC80211(priv, "send flush command\n"); - if (iwlagn_txfifo_flush(priv, 0)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } + scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1; + scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | + BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); + + if (vif) + scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); + + IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); + if (iwlagn_txfifo_flush(priv, scd_queues)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; } - IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); + IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); done: mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 
e4351487ca7..d2b7234b1c7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -82,7 +82,8 @@ #define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL8000_FW_PRE "iwlwifi-8000" -#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode" +#define IWL8000_MODULE_FIRMWARE(api) \ + IWL8000_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_8000 10 #define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin" diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 4f6e66892ac..b894a84e839 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api { * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command + * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command */ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), @@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 9eb85249e89..d8fc548c0d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -563,6 +563,7 @@ enum iwl_trans_state { * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. * @pm_support: set to true in start_hw if link pm is supported + * @ltr_enabled: set to true if the LTR is enabled * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. 
* @dev_cmd_headroom: room needed for the transport's private use before the @@ -589,6 +590,7 @@ struct iwl_trans { u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; bool pm_support; + bool ltr_enabled; /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 8df2021f985..da2ffb78519 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -303,8 +303,8 @@ static const __le64 iwl_ci_mask[][3] = { }; static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { - cpu_to_le32(0x28412201), - cpu_to_le32(0x11118451), + cpu_to_le32(0x2e402280), + cpu_to_le32(0x7711a751), }; struct corunning_block_luts { diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index 585c0ab4a3e..8a1d2f33d5b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -291,8 +291,8 @@ static const __le64 iwl_ci_mask[][3] = { }; static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { - cpu_to_le32(0x28412201), - cpu_to_le32(0x11118451), + cpu_to_le32(0x2e402280), + cpu_to_le32(0x7711a751), }; struct corunning_block_luts { diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 27dd86395b3..2fd8ad4633e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -68,13 +68,46 @@ /* Power Management Commands, Responses, Notifications */ +/** + * enum iwl_ltr_config_flags - masks for LTR config command flags + * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status + * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow + * memory access + * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR + * reg change + * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from + * D0 to D3 + * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register + * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register + * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD + */ +enum iwl_ltr_config_flags { + LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), + LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), + LTR_CFG_FLAG_SW_SET_LONG = BIT(5), + LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), +}; + +/** + * struct iwl_ltr_config_cmd - configures the LTR + * @flags: See %enum iwl_ltr_config_flags + */ +struct iwl_ltr_config_cmd { + __le32 flags; + __le32 static_long; + __le32 static_short; +} __packed; + /* Radio LP RX Energy Threshold measured in dBm */ #define POWER_LPRX_RSSI_THRESHOLD 75 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30 /** - * enum iwl_scan_flags - masks for power table command flags + * enum iwl_power_flags - masks for power table command flags * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. 
* @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 667a92274c8..c62575d86bc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -157,6 +157,7 @@ enum { /* Power - legacy power table command */ POWER_TABLE_CMD = 0x77, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, + LTR_CONFIG = 0xee, /* Thermal Throttling*/ REPLY_THERMAL_MNG_BACKOFF = 0x7e, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 23fd711a67e..eb03943f846 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->init_ucode_complete)) + if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) return 0; iwl_init_notification_wait(&mvm->notif_wait, @@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) goto out; } + mvm->calibrating = true; + /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); if (ret) @@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) MVM_UCODE_CALIB_TIMEOUT); if (!ret) mvm->init_ucode_complete = true; + + if (ret && iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); + ret = 1; + } goto out; error: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: + mvm->calibrating = false; if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + @@ -480,6 +488,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); + if (mvm->trans->ltr_enabled) { + struct iwl_ltr_config_cmd cmd = { + .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), + }; + + WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, + sizeof(cmd), &cmd)); + } + ret = iwl_mvm_power_update_device(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index c7a73c68bda..b6d2683da3a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -526,7 +526,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) + !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && + !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ @@ -787,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->scan_status = IWL_MVM_SCAN_NONE; mvm->ps_disabled = false; + mvm->calibrating = false; /* just in case one was running */ ieee80211_remain_on_channel_expired(mvm->hw); @@ -1734,6 +1736,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_BEACON && iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) IWL_WARN(mvm, "Failed updating beacon data\n"); + + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", + bss_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); + } + } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, @@ 
-2367,14 +2376,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, /* Set the node address */ memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->time_event_lock); + + if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { + spin_unlock_bh(&mvm->time_event_lock); + return -EIO; + } + te_data->vif = vif; te_data->duration = duration; te_data->id = HOT_SPOT_CMD; - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->time_event_lock); - list_add_tail(&te_data->list, &mvm->time_event_list); spin_unlock_bh(&mvm->time_event_lock); /* @@ -2430,22 +2444,29 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); + mutex_lock(&mvm->mutex); + switch (vif->type) { case NL80211_IFTYPE_STATION: - /* Use aux roc framework (HS20) */ - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, - vif, duration); - return ret; + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) { + /* Use aux roc framework (HS20) */ + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, + vif, duration); + goto out_unlock; + } + IWL_ERR(mvm, "hotspot not supported\n"); + ret = -EINVAL; + goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } - mutex_lock(&mvm->mutex); - for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index b153ced7015..845429c88cf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -548,6 +548,7 @@ struct iwl_mvm { enum iwl_ucode_type cur_ucode; bool ucode_loaded; bool init_ucode_complete; + bool calibrating; u32 error_event_table; u32 log_event_table; u32 umac_error_event_table; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 15aa298ee79..5b719ee8e78 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -336,6 +336,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(DTS_MEASUREMENT_NOTIFICATION), CMD(REPLY_THERMAL_MNG_BACKOFF), CMD(MAC_PM_POWER_TABLE), + CMD(LTR_CONFIG), CMD(BT_COEX_CI), CMD(BT_COEX_UPDATE_SW_BOOST), CMD(BT_COEX_UPDATE_CORUN_LUT), @@ -423,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->sf_state = SF_UNINIT; mvm->low_latency_agg_frame_limit = 6; + mvm->cur_ucode = IWL_UCODE_INIT; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); @@ -751,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + bool calibrating = ACCESS_ONCE(mvm->calibrating); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); @@ -759,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); - return state && mvm->cur_ucode != IWL_UCODE_INIT; + /* iwl_run_init_mvm_ucode is waiting for results, abort it */ + if (calibrating) + iwl_abort_notification_waits(&mvm->notif_wait); + + /* + * Stop the device if we run OPERATIONAL firmware or if we are in the + * middle of the calibrations. 
+ */ + return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index cb85e63c20a..7554f705383 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -459,7 +459,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, basic_ssid ? 1 : 0); cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | - TX_CMD_FLG_BT_DIS); + 3 << TX_CMD_FLG_BT_PRIO_POS); + cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); cmd->tx_cmd.rate_n_flags = @@ -601,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm) SCAN_COMPLETE_NOTIFICATION }; int ret; - if (mvm->scan_status == IWL_MVM_SCAN_NONE) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - mvm->scan_status = IWL_MVM_SCAN_NONE; - return 0; - } - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, scan_abort_notif, ARRAY_SIZE(scan_abort_notif), @@ -1399,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ieee80211_scan_completed(mvm->hw, true); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + mvm->scan_status = IWL_MVM_SCAN_NONE; + return 0; + } + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) return iwl_mvm_scan_offload_stop(mvm, true); return iwl_mvm_cancel_regular_scan(mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index b7f9e61d14e..6dfad230be5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -305,8 +305,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, te_data->running = false; te_data->vif = NULL; te_data->uid = 0; + te_data->id = TE_MAX; } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; ieee80211_ready_on_channel(mvm->hw); /* Start TE */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 1cb793a498a..c6a517c771d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -175,14 +175,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, /* * for data packets, rate info comes from the table inside the fw. This - * table is controlled by LINK_QUALITY commands. Exclude ctrl port - * frames like EAPOLs which should be treated as mgmt frames. This - * avoids them being sent initially in high rates which increases the - * chances for completion of the 4-Way handshake. 
+ * table is controlled by LINK_QUALITY commands */ - if (ieee80211_is_data(fc) && sta && - !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { + if (ieee80211_is_data(fc) && sta) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 1393bac0025..dd2f3f8baa9 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -174,6 +174,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; + u16 cap; /* * HW bug W/A for instability in PCIe bus L0S->L1 transition. @@ -184,16 +185,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans) * power savings, even without L1. */ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { - /* L1-ASPM enabled; disable(!) L0S */ + if (lctl & PCI_EXP_LNKCTL_ASPM_L1) iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - dev_info(trans->dev, "L1 Enabled; Disabling L0S\n"); - } else { - /* L1-ASPM disabled; enable(!) L0S */ + else iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - dev_info(trans->dev, "L1 Disabled; Enabling L0S\n"); - } trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); + + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; + dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", + trans->ltr_enabled ? "En" : "Dis"); } /* @@ -428,7 +430,7 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) ret = iwl_poll_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); - if (ret) + if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_DEBUG_INFO(trans, "stop master\n"); @@ -544,7 +546,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) msleep(25); } - IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter); + IWL_ERR(trans, "Couldn't prepare the card\n"); return ret; } @@ -913,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) * restart. So don't process again if the device is * already dead. 
*/ - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); @@ -943,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); - clear_bit(STATUS_DEVICE_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); clear_bit(STATUS_RFKILL, &trans->status); @@ -1043,7 +1045,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); - if (ret) { + if (ret < 0) { IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); return ret; } @@ -1892,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, int reg; __le32 *val; - prph_len += sizeof(*data) + sizeof(*prph) + - num_bytes_in_chunk; + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index babbdc1ce74..c9ad4cf1adf 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, if (err != 0) { printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", err); - goto failed_hw; + goto failed_bind; } skb_queue_head_init(&data->pending); @@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, return idx; failed_hw: + device_release_driver(data->dev); +failed_bind: device_unregister(data->dev); failed_drvdata: ieee80211_free_hw(hw); diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 40057079ffb..5ef5a0eeba5 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -196,6 +196,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); del_timer_sync(&tbl->timer_context.timer); + tbl->timer_context.timer_is_set = false; spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); list_del(&tbl->list); @@ -297,6 +298,7 @@ mwifiex_flush_data(unsigned long context) (struct reorder_tmr_cnxt *) context; int start_win, seq_num; + ctx->timer_is_set = false; seq_num = mwifiex_11n_find_last_seq_num(ctx); if (seq_num < 0) @@ -385,6 +387,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, new_node->timer_context.ptr = new_node; new_node->timer_context.priv = priv; + new_node->timer_context.timer_is_set = false; init_timer(&new_node->timer_context.timer); new_node->timer_context.timer.function = mwifiex_flush_data; @@ -399,6 +402,22 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); } +static void +mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl) +{ + u32 min_flush_time; + + if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32) + min_flush_time = MIN_FLUSH_TIMER_15_MS; + else + min_flush_time = MIN_FLUSH_TIMER_MS; + + mod_timer(&tbl->timer_context.timer, + jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size)); + + 
tbl->timer_context.timer_is_set = true; +} + /* * This function prepares command for adding a BA request. * @@ -523,31 +542,31 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, u8 *ta, u8 pkt_type, void *payload) { struct mwifiex_rx_reorder_tbl *tbl; - int start_win, end_win, win_size; + int prev_start_win, start_win, end_win, win_size; u16 pkt_index; bool init_window_shift = false; + int ret = 0; tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (!tbl) { if (pkt_type != PKT_TYPE_BAR) mwifiex_11n_dispatch_pkt(priv, payload); - return 0; + return ret; } if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { mwifiex_11n_dispatch_pkt(priv, payload); - return 0; + return ret; } start_win = tbl->start_win; + prev_start_win = start_win; win_size = tbl->win_size; end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) { init_window_shift = true; tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT; } - mod_timer(&tbl->timer_context.timer, - jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size)); if (tbl->flags & RXREOR_FORCE_NO_DROP) { dev_dbg(priv->adapter->dev, @@ -568,11 +587,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) { if (seq_num >= ((start_win + TWOPOW11) & (MAX_TID_VALUE - 1)) && - seq_num < start_win) - return -1; + seq_num < start_win) { + ret = -1; + goto done; + } } else if ((seq_num < start_win) || - (seq_num > (start_win + TWOPOW11))) { - return -1; + (seq_num >= (start_win + TWOPOW11))) { + ret = -1; + goto done; } } @@ -601,8 +623,10 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, else pkt_index = (seq_num+MAX_TID_VALUE) - start_win; - if (tbl->rx_reorder_ptr[pkt_index]) - return -1; + if (tbl->rx_reorder_ptr[pkt_index]) { + ret = -1; + goto done; + } tbl->rx_reorder_ptr[pkt_index] = payload; } @@ -613,7 +637,11 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, */ mwifiex_11n_scan_and_dispatch(priv, tbl); - return 0; +done: + if (!tbl->timer_context.timer_is_set || + prev_start_win != tbl->start_win) + mwifiex_11n_rxreorder_timer_restart(tbl); + return ret; } /* diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h index 3a87bb0e3a6..63ecea89b4a 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.h +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h @@ -21,6 +21,8 @@ #define _MWIFIEX_11N_RXREORDER_H_ #define MIN_FLUSH_TIMER_MS 50 +#define MIN_FLUSH_TIMER_15_MS 15 +#define MWIFIEX_BA_WIN_SIZE_32 32 #define PKT_TYPE_BAR 0xE7 #define MAX_TID_VALUE (2 << 11) diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index e2635747d96..f55658d15c6 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -592,6 +592,7 @@ struct reorder_tmr_cnxt { struct timer_list timer; struct mwifiex_rx_reorder_tbl *ptr; struct mwifiex_private *priv; + u8 timer_is_set; }; struct mwifiex_rx_reorder_tbl { diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 573897b8e87..8444313eabe 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Ovislink */ { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, + { USB_DEVICE(0x1b75, 0xa200) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c 
b/drivers/net/wireless/rt2x00/rt2x00queue.c index 8e68f87ab13..66ff36447b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb) skb_trim(skb, frame_length); } -void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) +/* + * H/W needs L2 padding between the header and the payload if header size + * is not 4 bytes aligned. + */ +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - unsigned int payload_length = skb->len - header_length; - unsigned int header_align = ALIGN_SIZE(skb, 0); - unsigned int payload_align = ALIGN_SIZE(skb, header_length); - unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; - /* - * Adjust the header alignment if the payload needs to be moved more - * than the header. - */ - if (payload_align > header_align) - header_align += 4; - - /* There is nothing to do if no alignment is needed */ - if (!header_align) + if (!l2pad) return; - /* Reserve the amount of space needed in front of the frame */ - skb_push(skb, header_align); - - /* - * Move the header. - */ - memmove(skb->data, skb->data + header_align, header_length); - - /* Move the payload, if present and if required */ - if (payload_length && payload_align) - memmove(skb->data + header_length + l2pad, - skb->data + header_length + l2pad + payload_align, - payload_length); - - /* Trim the skb to the correct size */ - skb_trim(skb, header_length + l2pad + payload_length); + skb_push(skb, l2pad); + memmove(skb->data, skb->data + l2pad, hdr_len); } -void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - /* - * L2 padding is only present if the skb contains more than just the - * IEEE 802.11 header. - */ - unsigned int l2pad = (skb->len > header_length) ? - L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ?
L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; - memmove(skb->data + l2pad, skb->data, header_length); + memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 58ba7183088..40b6d1d006d 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -467,7 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); /* <2> work queue */ rtlpriv->works.hw = hw; - rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); + rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, (void *)rtl_watchdog_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index f6179bc0608..07dae0d44ab 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -1828,3 +1828,9 @@ const struct ieee80211_ops rtl_ops = { .flush = rtl_op_flush, }; EXPORT_SYMBOL_GPL(rtl_ops); + +bool rtl_btc_status_false(void) +{ + return false; +} +EXPORT_SYMBOL_GPL(rtl_btc_status_false); diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h index 59cd3b9dca2..624e1dc16d3 100644 --- a/drivers/net/wireless/rtlwifi/core.h +++ b/drivers/net/wireless/rtlwifi/core.h @@ -42,5 +42,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, u32 mask, u32 data); void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); +bool rtl_btc_status_false(void); #endif diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 667aba81246..846a2e6e34d 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) break; } /* handle command packet here */ - if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { + if (rtlpriv->cfg->ops->rx_command_packet && + rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { dev_kfree_skb_any(skb); goto end; } @@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) __skb_queue_tail(&ring->queue, pskb); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, - &temp_one); - + if (rtlpriv->use_new_trx_flow) { + temp_one = 4; + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true, + HW_DESC_OWN, (u8 *)&temp_one); + } else { + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, + &temp_one); + } return; } @@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, ring->desc = NULL; if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, - sizeof(*ring->desc) * ring->entries, + sizeof(*ring->buffer_desc) * ring->entries, ring->buffer_desc, ring->buffer_desc_dma); - ring->desc = NULL; + ring->buffer_desc = NULL; } } @@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); - ring->idx = (ring->idx + 1) % ring->entries; kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } @@ -1796,7 +1801,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw) rtl_pci_reset_trx_ring(hw); rtlpci->driver_is_goingto_unload = false; - if (rtlpriv->cfg->ops->get_btc_status()) { + if 
(rtlpriv->cfg->ops->get_btc_status && + rtlpriv->cfg->ops->get_btc_status()) { rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); } @@ -2243,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev, /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); + err = -ENODEV; + goto fail3; + } + rtlpriv->cfg->ops->init_sw_leds(hw); + + /*aspm */ + rtl_pci_init_aspm(hw); + /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { @@ -2258,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev, goto fail3; } - if (rtlpriv->cfg->ops->init_sw_vars(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); - err = -ENODEV; - goto fail3; - } - rtlpriv->cfg->ops->init_sw_leds(hw); - - /*aspm */ - rtl_pci_init_aspm(hw); - err = ieee80211_register_hw(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index a00861b26ec..29983bc96a8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -656,7 +656,8 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; -void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) +void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *)) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -722,7 +723,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - rtstatus = rtl_cmd_send_packet(hw, skb); + if (cmd_send_packet) + rtstatus = cmd_send_packet(hw, skb); + else + rtstatus = rtl_cmd_send_packet(hw, skb); if (rtstatus) b_dlok = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index a815bd6273d..b64ae45dc67 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -109,7 +109,9 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); -void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl92c_set_fw_rsvdpagepkt + (struct ieee80211_hw *hw, + bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *)); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index 831df101d7b..9b660df6fd7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -114,6 +114,8 @@ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) #define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) +#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \ + SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32) #define CHIP_VER_B BIT(4) #define 
CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index 8ec0f031f48..55357d69397 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -459,7 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); - rtl92c_set_fw_rsvdpagepkt(hw, 0); + rtl92c_set_fw_rsvdpagepkt(hw, NULL); _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index d86b5b56644..46ea07605eb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -244,6 +244,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = { .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback, .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, + .get_btc_status = rtl_btc_status_false, }; static struct rtl_mod_params rtl92ce_mod_params = { diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index 2fb9c7acb76..dc3d20b17a2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -728,6 +728,9 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name) case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc); + break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 04aa0b5f5b3..873363acbac 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1592,6 +1592,20 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } } +bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + /* Currently nothing happens here. + * Traffic stops after some seconds in WPA2 802.11n mode. + * Maybe because rtl8192cu chip should be set from here? + * If I understand correctly, the realtek vendor driver sends some urbs + * if it's "here".
+ * + * This is maybe necessary: + * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb); + */ + return true; +} + void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1939,7 +1953,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) recover = true; rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); - rtl92c_set_fw_rsvdpagepkt(hw, 0); + rtl92c_set_fw_rsvdpagepkt(hw, + &usb_cmd_send_packet); _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0); _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); if (recover) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h index 0f7812e0c8a..c1e33b0228c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -104,7 +104,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); int rtl92c_download_fw(struct ieee80211_hw *hw); void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); -void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 7c5fbaf5fee..e06bafee37f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -101,6 +101,12 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw) } } +/* get bt coexist status */ +static bool rtl92cu_get_btc_status(void) +{ + return false; +} + static struct rtl_hal_ops rtl8192cu_hal_ops = { .init_sw_vars = rtl92cu_init_sw_vars, .deinit_sw_vars = rtl92cu_deinit_sw_vars, @@ -148,6 +154,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, .fill_h2c_cmd = rtl92c_fill_h2c_cmd, + .get_btc_status = rtl92cu_get_btc_status, }; static struct rtl_mod_params rtl92cu_mod_params = { diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c index edab5a5351b..a0aba088259 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c @@ -251,6 +251,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .get_rfreg = rtl92d_phy_query_rf_reg, .set_rfreg = rtl92d_phy_set_rf_reg, .linked_set_reg = rtl92d_linked_set_reg, + .get_btc_status = rtl_btc_status_false, }; static struct rtl_mod_params rtl92de_mod_params = { diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c index dfdc9b20e4a..1a87edca2c3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -362,7 +362,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "switch case not process %x\n", variable); break; } @@ -591,7 +591,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_BEQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "switch case not process\n"); break; } @@ -710,7 +710,7 @@ void 
rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "switch case not process %x\n", variable); break; } @@ -2424,7 +2424,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "switch case not process\n"); enc_algo = CAM_TKIP; break; diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h index 83c98674bfd..6e7a70b4394 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h @@ -446,6 +446,8 @@ /* DWORD 6 */ #define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \ SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val) +#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \ + SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32) #define SE_RX_HAL_IS_CCK_RATE(_pdesc)\ (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c index 00e067044c0..5761d5b49e3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw, } + if (type != NL80211_IFTYPE_AP && + rtlpriv->mac80211.link_state < MAC80211_LINKED) + bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK; rtl_write_byte(rtlpriv, (MSR), bt_msr); temp = rtl_read_dword(rtlpriv, TCR); @@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]); /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */ rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F); + rtlpci->irq_enabled = true; } void rtl92se_disable_interrupt(struct ieee80211_hw *hw) @@ -1276,8 +1280,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw) rtlpci = rtl_pcidev(rtl_pcipriv(hw)); rtl_write_dword(rtlpriv, INTA_MASK, 0); rtl_write_dword(rtlpriv, INTA_MASK + 4, 0); - - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c index 77c5b5f3524..4b4612fe2fd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c @@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + return true; } if (currentcmd->cmdid == CMDID_END) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index 1bff2a0f760..fb003868bde 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -87,11 +87,8 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw) static void rtl92se_fw_cb(const struct firmware *firmware, void *context) { struct ieee80211_hw *hw = context; - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); struct rt_firmware *pfirmware = NULL; - int err; RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "Firmware callback routine entered!\n"); @@ -112,20 +109,6 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context) 
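Several of the hunks in this area wire a .get_btc_status callback (either a local stub or rtl_btc_status_false) into the per-chip HAL ops. Presumably the common code consults it before running Bluetooth-coexistence logic, so chips without BT coexistence simply report false. A minimal stand-alone sketch of that optional-callback pattern, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

struct hal_ops {
        bool (*get_btc_status)(void);
};

static bool btc_status_false(void)
{
        return false;                   /* no BT coexistence on this chip */
}

static void periodic_work(const struct hal_ops *ops)
{
        if (ops->get_btc_status && ops->get_btc_status())
                printf("run BT-coexist logic\n");
        else
                printf("skip BT-coexist logic\n");
}

int main(void)
{
        struct hal_ops ops = { .get_btc_status = btc_status_false };

        periodic_work(&ops);
        return 0;
}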
memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); pfirmware->sz_fw_tmpbufferlen = firmware->size; release_firmware(firmware); - - err = ieee80211_register_hw(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw\n"); - return; - } else { - rtlpriv->mac80211.mac80211_registered = 1; - } - rtlpci->irq_alloc = 1; - set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); - - /*init rfkill */ - rtl_init_rfkill(hw); } static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) @@ -226,8 +209,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) if (!rtlpriv->rtlhal.pfirmware) return 1; - rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE; - + rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + + sizeof(struct fw_hdr); pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" "Loading firmware %s\n", rtlpriv->cfg->fw_name); /* request fw */ @@ -253,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw) } } +static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, + u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN); + + if (own) + return false; + return true; +} + static struct rtl_hal_ops rtl8192se_hal_ops = { .init_sw_vars = rtl92s_init_sw_vars, .deinit_sw_vars = rtl92s_deinit_sw_vars, @@ -286,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = { .led_control = rtl92se_led_control, .set_desc = rtl92se_set_desc, .get_desc = rtl92se_get_desc, + .is_tx_desc_closed = rtl92se_is_tx_desc_closed, .tx_polling = rtl92se_tx_polling, .enable_hw_sec = rtl92se_enable_hw_security_config, .set_key = rtl92se_set_key, @@ -294,6 +291,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = { .set_bbreg = rtl92s_phy_set_bb_reg, .get_rfreg = rtl92s_phy_query_rf_reg, .set_rfreg = rtl92s_phy_set_rf_reg, + .get_btc_status = rtl_btc_status_false, }; static struct rtl_mod_params rtl92se_mod_params = { @@ -322,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[MAC_RCR_ACRC32] = RCR_ACRC32, .maps[MAC_RCR_ACF] = RCR_ACF, .maps[MAC_RCR_AAP] = RCR_AAP, + .maps[MAC_HIMR] = INTA_MASK, + .maps[MAC_HIMRE] = INTA_MASK + 4, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index b358ebce894..672fd3b0283 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -640,6 +640,9 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name) case HW_DESC_RXPKT_LEN: ret = GET_RX_STATUS_DESC_PKT_LEN(desc); break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc); + break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 310d3163dc5..8ec8200002c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_ADHOC) macid = sta->aid + 1; if (wirelessmode == WIRELESS_MODE_N_5G || - wirelessmode == WIRELESS_MODE_AC_5G) - ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; + wirelessmode == WIRELESS_MODE_AC_5G || + wirelessmode == WIRELESS_MODE_A) + ratr_bitmap = 
sta->supp_rates[NL80211_BAND_5GHZ] << 4; else ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c index 9786313dc62..1e9570fa874 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c @@ -1889,15 +1889,18 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw, struct rtl_phy *rtlphy = &rtlpriv->phy; u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr); - if (band != BAND_ON_2_4G && band != BAND_ON_5G) + if (band != BAND_ON_2_4G && band != BAND_ON_5G) { RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band); - - if (rfpath >= MAX_RF_PATH) + band = BAND_ON_2_4G; + } + if (rfpath >= MAX_RF_PATH) { RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath); - - if (txnum >= MAX_RF_PATH) + rfpath = MAX_RF_PATH - 1; + } + if (txnum >= MAX_RF_PATH) { RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum); - + txnum = MAX_RF_PATH - 1; + } rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n", diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 10cf69c4bc4..46ee956d023 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -1117,7 +1117,18 @@ int rtl_usb_probe(struct usb_interface *intf, } rtlpriv->cfg->ops->init_sw_leds(hw); + err = ieee80211_register_hw(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't register mac80211 hw.\n"); + err = -ENODEV; + goto error_out; + } + rtlpriv->mac80211.mac80211_registered = 1; + + set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; + error_out: rtl_deinit_core(hw); _rtl_usb_io_handler_release(hw); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index d4eb8d2e9cb..083ecc93fe5 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ struct xen_netif_rx_back_ring rx; struct sk_buff_head rx_queue; - RING_IDX rx_last_skb_slots; - unsigned long status; - struct timer_list rx_stalled; + unsigned int rx_queue_max; + unsigned int rx_queue_len; + unsigned long last_rx_time; + bool stalled; struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; @@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */ struct xenvif_stats stats; }; +/* Maximum number of Rx slots a to-guest packet may use, including the + * slot needed for GSO meta-data. + */ +#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1) + enum state_bit_shift { /* This bit marks that the vif is connected */ VIF_STATUS_CONNECTED, - /* This bit signals the RX thread that queuing was stopped (in - * start_xmit), and either the timer fired or an RX interrupt came - */ - QUEUE_STATUS_RX_PURGE_EVENT, - /* This bit tells the interrupt handler that this queue was the reason - * for the carrier off, so it should kick the thread. Only queues which - * brought it down can turn on the carrier. - */ - QUEUE_STATUS_RX_STALLED }; struct xenvif { @@ -228,9 +225,6 @@ struct xenvif { u8 ip_csum:1; u8 ipv6_csum:1; - /* Internal feature information. */ - u8 can_queue:1; /* can queue packets for receiver? */ - /* Is this interface disabled? 
True when backend discovers * frontend is rogue. */ @@ -240,6 +234,9 @@ struct xenvif { /* Queues */ struct xenvif_queue *queues; unsigned int num_queues; /* active queues, resource allocated */ + unsigned int stalled_queues; + + spinlock_t lock; #ifdef CONFIG_DEBUG_FS struct dentry *xenvif_dbg_root; @@ -249,6 +246,14 @@ struct xenvif { struct net_device *dev; }; +struct xenvif_rx_cb { + unsigned long expires; + int meta_slots_used; + bool full_coalesce; +}; + +#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) + static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) { return to_xenbus_device(vif->dev->dev.parent); @@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void); int xenvif_schedulable(struct xenvif *vif); -int xenvif_must_stop_queue(struct xenvif_queue *queue); - int xenvif_queue_stopped(struct xenvif_queue *queue); void xenvif_wake_queue(struct xenvif_queue *queue); @@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue); int xenvif_dealloc_kthread(void *data); +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); + /* Determine whether the needed number of slots (req) are available, * and set req_event if not. */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f379689dde3..895fe84011e 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -43,6 +43,9 @@ #define XENVIF_QUEUE_LENGTH 32 #define XENVIF_NAPI_WEIGHT 64 +/* Number of bytes allowed on the internal guest Rx queue. */ +#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) + /* This function is used to set SKBTX_DEV_ZEROCOPY as well as * increasing the inflight counter. We need to increase the inflight * counter because core driver calls into xenvif_zerocopy_callback @@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) atomic_dec(&queue->inflight_packets); } -static inline void xenvif_stop_queue(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - - if (!queue->vif->can_queue) - return; - - netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); -} - int xenvif_schedulable(struct xenvif *vif) { return netif_running(vif->dev) && - test_bit(VIF_STATUS_CONNECTED, &vif->status); + test_bit(VIF_STATUS_CONNECTED, &vif->status) && + !vif->disabled; } static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) @@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget) static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; - struct netdev_queue *net_queue = - netdev_get_tx_queue(queue->vif->dev, queue->id); - /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR - * the carrier went down and this queue was previously blocked - */ - if (unlikely(netif_tx_queue_stopped(net_queue) || - (!netif_carrier_ok(queue->vif->dev) && - test_bit(QUEUE_STATUS_RX_STALLED, &queue->status)))) - set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); xenvif_kick_thread(queue); return IRQ_HANDLED; @@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue) netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); } -/* Callback to wake the queue's thread and turn the carrier off on timeout */ -static void xenvif_rx_stalled(unsigned long data) -{ - struct xenvif_queue *queue = (struct xenvif_queue *)data; - - if (xenvif_queue_stopped(queue)) { - set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status); - xenvif_kick_thread(queue); - } -} - static int 
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; unsigned int num_queues = vif->num_queues; u16 index; - int min_slots_needed; + struct xenvif_rx_cb *cb; BUG_ON(skb->dev != dev); @@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) !xenvif_schedulable(vif)) goto drop; - /* At best we'll need one slot for the header and one for each - * frag. - */ - min_slots_needed = 1 + skb_shinfo(skb)->nr_frags; - - /* If the skb is GSO then we'll also need an extra slot for the - * metadata. - */ - if (skb_is_gso(skb)) - min_slots_needed++; + cb = XENVIF_RX_CB(skb); + cb->expires = jiffies + rx_drain_timeout_jiffies; - /* If the skb can't possibly fit in the remaining slots - * then turn off the queue to give the ring a chance to - * drain. - */ - if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) { - queue->rx_stalled.function = xenvif_rx_stalled; - queue->rx_stalled.data = (unsigned long)queue; - xenvif_stop_queue(queue); - mod_timer(&queue->rx_stalled, - jiffies + rx_drain_timeout_jiffies); - } - - skb_queue_tail(&queue->rx_queue, skb); + xenvif_rx_queue_tail(queue, skb); xenvif_kick_thread(queue); return NETDEV_TX_OK; @@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif->queues = NULL; vif->num_queues = 0; + spin_lock_init(&vif->lock); + dev->netdev_ops = &xenvif_netdev_ops; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue) init_timer(&queue->credit_timeout); queue->credit_window_start = get_jiffies_64(); + queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; + skb_queue_head_init(&queue->rx_queue); skb_queue_head_init(&queue->tx_queue); @@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue) queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; } - init_timer(&queue->rx_stalled); - return 0; } @@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif) dev_set_mtu(vif->dev, ETH_DATA_LEN); netdev_update_features(vif->dev); set_bit(VIF_STATUS_CONNECTED, &vif->status); - netif_carrier_on(vif->dev); if (netif_running(vif->dev)) xenvif_up(vif); rtnl_unlock(); @@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, disable_irq(queue->rx_irq); } + queue->stalled = true; + task = kthread_create(xenvif_kthread_guest_rx, (void *)queue, "%s-guest-rx", queue->name); if (IS_ERR(task)) { @@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif) netif_napi_del(&queue->napi); if (queue->task) { - del_timer_sync(&queue->rx_stalled); kthread_stop(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 08f65996534..6563f0713fc 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -55,13 +55,20 @@ bool separate_tx_rx_irq = 1; module_param(separate_tx_rx_irq, bool, 0644); -/* When guest ring is filled up, qdisc queues the packets for us, but we have - * to timeout them, otherwise other guests' packets can get stuck there +/* The time that packets can stay on the guest Rx internal queue + * before they are dropped. */ unsigned int rx_drain_timeout_msecs = 10000; module_param(rx_drain_timeout_msecs, uint, 0444); unsigned int rx_drain_timeout_jiffies; +/* The length of time before the frontend is considered unresponsive + * because it isn't providing Rx slots. 
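The xen-netback changes above stamp every packet placed on the internal guest Rx queue with a drop deadline (cb->expires = jiffies + rx_drain_timeout_jiffies); a later pass discards anything whose deadline has passed. A small user-space sketch of the same expiry-stamp idea, using a monotonic clock in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct pkt {
        double expires;                 /* stand-in for XENVIF_RX_CB(skb)->expires */
};

static double now(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
}

static void enqueue(struct pkt *p, double drain_timeout)
{
        p->expires = now() + drain_timeout; /* like jiffies + rx_drain_timeout_jiffies */
}

static bool expired(const struct pkt *p)
{
        return now() > p->expires;      /* like !time_before(jiffies, expires) */
}

int main(void)
{
        struct pkt p;

        enqueue(&p, 10.0);              /* 10 s, mirroring the 10000 ms default */
        printf("expired: %d\n", expired(&p));
        return 0;
}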
+ */ +static unsigned int rx_stall_timeout_msecs = 60000; +module_param(rx_stall_timeout_msecs, uint, 0444); +static unsigned int rx_stall_timeout_jiffies; + unsigned int xenvif_max_queues; module_param_named(max_queues, xenvif_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, @@ -83,7 +90,6 @@ static void make_tx_response(struct xenvif_queue *queue, s8 st); static inline int tx_work_todo(struct xenvif_queue *queue); -static inline int rx_work_todo(struct xenvif_queue *queue); static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, @@ -163,6 +169,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) return false; } +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->rx_queue.lock, flags); + + __skb_queue_tail(&queue->rx_queue, skb); + + queue->rx_queue_len += skb->len; + if (queue->rx_queue_len > queue->rx_queue_max) + netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); + + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +} + +static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + spin_lock_irq(&queue->rx_queue.lock); + + skb = __skb_dequeue(&queue->rx_queue); + if (skb) + queue->rx_queue_len -= skb->len; + + spin_unlock_irq(&queue->rx_queue.lock); + + return skb; +} + +static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) +{ + spin_lock_irq(&queue->rx_queue.lock); + + if (queue->rx_queue_len < queue->rx_queue_max) + netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); + + spin_unlock_irq(&queue->rx_queue.lock); +} + + +static void xenvif_rx_queue_purge(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + while ((skb = xenvif_rx_dequeue(queue)) != NULL) + kfree_skb(skb); +} + +static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + for(;;) { + skb = skb_peek(&queue->rx_queue); + if (!skb) + break; + if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) + break; + xenvif_rx_dequeue(queue); + kfree_skb(skb); + } +} + /* * Returns true if we should start a new receive buffer instead of * adding 'size' bytes to a buffer which currently contains 'offset' @@ -237,13 +306,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, return meta; } -struct xenvif_rx_cb { - int meta_slots_used; - bool full_coalesce; -}; - -#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) - /* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. @@ -587,12 +649,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue) skb_queue_head_init(&rxq); - while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { + while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX) + && (skb = xenvif_rx_dequeue(queue)) != NULL) { RING_IDX max_slots_needed; RING_IDX old_req_cons; RING_IDX ring_slots_used; int i; + queue->last_rx_time = jiffies; + /* We need a cheap worse case estimate for the number of * slots we'll use. 
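xenvif_rx_queue_tail() and xenvif_rx_queue_maybe_wake() above implement a byte-limited internal queue: the netdev tx queue is stopped once the queued bytes exceed rx_queue_max and woken again when the backlog drops below it. A simplified stand-alone sketch of that stop/wake threshold pattern (a plain mutex stands in for the skb queue lock, and the wake check is folded into the dequeue path for brevity):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct byte_queue {
        pthread_mutex_t lock;
        unsigned int len;               /* queued bytes, like rx_queue_len */
        unsigned int max;               /* cap, like rx_queue_max */
        bool stopped;                   /* stand-in for the stopped netdev queue */
};

static void bq_enqueue(struct byte_queue *q, unsigned int bytes)
{
        pthread_mutex_lock(&q->lock);
        q->len += bytes;
        if (q->len > q->max)
                q->stopped = true;      /* like netif_tx_stop_queue() */
        pthread_mutex_unlock(&q->lock);
}

static void bq_dequeue(struct byte_queue *q, unsigned int bytes)
{
        pthread_mutex_lock(&q->lock);
        q->len -= bytes;
        if (q->len < q->max)
                q->stopped = false;     /* like netif_tx_wake_queue() */
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct byte_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .max = 1500 };

        bq_enqueue(&q, 2000);
        printf("stopped after enqueue: %d\n", q.stopped);
        bq_dequeue(&q, 2000);
        printf("stopped after dequeue: %d\n", q.stopped);
        return 0;
}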
*/ @@ -634,15 +699,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue) skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) max_slots_needed++; - /* If the skb may not fit then bail out now */ - if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) { - skb_queue_head(&queue->rx_queue, skb); - need_to_notify = true; - queue->rx_last_skb_slots = max_slots_needed; - break; - } else - queue->rx_last_skb_slots = 0; - old_req_cons = queue->rx.req_cons; XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); ring_slots_used = queue->rx.req_cons - old_req_cons; @@ -1869,12 +1925,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) } } -static inline int rx_work_todo(struct xenvif_queue *queue) -{ - return (!skb_queue_empty(&queue->rx_queue) && - xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)); -} - static inline int tx_work_todo(struct xenvif_queue *queue) { if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) @@ -1931,92 +1981,121 @@ err: return err; } -static void xenvif_start_queue(struct xenvif_queue *queue) +static void xenvif_queue_carrier_off(struct xenvif_queue *queue) { - if (xenvif_schedulable(queue->vif)) - xenvif_wake_queue(queue); + struct xenvif *vif = queue->vif; + + queue->stalled = true; + + /* At least one queue has stalled? Disable the carrier. */ + spin_lock(&vif->lock); + if (vif->stalled_queues++ == 0) { + netdev_info(vif->dev, "Guest Rx stalled"); + netif_carrier_off(vif->dev); + } + spin_unlock(&vif->lock); } -/* Only called from the queue's thread, it handles the situation when the guest - * doesn't post enough requests on the receiving ring. - * First xenvif_start_xmit disables QDisc and start a timer, and then either the - * timer fires, or the guest send an interrupt after posting new request. If it - * is the timer, the carrier is turned off here. - * */ -static void xenvif_rx_purge_event(struct xenvif_queue *queue) +static void xenvif_queue_carrier_on(struct xenvif_queue *queue) { - /* Either the last unsuccesful skb or at least 1 slot should fit */ - int needed = queue->rx_last_skb_slots ? - queue->rx_last_skb_slots : 1; + struct xenvif *vif = queue->vif; - /* It is assumed that if the guest post new slots after this, the RX - * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up - * the thread again - */ - set_bit(QUEUE_STATUS_RX_STALLED, &queue->status); - if (!xenvif_rx_ring_slots_available(queue, needed)) { - rtnl_lock(); - if (netif_carrier_ok(queue->vif->dev)) { - /* Timer fired and there are still no slots. Turn off - * everything except the interrupts - */ - netif_carrier_off(queue->vif->dev); - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - if (net_ratelimit()) - netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id); - } else { - /* Probably an another queue already turned the carrier - * off, make sure nothing is stucked in the internal - * queue of this queue - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - } - rtnl_unlock(); - } else if (!netif_carrier_ok(queue->vif->dev)) { - unsigned int num_queues = queue->vif->num_queues; - unsigned int i; - /* The carrier was down, but an interrupt kicked - * the thread again after new requests were - * posted - */ - clear_bit(QUEUE_STATUS_RX_STALLED, - &queue->status); - rtnl_lock(); - netif_carrier_on(queue->vif->dev); - netif_tx_wake_all_queues(queue->vif->dev); - rtnl_unlock(); + queue->last_rx_time = jiffies; /* Reset Rx stall detection. 
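xenvif_queue_carrier_off() and xenvif_queue_carrier_on() keep a per-vif count of stalled queues under vif->lock: the first queue to stall takes the carrier down, and only the last queue to recover brings it back up. A stand-alone sketch of that counting pattern, with stand-in types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vif {
        pthread_mutex_t lock;           /* like vif->lock */
        unsigned int stalled_queues;
        bool carrier_ok;
};

static void queue_carrier_off(struct vif *vif)
{
        pthread_mutex_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                vif->carrier_ok = false;        /* like netif_carrier_off() */
                printf("Guest Rx stalled\n");
        }
        pthread_mutex_unlock(&vif->lock);
}

static void queue_carrier_on(struct vif *vif)
{
        pthread_mutex_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                vif->carrier_ok = true;         /* like netif_carrier_on() */
                printf("Guest Rx ready\n");
        }
        pthread_mutex_unlock(&vif->lock);
}

int main(void)
{
        /* Two queues start out stalled, as in xenvif_connect(). */
        struct vif vif = { .lock = PTHREAD_MUTEX_INITIALIZER, .stalled_queues = 2 };

        queue_carrier_on(&vif);         /* first queue ready: carrier stays down */
        queue_carrier_on(&vif);         /* last queue ready: carrier comes up */
        queue_carrier_off(&vif);        /* one queue stalls: carrier goes down */
        return 0;
}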
*/ + queue->stalled = false; - for (i = 0; i < num_queues; i++) { - struct xenvif_queue *temp = &queue->vif->queues[i]; + /* All queues are ready? Enable the carrier. */ + spin_lock(&vif->lock); + if (--vif->stalled_queues == 0) { + netdev_info(vif->dev, "Guest Rx ready"); + netif_carrier_on(vif->dev); + } + spin_unlock(&vif->lock); +} - xenvif_napi_schedule_or_enable_events(temp); - } - if (net_ratelimit()) - netdev_err(queue->vif->dev, "Carrier on again\n"); - } else { - /* Queuing were stopped, but the guest posted - * new requests and sent an interrupt - */ - clear_bit(QUEUE_STATUS_RX_STALLED, - &queue->status); - del_timer_sync(&queue->rx_stalled); - xenvif_start_queue(queue); +static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + + return !queue->stalled + && prod - cons < XEN_NETBK_RX_SLOTS_MAX + && time_after(jiffies, + queue->last_rx_time + rx_stall_timeout_jiffies); +} + +static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + + return queue->stalled + && prod - cons >= XEN_NETBK_RX_SLOTS_MAX; +} + +static bool xenvif_have_rx_work(struct xenvif_queue *queue) +{ + return (!skb_queue_empty(&queue->rx_queue) + && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)) + || xenvif_rx_queue_stalled(queue) + || xenvif_rx_queue_ready(queue) + || kthread_should_stop() + || queue->vif->disabled; +} + +static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + long timeout; + + skb = skb_peek(&queue->rx_queue); + if (!skb) + return MAX_SCHEDULE_TIMEOUT; + + timeout = XENVIF_RX_CB(skb)->expires - jiffies; + return timeout < 0 ? 0 : timeout; +} + +/* Wait until the guest Rx thread has work. + * + * The timeout needs to be adjusted based on the current head of the + * queue (and not just the head at the beginning). In particular, if + * the queue is initially empty an infinite timeout is used and this + * needs to be reduced when a skb is queued. + * + * This cannot be done with wait_event_timeout() because it only + * calculates the timeout once. + */ +static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) +{ + DEFINE_WAIT(wait); + + if (xenvif_have_rx_work(queue)) + return; + + for (;;) { + long ret; + + prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); + if (xenvif_have_rx_work(queue)) + break; + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); + if (!ret) + break; } + finish_wait(&queue->wq, &wait); } int xenvif_kthread_guest_rx(void *data) { struct xenvif_queue *queue = data; - struct sk_buff *skb; + struct xenvif *vif = queue->vif; - while (!kthread_should_stop()) { - wait_event_interruptible(queue->wq, - rx_work_todo(queue) || - queue->vif->disabled || - test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) || - kthread_should_stop()); + for (;;) { + xenvif_wait_for_rx_work(queue); if (kthread_should_stop()) break; @@ -2028,35 +2107,38 @@ int xenvif_kthread_guest_rx(void *data) * context so we defer it here, if this thread is * associated with queue 0. 
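xenvif_wait_for_rx_work() above cannot use wait_event_timeout() because the timeout is derived from the packet at the head of the queue and must be recomputed on every pass. A user-space sketch of the same loop shape, with a pthread condition variable standing in for prepare_to_wait()/schedule_timeout(); the deadline helper here is a fixed one-second stub so the demo terminates:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_available;             /* stand-in for xenvif_have_rx_work() */

/* Stand-in for xenvif_rx_queue_timeout(): the real deadline comes from the
 * expiry of the queue head; here simply "now + 1 s". */
static struct timespec head_deadline(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;
        return ts;
}

static void wait_for_rx_work(void)
{
        pthread_mutex_lock(&lock);
        while (!work_available) {
                /* Recompute the deadline on every pass, because the queue
                 * head (and therefore the deadline) may have changed. */
                struct timespec deadline = head_deadline();

                if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0)
                        break;          /* timed out: caller drops expired packets */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        wait_for_rx_work();
        printf("woke up (work or timeout)\n");
        return 0;
}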
*/ - if (unlikely(queue->vif->disabled && queue->id == 0)) { - xenvif_carrier_off(queue->vif); - } else if (unlikely(queue->vif->disabled)) { - /* kthread_stop() would be called upon this thread soon, - * be a bit proactive - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; - } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, - &queue->status))) { - xenvif_rx_purge_event(queue); - } else if (!netif_carrier_ok(queue->vif->dev)) { - /* Another queue stalled and turned the carrier off, so - * purge the internal queue of queues which were not - * blocked - */ - skb_queue_purge(&queue->rx_queue); - queue->rx_last_skb_slots = 0; + if (unlikely(vif->disabled && queue->id == 0)) { + xenvif_carrier_off(vif); + xenvif_rx_queue_purge(queue); + continue; } if (!skb_queue_empty(&queue->rx_queue)) xenvif_rx_action(queue); + /* If the guest hasn't provided any Rx slots for a + * while it's probably not responsive, drop the + * carrier so packets are dropped earlier. + */ + if (xenvif_rx_queue_stalled(queue)) + xenvif_queue_carrier_off(queue); + else if (xenvif_rx_queue_ready(queue)) + xenvif_queue_carrier_on(queue); + + /* Queued packets may have foreign pages from other + * domains. These cannot be queued indefinitely as + * this would starve guests of grant refs and transmit + * slots. + */ + xenvif_rx_queue_drop_expired(queue); + + xenvif_rx_queue_maybe_wake(queue); + cond_resched(); } /* Bin any remaining skbs */ - while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) - dev_kfree_skb(skb); + xenvif_rx_queue_purge(queue); return 0; } @@ -2113,6 +2195,7 @@ static int __init netback_init(void) goto failed_init; rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); + rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs); #ifdef CONFIG_DEBUG_FS xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 8079c31ac5e..fab0d4b42f5 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -39,7 +39,7 @@ struct backend_info { static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); static void connect(struct backend_info *be); static int read_xenbus_vif_flags(struct backend_info *be); -static void backend_create_xenvif(struct backend_info *be); +static int backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); static void set_backend_state(struct backend_info *be, enum xenbus_state state); @@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v) struct xenvif_queue *queue = m->private; struct xen_netif_tx_back_ring *tx_ring = &queue->tx; struct xen_netif_rx_back_ring *rx_ring = &queue->rx; + struct netdev_queue *dev_queue; if (tx_ring->sring) { struct xen_netif_tx_sring *sring = tx_ring->sring; @@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v) queue->credit_timeout.expires, jiffies); + dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id); + + seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n", + queue->rx_queue_len, queue->rx_queue_max, + skb_queue_len(&queue->rx_queue), + netif_tx_queue_stopped(dev_queue) ? "stopped" : "running"); + return 0; } @@ -344,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev, be->state = XenbusStateInitWait; /* This kicks hotplug scripts, so do it immediately. 
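In the xenbus hunks below, backend_create_xenvif() now returns an int so netback_probe() can fail cleanly instead of continuing without a vif. A tiny stand-alone illustration of that error-propagation shape, with hypothetical names:

#include <errno.h>
#include <stdio.h>

static int create_backend(int handle_ok)
{
        if (!handle_ok)
                return -EINVAL;         /* e.g. the xenbus handle could not be read */
        return 0;
}

static int probe(void)
{
        int err = create_backend(0);

        if (err)
                goto fail;
        return 0;

fail:
        printf("probe failed: %d\n", err);
        return err;
}

int main(void)
{
        return probe() ? 1 : 0;
}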
*/ - backend_create_xenvif(be); + err = backend_create_xenvif(be); + if (err) + goto fail; return 0; @@ -389,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev, } -static void backend_create_xenvif(struct backend_info *be) +static int backend_create_xenvif(struct backend_info *be) { int err; long handle; struct xenbus_device *dev = be->dev; if (be->vif != NULL) - return; + return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); if (err != 1) { xenbus_dev_fatal(dev, err, "reading handle"); - return; + return (err < 0) ? err : -EINVAL; } be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); @@ -409,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be) err = PTR_ERR(be->vif); be->vif = NULL; xenbus_dev_fatal(dev, err, "creating interface"); - return; + return err; } kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); + return 0; } static void backend_disconnect(struct backend_info *be) @@ -703,6 +714,7 @@ static void connect(struct backend_info *be) be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); be->vif->num_queues = requested_num_queues; + be->vif->stalled_queues = requested_num_queues; for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { queue = &be->vif->queues[queue_index]; @@ -873,15 +885,10 @@ static int read_xenbus_vif_flags(struct backend_info *be) if (!rx_copy) return -EOPNOTSUPP; - if (vif->dev->tx_queue_len != 0) { - if (xenbus_scanf(XBT_NIL, dev->otherend, - "feature-rx-notify", "%d", &val) < 0) - val = 0; - if (val) - vif->can_queue = 1; - else - /* Must be non-zero for pfifo_fast to work. */ - vif->dev->tx_queue_len = 1; + if (xenbus_scanf(XBT_NIL, dev->otherend, + "feature-rx-notify", "%d", &val) < 0 || val == 0) { + xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory"); + return -EINVAL; } if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", diff --git a/drivers/of/address.c b/drivers/of/address.c index afdb78299f6..06af494184d 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -450,6 +450,21 @@ static struct of_bus *of_match_bus(struct device_node *np) return NULL; } +static int of_empty_ranges_quirk(void) +{ + if (IS_ENABLED(CONFIG_PPC)) { + /* To save cycles, we cache the result */ + static int quirk_state = -1; + + if (quirk_state < 0) + quirk_state = + of_machine_is_compatible("Power Macintosh") || + of_machine_is_compatible("MacRISC"); + return quirk_state; + } + return false; +} + static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, __be32 *addr, int na, int ns, int pna, const char *rprop) @@ -475,12 +490,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * This code is only enabled on powerpc. --gcl */ ranges = of_get_property(parent, rprop, &rlen); -#if !defined(CONFIG_PPC) - if (ranges == NULL) { + if (ranges == NULL && !of_empty_ranges_quirk()) { pr_err("OF: no ranges; cannot translate\n"); return 1; } -#endif /* !defined(CONFIG_PPC) */ if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); diff --git a/drivers/of/base.c b/drivers/of/base.c index 2305dc0382b..3823edf2d01 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1280,52 +1280,6 @@ int of_property_read_string(struct device_node *np, const char *propname, EXPORT_SYMBOL_GPL(of_property_read_string); /** - * of_property_read_string_index - Find and read a string from a multiple - * strings property. 
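of_empty_ranges_quirk() above caches the result of the of_machine_is_compatible() checks in a static int, using -1 to mean "not computed yet", so the old Apple device trees with missing "ranges" are only probed once. A stand-alone sketch of that lazy-caching idiom; expensive_check() is hypothetical:

#include <stdbool.h>
#include <stdio.h>

static bool expensive_check(void)
{
        printf("running the expensive check\n");
        return true;
}

static bool cached_quirk(void)
{
        static int state = -1;          /* -1: unknown, 0: false, 1: true */

        if (state < 0)
                state = expensive_check();
        return state;
}

int main(void)
{
        cached_quirk();
        cached_quirk();                 /* second call uses the cached result */
        return 0;
}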
- * @np: device node from which the property value is to be read. - * @propname: name of the property to be searched. - * @index: index of the string in the list of strings - * @out_string: pointer to null terminated return string, modified only if - * return value is 0. - * - * Search for a property in a device tree node and retrieve a null - * terminated string value (pointer to data, not a copy) in the list of strings - * contained in that property. - * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if - * property does not have a value, and -EILSEQ if the string is not - * null-terminated within the length of the property data. - * - * The out_string pointer is modified only if a valid string can be decoded. - */ -int of_property_read_string_index(struct device_node *np, const char *propname, - int index, const char **output) -{ - struct property *prop = of_find_property(np, propname, NULL); - int i = 0; - size_t l = 0, total = 0; - const char *p; - - if (!prop) - return -EINVAL; - if (!prop->value) - return -ENODATA; - if (strnlen(prop->value, prop->length) >= prop->length) - return -EILSEQ; - - p = prop->value; - - for (i = 0; total < prop->length; total += l, p += l) { - l = strlen(p) + 1; - if (i++ == index) { - *output = p; - return 0; - } - } - return -ENODATA; -} -EXPORT_SYMBOL_GPL(of_property_read_string_index); - -/** * of_property_match_string() - Find string in a list and return index * @np: pointer to node containing string list property * @propname: string list property name @@ -1351,7 +1305,7 @@ int of_property_match_string(struct device_node *np, const char *propname, end = p + prop->length; for (i = 0; p < end; i++, p += l) { - l = strlen(p) + 1; + l = strnlen(p, end - p) + 1; if (p + l > end) return -EILSEQ; pr_debug("comparing %s with %s\n", string, p); @@ -1363,39 +1317,41 @@ int of_property_match_string(struct device_node *np, const char *propname, EXPORT_SYMBOL_GPL(of_property_match_string); /** - * of_property_count_strings - Find and return the number of strings from a - * multiple strings property. + * of_property_read_string_util() - Utility helper for parsing string properties * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. + * @out_strs: output array of string pointers. + * @sz: number of array elements to read. + * @skip: Number of strings to skip over at beginning of list. * - * Search for a property in a device tree node and retrieve the number of null - * terminated string contain in it. Returns the number of strings on - * success, -EINVAL if the property does not exist, -ENODATA if property - * does not have a value, and -EILSEQ if the string is not null-terminated - * within the length of the property data. + * Don't call this function directly. It is a utility helper for the + * of_property_read_string*() family of functions. 
*/ -int of_property_count_strings(struct device_node *np, const char *propname) +int of_property_read_string_helper(struct device_node *np, const char *propname, + const char **out_strs, size_t sz, int skip) { struct property *prop = of_find_property(np, propname, NULL); - int i = 0; - size_t l = 0, total = 0; - const char *p; + int l = 0, i = 0; + const char *p, *end; if (!prop) return -EINVAL; if (!prop->value) return -ENODATA; - if (strnlen(prop->value, prop->length) >= prop->length) - return -EILSEQ; - p = prop->value; + end = p + prop->length; - for (i = 0; total < prop->length; total += l, p += l, i++) - l = strlen(p) + 1; - - return i; + for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) { + l = strnlen(p, end - p) + 1; + if (p + l > end) + return -EILSEQ; + if (out_strs && i >= skip) + *out_strs++ = p; + } + i -= skip; + return i <= 0 ? -ENODATA : i; } -EXPORT_SYMBOL_GPL(of_property_count_strings); +EXPORT_SYMBOL_GPL(of_property_read_string_helper); void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) { diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index f297891d852..d4994177dec 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -247,7 +247,7 @@ void of_node_release(struct kobject *kobj) * @allocflags: Allocation flags (typically pass GFP_KERNEL) * * Copy a property by dynamically allocating the memory of both the - * property stucture and the property name & contents. The property's + * property structure and the property name & contents. The property's * flags have the OF_DYNAMIC bit set so that we can differentiate between * dynamically allocated properties and not. * Returns the newly allocated property or NULL on out of memory error. diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index d1ffca8b34e..30e97bcc4f8 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void) if (offset < 0) return -ENODEV; - while (match->compatible) { + while (match->compatible[0]) { unsigned long addr; if (fdt_node_check_compatible(fdt, offset, match->compatible)) { match++; diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 59fb12e84e6..dc566b38645 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node) * This function assign memory region pointed by "memory-region" device tree * property to the given device. 
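The new of_property_read_string_helper() walks a property laid out as consecutive NUL-terminated strings, optionally skipping "skip" entries and copying out at most "sz" pointers; counting, indexed lookup and array reads presumably all become thin wrappers around it with different skip/sz/out_strs arguments. A stand-alone sketch of that contract over a plain byte buffer (the error codes are simplified stand-ins):

#include <stdio.h>
#include <string.h>

/* Parse a property value laid out as consecutive NUL-terminated strings. */
static int read_string_helper(const char *value, int length,
                              const char **out_strs, int sz, int skip)
{
        const char *p = value, *end = value + length;
        int i, l;

        for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
                l = strnlen(p, end - p) + 1;
                if (p + l > end)
                        return -1;      /* unterminated string (-EILSEQ) */
                if (out_strs && i >= skip)
                        *out_strs++ = p;
        }
        i -= skip;
        return i <= 0 ? -2 : i;         /* -2 standing in for -ENODATA */
}

int main(void)
{
        const char value[] = "first\0second\0third";
        const char *s;
        int count;

        /* count strings: no output buffer at all */
        count = read_string_helper(value, sizeof(value), NULL, 0, 0);
        /* read index 1: skip one string, copy one pointer */
        read_string_helper(value, sizeof(value), &s, 1, 1);
        printf("count=%d index1=%s\n", count, s);
        return 0;
}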
*/ -void of_reserved_mem_device_init(struct device *dev) +int of_reserved_mem_device_init(struct device *dev) { struct reserved_mem *rmem; struct device_node *np; + int ret; np = of_parse_phandle(dev->of_node, "memory-region", 0); if (!np) - return; + return -ENODEV; rmem = __find_rmem(np); of_node_put(np); if (!rmem || !rmem->ops || !rmem->ops->device_init) - return; + return -EINVAL; + + ret = rmem->ops->device_init(rmem, dev); + if (ret == 0) + dev_info(dev, "assigned reserved memory node %s\n", rmem->name); - rmem->ops->device_init(rmem, dev); - dev_info(dev, "assigned reserved memory node %s\n", rmem->name); + return ret; } /** diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index 78001270a59..e2d79afa9dc 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -339,8 +339,9 @@ static void __init of_selftest_parse_phandle_with_args(void) selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } -static void __init of_selftest_property_match_string(void) +static void __init of_selftest_property_string(void) { + const char *strings[4]; struct device_node *np; int rc; @@ -357,13 +358,66 @@ static void __init of_selftest_property_match_string(void) rc = of_property_match_string(np, "phandle-list-names", "third"); selftest(rc == 2, "third expected:0 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "fourth"); - selftest(rc == -ENODATA, "unmatched string; rc=%i", rc); + selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); rc = of_property_match_string(np, "missing-property", "blah"); - selftest(rc == -EINVAL, "missing property; rc=%i", rc); + selftest(rc == -EINVAL, "missing property; rc=%i\n", rc); rc = of_property_match_string(np, "empty-property", "blah"); - selftest(rc == -ENODATA, "empty property; rc=%i", rc); + selftest(rc == -ENODATA, "empty property; rc=%i\n", rc); rc = of_property_match_string(np, "unterminated-string", "blah"); - selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + + /* of_property_count_strings() tests */ + rc = of_property_count_strings(np, "string-property"); + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = of_property_count_strings(np, "phandle-list-names"); + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string"); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string-list"); + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + + /* of_property_read_string_index() tests */ + rc = of_property_read_string_index(np, "string-property", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "string-property", 1, strings); + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 1, strings); + selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 2, strings); + selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() 
failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "phandle-list-names", 3, strings); + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string", 0, strings); + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */ + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[1] = NULL; + + /* of_property_read_string_array() tests */ + rc = of_property_read_string_array(np, "string-property", strings, 4); + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "phandle-list-names", strings, 4); + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "unterminated-string", strings, 4); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + /* -- An incorrectly formed string should cause a failure */ + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4); + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + /* -- parsing the correctly formed strings should still work: */ + strings[2] = NULL; + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2); + selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc); + strings[1] = NULL; + rc = of_property_read_string_array(np, "phandle-list-names", strings, 1); + selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]); } #define propcmp(p1, p2) (((p1)->length == (p2)->length) && \ @@ -842,10 +896,14 @@ static void selftest_data_remove(void) return; } - while (last_node_index >= 0) { + while (last_node_index-- > 0) { if (nodes[last_node_index]) { np = of_find_node_by_path(nodes[last_node_index]->full_name); - if (strcmp(np->full_name, "/aliases") != 0) { + if (np == nodes[last_node_index]) { + if (of_aliases == np) { + of_node_put(of_aliases); + of_aliases = NULL; + } detach_node_and_children(np); } else { for_each_property_of_node(np, prop) { @@ -854,7 +912,6 @@ static void selftest_data_remove(void) } } } - last_node_index--; } } @@ -867,6 +924,8 @@ static int __init of_selftest(void) res = selftest_data_add(); if (res) return res; + if (!of_aliases) + of_aliases = of_find_node_by_path("/aliases"); np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); if (!np) { @@ -881,7 +940,7 @@ static int __init of_selftest(void) of_selftest_find_node_by_name(); of_selftest_dynamic(); of_selftest_parse_phandle_with_args(); - of_selftest_property_match_string(); + of_selftest_property_string(); of_selftest_property_copy(); of_selftest_changeset(); of_selftest_parse_interrupts(); diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi index ce0fe083d40..5b1527e8a7f 100644 --- a/drivers/of/testcase-data/tests-phandle.dtsi +++ b/drivers/of/testcase-data/tests-phandle.dtsi @@ -39,7 +39,9 @@ phandle-list-bad-args = <&provider2 1 0>, <&provider3 0>; empty-property; + string-property = 
"foobar"; unterminated-string = [40 41 42 43]; + unterminated-string-list = "first", "second", [40 41 42 43]; }; }; }; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index d292d7cb341..49dd766852b 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -444,7 +444,7 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) +bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index 233fe8a8826..69202d1eb8f 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -275,15 +275,22 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) goto err_pcie; } - /* allow the clocks to stabilize */ - usleep_range(200, 500); - /* power up core phy and enable ref clock */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); + /* + * the async reset input need ref clock to sync internally, + * when the ref clock comes after reset, internal synced + * reset time is too short, cannot meet the requirement. + * add one ~10us delay here. + */ + udelay(10); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); + /* allow the clocks to stabilize */ + usleep_range(200, 500); + /* Some boards don't have PCIe reset GPIO. */ if (gpio_is_valid(imx6_pcie->reset_gpio)) { gpio_set_value(imx6_pcie->reset_gpio, 0); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 9ecabfa8c63..2988fe136c1 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -631,10 +631,15 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) return ret; - bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); + bus = pci_create_root_bus(&pdev->dev, 0, + &xgene_pcie_ops, port, &res); if (!bus) return -ENOMEM; + pci_scan_child_bus(bus); + pci_assign_unassigned_bus_resources(bus); + pci_bus_add_devices(bus); + platform_set_drvdata(pdev, port); return 0; } diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 3a5e7e28b87..07aa722bb12 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -262,13 +262,6 @@ static int pciehp_probe(struct pcie_device *dev) goto err_out_none; } - if (!dev->port->subordinate) { - /* Can happen if we run out of bus numbers during probe */ - dev_err(&dev->device, - "Hotplug bridge without secondary bus, ignoring\n"); - goto err_out_none; - } - ctrl = pcie_init(dev); if (!ctrl) { dev_err(&dev->device, "Controller initialization failed\n"); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 9fab30af0e7..084587d7cd1 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) return entry; } +static int msi_verify_entries(struct pci_dev *dev) +{ + struct msi_desc *entry; + + list_for_each_entry(entry, &dev->msi_list, list) { + if (!dev->no_64bit_msi || !entry->msg.address_hi) + continue; + dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" + " tried to assign one above 4G\n"); + return -EIO; + } + return 0; +} + /** * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function @@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) return 
ret; } + ret = msi_verify_entries(dev); + if (ret) { + msi_mask_irq(entry, mask, ~mask); + free_msi_irqs(dev); + return ret; + } + ret = populate_msi_sysfs(dev); if (ret) { msi_mask_irq(entry, mask, ~mask); @@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev, if (ret) goto out_avail; + /* Check if all MSI entries honor device restrictions */ + ret = msi_verify_entries(dev); + if (ret) + goto out_free; + /* * Some devices require MSI-X to be enabled before we can touch the * MSI-X registers. We need to mask all the vectors to prevent diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 92b6d9ab00e..2c6643fdc0c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(modalias); -static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, +static ssize_t enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); @@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, return result < 0 ? result : count; } -static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; @@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, pdev = to_pci_dev(dev); return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt)); } -static DEVICE_ATTR_RW(enabled); +static DEVICE_ATTR_RW(enable); #ifdef CONFIG_NUMA static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, @@ -563,7 +563,7 @@ static struct attribute *pci_dev_attrs[] = { #endif &dev_attr_dma_mask_bits.attr, &dev_attr_consistent_dma_mask_bits.attr, - &dev_attr_enabled.attr, + &dev_attr_enable.attr, &dev_attr_broken_parity_status.attr, &dev_attr_msi_bus.attr, #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 0601890db22..4a3902d8e6f 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -6,6 +6,8 @@ extern const unsigned char pcie_link_speed[]; +bool pcie_cap_has_lnkctl(const struct pci_dev *dev); + /* Functions internal to the PCI core code */ int pci_create_sysfs_dev_files(struct pci_dev *pdev); diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index a9f9c46e502..63fc6391129 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -397,6 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) struct pcie_pme_service_data *data = get_service_data(srv); struct pci_dev *port = srv->port; bool wakeup; + int ret; if (device_may_wakeup(&port->dev)) { wakeup = true; @@ -407,9 +408,10 @@ static int pcie_pme_suspend(struct pcie_device *srv) } spin_lock_irq(&data->lock); if (wakeup) { - enable_irq_wake(srv->irq); + ret = enable_irq_wake(srv->irq); data->suspend_level = PME_SUSPEND_WAKEUP; - } else { + } + if (!wakeup || ret) { struct pci_dev *port = srv->port; pcie_pme_interrupt_enable(port, false); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5ed99309c75..c8ca98c2b48 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -407,15 +407,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; - unsigned long base, limit; + u64 base64, limit64; + dma_addr_t base, limit; 
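The pci-sysfs hunk above renames enabled_show()/enabled_store() to enable_show()/enable_store() because DEVICE_ATTR_RW(name) derives both the sysfs file name and the expected callback names from its argument by token pasting, so the attribute file can only be called "enable" if the callbacks carry that prefix. A simplified stand-alone macro sketch of the idea (not the kernel definition):

#include <stdio.h>

struct attribute {
        const char *name;
        int (*show)(char *buf);
        int (*store)(const char *buf);
};

#define ATTR_RW(_name) \
        struct attribute attr_##_name = { #_name, _name##_show, _name##_store }

static int enable_show(char *buf)        { return sprintf(buf, "1\n"); }
static int enable_store(const char *buf) { (void)buf; return 0; }

static ATTR_RW(enable);                 /* creates a file named "enable" */

int main(void)
{
        char buf[8];

        attr_enable.show(buf);
        printf("file name: %s, contents: %s", attr_enable.name, buf);
        return 0;
}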
struct pci_bus_region region; struct resource *res; res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); - base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; - limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; + base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; + limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; @@ -429,17 +430,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) * this, just assume they are not being used. */ if (mem_base_hi <= mem_limit_hi) { -#if BITS_PER_LONG == 64 - base |= ((unsigned long) mem_base_hi) << 32; - limit |= ((unsigned long) mem_limit_hi) << 32; -#else - if (mem_base_hi || mem_limit_hi) { - dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n"); - return; - } -#endif + base64 |= (u64) mem_base_hi << 32; + limit64 |= (u64) mem_limit_hi << 32; } } + + base = (dma_addr_t) base64; + limit = (dma_addr_t) limit64; + + if (base != base64) { + dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", + (unsigned long long) base64); + return; + } + if (base <= limit) { res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; @@ -1323,7 +1327,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (dev->subordinate) + if (pcie_cap_has_lnkctl(dev)) pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 8c842980834..f091576b644 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -258,14 +258,16 @@ static int omap_usb2_probe(struct platform_device *pdev) otg->phy = &phy->phy; platform_set_drvdata(pdev, phy); + pm_runtime_enable(phy->dev); generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL); - if (IS_ERR(generic_phy)) + if (IS_ERR(generic_phy)) { + pm_runtime_disable(phy->dev); return PTR_ERR(generic_phy); + } phy_set_drvdata(generic_phy, phy); - pm_runtime_enable(phy->dev); phy_provider = devm_of_phy_provider_register(phy->dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index e12e5b07f6d..9dc38140194 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c @@ -227,10 +227,14 @@ static int byt_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&vg->lock, flags); value = readl(reg); + WARN(value & BYT_DIRECT_IRQ_EN, + "Bad pad config for io mode, force direct_irq_en bit clearing"); + /* For level trigges the BYT_TRIG_POS and BYT_TRIG_NEG bits * are used to indicate high and low level triggering */ - value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | + BYT_TRIG_LVL); switch (type) { case IRQ_TYPE_LEVEL_HIGH: @@ -318,7 +322,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip, "Potential Error: Setting GPIO with direct_irq_en to output"); reg_val = readl(reg) | BYT_DIR_MASK; - reg_val &= ~BYT_OUTPUT_EN; + reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN); if (value) writel(reg_val | BYT_LEVEL, reg); diff --git a/drivers/platform/x86/Kconfig 
b/drivers/platform/x86/Kconfig index 4dcfb7116a0..a2eabe6ff9a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -202,6 +202,7 @@ config TC1100_WMI config HP_ACCEL tristate "HP laptop accelerometer" depends on INPUT && ACPI + depends on SERIO_I8042 select SENSORS_LIS3LV02D select NEW_LEDS select LEDS_CLASS diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 96a0b75c52c..26c4fd1394d 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -579,6 +579,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), }, }, + { + /* + * Note no video_set_backlight_video_vendor, we must use the + * acer interface, as there is no native backlight interface. + */ + .ident = "Acer KAV80", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), + }, + }, {} }; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 3a4951f4606..c1a6cd66af4 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -182,6 +182,15 @@ static const struct dmi_system_id asus_quirks[] = { }, { .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X550VB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. X55A", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 13e14ec1d3d..6bec745b6b9 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -37,6 +37,8 @@ #include <linux/leds.h> #include <linux/atomic.h> #include <linux/acpi.h> +#include <linux/i8042.h> +#include <linux/serio.h> #include "../../misc/lis3lv02d/lis3lv02d.h" #define DRIVER_NAME "hp_accel" @@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev, /* HP-specific accelerometer driver ------------------------------------ */ +/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id + * HPQ6000 sends through the keyboard bus */ +#define ACCEL_1 0x25 +#define ACCEL_2 0x26 +#define ACCEL_3 0x27 +#define ACCEL_4 0x28 + /* For automatic insertion of the module */ static const struct acpi_device_id lis3lv02d_device_ids[] = { {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */ @@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device) printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n"); } +static bool hp_accel_i8042_filter(unsigned char data, unsigned char str, + struct serio *port) +{ + static bool extended; + + if (str & I8042_STR_AUXDATA) + return false; + + if (data == 0xe0) { + extended = true; + return true; + } else if (unlikely(extended)) { + extended = false; + + switch (data) { + case ACCEL_1: + case ACCEL_2: + case ACCEL_3: + case ACCEL_4: + return true; + default: + serio_interrupt(port, 0xe0, 0); + return false; + } + } + + return false; +} + static int lis3lv02d_add(struct acpi_device *device) { int ret; @@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device) if (ret) return ret; + /* filter to remove HPQ6000 accelerometer data + * from keyboard bus stream */ + if (strstr(dev_name(&device->dev), "HPQ6000")) + i8042_install_filter(hp_accel_i8042_filter); + INIT_WORK(&hpled_led.work, 
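The acer-wmi, asus-nb-wmi and hp_accel hunks above all key their quirks off DMI firmware strings. A minimal sketch of how such a table is declared and consulted; the table, callback and flag below are hypothetical, not taken from these drivers:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

static bool use_vendor_backlight;       /* example quirk flag */

static int set_vendor_backlight(const struct dmi_system_id *id)
{
        use_vendor_backlight = true;    /* remember that the quirk matched */
        return 1;
}

static const struct dmi_system_id backlight_quirks[] __initconst = {
        {
                .callback = set_vendor_backlight,
                .ident = "Example Laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
                },
        },
        { }     /* terminator entry is required */
};

static int __init backlight_quirk_init(void)
{
        dmi_check_system(backlight_quirks);     /* runs callbacks on match */
        return 0;
}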
delayed_set_status_worker); ret = led_classdev_register(NULL, &hpled_led.led_classdev); if (ret) { @@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device) if (!device) return -EINVAL; + i8042_remove_filter(hp_accel_i8042_filter); lis3lv02d_joystick_disable(&lis3_dev); lis3lv02d_poweroff(&lis3_dev); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 02152de135b..ed494f37c40 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -837,6 +837,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), }, }, + { + .ident = "Lenovo Yoga 3 Pro 1370", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"), + }, + }, {} }; diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 5a596651227..ff765d8e1a0 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1561,6 +1561,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { }, { .callback = samsung_dmi_matched, + .ident = "NC210", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"), + DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"), + }, + .driver_data = &samsung_broken_acpi_video, + }, + { + .callback = samsung_dmi_matched, .ident = "730U3E/740U3E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index ef3a1904e92..ab6151f0542 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -240,6 +240,12 @@ static const struct dmi_system_id toshiba_alt_keymap_dmi[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A50-A"), + }, + }, {} }; diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c index 217da4b2ca8..99a78d365ce 100644 --- a/drivers/power/ab8500_fg.c +++ b/drivers/power/ab8500_fg.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/time.h> +#include <linux/time64.h> #include <linux/of.h> #include <linux/completion.h> #include <linux/mfd/core.h> @@ -108,7 +109,7 @@ enum ab8500_fg_calibration_state { struct ab8500_fg_avg_cap { int avg; int samples[NBR_AVG_SAMPLES]; - __kernel_time_t time_stamps[NBR_AVG_SAMPLES]; + time64_t time_stamps[NBR_AVG_SAMPLES]; int pos; int nbr_samples; int sum; @@ -386,15 +387,15 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr) */ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample) { - struct timespec ts; + struct timespec64 ts64; struct ab8500_fg_avg_cap *avg = &di->avg_cap; - getnstimeofday(&ts); + getnstimeofday64(&ts64); do { avg->sum += sample - avg->samples[avg->pos]; avg->samples[avg->pos] = sample; - avg->time_stamps[avg->pos] = ts.tv_sec; + avg->time_stamps[avg->pos] = ts64.tv_sec; avg->pos++; if (avg->pos == NBR_AVG_SAMPLES) @@ -407,7 +408,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample) * Check the time stamp for each sample. 
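The ab8500_fg changes around this point move the sample time stamps from __kernel_time_t/timespec to time64_t/timespec64, so second counts stay valid past 2038 even on 32-bit builds. A standalone sketch of the pattern, with local stand-ins for the kernel types:

#include <stdint.h>

typedef int64_t time64_t;       /* stand-in for the kernel's time64_t */

struct sample {
        int value;
        time64_t stamp;         /* 64-bit seconds, y2038-safe */
};

/* Same "now - AGE > stamp" ageing test the driver uses, but computed
 * in 64 bits on both 32- and 64-bit targets. */
static int sample_expired(const struct sample *s, time64_t now,
                          time64_t max_age)
{
        return now - max_age > s->stamp;
}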
If too old, * replace with latest sample */ - } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]); + } while (ts64.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]); avg->avg = avg->sum / avg->nbr_samples; @@ -446,14 +447,14 @@ static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di) static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample) { int i; - struct timespec ts; + struct timespec64 ts64; struct ab8500_fg_avg_cap *avg = &di->avg_cap; - getnstimeofday(&ts); + getnstimeofday64(&ts64); for (i = 0; i < NBR_AVG_SAMPLES; i++) { avg->samples[i] = sample; - avg->time_stamps[i] = ts.tv_sec; + avg->time_stamps[i] = ts64.tv_sec; } avg->pos = 0; diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c index e384844a1ae..1f49986fc60 100644 --- a/drivers/power/bq2415x_charger.c +++ b/drivers/power/bq2415x_charger.c @@ -1579,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client, if (np) { bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection"); - if (!bq->notify_psy) - return -EPROBE_DEFER; + if (IS_ERR(bq->notify_psy)) { + dev_info(&client->dev, + "no 'ti,usb-charger-detection' property (err=%ld)\n", + PTR_ERR(bq->notify_psy)); + bq->notify_psy = NULL; + } else if (!bq->notify_psy) { + ret = -EPROBE_DEFER; + goto error_2; + } } else if (pdata->notify_device) bq->notify_psy = power_supply_get_by_name(pdata->notify_device); @@ -1602,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client, ret = of_property_read_u32(np, "ti,current-limit", &bq->init_data.current_limit); if (ret) - return ret; + goto error_2; ret = of_property_read_u32(np, "ti,weak-battery-voltage", &bq->init_data.weak_battery_voltage); if (ret) - return ret; + goto error_2; ret = of_property_read_u32(np, "ti,battery-regulation-voltage", &bq->init_data.battery_regulation_voltage); if (ret) - return ret; + goto error_2; ret = of_property_read_u32(np, "ti,charge-current", &bq->init_data.charge_current); if (ret) - return ret; + goto error_2; ret = of_property_read_u32(np, "ti,termination-current", &bq->init_data.termination_current); if (ret) - return ret; + goto error_2; ret = of_property_read_u32(np, "ti,resistor-sense", &bq->init_data.resistor_sense); if (ret) - return ret; + goto error_2; } else { memcpy(&bq->init_data, pdata, sizeof(bq->init_data)); } diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c index 7098a1ce2d3..ef8094a61f1 100644 --- a/drivers/power/charger-manager.c +++ b/drivers/power/charger-manager.c @@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */ static bool is_batt_present(struct charger_manager *cm) { union power_supply_propval val; + struct power_supply *psy; bool present = false; int i, ret; @@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm) case CM_NO_BATTERY: break; case CM_FUEL_GAUGE: - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!psy) + break; + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, &val); if (ret == 0 && val.intval) present = true; break; case CM_CHARGER_STAT: - for (i = 0; cm->charger_stat[i]; i++) { - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_PRESENT, &val); + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + psy = power_supply_get_by_name( + cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + 
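The bq2415x hunk above treats the two failure modes of the supply lookup differently: an ERR_PTR return means the phandle is unusable, so the driver carries on without notification, while a NULL return means the supply is simply not registered yet, so the probe is deferred. A minimal sketch of that three-way check, with a hypothetical lookup helper standing in for power_supply_get_by_phandle():

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of.h>

struct dep;                                     /* hypothetical handle */
struct dep *lookup_dep(struct device_node *np); /* hypothetical lookup */

static int probe_dep(struct device_node *np, struct dep **out)
{
        struct dep *d = lookup_dep(np);

        if (IS_ERR(d)) {
                *out = NULL;            /* property unusable: run without it */
                return 0;
        }
        if (!d)
                return -EPROBE_DEFER;   /* provider not bound yet, retry later */

        *out = d;
        return 0;
}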
cm->desc->psy_charger_stat[i]); + continue; + } + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, + &val); if (ret == 0 && val.intval) { present = true; break; @@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm) static bool is_ext_pwr_online(struct charger_manager *cm) { union power_supply_propval val; + struct power_supply *psy; bool online = false; int i, ret; /* If at least one of them has one, it's yes. */ - for (i = 0; cm->charger_stat[i]; i++) { - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_ONLINE, &val); + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + cm->desc->psy_charger_stat[i]); + continue; + } + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); if (ret == 0 && val.intval) { online = true; break; @@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm) static int get_batt_uV(struct charger_manager *cm, int *uV) { union power_supply_propval val; + struct power_supply *fuel_gauge; int ret; - if (!cm->fuel_gauge) + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!fuel_gauge) return -ENODEV; - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val); if (ret) return ret; @@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm) { int i, ret; bool charging = false; + struct power_supply *psy; union power_supply_propval val; /* If there is no battery, it cannot be charged */ @@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm) return false; /* If at least one of the charger is charging, return yes */ - for (i = 0; cm->charger_stat[i]; i++) { + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { /* 1. The charger sholuld not be DISABLED */ if (cm->emergency_stop) continue; if (!cm->charger_enabled) continue; + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + cm->desc->psy_charger_stat[i]); + continue; + } + /* 2. The charger should be online (ext-power) */ - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_ONLINE, &val); + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); if (ret) { dev_warn(cm->dev, "Cannot read ONLINE value from %s\n", cm->desc->psy_charger_stat[i]); @@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm) * 3. The charger should not be FULL, DISCHARGING, * or NOT_CHARGING. 
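The charger-manager hunks above and below stop caching struct power_supply pointers and instead resolve each supply by name at the point of use, so a supply that unregisters is noticed rather than dereferenced stale. A minimal sketch of the lookup-per-use pattern against the same era of the power-supply API; the function and supply name are hypothetical:

#include <linux/errno.h>
#include <linux/power_supply.h>

static int read_online(const char *psy_name, int *online)
{
        union power_supply_propval val;
        struct power_supply *psy;
        int ret;

        psy = power_supply_get_by_name(psy_name);
        if (!psy)
                return -ENODEV;         /* supply not (or no longer) registered */

        ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
        if (ret)
                return ret;

        *online = val.intval;
        return 0;
}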
*/ - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_STATUS, &val); + ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val); if (ret) { dev_warn(cm->dev, "Cannot read STATUS value from %s\n", cm->desc->psy_charger_stat[i]); @@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm) { struct charger_desc *desc = cm->desc; union power_supply_propval val; + struct power_supply *fuel_gauge; int ret = 0; int uV; @@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm) if (!is_batt_present(cm)) return false; - if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) { + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!fuel_gauge) + return false; + + if (desc->fullbatt_full_capacity > 0) { val.intval = 0; /* Not full if capacity of fuel gauge isn't full */ - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CHARGE_FULL, &val); if (!ret && val.intval > desc->fullbatt_full_capacity) return true; @@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm) } /* Full, if the capacity is more than fullbatt_soc */ - if (cm->fuel_gauge && desc->fullbatt_soc > 0) { + if (desc->fullbatt_soc > 0) { val.intval = 0; - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CAPACITY, &val); if (!ret && val.intval >= desc->fullbatt_soc) return true; @@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm) return ret; } +static int cm_get_battery_temperature_by_psy(struct charger_manager *cm, + int *temp) +{ + struct power_supply *fuel_gauge; + + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!fuel_gauge) + return -ENODEV; + + return fuel_gauge->get_property(fuel_gauge, + POWER_SUPPLY_PROP_TEMP, + (union power_supply_propval *)temp); +} + static int cm_get_battery_temperature(struct charger_manager *cm, int *temp) { @@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm, return -ENODEV; #ifdef CONFIG_THERMAL - ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); - if (!ret) - /* Calibrate temperature unit */ - *temp /= 100; -#else - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, - POWER_SUPPLY_PROP_TEMP, - (union power_supply_propval *)temp); + if (cm->tzd_batt) { + ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); + if (!ret) + /* Calibrate temperature unit */ + *temp /= 100; + } else #endif + { + /* if-else continued from CONFIG_THERMAL */ + ret = cm_get_battery_temperature_by_psy(cm, temp); + } + return ret; } @@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy, struct charger_manager *cm = container_of(psy, struct charger_manager, charger_psy); struct charger_desc *desc = cm->desc; + struct power_supply *fuel_gauge; int ret = 0; int uV; @@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy, ret = get_batt_uV(cm, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_NOW: - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!fuel_gauge) { + ret = -ENODEV; + break; + } + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CURRENT_NOW, val); break; case POWER_SUPPLY_PROP_TEMP: case POWER_SUPPLY_PROP_TEMP_AMBIENT: return cm_get_battery_temperature(cm, &val->intval); case POWER_SUPPLY_PROP_CAPACITY: - if (!cm->fuel_gauge) { + 
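The temperature hunk above keeps a single fallback body and wraps only the thermal-zone branch in CONFIG_THERMAL, so the same else-block compiles whether or not thermal support is built in. A small sketch of that preprocessor layout, with hypothetical helpers:

/* Hypothetical context and helpers, for illustration only. */
struct ctx { int fancy_source; };
int read_temp_fancy(struct ctx *c, int *temp);
int read_temp_fallback(struct ctx *c, int *temp);

static int read_temp(struct ctx *c, int *temp)
{
        int ret;

#ifdef CONFIG_FOO
        if (c->fancy_source) {
                ret = read_temp_fancy(c, temp);
        } else
#endif
        {
                /* sole path when CONFIG_FOO is off, else-branch otherwise */
                ret = read_temp_fallback(c, temp);
        }

        return ret;
}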
fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); + if (!fuel_gauge) { ret = -ENODEV; break; } @@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy, break; } - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CAPACITY, val); if (ret) break; @@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_NOW: if (is_charging(cm)) { - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + fuel_gauge = power_supply_get_by_name( + cm->desc->psy_fuel_gauge); + if (!fuel_gauge) { + ret = -ENODEV; + break; + } + + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CHARGE_NOW, val); if (ret) { @@ -970,6 +1030,7 @@ static struct power_supply psy_default = { .properties = default_charger_props, .num_properties = ARRAY_SIZE(default_charger_props), .get_property = charger_get_property, + .no_thermal = true, }; /** @@ -1485,14 +1546,15 @@ err: return ret; } -static int cm_init_thermal_data(struct charger_manager *cm) +static int cm_init_thermal_data(struct charger_manager *cm, + struct power_supply *fuel_gauge) { struct charger_desc *desc = cm->desc; union power_supply_propval val; int ret; /* Verify whether fuel gauge provides battery temperature */ - ret = cm->fuel_gauge->get_property(cm->fuel_gauge, + ret = fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_TEMP, &val); if (!ret) { @@ -1502,8 +1564,6 @@ static int cm_init_thermal_data(struct charger_manager *cm) cm->desc->measure_battery_temp = true; } #ifdef CONFIG_THERMAL - cm->tzd_batt = cm->fuel_gauge->tzd; - if (ret && desc->thermal_zone) { cm->tzd_batt = thermal_zone_get_zone_by_name(desc->thermal_zone); @@ -1666,6 +1726,7 @@ static int charger_manager_probe(struct platform_device *pdev) int ret = 0, i = 0; int j = 0; union power_supply_propval val; + struct power_supply *fuel_gauge; if (g_desc && !rtc_dev && g_desc->rtc_name) { rtc_dev = rtc_class_open(g_desc->rtc_name); @@ -1729,23 +1790,20 @@ static int charger_manager_probe(struct platform_device *pdev) while (desc->psy_charger_stat[i]) i++; - cm->charger_stat = devm_kzalloc(&pdev->dev, - sizeof(struct power_supply *) * i, GFP_KERNEL); - if (!cm->charger_stat) - return -ENOMEM; - + /* Check if charger's supplies are present at probe */ for (i = 0; desc->psy_charger_stat[i]; i++) { - cm->charger_stat[i] = power_supply_get_by_name( - desc->psy_charger_stat[i]); - if (!cm->charger_stat[i]) { + struct power_supply *psy; + + psy = power_supply_get_by_name(desc->psy_charger_stat[i]); + if (!psy) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_charger_stat[i]); return -ENODEV; } } - cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); - if (!cm->fuel_gauge) { + fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); + if (!fuel_gauge) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_fuel_gauge); return -ENODEV; @@ -1788,13 +1846,13 @@ static int charger_manager_probe(struct platform_device *pdev) cm->charger_psy.num_properties = psy_default.num_properties; /* Find which optional psy-properties are available */ - if (!cm->fuel_gauge->get_property(cm->fuel_gauge, + if (!fuel_gauge->get_property(fuel_gauge, POWER_SUPPLY_PROP_CHARGE_NOW, &val)) { cm->charger_psy.properties[cm->charger_psy.num_properties] = POWER_SUPPLY_PROP_CHARGE_NOW; cm->charger_psy.num_properties++; } - if (!cm->fuel_gauge->get_property(cm->fuel_gauge, + if (!fuel_gauge->get_property(fuel_gauge, 
POWER_SUPPLY_PROP_CURRENT_NOW, &val)) { cm->charger_psy.properties[cm->charger_psy.num_properties] = @@ -1802,7 +1860,7 @@ static int charger_manager_probe(struct platform_device *pdev) cm->charger_psy.num_properties++; } - ret = cm_init_thermal_data(cm); + ret = cm_init_thermal_data(cm, fuel_gauge); if (ret) { dev_err(&pdev->dev, "Failed to initialize thermal data\n"); cm->desc->measure_battery_temp = false; @@ -2066,8 +2124,8 @@ static bool find_power_supply(struct charger_manager *cm, int i; bool found = false; - for (i = 0; cm->charger_stat[i]; i++) { - if (psy == cm->charger_stat[i]) { + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) { found = true; break; } diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 6cb7fe5c022..694e8cddd5c 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -417,6 +417,9 @@ static int psy_register_thermal(struct power_supply *psy) { int i; + if (psy->no_thermal) + return 0; + /* Register battery zone device psy reports temperature */ for (i = 0; i < psy->num_properties; i++) { if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 3611806c9cf..3cb36693343 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -100,11 +100,11 @@ static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd) /* Disable SDRAM0 accesses */ "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" /* Power down SDRAM0 */ - " str %4, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" + " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" /* Disable SDRAM1 accesses */ " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" /* Power down SDRAM1 */ - " strne %4, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" + " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" /* Reset CPU */ " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index b800783800a..ef2dd2e4754 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -83,6 +83,7 @@ config PWM_BFIN config PWM_CLPS711X tristate "CLPS711X PWM support" depends on ARCH_CLPS711X || COMPILE_TEST + depends on HAS_IOMEM help Generic PWM framework driver for Cirrus Logic CLPS711X. @@ -101,6 +102,7 @@ config PWM_EP93XX config PWM_FSL_FTM tristate "Freescale FlexTimer Module (FTM) PWM support" depends on OF + select REGMAP_MMIO help Generic FTM PWM framework driver for Freescale VF610 and Layerscape LS-1 SoCs. @@ -149,7 +151,7 @@ config PWM_LPC32XX config PWM_LPSS tristate "Intel LPSS PWM support" - depends on ACPI + depends on X86 help Generic PWM framework driver for Intel Low Power Subsystem PWM controller. @@ -157,6 +159,24 @@ config PWM_LPSS To compile this driver as a module, choose M here: the module will be called pwm-lpss. +config PWM_LPSS_PCI + tristate "Intel LPSS PWM PCI driver" + depends on PWM_LPSS && PCI + help + The PCI driver for Intel Low Power Subsystem PWM controller. + + To compile this driver as a module, choose M here: the module + will be called pwm-lpss-pci. + +config PWM_LPSS_PLATFORM + tristate "Intel LPSS PWM platform driver" + depends on PWM_LPSS && ACPI + help + The platform driver for Intel Low Power Subsystem PWM controller. + + To compile this driver as a module, choose M here: the module + will be called pwm-lpss-platform. 
+ config PWM_MXS tristate "Freescale MXS PWM support" depends on ARCH_MXS && OF diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index f8c577d4109..c458606c375 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -13,6 +13,8 @@ obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o +obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o +obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o obj-$(CONFIG_PWM_MXS) += pwm-mxs.o obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index d2c35920ff0..966497d10c6 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -236,7 +236,7 @@ int pwmchip_add(struct pwm_chip *chip) int ret; if (!chip || !chip->dev || !chip->ops || !chip->ops->config || - !chip->ops->enable || !chip->ops->disable) + !chip->ops->enable || !chip->ops->disable || !chip->npwm) return -EINVAL; mutex_lock(&pwm_lock); @@ -602,12 +602,9 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); const char *dev_id = dev ? dev_name(dev) : NULL; struct pwm_chip *chip = NULL; - unsigned int index = 0; unsigned int best = 0; - struct pwm_lookup *p; + struct pwm_lookup *p, *chosen = NULL; unsigned int match; - unsigned int period; - enum pwm_polarity polarity; /* look up via DT first */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) @@ -653,10 +650,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) } if (match > best) { - chip = pwmchip_find_by_name(p->provider); - index = p->index; - period = p->period; - polarity = p->polarity; + chosen = p; if (match != 3) best = match; @@ -665,17 +659,22 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) } } - mutex_unlock(&pwm_lookup_lock); + if (!chosen) + goto out; - if (chip) - pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); - if (IS_ERR(pwm)) - return pwm; + chip = pwmchip_find_by_name(chosen->provider); + if (!chip) + goto out; - pwm_set_period(pwm, period); - pwm_set_polarity(pwm, polarity); + pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id); + if (IS_ERR(pwm)) + goto out; + pwm_set_period(pwm, chosen->period); + pwm_set_polarity(pwm, chosen->polarity); +out: + mutex_unlock(&pwm_lookup_lock); return pwm; } EXPORT_SYMBOL_GPL(pwm_get); diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 6e700a541ca..d3c22de9ee4 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -102,7 +102,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); - unsigned long clk_rate, prd, dty; + unsigned long prd, dty; unsigned long long div; unsigned int pres = 0; u32 val; @@ -113,20 +113,18 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return -EBUSY; } - clk_rate = clk_get_rate(atmel_pwm->clk); - div = clk_rate; + /* Calculate the period cycles and prescale value */ + div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns; + do_div(div, NSEC_PER_SEC); - /* Calculate the period cycles */ while (div > PWM_MAX_PRD) { - div = clk_rate / (1 << pres); - div = div * period_ns; - /* 1/Hz = 100000000 ns */ - do_div(div, 1000000000); - - if (pres++ > PRD_MAX_PRES) { - dev_err(chip->dev, "pres exceeds the maximum value\n"); - return -EINVAL; - } + div >>= 1; + 
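The pwm_get() rewrite above records the best-matching lookup entry while scanning, performs the chip lookup and request afterwards, and only drops pwm_lookup_lock at a single out: label, instead of unlocking before the chosen entry has been used. A generic sketch of that scan-then-act-under-lock shape; the registry, types and claim() helper are hypothetical:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct entry { const char *name; int score; struct entry *next; };
struct item;
struct item *claim(struct entry *e);    /* hypothetical */

static DEFINE_MUTEX(table_lock);
static struct entry *table;

struct item *pick(const char *want)
{
        struct entry *e, *chosen = NULL;
        struct item *item = ERR_PTR(-ENODEV);

        mutex_lock(&table_lock);

        for (e = table; e; e = e->next)
                if (!strcmp(e->name, want) &&
                    (!chosen || e->score > chosen->score))
                        chosen = e;     /* remember, keep scanning */

        if (!chosen)
                goto out;

        item = claim(chosen);           /* still under the lock */
out:
        mutex_unlock(&table_lock);      /* single unlock point */
        return item;
}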
pres++; + } + + if (pres > PRD_MAX_PRES) { + dev_err(chip->dev, "pres exceeds the maximum value\n"); + return -EINVAL; } /* Calculate the duty cycles */ diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index a18bc8fea38..0f2cc7ef778 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -18,14 +18,14 @@ #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pwm.h> +#include <linux/regmap.h> #include <linux/slab.h> #define FTM_SC 0x00 -#define FTM_SC_CLK_MASK 0x3 -#define FTM_SC_CLK_SHIFT 3 -#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_SHIFT) +#define FTM_SC_CLK_MASK_SHIFT 3 +#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT) +#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_MASK_SHIFT) #define FTM_SC_PS_MASK 0x7 -#define FTM_SC_PS_SHIFT 0 #define FTM_CNT 0x04 #define FTM_MOD 0x08 @@ -83,7 +83,7 @@ struct fsl_pwm_chip { unsigned int cnt_select; unsigned int clk_ps; - void __iomem *base; + struct regmap *regmap; int period_ns; @@ -219,10 +219,11 @@ static unsigned long fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc, unsigned long period_ns, unsigned long duty_ns) { - unsigned long long val, duty; + unsigned long long duty; + u32 val; - val = readl(fpc->base + FTM_MOD); - duty = duty_ns * (val + 1); + regmap_read(fpc->regmap, FTM_MOD, &val); + duty = (unsigned long long)duty_ns * (val + 1); do_div(duty, period_ns); return (unsigned long)duty; @@ -232,7 +233,7 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct fsl_pwm_chip *fpc = to_fsl_chip(chip); - u32 val, period, duty; + u32 period, duty; mutex_lock(&fpc->lock); @@ -257,11 +258,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return -EINVAL; } - val = readl(fpc->base + FTM_SC); - val &= ~(FTM_SC_PS_MASK << FTM_SC_PS_SHIFT); - val |= fpc->clk_ps; - writel(val, fpc->base + FTM_SC); - writel(period - 1, fpc->base + FTM_MOD); + regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_PS_MASK, + fpc->clk_ps); + regmap_write(fpc->regmap, FTM_MOD, period - 1); fpc->period_ns = period_ns; } @@ -270,8 +269,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns); - writel(FTM_CSC_MSB | FTM_CSC_ELSB, fpc->base + FTM_CSC(pwm->hwpwm)); - writel(duty, fpc->base + FTM_CV(pwm->hwpwm)); + regmap_write(fpc->regmap, FTM_CSC(pwm->hwpwm), + FTM_CSC_MSB | FTM_CSC_ELSB); + regmap_write(fpc->regmap, FTM_CV(pwm->hwpwm), duty); return 0; } @@ -283,31 +283,28 @@ static int fsl_pwm_set_polarity(struct pwm_chip *chip, struct fsl_pwm_chip *fpc = to_fsl_chip(chip); u32 val; - val = readl(fpc->base + FTM_POL); + regmap_read(fpc->regmap, FTM_POL, &val); if (polarity == PWM_POLARITY_INVERSED) val |= BIT(pwm->hwpwm); else val &= ~BIT(pwm->hwpwm); - writel(val, fpc->base + FTM_POL); + regmap_write(fpc->regmap, FTM_POL, val); return 0; } static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc) { - u32 val; int ret; if (fpc->use_count != 0) return 0; /* select counter clock source */ - val = readl(fpc->base + FTM_SC); - val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT); - val |= FTM_SC_CLK(fpc->cnt_select); - writel(val, fpc->base + FTM_SC); + regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, + FTM_SC_CLK(fpc->cnt_select)); ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]); if (ret) @@ -327,13 +324,10 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc) static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct 
fsl_pwm_chip *fpc = to_fsl_chip(chip); - u32 val; int ret; mutex_lock(&fpc->lock); - val = readl(fpc->base + FTM_OUTMASK); - val &= ~BIT(pwm->hwpwm); - writel(val, fpc->base + FTM_OUTMASK); + regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm), 0); ret = fsl_counter_clock_enable(fpc); mutex_unlock(&fpc->lock); @@ -343,8 +337,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc) { - u32 val; - /* * already disabled, do nothing */ @@ -356,9 +348,7 @@ static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc) return; /* no users left, disable PWM counter clock */ - val = readl(fpc->base + FTM_SC); - val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT); - writel(val, fpc->base + FTM_SC); + regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0); clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]); clk_disable_unprepare(fpc->clk[fpc->cnt_select]); @@ -370,14 +360,12 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) u32 val; mutex_lock(&fpc->lock); - val = readl(fpc->base + FTM_OUTMASK); - val |= BIT(pwm->hwpwm); - writel(val, fpc->base + FTM_OUTMASK); + regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm), + BIT(pwm->hwpwm)); fsl_counter_clock_disable(fpc); - val = readl(fpc->base + FTM_OUTMASK); - + regmap_read(fpc->regmap, FTM_OUTMASK, &val); if ((val & 0xFF) == 0xFF) fpc->period_ns = 0; @@ -402,19 +390,28 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc) if (ret) return ret; - writel(0x00, fpc->base + FTM_CNTIN); - writel(0x00, fpc->base + FTM_OUTINIT); - writel(0xFF, fpc->base + FTM_OUTMASK); + regmap_write(fpc->regmap, FTM_CNTIN, 0x00); + regmap_write(fpc->regmap, FTM_OUTINIT, 0x00); + regmap_write(fpc->regmap, FTM_OUTMASK, 0xFF); clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]); return 0; } +static const struct regmap_config fsl_pwm_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + + .max_register = FTM_PWMLOAD, +}; + static int fsl_pwm_probe(struct platform_device *pdev) { struct fsl_pwm_chip *fpc; struct resource *res; + void __iomem *base; int ret; fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL); @@ -426,9 +423,16 @@ static int fsl_pwm_probe(struct platform_device *pdev) fpc->chip.dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fpc->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fpc->base)) - return PTR_ERR(fpc->base); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base, + &fsl_pwm_regmap_config); + if (IS_ERR(fpc->regmap)) { + dev_err(&pdev->dev, "regmap init failed\n"); + return PTR_ERR(fpc->regmap); + } fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys"); if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) { diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index 5449d9150d4..f8b5f109c1a 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/delay.h> #include <linux/io.h> #include <linux/pwm.h> #include <linux/of.h> @@ -21,24 +22,30 @@ /* i.MX1 and i.MX21 share the same PWM function block: */ -#define MX1_PWMC 0x00 /* PWM Control Register */ -#define MX1_PWMS 0x04 /* PWM Sample Register */ -#define MX1_PWMP 0x08 /* PWM Period Register */ +#define MX1_PWMC 0x00 /* PWM Control Register */ +#define MX1_PWMS 0x04 /* PWM Sample Register */ +#define MX1_PWMP 
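The pwm-fsl-ftm conversion above replaces raw readl()/writel() sequences with a regmap, which turns every read-modify-write into a single regmap_update_bits() call. A minimal sketch of setting up and using an MMIO regmap; offsets and masks here are hypothetical:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
        .max_register = 0x98,
};

static int example_setup(struct device *dev, void __iomem *base)
{
        struct regmap *map;

        map = devm_regmap_init_mmio(dev, base, &example_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* clear a 2-bit field at bits 4:3 and set it to 01 in one call */
        return regmap_update_bits(map, 0x00, 0x3 << 3, 0x1 << 3);
}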
0x08 /* PWM Period Register */ -#define MX1_PWMC_EN (1 << 4) +#define MX1_PWMC_EN (1 << 4) /* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ -#define MX3_PWMCR 0x00 /* PWM Control Register */ -#define MX3_PWMSAR 0x0C /* PWM Sample Register */ -#define MX3_PWMPR 0x10 /* PWM Period Register */ -#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) -#define MX3_PWMCR_DOZEEN (1 << 24) -#define MX3_PWMCR_WAITEN (1 << 23) +#define MX3_PWMCR 0x00 /* PWM Control Register */ +#define MX3_PWMSR 0x04 /* PWM Status Register */ +#define MX3_PWMSAR 0x0C /* PWM Sample Register */ +#define MX3_PWMPR 0x10 /* PWM Period Register */ +#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4) +#define MX3_PWMCR_DOZEEN (1 << 24) +#define MX3_PWMCR_WAITEN (1 << 23) #define MX3_PWMCR_DBGEN (1 << 22) -#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) -#define MX3_PWMCR_CLKSRC_IPG (1 << 16) -#define MX3_PWMCR_EN (1 << 0) +#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) +#define MX3_PWMCR_CLKSRC_IPG (1 << 16) +#define MX3_PWMCR_SWR (1 << 3) +#define MX3_PWMCR_EN (1 << 0) +#define MX3_PWMSR_FIFOAV_4WORDS 0x4 +#define MX3_PWMSR_FIFOAV_MASK 0x7 + +#define MX3_PWM_SWR_LOOP 5 struct imx_chip { struct clk *clk_per; @@ -103,9 +110,43 @@ static int imx_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct imx_chip *imx = to_imx_chip(chip); + struct device *dev = chip->dev; unsigned long long c; unsigned long period_cycles, duty_cycles, prescale; - u32 cr; + unsigned int period_ms; + bool enable = test_bit(PWMF_ENABLED, &pwm->flags); + int wait_count = 0, fifoav; + u32 cr, sr; + + /* + * i.MX PWMv2 has a 4-word sample FIFO. + * In order to avoid FIFO overflow issue, we do software reset + * to clear all sample FIFO if the controller is disabled or + * wait for a full PWM cycle to get a relinquished FIFO slot + * when the controller is enabled and the FIFO is fully loaded. + */ + if (enable) { + sr = readl(imx->mmio_base + MX3_PWMSR); + fifoav = sr & MX3_PWMSR_FIFOAV_MASK; + if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { + period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC); + msleep(period_ms); + + sr = readl(imx->mmio_base + MX3_PWMSR); + if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK)) + dev_warn(dev, "there is no free FIFO slot\n"); + } + } else { + writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR); + do { + usleep_range(200, 1000); + cr = readl(imx->mmio_base + MX3_PWMCR); + } while ((cr & MX3_PWMCR_SWR) && + (wait_count++ < MX3_PWM_SWR_LOOP)); + + if (cr & MX3_PWMCR_SWR) + dev_warn(dev, "software reset timeout\n"); + } c = clk_get_rate(imx->clk_per); c = c * period_ns; @@ -135,7 +176,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip, MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH; - if (test_bit(PWMF_ENABLED, &pwm->flags)) + if (enable) cr |= MX3_PWMCR_EN; writel(cr, imx->mmio_base + MX3_PWMCR); diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c new file mode 100644 index 00000000000..cf20d2beacd --- /dev/null +++ b/drivers/pwm/pwm-lpss-pci.c @@ -0,0 +1,64 @@ +/* + * Intel Low Power Subsystem PWM controller PCI driver + * + * Copyright (C) 2014, Intel Corporation + * + * Derived from the original pwm-lpss.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
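The i.MX hunk above bounds its software-reset wait: write the self-clearing SWR bit, then poll a handful of times with a sleep in between and warn if the bit never clears, rather than spinning indefinitely. A sketch of that bounded-poll idiom with stand-in register constants:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>

#define CTRL_SWR        (1 << 3)        /* stand-in for the self-clearing reset bit */
#define SWR_LOOP        5               /* maximum number of polls */

static void example_soft_reset(struct device *dev, void __iomem *ctrl)
{
        int tries = 0;
        u32 val;

        writel(CTRL_SWR, ctrl);
        do {
                usleep_range(200, 1000);
                val = readl(ctrl);
        } while ((val & CTRL_SWR) && (tries++ < SWR_LOOP));

        if (val & CTRL_SWR)
                dev_warn(dev, "software reset timeout\n");
}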
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "pwm-lpss.h" + +static int pwm_lpss_probe_pci(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const struct pwm_lpss_boardinfo *info; + struct pwm_lpss_chip *lpwm; + int err; + + err = pcim_enable_device(pdev); + if (err < 0) + return err; + + info = (struct pwm_lpss_boardinfo *)id->driver_data; + lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info); + if (IS_ERR(lpwm)) + return PTR_ERR(lpwm); + + pci_set_drvdata(pdev, lpwm); + return 0; +} + +static void pwm_lpss_remove_pci(struct pci_dev *pdev) +{ + struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev); + + pwm_lpss_remove(lpwm); +} + +static const struct pci_device_id pwm_lpss_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, + { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, + { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, + { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, + { }, +}; +MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids); + +static struct pci_driver pwm_lpss_driver_pci = { + .name = "pwm-lpss", + .id_table = pwm_lpss_pci_ids, + .probe = pwm_lpss_probe_pci, + .remove = pwm_lpss_remove_pci, +}; +module_pci_driver(pwm_lpss_driver_pci); + +MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c new file mode 100644 index 00000000000..18a9c880a76 --- /dev/null +++ b/drivers/pwm/pwm-lpss-platform.c @@ -0,0 +1,68 @@ +/* + * Intel Low Power Subsystem PWM controller driver + * + * Copyright (C) 2014, Intel Corporation + * + * Derived from the original pwm-lpss.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "pwm-lpss.h" + +static int pwm_lpss_probe_platform(struct platform_device *pdev) +{ + const struct pwm_lpss_boardinfo *info; + const struct acpi_device_id *id; + struct pwm_lpss_chip *lpwm; + struct resource *r; + + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (!id) + return -ENODEV; + + info = (const struct pwm_lpss_boardinfo *)id->driver_data; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + lpwm = pwm_lpss_probe(&pdev->dev, r, info); + if (IS_ERR(lpwm)) + return PTR_ERR(lpwm); + + platform_set_drvdata(pdev, lpwm); + return 0; +} + +static int pwm_lpss_remove_platform(struct platform_device *pdev) +{ + struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev); + + return pwm_lpss_remove(lpwm); +} + +static const struct acpi_device_id pwm_lpss_acpi_match[] = { + { "80860F09", (unsigned long)&pwm_lpss_byt_info }, + { "80862288", (unsigned long)&pwm_lpss_bsw_info }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match); + +static struct platform_driver pwm_lpss_driver_platform = { + .driver = { + .name = "pwm-lpss", + .acpi_match_table = pwm_lpss_acpi_match, + }, + .probe = pwm_lpss_probe_platform, + .remove = pwm_lpss_remove_platform, +}; +module_platform_driver(pwm_lpss_driver_platform); + +MODULE_DESCRIPTION("PWM platform driver for Intel LPSS"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:pwm-lpss"); diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 4df994f72d9..e9798253a16 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -13,15 +13,11 @@ * published by the Free Software Foundation. */ -#include <linux/acpi.h> -#include <linux/device.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/pwm.h> -#include <linux/platform_device.h> -#include <linux/pci.h> -static int pci_drv, plat_drv; /* So we know which drivers registered */ +#include "pwm-lpss.h" #define PWM 0x00000000 #define PWM_ENABLE BIT(31) @@ -39,14 +35,17 @@ struct pwm_lpss_chip { unsigned long clk_rate; }; -struct pwm_lpss_boardinfo { - unsigned long clk_rate; +/* BayTrail */ +const struct pwm_lpss_boardinfo pwm_lpss_byt_info = { + .clk_rate = 25000000 }; +EXPORT_SYMBOL_GPL(pwm_lpss_byt_info); -/* BayTrail */ -static const struct pwm_lpss_boardinfo byt_info = { - 25000000 +/* Braswell */ +const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { + .clk_rate = 19200000 }; +EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info); static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) { @@ -118,9 +117,8 @@ static const struct pwm_ops pwm_lpss_ops = { .owner = THIS_MODULE, }; -static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, - struct resource *r, - const struct pwm_lpss_boardinfo *info) +struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, + const struct pwm_lpss_boardinfo *info) { struct pwm_lpss_chip *lpwm; int ret; @@ -147,8 +145,9 @@ static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, return lpwm; } +EXPORT_SYMBOL_GPL(pwm_lpss_probe); -static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) +int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) { u32 ctrl; @@ -157,114 +156,8 @@ static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) return pwmchip_remove(&lpwm->chip); } - -static int pwm_lpss_probe_pci(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - const struct pwm_lpss_boardinfo *info; - struct pwm_lpss_chip *lpwm; - int err; 
- - err = pci_enable_device(pdev); - if (err < 0) - return err; - - info = (struct pwm_lpss_boardinfo *)id->driver_data; - lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info); - if (IS_ERR(lpwm)) - return PTR_ERR(lpwm); - - pci_set_drvdata(pdev, lpwm); - return 0; -} - -static void pwm_lpss_remove_pci(struct pci_dev *pdev) -{ - struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev); - - pwm_lpss_remove(lpwm); - pci_disable_device(pdev); -} - -static struct pci_device_id pwm_lpss_pci_ids[] = { - { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&byt_info}, - { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&byt_info}, - { }, -}; -MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids); - -static struct pci_driver pwm_lpss_driver_pci = { - .name = "pwm-lpss", - .id_table = pwm_lpss_pci_ids, - .probe = pwm_lpss_probe_pci, - .remove = pwm_lpss_remove_pci, -}; - -static int pwm_lpss_probe_platform(struct platform_device *pdev) -{ - const struct pwm_lpss_boardinfo *info; - const struct acpi_device_id *id; - struct pwm_lpss_chip *lpwm; - struct resource *r; - - id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); - if (!id) - return -ENODEV; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - info = (struct pwm_lpss_boardinfo *)id->driver_data; - lpwm = pwm_lpss_probe(&pdev->dev, r, info); - if (IS_ERR(lpwm)) - return PTR_ERR(lpwm); - - platform_set_drvdata(pdev, lpwm); - return 0; -} - -static int pwm_lpss_remove_platform(struct platform_device *pdev) -{ - struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev); - - return pwm_lpss_remove(lpwm); -} - -static const struct acpi_device_id pwm_lpss_acpi_match[] = { - { "80860F09", (unsigned long)&byt_info }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match); - -static struct platform_driver pwm_lpss_driver_platform = { - .driver = { - .name = "pwm-lpss", - .acpi_match_table = pwm_lpss_acpi_match, - }, - .probe = pwm_lpss_probe_platform, - .remove = pwm_lpss_remove_platform, -}; - -static int __init pwm_init(void) -{ - pci_drv = pci_register_driver(&pwm_lpss_driver_pci); - plat_drv = platform_driver_register(&pwm_lpss_driver_platform); - if (pci_drv && plat_drv) - return pci_drv; - - return 0; -} -module_init(pwm_init); - -static void __exit pwm_exit(void) -{ - if (!pci_drv) - pci_unregister_driver(&pwm_lpss_driver_pci); - if (!plat_drv) - platform_driver_unregister(&pwm_lpss_driver_platform); -} -module_exit(pwm_exit); +EXPORT_SYMBOL_GPL(pwm_lpss_remove); MODULE_DESCRIPTION("PWM driver for Intel LPSS"); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:pwm-lpss"); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h new file mode 100644 index 00000000000..aa041bb1b67 --- /dev/null +++ b/drivers/pwm/pwm-lpss.h @@ -0,0 +1,32 @@ +/* + * Intel Low Power Subsystem PWM controller driver + * + * Copyright (C) 2014, Intel Corporation + * + * Derived from the original pwm-lpss.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __PWM_LPSS_H +#define __PWM_LPSS_H + +#include <linux/device.h> +#include <linux/pwm.h> + +struct pwm_lpss_chip; + +struct pwm_lpss_boardinfo { + unsigned long clk_rate; +}; + +extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info; +extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info; + +struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, + const struct pwm_lpss_boardinfo *info); +int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); + +#endif /* __PWM_LPSS_H */ diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index bdd8644c01c..9442df24410 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -24,7 +24,9 @@ #define PWM_ENABLE (1 << 0) #define PWM_CONTINUOUS (1 << 1) #define PWM_DUTY_POSITIVE (1 << 3) +#define PWM_DUTY_NEGATIVE (0 << 3) #define PWM_INACTIVE_NEGATIVE (0 << 4) +#define PWM_INACTIVE_POSITIVE (1 << 4) #define PWM_OUTPUT_LEFT (0 << 5) #define PWM_LP_DISABLE (0 << 8) @@ -45,8 +47,10 @@ struct rockchip_pwm_regs { struct rockchip_pwm_data { struct rockchip_pwm_regs regs; unsigned int prescaler; + const struct pwm_ops *ops; - void (*set_enable)(struct pwm_chip *chip, bool enable); + void (*set_enable)(struct pwm_chip *chip, + struct pwm_device *pwm, bool enable); }; static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) @@ -54,7 +58,8 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) return container_of(c, struct rockchip_pwm_chip, chip); } -static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) +static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, + struct pwm_device *pwm, bool enable) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; @@ -70,14 +75,19 @@ static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) writel_relaxed(val, pc->base + pc->data->regs.ctrl); } -static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) +static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, + struct pwm_device *pwm, bool enable) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | - PWM_CONTINUOUS | PWM_DUTY_POSITIVE | - PWM_INACTIVE_NEGATIVE; + PWM_CONTINUOUS; u32 val; + if (pwm->polarity == PWM_POLARITY_INVERSED) + enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE; + else + enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE; + val = readl_relaxed(pc->base + pc->data->regs.ctrl); if (enable) @@ -124,6 +134,19 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +static int rockchip_pwm_set_polarity(struct pwm_chip *chip, + struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + /* + * No action needed here because pwm->polarity will be set by the core + * and the core will only change polarity when the PWM is not enabled. + * We'll handle things in set_enable(). 
+ */ + + return 0; +} + static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); @@ -133,7 +156,7 @@ static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret) return ret; - pc->data->set_enable(chip, true); + pc->data->set_enable(chip, pwm, true); return 0; } @@ -142,18 +165,26 @@ static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); - pc->data->set_enable(chip, false); + pc->data->set_enable(chip, pwm, false); clk_disable(pc->clk); } -static const struct pwm_ops rockchip_pwm_ops = { +static const struct pwm_ops rockchip_pwm_ops_v1 = { .config = rockchip_pwm_config, .enable = rockchip_pwm_enable, .disable = rockchip_pwm_disable, .owner = THIS_MODULE, }; +static const struct pwm_ops rockchip_pwm_ops_v2 = { + .config = rockchip_pwm_config, + .set_polarity = rockchip_pwm_set_polarity, + .enable = rockchip_pwm_enable, + .disable = rockchip_pwm_disable, + .owner = THIS_MODULE, +}; + static const struct rockchip_pwm_data pwm_data_v1 = { .regs = { .duty = 0x04, @@ -162,6 +193,7 @@ static const struct rockchip_pwm_data pwm_data_v1 = { .ctrl = 0x0c, }, .prescaler = 2, + .ops = &rockchip_pwm_ops_v1, .set_enable = rockchip_pwm_set_enable_v1, }; @@ -173,6 +205,7 @@ static const struct rockchip_pwm_data pwm_data_v2 = { .ctrl = 0x0c, }, .prescaler = 1, + .ops = &rockchip_pwm_ops_v2, .set_enable = rockchip_pwm_set_enable_v2, }; @@ -184,6 +217,7 @@ static const struct rockchip_pwm_data pwm_data_vop = { .ctrl = 0x00, }, .prescaler = 1, + .ops = &rockchip_pwm_ops_v2, .set_enable = rockchip_pwm_set_enable_v2, }; @@ -227,10 +261,15 @@ static int rockchip_pwm_probe(struct platform_device *pdev) pc->data = id->data; pc->chip.dev = &pdev->dev; - pc->chip.ops = &rockchip_pwm_ops; + pc->chip.ops = pc->data->ops; pc->chip.base = -1; pc->chip.npwm = 1; + if (pc->data->ops->set_polarity) { + pc->chip.of_xlate = of_pwm_xlate_with_flags; + pc->chip.of_pwm_n_cells = 3; + } + ret = pwmchip_add(&pc->chip); if (ret < 0) { clk_unprepare(pc->clk); diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index 4c9db589f6c..559e7ea9dcf 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -260,6 +260,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev) ret = arizona_ldo1_of_get_pdata(arizona, &config); if (ret < 0) return ret; + + config.ena_gpio_initialized = true; } } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index cd87c0c3703..e225711bb8b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -828,7 +828,7 @@ static void print_constraints(struct regulator_dev *rdev) if (!count) sprintf(buf, "no parameters"); - rdev_info(rdev, "%s\n", buf); + rdev_dbg(rdev, "%s\n", buf); if ((constraints->min_uV != constraints->max_uV) && !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) @@ -1713,6 +1713,8 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); + rdev->ena_pin = NULL; + return; } else { pin->request_count--; } @@ -1976,9 +1978,18 @@ static int _regulator_disable(struct regulator_dev *rdev) /* we are last user */ if (_regulator_can_change_status(rdev)) { + ret = _notifier_call_chain(rdev, + REGULATOR_EVENT_PRE_DISABLE, + NULL); + if (ret & NOTIFY_STOP_MASK) + return -EINVAL; + ret = _regulator_do_disable(rdev); if (ret < 0) { rdev_err(rdev, "failed 
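The rockchip changes above hang a pwm_ops pointer off the per-variant data so that probe() installs whichever ops the matched compatible names, instead of one shared ops table. A minimal sketch of that per-variant dispatch; the structures, compatibles and ops here are hypothetical:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_ops { int (*enable)(void); };
struct example_variant {
        unsigned int prescaler;
        const struct example_ops *ops;
};

extern const struct example_ops example_ops_v1, example_ops_v2;

static const struct example_variant variant_v1 = { 2, &example_ops_v1 };
static const struct example_variant variant_v2 = { 1, &example_ops_v2 };

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,block-v1", .data = &variant_v1 },
        { .compatible = "vendor,block-v2", .data = &variant_v2 },
        { /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
        const struct of_device_id *id =
                of_match_device(example_of_match, &pdev->dev);
        const struct example_variant *variant;

        if (!id)
                return -ENODEV;

        variant = id->data;             /* variant chosen by compatible */
        return variant->ops->enable();
}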
to disable\n"); + _notifier_call_chain(rdev, + REGULATOR_EVENT_ABORT_DISABLE, + NULL); return ret; } _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, @@ -2035,9 +2046,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; + ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | + REGULATOR_EVENT_PRE_DISABLE, NULL); + if (ret & NOTIFY_STOP_MASK) + return -EINVAL; + ret = _regulator_do_disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to force disable\n"); + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | + REGULATOR_EVENT_ABORT_DISABLE, NULL); return ret; } @@ -3650,7 +3668,8 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { + if ((config->ena_gpio || config->ena_gpio_initialized) && + gpio_is_valid(config->ena_gpio)) { ret = regulator_ena_gpio_request(rdev, config); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 7c9461d1331..37dd42759ca 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -867,17 +867,14 @@ static int da9063_regulator_probe(struct platform_device *pdev) return irq; } - regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq); - if (regulators->irq_ldo_lim >= 0) { - ret = request_threaded_irq(regulators->irq_ldo_lim, - NULL, da9063_ldo_lim_event, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - "LDO_LIM", regulators); - if (ret) { - dev_err(&pdev->dev, - "Failed to request LDO_LIM IRQ.\n"); - regulators->irq_ldo_lim = -ENXIO; - } + ret = request_threaded_irq(irq, + NULL, da9063_ldo_lim_event, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "LDO_LIM", regulators); + if (ret) { + dev_err(&pdev->dev, + "Failed to request LDO_LIM IRQ.\n"); + regulators->irq_ldo_lim = -ENXIO; } return 0; diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index 2436db9e2ca..7aef9e4c6fb 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c @@ -33,7 +33,7 @@ static struct regulator_init_data dummy_initdata = { static struct regulator_ops dummy_ops; -static struct regulator_desc dummy_desc = { +static const struct regulator_desc dummy_desc = { .name = "regulator-dummy", .id = -1, .type = REGULATOR_VOLTAGE, diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 354105eff1f..709fba19e59 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -157,8 +157,11 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->desc.fixed_uV = config->microvolts; - if (config->gpio >= 0) + if (gpio_is_valid(config->gpio)) { cfg.ena_gpio = config->gpio; + if (pdev->dev.of_node) + cfg.ena_gpio_initialized = true; + } cfg.ena_gpio_invert = !config->enable_high; if (config->enabled_at_boot) { if (config->enable_high) diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 989b23b377c..b42c7ec0017 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -162,34 +162,41 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np) config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); - /* Fetch GPIOs. 
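The regulator core hunks above wrap the actual disable in a pair of notifications: listeners see a PRE_DISABLE event first and can veto it through NOTIFY_STOP_MASK, and an ABORT_DISABLE event follows if the hardware operation then fails. A generic sketch of that veto/abort notifier pattern; the event values and do_disable() helper are hypothetical:

#include <linux/errno.h>
#include <linux/notifier.h>

#define EV_PRE_DISABLE          0x01
#define EV_DISABLE              0x02
#define EV_ABORT_DISABLE        0x04

static BLOCKING_NOTIFIER_HEAD(example_chain);
int do_disable(void);                   /* hypothetical hardware operation */

static int example_disable(void)
{
        int ret;

        ret = blocking_notifier_call_chain(&example_chain, EV_PRE_DISABLE, NULL);
        if (ret & NOTIFY_STOP_MASK)
                return -EINVAL;         /* a listener vetoed the disable */

        ret = do_disable();
        if (ret < 0) {
                blocking_notifier_call_chain(&example_chain,
                                             EV_ABORT_DISABLE, NULL);
                return ret;
        }

        blocking_notifier_call_chain(&example_chain, EV_DISABLE, NULL);
        return 0;
}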
*/ - config->nr_gpios = of_gpio_count(np); - - config->gpios = devm_kzalloc(dev, - sizeof(struct gpio) * config->nr_gpios, - GFP_KERNEL); - if (!config->gpios) - return ERR_PTR(-ENOMEM); - - proplen = of_property_count_u32_elems(np, "gpios-states"); - /* optional property */ - if (proplen < 0) - proplen = 0; - - if (proplen > 0 && proplen != config->nr_gpios) { - dev_warn(dev, "gpios <-> gpios-states mismatch\n"); - proplen = 0; - } + /* Fetch GPIOs. - optional property*/ + ret = of_gpio_count(np); + if ((ret < 0) && (ret != -ENOENT)) + return ERR_PTR(ret); + + if (ret > 0) { + config->nr_gpios = ret; + config->gpios = devm_kzalloc(dev, + sizeof(struct gpio) * config->nr_gpios, + GFP_KERNEL); + if (!config->gpios) + return ERR_PTR(-ENOMEM); + + proplen = of_property_count_u32_elems(np, "gpios-states"); + /* optional property */ + if (proplen < 0) + proplen = 0; + + if (proplen > 0 && proplen != config->nr_gpios) { + dev_warn(dev, "gpios <-> gpios-states mismatch\n"); + proplen = 0; + } - for (i = 0; i < config->nr_gpios; i++) { - gpio = of_get_named_gpio(np, "gpios", i); - if (gpio < 0) - break; - config->gpios[i].gpio = gpio; - if (proplen > 0) { - of_property_read_u32_index(np, "gpios-states", i, &ret); - if (ret) - config->gpios[i].flags = GPIOF_OUT_INIT_HIGH; + for (i = 0; i < config->nr_gpios; i++) { + gpio = of_get_named_gpio(np, "gpios", i); + if (gpio < 0) + break; + config->gpios[i].gpio = gpio; + if (proplen > 0) { + of_property_read_u32_index(np, "gpios-states", + i, &ret); + if (ret) + config->gpios[i].flags = + GPIOF_OUT_INIT_HIGH; + } } } @@ -261,13 +268,23 @@ static int gpio_regulator_probe(struct platform_device *pdev) goto err; } - drvdata->gpios = kmemdup(config->gpios, - config->nr_gpios * sizeof(struct gpio), - GFP_KERNEL); - if (drvdata->gpios == NULL) { - dev_err(&pdev->dev, "Failed to allocate gpio data\n"); - ret = -ENOMEM; - goto err_name; + if (config->nr_gpios != 0) { + drvdata->gpios = kmemdup(config->gpios, + config->nr_gpios * sizeof(struct gpio), + GFP_KERNEL); + if (drvdata->gpios == NULL) { + dev_err(&pdev->dev, "Failed to allocate gpio data\n"); + ret = -ENOMEM; + goto err_name; + } + + drvdata->nr_gpios = config->nr_gpios; + ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); + if (ret) { + dev_err(&pdev->dev, + "Could not obtain regulator setting GPIOs: %d\n", ret); + goto err_memstate; + } } drvdata->states = kmemdup(config->states, @@ -301,14 +318,6 @@ static int gpio_regulator_probe(struct platform_device *pdev) goto err_memgpio; } - drvdata->nr_gpios = config->nr_gpios; - ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); - if (ret) { - dev_err(&pdev->dev, - "Could not obtain regulator setting GPIOs: %d\n", ret); - goto err_memstate; - } - /* build initial state from gpio init data. 
*/ state = 0; for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) { @@ -322,8 +331,10 @@ static int gpio_regulator_probe(struct platform_device *pdev) cfg.driver_data = drvdata; cfg.of_node = np; - if (config->enable_gpio >= 0) + if (gpio_is_valid(config->enable_gpio)) { cfg.ena_gpio = config->enable_gpio; + cfg.ena_gpio_initialized = true; + } cfg.ena_gpio_invert = !config->enable_high; if (config->enabled_at_boot) { if (config->enable_high) diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 86db310d530..d2a8c64cae4 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -163,7 +163,7 @@ static int of_get_max1586_platform_data(struct device *dev, struct max1586_platform_data *pdata) { struct max1586_subdev_data *sub; - struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)]; + struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)] = { }; struct device_node *np = dev->of_node; int i, matched; diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index ef1af2debbd..f69320e1738 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c @@ -395,7 +395,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct device_node *pmic_np, *regulators_np; struct max77686_regulator_data *rdata; - struct of_regulator_match rmatch; + struct of_regulator_match rmatch = { }; unsigned int i; pmic_np = iodev->dev->of_node; diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c index c67ff05fc1d..d158f71fa12 100644 --- a/drivers/regulator/max77693.c +++ b/drivers/regulator/max77693.c @@ -227,7 +227,7 @@ static int max77693_pmic_probe(struct platform_device *pdev) struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct max77693_regulator_data *rdata = NULL; int num_rdata, i; - struct regulator_config config; + struct regulator_config config = { }; num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata); if (!rdata || num_rdata <= 0) { diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c index d89792b084e..45fa240fe24 100644 --- a/drivers/regulator/max77802.c +++ b/drivers/regulator/max77802.c @@ -454,7 +454,7 @@ static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev, struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct device_node *pmic_np, *regulators_np; struct max77686_regulator_data *rdata; - struct of_regulator_match rmatch; + struct of_regulator_match rmatch = { }; unsigned int i; pmic_np = iodev->dev->of_node; diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index 2fc41118879..7eee2ca1854 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c @@ -335,7 +335,7 @@ static int max8660_pdata_from_dt(struct device *dev, int matched, i; struct device_node *np; struct max8660_subdev_data *sub; - struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)]; + struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)] = { }; np = of_get_child_by_name(dev->of_node, "regulators"); if (!np) { diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c index f7f9efcfedb..dec57fb9cd1 100644 --- a/drivers/regulator/max8952.c +++ b/drivers/regulator/max8952.c @@ -225,6 +225,8 @@ static int max8952_pmic_probe(struct i2c_client *client, config.of_node = client->dev.of_node; config.ena_gpio = pdata->gpio_en; + if (client->dev.of_node) + config.ena_gpio_initialized = true; if (pdata->reg_data->constraints.boot_on) 
config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 7a51814abdc..5a1d4afa477 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -211,7 +211,8 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev, search = dev->of_node; if (!search) { - dev_err(dev, "Failed to find regulator container node\n"); + dev_dbg(dev, "Failed to find regulator container node '%s'\n", + desc->regulators_node); return NULL; } diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index e305416d769..196a5c8838c 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -44,7 +44,7 @@ static const int rk808_buck_config_regs[] = { }; static const struct regulator_linear_range rk808_buck_voltage_ranges[] = { - REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500), + REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500), }; static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = { diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 4acefa6b462..7633b9bfbe6 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -341,7 +341,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev) { struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct sec_platform_data *pdata = dev_get_platdata(iodev->dev); - struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX]; + struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX] = { }; struct device_node *reg_np = NULL; struct regulator_config config = { }; struct s2mpa01_info *s2mpa01; diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index adab82d5279..b4ae022091e 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -886,6 +886,7 @@ common_reg: config.regmap = iodev->regmap_pmic; config.driver_data = s2mps11; config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH; + config.ena_gpio_initialized = true; for (i = 0; i < s2mps11->rdev_num; i++) { struct regulator_dev *regulator; diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 0ab5cbeeb79..4225df68dd4 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -950,6 +950,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) config.of_node = pdata->regulators[i].reg_node; config.ena_gpio = -EINVAL; config.ena_gpio_flags = 0; + config.ena_gpio_initialized = true; if (gpio_is_valid(pdata->regulators[i].ext_control_gpio)) s5m8767_regulator_config_ext_control(s5m8767, &pdata->regulators[i], &config); diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c index d5df1e9ad1d..f1df4423d36 100644 --- a/drivers/regulator/tps65090-regulator.c +++ b/drivers/regulator/tps65090-regulator.c @@ -312,7 +312,11 @@ static void tps65090_configure_regulator_config( gpio_flag = GPIOF_OUT_INIT_HIGH; config->ena_gpio = tps_pdata->gpio; + config->ena_gpio_initialized = true; config->ena_gpio_flags = gpio_flag; + } else { + config->ena_gpio = -EINVAL; + config->ena_gpio_initialized = false; } } diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index c24346db8a7..88f5064e412 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -145,10 +145,12 @@ static int wm8994_ldo_probe(struct platform_device *pdev) config.driver_data = ldo; config.regmap = wm8994->regmap; config.init_data = &ldo->init_data; 
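The common thread in the regulator driver hunks of this series is the new ena_gpio_initialized flag: GPIO 0 is a valid GPIO number, so the core can no longer treat ena_gpio == 0 as "no enable GPIO", and a driver that really obtained one (typically from DT) now says so explicitly before registering. A minimal sketch of that probe-side idiom; the helper name is hypothetical, only the flag handling mirrors what fixed.c and gpio-regulator.c do above:

#include <linux/gpio.h>
#include <linux/regulator/driver.h>

/* Illustrative helper: record an enable GPIO in the regulator config. */
static void foo_set_enable_gpio(struct regulator_config *cfg,
				int gpio, bool gpio_came_from_dt)
{
	if (gpio_is_valid(gpio)) {
		cfg->ena_gpio = gpio;
		/* Tell the core the value is meaningful even when it is 0. */
		cfg->ena_gpio_initialized = gpio_came_from_dt;
	}
}

With this, the core's check "(config->ena_gpio || config->ena_gpio_initialized) && gpio_is_valid(...)" from the first hunk of this series still requests the GPIO when it happens to be number 0.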
- if (pdata) + if (pdata) { config.ena_gpio = pdata->ldo[id].enable; - else if (wm8994->dev->of_node) + } else if (wm8994->dev->of_node) { config.ena_gpio = wm8994->pdata.ldo[id].enable; + config.ena_gpio_initialized = true; + } /* Use default constraints if none set up */ if (!pdata || !pdata->ldo[id].init_data || wm8994->dev->of_node) { diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 8cd0beebdc3..6dd12ddbabc 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -830,7 +830,7 @@ config RTC_DRV_DA9063 config RTC_DRV_EFI tristate "EFI RTC" - depends on EFI + depends on EFI && !X86 help If you say yes here you will get support for the EFI Real Time Clock. @@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX config RTC_DRV_PM8XXX tristate "Qualcomm PMIC8XXX RTC" - depends on MFD_PM8XXX + depends on MFD_PM8XXX || MFD_SPMI_PMIC help If you say yes here you get support for the Qualcomm PMIC8XXX RTC. diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 314129e66d6..92679df6d6e 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node) dev_err(dev, "bq32k: diode and resistor mismatch\n"); return -EINVAL; } - reg = 0x25; + reg = 0x45; break; default: diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index c384fec6d17..53b589dc34e 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c @@ -236,3 +236,4 @@ MODULE_ALIAS("platform:rtc-efi"); MODULE_AUTHOR("dann frazier <dannf@hp.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("EFI RTC driver"); +MODULE_ALIAS("platform:rtc-efi"); diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index 197699f358c..5adcf111fc1 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -27,21 +27,36 @@ /* RTC_CTRL register bit fields */ #define PM8xxx_RTC_ENABLE BIT(7) -#define PM8xxx_RTC_ALARM_ENABLE BIT(1) #define PM8xxx_RTC_ALARM_CLEAR BIT(0) #define NUM_8_BIT_RTC_REGS 0x4 /** + * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions + * @ctrl: base address of control register + * @write: base address of write register + * @read: base address of read register + * @alarm_ctrl: base address of alarm control register + * @alarm_ctrl2: base address of alarm control2 register + * @alarm_rw: base address of alarm read-write register + * @alarm_en: alarm enable mask + */ +struct pm8xxx_rtc_regs { + unsigned int ctrl; + unsigned int write; + unsigned int read; + unsigned int alarm_ctrl; + unsigned int alarm_ctrl2; + unsigned int alarm_rw; + unsigned int alarm_en; +}; + +/** * struct pm8xxx_rtc - rtc driver internal structure * @rtc: rtc device for this driver. * @regmap: regmap used to access RTC registers * @allow_set_time: indicates whether writing to the RTC is allowed * @rtc_alarm_irq: rtc alarm irq number. - * @rtc_base: address of rtc control register. - * @rtc_read_base: base address of read registers. - * @rtc_write_base: base address of write registers. - * @alarm_rw_base: base address of alarm registers. * @ctrl_reg: rtc control register. * @rtc_dev: device structure. * @ctrl_reg_lock: spinlock protecting access to ctrl_reg. 
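The kernel-doc above introduces the core of the rtc-pm8xxx rework: each PMIC variant now carries a table of register addresses plus an alarm-enable mask, and the driver stops keeping a ctrl_reg shadow copy, reading the hardware through regmap on every update instead. A condensed sketch of that pattern, assuming the struct fields documented above; the function name is made up and error handling is trimmed, the later hunks show the real code:

#include <linux/regmap.h>
#include <linux/spinlock.h>

static int pm8xxx_alarm_set_enable(struct pm8xxx_rtc *rtc_dd, bool on)
{
	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
	unsigned int ctrl_reg;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, flags);
	/* Read-modify-write the variant-specific alarm control register. */
	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
	if (!rc) {
		if (on)
			ctrl_reg |= regs->alarm_en;
		else
			ctrl_reg &= ~regs->alarm_en;
		rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
	}
	spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, flags);

	return rc;
}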
@@ -51,11 +66,7 @@ struct pm8xxx_rtc { struct regmap *regmap; bool allow_set_time; int rtc_alarm_irq; - int rtc_base; - int rtc_read_base; - int rtc_write_base; - int alarm_rw_base; - u8 ctrl_reg; + const struct pm8xxx_rtc_regs *regs; struct device *rtc_dev; spinlock_t ctrl_reg_lock; }; @@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) { int rc, i; unsigned long secs, irq_flags; - u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg; + u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0; + unsigned int ctrl_reg; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; if (!rtc_dd->allow_set_time) return -EACCES; @@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); - ctrl_reg = rtc_dd->ctrl_reg; - if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) { + rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg); + if (rc) + goto rtc_rw_fail; + + if (ctrl_reg & regs->alarm_en) { alarm_enabled = 1; - ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); + ctrl_reg &= ~regs->alarm_en; + rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg); if (rc) { dev_err(dev, "Write to RTC control register failed\n"); goto rtc_rw_fail; } - rtc_dd->ctrl_reg = ctrl_reg; - } else { - spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); } /* Write 0 to Byte[0] */ - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0); + rc = regmap_write(rtc_dd->regmap, regs->write, 0); if (rc) { dev_err(dev, "Write to RTC write data register failed\n"); goto rtc_rw_fail; } /* Write Byte[1], Byte[2], Byte[3] */ - rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1, + rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1, &value[1], sizeof(value) - 1); if (rc) { dev_err(dev, "Write to RTC write data register failed\n"); @@ -118,25 +131,23 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) } /* Write Byte[0] */ - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]); + rc = regmap_write(rtc_dd->regmap, regs->write, value[0]); if (rc) { dev_err(dev, "Write to RTC write data register failed\n"); goto rtc_rw_fail; } if (alarm_enabled) { - ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); + ctrl_reg |= regs->alarm_en; + rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg); if (rc) { dev_err(dev, "Write to RTC control register failed\n"); goto rtc_rw_fail; } - rtc_dd->ctrl_reg = ctrl_reg; } rtc_rw_fail: - if (alarm_enabled) - spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); + spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); return rc; } @@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) unsigned long secs; unsigned int reg; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; - rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, - value, sizeof(value)); + rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value)); if (rc) { dev_err(dev, "RTC read data register failed\n"); return rc; @@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) * Read the LSB again and check if there has been a carry over. * If there is, redo the read operation. 
*/ - rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg); + rc = regmap_read(rtc_dd->regmap, regs->read, &reg); if (rc < 0) { dev_err(dev, "RTC read data register failed\n"); return rc; } if (unlikely(reg < value[0])) { - rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, + rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value)); if (rc) { dev_err(dev, "RTC read data register failed\n"); @@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int rc, i; - u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg; + u8 value[NUM_8_BIT_RTC_REGS]; + unsigned int ctrl_reg; unsigned long secs, irq_flags; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; rtc_tm_to_time(&alarm->time, &secs); @@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); - rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, + rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, sizeof(value)); if (rc) { dev_err(dev, "Write to RTC ALARM register failed\n"); goto rtc_rw_fail; } - ctrl_reg = rtc_dd->ctrl_reg; + rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); + if (rc) + goto rtc_rw_fail; if (alarm->enabled) - ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; + ctrl_reg |= regs->alarm_en; else - ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; + ctrl_reg &= ~regs->alarm_en; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); + rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); if (rc) { - dev_err(dev, "Write to RTC control register failed\n"); + dev_err(dev, "Write to RTC alarm control register failed\n"); goto rtc_rw_fail; } - rtc_dd->ctrl_reg = ctrl_reg; - dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", alarm->time.tm_hour, alarm->time.tm_min, alarm->time.tm_sec, alarm->time.tm_mday, @@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) u8 value[NUM_8_BIT_RTC_REGS]; unsigned long secs; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; - rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, + rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value, sizeof(value)); if (rc) { dev_err(dev, "RTC alarm time read failed\n"); @@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) int rc; unsigned long irq_flags; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); - u8 ctrl_reg; + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; + unsigned int ctrl_reg; spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); - ctrl_reg = rtc_dd->ctrl_reg; + rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); + if (rc) + goto rtc_rw_fail; if (enable) - ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; + ctrl_reg |= regs->alarm_en; else - ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; + ctrl_reg &= ~regs->alarm_en; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); + rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); if (rc) { dev_err(dev, "Write to RTC control register failed\n"); goto rtc_rw_fail; } - rtc_dd->ctrl_reg = ctrl_reg; - rtc_rw_fail: spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); return rc; @@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = { static irqreturn_t pm8xxx_alarm_trigger(int irq, void
*dev_id) { struct pm8xxx_rtc *rtc_dd = dev_id; + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; unsigned int ctrl_reg; int rc; unsigned long irq_flags; @@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); /* Clear the alarm enable bit */ - ctrl_reg = rtc_dd->ctrl_reg; - ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; + rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); + if (rc) { + spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); + goto rtc_alarm_handled; + } + + ctrl_reg &= ~regs->alarm_en; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); + rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); if (rc) { spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); dev_err(rtc_dd->rtc_dev, - "Write to RTC control register failed\n"); + "Write to alarm control register failed\n"); goto rtc_alarm_handled; } - rtc_dd->ctrl_reg = ctrl_reg; spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); /* Clear RTC alarm register */ - rc = regmap_read(rtc_dd->regmap, - rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET, - &ctrl_reg); + rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg); if (rc) { dev_err(rtc_dd->rtc_dev, - "RTC Alarm control register read failed\n"); + "RTC Alarm control2 register read failed\n"); goto rtc_alarm_handled; } - ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR; - rc = regmap_write(rtc_dd->regmap, - rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET, - ctrl_reg); + ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR; + rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg); if (rc) dev_err(rtc_dd->rtc_dev, - "Write to RTC Alarm control register failed\n"); + "Write to RTC Alarm control2 register failed\n"); rtc_alarm_handled: return IRQ_HANDLED; } +static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd) +{ + const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; + unsigned int ctrl_reg; + int rc; + + /* Check if the RTC is on, else turn it on */ + rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg); + if (rc) + return rc; + + if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { + ctrl_reg |= PM8xxx_RTC_ENABLE; + rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg); + if (rc) + return rc; + } + + return 0; +} + +static const struct pm8xxx_rtc_regs pm8921_regs = { + .ctrl = 0x11d, + .write = 0x11f, + .read = 0x123, + .alarm_rw = 0x127, + .alarm_ctrl = 0x11d, + .alarm_ctrl2 = 0x11e, + .alarm_en = BIT(1), +}; + +static const struct pm8xxx_rtc_regs pm8058_regs = { + .ctrl = 0x1e8, + .write = 0x1ea, + .read = 0x1ee, + .alarm_rw = 0x1f2, + .alarm_ctrl = 0x1e8, + .alarm_ctrl2 = 0x1e9, + .alarm_en = BIT(1), +}; + +static const struct pm8xxx_rtc_regs pm8941_regs = { + .ctrl = 0x6046, + .write = 0x6040, + .read = 0x6048, + .alarm_rw = 0x6140, + .alarm_ctrl = 0x6146, + .alarm_ctrl2 = 0x6148, + .alarm_en = BIT(7), +}; + /* * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out */ static const struct of_device_id pm8xxx_id_table[] = { - { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D }, - { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 }, + { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs }, + { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs }, + { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_id_table); @@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table); static int pm8xxx_rtc_probe(struct platform_device *pdev) { int rc; - unsigned int ctrl_reg; struct pm8xxx_rtc *rtc_dd; const struct 
of_device_id *match; @@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node, "allow-set-time"); - rtc_dd->rtc_base = (long) match->data; - - /* Setup RTC register addresses */ - rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET; - rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET; - rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET; - + rtc_dd->regs = match->data; rtc_dd->rtc_dev = &pdev->dev; - /* Check if the RTC is on, else turn it on */ - rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg); - if (rc) { - dev_err(&pdev->dev, "RTC control register read failed!\n"); + rc = pm8xxx_rtc_enable(rtc_dd); + if (rc) return rc; - } - - if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) { - ctrl_reg |= PM8xxx_RTC_ENABLE; - rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); - if (rc) { - dev_err(&pdev->dev, - "Write to RTC control register failed\n"); - return rc; - } - } - - rtc_dd->ctrl_reg = ctrl_reg; platform_set_drvdata(pdev, rtc_dd); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index a6b1252c994..806072238c0 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev) } clk_prepare_enable(info->rtc_clk); - info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); - if (IS_ERR(info->rtc_src_clk)) { - dev_err(&pdev->dev, "failed to find rtc source clock\n"); - return PTR_ERR(info->rtc_src_clk); + if (info->data->needs_src_clk) { + info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); + if (IS_ERR(info->rtc_src_clk)) { + dev_err(&pdev->dev, + "failed to find rtc source clock\n"); + return PTR_ERR(info->rtc_src_clk); + } + clk_prepare_enable(info->rtc_src_clk); } - clk_prepare_enable(info->rtc_src_clk); - /* check to see if everything is setup correctly */ if (info->data->enable) diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index dc24ecfac2d..db2cb1f8a1b 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -105,7 +105,7 @@ config SCLP_ASYNC config HMC_DRV def_tristate m prompt "Support for file transfers from HMC drive CD/DVD-ROM" - depends on 64BIT + depends on S390 && 64BIT select CRC16 help This option enables support for file transfers from a Hardware diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index 6cbe6ef3c88..bda52f18e96 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); int i; struct virtqueue *vq; - struct virtio_driver *drv; if (!vcdev) return; diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index ca75c7ca255..ef355c13ccc 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -480,9 +480,7 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) bnx2fc_initiate_cleanup(orig_io_req); /* Post a new IO req with the same sc_cmd */ BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); - spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_post_io_req(tgt, new_io_req); - spin_lock_bh(&tgt->tgt_lock); if (!rc) goto free_frame; BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 79e5c94107a..72533c58c1f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ 
b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct fc_frame_header *fh; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; + struct sk_buff *tmp_skb; unsigned short oxid; interface = container_of(ptype, struct bnx2fc_interface, @@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, goto err; } + tmp_skb = skb_share_check(skb, GFP_ATOMIC); + if (!tmp_skb) + goto err; + + skb = tmp_skb; + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); goto err; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 0679782d9d1..5b99844ef6b 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1894,18 +1894,24 @@ int bnx2fc_queuecommand(struct Scsi_Host *host, goto exit_qcmd; } } + + spin_lock_bh(&tgt->tgt_lock); + io_req = bnx2fc_cmd_alloc(tgt); if (!io_req) { rc = SCSI_MLQUEUE_HOST_BUSY; - goto exit_qcmd; + goto exit_qcmd_tgtlock; } io_req->sc_cmd = sc_cmd; if (bnx2fc_post_io_req(tgt, io_req)) { printk(KERN_ERR PFX "Unable to post io_req\n"); rc = SCSI_MLQUEUE_HOST_BUSY; - goto exit_qcmd; + goto exit_qcmd_tgtlock; } + +exit_qcmd_tgtlock: + spin_unlock_bh(&tgt->tgt_lock); exit_qcmd: return rc; } @@ -2020,6 +2026,8 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, int task_idx, index; u16 xid; + /* bnx2fc_post_io_req() is called with the tgt_lock held */ + /* Initialize rest of io_req fields */ io_req->cmd_type = BNX2FC_SCSI_CMD; io_req->port = port; @@ -2047,9 +2055,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, /* Build buffer descriptor list for firmware from sg list */ if (bnx2fc_build_bd_list_from_sg(io_req)) { printk(KERN_ERR PFX "BD list creation failed\n"); - spin_lock_bh(&tgt->tgt_lock); kref_put(&io_req->refcount, bnx2fc_cmd_release); - spin_unlock_bh(&tgt->tgt_lock); return -EAGAIN; } @@ -2061,19 +2067,15 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, task = &(task_page[index]); bnx2fc_init_task(io_req, task); - spin_lock_bh(&tgt->tgt_lock); - if (tgt->flush_in_prog) { printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); - spin_unlock_bh(&tgt->tgt_lock); return -EAGAIN; } if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { printk(KERN_ERR PFX "Session not ready...post_io\n"); kref_put(&io_req->refcount, bnx2fc_cmd_release); - spin_unlock_bh(&tgt->tgt_lock); return -EAGAIN; } @@ -2091,6 +2093,5 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, /* Ring doorbell */ bnx2fc_ring_doorbell(tgt); - spin_unlock_bh(&tgt->tgt_lock); return 0; } diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 3e0a0d315f7..15081257cfc 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -828,6 +828,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) if (status == CPL_ERR_RTX_NEG_ADVICE) goto rel_skb; + module_put(THIS_MODULE); + if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) @@ -936,20 +938,23 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); - if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { - cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); - cxgbi_sock_set_state(csk, CTP_ABORTING); - goto done; + cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + + if 
(!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + send_tx_flowc_wr(csk); + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } - cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_state(csk, CTP_ABORTING); + send_abort_rpl(csk, rst_status); if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { csk->err = abort_status_to_errno(csk, req->status, &rst_status); cxgbi_sock_closed(csk); } -done: + spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); rel_skb: diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 54fa6e0bc1b..7da59c38a69 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -399,6 +399,35 @@ EXPORT_SYMBOL_GPL(cxgbi_hbas_add); * If the source port is outside our allocation range, the caller is * responsible for keeping track of their port usage. */ + +static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, + unsigned char port_id) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + unsigned int i; + unsigned int used; + + if (!pmap->max_connect || !pmap->used) + return NULL; + + spin_lock_bh(&pmap->lock); + used = pmap->used; + for (i = 0; used && i < pmap->max_connect; i++) { + struct cxgbi_sock *csk = pmap->port_csk[i]; + + if (csk) { + if (csk->port_id == port_id) { + spin_unlock_bh(&pmap->lock); + return csk; + } + used--; + } + } + spin_unlock_bh(&pmap->lock); + + return NULL; +} + static int sock_get_port(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; @@ -749,6 +778,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) csk->daddr6.sin6_addr = daddr6->sin6_addr; csk->daddr6.sin6_port = daddr6->sin6_port; csk->daddr6.sin6_family = daddr6->sin6_family; + csk->saddr6.sin6_family = daddr6->sin6_family; csk->saddr6.sin6_addr = pref_saddr; neigh_release(n); @@ -786,7 +816,7 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) read_lock_bh(&csk->callback_lock); if (csk->user_data) iscsi_conn_failure(csk->user_data, - ISCSI_ERR_CONN_FAILED); + ISCSI_ERR_TCP_CONN_CLOSE); read_unlock_bh(&csk->callback_lock); } } @@ -875,18 +905,16 @@ void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) { cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); + + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { - if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) - cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); - else { - cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); - cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); - if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) - pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", - csk, csk->state, csk->flags, csk->tid); - cxgbi_sock_closed(csk); - } + cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); + if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) + pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_closed(csk); } + spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); } @@ -2647,12 +2675,14 @@ int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, break; case ISCSI_HOST_PARAM_IPADDRESS: { - __be32 addr; - - addr = cxgbi_get_iscsi_ipv4(chba); - len = sprintf(buf, "%pI4", &addr); + struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, + chba->port_id); + if (csk) { + len = sprintf(buf, "%pIS", + (struct sockaddr *)&csk->saddr); + } log_debug(1 << CXGBI_DBG_ISCSI, - "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); + "hba %s, addr %s.\n", chba->ndev->name, buf); break; } default: 
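The last libcxgbi.c hunk above stops reporting only an IPv4 address for ISCSI_HOST_PARAM_IPADDRESS and instead prints whichever source address the looked-up connection holds, relying on the kernel's %pIS printk specifier, which formats a struct sockaddr * as either dotted-quad IPv4 or IPv6 text. A minimal, self-contained illustration of that specifier; the helper name and buffer handling are hypothetical:

#include <linux/kernel.h>
#include <linux/socket.h>

/* Format an endpoint address of either family into buf. */
static int format_endpoint_addr(char *buf, size_t len,
				const struct sockaddr_storage *ss)
{
	/* %pIS accepts sockaddr_in and sockaddr_in6 alike. */
	return scnprintf(buf, len, "%pIS", (const struct sockaddr *)ss);
}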
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 1d98fad6a0a..2c7cb1c0c45 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -700,11 +700,6 @@ static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr) chba->ndev->name); } -static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba) -{ - return chba->ipv4addr; -} - struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); void cxgbi_device_unregister(struct cxgbi_device *); void cxgbi_device_unregister_all(unsigned int flag); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index e99507ed0e3..fd78bdc5352 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -474,6 +474,13 @@ static int alua_check_sense(struct scsi_device *sdev, * LUN Not Ready -- Offline */ return SUCCESS; + if (sdev->allow_restart && + sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02) + /* + * if the device is not started, we need to wake + * the error handler to start the motor + */ + return FAILED; break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f6a69a3b1b3..5640ad1c821 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4453,7 +4453,7 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msixentry[i].entry = i; i = pci_enable_msix_range(instance->pdev, instance->msixentry, 1, instance->msix_vectors); - if (i) + if (i > 0) instance->msix_vectors = i; else instance->msix_vectors = 0; diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild index 5fd73d77c3a..58cecd45b0f 100644 --- a/drivers/scsi/osd/Kbuild +++ b/drivers/scsi/osd/Kbuild @@ -4,7 +4,7 @@ # Copyright (C) 2008 Panasas Inc. All rights reserved. # # Authors: -# Boaz Harrosh <bharrosh@panasas.com> +# Boaz Harrosh <ooo@electrozaur.com> # Benny Halevy <bhalevy@panasas.com> # # This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig index a0703514eb0..347cc5e3374 100644 --- a/drivers/scsi/osd/Kconfig +++ b/drivers/scsi/osd/Kconfig @@ -4,7 +4,7 @@ # Copyright (C) 2008 Panasas Inc. All rights reserved. # # Authors: -# Boaz Harrosh <bharrosh@panasas.com> +# Boaz Harrosh <ooo@electrozaur.com> # Benny Halevy <bhalevy@panasas.com> # # This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h index 579e491f11d..26341261bb5 100644 --- a/drivers/scsi/osd/osd_debug.h +++ b/drivers/scsi/osd/osd_debug.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index fd19fd8468a..488c3929f19 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -7,7 +7,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. 
* * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify @@ -57,7 +57,7 @@ enum { OSD_REQ_RETRIES = 1 }; -MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); +MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>"); MODULE_DESCRIPTION("open-osd initiator library libosd.ko"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index e1d9a4c4c4b..92cdd4b0652 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -10,7 +10,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify @@ -74,7 +74,7 @@ static const char osd_name[] = "osd"; static const char *osd_version_string = "open-osd 0.2.1"; -MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); +MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>"); MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 829752cfd73..a902fa1db7a 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -112,6 +112,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd); static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); +static void qlt_disable_vha(struct scsi_qla_host *vha); /* * Global Variables */ @@ -210,7 +211,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } -void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, +static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio) { ql_dbg(ql_dbg_tgt, vha, 0xe072, @@ -433,7 +434,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) #if 0 /* FIXME: Re-enable Global event handling.. 
*/ /* Global event */ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); - qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); + qlt_clear_tgt_db(ha->tgt.qla_tgt); if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { sess = list_entry(ha->tgt.qla_tgt->sess_list.next, typeof(*sess), sess_list_entry); @@ -515,7 +516,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, } /* ha->hardware_lock supposed to be held on entry */ -static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) +static void qlt_clear_tgt_db(struct qla_tgt *tgt) { struct qla_tgt_sess *sess; @@ -867,7 +868,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); tgt->tgt_stop = 1; - qlt_clear_tgt_db(tgt, true); + qlt_clear_tgt_db(tgt); spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&vha->vha_tgt.tgt_mutex); mutex_unlock(&qla_tgt_mutex); @@ -1462,12 +1463,13 @@ out_err: return -1; } -static inline void qlt_unmap_sg(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd) +static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) { struct qla_hw_data *ha = vha->hw; - BUG_ON(!cmd->sg_mapped); + if (!cmd->sg_mapped) + return; + pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); cmd->sg_mapped = 0; @@ -2428,8 +2430,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, return 0; out_unmap_unlock: - if (cmd->sg_mapped) - qlt_unmap_sg(vha, cmd); + qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return res; @@ -2506,8 +2507,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) return res; out_unlock_free_unmap: - if (cmd->sg_mapped) - qlt_unmap_sg(vha, cmd); + qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(&ha->hardware_lock, flags); return res; @@ -2741,8 +2741,7 @@ done: if (!ha_locked && !in_interrupt()) msleep(250); /* just in case */ - if (cmd->sg_mapped) - qlt_unmap_sg(vha, cmd); + qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } return; @@ -3087,8 +3086,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, tfo = se_cmd->se_tfo; cmd->cmd_sent_to_fw = 0; - if (cmd->sg_mapped) - qlt_unmap_sg(vha, cmd); + qlt_unmap_sg(vha, cmd); if (unlikely(status != CTIO_SUCCESS)) { switch (status & 0xFFFF) { @@ -5343,7 +5341,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha) EXPORT_SYMBOL(qlt_lport_deregister); /* Must be called under HW lock */ -void qlt_set_mode(struct scsi_qla_host *vha) +static void qlt_set_mode(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; @@ -5364,7 +5362,7 @@ void qlt_set_mode(struct scsi_qla_host *vha) } /* Must be called under HW lock */ -void qlt_clear_mode(struct scsi_qla_host *vha) +static void qlt_clear_mode(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; @@ -5428,8 +5426,7 @@ EXPORT_SYMBOL(qlt_enable_vha); * * Disable Target Mode and reset the adapter */ -void -qlt_disable_vha(struct scsi_qla_host *vha) +static void qlt_disable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 8ff330f7d6f..332086776df 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -1001,11 +1001,11 @@ struct qla_tgt_prm { struct qla_tgt *tgt; void *pkt; struct scatterlist *sg; /* cmd data buffer SG vector */ + unsigned char *sense_buffer; int seg_cnt; int req_cnt; uint16_t rq_result; uint16_t 
scsi_status; - unsigned char *sense_buffer; int sense_buffer_len; int residual; int add_status_pkt; @@ -1033,10 +1033,6 @@ struct qla_tgt_srr_ctio { extern struct qla_tgt_data qla_target; -/* - * Internal function prototypes - */ -void qlt_disable_vha(struct scsi_qla_host *); /* * Function prototypes for qla_target.c logic used by qla2xxx LLD code. @@ -1049,8 +1045,6 @@ extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); -extern void qlt_set_mode(struct scsi_qla_host *ha); -extern void qlt_clear_mode(struct scsi_qla_host *ha); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_update_vp_map(struct scsi_qla_host *, int); @@ -1083,13 +1077,9 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. */ -extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, - struct atio_from_isp *); extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); -extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); -extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 031b2961c6b..73f9feecda7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -786,7 +786,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); - WARN_ON(node && (node != se_nacl)); + if (WARN_ON(node && (node != se_nacl))) { + /* + * The nacl no longer matches what we think it should be. + * Most likely a new dynamic acl has been added while + * someone dropped the hardware lock. It clearly is a + * bug elsewhere, but this bit can't make things worse. + */ + btree_insert32(&lport->lport_fcport_map, nacl->nport_id, + node, GFP_ATOMIC); + } pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", se_nacl, nacl->nport_wwnn, nacl->nport_id); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 49014a143c6..c1d04d4d3c6 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -202,6 +202,7 @@ static struct { {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 9a6f8468225..bc5ff6ff9c7 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -459,14 +459,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! 
scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ - if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) - /* - * nasty: for mid-layer issued TURs, we need to return the - * actual sense data without any recovery attempt. For eh - * issued ones, we need to try to recover and interpret - */ - return SUCCESS; - scsi_report_sense(sdev, &sshdr); if (scsi_sense_is_deferred(&sshdr)) @@ -482,6 +474,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) /* handler does not care. Drop down to default handling */ } + if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + /* * Previous logic looked for FILEMARK, EOM or ILI which are * mainly associated with tapes and returned SUCCESS. @@ -2001,8 +2001,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * is no point trying to lock the door of an off-line device. */ shost_for_each_device(sdev, shost) { - if (scsi_device_online(sdev) && sdev->locked) + if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) { scsi_eh_lock_door(sdev); + sdev->was_reset = 0; + } } /* diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9eff8a37513..50a6e1ac8d9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1893,6 +1893,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req, blk_mq_start_request(req); } + if (blk_queue_tagged(q)) + req->cmd_flags |= REQ_QUEUED; + else + req->cmd_flags &= ~REQ_QUEUED; + scsi_init_cmd_errh(cmd); cmd->scsi_done = scsi_mq_done; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 8adf067ff01..1c3467b8256 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), GFP_KERNEL); if (!clkfreq) { - dev_err(dev, "%s: no memory\n", "freq-table-hz"); ret = -ENOMEM; goto out; } @@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) if (ret && (ret != -EINVAL)) { dev_err(dev, "%s: error reading array %d\n", "freq-table-hz", ret); - goto free_clkfreq; + return ret; } for (i = 0; i < sz; i += 2) { ret = of_property_read_string_index(np, "clock-names", i/2, (const char **)&name); if (ret) - goto free_clkfreq; + goto out; clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); if (!clki) { ret = -ENOMEM; - goto free_clkfreq; + goto out; } clki->min_freq = clkfreq[i]; @@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clki->min_freq, clki->max_freq, clki->name); list_add_tail(&clki->list, &hba->clk_list_head); } -free_clkfreq: - kfree(clkfreq); out: return ret; } @@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, } vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) { - dev_err(dev, "No memory for %s regulator\n", name); - goto out; - } + if (!vreg) + return -ENOMEM; vreg->name = kstrdup(name, GFP_KERNEL); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 497c38a4a86..605ca60e8a1 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) if (!ufshcd_is_clkgating_allowed(hba)) return; 
device_remove_file(hba->dev, &hba->clk_gating.delay_attr); + cancel_work_sync(&hba->clk_gating.ungate_work); + cancel_delayed_work_sync(&hba->clk_gating.gate_work); } /* Must be called with host lock acquired */ @@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) return ret; } + /** + * ufshcd_init_pwr_info - setting the POR (power on reset) + * values in hba power info + * @hba: per-adapter instance + */ +static void ufshcd_init_pwr_info(struct ufs_hba *hba) +{ + hba->pwr_info.gear_rx = UFS_PWM_G1; + hba->pwr_info.gear_tx = UFS_PWM_G1; + hba->pwr_info.lane_rx = 1; + hba->pwr_info.lane_tx = 1; + hba->pwr_info.pwr_rx = SLOWAUTO_MODE; + hba->pwr_info.pwr_tx = SLOWAUTO_MODE; + hba->pwr_info.hs_rate = 0; +} + /** * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device * @hba: per-adapter instance @@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba = shost_priv(sdev->host); scsi_deactivate_tcq(sdev, hba->nutrs); /* Drop the reference as it won't be needed anymore */ - if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); hba->sdev_ufs_device = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } /** @@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) { int ret = 0; + struct scsi_device *sdev_rpmb; + struct scsi_device *sdev_boot; hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); @@ -4070,26 +4095,27 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) hba->sdev_ufs_device = NULL; goto out; } + scsi_device_put(hba->sdev_ufs_device); - hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, + sdev_boot = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); - if (IS_ERR(hba->sdev_boot)) { - ret = PTR_ERR(hba->sdev_boot); - hba->sdev_boot = NULL; + if (IS_ERR(sdev_boot)) { + ret = PTR_ERR(sdev_boot); goto remove_sdev_ufs_device; } + scsi_device_put(sdev_boot); - hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + sdev_rpmb = __scsi_add_device(hba->host, 0, 0, ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); - if (IS_ERR(hba->sdev_rpmb)) { - ret = PTR_ERR(hba->sdev_rpmb); - hba->sdev_rpmb = NULL; + if (IS_ERR(sdev_rpmb)) { + ret = PTR_ERR(sdev_rpmb); goto remove_sdev_boot; } + scsi_device_put(sdev_rpmb); goto out; remove_sdev_boot: - scsi_remove_device(hba->sdev_boot); + scsi_remove_device(sdev_boot); remove_sdev_ufs_device: scsi_remove_device(hba->sdev_ufs_device); out: @@ -4097,30 +4123,6 @@ out: } /** - * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by - * ufshcd_scsi_add_wlus() - * @hba: per-adapter instance - * - */ -static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba) -{ - if (hba->sdev_ufs_device) { - scsi_remove_device(hba->sdev_ufs_device); - hba->sdev_ufs_device = NULL; - } - - if (hba->sdev_boot) { - scsi_remove_device(hba->sdev_boot); - hba->sdev_boot = NULL; - } - - if (hba->sdev_rpmb) { - scsi_remove_device(hba->sdev_rpmb); - hba->sdev_rpmb = NULL; - } -} - -/** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance * @@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + ufshcd_init_pwr_info(hba); + /* UniPro link is active now */ 
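The ufshcd hunks around here stop caching the BOOT and RPMB well-known LUs: each __scsi_add_device() is immediately followed by scsi_device_put(), and the one pointer that is still kept (sdev_ufs_device) is cleared under the host lock and pinned with scsi_device_get() before use, as the later ufshcd_set_dev_pwr_mode() hunk shows. A small sketch of that take-a-reference-under-lock idiom; the wrapper below is hypothetical and simplified:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Safely pin a cached scsi_device pointer that another path may clear. */
static struct scsi_device *get_cached_sdev(struct Scsi_Host *host,
					   struct scsi_device **slot)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	sdev = *slot;
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;		/* device is going away */
	spin_unlock_irqrestore(host->host_lock, flags);

	return sdev;			/* caller drops it with scsi_device_put() */
}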
ufshcd_set_link_active(hba); @@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); } static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } @@ -4471,7 +4481,7 @@ out: if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) clk_disable_unprepare(clki->clk); } - } else if (!ret && on) { + } else if (on) { spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.state = CLKS_ON; spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, { unsigned char cmd[6] = { START_STOP }; struct scsi_sense_hdr sshdr; - struct scsi_device *sdp = hba->sdev_ufs_device; + struct scsi_device *sdp; + unsigned long flags; int ret; - if (!sdp || !scsi_device_online(sdp)) - return -ENODEV; + spin_lock_irqsave(hba->host->host_lock, flags); + sdp = hba->sdev_ufs_device; + if (sdp) { + ret = scsi_device_get(sdp); + if (!ret && !scsi_device_online(sdp)) { + ret = -ENODEV; + scsi_device_put(sdp); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (ret) + return ret; /* * If scsi commands fail, the scsi mid-layer schedules scsi error- @@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, if (!ret) hba->curr_dev_pwr_mode = pwr_mode; out: + scsi_device_put(sdp); hba->host->eh_noresume = 0; return ret; } @@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba) int ret = 0; if (!hba || !hba->is_powered) - goto out; + return 0; if (pm_runtime_suspended(hba->dev)) { if (hba->rpm_lvl == hba->spm_lvl) @@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown); void ufshcd_remove(struct ufs_hba *hba) { scsi_remove_host(hba->host); - ufshcd_scsi_remove_wlus(hba); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 58ecdff5065..4a574aa4585 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -392,8 +392,6 @@ struct ufs_hba { * "UFS device" W-LU. 
*/ struct scsi_device *sdev_ufs_device; - struct scsi_device *sdev_rpmb; - struct scsi_device *sdev_boot; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index cea8ea3491d..1a07bf540fe 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -26,6 +26,7 @@ static const struct of_device_id realview_soc_of_match[] = { { .compatible = "arm,realview-pb11mp-soc", }, { .compatible = "arm,realview-pba8-soc", }, { .compatible = "arm,realview-pbx-soc", }, + { } }; static u32 realview_coreid; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 72921588525..d0d5542efc0 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data) chip = dws->cur_chip; spi = message->spi; - if (unlikely(!chip->clk_div)) - chip->clk_div = dws->max_freq / chip->speed_hz; - if (message->state == ERROR_STATE) { message->status = -EIO; goto early_exit; @@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data) if (transfer->speed_hz) { speed = chip->speed_hz; - if (transfer->speed_hz != speed) { + if ((transfer->speed_hz != speed) || (!chip->clk_div)) { speed = transfer->speed_hz; /* clk_div doesn't support odd number */ @@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi) dev_err(&spi->dev, "No max speed HZ parameter\n"); return -EINVAL; } - chip->speed_hz = spi->max_speed_hz; chip->tmode = 0; /* Tx & Rx */ /* Default SPI mode is SCPOL = 0, SCPH = 0 */ @@ -669,6 +665,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) master->cleanup = dw_spi_cleanup; master->transfer_one_message = dw_spi_transfer_one_message; master->max_speed_hz = dws->max_freq; + master->dev.of_node = dev->of_node; /* Basic HW init */ spi_hw_init(dws); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 448216025ce..831ceb4a91f 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -46,7 +46,7 @@ #define SPI_TCR 0x08 -#define SPI_CTAR(x) (0x0c + (x * 4)) +#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4)) #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) #define SPI_CTAR_CPOL(x) ((x) << 26) #define SPI_CTAR_CPHA(x) ((x) << 25) @@ -70,7 +70,7 @@ #define SPI_PUSHR 0x34 #define SPI_PUSHR_CONT (1 << 31) -#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28) +#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28) #define SPI_PUSHR_EOQ (1 << 27) #define SPI_PUSHR_CTCNT (1 << 26) #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16) diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 835cdda6f4f..c76b7d7879d 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -454,7 +454,7 @@ static int orion_spi_probe(struct platform_device *pdev) spi->master = master; of_id = of_match_device(orion_spi_of_match_table, &pdev->dev); - devdata = of_id->data; + devdata = (of_id) ? 
of_id->data : &orion_spi_dev_data; spi->devdata = devdata; spi->clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index f35f723816e..fc2dd844160 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1106,7 +1106,7 @@ err_rxdesc: pl022->sgt_tx.nents, DMA_TO_DEVICE); err_tx_sgmap: dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, - pl022->sgt_tx.nents, DMA_FROM_DEVICE); + pl022->sgt_rx.nents, DMA_FROM_DEVICE); err_rx_sgmap: sg_free_table(&pl022->sgt_tx); err_alloc_tx_sg: diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index d8a105f7683..9e9e0f971e6 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1274,7 +1274,9 @@ static int pxa2xx_spi_suspend(struct device *dev) if (status != 0) return status; write_SSCR0(0, drv_data->ioaddr); - clk_disable_unprepare(ssp->clk); + + if (!pm_runtime_suspended(dev)) + clk_disable_unprepare(ssp->clk); return 0; } @@ -1288,7 +1290,8 @@ static int pxa2xx_spi_resume(struct device *dev) pxa2xx_spi_dma_resume(drv_data); /* Enable the SSP clock */ - clk_prepare_enable(ssp->clk); + if (!pm_runtime_suspended(dev)) + clk_prepare_enable(ssp->clk); /* Restore LPSS private register bits */ lpss_ssp_setup(drv_data); diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index f96ea8a38d6..87bc16f491f 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -145,6 +145,9 @@ #define RXBUSY (1 << 0) #define TXBUSY (1 << 1) +/* sclk_out: spi master internal logic in rk3x can support 50Mhz */ +#define MAX_SCLK_OUT 50000000 + enum rockchip_ssi_type { SSI_MOTO_SPI = 0, SSI_TI_SSP, @@ -325,6 +328,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master, spin_unlock_irqrestore(&rs->lock, flags); + spi_enable_chip(rs, 0); + return 0; } @@ -381,6 +386,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs) if (rs->tx) wait_for_idle(rs); + spi_enable_chip(rs, 0); + return 0; } @@ -392,8 +399,10 @@ static void rockchip_spi_dma_rxcb(void *data) spin_lock_irqsave(&rs->lock, flags); rs->state &= ~RXBUSY; - if (!(rs->state & TXBUSY)) + if (!(rs->state & TXBUSY)) { + spi_enable_chip(rs, 0); spi_finalize_current_transfer(rs->master); + } spin_unlock_irqrestore(&rs->lock, flags); } @@ -409,8 +418,10 @@ static void rockchip_spi_dma_txcb(void *data) spin_lock_irqsave(&rs->lock, flags); rs->state &= ~TXBUSY; - if (!(rs->state & RXBUSY)) + if (!(rs->state & RXBUSY)) { + spi_enable_chip(rs, 0); spi_finalize_current_transfer(rs->master); + } spin_unlock_irqrestore(&rs->lock, flags); } @@ -496,12 +507,19 @@ static void rockchip_spi_config(struct rockchip_spi *rs) dmacr |= RF_DMA_EN; } + if (WARN_ON(rs->speed > MAX_SCLK_OUT)) + rs->speed = MAX_SCLK_OUT; + + /* the minimum divsor is 2 */ + if (rs->max_freq < 2 * rs->speed) { + clk_set_rate(rs->spiclk, 2 * rs->speed); + rs->max_freq = clk_get_rate(rs->spiclk); + } + /* div doesn't support odd number */ div = max_t(u32, rs->max_freq / rs->speed, 1); div = (div + 1) & 0xfffe; - spi_enable_chip(rs, 0); - writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0); writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1); @@ -515,8 +533,6 @@ static void rockchip_spi_config(struct rockchip_spi *rs) spi_set_clk(rs, div); dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div); - - spi_enable_chip(rs, 1); } static int rockchip_spi_transfer_one( @@ -524,7 +540,7 @@ static int rockchip_spi_transfer_one( struct spi_device *spi, struct spi_transfer *xfer) { - int ret = 0; + int ret = 1; struct rockchip_spi 
*rs = spi_master_get_devdata(master); WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && @@ -556,17 +572,27 @@ static int rockchip_spi_transfer_one( rs->tmode = CR0_XFM_RO; /* we need prepare dma before spi was enabled */ - if (master->can_dma && master->can_dma(master, spi, xfer)) { + if (master->can_dma && master->can_dma(master, spi, xfer)) rs->use_dma = 1; - rockchip_spi_prepare_dma(rs); - } else { + else rs->use_dma = 0; - } rockchip_spi_config(rs); - if (!rs->use_dma) + if (rs->use_dma) { + if (rs->tmode == CR0_XFM_RO) { + /* rx: dma must be prepared first */ + rockchip_spi_prepare_dma(rs); + spi_enable_chip(rs, 1); + } else { + /* tx or tr: spi must be enabled first */ + spi_enable_chip(rs, 1); + rockchip_spi_prepare_dma(rs); + } + } else { + spi_enable_chip(rs, 1); ret = rockchip_spi_pio_transfer(rs); + } return ret; } diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index 39e2c0a55a2..f63de781c72 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | - sspi->word_width; + (sspi->word_width >> 1); rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | - sspi->word_width; + (sspi->word_width >> 1); if (!(spi->mode & SPI_CS_HIGH)) regval |= SIRFSOC_SPI_CS_IDLE_STAT; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ebcb33df2eb..50f20f24398 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, sg_free_table(sgt); return -ENOMEM; } - sg_buf = page_address(vm_page) + - ((size_t)buf & ~PAGE_MASK); + sg_set_page(&sgt->sgl[i], vm_page, + min, offset_in_page(buf)); } else { sg_buf = buf; + sg_set_buf(&sgt->sgl[i], sg_buf, min); } - sg_set_buf(&sgt->sgl[i], sg_buf, min); buf += min; len -= min; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index e3bc23bb588..e50039fb147 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -82,10 +82,11 @@ struct spidev_data { struct spi_device *spi; struct list_head device_entry; - /* buffer is NULL unless this device is open (users > 0) */ + /* TX/RX buffers are NULL unless this device is open (users > 0) */ struct mutex buf_lock; unsigned users; - u8 *buffer; + u8 *tx_buffer; + u8 *rx_buffer; }; static LIST_HEAD(device_list); @@ -135,7 +136,7 @@ static inline ssize_t spidev_sync_write(struct spidev_data *spidev, size_t len) { struct spi_transfer t = { - .tx_buf = spidev->buffer, + .tx_buf = spidev->tx_buffer, .len = len, }; struct spi_message m; @@ -149,7 +150,7 @@ static inline ssize_t spidev_sync_read(struct spidev_data *spidev, size_t len) { struct spi_transfer t = { - .rx_buf = spidev->buffer, + .rx_buf = spidev->rx_buffer, .len = len, }; struct spi_message m; @@ -179,7 +180,7 @@ spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) if (status > 0) { unsigned long missing; - missing = copy_to_user(buf, spidev->buffer, status); + missing = copy_to_user(buf, spidev->rx_buffer, status); if (missing == status) status = -EFAULT; else @@ -206,7 +207,7 @@ spidev_write(struct file *filp, const char __user *buf, spidev = filp->private_data; mutex_lock(&spidev->buf_lock); - missing = copy_from_user(spidev->buffer, buf, count); + missing = copy_from_user(spidev->tx_buffer, buf, count); if (missing == 0) status = spidev_sync_write(spidev, count); else @@ -224,7 +225,7 @@ static 
int spidev_message(struct spidev_data *spidev, struct spi_transfer *k_tmp; struct spi_ioc_transfer *u_tmp; unsigned n, total; - u8 *buf; + u8 *tx_buf, *rx_buf; int status = -EFAULT; spi_message_init(&msg); @@ -236,7 +237,8 @@ static int spidev_message(struct spidev_data *spidev, * We walk the array of user-provided transfers, using each one * to initialize a kernel version of the same transfer. */ - buf = spidev->buffer; + tx_buf = spidev->tx_buffer; + rx_buf = spidev->rx_buffer; total = 0; for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; @@ -250,20 +252,21 @@ static int spidev_message(struct spidev_data *spidev, } if (u_tmp->rx_buf) { - k_tmp->rx_buf = buf; + k_tmp->rx_buf = rx_buf; if (!access_ok(VERIFY_WRITE, (u8 __user *) (uintptr_t) u_tmp->rx_buf, u_tmp->len)) goto done; } if (u_tmp->tx_buf) { - k_tmp->tx_buf = buf; - if (copy_from_user(buf, (const u8 __user *) + k_tmp->tx_buf = tx_buf; + if (copy_from_user(tx_buf, (const u8 __user *) (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; } - buf += k_tmp->len; + tx_buf += k_tmp->len; + rx_buf += k_tmp->len; k_tmp->cs_change = !!u_tmp->cs_change; k_tmp->tx_nbits = u_tmp->tx_nbits; @@ -290,17 +293,17 @@ static int spidev_message(struct spidev_data *spidev, goto done; /* copy any rx data out of bounce buffer */ - buf = spidev->buffer; + rx_buf = spidev->rx_buffer; for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { if (u_tmp->rx_buf) { if (__copy_to_user((u8 __user *) - (uintptr_t) u_tmp->rx_buf, buf, + (uintptr_t) u_tmp->rx_buf, rx_buf, u_tmp->len)) { status = -EFAULT; goto done; } } - buf += u_tmp->len; + rx_buf += u_tmp->len; } status = total; @@ -508,22 +511,41 @@ static int spidev_open(struct inode *inode, struct file *filp) break; } } - if (status == 0) { - if (!spidev->buffer) { - spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); - if (!spidev->buffer) { + + if (status) { + pr_debug("spidev: nothing for minor %d\n", iminor(inode)); + goto err_find_dev; + } + + if (!spidev->tx_buffer) { + spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL); + if (!spidev->tx_buffer) { dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); status = -ENOMEM; + goto err_find_dev; } } - if (status == 0) { - spidev->users++; - filp->private_data = spidev; - nonseekable_open(inode, filp); + + if (!spidev->rx_buffer) { + spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL); + if (!spidev->rx_buffer) { + dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); + status = -ENOMEM; + goto err_alloc_rx_buf; } - } else - pr_debug("spidev: nothing for minor %d\n", iminor(inode)); + } + + spidev->users++; + filp->private_data = spidev; + nonseekable_open(inode, filp); + + mutex_unlock(&device_list_lock); + return 0; +err_alloc_rx_buf: + kfree(spidev->tx_buffer); + spidev->tx_buffer = NULL; +err_find_dev: mutex_unlock(&device_list_lock); return status; } @@ -542,8 +564,11 @@ static int spidev_release(struct inode *inode, struct file *filp) if (!spidev->users) { int dofree; - kfree(spidev->buffer); - spidev->buffer = NULL; + kfree(spidev->tx_buffer); + spidev->tx_buffer = NULL; + + kfree(spidev->rx_buffer); + spidev->rx_buffer = NULL; /* ... after we unbound from the underlying device? 
*/ spin_lock_irq(&spidev->spi_lock); diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 28b93d39a94..a673ffa34aa 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -420,7 +420,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from) struct logger_log *log = file_get_log(iocb->ki_filp); struct logger_entry header; struct timespec now; - size_t len, count; + size_t len, count, w_off; count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD); @@ -452,11 +452,14 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from) memcpy(log->buffer + log->w_off, &header, len); memcpy(log->buffer, (char *)&header + len, sizeof(header) - len); - len = min(count, log->size - log->w_off); + /* Work with a copy until we are ready to commit the whole entry */ + w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry)); - if (copy_from_iter(log->buffer + log->w_off, len, from) != len) { + len = min(count, log->size - w_off); + + if (copy_from_iter(log->buffer + w_off, len, from) != len) { /* - * Note that by not updating w_off, this abandons the + * Note that by not updating log->w_off, this abandons the * portion of the new entry that *was* successfully * copied, just above. This is intentional to avoid * message corruption from missing fragments. @@ -470,7 +473,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from) return -EFAULT; } - log->w_off = logger_offset(log, log->w_off + count); + log->w_off = logger_offset(log, w_off + count); mutex_unlock(&log->mutex); /* wake up any blocked readers */ diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index a8bc2b56778..152f4c12ea4 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -426,6 +426,7 @@ config COMEDI_AIO_IIRO_16 config COMEDI_II_PCI20KC tristate "Intelligent Instruments PCI-20001C carrier support" + depends on HAS_IOMEM ---help--- Enable support for Intelligent Instruments PCI-20001C carrier PCI-20001, PCI-20006 and PCI-20341 @@ -667,7 +668,6 @@ config COMEDI_ADDI_APCI_2200 config COMEDI_ADDI_APCI_3120 tristate "ADDI-DATA APCI_3120/3001 support" depends on HAS_DMA - depends on VIRT_TO_BUS ---help--- Enable support for ADDI-DATA APCI_3120/3001 cards diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 495969f46e7..9c32f027600 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1462,10 +1462,7 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev, unsigned int *chanlist; int ret; - /* user_chanlist could be NULL for do_cmdtest ioctls */ - if (!user_chanlist) - return 0; - + cmd->chanlist = NULL; chanlist = memdup_user(user_chanlist, cmd->chanlist_len * sizeof(unsigned int)); if (IS_ERR(chanlist)) @@ -1609,13 +1606,18 @@ static int do_cmdtest_ioctl(struct comedi_device *dev, s = &dev->subdevices[cmd.subdev]; - /* load channel/gain list */ - ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd); - if (ret) - return ret; + /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */ + if (user_chanlist) { + /* load channel/gain list */ + ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd); + if (ret) + return ret; + } ret = s->do_cmdtest(dev, s, &cmd); + kfree(cmd.chanlist); /* free kernel copy of user chanlist */ + /* restore chanlist pointer before copying back */ cmd.chanlist = (unsigned int __force *)user_chanlist; @@ -1642,7 
+1644,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev, */ -static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg, +static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { int ret = 0; @@ -1679,7 +1681,7 @@ static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg, This function isn't protected by the semaphore, since we already own the lock. */ -static int do_unlock_ioctl(struct comedi_device *dev, unsigned int arg, +static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; @@ -1714,7 +1716,7 @@ static int do_unlock_ioctl(struct comedi_device *dev, unsigned int arg, nothing */ -static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, +static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; @@ -1751,7 +1753,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, nothing */ -static int do_poll_ioctl(struct comedi_device *dev, unsigned int arg, +static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg, void *file) { struct comedi_subdevice *s; diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index 32a19264a17..2a29b9baec0 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -1559,14 +1559,16 @@ static int mxs_lradc_probe(struct platform_device *pdev) /* Grab all IRQ sources */ for (i = 0; i < of_cfg->irq_count; i++) { lradc->irq[i] = platform_get_irq(pdev, i); - if (lradc->irq[i] < 0) - return lradc->irq[i]; + if (lradc->irq[i] < 0) { + ret = lradc->irq[i]; + goto err_clk; + } ret = devm_request_irq(dev, lradc->irq[i], mxs_lradc_handle_irq, 0, of_cfg->irq_name[i], iio); if (ret) - return ret; + goto err_clk; } lradc->vref_mv = of_cfg->vref_mv; @@ -1588,7 +1590,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) &mxs_lradc_trigger_handler, &mxs_lradc_buffer_ops); if (ret) - return ret; + goto err_clk; ret = mxs_lradc_trigger_init(iio); if (ret) @@ -1643,6 +1645,8 @@ err_dev: mxs_lradc_trigger_remove(iio); err_trig: iio_triggered_buffer_cleanup(iio); +err_clk: + clk_disable_unprepare(lradc->clk); return ret; } diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index d0c89d0457d..b6bd609c365 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = { .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), .address = AD5933_REG_TEMP_DATA, + .scan_index = -1, .scan_type = { .sign = 's', .realbits = 14, @@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "real_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "real", .address = AD5933_REG_REAL_DATA, .scan_index = 0, .scan_type = { @@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "imag_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "imag", .address = AD5933_REG_IMAG_DATA, .scan_index = 1, .scan_type = { @@ -749,14 +746,14 @@ static int ad5933_probe(struct i2c_client *client, indio_dev->name = id->name; indio_dev->modes = INDIO_DIRECT_MODE; 
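
The ad5933 channel-table hunks above, and the probe and buffer-registration changes that follow, lean on the IIO convention that a channel with scan_index = -1 is never placed into a capture buffer, so the temperature channel can stay in the array even though the whole array is now registered. A minimal, hypothetical two-entry table showing that convention (names and field values are illustrative, not the driver's):

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = -1,	/* sysfs-only, excluded from buffered capture */
	}, {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.scan_index = 0,	/* first slot in the scan */
		.scan_type = {
			.sign = 's',
			.realbits = 16,
			.storagebits = 16,
		},
	},
};
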
indio_dev->channels = ad5933_channels; - indio_dev->num_channels = 1; /* only register temp0_input */ + indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); ret = ad5933_register_ring_funcs_and_init(indio_dev); if (ret) goto error_disable_reg; - /* skip temp0_input, register in0_(real|imag)_raw */ - ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2); + ret = iio_buffer_register(indio_dev, ad5933_channels, + ARRAY_SIZE(ad5933_channels)); if (ret) goto error_unreg_ring; diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h index 07318203a83..e8c98cf5707 100644 --- a/drivers/staging/iio/meter/ade7758.h +++ b/drivers/staging/iio/meter/ade7758.h @@ -119,7 +119,6 @@ struct ade7758_state { u8 *tx; u8 *rx; struct mutex buf_lock; - const struct iio_chan_spec *ade7758_ring_channels; struct spi_transfer ring_xfer[4]; struct spi_message ring_msg; /* diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c index abc60067cd7..fb373b89dcc 100644 --- a/drivers/staging/iio/meter/ade7758_core.c +++ b/drivers/staging/iio/meter/ade7758_core.c @@ -634,9 +634,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), .scan_index = 0, .scan_type = { @@ -648,9 +645,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 0, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), .scan_index = 1, .scan_type = { @@ -662,9 +656,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), .scan_index = 2, .scan_type = { @@ -676,9 +668,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), .scan_index = 3, .scan_type = { @@ -690,9 +680,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), .scan_index = 4, .scan_type = { @@ -704,9 +692,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 1, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), .scan_index = 5, .scan_type = { @@ -718,9 +703,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 1, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = 
AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), .scan_index = 6, .scan_type = { @@ -732,9 +714,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), .scan_index = 7, .scan_type = { @@ -746,9 +726,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), .scan_index = 8, .scan_type = { @@ -760,9 +738,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), .scan_index = 9, .scan_type = { @@ -774,9 +750,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_VOLTAGE, .indexed = 1, .channel = 2, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), .scan_index = 10, .scan_type = { @@ -788,9 +761,6 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_CURRENT, .indexed = 1, .channel = 2, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), .scan_index = 11, .scan_type = { @@ -802,9 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), .scan_index = 12, .scan_type = { @@ -816,9 +784,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), .scan_index = 13, .scan_type = { @@ -830,9 +796,7 @@ static const struct iio_chan_spec ade7758_channels[] = { .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), .scan_index = 14, .scan_type = { @@ -873,13 +837,14 @@ static int ade7758_probe(struct spi_device *spi) goto error_free_rx; } st->us = spi; - st->ade7758_ring_channels = &ade7758_channels[0]; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ade7758_info; indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ade7758_channels; + indio_dev->num_channels = ARRAY_SIZE(ade7758_channels); ret = ade7758_configure_ring(indio_dev); if (ret) diff --git a/drivers/staging/iio/meter/ade7758_ring.c 
b/drivers/staging/iio/meter/ade7758_ring.c index c0accf8cce9..6e900649074 100644 --- a/drivers/staging/iio/meter/ade7758_ring.c +++ b/drivers/staging/iio/meter/ade7758_ring.c @@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p) **/ static int ade7758_ring_preenable(struct iio_dev *indio_dev) { - struct ade7758_state *st = iio_priv(indio_dev); unsigned channel; - if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) + if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) return -EINVAL; channel = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); ade7758_write_waveform_type(&indio_dev->dev, - st->ade7758_ring_channels[channel].address); + indio_dev->channels[channel].address); return 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 9935e66935a..eddef9cd2e1 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) return _FAIL; - psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); + psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); if (psurveyPara == NULL) { kfree(ph2c); return _FAIL; @@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) else RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd == NULL) { res = _FAIL; RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); @@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter) u8 res = _SUCCESS; - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue) u8 res = _SUCCESS; if (enqueue) { - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time) u8 res = _SUCCESS; - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ph2c); res = _FAIL; @@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter) u8 res = _SUCCESS; - ppscmd = kzalloc(sizeof(struct 
cmd_obj), GFP_KERNEL); + ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ppscmd == NULL) { res = _FAIL; goto exit; } - pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); + pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC); if (pdrvextra_cmd_parm == NULL) { kfree(ppscmd); res = _FAIL; diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index 5ba5099ec20..70b1bc3e0e6 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter, pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd_obj == NULL) return; cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (pevtcmd == NULL) { kfree(pcmd_obj); return; @@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res) struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (pcmd_obj == NULL) return; cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (pevtcmd == NULL) { kfree(pcmd_obj); return; @@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context) pmlmeext->scan_abort = false;/* reset */ } - ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (ph2c == NULL) goto exit_survey_timer_hdl; - psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); + psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC); if (psurveyPara == NULL) { kfree(ph2c); goto exit_survey_timer_hdl; diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c index 33ccbbbd8ed..d300369977f 100644 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c @@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) return true; } - bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL); + bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC); subtype = GetFrameSubType(pframe) >> 4; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 407a318b09d..2f87150a21b 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8723au/include/rtw_eeprom.h b/drivers/staging/rtl8723au/include/rtw_eeprom.h index e5121a2a64b..a86f36e49dd 100644 --- a/drivers/staging/rtl8723au/include/rtw_eeprom.h +++ b/drivers/staging/rtl8723au/include/rtw_eeprom.h @@ -107,12 +107,12 @@ enum rt_customer_id }; struct eeprom_priv { + u8 mac_addr[6]; /* 
PermanentAddress */ u8 bautoload_fail_flag; u8 bloadfile_fail_flag; u8 bloadmac_fail_flag; /* u8 bempty; */ /* u8 sys_config; */ - u8 mac_addr[6]; /* PermanentAddress */ /* u8 config0; */ u16 channel_plan; /* u8 country_string[3]; */ diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index dc2d84ac5a0..81d44c477a5 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -31,6 +31,13 @@ config TCM_PSCSI Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered passthrough access to Linux/SCSI device +config TCM_USER + tristate "TCM/USER Subsystem Plugin for Linux" + depends on UIO && NET + help + Say Y here to enable the TCM/USER subsystem plugin for a userspace + process to handle requests + source "drivers/target/loopback/Kconfig" source "drivers/target/tcm_fc/Kconfig" source "drivers/target/iscsi/Kconfig" diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 85b012d2f89..bbb4a7d638e 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o obj-$(CONFIG_TCM_FILEIO) += target_core_file.o obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o +obj-$(CONFIG_TCM_USER) += target_core_user.o # Fabric modules obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 260c3e1e312..73e58d22e32 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", inaddr_any ? conn->local_ip : np->np_ip, - inaddr_any ? conn->local_port : np->np_port, + np->np_port, tpg->tpgt); len += 1; @@ -3709,7 +3709,6 @@ static inline void iscsit_thread_check_cpumask( struct task_struct *p, int mode) { - char buf[128]; /* * mode == 1 signals iscsi_target_tx_thread() usage. * mode == 0 signals iscsi_target_rx_thread() usage. @@ -3728,8 +3727,6 @@ static inline void iscsit_thread_check_cpumask( * both TX and RX kthreads are scheduled to run on the * same CPU. 
*/ - memset(buf, 0, 128); - cpumask_scnprintf(buf, 128, conn->conn_cpumask); set_cpus_allowed_ptr(p, conn->conn_cpumask); } @@ -4326,8 +4323,7 @@ int iscsit_close_connection( if (conn->conn_tx_hash.tfm) crypto_free_hash(conn->conn_tx_hash.tfm); - if (conn->conn_cpumask) - free_cpumask_var(conn->conn_cpumask); + free_cpumask_var(conn->conn_cpumask); kfree(conn->conn_ops); conn->conn_ops = NULL; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index ae03f3e5de1..9059c1e0b26 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -669,12 +669,10 @@ static ssize_t lio_target_nacl_show_info( } else { sess = se_sess->fabric_sess_ptr; - if (sess->sess_ops->InitiatorName) - rb += sprintf(page+rb, "InitiatorName: %s\n", - sess->sess_ops->InitiatorName); - if (sess->sess_ops->InitiatorAlias) - rb += sprintf(page+rb, "InitiatorAlias: %s\n", - sess->sess_ops->InitiatorAlias); + rb += sprintf(page+rb, "InitiatorName: %s\n", + sess->sess_ops->InitiatorName); + rb += sprintf(page+rb, "InitiatorAlias: %s\n", + sess->sess_ops->InitiatorAlias); rb += sprintf(page+rb, "LIO Session ID: %u " "ISID: 0x%02x %02x %02x %02x %02x %02x " diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 0d1e6ee3e99..a0ae5fc0ad7 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -345,7 +345,6 @@ static int iscsit_dataout_check_datasn( struct iscsi_cmd *cmd, unsigned char *buf) { - int dump = 0, recovery = 0; u32 data_sn = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_data *hdr = (struct iscsi_data *) buf; @@ -370,13 +369,11 @@ static int iscsit_dataout_check_datasn( pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " higher than expected 0x%08x.\n", cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); - recovery = 1; goto recover; } else if (be32_to_cpu(hdr->datasn) < data_sn) { pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" " lower than expected 0x%08x, discarding payload.\n", cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); - dump = 1; goto dump; } @@ -392,8 +389,7 @@ dump: if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) return DATAOUT_CANNOT_RECOVER; - return (recovery || dump) ? 
DATAOUT_WITHIN_COMMAND_RECOVERY : - DATAOUT_NORMAL; + return DATAOUT_WITHIN_COMMAND_RECOVERY; } static int iscsit_dataout_pre_datapduinorder_yes( diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 5e71ac60941..480f2e0ecc1 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -978,8 +978,7 @@ int iscsit_setup_np( return 0; fail: np->np_socket = NULL; - if (sock) - sock_release(sock); + sock_release(sock); return ret; } @@ -1190,8 +1189,7 @@ old_sess_out: if (!IS_ERR(conn->conn_tx_hash.tfm)) crypto_free_hash(conn->conn_tx_hash.tfm); - if (conn->conn_cpumask) - free_cpumask_var(conn->conn_cpumask); + free_cpumask_var(conn->conn_cpumask); kfree(conn->conn_ops); @@ -1268,8 +1266,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) iscsit_put_transport(conn->conn_transport); kfree(conn); conn = NULL; - if (ret == -ENODEV) - goto out; /* Get another socket */ return 1; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 73355f4fca7..ce87ce9bdb9 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1481,8 +1481,9 @@ void iscsit_collect_login_stats( if (conn->param_list) intrname = iscsi_find_param_from_key(INITIATORNAME, conn->param_list); - strcpy(ls->last_intr_fail_name, - (intrname ? intrname->value : "Unknown")); + strlcpy(ls->last_intr_fail_name, + (intrname ? intrname->value : "Unknown"), + sizeof(ls->last_intr_fail_name)); ls->last_intr_fail_ip_family = conn->login_family; diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 340de9d92b1..ab3ab27d49b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) /* * Locate the SAM Task Attr from struct scsi_cmnd * */ -static int tcm_loop_sam_attr(struct scsi_cmnd *sc) -{ - if (sc->device->tagged_supported) { - switch (sc->tag) { - case HEAD_OF_QUEUE_TAG: - return MSG_HEAD_TAG; - case ORDERED_QUEUE_TAG: - return MSG_ORDERED_TAG; - default: - break; - } - } +static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag) +{ + if (sc->device->tagged_supported && + sc->device->ordered_tags && tag >= 0) + return MSG_ORDERED_TAG; return MSG_SIMPLE_TAG; } @@ -227,7 +220,7 @@ static void tcm_loop_submission_work(struct work_struct *work) rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - transfer_length, tcm_loop_sam_attr(sc), + transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag), sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count, @@ -266,7 +259,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) } tl_cmd->sc = sc; - tl_cmd->sc_cmd_tag = sc->tag; + tl_cmd->sc_cmd_tag = sc->request->tag; INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); queue_work(tcm_loop_workqueue, &tl_cmd->work); return 0; @@ -370,7 +363,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, - sc->tag, TMR_ABORT_TASK); + sc->request->tag, TMR_ABORT_TASK); return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; } @@ -960,8 +953,7 @@ static int tcm_loop_port_link( struct tcm_loop_tpg, tl_se_tpg); struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; - atomic_inc(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic(); + atomic_inc_mb(&tl_tpg->tl_tpg_port_count); /* * Add Linux/SCSI struct scsi_device by HCTL */ @@ -995,8 +987,7 @@ static void tcm_loop_port_unlink( scsi_remove_device(sd); scsi_device_put(sd); - atomic_dec(&tl_tpg->tl_tpg_port_count); - smp_mb__after_atomic(); + atomic_dec_mb(&tl_tpg->tl_tpg_port_count); pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); } diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fbc5ebb5f76..fb87780929d 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) continue; - atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) found = true; spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); @@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) * every I_T nexus other than the I_T nexus on which the SET * TARGET PORT GROUPS command */ - atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic(); + atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt); spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_lock_bh(&port->sep_alua_lock); @@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) spin_unlock_bh(&port->sep_alua_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock); - atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); /* @@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work) core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); if (tg_pt_gp->tg_pt_gp_transition_complete) @@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt( */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { @@ -1168,7 +1162,6 @@ int core_alua_do_port_transition( spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic(); spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); /* * For storage objects that are members of the 'default_lu_gp', @@ -1184,8 +1177,7 @@ int core_alua_do_port_transition( l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, new_state, explicit); - atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); return rc; } /* @@ -1198,8 +1190,7 @@ int core_alua_do_port_transition( lu_gp_mem_list) { dev = lu_gp_mem->lu_gp_mem_dev; - atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic(); + atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); 
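
The recurring change in the target hunks above and below replaces each open-coded atomic_inc()/atomic_dec() followed by smp_mb__after_atomic() with the atomic_inc_mb()/atomic_dec_mb() helpers. The helpers' real definitions live in include/linux/atomic.h; the sketch below is only an assumed approximation of such a wrapper, not the kernel's verbatim code:

/* Assumed shape of the helper (illustrative, not the actual definition). */
#ifndef atomic_inc_mb
#define atomic_inc_mb(v)			\
	do {					\
		smp_mb__before_atomic();	\
		atomic_inc(v);			\
		smp_mb__after_atomic();		\
	} while (0)
#endif
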
spin_unlock(&lu_gp->lu_gp_lock); spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1227,8 +1218,7 @@ int core_alua_do_port_transition( tg_pt_gp->tg_pt_gp_alua_port = NULL; tg_pt_gp->tg_pt_gp_alua_nacl = NULL; } - atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return @@ -1238,16 +1228,14 @@ int core_alua_do_port_transition( new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); if (rc) break; } spin_unlock(&dev->t10_alua.tg_pt_gps_lock); spin_lock(&lu_gp->lu_gp_lock); - atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt); } spin_unlock(&lu_gp->lu_gp_lock); @@ -1260,8 +1248,7 @@ int core_alua_do_port_transition( core_alua_dump_state(new_state)); } - atomic_dec(&lu_gp->lu_gp_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&lu_gp->lu_gp_ref_cnt); return rc; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 756def38c77..79f9296a08a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -665,6 +665,9 @@ SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(emulate_rest_reord); SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(force_pr_aptpl); +SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB_RO(hw_block_size); SE_DEV_ATTR_RO(hw_block_size); @@ -719,6 +722,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_hw_pi_prot_type.attr, &target_core_dev_attrib_pi_prot_format.attr, &target_core_dev_attrib_enforce_pr_isids.attr, + &target_core_dev_attrib_force_pr_aptpl.attr, &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_emulate_rest_reord.attr, &target_core_dev_attrib_hw_block_size.attr, @@ -1263,7 +1267,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( { unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; unsigned char *t_fabric = NULL, *t_port = NULL; - char *orig, *ptr, *arg_p, *opts; + char *orig, *ptr, *opts; substring_t args[MAX_OPT_ARGS]; unsigned long long tmp_ll; u64 sa_res_key = 0; @@ -1295,14 +1299,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( token = match_token(ptr, tokens, args); switch (token) { case Opt_initiator_fabric: - i_fabric = match_strdup(&args[0]); + i_fabric = match_strdup(args); if (!i_fabric) { ret = -ENOMEM; goto out; } break; case Opt_initiator_node: - i_port = match_strdup(&args[0]); + i_port = match_strdup(args); if (!i_port) { ret = -ENOMEM; goto out; @@ -1316,7 +1320,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } break; case Opt_initiator_sid: - isid = match_strdup(&args[0]); + isid = match_strdup(args); if (!isid) { ret = -ENOMEM; goto out; @@ -1330,15 +1334,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } break; case Opt_sa_res_key: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - goto out; - } - ret = kstrtoull(arg_p, 0, &tmp_ll); + ret = kstrtoull(args->from, 0, &tmp_ll); if (ret < 0) { - pr_err("kstrtoull() failed for" - " sa_res_key=\n"); + pr_err("kstrtoull() failed for sa_res_key=\n"); goto out; } sa_res_key = (u64)tmp_ll; @@ -1370,14 +1368,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( * 
PR APTPL Metadata for Target Port */ case Opt_target_fabric: - t_fabric = match_strdup(&args[0]); + t_fabric = match_strdup(args); if (!t_fabric) { ret = -ENOMEM; goto out; } break; case Opt_target_node: - t_port = match_strdup(&args[0]); + t_port = match_strdup(args); if (!t_port) { ret = -ENOMEM; goto out; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 98da9016715..c45f9e907e4 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( if (port->sep_rtpi != rtpi) continue; - atomic_inc(&deve->pr_ref_count); - smp_mb__after_atomic(); + atomic_inc_mb(&deve->pr_ref_count); spin_unlock_irq(&nacl->device_list_lock); return deve; @@ -1019,6 +1018,23 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) return 0; } +int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -EINVAL; + } + if (dev->export_count) { + pr_err("dev[%p]: Unable to set force_pr_aptpl while" + " export_count is %d\n", dev, dev->export_count); + return -EINVAL; + } + + dev->dev_attrib.force_pr_aptpl = flag; + pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); + return 0; +} + int se_dev_set_is_nonrot(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { @@ -1250,24 +1266,16 @@ struct se_lun *core_dev_add_lun( * * */ -int core_dev_del_lun( +void core_dev_del_lun( struct se_portal_group *tpg, - u32 unpacked_lun) + struct se_lun *lun) { - struct se_lun *lun; - - lun = core_tpg_pre_dellun(tpg, unpacked_lun); - if (IS_ERR(lun)) - return PTR_ERR(lun); - - core_tpg_post_dellun(tpg, lun); - - pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" + pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from" " device object\n", tpg->se_tpg_tfo->get_fabric_name(), - tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, tpg->se_tpg_tfo->get_fabric_name()); - return 0; + core_tpg_remove_lun(tpg, lun); } struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) @@ -1396,8 +1404,7 @@ int core_dev_add_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); - atomic_inc(&lun->lun_acl_count); - smp_mb__after_atomic(); + atomic_inc_mb(&lun->lun_acl_count); spin_unlock(&lun->lun_acl_lock); pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " @@ -1409,7 +1416,8 @@ int core_dev_add_initiator_node_lun_acl( * Check to see if there are any existing persistent reservation APTPL * pre-registrations that need to be enabled for this LUN ACL.. 
*/ - core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); + core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl, + lacl->mapped_lun); return 0; } @@ -1430,8 +1438,7 @@ int core_dev_del_initiator_node_lun_acl( spin_lock(&lun->lun_acl_lock); list_del(&lacl->lacl_list); - atomic_dec(&lun->lun_acl_count); - smp_mb__after_atomic(); + atomic_dec_mb(&lun->lun_acl_count); spin_unlock(&lun->lun_acl_lock); core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, @@ -1554,6 +1561,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; dev->dev_attrib.is_nonrot = DA_IS_NONROT; dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 7de9f0475d0..0c3f90130b7 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -320,7 +320,7 @@ static struct config_group *target_fabric_make_mappedlun( struct se_node_acl, acl_group); struct se_portal_group *se_tpg = se_nacl->se_tpg; struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; - struct se_lun_acl *lacl; + struct se_lun_acl *lacl = NULL; struct config_item *acl_ci; struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; char *buf; @@ -406,6 +406,7 @@ static struct config_group *target_fabric_make_mappedlun( out: if (lacl_cg) kfree(lacl_cg->default_groups); + kfree(lacl); kfree(buf); return ERR_PTR(ret); } @@ -821,7 +822,7 @@ static int target_fabric_port_unlink( tf->tf_ops.fabric_pre_unlink(se_tpg, lun); } - core_dev_del_lun(se_tpg, lun->unpacked_lun); + core_dev_del_lun(se_tpg, lun); return 0; } @@ -910,16 +911,12 @@ static struct config_group *target_fabric_make_lun( GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); - errno = -ENOMEM; - goto out; + kfree(lun_cg->default_groups); + return ERR_PTR(-ENOMEM); } target_stat_setup_port_default_groups(lun); return &lun->lun_group; -out: - if (lun_cg) - kfree(lun_cg->default_groups); - return ERR_PTR(errno); } static void target_fabric_drop_lun( diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 0d1cf8b4f49..35bfe77160d 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -394,9 +394,9 @@ char *iscsi_parse_pr_out_transport_id( * If the caller wants the TransportID Length, we set that value for the * entire iSCSI Tarnsport ID now. 
*/ - if (out_tid_len != NULL) { - add_len = ((buf[2] >> 8) & 0xff); - add_len |= (buf[3] & 0xff); + if (out_tid_len) { + /* The shift works thanks to integer promotion rules */ + add_len = (buf[2] << 8) | buf[3]; tid_len = strlen(&buf[4]); tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7d6cddaec52..72c83d98662 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -415,7 +415,7 @@ fd_execute_sync_cache(struct se_cmd *cmd) } else { start = cmd->t_task_lba * dev->dev_attrib.block_size; if (cmd->data_length) - end = start + cmd->data_length; + end = start + cmd->data_length - 1; else end = LLONG_MAX; } @@ -680,7 +680,12 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct fd_dev *fd_dev = FD_DEV(dev); loff_t start = cmd->t_task_lba * dev->dev_attrib.block_size; - loff_t end = start + cmd->data_length; + loff_t end; + + if (cmd->data_length) + end = start + cmd->data_length - 1; + else + end = LLONG_MAX; vfs_fsync_range(fd_dev->fd_file, start, end, 1); } @@ -762,7 +767,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev, fd_dev->fbd_flags |= FBDF_HAS_SIZE; break; case Opt_fd_buffered_io: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; if (arg != 1) { pr_err("bogus fd_buffered_io=%d value\n", arg); ret = -EINVAL; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index de9cab708f4..e31f42f369f 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -38,6 +38,7 @@ int se_dev_set_emulate_3pc(struct se_device *, int); int se_dev_set_pi_prot_type(struct se_device *, int); int se_dev_set_pi_prot_format(struct se_device *, int); int se_dev_set_enforce_pr_isids(struct se_device *, int); +int se_dev_set_force_pr_aptpl(struct se_device *, int); int se_dev_set_is_nonrot(struct se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); int se_dev_set_queue_depth(struct se_device *, u32); @@ -46,7 +47,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); -int core_dev_del_lun(struct se_portal_group *, u32); +void core_dev_del_lun(struct se_portal_group *, struct se_lun *); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, struct se_node_acl *, u32, int *); @@ -82,8 +83,7 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, u32, struct se_device *); -struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); -int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); +void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); /* target_core_transport.c */ extern struct kmem_cache *se_tmr_req_cache; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index df357862286..9f93b823409 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( */ spin_lock(&dev->se_port_lock); 
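
A note on the iscsi_parse_pr_out_transport_id change above: the old expression (buf[2] >> 8) & 0xff shifted a single byte right by 8 and so could never supply the high-order byte, meaning only buf[3] effectively determined add_len; the replacement assembles the 16-bit big-endian additional-length field correctly. A tiny worked illustration (byte values are made up):

unsigned char buf[] = { 0x00, 0x00, 0x01, 0x2c };	/* bytes 2..3 carry the length */
unsigned int add_len = (buf[2] << 8) | buf[3];		/* 0x012c == 300 */
/* equivalently: add_len = get_unaligned_be16(&buf[2]); */
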
list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { - atomic_inc(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic(); + atomic_inc_mb(&port->sep_tg_pt_ref_cnt); spin_unlock(&dev->se_port_lock); spin_lock_bh(&port->sep_alua_lock); @@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) continue; - atomic_inc(&deve_tmp->pr_ref_count); - smp_mb__after_atomic(); + atomic_inc_mb(&deve_tmp->pr_ref_count); spin_unlock_bh(&port->sep_alua_lock); /* * Grab a configfs group dependency that is released @@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( if (ret < 0) { pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); - atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic(); - atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&port->sep_tg_pt_ref_cnt); + atomic_dec_mb(&deve_tmp->pr_ref_count); goto out; } /* @@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( nacl_tmp, deve_tmp, NULL, sa_res_key, all_tg_pt, aptpl); if (!pr_reg_atp) { - atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic(); - atomic_dec(&deve_tmp->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&port->sep_tg_pt_ref_cnt); + atomic_dec_mb(&deve_tmp->pr_ref_count); core_scsi3_lunacl_undepend_item(deve_tmp); goto out; } @@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( spin_unlock_bh(&port->sep_alua_lock); spin_lock(&dev->se_port_lock); - atomic_dec(&port->sep_tg_pt_ref_cnt); - smp_mb__after_atomic(); + atomic_dec_mb(&port->sep_tg_pt_ref_cnt); } spin_unlock(&dev->se_port_lock); @@ -902,6 +895,7 @@ static int __core_scsi3_check_aptpl_registration( spin_lock(&pr_tmpl->aptpl_reg_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, pr_reg_aptpl_list) { + if (!strcmp(pr_reg->pr_iport, i_port) && (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && !(strcmp(pr_reg->pr_tport, t_port)) && @@ -944,10 +938,10 @@ int core_scsi3_check_aptpl_registration( struct se_device *dev, struct se_portal_group *tpg, struct se_lun *lun, - struct se_lun_acl *lun_acl) + struct se_node_acl *nacl, + u32 mapped_lun) { - struct se_node_acl *nacl = lun_acl->se_lun_nacl; - struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; + struct se_dev_entry *deve = nacl->device_list[mapped_lun]; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return 0; @@ -1109,8 +1103,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( if (dev->dev_attrib.enforce_pr_isids) continue; } - atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_inc_mb(&pr_reg->pr_res_holders); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1124,8 +1117,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( if (strcmp(isid, pr_reg->pr_reg_isid)) continue; - atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_inc_mb(&pr_reg->pr_res_holders); spin_unlock(&pr_tmpl->registration_lock); return pr_reg; } @@ -1154,8 +1146,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) { - atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_dec_mb(&pr_reg->pr_res_holders); } static int core_scsi3_check_implicit_release( @@ -1348,8 +1339,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) 
configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &tpg->tpg_group.cg_item); - atomic_dec(&tpg->tpg_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&tpg->tpg_pr_ref_count); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) @@ -1368,16 +1358,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) struct se_portal_group *tpg = nacl->se_tpg; if (nacl->dynamic_node_acl) { - atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&nacl->acl_pr_ref_count); return; } configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &nacl->acl_group.cg_item); - atomic_dec(&nacl->acl_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&nacl->acl_pr_ref_count); } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) @@ -1407,8 +1395,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) * For nacl->dynamic_node_acl=1 */ if (!lun_acl) { - atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&se_deve->pr_ref_count); return; } nacl = lun_acl->se_lun_nacl; @@ -1417,8 +1404,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &lun_acl->se_lun_group.cg_item); - atomic_dec(&se_deve->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&se_deve->pr_ref_count); } static sense_reason_t @@ -1551,15 +1537,13 @@ core_scsi3_decode_spec_i_port( if (!i_str) continue; - atomic_inc(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic(); + atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(tmp_tpg)) { pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); - atomic_dec(&tmp_tpg->tpg_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; } @@ -1571,10 +1555,8 @@ core_scsi3_decode_spec_i_port( spin_lock_irq(&tmp_tpg->acl_node_lock); dest_node_acl = __core_tpg_get_initiator_node_acl( tmp_tpg, i_str); - if (dest_node_acl) { - atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic(); - } + if (dest_node_acl) + atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); spin_unlock_irq(&tmp_tpg->acl_node_lock); if (!dest_node_acl) { @@ -1586,8 +1568,7 @@ core_scsi3_decode_spec_i_port( if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); - atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dest_node_acl->acl_pr_ref_count); core_scsi3_tpg_undepend_item(tmp_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_unmap; @@ -1646,8 +1627,7 @@ core_scsi3_decode_spec_i_port( if (core_scsi3_lunacl_depend_item(dest_se_deve)) { pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); - atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dest_se_deve->pr_ref_count); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2758,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; struct t10_reservation *pr_tmpl = &dev->t10_pr; u32 pr_res_mapped_lun = 0; - int all_reg = 0, calling_it_nexus = 0, released_regs = 0; + int all_reg = 0, calling_it_nexus = 0; + bool sa_res_key_unmatched = sa_res_key != 0; int prh_type = 0, prh_scope = 0; if 
(!se_sess) @@ -2833,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if (!all_reg) { if (pr_reg->pr_res_key != sa_res_key) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; pr_reg_nacl = pr_reg->pr_reg_nacl; @@ -2840,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, calling_it_nexus); - released_regs++; } else { /* * Case for any existing all registrants type @@ -2858,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if ((sa_res_key) && (pr_reg->pr_res_key != sa_res_key)) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; if (calling_it_nexus) @@ -2868,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, 0); - released_regs++; } if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, @@ -2883,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * registered reservation key, then the device server shall * complete the command with RESERVATION CONFLICT status. */ - if (!released_regs) { + if (sa_res_key_unmatched) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return TCM_RESERVATION_CONFLICT; @@ -3167,15 +3148,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, if (!dest_tf_ops) continue; - atomic_inc(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic(); + atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count); spin_unlock(&dev->se_port_lock); if (core_scsi3_tpg_depend_item(dest_se_tpg)) { pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); - atomic_dec(&dest_se_tpg->tpg_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count); ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out_put_pr_reg; } @@ -3271,10 +3250,8 @@ after_iport_check: spin_lock_irq(&dest_se_tpg->acl_node_lock); dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, initiator_str); - if (dest_node_acl) { - atomic_inc(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic(); - } + if (dest_node_acl) + atomic_inc_mb(&dest_node_acl->acl_pr_ref_count); spin_unlock_irq(&dest_se_tpg->acl_node_lock); if (!dest_node_acl) { @@ -3288,8 +3265,7 @@ after_iport_check: if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); - atomic_dec(&dest_node_acl->acl_pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dest_node_acl->acl_pr_ref_count); dest_node_acl = NULL; ret = TCM_INVALID_PARAMETER_LIST; goto out; @@ -3313,8 +3289,7 @@ after_iport_check: if (core_scsi3_lunacl_depend_item(dest_se_deve)) { pr_err("core_scsi3_lunacl_depend_item() failed\n"); - atomic_dec(&dest_se_deve->pr_ref_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dest_se_deve->pr_ref_count); dest_se_deve = NULL; ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto out; @@ -3497,6 +3472,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *cmd) { + struct se_device *dev = cmd->se_dev; unsigned char *cdb = &cmd->t_task_cdb[0]; unsigned char *buf; u64 res_key, sa_res_key; @@ -3561,6 +3537,13 @@ 
target_scsi3_emulate_pr_out(struct se_cmd *cmd) aptpl = (buf[17] & 0x01); unreg = (buf[17] & 0x02); } + /* + * If the backend device has been configured to force APTPL metadata + * write-out, go ahead and propigate aptpl=1 down now. + */ + if (dev->dev_attrib.force_pr_aptpl) + aptpl = 1; + transport_kunmap_data_sg(cmd); buf = NULL; @@ -3803,7 +3786,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - buf[0] = ((add_len << 8) & 0xff); + buf[0] = ((add_len >> 8) & 0xff); buf[1] = (add_len & 0xff); buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ @@ -3879,8 +3862,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) se_tpg = pr_reg->pr_reg_nacl->se_tpg; add_desc_len = 0; - atomic_inc(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_inc_mb(&pr_reg->pr_res_holders); spin_unlock(&pr_tmpl->registration_lock); /* * Determine expected length of $FABRIC_MOD specific @@ -3893,8 +3875,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" " out of buffer: %d\n", cmd->data_length); spin_lock(&pr_tmpl->registration_lock); - atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_dec_mb(&pr_reg->pr_res_holders); break; } /* @@ -3955,8 +3936,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) se_nacl, pr_reg, &format_code, &buf[off+4]); spin_lock(&pr_tmpl->registration_lock); - atomic_dec(&pr_reg->pr_res_holders); - smp_mb__after_atomic(); + atomic_dec_mb(&pr_reg->pr_res_holders); /* * Set the ADDITIONAL DESCRIPTOR LENGTH */ diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 2ee2936fa0b..749fd7bb751 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration( unsigned char *, u16, u32, int, int, u8); extern int core_scsi3_check_aptpl_registration(struct se_device *, struct se_portal_group *, struct se_lun *, - struct se_lun_acl *); + struct se_node_acl *, u32); extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, struct se_node_acl *); extern void core_scsi3_free_all_registrations(struct se_device *); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 70d9f6dabba..7c8291f0bbb 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -749,14 +749,18 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev, ret = -EINVAL; goto out; } - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; pdv->pdv_host_id = arg; pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" " %d\n", phv->phv_host_id, pdv->pdv_host_id); pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; break; case Opt_scsi_channel_id: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; pdv->pdv_channel_id = arg; pr_debug("PSCSI[%d]: Referencing SCSI Channel" " ID: %d\n", phv->phv_host_id, @@ -764,7 +768,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev, pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; break; case Opt_scsi_target_id: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; pdv->pdv_target_id = arg; pr_debug("PSCSI[%d]: Referencing SCSI Target" " ID: %d\n", phv->phv_host_id, @@ -772,7 +778,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev, pdv->pdv_flags |= PDF_HAS_TARGET_ID; break; case Opt_scsi_lun_id: - 
match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; pdv->pdv_lun_id = arg; pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" " %d\n", phv->phv_host_id, pdv->pdv_lun_id); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index bd78d9235ac..ebe62afb957 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -948,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) } /* reject any command that we don't have a handler for */ - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) + if (!cmd->execute_cmd) return TCM_UNSUPPORTED_SCSI_OPCODE; if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index f7cd95e8111..fa5e157db47 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -64,21 +64,17 @@ int core_tmr_alloc_req( } EXPORT_SYMBOL(core_tmr_alloc_req); -void core_tmr_release_req( - struct se_tmr_req *tmr) +void core_tmr_release_req(struct se_tmr_req *tmr) { struct se_device *dev = tmr->tmr_dev; unsigned long flags; - if (!dev) { - kfree(tmr); - return; + if (dev) { + spin_lock_irqsave(&dev->se_tmr_lock, flags); + list_del(&tmr->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); } - spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del(&tmr->tmr_list); - spin_unlock_irqrestore(&dev->se_tmr_lock, flags); - kfree(tmr); } @@ -90,9 +86,8 @@ static void core_tmr_handle_tas_abort( bool remove = true; /* * TASK ABORTED status (TAS) bit support - */ - if ((tmr_nacl && - (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { + */ + if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { remove = false; transport_send_task_abort(cmd); } @@ -120,13 +115,12 @@ void core_tmr_abort_task( struct se_tmr_req *tmr, struct se_session *se_sess) { - struct se_cmd *se_cmd, *tmp_cmd; + struct se_cmd *se_cmd; unsigned long flags; int ref_tag; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_cmd_list, se_cmd_list) { + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { if (dev != se_cmd->se_dev) continue; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index be783f717f1..0696de9553d 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -40,6 +40,7 @@ #include <target/target_core_fabric.h> #include "target_core_internal.h" +#include "target_core_pr.h" extern struct se_device *g_lun0_dev; @@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs( core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, lun_access, acl, tpg); + /* + * Check to see if there are any existing persistent reservation + * APTPL pre-registrations that need to be enabled for this dynamic + * LUN ACL now.. 
+ */ + core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, + lun->unpacked_lun); spin_lock(&tpg->tpg_lun_lock); } spin_unlock(&tpg->tpg_lun_lock); @@ -335,7 +343,7 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg) continue; spin_unlock(&tpg->tpg_lun_lock); - core_dev_del_lun(tpg, lun->unpacked_lun); + core_dev_del_lun(tpg, lun); spin_lock(&tpg->tpg_lun_lock); } spin_unlock(&tpg->tpg_lun_lock); @@ -663,13 +671,6 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) return 0; } -static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg) -{ - struct se_lun *lun = &se_tpg->tpg_virt_lun0; - - core_tpg_post_dellun(se_tpg, lun); -} - int core_tpg_register( struct target_core_fabric_ops *tfo, struct se_wwn *se_wwn, @@ -773,7 +774,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) spin_unlock_irq(&se_tpg->acl_node_lock); if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) - core_tpg_release_virtual_lun0(se_tpg); + core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0); se_tpg->se_tpg_fabric_ptr = NULL; array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); @@ -838,37 +839,7 @@ int core_tpg_add_lun( return 0; } -struct se_lun *core_tpg_pre_dellun( - struct se_portal_group *tpg, - u32 unpacked_lun) -{ - struct se_lun *lun; - - if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" - "-1: %u for Target Portal Group: %u\n", - tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, - TRANSPORT_MAX_LUNS_PER_TPG-1, - tpg->se_tpg_tfo->tpg_get_tag(tpg)); - return ERR_PTR(-EOVERFLOW); - } - - spin_lock(&tpg->tpg_lun_lock); - lun = tpg->tpg_lun_list[unpacked_lun]; - if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { - pr_err("%s Logical Unit Number: %u is not active on" - " Target Portal Group: %u, ignoring request.\n", - tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, - tpg->se_tpg_tfo->tpg_get_tag(tpg)); - spin_unlock(&tpg->tpg_lun_lock); - return ERR_PTR(-ENODEV); - } - spin_unlock(&tpg->tpg_lun_lock); - - return lun; -} - -int core_tpg_post_dellun( +void core_tpg_remove_lun( struct se_portal_group *tpg, struct se_lun *lun) { @@ -882,6 +853,4 @@ int core_tpg_post_dellun( spin_unlock(&tpg->tpg_lun_lock); percpu_ref_exit(&lun->lun_ref); - - return 0; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7fa62fc93e0..be877bf6f73 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -232,6 +232,10 @@ void transport_subsystem_check_init(void) if (ret != 0) pr_err("Unable to load target_core_pscsi\n"); + ret = request_module("target_core_user"); + if (ret != 0) + pr_err("Unable to load target_core_user\n"); + sub_api_initialized = 1; } @@ -752,8 +756,7 @@ void target_qf_do_work(struct work_struct *work) list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); - atomic_dec(&dev->dev_qf_count); - smp_mb__after_atomic(); + atomic_dec_mb(&dev->dev_qf_count); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -1166,7 +1169,6 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Dormant to Active status. 
*/ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); - smp_mb__after_atomic(); pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, dev->transport->name); @@ -1722,8 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) cmd->t_task_cdb[0], cmd->se_ordered_id); return false; case MSG_ORDERED_TAG: - atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic(); + atomic_inc_mb(&dev->dev_ordered_sync); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " " se_ordered_id: %u\n", @@ -1740,8 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic(); + atomic_inc_mb(&dev->simple_cmds); break; } @@ -1845,8 +1845,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) return; if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { - atomic_dec(&dev->simple_cmds); - smp_mb__after_atomic(); + atomic_dec_mb(&dev->simple_cmds); dev->dev_cur_ordered_id++; pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, @@ -1857,8 +1856,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - atomic_dec(&dev->dev_ordered_sync); - smp_mb__after_atomic(); + atomic_dec_mb(&dev->dev_ordered_sync); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -1877,8 +1875,7 @@ static void transport_complete_qf(struct se_cmd *cmd) if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret) - goto out; + goto out; } switch (cmd->data_direction) { @@ -1916,8 +1913,7 @@ static void transport_handle_queue_full( { spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); - atomic_inc(&dev->dev_qf_count); - smp_mb__after_atomic(); + atomic_inc_mb(&dev->dev_qf_count); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); @@ -2296,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) * and let it call back once the write buffers are ready. 
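
A note on the conversions that run through the hunks above: every open-coded atomic_inc()/atomic_dec() followed by smp_mb__after_atomic() is collapsed into a single atomic_inc_mb()/atomic_dec_mb() call. Judging only from the pattern being replaced, the helpers behave roughly like the sketch below; this is not the in-tree definition, which may also order the operation against earlier accesses.

	/* Minimal sketch inferred from the replaced pattern, not the real definition. */
	static inline void atomic_inc_mb(atomic_t *v)
	{
		atomic_inc(v);
		smp_mb__after_atomic();
	}

	static inline void atomic_dec_mb(atomic_t *v)
	{
		atomic_dec(v);
		smp_mb__after_atomic();
	}
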
*/ target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } @@ -2896,7 +2892,6 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { cmd->transport_state |= CMD_T_ABORTED; cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; - smp_mb__after_atomic(); return; } } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 101858e245b..1738b164698 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -161,8 +161,7 @@ int core_scsi3_ua_allocate( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - atomic_inc(&deve->ua_count); - smp_mb__after_atomic(); + atomic_inc_mb(&deve->ua_count); return 0; } list_add_tail(&ua->ua_nacl_list, &deve->ua_list); @@ -174,8 +173,7 @@ int core_scsi3_ua_allocate( nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, asc, ascq); - atomic_inc(&deve->ua_count); - smp_mb__after_atomic(); + atomic_inc_mb(&deve->ua_count); return 0; } @@ -189,8 +187,7 @@ void core_scsi3_ua_release_all( list_del(&ua->ua_nacl_list); kmem_cache_free(se_ua_cache, ua); - atomic_dec(&deve->ua_count); - smp_mb__after_atomic(); + atomic_dec_mb(&deve->ua_count); } spin_unlock(&deve->ua_lock); } @@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition( list_del(&ua->ua_nacl_list); kmem_cache_free(se_ua_cache, ua); - atomic_dec(&deve->ua_count); - smp_mb__after_atomic(); + atomic_dec_mb(&deve->ua_count); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); @@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense( list_del(&ua->ua_nacl_list); kmem_cache_free(se_ua_cache, ua); - atomic_dec(&deve->ua_count); - smp_mb__after_atomic(); + atomic_dec_mb(&deve->ua_count); } spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index be912b36daa..a6b56b364e7 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h @@ -1,4 +1,5 @@ #ifndef TARGET_CORE_UA_H +#define TARGET_CORE_UA_H /* * From spc4r17, Table D.1: ASC and ASCQ Assignement diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c new file mode 100644 index 00000000000..9a1b314f648 --- /dev/null +++ b/drivers/target/target_core_user.c @@ -0,0 +1,1167 @@ +/* + * Copyright (C) 2013 Shaohua Li <shli@kernel.org> + * Copyright (C) 2014 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/idr.h> +#include <linux/timer.h> +#include <linux/parser.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <linux/uio_driver.h> +#include <net/genetlink.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/target_core_backend.h> +#include <linux/target_core_user.h> + +/* + * Define a shared-memory interface for LIO to pass SCSI commands and + * data to userspace for processing. This is to allow backends that + * are too complex for in-kernel support to be possible. + * + * It uses the UIO framework to do a lot of the device-creation and + * introspection work for us. + * + * See the .h file for how the ring is laid out. Note that while the + * command ring is defined, the particulars of the data area are + * not. Offset values in the command entry point to other locations + * internal to the mmap()ed area. There is separate space outside the + * command ring for data buffers. This leaves maximum flexibility for + * moving buffer allocations, or even page flipping or other + * allocation techniques, without altering the command ring layout. + * + * SECURITY: + * The user process must be assumed to be malicious. There's no way to + * prevent it breaking the command ring protocol if it wants, but in + * order to prevent other issues we must only ever read *data* from + * the shared memory area, not offsets or sizes. This applies to + * command ring entries as well as the mailbox. Extra code needed for + * this may have a 'UAM' comment. + */ + + +#define TCMU_TIME_OUT (30 * MSEC_PER_SEC) + +#define CMDR_SIZE (16 * 4096) +#define DATA_SIZE (257 * 4096) + +#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) + +static struct device *tcmu_root_device; + +struct tcmu_hba { + u32 host_id; +}; + +/* User wants all cmds or just some */ +enum passthru_level { + TCMU_PASS_ALL = 0, + TCMU_PASS_IO, + TCMU_PASS_INVALID, +}; + +#define TCMU_CONFIG_LEN 256 + +struct tcmu_dev { + struct se_device se_dev; + + char *name; + struct se_hba *hba; + +#define TCMU_DEV_BIT_OPEN 0 +#define TCMU_DEV_BIT_BROKEN 1 + unsigned long flags; + enum passthru_level pass_level; + + struct uio_info uio_info; + + struct tcmu_mailbox *mb_addr; + size_t dev_size; + u32 cmdr_size; + u32 cmdr_last_cleaned; + /* Offset of data ring from start of mb */ + size_t data_off; + size_t data_size; + /* Ring head + tail values. */ + /* Must add data_off and mb_addr to get the address */ + size_t data_head; + size_t data_tail; + + wait_queue_head_t wait_cmdr; + /* TODO should this be a mutex? 
*/ + spinlock_t cmdr_lock; + + struct idr commands; + spinlock_t commands_lock; + + struct timer_list timeout; + + char dev_config[TCMU_CONFIG_LEN]; +}; + +#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) + +#define CMDR_OFF sizeof(struct tcmu_mailbox) + +struct tcmu_cmd { + struct se_cmd *se_cmd; + struct tcmu_dev *tcmu_dev; + + uint16_t cmd_id; + + /* Can't use se_cmd->data_length when cleaning up expired cmds, because if + cmd has been completed then accessing se_cmd is off limits */ + size_t data_length; + + unsigned long deadline; + +#define TCMU_CMD_BIT_EXPIRED 0 + unsigned long flags; +}; + +static struct kmem_cache *tcmu_cmd_cache; + +/* multicast group */ +enum tcmu_multicast_groups { + TCMU_MCGRP_CONFIG, +}; + +static const struct genl_multicast_group tcmu_mcgrps[] = { + [TCMU_MCGRP_CONFIG] = { .name = "config", }, +}; + +/* Our generic netlink family */ +static struct genl_family tcmu_genl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = 0, + .name = "TCM-USER", + .version = 1, + .maxattr = TCMU_ATTR_MAX, + .mcgrps = tcmu_mcgrps, + .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), +}; + +static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + struct tcmu_cmd *tcmu_cmd; + int cmd_id; + + tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); + if (!tcmu_cmd) + return NULL; + + tcmu_cmd->se_cmd = se_cmd; + tcmu_cmd->tcmu_dev = udev; + tcmu_cmd->data_length = se_cmd->data_length; + + tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); + + idr_preload(GFP_KERNEL); + spin_lock_irq(&udev->commands_lock); + cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0, + USHRT_MAX, GFP_NOWAIT); + spin_unlock_irq(&udev->commands_lock); + idr_preload_end(); + + if (cmd_id < 0) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + tcmu_cmd->cmd_id = cmd_id; + + return tcmu_cmd; +} + +static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) +{ + unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK; + + size = round_up(size+offset, PAGE_SIZE); + vaddr -= offset; + + while (size) { + flush_dcache_page(virt_to_page(vaddr)); + size -= PAGE_SIZE; + } +} + +/* + * Some ring helper functions. We don't assume size is a power of 2 so + * we can't use circ_buf.h. + */ +static inline size_t spc_used(size_t head, size_t tail, size_t size) +{ + int diff = head - tail; + + if (diff >= 0) + return diff; + else + return size + diff; +} + +static inline size_t spc_free(size_t head, size_t tail, size_t size) +{ + /* Keep 1 byte unused or we can't tell full from empty */ + return (size - spc_used(head, tail, size) - 1); +} + +static inline size_t head_to_end(size_t head, size_t size) +{ + return size - head; +} + +#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) + +/* + * We can't queue a command until we have space available on the cmd ring *and* space + * space avail on the data ring. + * + * Called with ring lock held. + */ +static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed) +{ + struct tcmu_mailbox *mb = udev->mb_addr; + size_t space; + u32 cmd_head; + size_t cmd_needed; + + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + + /* + * If cmd end-of-ring space is too small then we need space for a NOP plus + * original cmd - cmds are internally contiguous. 
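
The ring accounting above is small enough to exercise on its own. The following standalone userspace sketch (not part of the patch) re-creates spc_used()/spc_free()/head_to_end() and shows both the keep-one-byte-free convention and how a command near the end of the ring must also reserve the space consumed by the PAD entry described in the comment.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Local copies of the ring helpers above, for a standalone test build. */
	static size_t spc_used(size_t head, size_t tail, size_t size)
	{
		long diff = (long)head - (long)tail;

		return diff >= 0 ? (size_t)diff : size + diff;
	}

	static size_t spc_free(size_t head, size_t tail, size_t size)
	{
		/* Keep 1 byte unused so a full ring is distinguishable from an empty one */
		return size - spc_used(head, tail, size) - 1;
	}

	static size_t head_to_end(size_t head, size_t size)
	{
		return size - head;
	}

	int main(void)
	{
		size_t size = 16 * 4096;	/* example ring size; the real cmdr_size is CMDR_SIZE minus the mailbox */
		size_t head = 65000, tail = 100;
		size_t cmd_size = 1024, cmd_needed;

		/* Empty ring: head == tail, so only size - 1 bytes are ever usable. */
		assert(spc_free(0, 0, size) == size - 1);

		/*
		 * Only 536 contiguous bytes remain before the end of the ring, so a
		 * 1024-byte entry must be preceded by a PAD covering them and
		 * therefore needs 536 + 1024 bytes of free space in total.
		 */
		if (head_to_end(head, size) >= cmd_size)
			cmd_needed = cmd_size;
		else
			cmd_needed = cmd_size + head_to_end(head, size);

		printf("used=%zu free=%zu needed=%zu\n",
		       spc_used(head, tail, size), spc_free(head, tail, size), cmd_needed);
		return 0;
	}
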
+ */ + if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) + cmd_needed = cmd_size; + else + cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); + + space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); + if (space < cmd_needed) { + pr_debug("no cmd space: %u %u %u\n", cmd_head, + udev->cmdr_last_cleaned, udev->cmdr_size); + return false; + } + + space = spc_free(udev->data_head, udev->data_tail, udev->data_size); + if (space < data_needed) { + pr_debug("no data space: %zu %zu %zu\n", udev->data_head, + udev->data_tail, udev->data_size); + return false; + } + + return true; +} + +static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + size_t base_command_size, command_size; + struct tcmu_mailbox *mb; + struct tcmu_cmd_entry *entry; + int i; + struct scatterlist *sg; + struct iovec *iov; + int iov_cnt = 0; + uint32_t cmd_head; + uint64_t cdb_off; + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) + return -EINVAL; + + /* + * Must be a certain minimum size for response sense info, but + * also may be larger if the iov array is large. + * + * iovs = sgl_nents+1, for end-of-ring case, plus another 1 + * b/c size == offsetof one-past-element. + */ + base_command_size = max(offsetof(struct tcmu_cmd_entry, + req.iov[se_cmd->t_data_nents + 2]), + sizeof(struct tcmu_cmd_entry)); + command_size = base_command_size + + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); + + WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); + + spin_lock_irq(&udev->cmdr_lock); + + mb = udev->mb_addr; + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + if ((command_size > (udev->cmdr_size / 2)) + || tcmu_cmd->data_length > (udev->data_size - 1)) + pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " + "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length, + udev->cmdr_size, udev->data_size); + + while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) { + int ret; + DEFINE_WAIT(__wait); + + prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); + + pr_debug("sleeping for ring space\n"); + spin_unlock_irq(&udev->cmdr_lock); + ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); + finish_wait(&udev->wait_cmdr, &__wait); + if (!ret) { + pr_warn("tcmu: command timed out\n"); + return -ETIMEDOUT; + } + + spin_lock_irq(&udev->cmdr_lock); + + /* We dropped cmdr_lock, cmd_head is stale */ + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + } + + /* Insert a PAD if end-of-ring space is too small */ + if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { + size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); + + entry = (void *) mb + CMDR_OFF + cmd_head; + tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD); + tcmu_hdr_set_len(&entry->hdr, pad_size); + + UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); + + cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + WARN_ON(cmd_head != 0); + } + + entry = (void *) mb + CMDR_OFF + cmd_head; + tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD); + tcmu_hdr_set_len(&entry->hdr, command_size); + entry->cmd_id = tcmu_cmd->cmd_id; + + /* + * Fix up iovecs, and handle if allocation in data ring wrapped. 
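
A worked example of the wrap case handled just below, with numbers picked purely for illustration: the data area is DATA_SIZE = 257 * 4096 = 1052672 bytes and starts at data_off = CMDR_SIZE. If data_head is 1052000 when an 8192-byte scatterlist element is copied in, that element is described by two iovecs: 672 bytes at iov_base = data_off + 1052000, then, after UPDATE_HEAD() wraps data_head to 0, the remaining 7520 bytes at iov_base = data_off + 0. As noted in the loop below, these iov_base values are offsets from the start of the shared region (mb_addr), not kernel pointers.
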
+ */ + iov = &entry->req.iov[0]; + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { + size_t copy_bytes = min((size_t)sg->length, + head_to_end(udev->data_head, udev->data_size)); + void *from = kmap_atomic(sg_page(sg)) + sg->offset; + void *to = (void *) mb + udev->data_off + udev->data_head; + + if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) { + memcpy(to, from, copy_bytes); + tcmu_flush_dcache_range(to, copy_bytes); + } + + /* Even iov_base is relative to mb_addr */ + iov->iov_len = copy_bytes; + iov->iov_base = (void *) udev->data_off + udev->data_head; + iov_cnt++; + iov++; + + UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); + + /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ + if (sg->length != copy_bytes) { + from += copy_bytes; + copy_bytes = sg->length - copy_bytes; + + iov->iov_len = copy_bytes; + iov->iov_base = (void *) udev->data_off + udev->data_head; + + if (se_cmd->data_direction == DMA_TO_DEVICE) { + to = (void *) mb + udev->data_off + udev->data_head; + memcpy(to, from, copy_bytes); + tcmu_flush_dcache_range(to, copy_bytes); + } + + iov_cnt++; + iov++; + + UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); + } + + kunmap_atomic(from); + } + entry->req.iov_cnt = iov_cnt; + + /* All offsets relative to mb_addr, not start of entry! */ + cdb_off = CMDR_OFF + cmd_head + base_command_size; + memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); + entry->req.cdb_off = cdb_off; + tcmu_flush_dcache_range(entry, sizeof(*entry)); + + UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + spin_unlock_irq(&udev->cmdr_lock); + + /* TODO: only if FLUSH and FUA? */ + uio_event_notify(&udev->uio_info); + + mod_timer(&udev->timeout, + round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); + + return 0; +} + +static int tcmu_queue_cmd(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + struct tcmu_cmd *tcmu_cmd; + int ret; + + tcmu_cmd = tcmu_alloc_cmd(se_cmd); + if (!tcmu_cmd) + return -ENOMEM; + + ret = tcmu_queue_cmd_ring(tcmu_cmd); + if (ret < 0) { + pr_err("TCMU: Could not queue command\n"); + spin_lock_irq(&udev->commands_lock); + idr_remove(&udev->commands, tcmu_cmd->cmd_id); + spin_unlock_irq(&udev->commands_lock); + + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + } + + return ret; +} + +static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) +{ + struct se_cmd *se_cmd = cmd->se_cmd; + struct tcmu_dev *udev = cmd->tcmu_dev; + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + /* cmd has been completed already from timeout, just reclaim data + ring space */ + UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); + return; + } + + if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { + memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, + se_cmd->scsi_sense_length); + + UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); + } + else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + struct scatterlist *sg; + int i; + + /* It'd be easier to look at entry's iovec again, but UAM */ + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { + size_t copy_bytes; + void *to; + void *from; + + copy_bytes = min((size_t)sg->length, + head_to_end(udev->data_tail, udev->data_size)); + + to = kmap_atomic(sg_page(sg)) + sg->offset; + WARN_ON(sg->length + sg->offset > PAGE_SIZE); + from = (void *) udev->mb_addr + 
udev->data_off + udev->data_tail; + tcmu_flush_dcache_range(from, copy_bytes); + memcpy(to, from, copy_bytes); + + UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); + + /* Uh oh, wrapped the data buffer for this sg's data */ + if (sg->length != copy_bytes) { + from = (void *) udev->mb_addr + udev->data_off + udev->data_tail; + WARN_ON(udev->data_tail); + to += copy_bytes; + copy_bytes = sg->length - copy_bytes; + tcmu_flush_dcache_range(from, copy_bytes); + memcpy(to, from, copy_bytes); + + UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); + } + + kunmap_atomic(to); + } + + } else if (se_cmd->data_direction == DMA_TO_DEVICE) { + UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); + } else { + pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction); + } + + target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); + cmd->se_cmd = NULL; + + kmem_cache_free(tcmu_cmd_cache, cmd); +} + +static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) +{ + struct tcmu_mailbox *mb; + LIST_HEAD(cpl_cmds); + unsigned long flags; + int handled = 0; + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { + pr_err("ring broken, not handling completions\n"); + return 0; + } + + spin_lock_irqsave(&udev->cmdr_lock, flags); + + mb = udev->mb_addr; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { + + struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; + struct tcmu_cmd *cmd; + + tcmu_flush_dcache_range(entry, sizeof(*entry)); + + if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) { + UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); + continue; + } + WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD); + + spin_lock(&udev->commands_lock); + cmd = idr_find(&udev->commands, entry->cmd_id); + if (cmd) + idr_remove(&udev->commands, cmd->cmd_id); + spin_unlock(&udev->commands_lock); + + if (!cmd) { + pr_err("cmd_id not found, ring is broken\n"); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + break; + } + + tcmu_handle_completion(cmd, entry); + + UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size); + + handled++; + } + + if (mb->cmd_tail == mb->cmd_head) + del_timer(&udev->timeout); /* no more pending cmds */ + + spin_unlock_irqrestore(&udev->cmdr_lock, flags); + + wake_up(&udev->wait_cmdr); + + return handled; +} + +static int tcmu_check_expired_cmd(int id, void *p, void *data) +{ + struct tcmu_cmd *cmd = p; + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + return 0; + + if (!time_after(cmd->deadline, jiffies)) + return 0; + + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); + cmd->se_cmd = NULL; + + kmem_cache_free(tcmu_cmd_cache, cmd); + + return 0; +} + +static void tcmu_device_timedout(unsigned long data) +{ + struct tcmu_dev *udev = (struct tcmu_dev *)data; + unsigned long flags; + int handled; + + handled = tcmu_handle_completions(udev); + + pr_warn("%d completions handled from timeout\n", handled); + + spin_lock_irqsave(&udev->commands_lock, flags); + idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + spin_unlock_irqrestore(&udev->commands_lock, flags); + + /* + * We don't need to wakeup threads on wait_cmdr since they have their + * own timeout. 
+ */ +} + +static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) +{ + struct tcmu_hba *tcmu_hba; + + tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); + if (!tcmu_hba) + return -ENOMEM; + + tcmu_hba->host_id = host_id; + hba->hba_ptr = tcmu_hba; + + return 0; +} + +static void tcmu_detach_hba(struct se_hba *hba) +{ + kfree(hba->hba_ptr); + hba->hba_ptr = NULL; +} + +static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) +{ + struct tcmu_dev *udev; + + udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); + if (!udev) + return NULL; + + udev->name = kstrdup(name, GFP_KERNEL); + if (!udev->name) { + kfree(udev); + return NULL; + } + + udev->hba = hba; + + init_waitqueue_head(&udev->wait_cmdr); + spin_lock_init(&udev->cmdr_lock); + + idr_init(&udev->commands); + spin_lock_init(&udev->commands_lock); + + setup_timer(&udev->timeout, tcmu_device_timedout, + (unsigned long)udev); + + udev->pass_level = TCMU_PASS_ALL; + + return &udev->se_dev; +} + +static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) +{ + struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); + + tcmu_handle_completions(tcmu_dev); + + return 0; +} + +/* + * mmap code from uio.c. Copied here because we want to hook mmap() + * and this stuff must come along. + */ +static int tcmu_find_mem_index(struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = vma->vm_private_data; + struct uio_info *info = &udev->uio_info; + + if (vma->vm_pgoff < MAX_UIO_MAPS) { + if (info->mem[vma->vm_pgoff].size == 0) + return -1; + return (int)vma->vm_pgoff; + } + return -1; +} + +static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct tcmu_dev *udev = vma->vm_private_data; + struct uio_info *info = &udev->uio_info; + struct page *page; + unsigned long offset; + void *addr; + + int mi = tcmu_find_mem_index(vma); + if (mi < 0) + return VM_FAULT_SIGBUS; + + /* + * We need to subtract mi because userspace uses offset = N*PAGE_SIZE + * to use mem[N]. + */ + offset = (vmf->pgoff - mi) << PAGE_SHIFT; + + addr = (void *)(unsigned long)info->mem[mi].addr + offset; + if (info->mem[mi].memtype == UIO_MEM_LOGICAL) + page = virt_to_page(addr); + else + page = vmalloc_to_page(addr); + get_page(page); + vmf->page = page; + return 0; +} + +static const struct vm_operations_struct tcmu_vm_ops = { + .fault = tcmu_vma_fault, +}; + +static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &tcmu_vm_ops; + + vma->vm_private_data = udev; + + /* Ensure the mmap is exactly the right size */ + if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) + return -EINVAL; + + return 0; +} + +static int tcmu_open(struct uio_info *info, struct inode *inode) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + /* O_EXCL not supported for char devs, so fake it? 
*/ + if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) + return -EBUSY; + + pr_debug("open\n"); + + return 0; +} + +static int tcmu_release(struct uio_info *info, struct inode *inode) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + + clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); + + pr_debug("close\n"); + + return 0; +} + +static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) +{ + struct sk_buff *skb; + void *msg_header; + int ret = -ENOMEM; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return ret; + + msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); + if (!msg_header) + goto free_skb; + + ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); + if (ret < 0) + goto free_skb; + + ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); + if (ret < 0) + goto free_skb; + + ret = genlmsg_end(skb, msg_header); + if (ret < 0) + goto free_skb; + + ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, + TCMU_MCGRP_CONFIG, GFP_KERNEL); + + /* We don't care if no one is listening */ + if (ret == -ESRCH) + ret = 0; + + return ret; +free_skb: + nlmsg_free(skb); + return ret; +} + +static int tcmu_configure_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + struct tcmu_hba *hba = udev->hba->hba_ptr; + struct uio_info *info; + struct tcmu_mailbox *mb; + size_t size; + size_t used; + int ret = 0; + char *str; + + info = &udev->uio_info; + + size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, + udev->dev_config); + size += 1; /* for \0 */ + str = kmalloc(size, GFP_KERNEL); + if (!str) + return -ENOMEM; + + used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); + + if (udev->dev_config[0]) + snprintf(str + used, size - used, "/%s", udev->dev_config); + + info->name = str; + + udev->mb_addr = vzalloc(TCMU_RING_SIZE); + if (!udev->mb_addr) { + ret = -ENOMEM; + goto err_vzalloc; + } + + /* mailbox fits in first part of CMDR space */ + udev->cmdr_size = CMDR_SIZE - CMDR_OFF; + udev->data_off = CMDR_SIZE; + udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; + + mb = udev->mb_addr; + mb->version = 1; + mb->cmdr_off = CMDR_OFF; + mb->cmdr_size = udev->cmdr_size; + + WARN_ON(!PAGE_ALIGNED(udev->data_off)); + WARN_ON(udev->data_size % PAGE_SIZE); + + info->version = "1"; + + info->mem[0].name = "tcm-user command & data buffer"; + info->mem[0].addr = (phys_addr_t) udev->mb_addr; + info->mem[0].size = TCMU_RING_SIZE; + info->mem[0].memtype = UIO_MEM_VIRTUAL; + + info->irqcontrol = tcmu_irqcontrol; + info->irq = UIO_IRQ_CUSTOM; + + info->mmap = tcmu_mmap; + info->open = tcmu_open; + info->release = tcmu_release; + + ret = uio_register_device(tcmu_root_device, info); + if (ret) + goto err_register; + + /* Other attributes can be configured in userspace */ + dev->dev_attrib.hw_block_size = 512; + dev->dev_attrib.hw_max_sectors = 128; + dev->dev_attrib.hw_queue_depth = 128; + + ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, + udev->uio_info.uio_dev->minor); + if (ret) + goto err_netlink; + + return 0; + +err_netlink: + uio_unregister_device(&udev->uio_info); +err_register: + vfree(udev->mb_addr); +err_vzalloc: + kfree(info->name); + + return ret; +} + +static int tcmu_check_pending_cmd(int id, void *p, void *data) +{ + struct tcmu_cmd *cmd = p; + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + return 0; + return -EINVAL; +} + +static void tcmu_free_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + int i; + + 
del_timer_sync(&udev->timeout); + + vfree(udev->mb_addr); + + /* Upper layer should drain all requests before calling this */ + spin_lock_irq(&udev->commands_lock); + i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL); + idr_destroy(&udev->commands); + spin_unlock_irq(&udev->commands_lock); + WARN_ON(i); + + /* Device was configured */ + if (udev->uio_info.uio_dev) { + tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, + udev->uio_info.uio_dev->minor); + + uio_unregister_device(&udev->uio_info); + kfree(udev->uio_info.name); + kfree(udev->name); + } + + kfree(udev); +} + +enum { + Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, +}; + +static match_table_t tokens = { + {Opt_dev_config, "dev_config=%s"}, + {Opt_dev_size, "dev_size=%u"}, + {Opt_pass_level, "pass_level=%u"}, + {Opt_err, NULL} +}; + +static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, + const char *page, ssize_t count) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + char *orig, *ptr, *opts, *arg_p; + substring_t args[MAX_OPT_ARGS]; + int ret = 0, token; + int arg; + + opts = kstrdup(page, GFP_KERNEL); + if (!opts) + return -ENOMEM; + + orig = opts; + + while ((ptr = strsep(&opts, ",\n")) != NULL) { + if (!*ptr) + continue; + + token = match_token(ptr, tokens, args); + switch (token) { + case Opt_dev_config: + if (match_strlcpy(udev->dev_config, &args[0], + TCMU_CONFIG_LEN) == 0) { + ret = -EINVAL; + break; + } + pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); + break; + case Opt_dev_size: + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); + kfree(arg_p); + if (ret < 0) + pr_err("kstrtoul() failed for dev_size=\n"); + break; + case Opt_pass_level: + match_int(args, &arg); + if (arg >= TCMU_PASS_INVALID) { + pr_warn("TCMU: Invalid pass_level: %d\n", arg); + break; + } + + pr_debug("TCMU: Setting pass_level to %d\n", arg); + udev->pass_level = arg; + break; + default: + break; + } + } + + kfree(orig); + return (!ret) ? count : ret; +} + +static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + ssize_t bl = 0; + + bl = sprintf(b + bl, "Config: %s ", + udev->dev_config[0] ? 
udev->dev_config : "NULL"); + bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", + udev->dev_size, udev->pass_level); + + return bl; +} + +static sector_t tcmu_get_blocks(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + + return div_u64(udev->dev_size - dev->dev_attrib.block_size, + dev->dev_attrib.block_size); +} + +static sense_reason_t +tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, + enum dma_data_direction data_direction) +{ + int ret; + + ret = tcmu_queue_cmd(se_cmd); + + if (ret != 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + else + return TCM_NO_SENSE; +} + +static sense_reason_t +tcmu_pass_op(struct se_cmd *se_cmd) +{ + int ret = tcmu_queue_cmd(se_cmd); + + if (ret != 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + else + return TCM_NO_SENSE; +} + +static struct sbc_ops tcmu_sbc_ops = { + .execute_rw = tcmu_execute_rw, + .execute_sync_cache = tcmu_pass_op, + .execute_write_same = tcmu_pass_op, + .execute_write_same_unmap = tcmu_pass_op, + .execute_unmap = tcmu_pass_op, +}; + +static sense_reason_t +tcmu_parse_cdb(struct se_cmd *cmd) +{ + unsigned char *cdb = cmd->t_task_cdb; + struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev); + sense_reason_t ret; + + switch (udev->pass_level) { + case TCMU_PASS_ALL: + /* We're just like pscsi, then */ + /* + * For REPORT LUNS we always need to emulate the response, for everything + * else, pass it up. + */ + switch (cdb[0]) { + case REPORT_LUNS: + cmd->execute_cmd = spc_emulate_report_luns; + break; + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + /* FALLTHROUGH */ + default: + cmd->execute_cmd = tcmu_pass_op; + } + ret = TCM_NO_SENSE; + break; + case TCMU_PASS_IO: + ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops); + break; + default: + pr_err("Unknown tcm-user pass level %d\n", udev->pass_level); + ret = TCM_CHECK_CONDITION_ABORT_CMD; + } + + return ret; +} + +static struct se_subsystem_api tcmu_template = { + .name = "user", + .inquiry_prod = "USER", + .inquiry_rev = TCMU_VERSION, + .owner = THIS_MODULE, + .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .attach_hba = tcmu_attach_hba, + .detach_hba = tcmu_detach_hba, + .alloc_device = tcmu_alloc_device, + .configure_device = tcmu_configure_device, + .free_device = tcmu_free_device, + .parse_cdb = tcmu_parse_cdb, + .set_configfs_dev_params = tcmu_set_configfs_dev_params, + .show_configfs_dev_params = tcmu_show_configfs_dev_params, + .get_device_type = sbc_get_device_type, + .get_blocks = tcmu_get_blocks, +}; + +static int __init tcmu_module_init(void) +{ + int ret; + + BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); + + tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", + sizeof(struct tcmu_cmd), + __alignof__(struct tcmu_cmd), + 0, NULL); + if (!tcmu_cmd_cache) + return -ENOMEM; + + tcmu_root_device = root_device_register("tcm_user"); + if (IS_ERR(tcmu_root_device)) { + ret = PTR_ERR(tcmu_root_device); + goto out_free_cache; + } + + ret = genl_register_family(&tcmu_genl_family); + if (ret < 0) { + goto out_unreg_device; + } + + ret = transport_subsystem_register(&tcmu_template); + if (ret) + goto out_unreg_genl; + + return 0; + +out_unreg_genl: + genl_unregister_family(&tcmu_genl_family); +out_unreg_device: + root_device_unregister(tcmu_root_device); +out_free_cache: + kmem_cache_destroy(tcmu_cmd_cache); + + return ret; +} + +static void __exit tcmu_module_exit(void) +{ 
+ transport_subsystem_release(&tcmu_template); + genl_unregister_family(&tcmu_genl_family); + root_device_unregister(tcmu_root_device); + kmem_cache_destroy(tcmu_cmd_cache); +} + +MODULE_DESCRIPTION("TCM USER subsystem plugin"); +MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); +MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); +MODULE_LICENSE("GPL"); + +module_init(tcmu_module_init); +module_exit(tcmu_module_exit); diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 21ce50880c7..ccee7e332a4 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -98,7 +98,7 @@ static void ft_tport_delete(struct ft_tport *tport) ft_sess_delete_all(tport); lport = tport->lport; BUG_ON(tport != lport->prov[FC_TYPE_FCP]); - rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL); + RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL); tpg = tport->tpg; if (tpg) { diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index ef5587fe2c6..f554d25b439 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -84,6 +84,16 @@ config THERMAL_GOV_STEP_WISE Enable this to manage platform thermals using a simple linear governor. +config THERMAL_GOV_BANG_BANG + bool "Bang Bang thermal governor" + default n + help + Enable this to manage platform thermals using bang bang governor. + + Say 'Y' here if you want to use two point temperature regulation + used for fans without throttling. Some fan drivers depend on this + governor to be enabled (e.g. acerhdf). + config THERMAL_GOV_USER_SPACE bool "User_space thermal governor" help @@ -207,21 +217,6 @@ config X86_PKG_TEMP_THERMAL two trip points which can be set by user to get notifications via thermal notification methods. -config ACPI_INT3403_THERMAL - tristate "ACPI INT3403 thermal driver" - depends on X86 && ACPI - help - Newer laptops and tablets that use ACPI may have thermal sensors - outside the core CPU/SOC for thermal safety reasons. These - temperature sensors are also exposed for the OS to use via the so - called INT3403 ACPI object. This driver will, on devices that have - such sensors, expose the temperature information from these sensors - to userspace via the normal thermal framework. This means that a wide - range of applications and GUI widgets can show this information to - the user or use this information for making decisions. For example, - the Intel Thermal Daemon can use this information to allow the user - to select his laptop to run without turning on the fans. - config INTEL_SOC_DTS_THERMAL tristate "Intel SoCs DTS thermal driver" depends on X86 && IOSF_MBI @@ -234,6 +229,30 @@ config INTEL_SOC_DTS_THERMAL notification methods.The other trip is a critical trip point, which was set by the driver based on the TJ MAX temperature. +config INT340X_THERMAL + tristate "ACPI INT340X thermal drivers" + depends on X86 && ACPI + select THERMAL_GOV_USER_SPACE + select ACPI_THERMAL_REL + select ACPI_FAN + help + Newer laptops and tablets that use ACPI may have thermal sensors and + other devices with thermal control capabilities outside the core + CPU/SOC, for thermal safety reasons. + They are exposed for the OS to use via the INT3400 ACPI device object + as the master, and INT3401~INT340B ACPI device objects as the slaves. + Enable this to expose the temperature information and cooling ability + from these objects to userspace via the normal thermal framework. 
+ This means that a wide range of applications and GUI widgets can show + the information to the user or use this information for making + decisions. For example, the Intel Thermal Daemon can use this + information to allow the user to select his laptop to run without + turning on the fans. + +config ACPI_THERMAL_REL + tristate + depends on ACPI + menu "Texas Instruments thermal drivers" source "drivers/thermal/ti-soc-thermal/Kconfig" endmenu diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 31e232f84b6..39c4fe87da2 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -11,6 +11,7 @@ thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o # governors thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o +thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o @@ -31,5 +32,5 @@ obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ -obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o +obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ obj-$(CONFIG_ST_THERMAL) += st/ diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 1ab0018271c..ad09e51ffae 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -50,15 +50,14 @@ struct cpufreq_cooling_device { unsigned int cpufreq_state; unsigned int cpufreq_val; struct cpumask allowed_cpus; + struct list_head node; }; static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); static unsigned int cpufreq_dev_count; -/* notify_table passes value to the CPUFREQ_ADJUST callback function. */ -#define NOTIFY_INVALID NULL -static struct cpufreq_cooling_device *notify_device; +static LIST_HEAD(cpufreq_dev_list); /** * get_idr - function to get a unique id. 
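
The hunk above replaces the single notify_device pointer with a cpufreq_dev_list that every registered cooling device joins; the reworked notifier in the next hunk walks that list on each CPUFREQ_ADJUST event, so the thermal clip frequency is re-applied whenever any agent updates a policy rather than only while cpufreq_apply_cooling() is running. For context, a minimal driver-side registration that ends up on that list might look like the sketch below; cpufreq_cooling_register()/cpufreq_cooling_unregister() are the existing entry points, while the surrounding boilerplate is made up for illustration.

	#include <linux/cpu_cooling.h>
	#include <linux/cpumask.h>
	#include <linux/err.h>

	static struct thermal_cooling_device *example_cdev;

	static int example_register_cooling(void)
	{
		struct cpumask clip_cpus;

		cpumask_copy(&clip_cpus, cpu_possible_mask);

		/* Registration is what links the device into cpufreq_dev_list. */
		example_cdev = cpufreq_cooling_register(&clip_cpus);
		if (IS_ERR(example_cdev))
			return PTR_ERR(example_cdev);
		return 0;
	}

	static void example_unregister_cooling(void)
	{
		cpufreq_cooling_unregister(example_cdev);
	}
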
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, cpufreq_device->cpufreq_state = cooling_state; cpufreq_device->cpufreq_val = clip_freq; - notify_device = cpufreq_device; for_each_cpu(cpuid, mask) { if (is_cpufreq_valid(cpuid)) cpufreq_update_policy(cpuid); } - notify_device = NOTIFY_INVALID; - return 0; } @@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, { struct cpufreq_policy *policy = data; unsigned long max_freq = 0; + struct cpufreq_cooling_device *cpufreq_dev; - if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) + if (event != CPUFREQ_ADJUST) return 0; - if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus)) - max_freq = notify_device->cpufreq_val; - else - return 0; + mutex_lock(&cooling_cpufreq_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (!cpumask_test_cpu(policy->cpu, + &cpufreq_dev->allowed_cpus)) + continue; + + if (!cpufreq_dev->cpufreq_val) + cpufreq_dev->cpufreq_val = get_cpu_frequency( + cpumask_any(&cpufreq_dev->allowed_cpus), + cpufreq_dev->cpufreq_state); - /* Never exceed user_policy.max */ - if (max_freq > policy->user_policy.max) - max_freq = policy->user_policy.max; + max_freq = cpufreq_dev->cpufreq_val; - if (policy->max != max_freq) - cpufreq_verify_within_limits(policy, 0, max_freq); + if (policy->max != max_freq) + cpufreq_verify_within_limits(policy, 0, max_freq); + } + mutex_unlock(&cooling_cpufreq_lock); return 0; } @@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np, cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); cpufreq_dev_count++; + list_add(&cpufreq_dev->node, &cpufreq_dev_list); mutex_unlock(&cooling_cpufreq_lock); @@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev = cdev->devdata; mutex_lock(&cooling_cpufreq_lock); + list_del(&cpufreq_dev->node); cpufreq_dev_count--; /* Unregister the notifier for the last cpufreq cooling device */ diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 944ba2f340c..6e0a3fbfae8 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -23,6 +23,7 @@ */ #include <linux/thermal.h> +#include <trace/events/thermal.h> #include "thermal_core.h" @@ -34,6 +35,7 @@ static int get_trip_level(struct thermal_zone_device *tz) { int count = 0; unsigned long trip_temp; + enum thermal_trip_type trip_type; if (tz->trips == 0 || !tz->ops->get_trip_temp) return 0; @@ -43,6 +45,16 @@ static int get_trip_level(struct thermal_zone_device *tz) if (tz->temperature < trip_temp) break; } + + /* + * count > 0 only if temperature is greater than first trip + * point, in which case, trip_point = count - 1 + */ + if (count > 0) { + tz->ops->get_trip_type(tz, count - 1, &trip_type); + trace_thermal_zone_trip(tz, count - 1, trip_type); + } + return count; } diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c new file mode 100644 index 00000000000..c5dd76b2ee7 --- /dev/null +++ b/drivers/thermal/gov_bang_bang.c @@ -0,0 +1,131 @@ +/* + * gov_bang_bang.c - A simple thermal throttling governor using hysteresis + * + * Copyright (C) 2014 Peter Feuerer <peter@piie.net> + * + * Based on step_wise.c with following Copyrights: + * Copyright (C) 2012 Intel Corp + * Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License as published by + * the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + */ + +#include <linux/thermal.h> + +#include "thermal_core.h" + +static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) +{ + long trip_temp; + unsigned long trip_hyst; + struct thermal_instance *instance; + + tz->ops->get_trip_temp(tz, trip, &trip_temp); + tz->ops->get_trip_hyst(tz, trip, &trip_hyst); + + dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n", + trip, trip_temp, tz->temperature, + trip_hyst); + + mutex_lock(&tz->lock); + + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if (instance->trip != trip) + continue; + + /* in case fan is in initial state, switch the fan off */ + if (instance->target == THERMAL_NO_TARGET) + instance->target = 0; + + /* in case fan is neither on nor off set the fan to active */ + if (instance->target != 0 && instance->target != 1) { + pr_warn("Thermal instance %s controlled by bang-bang has unexpected state: %ld\n", + instance->name, instance->target); + instance->target = 1; + } + + /* + * enable fan when temperature exceeds trip_temp and disable + * the fan in case it falls below trip_temp minus hysteresis + */ + if (instance->target == 0 && tz->temperature >= trip_temp) + instance->target = 1; + else if (instance->target == 1 && + tz->temperature < trip_temp - trip_hyst) + instance->target = 0; + + dev_dbg(&instance->cdev->device, "target=%d\n", + (int)instance->target); + + instance->cdev->updated = false; /* cdev needs update */ + } + + mutex_unlock(&tz->lock); +} + +/** + * bang_bang_control - controls devices associated with the given zone + * @tz - thermal_zone_device + * @trip - the trip point + * + * Regulation Logic: a two point regulation, deliver cooling state depending + * on the previous state shown in this diagram: + * + * Fan: OFF ON + * + * | + * | + * trip_temp: +---->+ + * | | ^ + * | | | + * | | Temperature + * (trip_temp - hyst): +<----+ + * | + * | + * | + * + * * If the fan is not running and temperature exceeds trip_temp, the fan + * gets turned on. + * * In case the fan is running, temperature must fall below + * (trip_temp - hyst) so that the fan gets turned off again. 
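
The two-point regulation described above boils down to a small state function. The standalone restatement below (not part of the patch) takes the current fan state, the zone temperature, the trip temperature and the hysteresis, and returns the next fan state; temperatures are in whatever unit the zone reports, millicelsius for most drivers.

	#include <assert.h>
	#include <stdbool.h>

	static bool bang_bang_next_state(bool fan_on, long temp, long trip_temp, long hyst)
	{
		if (!fan_on && temp >= trip_temp)
			return true;	/* turn the fan on at the trip point */
		if (fan_on && temp < trip_temp - hyst)
			return false;	/* only turn it off once below trip - hyst */
		return fan_on;		/* otherwise keep the previous state */
	}

	int main(void)
	{
		long trip = 60000, hyst = 5000;	/* 60 C trip, 5 C hysteresis */

		assert(bang_bang_next_state(false, 59000, trip, hyst) == false);
		assert(bang_bang_next_state(false, 61000, trip, hyst) == true);
		/* Between trip - hyst and trip the fan keeps its previous state. */
		assert(bang_bang_next_state(true, 57000, trip, hyst) == true);
		assert(bang_bang_next_state(true, 54000, trip, hyst) == false);
		return 0;
	}
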
+ * + */ +static int bang_bang_control(struct thermal_zone_device *tz, int trip) +{ + struct thermal_instance *instance; + + thermal_zone_trip_update(tz, trip); + + mutex_lock(&tz->lock); + + list_for_each_entry(instance, &tz->thermal_instances, tz_node) + thermal_cdev_update(instance->cdev); + + mutex_unlock(&tz->lock); + + return 0; +} + +static struct thermal_governor thermal_gov_bang_bang = { + .name = "bang_bang", + .throttle = bang_bang_control, +}; + +int thermal_gov_bang_bang_register(void) +{ + return thermal_register_governor(&thermal_gov_bang_bang); +} + +void thermal_gov_bang_bang_unregister(void) +{ + thermal_unregister_governor(&thermal_gov_bang_bang); +} diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 2c516f2eebe..5a1f1070b70 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -19,6 +19,7 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -31,6 +32,11 @@ #define MISC0 0x0150 #define MISC0_REFTOP_SELBIASOFF (1 << 3) +#define MISC1 0x0160 +#define MISC1_IRQ_TEMPHIGH (1 << 29) +/* Below LOW and PANIC bits are only for TEMPMON_IMX6SX */ +#define MISC1_IRQ_TEMPLOW (1 << 28) +#define MISC1_IRQ_TEMPPANIC (1 << 27) #define TEMPSENSE0 0x0180 #define TEMPSENSE0_ALARM_VALUE_SHIFT 20 @@ -43,6 +49,12 @@ #define TEMPSENSE1 0x0190 #define TEMPSENSE1_MEASURE_FREQ 0xffff +/* Below TEMPSENSE2 is only for TEMPMON_IMX6SX */ +#define TEMPSENSE2 0x0290 +#define TEMPSENSE2_LOW_VALUE_SHIFT 0 +#define TEMPSENSE2_LOW_VALUE_MASK 0xfff +#define TEMPSENSE2_PANIC_VALUE_SHIFT 16 +#define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000 #define OCOTP_ANA1 0x04e0 @@ -66,6 +78,21 @@ enum imx_thermal_trip { #define FACTOR1 15976 #define FACTOR2 4297157 +#define TEMPMON_IMX6Q 1 +#define TEMPMON_IMX6SX 2 + +struct thermal_soc_data { + u32 version; +}; + +static struct thermal_soc_data thermal_imx6q_data = { + .version = TEMPMON_IMX6Q, +}; + +static struct thermal_soc_data thermal_imx6sx_data = { + .version = TEMPMON_IMX6SX, +}; + struct imx_thermal_data { struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; @@ -79,8 +106,21 @@ struct imx_thermal_data { bool irq_enabled; int irq; struct clk *thermal_clk; + const struct thermal_soc_data *socdata; }; +static void imx_set_panic_temp(struct imx_thermal_data *data, + signed long panic_temp) +{ + struct regmap *map = data->tempmon; + int critical_value; + + critical_value = (data->c2 - panic_temp) / data->c1; + regmap_write(map, TEMPSENSE2 + REG_CLR, TEMPSENSE2_PANIC_VALUE_MASK); + regmap_write(map, TEMPSENSE2 + REG_SET, critical_value << + TEMPSENSE2_PANIC_VALUE_SHIFT); +} + static void imx_set_alarm_temp(struct imx_thermal_data *data, signed long alarm_temp) { @@ -142,13 +182,17 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp) /* See imx_get_sensor_data() for formula derivation */ *temp = data->c2 - n_meas * data->c1; - /* Update alarm value to next higher trip point */ - if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive) - imx_set_alarm_temp(data, data->temp_critical); - if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) { - imx_set_alarm_temp(data, data->temp_passive); - dev_dbg(&tz->device, "thermal alarm off: T < %lu\n", - data->alarm_temp / 1000); + /* Update alarm value to next higher trip point for TEMPMON_IMX6Q */ + if (data->socdata->version == TEMPMON_IMX6Q) { + if 
(data->alarm_temp == data->temp_passive && + *temp >= data->temp_passive) + imx_set_alarm_temp(data, data->temp_critical); + if (data->alarm_temp == data->temp_critical && + *temp < data->temp_passive) { + imx_set_alarm_temp(data, data->temp_passive); + dev_dbg(&tz->device, "thermal alarm off: T < %lu\n", + data->alarm_temp / 1000); + } } if (*temp != data->last_temp) { @@ -398,14 +442,27 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev) return IRQ_HANDLED; } +static const struct of_device_id of_imx_thermal_match[] = { + { .compatible = "fsl,imx6q-tempmon", .data = &thermal_imx6q_data, }, + { .compatible = "fsl,imx6sx-tempmon", .data = &thermal_imx6sx_data, }, + { /* end */ } +}; +MODULE_DEVICE_TABLE(of, of_imx_thermal_match); + static int imx_thermal_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(of_imx_thermal_match, &pdev->dev); struct imx_thermal_data *data; struct cpumask clip_cpus; struct regmap *map; int measure_freq; int ret; + if (!cpufreq_get_current_driver()) { + dev_dbg(&pdev->dev, "no cpufreq driver!"); + return -EPROBE_DEFER; + } data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -418,6 +475,20 @@ static int imx_thermal_probe(struct platform_device *pdev) } data->tempmon = map; + data->socdata = of_id->data; + + /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ + if (data->socdata->version == TEMPMON_IMX6SX) { + regmap_write(map, MISC1 + REG_CLR, MISC1_IRQ_TEMPHIGH | + MISC1_IRQ_TEMPLOW | MISC1_IRQ_TEMPPANIC); + /* + * reset value of LOW ALARM is incorrect, set it to lowest + * value to avoid false trigger of low alarm. + */ + regmap_write(map, TEMPSENSE2 + REG_SET, + TEMPSENSE2_LOW_VALUE_MASK); + } + data->irq = platform_get_irq(pdev, 0); if (data->irq < 0) return data->irq; @@ -454,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev) return ret; } + data->thermal_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->thermal_clk)) { + ret = PTR_ERR(data->thermal_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to get thermal clk: %d\n", ret); + cpufreq_cooling_unregister(data->cdev); + return ret; + } + + /* + * Thermal sensor needs clk on to get correct value, normally + * we should enable its clk before taking measurement and disable + * clk after measurement is done, but if alarm function is enabled, + * hardware will auto measure the temperature periodically, so we + * need to keep the clk always on for alarm function. 
+ */ + ret = clk_prepare_enable(data->thermal_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); + cpufreq_cooling_unregister(data->cdev); + return ret; + } + data->tz = thermal_zone_device_register("imx_thermal_zone", IMX_TRIP_NUM, BIT(IMX_TRIP_PASSIVE), data, @@ -464,31 +559,20 @@ static int imx_thermal_probe(struct platform_device *pdev) ret = PTR_ERR(data->tz); dev_err(&pdev->dev, "failed to register thermal zone device %d\n", ret); + clk_disable_unprepare(data->thermal_clk); cpufreq_cooling_unregister(data->cdev); return ret; } - data->thermal_clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(data->thermal_clk)) { - dev_warn(&pdev->dev, "failed to get thermal clk!\n"); - } else { - /* - * Thermal sensor needs clk on to get correct value, normally - * we should enable its clk before taking measurement and disable - * clk after measurement is done, but if alarm function is enabled, - * hardware will auto measure the temperature periodically, so we - * need to keep the clk always on for alarm function. - */ - ret = clk_prepare_enable(data->thermal_clk); - if (ret) - dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret); - } - /* Enable measurements at ~ 10 Hz */ regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq); imx_set_alarm_temp(data, data->temp_passive); + + if (data->socdata->version == TEMPMON_IMX6SX) + imx_set_panic_temp(data, data->temp_critical); + regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); @@ -550,12 +634,6 @@ static int imx_thermal_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, imx_thermal_suspend, imx_thermal_resume); -static const struct of_device_id of_imx_thermal_match[] = { - { .compatible = "fsl,imx6q-tempmon", }, - { /* end */ } -}; -MODULE_DEVICE_TABLE(of, of_imx_thermal_match); - static struct platform_driver imx_thermal = { .driver = { .name = "imx_thermal", diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c deleted file mode 100644 index 17554eeb395..00000000000 --- a/drivers/thermal/int3403_thermal.c +++ /dev/null @@ -1,296 +0,0 @@ -/* - * ACPI INT3403 thermal driver - * Copyright (c) 2013, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/acpi.h> -#include <linux/thermal.h> - -#define INT3403_TYPE_SENSOR 0x03 -#define INT3403_PERF_CHANGED_EVENT 0x80 -#define INT3403_THERMAL_EVENT 0x90 - -#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100) -#define KELVIN_OFFSET 2732 -#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off)) - -#define ACPI_INT3403_CLASS "int3403" -#define ACPI_INT3403_FILE_STATE "state" - -struct int3403_sensor { - struct thermal_zone_device *tzone; - unsigned long *thresholds; - unsigned long crit_temp; - int crit_trip_id; - unsigned long psv_temp; - int psv_trip_id; -}; - -static int sys_get_curr_temp(struct thermal_zone_device *tzone, - unsigned long *temp) -{ - struct acpi_device *device = tzone->devdata; - unsigned long long tmp; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET); - - return 0; -} - -static int sys_get_trip_hyst(struct thermal_zone_device *tzone, - int trip, unsigned long *temp) -{ - struct acpi_device *device = tzone->devdata; - unsigned long long hyst; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst); - if (ACPI_FAILURE(status)) - return -EIO; - - /* - * Thermal hysteresis represents a temperature difference. - * Kelvin and Celsius have same degree size. So the - * conversion here between tenths of degree Kelvin unit - * and Milli-Celsius unit is just to multiply 100. - */ - *temp = hyst * 100; - - return 0; -} - -static int sys_get_trip_temp(struct thermal_zone_device *tzone, - int trip, unsigned long *temp) -{ - struct acpi_device *device = tzone->devdata; - struct int3403_sensor *obj = acpi_driver_data(device); - - if (trip == obj->crit_trip_id) - *temp = obj->crit_temp; - else if (trip == obj->psv_trip_id) - *temp = obj->psv_temp; - else { - /* - * get_trip_temp is a mandatory callback but - * PATx method doesn't return any value, so return - * cached value, which was last set from user space. 
- */ - *temp = obj->thresholds[trip]; - } - - return 0; -} - -static int sys_get_trip_type(struct thermal_zone_device *thermal, - int trip, enum thermal_trip_type *type) -{ - struct acpi_device *device = thermal->devdata; - struct int3403_sensor *obj = acpi_driver_data(device); - - /* Mandatory callback, may not mean much here */ - if (trip == obj->crit_trip_id) - *type = THERMAL_TRIP_CRITICAL; - else - *type = THERMAL_TRIP_PASSIVE; - - return 0; -} - -int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip, - unsigned long temp) -{ - struct acpi_device *device = tzone->devdata; - acpi_status status; - char name[10]; - int ret = 0; - struct int3403_sensor *obj = acpi_driver_data(device); - - snprintf(name, sizeof(name), "PAT%d", trip); - if (acpi_has_method(device->handle, name)) { - status = acpi_execute_simple_method(device->handle, name, - MILLI_CELSIUS_TO_DECI_KELVIN(temp, - KELVIN_OFFSET)); - if (ACPI_FAILURE(status)) - ret = -EIO; - else - obj->thresholds[trip] = temp; - } else { - ret = -EIO; - dev_err(&device->dev, "sys_set_trip_temp: method not found\n"); - } - - return ret; -} - -static struct thermal_zone_device_ops tzone_ops = { - .get_temp = sys_get_curr_temp, - .get_trip_temp = sys_get_trip_temp, - .get_trip_type = sys_get_trip_type, - .set_trip_temp = sys_set_trip_temp, - .get_trip_hyst = sys_get_trip_hyst, -}; - -static void acpi_thermal_notify(struct acpi_device *device, u32 event) -{ - struct int3403_sensor *obj; - - if (!device) - return; - - obj = acpi_driver_data(device); - if (!obj) - return; - - switch (event) { - case INT3403_PERF_CHANGED_EVENT: - break; - case INT3403_THERMAL_EVENT: - thermal_zone_device_update(obj->tzone); - break; - default: - dev_err(&device->dev, "Unsupported event [0x%x]\n", event); - break; - } -} - -static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp) -{ - unsigned long long crt; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET); - - return 0; -} - -static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp) -{ - unsigned long long psv; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET); - - return 0; -} - -static int acpi_int3403_add(struct acpi_device *device) -{ - int result = 0; - unsigned long long ptyp; - acpi_status status; - struct int3403_sensor *obj; - unsigned long long trip_cnt; - int trip_mask = 0; - - if (!device) - return -EINVAL; - - status = acpi_evaluate_integer(device->handle, "PTYP", NULL, &ptyp); - if (ACPI_FAILURE(status)) - return -EINVAL; - - if (ptyp != INT3403_TYPE_SENSOR) - return -EINVAL; - - obj = devm_kzalloc(&device->dev, sizeof(*obj), GFP_KERNEL); - if (!obj) - return -ENOMEM; - - device->driver_data = obj; - - status = acpi_evaluate_integer(device->handle, "PATC", NULL, - &trip_cnt); - if (ACPI_FAILURE(status)) - trip_cnt = 0; - - if (trip_cnt) { - /* We have to cache, thresholds can't be readback */ - obj->thresholds = devm_kzalloc(&device->dev, - sizeof(*obj->thresholds) * trip_cnt, - GFP_KERNEL); - if (!obj->thresholds) - return -ENOMEM; - trip_mask = BIT(trip_cnt) - 1; - } - - obj->psv_trip_id = -1; - if (!sys_get_trip_psv(device, &obj->psv_temp)) - obj->psv_trip_id = trip_cnt++; - - obj->crit_trip_id = -1; - if (!sys_get_trip_crt(device, &obj->crit_temp)) - 
obj->crit_trip_id = trip_cnt++; - - obj->tzone = thermal_zone_device_register(acpi_device_bid(device), - trip_cnt, trip_mask, device, &tzone_ops, - NULL, 0, 0); - if (IS_ERR(obj->tzone)) { - result = PTR_ERR(obj->tzone); - return result; - } - - strcpy(acpi_device_name(device), "INT3403"); - strcpy(acpi_device_class(device), ACPI_INT3403_CLASS); - - return 0; -} - -static int acpi_int3403_remove(struct acpi_device *device) -{ - struct int3403_sensor *obj; - - obj = acpi_driver_data(device); - thermal_zone_device_unregister(obj->tzone); - - return 0; -} - -ACPI_MODULE_NAME("int3403"); -static const struct acpi_device_id int3403_device_ids[] = { - {"INT3403", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, int3403_device_ids); - -static struct acpi_driver acpi_int3403_driver = { - .name = "INT3403", - .class = ACPI_INT3403_CLASS, - .ids = int3403_device_ids, - .ops = { - .add = acpi_int3403_add, - .remove = acpi_int3403_remove, - .notify = acpi_thermal_notify, - }, -}; - -module_acpi_driver(acpi_int3403_driver); - -MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("ACPI INT3403 thermal driver"); diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile new file mode 100644 index 00000000000..ffe40bffaf1 --- /dev/null +++ b/drivers/thermal/int340x_thermal/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o +obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o +obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o +obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c new file mode 100644 index 00000000000..0d8db808f0a --- /dev/null +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c @@ -0,0 +1,400 @@ +/* acpi_thermal_rel.c driver for exporting ACPI thermal relationship + * + * Copyright (c) 2014 Intel Corp + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + */ + +/* + * Two functionalities included: + * 1. Export _TRT, _ART, via misc device interface to the userspace. + * 2. Provide parsing result to kernel drivers + * + */ +#include <linux/init.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/acpi.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include "acpi_thermal_rel.h" + +static acpi_handle acpi_thermal_rel_handle; +static DEFINE_SPINLOCK(acpi_thermal_rel_chrdev_lock); +static int acpi_thermal_rel_chrdev_count; /* #times opened */ +static int acpi_thermal_rel_chrdev_exclu; /* already open exclusive? 
*/ + +static int acpi_thermal_rel_open(struct inode *inode, struct file *file) +{ + spin_lock(&acpi_thermal_rel_chrdev_lock); + if (acpi_thermal_rel_chrdev_exclu || + (acpi_thermal_rel_chrdev_count && (file->f_flags & O_EXCL))) { + spin_unlock(&acpi_thermal_rel_chrdev_lock); + return -EBUSY; + } + + if (file->f_flags & O_EXCL) + acpi_thermal_rel_chrdev_exclu = 1; + acpi_thermal_rel_chrdev_count++; + + spin_unlock(&acpi_thermal_rel_chrdev_lock); + + return nonseekable_open(inode, file); +} + +static int acpi_thermal_rel_release(struct inode *inode, struct file *file) +{ + spin_lock(&acpi_thermal_rel_chrdev_lock); + acpi_thermal_rel_chrdev_count--; + acpi_thermal_rel_chrdev_exclu = 0; + spin_unlock(&acpi_thermal_rel_chrdev_lock); + + return 0; +} + +/** + * acpi_parse_trt - Thermal Relationship Table _TRT for passive cooling + * + * @handle: ACPI handle of the device contains _TRT + * @art_count: the number of valid entries resulted from parsing _TRT + * @artp: pointer to pointer of array of art entries in parsing result + * @create_dev: whether to create platform devices for target and source + * + */ +int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, + bool create_dev) +{ + acpi_status status; + int result = 0; + int i; + int nr_bad_entries = 0; + struct trt *trts; + struct acpi_device *adev; + union acpi_object *p; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer element = { 0, NULL }; + struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" }; + + if (!acpi_has_method(handle, "_TRT")) + return 0; + + status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + p = buffer.pointer; + if (!p || (p->type != ACPI_TYPE_PACKAGE)) { + pr_err("Invalid _TRT data\n"); + result = -EFAULT; + goto end; + } + + *trt_count = p->package.count; + trts = kzalloc(*trt_count * sizeof(struct trt), GFP_KERNEL); + if (!trts) { + result = -ENOMEM; + goto end; + } + + for (i = 0; i < *trt_count; i++) { + struct trt *trt = &trts[i - nr_bad_entries]; + + element.length = sizeof(struct trt); + element.pointer = trt; + + status = acpi_extract_package(&(p->package.elements[i]), + &trt_format, &element); + if (ACPI_FAILURE(status)) { + nr_bad_entries++; + pr_warn("_TRT package %d is invalid, ignored\n", i); + continue; + } + if (!create_dev) + continue; + + result = acpi_bus_get_device(trt->source, &adev); + if (!result) + acpi_create_platform_device(adev); + else + pr_warn("Failed to get source ACPI device\n"); + + result = acpi_bus_get_device(trt->target, &adev); + if (!result) + acpi_create_platform_device(adev); + else + pr_warn("Failed to get target ACPI device\n"); + } + + *trtp = trts; + /* don't count bad entries */ + *trt_count -= nr_bad_entries; +end: + kfree(buffer.pointer); + return result; +} +EXPORT_SYMBOL(acpi_parse_trt); + +/** + * acpi_parse_art - Parse Active Relationship Table _ART + * + * @handle: ACPI handle of the device contains _ART + * @art_count: the number of valid entries resulted from parsing _ART + * @artp: pointer to pointer of array of art entries in parsing result + * @create_dev: whether to create platform devices for target and source + * + */ +int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, + bool create_dev) +{ + acpi_status status; + int result = 0; + int i; + int nr_bad_entries = 0; + struct art *arts; + struct acpi_device *adev; + union acpi_object *p; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer 
element = { 0, NULL }; + struct acpi_buffer art_format = { + sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" }; + + if (!acpi_has_method(handle, "_ART")) + return 0; + + status = acpi_evaluate_object(handle, "_ART", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + p = buffer.pointer; + if (!p || (p->type != ACPI_TYPE_PACKAGE)) { + pr_err("Invalid _ART data\n"); + result = -EFAULT; + goto end; + } + + /* ignore p->package.elements[0], as this is _ART Revision field */ + *art_count = p->package.count - 1; + arts = kzalloc(*art_count * sizeof(struct art), GFP_KERNEL); + if (!arts) { + result = -ENOMEM; + goto end; + } + + for (i = 0; i < *art_count; i++) { + struct art *art = &arts[i - nr_bad_entries]; + + element.length = sizeof(struct art); + element.pointer = art; + + status = acpi_extract_package(&(p->package.elements[i + 1]), + &art_format, &element); + if (ACPI_FAILURE(status)) { + pr_warn("_ART package %d is invalid, ignored", i); + nr_bad_entries++; + continue; + } + if (!create_dev) + continue; + + if (art->source) { + result = acpi_bus_get_device(art->source, &adev); + if (!result) + acpi_create_platform_device(adev); + else + pr_warn("Failed to get source ACPI device\n"); + } + if (art->target) { + result = acpi_bus_get_device(art->target, &adev); + if (!result) + acpi_create_platform_device(adev); + else + pr_warn("Failed to get source ACPI device\n"); + } + } + + *artp = arts; + /* don't count bad entries */ + *art_count -= nr_bad_entries; +end: + kfree(buffer.pointer); + return result; +} +EXPORT_SYMBOL(acpi_parse_art); + + +/* get device name from acpi handle */ +static void get_single_name(acpi_handle handle, char *name) +{ + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; + + if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer))) + pr_warn("Failed get name from handle\n"); + else { + memcpy(name, buffer.pointer, ACPI_NAME_SIZE); + kfree(buffer.pointer); + } +} + +static int fill_art(char __user *ubuf) +{ + int i; + int ret; + int count; + int art_len; + struct art *arts = NULL; + union art_object *art_user; + + ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false); + if (ret) + goto free_art; + art_len = count * sizeof(union art_object); + art_user = kzalloc(art_len, GFP_KERNEL); + if (!art_user) { + ret = -ENOMEM; + goto free_art; + } + /* now fill in user art data */ + for (i = 0; i < count; i++) { + /* userspace art needs device name instead of acpi reference */ + get_single_name(arts[i].source, art_user[i].source_device); + get_single_name(arts[i].target, art_user[i].target_device); + /* copy the rest int data in addition to source and target */ + memcpy(&art_user[i].weight, &arts[i].weight, + sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2)); + } + + if (copy_to_user(ubuf, art_user, art_len)) + ret = -EFAULT; + kfree(art_user); +free_art: + kfree(arts); + return ret; +} + +static int fill_trt(char __user *ubuf) +{ + int i; + int ret; + int count; + int trt_len; + struct trt *trts = NULL; + union trt_object *trt_user; + + ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false); + if (ret) + goto free_trt; + trt_len = count * sizeof(union trt_object); + trt_user = kzalloc(trt_len, GFP_KERNEL); + if (!trt_user) { + ret = -ENOMEM; + goto free_trt; + } + /* now fill in user trt data */ + for (i = 0; i < count; i++) { + /* userspace trt needs device name instead of acpi reference */ + get_single_name(trts[i].source, trt_user[i].source_device); + get_single_name(trts[i].target, trt_user[i].target_device); + trt_user[i].sample_period 
= trts[i].sample_period; + trt_user[i].influence = trts[i].influence; + } + + if (copy_to_user(ubuf, trt_user, trt_len)) + ret = -EFAULT; + kfree(trt_user); +free_trt: + kfree(trts); + return ret; +} + +static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd, + unsigned long __arg) +{ + int ret = 0; + unsigned long length = 0; + unsigned long count = 0; + char __user *arg = (void __user *)__arg; + struct trt *trts; + struct art *arts; + + switch (cmd) { + case ACPI_THERMAL_GET_TRT_COUNT: + ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count, + &trts, false); + kfree(trts); + if (!ret) + return put_user(count, (unsigned long __user *)__arg); + return ret; + case ACPI_THERMAL_GET_TRT_LEN: + ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count, + &trts, false); + kfree(trts); + length = count * sizeof(union trt_object); + if (!ret) + return put_user(length, (unsigned long __user *)__arg); + return ret; + case ACPI_THERMAL_GET_TRT: + return fill_trt(arg); + case ACPI_THERMAL_GET_ART_COUNT: + ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count, + &arts, false); + kfree(arts); + if (!ret) + return put_user(count, (unsigned long __user *)__arg); + return ret; + case ACPI_THERMAL_GET_ART_LEN: + ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count, + &arts, false); + kfree(arts); + length = count * sizeof(union art_object); + if (!ret) + return put_user(length, (unsigned long __user *)__arg); + return ret; + + case ACPI_THERMAL_GET_ART: + return fill_art(arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations acpi_thermal_rel_fops = { + .owner = THIS_MODULE, + .open = acpi_thermal_rel_open, + .release = acpi_thermal_rel_release, + .unlocked_ioctl = acpi_thermal_rel_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice acpi_thermal_rel_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + "acpi_thermal_rel", + &acpi_thermal_rel_fops +}; + +int acpi_thermal_rel_misc_device_add(acpi_handle handle) +{ + acpi_thermal_rel_handle = handle; + + return misc_register(&acpi_thermal_rel_misc_device); +} +EXPORT_SYMBOL(acpi_thermal_rel_misc_device_add); + +int acpi_thermal_rel_misc_device_remove(acpi_handle handle) +{ + misc_deregister(&acpi_thermal_rel_misc_device); + + return 0; +} +EXPORT_SYMBOL(acpi_thermal_rel_misc_device_remove); + +MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>"); +MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com"); +MODULE_DESCRIPTION("Intel acpi thermal rel misc dev driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h new file mode 100644 index 00000000000..f00700bc9d7 --- /dev/null +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h @@ -0,0 +1,84 @@ +#ifndef __ACPI_ACPI_THERMAL_H +#define __ACPI_ACPI_THERMAL_H + +#include <asm/ioctl.h> + +#define ACPI_THERMAL_MAGIC 's' + +#define ACPI_THERMAL_GET_TRT_LEN _IOR(ACPI_THERMAL_MAGIC, 1, unsigned long) +#define ACPI_THERMAL_GET_ART_LEN _IOR(ACPI_THERMAL_MAGIC, 2, unsigned long) +#define ACPI_THERMAL_GET_TRT_COUNT _IOR(ACPI_THERMAL_MAGIC, 3, unsigned long) +#define ACPI_THERMAL_GET_ART_COUNT _IOR(ACPI_THERMAL_MAGIC, 4, unsigned long) + +#define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long) +#define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long) + +struct art { + acpi_handle source; + acpi_handle target; + u64 weight; + u64 ac0_max; + u64 ac1_max; + u64 ac2_max; + u64 ac3_max; + u64 ac4_max; + u64 ac5_max; + u64 ac6_max; + u64 
ac7_max; + u64 ac8_max; + u64 ac9_max; +} __packed; + +struct trt { + acpi_handle source; + acpi_handle target; + u64 influence; + u64 sample_period; + u64 reverved1; + u64 reverved2; + u64 reverved3; + u64 reverved4; +} __packed; + +#define ACPI_NR_ART_ELEMENTS 13 +/* for usrspace */ +union art_object { + struct { + char source_device[8]; /* ACPI single name */ + char target_device[8]; /* ACPI single name */ + u64 weight; + u64 ac0_max_level; + u64 ac1_max_level; + u64 ac2_max_level; + u64 ac3_max_level; + u64 ac4_max_level; + u64 ac5_max_level; + u64 ac6_max_level; + u64 ac7_max_level; + u64 ac8_max_level; + u64 ac9_max_level; + }; + u64 __data[ACPI_NR_ART_ELEMENTS]; +}; + +union trt_object { + struct { + char source_device[8]; /* ACPI single name */ + char target_device[8]; /* ACPI single name */ + u64 influence; + u64 sample_period; + u64 reserved[4]; + }; + u64 __data[8]; +}; + +#ifdef __KERNEL__ +int acpi_thermal_rel_misc_device_add(acpi_handle handle); +int acpi_thermal_rel_misc_device_remove(acpi_handle handle); +int acpi_parse_art(acpi_handle handle, int *art_count, struct art **arts, + bool create_dev); +int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trts, + bool create_dev); +#endif + +#endif /* __ACPI_ACPI_THERMAL_H */ diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c new file mode 100644 index 00000000000..edc1cce117b --- /dev/null +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -0,0 +1,271 @@ +/* + * INT3400 thermal driver + * + * Copyright (C) 2014, Intel Corporation + * Authors: Zhang Rui <rui.zhang@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/acpi.h> +#include <linux/thermal.h> +#include "acpi_thermal_rel.h" + +enum int3400_thermal_uuid { + INT3400_THERMAL_PASSIVE_1, + INT3400_THERMAL_PASSIVE_2, + INT3400_THERMAL_ACTIVE, + INT3400_THERMAL_CRITICAL, + INT3400_THERMAL_COOLING_MODE, + INT3400_THERMAL_MAXIMUM_UUID, +}; + +static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { + "42A441D6-AE6A-462b-A84B-4A8CE79027D3", + "9E04115A-AE87-4D1C-9500-0F3E340BFE75", + "3A95C389-E4B8-4629-A526-C52C88626BAE", + "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", + "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531", +}; + +struct int3400_thermal_priv { + struct acpi_device *adev; + struct thermal_zone_device *thermal; + int mode; + int art_count; + struct art *arts; + int trt_count; + struct trt *trts; + u8 uuid_bitmap; + int rel_misc_dev_res; +}; + +static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *obja, *objb; + int i, j; + int result = 0; + acpi_status status; + + status = acpi_evaluate_object(priv->adev->handle, "IDSP", NULL, &buf); + if (ACPI_FAILURE(status)) + return -ENODEV; + + obja = (union acpi_object *)buf.pointer; + if (obja->type != ACPI_TYPE_PACKAGE) { + result = -EINVAL; + goto end; + } + + for (i = 0; i < obja->package.count; i++) { + objb = &obja->package.elements[i]; + if (objb->type != ACPI_TYPE_BUFFER) { + result = -EINVAL; + goto end; + } + + /* UUID must be 16 bytes */ + if (objb->buffer.length != 16) { + result = -EINVAL; + goto end; + } + + for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) { + u8 uuid[16]; + + acpi_str_to_uuid(int3400_thermal_uuids[j], uuid); + if (!strncmp(uuid, objb->buffer.pointer, 16)) { + priv->uuid_bitmap |= (1 << j); + break; + } + } + } + +end: + kfree(buf.pointer); + return result; +} + +static int int3400_thermal_run_osc(acpi_handle handle, + enum int3400_thermal_uuid uuid, bool enable) +{ + u32 ret, buf[2]; + acpi_status status; + int result = 0; + struct acpi_osc_context context = { + .uuid_str = int3400_thermal_uuids[uuid], + .rev = 1, + .cap.length = 8, + }; + + buf[OSC_QUERY_DWORD] = 0; + buf[OSC_SUPPORT_DWORD] = enable; + + context.cap.pointer = buf; + + status = acpi_run_osc(handle, &context); + if (ACPI_SUCCESS(status)) { + ret = *((u32 *)(context.ret.pointer + 4)); + if (ret != enable) + result = -EPERM; + } else + result = -EPERM; + + kfree(context.ret.pointer); + return result; +} + +static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, + unsigned long *temp) +{ + *temp = 20 * 1000; /* faked temp sensor with 20C */ + return 0; +} + +static int int3400_thermal_get_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode *mode) +{ + struct int3400_thermal_priv *priv = thermal->devdata; + + if (!priv) + return -EINVAL; + + *mode = priv->mode; + + return 0; +} + +static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) +{ + struct int3400_thermal_priv *priv = thermal->devdata; + bool enable; + int result = 0; + + if (!priv) + return -EINVAL; + + if (mode == THERMAL_DEVICE_ENABLED) + enable = true; + else if (mode == THERMAL_DEVICE_DISABLED) + enable = false; + else + return -EINVAL; + + if (enable != priv->mode) { + priv->mode = enable; + /* currently, only PASSIVE COOLING is supported */ + result = int3400_thermal_run_osc(priv->adev->handle, + INT3400_THERMAL_PASSIVE_1, enable); + } + return result; +} + +static struct 
thermal_zone_device_ops int3400_thermal_ops = { + .get_temp = int3400_thermal_get_temp, +}; + +static struct thermal_zone_params int3400_thermal_params = { + .governor_name = "user_space", + .no_hwmon = true, +}; + +static int int3400_thermal_probe(struct platform_device *pdev) +{ + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + struct int3400_thermal_priv *priv; + int result; + + if (!adev) + return -ENODEV; + + priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->adev = adev; + + result = int3400_thermal_get_uuids(priv); + if (result) + goto free_priv; + + result = acpi_parse_art(priv->adev->handle, &priv->art_count, + &priv->arts, true); + if (result) + goto free_priv; + + + result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, + &priv->trts, true); + if (result) + goto free_art; + + platform_set_drvdata(pdev, priv); + + if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { + int3400_thermal_ops.get_mode = int3400_thermal_get_mode; + int3400_thermal_ops.set_mode = int3400_thermal_set_mode; + } + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, + priv, &int3400_thermal_ops, + &int3400_thermal_params, 0, 0); + if (IS_ERR(priv->thermal)) { + result = PTR_ERR(priv->thermal); + goto free_trt; + } + + priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( + priv->adev->handle); + + return 0; +free_trt: + kfree(priv->trts); +free_art: + kfree(priv->arts); +free_priv: + kfree(priv); + return result; +} + +static int int3400_thermal_remove(struct platform_device *pdev) +{ + struct int3400_thermal_priv *priv = platform_get_drvdata(pdev); + + if (!priv->rel_misc_dev_res) + acpi_thermal_rel_misc_device_remove(priv->adev->handle); + + thermal_zone_device_unregister(priv->thermal); + kfree(priv->trts); + kfree(priv->arts); + kfree(priv); + return 0; +} + +static const struct acpi_device_id int3400_thermal_match[] = { + {"INT3400", 0}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, int3400_thermal_match); + +static struct platform_driver int3400_thermal_driver = { + .probe = int3400_thermal_probe, + .remove = int3400_thermal_remove, + .driver = { + .name = "int3400 thermal", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(int3400_thermal_match), + }, +}; + +module_platform_driver(int3400_thermal_driver); + +MODULE_DESCRIPTION("INT3400 Thermal driver"); +MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c new file mode 100644 index 00000000000..a5d08c14ba2 --- /dev/null +++ b/drivers/thermal/int340x_thermal/int3402_thermal.c @@ -0,0 +1,242 @@ +/* + * INT3402 thermal driver for memory temperature reporting + * + * Copyright (C) 2014, Intel Corporation + * Authors: Aaron Lu <aaron.lu@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/acpi.h> +#include <linux/thermal.h> + +#define ACPI_ACTIVE_COOLING_MAX_NR 10 + +struct active_trip { + unsigned long temp; + int id; + bool valid; +}; + +struct int3402_thermal_data { + unsigned long *aux_trips; + int aux_trip_nr; + unsigned long psv_temp; + int psv_trip_id; + unsigned long crt_temp; + int crt_trip_id; + unsigned long hot_temp; + int hot_trip_id; + struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR]; + acpi_handle *handle; +}; + +static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone, + unsigned long *temp) +{ + struct int3402_thermal_data *d = zone->devdata; + unsigned long long tmp; + acpi_status status; + + status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp); + if (ACPI_FAILURE(status)) + return -ENODEV; + + /* _TMP returns the temperature in tenths of degrees Kelvin */ + *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp); + + return 0; +} + +static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone, + int trip, unsigned long *temp) +{ + struct int3402_thermal_data *d = zone->devdata; + int i; + + if (trip < d->aux_trip_nr) + *temp = d->aux_trips[trip]; + else if (trip == d->crt_trip_id) + *temp = d->crt_temp; + else if (trip == d->psv_trip_id) + *temp = d->psv_temp; + else if (trip == d->hot_trip_id) + *temp = d->hot_temp; + else { + for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { + if (d->act_trips[i].valid && + d->act_trips[i].id == trip) { + *temp = d->act_trips[i].temp; + break; + } + } + if (i == ACPI_ACTIVE_COOLING_MAX_NR) + return -EINVAL; + } + return 0; +} + +static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone, + int trip, enum thermal_trip_type *type) +{ + struct int3402_thermal_data *d = zone->devdata; + int i; + + if (trip < d->aux_trip_nr) + *type = THERMAL_TRIP_PASSIVE; + else if (trip == d->crt_trip_id) + *type = THERMAL_TRIP_CRITICAL; + else if (trip == d->hot_trip_id) + *type = THERMAL_TRIP_HOT; + else if (trip == d->psv_trip_id) + *type = THERMAL_TRIP_PASSIVE; + else { + for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { + if (d->act_trips[i].valid && + d->act_trips[i].id == trip) { + *type = THERMAL_TRIP_ACTIVE; + break; + } + } + if (i == ACPI_ACTIVE_COOLING_MAX_NR) + return -EINVAL; + } + return 0; +} + +static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip, + unsigned long temp) +{ + struct int3402_thermal_data *d = zone->devdata; + acpi_status status; + char name[10]; + + snprintf(name, sizeof(name), "PAT%d", trip); + status = acpi_execute_simple_method(d->handle, name, + MILLICELSIUS_TO_DECI_KELVIN(temp)); + if (ACPI_FAILURE(status)) + return -EIO; + + d->aux_trips[trip] = temp; + return 0; +} + +static struct thermal_zone_device_ops int3402_thermal_zone_ops = { + .get_temp = int3402_thermal_get_zone_temp, + .get_trip_temp = int3402_thermal_get_trip_temp, + .get_trip_type = int3402_thermal_get_trip_type, + .set_trip_temp = int3402_thermal_set_trip_temp, +}; + +static struct thermal_zone_params int3402_thermal_params = { + .governor_name = "user_space", + .no_hwmon = true, +}; + +static int int3402_thermal_get_temp(acpi_handle handle, char *name, + unsigned long *temp) +{ + unsigned long long r; + acpi_status status; + + status = acpi_evaluate_integer(handle, name, NULL, &r); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLICELSIUS(r); + return 0; +} + +static int int3402_thermal_probe(struct platform_device *pdev) +{ + struct acpi_device 
*adev = ACPI_COMPANION(&pdev->dev); + struct int3402_thermal_data *d; + struct thermal_zone_device *zone; + acpi_status status; + unsigned long long trip_cnt; + int trip_mask = 0, i; + + if (!acpi_has_method(adev->handle, "_TMP")) + return -ENODEV; + + d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt); + if (ACPI_FAILURE(status)) + trip_cnt = 0; + else { + d->aux_trips = devm_kzalloc(&pdev->dev, + sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL); + if (!d->aux_trips) + return -ENOMEM; + trip_mask = trip_cnt - 1; + d->handle = adev->handle; + d->aux_trip_nr = trip_cnt; + } + + d->crt_trip_id = -1; + if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp)) + d->crt_trip_id = trip_cnt++; + d->hot_trip_id = -1; + if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp)) + d->hot_trip_id = trip_cnt++; + d->psv_trip_id = -1; + if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp)) + d->psv_trip_id = trip_cnt++; + for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { + char name[5] = { '_', 'A', 'C', '0' + i, '\0' }; + if (int3402_thermal_get_temp(adev->handle, name, + &d->act_trips[i].temp)) + break; + d->act_trips[i].id = trip_cnt++; + d->act_trips[i].valid = true; + } + + zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt, + trip_mask, d, + &int3402_thermal_zone_ops, + &int3402_thermal_params, + 0, 0); + if (IS_ERR(zone)) + return PTR_ERR(zone); + platform_set_drvdata(pdev, zone); + + return 0; +} + +static int int3402_thermal_remove(struct platform_device *pdev) +{ + struct thermal_zone_device *zone = platform_get_drvdata(pdev); + + thermal_zone_device_unregister(zone); + return 0; +} + +static const struct acpi_device_id int3402_thermal_match[] = { + {"INT3402", 0}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, int3402_thermal_match); + +static struct platform_driver int3402_thermal_driver = { + .probe = int3402_thermal_probe, + .remove = int3402_thermal_remove, + .driver = { + .name = "int3402 thermal", + .owner = THIS_MODULE, + .acpi_match_table = int3402_thermal_match, + }, +}; + +module_platform_driver(int3402_thermal_driver); + +MODULE_DESCRIPTION("INT3402 Thermal driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c new file mode 100644 index 00000000000..6e9fb62eb81 --- /dev/null +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -0,0 +1,483 @@ +/* + * ACPI INT3403 thermal driver + * Copyright (c) 2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/acpi.h> +#include <linux/thermal.h> +#include <linux/platform_device.h> + +#define INT3403_TYPE_SENSOR 0x03 +#define INT3403_TYPE_CHARGER 0x0B +#define INT3403_TYPE_BATTERY 0x0C +#define INT3403_PERF_CHANGED_EVENT 0x80 +#define INT3403_THERMAL_EVENT 0x90 + +#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100) +#define KELVIN_OFFSET 2732 +#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off)) + +struct int3403_sensor { + struct thermal_zone_device *tzone; + unsigned long *thresholds; + unsigned long crit_temp; + int crit_trip_id; + unsigned long psv_temp; + int psv_trip_id; + +}; + +struct int3403_performance_state { + u64 performance; + u64 power; + u64 latency; + u64 linear; + u64 control; + u64 raw_performace; + char *raw_unit; + int reserved; +}; + +struct int3403_cdev { + struct thermal_cooling_device *cdev; + unsigned long max_state; +}; + +struct int3403_priv { + struct platform_device *pdev; + struct acpi_device *adev; + unsigned long long type; + void *priv; +}; + +static int sys_get_curr_temp(struct thermal_zone_device *tzone, + unsigned long *temp) +{ + struct int3403_priv *priv = tzone->devdata; + struct acpi_device *device = priv->adev; + unsigned long long tmp; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET); + + return 0; +} + +static int sys_get_trip_hyst(struct thermal_zone_device *tzone, + int trip, unsigned long *temp) +{ + struct int3403_priv *priv = tzone->devdata; + struct acpi_device *device = priv->adev; + unsigned long long hyst; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst); + if (ACPI_FAILURE(status)) + return -EIO; + + /* + * Thermal hysteresis represents a temperature difference. + * Kelvin and Celsius have same degree size. So the + * conversion here between tenths of degree Kelvin unit + * and Milli-Celsius unit is just to multiply 100. 
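+ *
+ * Worked example (hypothetical value, not from this patch): a GTSH
+ * return of 20, i.e. 2.0 degrees of hysteresis expressed in tenths of
+ * a degree Kelvin, becomes 20 * 100 = 2000 millicelsius here. No 2732
+ * offset is applied because this is a temperature difference, unlike
+ * an absolute reading such as _TMP = 3032, which
+ * DECI_KELVIN_TO_MILLI_CELSIUS() turns into (3032 - 2732) * 100 =
+ * 30000 millicelsius, i.e. 30 C.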
+ */ + *temp = hyst * 100; + + return 0; +} + +static int sys_get_trip_temp(struct thermal_zone_device *tzone, + int trip, unsigned long *temp) +{ + struct int3403_priv *priv = tzone->devdata; + struct int3403_sensor *obj = priv->priv; + + if (priv->type != INT3403_TYPE_SENSOR || !obj) + return -EINVAL; + + if (trip == obj->crit_trip_id) + *temp = obj->crit_temp; + else if (trip == obj->psv_trip_id) + *temp = obj->psv_temp; + else { + /* + * get_trip_temp is a mandatory callback but + * PATx method doesn't return any value, so return + * cached value, which was last set from user space + */ + *temp = obj->thresholds[trip]; + } + + return 0; +} + +static int sys_get_trip_type(struct thermal_zone_device *thermal, + int trip, enum thermal_trip_type *type) +{ + struct int3403_priv *priv = thermal->devdata; + struct int3403_sensor *obj = priv->priv; + + /* Mandatory callback, may not mean much here */ + if (trip == obj->crit_trip_id) + *type = THERMAL_TRIP_CRITICAL; + else + *type = THERMAL_TRIP_PASSIVE; + + return 0; +} + +int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip, + unsigned long temp) +{ + struct int3403_priv *priv = tzone->devdata; + struct acpi_device *device = priv->adev; + struct int3403_sensor *obj = priv->priv; + acpi_status status; + char name[10]; + int ret = 0; + + snprintf(name, sizeof(name), "PAT%d", trip); + if (acpi_has_method(device->handle, name)) { + status = acpi_execute_simple_method(device->handle, name, + MILLI_CELSIUS_TO_DECI_KELVIN(temp, + KELVIN_OFFSET)); + if (ACPI_FAILURE(status)) + ret = -EIO; + else + obj->thresholds[trip] = temp; + } else { + ret = -EIO; + dev_err(&device->dev, "sys_set_trip_temp: method not found\n"); + } + + return ret; +} + +static struct thermal_zone_device_ops tzone_ops = { + .get_temp = sys_get_curr_temp, + .get_trip_temp = sys_get_trip_temp, + .get_trip_type = sys_get_trip_type, + .set_trip_temp = sys_set_trip_temp, + .get_trip_hyst = sys_get_trip_hyst, +}; + +static struct thermal_zone_params int3403_thermal_params = { + .governor_name = "user_space", + .no_hwmon = true, +}; + +static void int3403_notify(acpi_handle handle, + u32 event, void *data) +{ + struct int3403_priv *priv = data; + struct int3403_sensor *obj; + + if (!priv) + return; + + obj = priv->priv; + if (priv->type != INT3403_TYPE_SENSOR || !obj) + return; + + switch (event) { + case INT3403_PERF_CHANGED_EVENT: + break; + case INT3403_THERMAL_EVENT: + thermal_zone_device_update(obj->tzone); + break; + default: + dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); + break; + } +} + +static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp) +{ + unsigned long long crt; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET); + + return 0; +} + +static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp) +{ + unsigned long long psv; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET); + + return 0; +} + +static int int3403_sensor_add(struct int3403_priv *priv) +{ + int result = 0; + acpi_status status; + struct int3403_sensor *obj; + unsigned long long trip_cnt; + int trip_mask = 0; + + obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + priv->priv = obj; + + status = 
acpi_evaluate_integer(priv->adev->handle, "PATC", NULL, + &trip_cnt); + if (ACPI_FAILURE(status)) + trip_cnt = 0; + + if (trip_cnt) { + /* We have to cache, thresholds can't be readback */ + obj->thresholds = devm_kzalloc(&priv->pdev->dev, + sizeof(*obj->thresholds) * trip_cnt, + GFP_KERNEL); + if (!obj->thresholds) { + result = -ENOMEM; + goto err_free_obj; + } + trip_mask = BIT(trip_cnt) - 1; + } + + obj->psv_trip_id = -1; + if (!sys_get_trip_psv(priv->adev, &obj->psv_temp)) + obj->psv_trip_id = trip_cnt++; + + obj->crit_trip_id = -1; + if (!sys_get_trip_crt(priv->adev, &obj->crit_temp)) + obj->crit_trip_id = trip_cnt++; + + obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev), + trip_cnt, trip_mask, priv, &tzone_ops, + &int3403_thermal_params, 0, 0); + if (IS_ERR(obj->tzone)) { + result = PTR_ERR(obj->tzone); + obj->tzone = NULL; + goto err_free_obj; + } + + result = acpi_install_notify_handler(priv->adev->handle, + ACPI_DEVICE_NOTIFY, int3403_notify, + (void *)priv); + if (result) + goto err_free_obj; + + return 0; + + err_free_obj: + if (obj->tzone) + thermal_zone_device_unregister(obj->tzone); + return result; +} + +static int int3403_sensor_remove(struct int3403_priv *priv) +{ + struct int3403_sensor *obj = priv->priv; + + thermal_zone_device_unregister(obj->tzone); + return 0; +} + +/* INT3403 Cooling devices */ +static int int3403_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct int3403_priv *priv = cdev->devdata; + struct int3403_cdev *obj = priv->priv; + + *state = obj->max_state; + return 0; +} + +static int int3403_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct int3403_priv *priv = cdev->devdata; + unsigned long long level; + acpi_status status; + + status = acpi_evaluate_integer(priv->adev->handle, "PPPC", NULL, &level); + if (ACPI_SUCCESS(status)) { + *state = level; + return 0; + } else + return -EINVAL; +} + +static int +int3403_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) +{ + struct int3403_priv *priv = cdev->devdata; + acpi_status status; + + status = acpi_execute_simple_method(priv->adev->handle, "SPPC", state); + if (ACPI_SUCCESS(status)) + return 0; + else + return -EINVAL; +} + +static const struct thermal_cooling_device_ops int3403_cooling_ops = { + .get_max_state = int3403_get_max_state, + .get_cur_state = int3403_get_cur_state, + .set_cur_state = int3403_set_cur_state, +}; + +static int int3403_cdev_add(struct int3403_priv *priv) +{ + int result = 0; + acpi_status status; + struct int3403_cdev *obj; + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *p; + + obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + status = acpi_evaluate_object(priv->adev->handle, "PPSS", NULL, &buf); + if (ACPI_FAILURE(status)) + return -ENODEV; + + p = buf.pointer; + if (!p || (p->type != ACPI_TYPE_PACKAGE)) { + printk(KERN_WARNING "Invalid PPSS data\n"); + return -EFAULT; + } + + obj->max_state = p->package.count - 1; + obj->cdev = + thermal_cooling_device_register(acpi_device_bid(priv->adev), + priv, &int3403_cooling_ops); + if (IS_ERR(obj->cdev)) + result = PTR_ERR(obj->cdev); + + priv->priv = obj; + + /* TODO: add ACPI notification support */ + + return result; +} + +static int int3403_cdev_remove(struct int3403_priv *priv) +{ + struct int3403_cdev *obj = priv->priv; + + thermal_cooling_device_unregister(obj->cdev); + return 0; +} + +static int int3403_add(struct platform_device *pdev) +{ 
+ struct int3403_priv *priv; + int result = 0; + acpi_status status; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pdev = pdev; + priv->adev = ACPI_COMPANION(&(pdev->dev)); + if (!priv->adev) { + result = -EINVAL; + goto err; + } + + status = acpi_evaluate_integer(priv->adev->handle, "PTYP", + NULL, &priv->type); + if (ACPI_FAILURE(status)) { + result = -EINVAL; + goto err; + } + + platform_set_drvdata(pdev, priv); + switch (priv->type) { + case INT3403_TYPE_SENSOR: + result = int3403_sensor_add(priv); + break; + case INT3403_TYPE_CHARGER: + case INT3403_TYPE_BATTERY: + result = int3403_cdev_add(priv); + break; + default: + result = -EINVAL; + } + + if (result) + goto err; + return result; + +err: + return result; +} + +static int int3403_remove(struct platform_device *pdev) +{ + struct int3403_priv *priv = platform_get_drvdata(pdev); + + switch (priv->type) { + case INT3403_TYPE_SENSOR: + int3403_sensor_remove(priv); + break; + case INT3403_TYPE_CHARGER: + case INT3403_TYPE_BATTERY: + int3403_cdev_remove(priv); + break; + default: + break; + } + + return 0; +} + +static const struct acpi_device_id int3403_device_ids[] = { + {"INT3403", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, int3403_device_ids); + +static struct platform_driver int3403_driver = { + .probe = int3403_add, + .remove = int3403_remove, + .driver = { + .name = "int3403 thermal", + .owner = THIS_MODULE, + .acpi_match_table = int3403_device_ids, + }, +}; + +module_platform_driver(int3403_driver); + +MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ACPI INT3403 thermal driver"); diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4b2b999b761..62143ba3100 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -387,20 +387,27 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, int (*get_trend)(void *, long *)) { struct device_node *np, *child, *sensor_np; + struct thermal_zone_device *tzd = ERR_PTR(-ENODEV); np = of_find_node_by_name(NULL, "thermal-zones"); if (!np) return ERR_PTR(-ENODEV); - if (!dev || !dev->of_node) + if (!dev || !dev->of_node) { + of_node_put(np); return ERR_PTR(-EINVAL); + } - sensor_np = dev->of_node; + sensor_np = of_node_get(dev->of_node); for_each_child_of_node(np, child) { struct of_phandle_args sensor_specs; int ret, id; + /* Check whether child is enabled or not */ + if (!of_device_is_available(child)) + continue; + /* For now, thermal framework supports only 1 sensor per zone */ ret = of_parse_phandle_with_args(child, "thermal-sensors", "#thermal-sensor-cells", @@ -418,16 +425,21 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, } if (sensor_specs.np == sensor_np && id == sensor_id) { - of_node_put(np); - return thermal_zone_of_add_sensor(child, sensor_np, - data, - get_temp, - get_trend); + tzd = thermal_zone_of_add_sensor(child, sensor_np, + data, + get_temp, + get_trend); + of_node_put(sensor_specs.np); + of_node_put(child); + goto exit; } + of_node_put(sensor_specs.np); } +exit: + of_node_put(sensor_np); of_node_put(np); - return ERR_PTR(-ENODEV); + return tzd; } EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register); @@ -619,6 +631,7 @@ static int thermal_of_populate_trip(struct device_node *np, /* Required for cooling map matching */ trip->np = np; + of_node_get(np); return 0; } @@ -726,9 +739,14 @@ finish: return tz; free_tbps: + for (i = 0; i < tz->num_tbps; 
i++) + of_node_put(tz->tbps[i].cooling_device); kfree(tz->tbps); free_trips: + for (i = 0; i < tz->ntrips; i++) + of_node_put(tz->trips[i].np); kfree(tz->trips); + of_node_put(gchild); free_tz: kfree(tz); of_node_put(child); @@ -738,7 +756,13 @@ free_tz: static inline void of_thermal_free_zone(struct __thermal_zone *tz) { + int i; + + for (i = 0; i < tz->num_tbps; i++) + of_node_put(tz->tbps[i].cooling_device); kfree(tz->tbps); + for (i = 0; i < tz->ntrips; i++) + of_node_put(tz->trips[i].np); kfree(tz->trips); kfree(tz); } @@ -771,6 +795,10 @@ int __init of_parse_thermal_zones(void) struct thermal_zone_device *zone; struct thermal_zone_params *tzp; + /* Check whether child is enabled or not */ + if (!of_device_is_available(child)) + continue; + tz = thermal_of_build_thermal_zone(child); if (IS_ERR(tz)) { pr_err("failed to build thermal zone %s: %ld\n", @@ -806,10 +834,13 @@ int __init of_parse_thermal_zones(void) /* attempting to build remaining zones still */ } } + of_node_put(np); return 0; exit_free: + of_node_put(child); + of_node_put(np); of_thermal_free_zone(tz); /* no memory available, so free what we have built */ @@ -838,6 +869,10 @@ void of_thermal_destroy_zones(void) for_each_child_of_node(np, child) { struct thermal_zone_device *zone; + /* Check whether child is enabled or not */ + if (!of_device_is_available(child)) + continue; + zone = thermal_zone_get_zone_by_name(child->name); if (IS_ERR(zone)) continue; @@ -847,4 +882,5 @@ void of_thermal_destroy_zones(void) kfree(zone->ops); of_thermal_free_zone(zone->devdata); } + of_node_put(np); } diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c index 3f5ad25ddca..b6be572704a 100644 --- a/drivers/thermal/samsung/exynos_thermal_common.c +++ b/drivers/thermal/samsung/exynos_thermal_common.c @@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) th_zone = sensor_conf->pzone_data; - if (th_zone->therm_dev) - thermal_zone_device_unregister(th_zone->therm_dev); + thermal_zone_device_unregister(th_zone->therm_dev); - for (i = 0; i < th_zone->cool_dev_size; i++) { - if (th_zone->cool_dev[i]) - cpufreq_cooling_unregister(th_zone->cool_dev[i]); - } + for (i = 0; i < th_zone->cool_dev_size; ++i) + cpufreq_cooling_unregister(th_zone->cool_dev[i]); dev_info(sensor_conf->dev, "Exynos: Kernel Thermal management unregistered\n"); diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h index 3eb2ed9ea3a..158f5aa8dc5 100644 --- a/drivers/thermal/samsung/exynos_thermal_common.h +++ b/drivers/thermal/samsung/exynos_thermal_common.h @@ -27,7 +27,7 @@ #define SENSOR_NAME_LEN 16 #define MAX_TRIP_COUNT 8 #define MAX_COOLING_DEVICE 4 -#define MAX_THRESHOLD_LEVS 5 +#define MAX_TRIMINFO_CTRL_REG 2 #define ACTIVE_INTERVAL 500 #define IDLE_INTERVAL 10000 diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index acbff14da3a..49c09243fd3 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -77,16 +77,6 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp) struct exynos_tmu_platform_data *pdata = data->pdata; int temp_code; - if (pdata->cal_mode == HW_MODE) - return temp; - - if (data->soc == SOC_ARCH_EXYNOS4210) - /* temp should range between 25 and 125 */ - if (temp < 25 || temp > 125) { - temp_code = -EINVAL; - goto out; - } - switch (pdata->cal_type) { case TYPE_TWO_POINT_TRIMMING: temp_code = (temp - 
pdata->first_point_trim) * @@ -101,7 +91,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp) temp_code = temp + pdata->default_temp_offset; break; } -out: + return temp_code; } @@ -114,16 +104,6 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) struct exynos_tmu_platform_data *pdata = data->pdata; int temp; - if (pdata->cal_mode == HW_MODE) - return temp_code; - - if (data->soc == SOC_ARCH_EXYNOS4210) - /* temp_code should range between 75 and 175 */ - if (temp_code < 75 || temp_code > 175) { - temp = -ENODATA; - goto out; - } - switch (pdata->cal_type) { case TYPE_TWO_POINT_TRIMMING: temp = (temp_code - data->temp_error1) * @@ -138,18 +118,35 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) temp = temp_code - pdata->default_temp_offset; break; } -out: + return temp; } +static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data) +{ + const struct exynos_tmu_registers *reg = data->pdata->registers; + unsigned int val_irq; + + val_irq = readl(data->base + reg->tmu_intstat); + /* + * Clear the interrupts. Please note that the documentation for + * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly + * states that INTCLEAR register has a different placing of bits + * responsible for FALL IRQs than INTSTAT register. Exynos5420 + * and Exynos5440 documentation is correct (Exynos4210 doesn't + * support FALL IRQs at all). + */ + writel(val_irq, data->base + reg->tmu_intclear); +} + static int exynos_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); struct exynos_tmu_platform_data *pdata = data->pdata; const struct exynos_tmu_registers *reg = pdata->registers; - unsigned int status, trim_info = 0, con; + unsigned int status, trim_info = 0, con, ctrl; unsigned int rising_threshold = 0, falling_threshold = 0; - int ret = 0, threshold_code, i, trigger_levs = 0; + int ret = 0, threshold_code, i; mutex_lock(&data->lock); clk_enable(data->clk); @@ -164,11 +161,17 @@ static int exynos_tmu_initialize(struct platform_device *pdev) } } - if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) - __raw_writel(1, data->base + reg->triminfo_ctrl); - - if (pdata->cal_mode == HW_MODE) - goto skip_calib_data; + if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) { + for (i = 0; i < reg->triminfo_ctrl_count; i++) { + if (pdata->triminfo_reload[i]) { + ctrl = readl(data->base + + reg->triminfo_ctrl[i]); + ctrl |= pdata->triminfo_reload[i]; + writel(ctrl, data->base + + reg->triminfo_ctrl[i]); + } + } + } /* Save trimming info in order to perform calibration */ if (data->soc == SOC_ARCH_EXYNOS5440) { @@ -197,7 +200,7 @@ static int exynos_tmu_initialize(struct platform_device *pdev) trim_info = readl(data->base + reg->triminfo_data); } data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK; - data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) & + data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) & EXYNOS_TMU_TEMP_MASK); if (!data->temp_error1 || @@ -207,67 +210,33 @@ static int exynos_tmu_initialize(struct platform_device *pdev) if (!data->temp_error2) data->temp_error2 = - (pdata->efuse_value >> reg->triminfo_85_shift) & + (pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) & EXYNOS_TMU_TEMP_MASK; -skip_calib_data: - if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) { - dev_err(&pdev->dev, "Invalid max trigger level\n"); - ret = -EINVAL; - goto out; - } - - for (i = 0; i < pdata->max_trigger_level; i++) { - if (!pdata->trigger_levels[i]) - continue; - - if ((pdata->trigger_type[i] == HW_TRIP) && - 
(!pdata->trigger_levels[pdata->max_trigger_level - 1])) { - dev_err(&pdev->dev, "Invalid hw trigger level\n"); - ret = -EINVAL; - goto out; - } - - /* Count trigger levels except the HW trip*/ - if (!(pdata->trigger_type[i] == HW_TRIP)) - trigger_levs++; - } - rising_threshold = readl(data->base + reg->threshold_th0); if (data->soc == SOC_ARCH_EXYNOS4210) { /* Write temperature code for threshold */ threshold_code = temp_to_code(data, pdata->threshold); - if (threshold_code < 0) { - ret = threshold_code; - goto out; - } writeb(threshold_code, data->base + reg->threshold_temp); - for (i = 0; i < trigger_levs; i++) + for (i = 0; i < pdata->non_hw_trigger_levels; i++) writeb(pdata->trigger_levels[i], data->base + reg->threshold_th0 + i * sizeof(reg->threshold_th0)); - writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear); + exynos_tmu_clear_irqs(data); } else { /* Write temperature code for rising and falling threshold */ - for (i = 0; - i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) { + for (i = 0; i < pdata->non_hw_trigger_levels; i++) { threshold_code = temp_to_code(data, pdata->trigger_levels[i]); - if (threshold_code < 0) { - ret = threshold_code; - goto out; - } rising_threshold &= ~(0xff << 8 * i); rising_threshold |= threshold_code << 8 * i; if (pdata->threshold_falling) { threshold_code = temp_to_code(data, pdata->trigger_levels[i] - pdata->threshold_falling); - if (threshold_code > 0) - falling_threshold |= - threshold_code << 8 * i; + falling_threshold |= threshold_code << 8 * i; } } @@ -276,9 +245,7 @@ skip_calib_data: writel(falling_threshold, data->base + reg->threshold_th1); - writel((reg->intclr_rise_mask << reg->intclr_rise_shift) | - (reg->intclr_fall_mask << reg->intclr_fall_shift), - data->base + reg->tmu_intclear); + exynos_tmu_clear_irqs(data); /* if last threshold limit is also present */ i = pdata->max_trigger_level - 1; @@ -286,10 +253,6 @@ skip_calib_data: (pdata->trigger_type[i] == HW_TRIP)) { threshold_code = temp_to_code(data, pdata->trigger_levels[i]); - if (threshold_code < 0) { - ret = threshold_code; - goto out; - } if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) { /* 1-4 level to be assigned in th0 reg */ rising_threshold &= ~(0xff << 8 * i); @@ -325,7 +288,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) struct exynos_tmu_data *data = platform_get_drvdata(pdev); struct exynos_tmu_platform_data *pdata = data->pdata; const struct exynos_tmu_registers *reg = pdata->registers; - unsigned int con, interrupt_en, cal_val; + unsigned int con, interrupt_en; mutex_lock(&data->lock); clk_enable(data->clk); @@ -335,15 +298,11 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) if (pdata->test_mux) con |= (pdata->test_mux << reg->test_mux_addr_shift); - if (pdata->reference_voltage) { - con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); - con |= pdata->reference_voltage << reg->buf_vref_sel_shift; - } + con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT); + con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT; - if (pdata->gain) { - con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift); - con |= (pdata->gain << reg->buf_slope_sel_shift); - } + con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT); + con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT); if (pdata->noise_cancel_mode) { con &= ~(reg->therm_trip_mode_mask << @@ -351,29 +310,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) con |= (pdata->noise_cancel_mode << 
reg->therm_trip_mode_shift); } - if (pdata->cal_mode == HW_MODE) { - con &= ~(reg->calib_mode_mask << reg->calib_mode_shift); - cal_val = 0; - switch (pdata->cal_type) { - case TYPE_TWO_POINT_TRIMMING: - cal_val = 3; - break; - case TYPE_ONE_POINT_TRIMMING_85: - cal_val = 2; - break; - case TYPE_ONE_POINT_TRIMMING_25: - cal_val = 1; - break; - case TYPE_NONE: - break; - default: - dev_err(&pdev->dev, "Invalid calibration type, using none\n"); - } - con |= cal_val << reg->calib_mode_shift; - } - if (on) { - con |= (1 << reg->core_en_shift); + con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); interrupt_en = pdata->trigger_enable[3] << reg->inten_rise3_shift | pdata->trigger_enable[2] << reg->inten_rise2_shift | @@ -383,7 +321,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) interrupt_en |= interrupt_en << reg->inten_fall0_shift; } else { - con &= ~(1 << reg->core_en_shift); + con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); interrupt_en = 0; /* Disable all interrupts */ } writel(interrupt_en, data->base + reg->tmu_inten); @@ -404,8 +342,16 @@ static int exynos_tmu_read(struct exynos_tmu_data *data) clk_enable(data->clk); temp_code = readb(data->base + reg->tmu_cur_temp); - temp = code_to_temp(data, temp_code); + if (data->soc == SOC_ARCH_EXYNOS4210) + /* temp_code should range between 75 and 175 */ + if (temp_code < 75 || temp_code > 175) { + temp = -ENODATA; + goto out; + } + + temp = code_to_temp(data, temp_code); +out: clk_disable(data->clk); mutex_unlock(&data->lock); @@ -465,7 +411,7 @@ static void exynos_tmu_work(struct work_struct *work) struct exynos_tmu_data, irq_work); struct exynos_tmu_platform_data *pdata = data->pdata; const struct exynos_tmu_registers *reg = pdata->registers; - unsigned int val_irq, val_type; + unsigned int val_type; if (!IS_ERR(data->clk_sec)) clk_enable(data->clk_sec); @@ -483,9 +429,7 @@ static void exynos_tmu_work(struct work_struct *work) clk_enable(data->clk); /* TODO: take action based on particular interrupt */ - val_irq = readl(data->base + reg->tmu_intstat); - /* clear the interrupts */ - writel(val_irq, data->base + reg->tmu_intclear); + exynos_tmu_clear_irqs(data); clk_disable(data->clk); mutex_unlock(&data->lock); diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 1b4a6444ea6..c58c7663a3f 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -34,11 +34,6 @@ enum calibration_type { TYPE_NONE, }; -enum calibration_mode { - SW_MODE, - HW_MODE, -}; - enum soc_type { SOC_ARCH_EXYNOS3250 = 1, SOC_ARCH_EXYNOS4210, @@ -82,46 +77,19 @@ enum soc_type { * bitfields. The register validity, offsets and bitfield values may vary * slightly across different exynos SOC's. * @triminfo_data: register containing 2 pont trimming data - * @triminfo_25_shift: shift bit of the 25 C trim value in triminfo_data reg. - * @triminfo_85_shift: shift bit of the 85 C trim value in triminfo_data reg. * @triminfo_ctrl: trim info controller register. - * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl - reg. + * @triminfo_ctrl_count: the number of trim info controller register. * @tmu_ctrl: TMU main controller register. * @test_mux_addr_shift: shift bits of test mux address. - * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register. - * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register. * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. 
* @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register. * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register. - * @buf_slope_sel_shift: shift bits of amplifier gain value in tmu_ctrl - register. - * @buf_slope_sel_mask: mask bits of amplifier gain value in tmu_ctrl register. - * @calib_mode_shift: shift bits of calibration mode value in tmu_ctrl - register. - * @calib_mode_mask: mask bits of calibration mode value in tmu_ctrl - register. - * @therm_trip_tq_en_shift: shift bits of thermal trip enable by TQ pin in - tmu_ctrl register. - * @core_en_shift: shift bits of TMU core enable bit in tmu_ctrl register. * @tmu_status: register drescribing the TMU status. * @tmu_cur_temp: register containing the current temperature of the TMU. - * @tmu_cur_temp_shift: shift bits of current temp value in tmu_cur_temp - register. * @threshold_temp: register containing the base threshold level. * @threshold_th0: Register containing first set of rising levels. - * @threshold_th0_l0_shift: shift bits of level0 threshold temperature. - * @threshold_th0_l1_shift: shift bits of level1 threshold temperature. - * @threshold_th0_l2_shift: shift bits of level2 threshold temperature. - * @threshold_th0_l3_shift: shift bits of level3 threshold temperature. * @threshold_th1: Register containing second set of rising levels. - * @threshold_th1_l0_shift: shift bits of level0 threshold temperature. - * @threshold_th1_l1_shift: shift bits of level1 threshold temperature. - * @threshold_th1_l2_shift: shift bits of level2 threshold temperature. - * @threshold_th1_l3_shift: shift bits of level3 threshold temperature. * @threshold_th2: Register containing third set of rising levels. - * @threshold_th2_l0_shift: shift bits of level0 threshold temperature. - * @threshold_th3: Register containing fourth set of rising levels. * @threshold_th3_l0_shift: shift bits of level0 threshold temperature. * @tmu_inten: register containing the different threshold interrupt enable bits. @@ -130,68 +98,35 @@ enum soc_type { * @inten_rise2_shift: shift bits of rising 2 interrupt bits. * @inten_rise3_shift: shift bits of rising 3 interrupt bits. * @inten_fall0_shift: shift bits of falling 0 interrupt bits. - * @inten_fall1_shift: shift bits of falling 1 interrupt bits. - * @inten_fall2_shift: shift bits of falling 2 interrupt bits. - * @inten_fall3_shift: shift bits of falling 3 interrupt bits. * @tmu_intstat: Register containing the interrupt status values. * @tmu_intclear: Register for clearing the raised interrupt status. - * @intclr_fall_shift: shift bits for interrupt clear fall 0 - * @intclr_rise_shift: shift bits of all rising interrupt bits. - * @intclr_rise_mask: mask bits of all rising interrupt bits. - * @intclr_fall_mask: mask bits of all rising interrupt bits. * @emul_con: TMU emulation controller register. * @emul_temp_shift: shift bits of emulation temperature. * @emul_time_shift: shift bits of emulation time. - * @emul_time_mask: mask bits of emulation time. * @tmu_irqstatus: register to find which TMU generated interrupts. * @tmu_pmin: register to get/set the Pmin value. 
*/ struct exynos_tmu_registers { u32 triminfo_data; - u32 triminfo_25_shift; - u32 triminfo_85_shift; - u32 triminfo_ctrl; - u32 triminfo_ctrl1; - u32 triminfo_reload_shift; + u32 triminfo_ctrl[MAX_TRIMINFO_CTRL_REG]; + u32 triminfo_ctrl_count; u32 tmu_ctrl; u32 test_mux_addr_shift; - u32 buf_vref_sel_shift; - u32 buf_vref_sel_mask; u32 therm_trip_mode_shift; u32 therm_trip_mode_mask; u32 therm_trip_en_shift; - u32 buf_slope_sel_shift; - u32 buf_slope_sel_mask; - u32 calib_mode_shift; - u32 calib_mode_mask; - u32 therm_trip_tq_en_shift; - u32 core_en_shift; u32 tmu_status; u32 tmu_cur_temp; - u32 tmu_cur_temp_shift; u32 threshold_temp; u32 threshold_th0; - u32 threshold_th0_l0_shift; - u32 threshold_th0_l1_shift; - u32 threshold_th0_l2_shift; - u32 threshold_th0_l3_shift; - u32 threshold_th1; - u32 threshold_th1_l0_shift; - u32 threshold_th1_l1_shift; - u32 threshold_th1_l2_shift; - u32 threshold_th1_l3_shift; - u32 threshold_th2; - u32 threshold_th2_l0_shift; - - u32 threshold_th3; u32 threshold_th3_l0_shift; u32 tmu_inten; @@ -200,22 +135,14 @@ struct exynos_tmu_registers { u32 inten_rise2_shift; u32 inten_rise3_shift; u32 inten_fall0_shift; - u32 inten_fall1_shift; - u32 inten_fall2_shift; - u32 inten_fall3_shift; u32 tmu_intstat; u32 tmu_intclear; - u32 intclr_fall_shift; - u32 intclr_rise_shift; - u32 intclr_fall_mask; - u32 intclr_rise_mask; u32 emul_con; u32 emul_temp_shift; u32 emul_time_shift; - u32 emul_time_mask; u32 tmu_irqstatus; u32 tmu_pmin; @@ -250,11 +177,12 @@ struct exynos_tmu_registers { * 1 = enable trigger_level[] interrupt, * 0 = disable trigger_level[] interrupt * @max_trigger_level: max trigger level supported by the TMU + * @non_hw_trigger_levels: number of defined non-hardware trigger levels * @gain: gain of amplifier in the positive-TC generator block - * 0 <= gain <= 15 + * 0 < gain <= 15 * @reference_voltage: reference voltage of amplifier * in the positive-TC generator block - * 0 <= reference_voltage <= 31 + * 0 < reference_voltage <= 31 * @noise_cancel_mode: noise cancellation mode * 000, 100, 101, 110 and 111 can be different modes * @type: determines the type of SOC @@ -265,8 +193,8 @@ struct exynos_tmu_registers { * @second_point_trim: temp value of the second point trimming * @default_temp_offset: default temperature offset in case of no trimming * @test_mux; information if SoC supports test MUX + * @triminfo_reload: reload value to read TRIMINFO register * @cal_type: calibration type for temperature - * @cal_mode: calibration mode for temperature * @freq_clip_table: Table representing frequency reduction percentage. * @freq_tab_count: Count of the above table as frequency reduction may * applicable to only some of the trigger levels. 
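/*
 * Editorial sketch, not part of the patch: how a per-SoC table is expected to
 * fill the reworked exynos_tmu_platform_data fields documented above, using
 * the Exynos4412/Exynos3250 defaults that appear later in this series
 * (non_hw_trigger_levels, gain, reference_voltage, triminfo_reload). It
 * assumes the definitions from exynos_tmu.h and exynos_tmu_data.h; the entry
 * name is hypothetical and values not shown in this patch are illustrative.
 */
static const struct exynos_tmu_platform_data example_tmu_pdata = {
	.trigger_type[2]	= SW_TRIP,
	.trigger_type[3]	= HW_TRIP,
	.max_trigger_level	= 4,
	.non_hw_trigger_levels	= 3,	/* rising levels programmed by software; the HW trip is excluded */
	.gain			= 8,	/* must satisfy 0 < gain <= 15 */
	.reference_voltage	= 16,	/* must satisfy 0 < reference_voltage <= 31 */
	.noise_cancel_mode	= 4,
	.cal_type		= TYPE_ONE_POINT_TRIMMING,
	.triminfo_reload[0]	= EXYNOS_TRIMINFO_RELOAD_ENABLE,
};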
@@ -284,6 +212,7 @@ struct exynos_tmu_platform_data { enum trigger_type trigger_type[MAX_TRIP_COUNT]; bool trigger_enable[MAX_TRIP_COUNT]; u8 max_trigger_level; + u8 non_hw_trigger_levels; u8 gain; u8 reference_voltage; u8 noise_cancel_mode; @@ -295,9 +224,9 @@ struct exynos_tmu_platform_data { u8 second_point_trim; u8 default_temp_offset; u8 test_mux; + u8 triminfo_reload[MAX_TRIMINFO_CTRL_REG]; enum calibration_type cal_type; - enum calibration_mode cal_mode; enum soc_type type; struct freq_clip_table freq_tab[4]; unsigned int freq_tab_count; diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c index aa8e0dee205..1724f6cdaef 100644 --- a/drivers/thermal/samsung/exynos_tmu_data.c +++ b/drivers/thermal/samsung/exynos_tmu_data.c @@ -27,14 +27,7 @@ #if defined(CONFIG_CPU_EXYNOS4210) static const struct exynos_tmu_registers exynos4210_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS_TMU_REG_STATUS, .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP, @@ -46,7 +39,6 @@ static const struct exynos_tmu_registers exynos4210_tmu_registers = { .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT, .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, - .intclr_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK, }; struct exynos_tmu_init_data const exynos4210_default_tmu_data = { @@ -64,6 +56,7 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = { .trigger_type[1] = THROTTLE_ACTIVE, .trigger_type[2] = SW_TRIP, .max_trigger_level = 4, + .non_hw_trigger_levels = 3, .gain = 15, .reference_voltage = 7, .cal_type = TYPE_ONE_POINT_TRIMMING, @@ -93,18 +86,14 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = { #if defined(CONFIG_SOC_EXYNOS3250) static const struct exynos_tmu_registers exynos3250_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, + .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON1, + .triminfo_ctrl[1] = EXYNOS_TMU_TRIMINFO_CON2, + .triminfo_ctrl_count = 2, .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS_TMU_REG_STATUS, .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, .threshold_th0 = EXYNOS_THD_TEMP_RISE, @@ -116,14 +105,9 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = { .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, - .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT, - .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT, - .intclr_rise_mask = 
EXYNOS_TMU_RISE_INT_MASK, - .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK, .emul_con = EXYNOS_EMUL_CON, .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, - .emul_time_mask = EXYNOS_EMUL_TIME_MASK, }; #define EXYNOS3250_TMU_DATA \ @@ -141,6 +125,7 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = { .trigger_type[2] = SW_TRIP, \ .trigger_type[3] = HW_TRIP, \ .max_trigger_level = 4, \ + .non_hw_trigger_levels = 3, \ .gain = 8, \ .reference_voltage = 16, \ .noise_cancel_mode = 4, \ @@ -160,8 +145,10 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = { .temp_level = 95, \ }, \ .freq_tab_count = 2, \ + .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \ + .triminfo_reload[1] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \ .registers = &exynos3250_tmu_registers, \ - .features = (TMU_SUPPORT_EMULATION | \ + .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ TMU_SUPPORT_EMUL_TIME) #endif @@ -182,20 +169,13 @@ struct exynos_tmu_init_data const exynos3250_default_tmu_data = { #if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250) static const struct exynos_tmu_registers exynos4412_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, - .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON, - .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT, + .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON2, + .triminfo_ctrl_count = 1, .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS_TMU_REG_STATUS, .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, .threshold_th0 = EXYNOS_THD_TEMP_RISE, @@ -208,14 +188,9 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = { .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, - .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT, - .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT, - .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK, - .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK, .emul_con = EXYNOS_EMUL_CON, .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, - .emul_time_mask = EXYNOS_EMUL_TIME_MASK, }; #define EXYNOS4412_TMU_DATA \ @@ -233,6 +208,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = { .trigger_type[2] = SW_TRIP, \ .trigger_type[3] = HW_TRIP, \ .max_trigger_level = 4, \ + .non_hw_trigger_levels = 3, \ .gain = 8, \ .reference_voltage = 16, \ .noise_cancel_mode = 4, \ @@ -252,6 +228,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = { .temp_level = 95, \ }, \ .freq_tab_count = 2, \ + .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \ .registers = &exynos4412_tmu_registers, \ .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ @@ -286,18 +263,10 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = { #if 
defined(CONFIG_SOC_EXYNOS5260) static const struct exynos_tmu_registers exynos5260_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, - .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS_TMU_REG_STATUS, .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, .threshold_th0 = EXYNOS_THD_TEMP_RISE, @@ -310,14 +279,9 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = { .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT, .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR, - .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT, - .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT, - .intclr_rise_mask = EXYNOS5260_TMU_RISE_INT_MASK, - .intclr_fall_mask = EXYNOS5260_TMU_FALL_INT_MASK, .emul_con = EXYNOS5260_EMUL_CON, .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, - .emul_time_mask = EXYNOS_EMUL_TIME_MASK, }; #define __EXYNOS5260_TMU_DATA \ @@ -335,6 +299,7 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = { .trigger_type[2] = SW_TRIP, \ .trigger_type[3] = HW_TRIP, \ .max_trigger_level = 4, \ + .non_hw_trigger_levels = 3, \ .gain = 8, \ .reference_voltage = 16, \ .noise_cancel_mode = 4, \ @@ -359,9 +324,8 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = { #define EXYNOS5260_TMU_DATA \ __EXYNOS5260_TMU_DATA \ .type = SOC_ARCH_EXYNOS5260, \ - .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ - TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ - TMU_SUPPORT_EMUL_TIME) + .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \ + TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME) struct exynos_tmu_init_data const exynos5260_default_tmu_data = { .tmu_data = { @@ -378,17 +342,10 @@ struct exynos_tmu_init_data const exynos5260_default_tmu_data = { #if defined(CONFIG_SOC_EXYNOS5420) static const struct exynos_tmu_registers exynos5420_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS_TMU_REG_STATUS, .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, .threshold_th0 = EXYNOS_THD_TEMP_RISE, @@ -402,14 +359,9 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = { .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, - .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT, - .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT, - 
.intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK, - .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK, .emul_con = EXYNOS_EMUL_CON, .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, - .emul_time_mask = EXYNOS_EMUL_TIME_MASK, }; #define __EXYNOS5420_TMU_DATA \ @@ -427,6 +379,7 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = { .trigger_type[2] = SW_TRIP, \ .trigger_type[3] = HW_TRIP, \ .max_trigger_level = 4, \ + .non_hw_trigger_levels = 3, \ .gain = 8, \ .reference_voltage = 16, \ .noise_cancel_mode = 4, \ @@ -451,16 +404,15 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = { #define EXYNOS5420_TMU_DATA \ __EXYNOS5420_TMU_DATA \ .type = SOC_ARCH_EXYNOS5250, \ - .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ - TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ - TMU_SUPPORT_EMUL_TIME) + .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \ + TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME) #define EXYNOS5420_TMU_DATA_SHARED \ __EXYNOS5420_TMU_DATA \ .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \ - .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ - TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ - TMU_SUPPORT_EMUL_TIME | TMU_SUPPORT_ADDRESS_MULTIPLE) + .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \ + TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME | \ + TMU_SUPPORT_ADDRESS_MULTIPLE) struct exynos_tmu_init_data const exynos5420_default_tmu_data = { .tmu_data = { @@ -477,19 +429,10 @@ struct exynos_tmu_init_data const exynos5420_default_tmu_data = { #if defined(CONFIG_SOC_EXYNOS5440) static const struct exynos_tmu_registers exynos5440_tmu_registers = { .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM, - .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, - .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL, - .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, - .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, - .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, - .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, - .calib_mode_shift = EXYNOS_TMU_CALIB_MODE_SHIFT, - .calib_mode_mask = EXYNOS_TMU_CALIB_MODE_MASK, - .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, .tmu_status = EXYNOS5440_TMU_S0_7_STATUS, .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP, .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0, @@ -504,10 +447,6 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = { .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT, .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ, .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ, - .intclr_fall_shift = EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT, - .intclr_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT, - .intclr_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK, - .intclr_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK, .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS, .emul_con = EXYNOS5440_TMU_S0_7_DEBUG, .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, @@ -521,11 +460,11 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = { .trigger_type[0] = SW_TRIP, \ .trigger_type[4] = HW_TRIP, \ .max_trigger_level = 5, \ + .non_hw_trigger_levels = 1, \ .gain = 5, \ .reference_voltage = 16, \ .noise_cancel_mode = 4, \ .cal_type = TYPE_ONE_POINT_TRIMMING, \ - .cal_mode = 0, \ .efuse_value = 0x5b2d, \ .min_efuse_value = 16, \ .max_efuse_value = 76, \ diff --git 
a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h index f0979e59849..63de598c9c2 100644 --- a/drivers/thermal/samsung/exynos_tmu_data.h +++ b/drivers/thermal/samsung/exynos_tmu_data.h @@ -39,55 +39,31 @@ #define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8 #define EXYNOS_TMU_CORE_EN_SHIFT 0 +/* Exynos3250 specific registers */ +#define EXYNOS_TMU_TRIMINFO_CON1 0x10 + /* Exynos4210 specific registers */ #define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44 #define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50 -#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54 -#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58 -#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C -#define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60 -#define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64 -#define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68 -#define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C - -#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1 -#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10 -#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100 -#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000 -#define EXYNOS4210_TMU_TRIG_LEVEL_MASK 0x1111 -#define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111 - -/* Exynos5250 and Exynos4412 specific registers */ -#define EXYNOS_TMU_TRIMINFO_CON 0x14 + +/* Exynos5250, Exynos4412, Exynos3250 specific registers */ +#define EXYNOS_TMU_TRIMINFO_CON2 0x14 #define EXYNOS_THD_TEMP_RISE 0x50 #define EXYNOS_THD_TEMP_FALL 0x54 #define EXYNOS_EMUL_CON 0x80 -#define EXYNOS_TRIMINFO_RELOAD_SHIFT 1 +#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1 #define EXYNOS_TRIMINFO_25_SHIFT 0 #define EXYNOS_TRIMINFO_85_SHIFT 8 -#define EXYNOS_TMU_RISE_INT_MASK 0x111 -#define EXYNOS_TMU_RISE_INT_SHIFT 0 -#define EXYNOS_TMU_FALL_INT_MASK 0x111 -#define EXYNOS_TMU_CLEAR_RISE_INT 0x111 -#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12) -#define EXYNOS_TMU_CLEAR_FALL_INT_SHIFT 12 -#define EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT 16 -#define EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT 4 #define EXYNOS_TMU_TRIP_MODE_SHIFT 13 #define EXYNOS_TMU_TRIP_MODE_MASK 0x7 #define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12 -#define EXYNOS_TMU_CALIB_MODE_SHIFT 4 -#define EXYNOS_TMU_CALIB_MODE_MASK 0x3 #define EXYNOS_TMU_INTEN_RISE0_SHIFT 0 #define EXYNOS_TMU_INTEN_RISE1_SHIFT 4 #define EXYNOS_TMU_INTEN_RISE2_SHIFT 8 #define EXYNOS_TMU_INTEN_RISE3_SHIFT 12 #define EXYNOS_TMU_INTEN_FALL0_SHIFT 16 -#define EXYNOS_TMU_INTEN_FALL1_SHIFT 20 -#define EXYNOS_TMU_INTEN_FALL2_SHIFT 24 -#define EXYNOS_TMU_INTEN_FALL3_SHIFT 28 #define EXYNOS_EMUL_TIME 0x57F0 #define EXYNOS_EMUL_TIME_MASK 0xffff @@ -99,14 +75,9 @@ #define EXYNOS_MAX_TRIGGER_PER_REG 4 /* Exynos5260 specific */ -#define EXYNOS_TMU_REG_CONTROL1 0x24 #define EXYNOS5260_TMU_REG_INTEN 0xC0 #define EXYNOS5260_TMU_REG_INTSTAT 0xC4 #define EXYNOS5260_TMU_REG_INTCLEAR 0xC8 -#define EXYNOS5260_TMU_CLEAR_RISE_INT 0x1111 -#define EXYNOS5260_TMU_CLEAR_FALL_INT (0x1111 << 16) -#define EXYNOS5260_TMU_RISE_INT_MASK 0x1111 -#define EXYNOS5260_TMU_FALL_INT_MASK 0x1111 #define EXYNOS5260_EMUL_CON 0x100 /* Exynos4412 specific */ @@ -122,29 +93,17 @@ #define EXYNOS5440_TMU_S0_7_TH0 0x110 #define EXYNOS5440_TMU_S0_7_TH1 0x130 #define EXYNOS5440_TMU_S0_7_TH2 0x150 -#define EXYNOS5440_TMU_S0_7_EVTEN 0x1F0 #define EXYNOS5440_TMU_S0_7_IRQEN 0x210 #define EXYNOS5440_TMU_S0_7_IRQ 0x230 /* exynos5440 common registers */ #define EXYNOS5440_TMU_IRQ_STATUS 0x000 #define EXYNOS5440_TMU_PMIN 0x004 -#define EXYNOS5440_TMU_TEMP 0x008 -#define EXYNOS5440_TMU_RISE_INT_MASK 0xf -#define EXYNOS5440_TMU_RISE_INT_SHIFT 0 -#define EXYNOS5440_TMU_FALL_INT_MASK 0xf #define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0 #define 
EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1 #define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2 #define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3 #define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4 -#define EXYNOS5440_TMU_INTEN_FALL1_SHIFT 5 -#define EXYNOS5440_TMU_INTEN_FALL2_SHIFT 6 -#define EXYNOS5440_TMU_INTEN_FALL3_SHIFT 7 -#define EXYNOS5440_TMU_TH_RISE0_SHIFT 0 -#define EXYNOS5440_TMU_TH_RISE1_SHIFT 8 -#define EXYNOS5440_TMU_TH_RISE2_SHIFT 16 -#define EXYNOS5440_TMU_TH_RISE3_SHIFT 24 #define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 #define EXYNOS5440_EFUSE_SWAP_OFFSET 8 diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index 90163b38466..d1ec5804c0b 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c @@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(st_thermal_unregister); +#ifdef CONFIG_PM_SLEEP static int st_thermal_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev) return 0; } +#endif + SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); EXPORT_SYMBOL_GPL(st_thermal_pm_ops); diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index f251521baaa..fdd1f523a1e 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -23,6 +23,7 @@ */ #include <linux/thermal.h> +#include <trace/events/thermal.h> #include "thermal_core.h" @@ -76,7 +77,7 @@ static unsigned long get_target_state(struct thermal_instance *instance, next_target = instance->upper; break; case THERMAL_TREND_DROPPING: - if (cur_state == instance->lower) { + if (cur_state <= instance->lower) { if (!throttle) next_target = THERMAL_NO_TARGET; } else { @@ -129,8 +130,10 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) trend = get_tz_trend(tz, trip); - if (tz->temperature >= trip_temp) + if (tz->temperature >= trip_temp) { throttle = true; + trace_thermal_zone_trip(tz, trip, trip_type); + } dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n", trip, trip_type, trip_temp, trend, throttle); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 1e23f4f8d2c..43b90709585 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -38,6 +38,9 @@ #include <net/netlink.h> #include <net/genetlink.h> +#define CREATE_TRACE_POINTS +#include <trace/events/thermal.h> + #include "thermal_core.h" #include "thermal_hwmon.h" @@ -368,6 +371,8 @@ static void handle_critical_trips(struct thermal_zone_device *tz, if (tz->temperature < trip_temp) return; + trace_thermal_zone_trip(tz, trip, trip_type); + if (tz->ops->notify) tz->ops->notify(tz, trip, trip_type); @@ -463,6 +468,7 @@ static void update_temperature(struct thermal_zone_device *tz) tz->temperature = temp; mutex_unlock(&tz->lock); + trace_thermal_temperature(tz); dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", tz->last_temperature, tz->temperature); } @@ -1287,6 +1293,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) mutex_unlock(&cdev->lock); cdev->ops->set_cur_state(cdev, target); cdev->updated = true; + trace_cdev_update(cdev, target); dev_dbg(&cdev->device, "set to state %lu\n", target); } EXPORT_SYMBOL(thermal_cdev_update); @@ -1568,8 +1575,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, thermal_zone_device_update(tz); - if (!result) - return tz; + return tz; unregister: 
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); @@ -1790,6 +1796,10 @@ static int __init thermal_register_governors(void) if (result) return result; + result = thermal_gov_bang_bang_register(); + if (result) + return result; + return thermal_gov_user_space_register(); } @@ -1797,6 +1807,7 @@ static void thermal_unregister_governors(void) { thermal_gov_step_wise_unregister(); thermal_gov_fair_share_unregister(); + thermal_gov_bang_bang_unregister(); thermal_gov_user_space_unregister(); } diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 3db339fb636..d15d243de27 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -69,6 +69,14 @@ static inline int thermal_gov_fair_share_register(void) { return 0; } static inline void thermal_gov_fair_share_unregister(void) {} #endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ +#ifdef CONFIG_THERMAL_GOV_BANG_BANG +int thermal_gov_bang_bang_register(void); +void thermal_gov_bang_bang_unregister(void); +#else +static inline int thermal_gov_bang_bang_register(void) { return 0; } +static inline void thermal_gov_bang_bang_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_BANG_BANG */ + #ifdef CONFIG_THERMAL_GOV_USER_SPACE int thermal_gov_user_space_register(void); void thermal_gov_user_space_unregister(void); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 89c4cee253e..2e900a98c3e 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2413,12 +2413,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; if (input_available_p(tty, 1)) mask |= POLLIN | POLLRDNORM; + else if (mask & POLLHUP) { + tty_flush_to_ldisc(tty); + if (input_available_p(tty, 1)) + mask |= POLLIN | POLLRDNORM; + } if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) - mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 8f37d57165e..de7aae523b3 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -81,7 +81,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, /* Set to highest baudrate supported */ if (baud >= 1152000) baud = 921600; - quot = DIV_ROUND_CLOSEST(port->uartclk, 256 * baud); + quot = (port->uartclk / (256 * baud)) + 1; } /* diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 8bc2563335a..bf355050eab 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -158,7 +158,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev) if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) return -EBUSY; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) return -ENOMEM; @@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int of_serial_suspend(struct device *dev) -{ - struct of_serial_info *info = dev_get_drvdata(dev); - - serial8250_suspend_port(info->line); - if (info->clk) - clk_disable_unprepare(info->clk); - - return 0; -} - -static int of_serial_resume(struct device *dev) -{ - struct of_serial_info *info = dev_get_drvdata(dev); - - if 
(info->clk) - clk_prepare_enable(info->clk); - - serial8250_resume_port(info->line); - - return 0; -} -#endif -static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume); - /* * A few common types, add more as needed. */ @@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = { .name = "of_serial", .owner = THIS_MODULE, .of_match_table = of_platform_serial_table, - .pm = &of_serial_pm_ops, }, .probe = of_platform_serial_probe, .remove = of_platform_serial_remove, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index df3a8c74358..eaeb9a02c7f 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -363,7 +363,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... * Die! Die! Die! */ - if (baud == 38400) + if (try == 0 && baud == 38400) baud = altbaud; /* diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 16a2c0237dd..0508a1d8e4c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1709,6 +1709,8 @@ int tty_release(struct inode *inode, struct file *filp) int pty_master, tty_closing, o_tty_closing, do_sleep; int idx; char buf[64]; + long timeout = 0; + int once = 1; if (tty_paranoia_check(tty, inode, __func__)) return 0; @@ -1789,11 +1791,18 @@ int tty_release(struct inode *inode, struct file *filp) if (!do_sleep) break; - printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", - __func__, tty_name(tty, buf)); + if (once) { + once = 0; + printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", + __func__, tty_name(tty, buf)); + } tty_unlock_pair(tty, o_tty); mutex_unlock(&tty_mutex); - schedule(); + schedule_timeout_killable(timeout); + if (timeout < 120 * HZ) + timeout = 2 * timeout + 1; + else + timeout = MAX_SCHEDULE_TIMEOUT; } /* diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index 610b720d3b9..59b25e03996 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -539,6 +539,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) /* Save original vc_unipagdir_loc in case we allocate a new one */ p = *vc->vc_uni_pagedir_loc; + + if (!p) { + err = -EINVAL; + + goto out_unlock; + } if (p->refcount > 1) { int j, k; @@ -623,6 +629,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) set_inverse_transl(vc, p, i); /* Update inverse translations */ set_inverse_trans_unicode(vc, p); +out_unlock: console_unlock(); return err; } diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a673e5b6a2e..60fa6278fbc 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -28,18 +28,6 @@ #define UIO_MAX_DEVICES (1U << MINORBITS) -struct uio_device { - struct module *owner; - struct device *dev; - int minor; - atomic_t event; - struct fasync_struct *async_queue; - wait_queue_head_t wait; - struct uio_info *info; - struct kobject *map_dir; - struct kobject *portio_dir; -}; - static int uio_major; static struct cdev *uio_cdev; static DEFINE_IDR(uio_idr); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 3df5005c554..9bdc6bd7343 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -742,7 +742,6 @@ static int ci_hdrc_remove(struct platform_device *pdev) ci_role_destroy(ci); ci_hdrc_enter_lpm(ci, true); usb_phy_shutdown(ci->transceiver); - kfree(ci->hw_bank.regmap); return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c 
index e934e19f49f..077d58ac3dc 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -60,6 +60,9 @@ static struct acm *acm_table[ACM_TTY_MINORS]; static DEFINE_MUTEX(acm_table_lock); +static void acm_tty_set_termios(struct tty_struct *tty, + struct ktermios *termios_old); + /* * acm_table accessors */ @@ -145,8 +148,15 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value, /* devices aren't required to support these requests. * the cdc acm descriptor tells whether they do... */ -#define acm_set_control(acm, control) \ - acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0) +static inline int acm_set_control(struct acm *acm, int control) +{ + if (acm->quirks & QUIRK_CONTROL_LINE_STATE) + return -EOPNOTSUPP; + + return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, + control, NULL, 0); +} + #define acm_set_line(acm, line) \ acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line)) #define acm_send_break(acm, ms) \ @@ -554,6 +564,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) goto error_submit_urb; } + acm_tty_set_termios(tty, NULL); + /* * Unthrottle device in case the TTY was closed while throttled. */ @@ -980,11 +992,12 @@ static void acm_tty_set_termios(struct tty_struct *tty, /* FIXME: Needs to clear unsupported bits in the termios */ acm->clocal = ((termios->c_cflag & CLOCAL) != 0); - if (!newline.dwDTERate) { + if (C_BAUD(tty) == B0) { newline.dwDTERate = acm->line.dwDTERate; newctrl &= ~ACM_CTRL_DTR; - } else + } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) { newctrl |= ACM_CTRL_DTR; + } if (newctrl != acm->ctrlout) acm_set_control(acm, acm->ctrlout = newctrl); @@ -1314,6 +1327,7 @@ made_compressed_probe: tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; init_usb_anchor(&acm->delayed); + acm->quirks = quirks; buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); if (!buf) { @@ -1681,6 +1695,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ + .driver_info = QUIRK_CONTROL_LINE_STATE, }, + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, /* Motorola H24 HSPA module: */ diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index fc75651afe1..d3251ebd09e 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -121,6 +121,7 @@ struct acm { unsigned int throttle_req:1; /* throttle requested */ u8 bInterval; struct usb_anchor delayed; /* writes queued for a device about to be woken */ + unsigned long quirks; }; #define CDC_DATA_INTERFACE_TYPE 0x0a @@ -132,3 +133,4 @@ struct acm { #define NOT_A_MODEM BIT(3) #define NO_DATA_INTERFACE BIT(4) #define IGNORE_DEVICE BIT(5) +#define QUIRK_CONTROL_LINE_STATE BIT(6) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index b84fb141e12..a6efb4184f2 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2060,6 +2060,8 @@ int usb_alloc_streams(struct usb_interface *interface, return -EINVAL; if (dev->speed != USB_SPEED_SUPER) return -EINVAL; + if (dev->state < USB_STATE_CONFIGURED) + return -ENODEV; for (i = 0; i < num_eps; i++) { /* Streams only apply to bulk endpoints. 
*/ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 11e80ac3132..b649fef2e35 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4468,9 +4468,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, if (retval) goto fail; - if (hcd->usb_phy && !hdev->parent) - usb_phy_notify_connect(hcd->usb_phy, udev->speed); - /* * Some superspeed devices have finished the link training process * and attached to a superspeed hub port, but the device descriptor @@ -4627,8 +4624,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, /* Disconnect any existing devices under this port */ if (udev) { - if (hcd->usb_phy && !hdev->parent && - !(portstatus & USB_PORT_STAT_CONNECTION)) + if (hcd->usb_phy && !hdev->parent) usb_phy_notify_disconnect(hcd->usb_phy, udev->speed); usb_disconnect(&port_dev->child); } @@ -4783,6 +4779,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, port_dev->child = NULL; spin_unlock_irq(&device_state_lock); mutex_unlock(&usb_port_peer_mutex); + } else { + if (hcd->usb_phy && !hdev->parent) + usb_phy_notify_connect(hcd->usb_phy, + udev->speed); } } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 5ae883dc21f..96fafed92b7 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Wireless Laser Mouse 6000 Receiver */ + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -97,6 +100,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04f3, 0x0089), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x009b), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + + { USB_DEVICE(0x04f3, 0x016f), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index bf015ab3b44..55c90c53f2d 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -619,7 +619,7 @@ struct dwc2_hsotg { unsigned port_suspend_change:1; unsigned port_over_current_change:1; unsigned port_l1_change:1; - unsigned reserved:26; + unsigned reserved:25; } b; } flags; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 7b5856fadd9..8b5c079c7b7 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2327,7 +2327,7 @@ irq_retry: u32 usb_status = readl(hsotg->regs + GOTGCTL); - dev_info(hsotg->dev, "%s: USBRst\n", __func__); + dev_dbg(hsotg->dev, "%s: USBRst\n", __func__); dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", readl(hsotg->regs + GNPTXSTS)); @@ -2561,8 +2561,10 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep, hs_ep->fifo_size = val; break; } - if (i == 8) - return -ENOMEM; + if (i == 8) { + ret = -ENOMEM; + goto error; + } } /* for non control endpoints, set PID to D0 */ @@ -2579,6 +2581,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep, /* enable the endpoint interrupt */ s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1); +error: spin_unlock_irqrestore(&hsotg->lock, flags); return ret; } @@ -2934,9 +2937,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget, spin_lock_irqsave(&hsotg->lock, flags); - if (!driver) - hsotg->driver = 
NULL; - + hsotg->driver = NULL; hsotg->gadget.speed = USB_SPEED_UNKNOWN; spin_unlock_irqrestore(&hsotg->lock, flags); @@ -3567,6 +3568,7 @@ static int s3c_hsotg_probe(struct platform_device *pdev) s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum); /* disable power and clock */ + s3c_hsotg_phy_disable(hsotg); ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); @@ -3575,8 +3577,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev) goto err_ep_mem; } - s3c_hsotg_phy_disable(hsotg); - ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget); if (ret) goto err_ep_mem; diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 2f537d58822..a0aa9f3da44 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -597,7 +597,7 @@ static int dwc3_omap_prepare(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - dwc3_omap_write_irqmisc_set(omap, 0x00); + dwc3_omap_disable_irqs(omap); return 0; } @@ -605,19 +605,8 @@ static int dwc3_omap_prepare(struct device *dev) static void dwc3_omap_complete(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - u32 reg; - reg = (USBOTGSS_IRQMISC_OEVT | - USBOTGSS_IRQMISC_DRVVBUS_RISE | - USBOTGSS_IRQMISC_CHRGVBUS_RISE | - USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | - USBOTGSS_IRQMISC_IDPULLUP_RISE | - USBOTGSS_IRQMISC_DRVVBUS_FALL | - USBOTGSS_IRQMISC_CHRGVBUS_FALL | - USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | - USBOTGSS_IRQMISC_IDPULLUP_FALL); - - dwc3_omap_write_irqmisc_set(omap, reg); + dwc3_omap_enable_irqs(omap); } static int dwc3_omap_suspend(struct device *dev) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 436fb08c40b..a36cf66302f 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -30,6 +30,7 @@ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e +#define PCI_DEVICE_ID_INTEL_BSW 0x22B7 struct dwc3_pci { struct device *dev; @@ -181,6 +182,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, { } /* Terminating Entry */ diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index b35938777dd..df38e7ef497 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -256,7 +256,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) /* stall is always issued on EP0 */ dep = dwc->eps[0]; - __dwc3_gadget_ep_set_halt(dep, 1); + __dwc3_gadget_ep_set_halt(dep, 1, false); dep->flags = DWC3_EP_ENABLED; dwc->delayed_status = false; @@ -271,7 +271,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) dwc3_ep0_out_start(dwc); } -int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value) +int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value) { struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; @@ -281,6 +281,20 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value) return 0; } +int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value) +{ + struct dwc3_ep *dep = to_dwc3_ep(ep); + struct dwc3 *dwc = dep->dwc; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dwc->lock, flags); + ret = __dwc3_gadget_ep0_set_halt(ep, value); + spin_unlock_irqrestore(&dwc->lock, flags); + + return ret; +} + void dwc3_ep0_out_start(struct dwc3 
*dwc) { int ret; @@ -466,7 +480,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, return -EINVAL; if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) break; - ret = __dwc3_gadget_ep_set_halt(dep, set); + ret = __dwc3_gadget_ep_set_halt(dep, set, true); if (ret) return -EINVAL; break; @@ -775,11 +789,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; - r = next_request(&ep0->request_list); - ur = &r->request; - trb = dwc->ep0_trb; + r = next_request(&ep0->request_list); + if (!r) + return; + status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); @@ -790,6 +805,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, return; } + ur = &r->request; + length = trb->size & DWC3_TRB_SIZE_MASK; if (dwc->ep0_bounced) { @@ -811,12 +828,19 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, dwc3_ep0_stall_and_restart(dwc); } else { - /* - * handle the case where we have to send a zero packet. This - * seems to be case when req.length > maxpacket. Could it be? - */ - if (r) - dwc3_gadget_giveback(ep0, r, 0); + dwc3_gadget_giveback(ep0, r, 0); + + if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) && + ur->length && ur->zero) { + int ret; + + dwc->ep0_next_event = DWC3_EP0_COMPLETE; + + ret = dwc3_ep0_start_trans(dwc, epnum, + dwc->ctrl_req_addr, 0, + DWC3_TRBCTL_CONTROL_DATA); + WARN_ON(ret < 0); + } } } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 3818b26bfc0..546ea5431b8 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -525,12 +525,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, if (!usb_endpoint_xfer_isoc(desc)) return 0; - memset(&trb_link, 0, sizeof(trb_link)); - /* Link TRB for ISOC. 
The HWO bit is never reset */ trb_st_hw = &dep->trb_pool[0]; trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; + memset(trb_link, 0, sizeof(*trb_link)); trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); @@ -581,7 +580,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) /* make sure HW endpoint isn't stalled */ if (dep->flags & DWC3_EP_STALL) - __dwc3_gadget_ep_set_halt(dep, 0); + __dwc3_gadget_ep_set_halt(dep, 0, false); reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg &= ~DWC3_DALEPENA_EP(dep->number); @@ -1202,15 +1201,28 @@ out0: return ret; } -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) { struct dwc3_gadget_ep_cmd_params params; struct dwc3 *dwc = dep->dwc; int ret; + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { + dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); + return -EINVAL; + } + memset(¶ms, 0x00, sizeof(params)); if (value) { + if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || + (!list_empty(&dep->req_queued) || + !list_empty(&dep->request_list)))) { + dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", + dep->name); + return -EAGAIN; + } + ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, DWC3_DEPCMD_SETSTALL, ¶ms); if (ret) @@ -1241,15 +1253,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) int ret; spin_lock_irqsave(&dwc->lock, flags); - - if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name); - ret = -EINVAL; - goto out; - } - - ret = __dwc3_gadget_ep_set_halt(dep, value); -out: + ret = __dwc3_gadget_ep_set_halt(dep, value, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; @@ -1260,15 +1264,18 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; unsigned long flags; + int ret; spin_lock_irqsave(&dwc->lock, flags); dep->flags |= DWC3_EP_WEDGE; - spin_unlock_irqrestore(&dwc->lock, flags); if (dep->number == 0 || dep->number == 1) - return dwc3_gadget_ep0_set_halt(ep, 1); + ret = __dwc3_gadget_ep0_set_halt(ep, 1); else - return dwc3_gadget_ep_set_halt(ep, 1); + ret = __dwc3_gadget_ep_set_halt(dep, 1, false); + spin_unlock_irqrestore(&dwc->lock, flags); + + return ret; } /* -------------------------------------------------------------------------- */ diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 178ad898220..18ae3eaa8b6 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -82,10 +82,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event); void dwc3_ep0_out_start(struct dwc3 *dwc); +int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags); -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value); +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); /** * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index 78aff1da089..60b0f41eafc 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -73,15 +73,23 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl, TP_PROTO(struct usb_ctrlrequest 
*ctrl), TP_ARGS(ctrl), TP_STRUCT__entry( - __field(struct usb_ctrlrequest *, ctrl) + __field(__u8, bRequestType) + __field(__u8, bRequest) + __field(__le16, wValue) + __field(__le16, wIndex) + __field(__le16, wLength) ), TP_fast_assign( - __entry->ctrl = ctrl; + __entry->bRequestType = ctrl->bRequestType; + __entry->bRequest = ctrl->bRequest; + __entry->wValue = ctrl->wValue; + __entry->wIndex = ctrl->wIndex; + __entry->wLength = ctrl->wLength; ), TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d", - __entry->ctrl->bRequestType, __entry->ctrl->bRequest, - le16_to_cpu(__entry->ctrl->wValue), le16_to_cpu(__entry->ctrl->wIndex), - le16_to_cpu(__entry->ctrl->wLength) + __entry->bRequestType, __entry->bRequest, + le16_to_cpu(__entry->wValue), le16_to_cpu(__entry->wIndex), + le16_to_cpu(__entry->wLength) ) ); @@ -94,15 +102,22 @@ DECLARE_EVENT_CLASS(dwc3_log_request, TP_PROTO(struct dwc3_request *req), TP_ARGS(req), TP_STRUCT__entry( + __dynamic_array(char, name, DWC3_MSG_MAX) __field(struct dwc3_request *, req) + __field(unsigned, actual) + __field(unsigned, length) + __field(int, status) ), TP_fast_assign( + snprintf(__get_str(name), DWC3_MSG_MAX, "%s", req->dep->name); __entry->req = req; + __entry->actual = req->request.actual; + __entry->length = req->request.length; + __entry->status = req->request.status; ), TP_printk("%s: req %p length %u/%u ==> %d", - __entry->req->dep->name, __entry->req, - __entry->req->request.actual, __entry->req->request.length, - __entry->req->request.status + __get_str(name), __entry->req, __entry->actual, __entry->length, + __entry->status ) ); @@ -158,17 +173,17 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd, struct dwc3_gadget_ep_cmd_params *params), TP_ARGS(dep, cmd, params), TP_STRUCT__entry( - __field(struct dwc3_ep *, dep) + __dynamic_array(char, name, DWC3_MSG_MAX) __field(unsigned int, cmd) __field(struct dwc3_gadget_ep_cmd_params *, params) ), TP_fast_assign( - __entry->dep = dep; + snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); __entry->cmd = cmd; __entry->params = params; ), TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x\n", - __entry->dep->name, dwc3_gadget_ep_cmd_string(__entry->cmd), + __get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd), __entry->cmd, __entry->params->param0, __entry->params->param1, __entry->params->param2 ) @@ -184,16 +199,24 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb), TP_ARGS(dep, trb), TP_STRUCT__entry( - __field(struct dwc3_ep *, dep) + __dynamic_array(char, name, DWC3_MSG_MAX) __field(struct dwc3_trb *, trb) + __field(u32, bpl) + __field(u32, bph) + __field(u32, size) + __field(u32, ctrl) ), TP_fast_assign( - __entry->dep = dep; + snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); __entry->trb = trb; + __entry->bpl = trb->bpl; + __entry->bph = trb->bph; + __entry->size = trb->size; + __entry->ctrl = trb->ctrl; ), TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x\n", - __entry->dep->name, __entry->trb, __entry->trb->bph, - __entry->trb->bpl, __entry->trb->size, __entry->trb->ctrl + __get_str(name), __entry->trb, __entry->bph, __entry->bpl, + __entry->size, __entry->ctrl ) ); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index a8c18df171c..f6a51fddd5b 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -560,7 +560,7 @@ static int bos_desc(struct usb_composite_dev *cdev) usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; usb_ext->bDescriptorType = 
USB_DT_DEVICE_CAPABILITY; usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; - usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); + usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT); /* * The Superspeed USB Capability descriptor shall be implemented by all diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 6da4685490e..aad8165e98e 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -433,12 +433,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) dev_vdbg(&cdev->gadget->dev, "reset acm control interface %d\n", intf); usb_ep_disable(acm->notify); - } else { - dev_vdbg(&cdev->gadget->dev, - "init acm ctrl interface %d\n", intf); + } + + if (!acm->notify->desc) if (config_ep_by_speed(cdev->gadget, f, acm->notify)) return -EINVAL; - } + usb_ep_enable(acm->notify); acm->notify->driver_data = acm; diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c index 4d8b236ea60..c9e90de5bdd 100644 --- a/drivers/usb/gadget/function/f_eem.c +++ b/drivers/usb/gadget/function/f_eem.c @@ -325,7 +325,6 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - usb_free_all_descriptors(f); if (eem->port.out_ep) eem->port.out_ep->driver_data = NULL; if (eem->port.in_ep) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 7c6771d027a..63314ede7ba 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -647,15 +647,26 @@ static void ffs_user_copy_worker(struct work_struct *work) if (io_data->read && ret > 0) { int i; size_t pos = 0; + + /* + * Since req->length may be bigger than io_data->len (after + * being rounded up to maxpacketsize), we may end up with more + * data then user space has space for. + */ + ret = min_t(int, ret, io_data->len); + use_mm(io_data->mm); for (i = 0; i < io_data->nr_segs; i++) { + size_t len = min_t(size_t, ret - pos, + io_data->iovec[i].iov_len); + if (!len) + break; if (unlikely(copy_to_user(io_data->iovec[i].iov_base, - &io_data->buf[pos], - io_data->iovec[i].iov_len))) { + &io_data->buf[pos], len))) { ret = -EFAULT; break; } - pos += io_data->iovec[i].iov_len; + pos += len; } unuse_mm(io_data->mm); } @@ -687,7 +698,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) struct ffs_epfile *epfile = file->private_data; struct ffs_ep *ep; char *data = NULL; - ssize_t ret, data_len; + ssize_t ret, data_len = -EINVAL; int halt; /* Are we still active? */ @@ -787,13 +798,30 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) /* Fire the request */ struct usb_request *req; + /* + * Sanity Check: even though data_len can't be used + * uninitialized at the time I write this comment, some + * compilers complain about this situation. + * In order to keep the code clean from warnings, data_len is + * being initialized to -EINVAL during its declaration, which + * means we can't rely on compiler anymore to warn no future + * changes won't result in data_len being used uninitialized. + * For such reason, we're adding this redundant sanity check + * here. 
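The ffs_user_copy_worker() hunk above first clamps the completed length to what user space actually asked for (req->length may have been rounded up to a multiple of maxpacketsize) and then caps every per-iovec copy, so a rounded-up completion can never overrun the user buffers. A rough standalone model of that clamping, with plain memcpy standing in for copy_to_user and made-up sizes:

#include <stdio.h>
#include <string.h>

struct iovec_sim { char *base; size_t len; };    /* stand-in for struct iovec */

/* Scatter 'completed' bytes from 'buf' into the user segments, never copying
 * more than 'requested' bytes in total or more than a segment can hold. */
static size_t scatter(const char *buf, size_t completed, size_t requested,
                      struct iovec_sim *iov, int nr_segs)
{
    size_t pos = 0;
    int i;

    if (completed > requested)    /* hardware rounded the transfer up */
        completed = requested;

    for (i = 0; i < nr_segs; i++) {
        size_t len = completed - pos;

        if (len > iov[i].len)
            len = iov[i].len;
        if (!len)
            break;
        memcpy(iov[i].base, &buf[pos], len);
        pos += len;
    }
    return pos;
}

int main(void)
{
    char hw[512] = "0123456789abcdef";    /* pretend 512 bytes completed */
    char a[8], b[8];
    struct iovec_sim iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

    /* user asked for 12 bytes; only 12 of the 512 may be copied out */
    printf("copied %zu bytes\n", scatter(hw, 512, 12, iov, 2));
    return 0;
}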
+ */ + if (unlikely(data_len == -EINVAL)) { + WARN(1, "%s: data_len == -EINVAL\n", __func__); + ret = -EINVAL; + goto error_lock; + } + if (io_data->aio) { req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); if (unlikely(!req)) goto error_lock; req->buf = data; - req->length = io_data->len; + req->length = data_len; io_data->buf = data; io_data->ep = ep->ep; @@ -815,7 +843,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) req = ep->req; req->buf = data; - req->length = io_data->len; + req->length = data_len; req->context = &done; req->complete = ffs_epfile_io_complete; @@ -2663,8 +2691,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f, func->conf = c; func->gadget = c->cdev->gadget; - ffs_data_get(func->ffs); - /* * in drivers/usb/gadget/configfs.c:configfs_composite_bind() * configurations are bound in sequence with list_for_each_entry, diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index a95290a1289..59ab62c92b6 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -621,12 +621,14 @@ static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f) dev = MKDEV(major, hidg->minor); status = cdev_add(&hidg->cdev, dev, 1); if (status) - goto fail; + goto fail_free_descs; device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor); return 0; +fail_free_descs: + usb_free_all_descriptors(f); fail: ERROR(f->config->cdev, "hidg_bind FAILED\n"); if (hidg->req != NULL) { @@ -635,7 +637,6 @@ fail: usb_ep_free_request(hidg->in_ep, hidg->req); } - usb_free_all_descriptors(f); return status; } diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index bf04389137e..298b46112b1 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c @@ -253,22 +253,13 @@ static void loopback_complete(struct usb_ep *ep, struct usb_request *req) case 0: /* normal completion? 
*/ if (ep == loop->out_ep) { - /* loop this OUT packet back IN to the host */ req->zero = (req->actual < req->length); req->length = req->actual; - status = usb_ep_queue(loop->in_ep, req, GFP_ATOMIC); - if (status == 0) - return; - - /* "should never get here" */ - ERROR(cdev, "can't loop %s to %s: %d\n", - ep->name, loop->in_ep->name, - status); } /* queue the buffer for some later OUT packet */ req->length = buflen; - status = usb_ep_queue(loop->out_ep, req, GFP_ATOMIC); + status = usb_ep_queue(ep, req, GFP_ATOMIC); if (status == 0) return; @@ -308,60 +299,66 @@ static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len) return alloc_ep_req(ep, len, buflen); } -static int -enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop) +static int enable_endpoint(struct usb_composite_dev *cdev, struct f_loopback *loop, + struct usb_ep *ep) { - int result = 0; - struct usb_ep *ep; struct usb_request *req; unsigned i; + int result; - /* one endpoint writes data back IN to the host */ - ep = loop->in_ep; + /* + * one endpoint writes data back IN to the host while another endpoint + * just reads OUT packets + */ result = config_ep_by_speed(cdev->gadget, &(loop->function), ep); if (result) - return result; + goto fail0; result = usb_ep_enable(ep); if (result < 0) - return result; - ep->driver_data = loop; - - /* one endpoint just reads OUT packets */ - ep = loop->out_ep; - result = config_ep_by_speed(cdev->gadget, &(loop->function), ep); - if (result) goto fail0; - - result = usb_ep_enable(ep); - if (result < 0) { -fail0: - ep = loop->in_ep; - usb_ep_disable(ep); - ep->driver_data = NULL; - return result; - } ep->driver_data = loop; - /* allocate a bunch of read buffers and queue them all at once. + /* + * allocate a bunch of read buffers and queue them all at once. * we buffer at most 'qlen' transfers; fewer if any need more * than 'buflen' bytes each. 
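The enable_endpoint() helper introduced in the hunk above (and completed just below, with its fail0/fail1 labels) uses the kernel's usual goto-based unwind: failures before the endpoint is enabled simply return, while failures after it jump to a label that disables the endpoint again, so only the steps already completed are undone. A small self-contained illustration of that ordering, with stub init/cleanup functions rather than the gadget API:

#include <stdio.h>

static int config_ok = 1, enable_ok = 1, queue_ok = 0;    /* force a late failure */

static int do_config(void)    { puts("config");  return config_ok ? 0 : -1; }
static int do_enable(void)    { puts("enable");  return enable_ok ? 0 : -1; }
static int do_queue(void)     { puts("queue");   return queue_ok  ? 0 : -1; }
static void undo_enable(void) { puts("disable"); }

static int enable_one(void)
{
    int result;

    result = do_config();
    if (result)
        goto fail0;            /* nothing to undo yet */

    result = do_enable();
    if (result)
        goto fail0;

    result = do_queue();
    if (result)
        goto fail1;            /* endpoint was enabled, so disable it */

    return 0;

fail1:
    undo_enable();
fail0:
    return result;
}

int main(void)
{
    printf("enable_one() = %d\n", enable_one());
    return 0;
}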
*/ for (i = 0; i < qlen && result == 0; i++) { req = lb_alloc_ep_req(ep, 0); - if (req) { - req->complete = loopback_complete; - result = usb_ep_queue(ep, req, GFP_ATOMIC); - if (result) - ERROR(cdev, "%s queue req --> %d\n", - ep->name, result); - } else { - usb_ep_disable(ep); - ep->driver_data = NULL; - result = -ENOMEM; - goto fail0; + if (!req) + goto fail1; + + req->complete = loopback_complete; + result = usb_ep_queue(ep, req, GFP_ATOMIC); + if (result) { + ERROR(cdev, "%s queue req --> %d\n", + ep->name, result); + goto fail1; } } + return 0; + +fail1: + usb_ep_disable(ep); + +fail0: + return result; +} + +static int +enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop) +{ + int result = 0; + + result = enable_endpoint(cdev, loop, loop->in_ep); + if (result) + return result; + + result = enable_endpoint(cdev, loop, loop->out_ep); + if (result) + return result; + DBG(cdev, "%s enabled\n", loop->function.name); return result; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 146f48cc65d..16361b0a8b4 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1461,7 +1461,6 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - usb_free_all_descriptors(f); if (ncm->notify_req) { kfree(ncm->notify_req->buf); usb_ep_free_request(ncm->notify, ncm->notify_req); diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c index 5f40080c92c..a1b79c53499 100644 --- a/drivers/usb/gadget/function/f_obex.c +++ b/drivers/usb/gadget/function/f_obex.c @@ -35,6 +35,7 @@ struct f_obex { struct gserial port; u8 ctrl_id; u8 data_id; + u8 cur_alt; u8 port_num; u8 can_activate; }; @@ -235,6 +236,8 @@ static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } else goto fail; + obex->cur_alt = alt; + return 0; fail: @@ -245,10 +248,7 @@ static int obex_get_alt(struct usb_function *f, unsigned intf) { struct f_obex *obex = func_to_obex(f); - if (intf == obex->ctrl_id) - return 0; - - return obex->port.in->driver_data ? 
1 : 0; + return obex->cur_alt; } static void obex_disable(struct usb_function *f) @@ -397,7 +397,6 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - usb_free_all_descriptors(f); /* we might as well release our claims on endpoints */ if (obex->port.out) obex->port.out->driver_data = NULL; diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index b9cfc1571d7..1ec8b7ffdcc 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c @@ -570,8 +570,8 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f) err_req: for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); -err: usb_free_all_descriptors(f); +err: if (fp->out_ep) fp->out_ep->driver_data = NULL; if (fp->in_ep) diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index ddb09dc6d1f..f13fc6a5856 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -802,8 +802,10 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) if (rndis->manufacturer && rndis->vendorID && rndis_set_param_vendor(rndis->config, rndis->vendorID, - rndis->manufacturer)) - goto fail; + rndis->manufacturer)) { + status = -EINVAL; + goto fail_free_descs; + } /* NOTE: all that is done without knowing or caring about * the network link ... which is unavailable to this code @@ -817,10 +819,11 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis->notify->name); return 0; +fail_free_descs: + usb_free_all_descriptors(f); fail: kfree(f->os_desc_table); f->os_desc_n = 0; - usb_free_all_descriptors(f); if (rndis->notify_req) { kfree(rndis->notify_req->buf); diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c index 1ea8baf3333..e3dfa675ff0 100644 --- a/drivers/usb/gadget/function/f_subset.c +++ b/drivers/usb/gadget/function/f_subset.c @@ -380,7 +380,6 @@ geth_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: - usb_free_all_descriptors(f); /* we might as well release our claims on endpoints */ if (geth->port.out_ep) geth->port.out_ep->driver_data = NULL; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index a5a27a504d6..33e16658e5c 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -512,6 +512,11 @@ static int snd_uac2_remove(struct platform_device *pdev) return 0; } +static void snd_uac2_release(struct device *dev) +{ + dev_dbg(dev, "releasing '%s'\n", dev_name(dev)); +} + static int alsa_uac2_init(struct audio_dev *agdev) { struct snd_uac2_chip *uac2 = &agdev->uac2; @@ -523,6 +528,7 @@ static int alsa_uac2_init(struct audio_dev *agdev) uac2->pdev.id = 0; uac2->pdev.name = uac2_name; + uac2->pdev.dev.release = snd_uac2_release; /* Register snd_uac2 driver */ err = platform_driver_register(&uac2->pdrv); @@ -772,6 +778,7 @@ struct usb_endpoint_descriptor fs_epout_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1023), .bInterval = 1, }; @@ -780,6 +787,7 @@ struct usb_endpoint_descriptor hs_epout_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1024), .bInterval = 4, }; @@ -847,6 +855,7 @@ struct usb_endpoint_descriptor fs_epin_desc = { 
.bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1023), .bInterval = 1, }; @@ -855,6 +864,7 @@ struct usb_endpoint_descriptor hs_epin_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1024), .bInterval = 4, }; @@ -947,6 +957,9 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep) struct snd_uac2_chip *uac2 = prm->uac2; int i; + if (!prm->ep_enabled) + return; + prm->ep_enabled = false; for (i = 0; i < USB_XFERS; i++) { @@ -1071,7 +1084,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); if (!prm->rbuf) { prm->max_psize = 0; - goto err; + goto err_free_descs; } prm = &agdev->uac2.p_prm; @@ -1079,17 +1092,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); if (!prm->rbuf) { prm->max_psize = 0; - goto err; + goto err_free_descs; } ret = alsa_uac2_init(agdev); if (ret) - goto err; + goto err_free_descs; return 0; + +err_free_descs: + usb_free_all_descriptors(fn); err: kfree(agdev->uac2.p_prm.rbuf); kfree(agdev->uac2.c_prm.rbuf); - usb_free_all_descriptors(fn); if (agdev->in_ep) agdev->in_ep->driver_data = NULL; if (agdev->out_ep) diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index e126439e4b6..945b3bd2ca9 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -279,27 +279,41 @@ uvc_function_get_alt(struct usb_function *f, unsigned interface) else if (interface != uvc->streaming_intf) return -EINVAL; else - return uvc->state == UVC_STATE_STREAMING ? 1 : 0; + return uvc->video.ep->driver_data ? 
1 : 0; } static int uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) { struct uvc_device *uvc = to_uvc(f); + struct usb_composite_dev *cdev = f->config->cdev; struct v4l2_event v4l2_event; struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; int ret; - INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt); + INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt); if (interface == uvc->control_intf) { if (alt) return -EINVAL; + if (uvc->control_ep->driver_data) { + INFO(cdev, "reset UVC Control\n"); + usb_ep_disable(uvc->control_ep); + uvc->control_ep->driver_data = NULL; + } + + if (!uvc->control_ep->desc) + if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep)) + return -EINVAL; + + usb_ep_enable(uvc->control_ep); + uvc->control_ep->driver_data = uvc; + if (uvc->state == UVC_STATE_DISCONNECTED) { memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_CONNECT; - uvc_event->speed = f->config->cdev->gadget->speed; + uvc_event->speed = cdev->gadget->speed; v4l2_event_queue(uvc->vdev, &v4l2_event); uvc->state = UVC_STATE_CONNECTED; @@ -321,8 +335,10 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) if (uvc->state != UVC_STATE_STREAMING) return 0; - if (uvc->video.ep) + if (uvc->video.ep) { usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; + } memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_STREAMOFF; @@ -335,14 +351,22 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) if (uvc->state != UVC_STATE_CONNECTED) return 0; - if (uvc->video.ep) { - ret = config_ep_by_speed(f->config->cdev->gadget, - &(uvc->func), uvc->video.ep); - if (ret) - return ret; - usb_ep_enable(uvc->video.ep); + if (!uvc->video.ep) + return -EINVAL; + + if (uvc->video.ep->driver_data) { + INFO(cdev, "reset UVC\n"); + usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; } + ret = config_ep_by_speed(f->config->cdev->gadget, + &(uvc->func), uvc->video.ep); + if (ret) + return ret; + usb_ep_enable(uvc->video.ep); + uvc->video.ep->driver_data = uvc; + memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_STREAMON; v4l2_event_queue(uvc->vdev, &v4l2_event); @@ -366,6 +390,16 @@ uvc_function_disable(struct usb_function *f) v4l2_event_queue(uvc->vdev, &v4l2_event); uvc->state = UVC_STATE_DISCONNECTED; + + if (uvc->video.ep->driver_data) { + usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; + } + + if (uvc->control_ep->driver_data) { + usb_ep_disable(uvc->control_ep); + uvc->control_ep->driver_data = NULL; + } } /* -------------------------------------------------------------------------- diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index c3e1f27dbbe..9cb86bc1a9a 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -352,7 +352,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable) if (!enable) { for (i = 0; i < UVC_NUM_REQUESTS; ++i) - usb_ep_dequeue(video->ep, video->req[i]); + if (video->req[i]) + usb_ep_dequeue(video->ep, video->req[i]); uvc_video_free_requests(video); uvcg_queue_enable(&video->queue, 0); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 3ea287b0e44..217365d35a2 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -357,6 +357,7 @@ config USB_EG20T config USB_GADGET_XILINX tristate "Xilinx USB Driver" + depends on 
HAS_DMA depends on OF || COMPILE_TEST help USB peripheral controller driver for Xilinx USB2 device. diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index f107bb60a5a..f2054659f25 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -507,6 +507,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev, { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + if (!udc->driver) { + dev_err(dev, "soft-connect without a gadget driver\n"); + return -EOPNOTSUPP; + } + if (sysfs_streq(buf, "connect")) { usb_gadget_udc_start(udc->gadget, udc->driver); usb_gadget_connect(udc->gadget); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index a8a30b1d416..a3ca1375dd5 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -234,7 +234,7 @@ config USB_EHCI_SH config USB_EHCI_EXYNOS tristate "EHCI support for Samsung S5P/EXYNOS SoC Series" - depends on PLAT_S5P || ARCH_EXYNOS + depends on ARCH_S5PV210 || ARCH_EXYNOS help Enable support for the Samsung Exynos SOC's on-chip EHCI controller. @@ -550,7 +550,7 @@ config USB_OHCI_SH config USB_OHCI_EXYNOS tristate "OHCI support for Samsung S5P/EXYNOS SoC Series" - depends on PLAT_S5P || ARCH_EXYNOS + depends on ARCH_S5PV210 || ARCH_EXYNOS help Enable support for the Samsung Exynos SOC's on-chip OHCI controller. diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index d0d8fadf706..1db0626c8bf 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c @@ -607,7 +607,7 @@ found: wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100) dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", - le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8, + (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8, le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff); result = 0; error: diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 696160d48ae..388cfd83b6b 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -22,7 +22,6 @@ #include <linux/slab.h> -#include <linux/device.h> #include <asm/unaligned.h> #include "xhci.h" @@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME * is enabled, so also enable remote wake here. 
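The one-character hwa-hc.c fix above is a C precedence trap: '>>' binds tighter than '&', so 'ver & 0xff00 >> 8' parses as 'ver & (0xff00 >> 8)' and extracts the low byte instead of the major version. A tiny standalone check of both forms, using an arbitrary BCD-coded version value:

#include <stdio.h>

int main(void)
{
    unsigned int bcd = 0x0102;    /* BCD version 1.02, example value only */

    /* buggy: parsed as bcd & (0xff00 >> 8), i.e. bcd & 0x00ff */
    printf("wrong major: %u\n", bcd & 0xff00 >> 8);
    /* fixed: mask first, then shift */
    printf("right major: %u\n", (bcd & 0xff00) >> 8);
    printf("minor:       %u\n", bcd & 0x00ff);
    return 0;
}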
*/ - if (hcd->self.root_hub->do_remote_wakeup - && device_may_wakeup(hcd->self.controller)) { - + if (hcd->self.root_hub->do_remote_wakeup) { if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 280dde93abe..142b601f956 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -128,20 +128,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) { - /* Workaround for occasional spurious wakeups from S5 (or - * any other sleep) on Haswell machines with LPT and LPT-LP - * with the new Intel BIOS - */ - /* Limit the quirk to only known vendors, as this triggers - * yet another BIOS bug on some other machines - * https://bugzilla.kernel.org/show_bug.cgi?id=66171 - */ - if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) - xhci->quirks |= XHCI_SPURIOUS_WAKEUP; - } - if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; } @@ -162,6 +148,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x1042) + xhci->quirks |= XHCI_BROKEN_STREAMS; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); @@ -291,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) if (xhci->quirks & XHCI_COMP_MODE_QUIRK) pdev->no_d3cold = true; - return xhci_suspend(xhci); + return xhci_suspend(xhci, do_wakeup); } static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3d78b0cd674..646300cbe5f 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - return xhci_suspend(xhci); + /* + * xhci_suspend() needs `do_wakeup` to know whether host is allowed + * to do wakeup during suspend. Since xhci_plat_suspend is currently + * only designed for system suspend, device_may_wakeup() is enough + * to dertermine whether host is allowed to do wakeup. Need to + * reconsider this when xhci_plat_suspend enlarges its scope, e.g., + * also applies to runtime suspend. 
+ */ + return xhci_suspend(xhci, device_may_wakeup(dev)); } static int xhci_plat_resume(struct device *dev) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index bc6fcbc16f6..06433aec81d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, false); xhci_ring_cmd_db(xhci); } else { - /* Clear our internal halted state and restart the ring(s) */ + /* Clear our internal halted state */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } } @@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, ep->stopped_td = td; return 0; } else { - if (trb_comp_code == COMP_STALL) { - /* The transfer is completed from the driver's - * perspective, but we need to issue a set dequeue - * command for this stalled endpoint to move the dequeue - * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. Let the - * USB class driver clear the stall later. - */ - ep->stopped_td = td; - ep->stopped_stream = ep_ring->stream_id; - } else if (xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) { - /* Other types of errors halt the endpoint, but the - * class driver doesn't call usb_reset_endpoint() unless - * the error is -EPIPE. Clear the halted status in the - * xHCI hardware manually. + if (trb_comp_code == COMP_STALL || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + /* Issue a reset endpoint command to clear the host side + * halt, followed by a set dequeue command to move the + * dequeue pointer past the TD. + * The class driver clears the device side halt later. */ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, @@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, else td->urb->actual_length = 0; - xhci_cleanup_halted_endpoint(xhci, - slot_id, ep_index, 0, td, event_trb); - return finish_td(xhci, td, event_trb, event, ep, status, true); + return finish_td(xhci, td, event_trb, event, ep, status, false); } /* * Did we transfer any data, despite the errors that might have @@ -2519,17 +2507,8 @@ cleanup: if (ret) { urb = td->urb; urb_priv = urb->hcpriv; - /* Leave the TD around for the reset endpoint function - * to use(but only if it's not a control endpoint, - * since we already queued the Set TR dequeue pointer - * command for stalled control endpoints). 
- */ - if (usb_endpoint_xfer_control(&urb->ep->desc) || - (trb_comp_code != COMP_STALL && - trb_comp_code != COMP_BABBLE)) - xhci_urb_free_priv(xhci, urb_priv); - else - kfree(urb_priv); + + xhci_urb_free_priv(xhci, urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2a5d45b4cb1..033b46c470b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -35,6 +35,8 @@ #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) + /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ static int link_quirk; module_param(link_quirk, int, S_IRUGO | S_IWUSR); @@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } +static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +{ + int port_index; + __le32 __iomem **port_array; + unsigned long flags; + u32 t1, t2; + + spin_lock_irqsave(&xhci->lock, flags); + + /* disble usb3 ports Wake bits*/ + port_index = xhci->num_usb3_ports; + port_array = xhci->usb3_ports; + while (port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + /* disble usb2 ports Wake bits*/ + port_index = xhci->num_usb2_ports; + port_array = xhci->usb2_ports; + while (port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + spin_unlock_irqrestore(&xhci->lock, flags); +} + /* * Stop HC (not bus-specific) * * This is called when the machine transition into S3/S4 mode. * */ -int xhci_suspend(struct xhci_hcd *xhci) +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; unsigned int delay = XHCI_MAX_HALT_USEC; @@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci) xhci->shared_hcd->state != HC_STATE_SUSPENDED) return -EINVAL; + /* Clear root port wake on bits if wakeup not allowed. */ + if (!do_wakeup) + xhci_disable_port_wake_on_bits(xhci); + /* Don't poll the roothubs on bus suspend. */ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, } } -/* Deal with stalled endpoints. The core should have sent the control message - * to clear the halt condition. However, we need to make the xHCI hardware - * reset its sequence number, since a device will expect a sequence number of - * zero after the halt condition is cleared. +/* Called when clearing halted device. The core should have sent the control + * message to clear the device halt condition. The host side of the halt should + * already be cleared with a reset endpoint command issued when the STALL tx + * event was received. 
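The new xhci_disable_port_wake_on_bits() above walks every root-hub port register and strips PORT_WAKE_BITS with a read-modify-write, writing back only when the value actually changes. The standalone sketch below models that loop over an in-memory array; the bit positions are placeholders, and the real function additionally passes each value through xhci_port_state_to_neutral() before masking, which is omitted here.

#include <stdio.h>
#include <stdint.h>

#define WK_OC   (1u << 0)    /* placeholder bit positions, not the xHCI layout */
#define WK_DISC (1u << 1)
#define WK_CONN (1u << 2)
#define WAKE_BITS (WK_OC | WK_DISC | WK_CONN)

static void disable_wake_bits(uint32_t *ports, int nports)
{
    while (nports--) {
        uint32_t t1 = ports[nports];
        uint32_t t2 = t1 & ~WAKE_BITS;

        if (t1 != t2)        /* skip the write when nothing changes */
            ports[nports] = t2;
    }
}

int main(void)
{
    uint32_t ports[3] = { 0xf0 | WK_CONN, 0xf0, 0xf0 | WK_OC | WK_DISC };
    int i;

    disable_wake_bits(ports, 3);
    for (i = 0; i < 3; i++)
        printf("port %d: %#x\n", i, (unsigned)ports[i]);
    return 0;
}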
+ * * Context: in_interrupt */ + void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct usb_device *udev; - unsigned int ep_index; - unsigned long flags; - int ret; - struct xhci_virt_ep *virt_ep; - struct xhci_command *command; xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - /* Called with a root hub endpoint (or an endpoint that wasn't added - * with xhci_add_endpoint() - */ - if (!ep->hcpriv) - return; - ep_index = xhci_get_endpoint_index(&ep->desc); - virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; - if (!virt_ep->stopped_td) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Endpoint 0x%x not halted, refusing to reset.", - ep->desc.bEndpointAddress); - return; - } - if (usb_endpoint_xfer_control(&ep->desc)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Control endpoint stall already handled."); - return; - } - command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); - if (!command) - return; - - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Queueing reset endpoint command"); - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index); /* - * Can't change the ring dequeue pointer until it's transitioned to the - * stopped state, which is only upon a successful reset endpoint - * command. Better hope that last command worked! + * We might need to implement the config ep cmd in xhci 4.8.1 note: + * The Reset Endpoint Command may only be issued to endpoints in the + * Halted state. If software wishes to reset the Data Toggle or Sequence + * Number of an endpoint that isn't in the Halted state, then software + * may issue a Configure Endpoint Command with the Drop and Add bits set + * for the target endpoint that is in the Stopped state.
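The rewritten comment above paraphrases the xHCI note it cites (4.8.1): Reset Endpoint is only legal for an endpoint in the Halted state, while an endpoint that is merely Stopped would need a Configure Endpoint command with the Drop and Add bits set to clear its data toggle or sequence number. A rough sketch of that decision; the enum and command strings are invented for illustration, not the driver's types.

#include <stdio.h>

enum ep_state { EP_RUNNING, EP_STOPPED, EP_HALTED };    /* simplified */

static const char *toggle_reset_cmd(enum ep_state st)
{
    switch (st) {
    case EP_HALTED:
        return "Reset Endpoint";                  /* only legal while halted */
    case EP_STOPPED:
        return "Configure Endpoint (Drop+Add)";
    default:
        return "stop the endpoint first";
    }
}

int main(void)
{
    printf("halted:  %s\n", toggle_reset_cmd(EP_HALTED));
    printf("stopped: %s\n", toggle_reset_cmd(EP_STOPPED));
    return 0;
}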
*/ - if (!ret) { - xhci_cleanup_stalled_ring(xhci, udev, ep_index); - kfree(virt_ep->stopped_td); - xhci_ring_cmd_db(xhci); - } - virt_ep->stopped_td = NULL; - virt_ep->stopped_stream = 0; - spin_unlock_irqrestore(&xhci->lock, flags); - if (ret) - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); + /* For now just print debug to follow the situation */ + xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", + ep->desc.bEndpointAddress); } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index df76d642e71..d745715a1e2 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *)); #ifdef CONFIG_PM -int xhci_suspend(struct xhci_hcd *xhci); +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); int xhci_resume(struct xhci_hcd *xhci, bool hibernated); #else #define xhci_suspend NULL diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index acdfb3e68a9..5a9b977fbc1 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -209,7 +209,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer) } } - if (!list_empty(&controller->early_tx_list)) { + if (!list_empty(&controller->early_tx_list) && + !hrtimer_is_queued(&controller->early_tx)) { ret = HRTIMER_RESTART; hrtimer_forward_now(&controller->early_tx, ktime_set(0, 20 * NSEC_PER_USEC)); diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 154bcf1b5df..48bc09e7b83 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -868,9 +868,15 @@ static int dsps_suspend(struct device *dev) struct dsps_glue *glue = dev_get_drvdata(dev); const struct dsps_musb_wrapper *wrp = glue->wrp; struct musb *musb = platform_get_drvdata(glue->musb); - void __iomem *mbase = musb->ctrl_base; + void __iomem *mbase; del_timer_sync(&glue->timer); + + if (!musb) + /* This can happen if the musb device is in -EPROBE_DEFER */ + return 0; + + mbase = musb->ctrl_base; glue->context.control = dsps_readl(mbase, wrp->control); glue->context.epintr = dsps_readl(mbase, wrp->epintr_set); glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set); @@ -887,8 +893,12 @@ static int dsps_resume(struct device *dev) struct dsps_glue *glue = dev_get_drvdata(dev); const struct dsps_musb_wrapper *wrp = glue->wrp; struct musb *musb = platform_get_drvdata(glue->musb); - void __iomem *mbase = musb->ctrl_base; + void __iomem *mbase; + + if (!musb) + return 0; + mbase = musb->ctrl_base; dsps_writel(mbase, wrp->control, glue->context.control); dsps_writel(mbase, wrp->epintr_set, glue->context.epintr); dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr); @@ -896,7 +906,9 @@ static int dsps_resume(struct device *dev) dsps_writel(mbase, wrp->mode, glue->context.mode); dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode); dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode); - setup_timer(&glue->timer, otg_timer, (unsigned long) musb); + if (musb->xceiv->state == OTG_STATE_B_IDLE && + musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) + mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ); return 0; } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index eca1747ca8c..6c4eb3cf5ef 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -120,6 +120,7 @@ static 
const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ @@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index dc72b924c39..1ebb351b9e9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -140,6 +140,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report. */ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, @@ -469,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, + { USB_DEVICE(MTXORB_VID, 
MTXORB_FTDI_RANGE_931C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, @@ -661,6 +695,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5937b2d242f..e52409c9be9 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -30,6 +30,12 @@ /*** third-party PIDs (using FTDI_VID) ***/ +/* + * Certain versions of the official Windows FTDI driver reprogrammed + * counterfeit FTDI devices to PID 0. Support these devices anyway. + */ +#define FTDI_BRICK_PID 0x0000 + #define FTDI_LUMEL_PD12_PID 0x6002 /* @@ -143,8 +149,12 @@ * Xsens Technologies BV products (http://www.xsens.com). */ #define XSENS_VID 0x2639 -#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ +#define XSENS_AWINDA_STATION_PID 0x0101 +#define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ +#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ + +/* Xsens devices using FTDI VID */ #define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */ #define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */ #define XSENS_CONVERTER_2_PID 0xD38A @@ -916,8 +926,8 @@ #define BAYER_CONTOUR_CABLE_PID 0x6001 /* - * The following are the values for the Matrix Orbital FTDI Range - * Anything in this range will use an FT232RL. + * Matrix Orbital Intelligent USB displays. 
+ * http://www.matrixorbital.com */ #define MTXORB_VID 0x1B3D #define MTXORB_FTDI_RANGE_0100_PID 0x0100 @@ -1176,8 +1186,39 @@ #define MTXORB_FTDI_RANGE_01FD_PID 0x01FD #define MTXORB_FTDI_RANGE_01FE_PID 0x01FE #define MTXORB_FTDI_RANGE_01FF_PID 0x01FF - - +#define MTXORB_FTDI_RANGE_4701_PID 0x4701 +#define MTXORB_FTDI_RANGE_9300_PID 0x9300 +#define MTXORB_FTDI_RANGE_9301_PID 0x9301 +#define MTXORB_FTDI_RANGE_9302_PID 0x9302 +#define MTXORB_FTDI_RANGE_9303_PID 0x9303 +#define MTXORB_FTDI_RANGE_9304_PID 0x9304 +#define MTXORB_FTDI_RANGE_9305_PID 0x9305 +#define MTXORB_FTDI_RANGE_9306_PID 0x9306 +#define MTXORB_FTDI_RANGE_9307_PID 0x9307 +#define MTXORB_FTDI_RANGE_9308_PID 0x9308 +#define MTXORB_FTDI_RANGE_9309_PID 0x9309 +#define MTXORB_FTDI_RANGE_930A_PID 0x930A +#define MTXORB_FTDI_RANGE_930B_PID 0x930B +#define MTXORB_FTDI_RANGE_930C_PID 0x930C +#define MTXORB_FTDI_RANGE_930D_PID 0x930D +#define MTXORB_FTDI_RANGE_930E_PID 0x930E +#define MTXORB_FTDI_RANGE_930F_PID 0x930F +#define MTXORB_FTDI_RANGE_9310_PID 0x9310 +#define MTXORB_FTDI_RANGE_9311_PID 0x9311 +#define MTXORB_FTDI_RANGE_9312_PID 0x9312 +#define MTXORB_FTDI_RANGE_9313_PID 0x9313 +#define MTXORB_FTDI_RANGE_9314_PID 0x9314 +#define MTXORB_FTDI_RANGE_9315_PID 0x9315 +#define MTXORB_FTDI_RANGE_9316_PID 0x9316 +#define MTXORB_FTDI_RANGE_9317_PID 0x9317 +#define MTXORB_FTDI_RANGE_9318_PID 0x9318 +#define MTXORB_FTDI_RANGE_9319_PID 0x9319 +#define MTXORB_FTDI_RANGE_931A_PID 0x931A +#define MTXORB_FTDI_RANGE_931B_PID 0x931B +#define MTXORB_FTDI_RANGE_931C_PID 0x931C +#define MTXORB_FTDI_RANGE_931D_PID 0x931D +#define MTXORB_FTDI_RANGE_931E_PID 0x931E +#define MTXORB_FTDI_RANGE_931F_PID 0x931F /* * The Mobility Lab (TML) diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 93cb7cebda6..077c714f128 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb) if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err */ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) - tty_insert_flip_char(&port->port, data[i], err); + tty_insert_flip_char(&port->port, data[i], + TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb) } else { /* some bytes had errors, every byte has status */ for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + 
TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb) */ for (x = 0; x + 1 < len && i + 1 < urb->actual_length; x += 2) { - int stat = data[i], flag = 0; + int stat = data[i]; + int flag = TTY_NORMAL; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); i += 2; @@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb) if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err*/ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) tty_insert_flip_char(&port->port, - data[i], err); + data[i], TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char( + &port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 078f9ed419c..02c420af251 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, port->interrupt_out_urb->transfer_buffer_length = length; priv->cur_pos = priv->cur_pos + length; - result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO); + result = usb_submit_urb(port->interrupt_out_urb, + GFP_ATOMIC); dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result); todo = priv->filled - priv->cur_pos; @@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID || priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) { result = usb_submit_urb(port->interrupt_in_urb, - GFP_NOIO); + GFP_ATOMIC); dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result); } } @@ -414,8 +415,6 @@ static int kobil_tiocmset(struct tty_struct *tty, int result; int dtr = 0; int rts = 0; - unsigned char *transfer_buffer; - int transfer_buffer_length = 8; /* FIXME: locking ? 
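In the keyspan.c hunks above the error flags are no longer OR-ed together (the TTY_* values are not bit flags): an overrun is reported as a separate NUL byte flagged TTY_OVERRUN, and each data byte gets exactly one flag, with parity taking precedence over framing. A standalone model of that per-byte handling, using stand-in constants and a print function in place of tty_insert_flip_char():

#include <stdio.h>

#define RX_OVERRUN 0x01    /* stand-in status bits */
#define RX_FRAMING 0x02
#define RX_PARITY  0x04

enum flag { FLAG_NORMAL, FLAG_OVERRUN, FLAG_PARITY, FLAG_FRAME };

static void push(unsigned char ch, enum flag f)
{
    printf("byte %#04x flag %d\n", (unsigned)ch, (int)f);
}

/* one status byte + one data byte, as in the usa26/usa49/usa90 callbacks */
static void handle_pair(unsigned char stat, unsigned char data)
{
    enum flag f = FLAG_NORMAL;

    if (stat & RX_OVERRUN)
        push(0, FLAG_OVERRUN);    /* overrun becomes its own marker byte */
    if (stat & RX_PARITY)
        f = FLAG_PARITY;          /* parity wins over framing */
    else if (stat & RX_FRAMING)
        f = FLAG_FRAME;

    push(data, f);
}

int main(void)
{
    handle_pair(RX_OVERRUN | RX_FRAMING, 'A');
    handle_pair(RX_PARITY, 'B');
    return 0;
}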
*/ priv = usb_get_serial_port_data(port); @@ -425,11 +424,6 @@ static int kobil_tiocmset(struct tty_struct *tty, return -EINVAL; } - /* allocate memory for transfer buffer */ - transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL); - if (!transfer_buffer) - return -ENOMEM; - if (set & TIOCM_RTS) rts = 1; if (set & TIOCM_DTR) @@ -469,7 +463,6 @@ static int kobil_tiocmset(struct tty_struct *tty, KOBIL_TIMEOUT); } dev_dbg(dev, "%s - Send set_status_line URB returns: %i\n", __func__, result); - kfree(transfer_buffer); return (result < 0) ? result : 0; } @@ -530,8 +523,6 @@ static int kobil_ioctl(struct tty_struct *tty, { struct usb_serial_port *port = tty->driver_data; struct kobil_private *priv = usb_get_serial_port_data(port); - unsigned char *transfer_buffer; - int transfer_buffer_length = 8; int result; if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || @@ -541,10 +532,6 @@ static int kobil_ioctl(struct tty_struct *tty, switch (cmd) { case TCFLSH: - transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL); - if (!transfer_buffer) - return -ENOBUFS; - result = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), SUSBCRequest_Misc, @@ -559,7 +546,6 @@ static int kobil_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s - Send reset_all_queues (FLUSH) URB returns: %i\n", __func__, result); - kfree(transfer_buffer); return (result < 0) ? -EIO: 0; default: return -ENOIOCTLCMD; diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 4856fb7e637..4b7bfb394a3 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, /* The connected devices do not have a bulk write endpoint, * to transmit data to de barcode device the control endpoint is used */ - dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); + dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); if (!dr) { count = -ENOMEM; goto error_no_dr; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d1a3f6044c8..7a4c21b4f67 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_DE910_DUAL 0x1010 #define TELIT_PRODUCT_UE910_V2 0x1012 #define TELIT_PRODUCT_LE920 0x1200 +#define TELIT_PRODUCT_LE910 0x1201 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -362,6 +363,7 @@ static void option_instat_callback(struct urb *urb); /* Haier products */ #define HAIER_VENDOR_ID 0x201e +#define HAIER_PRODUCT_CE81B 0x10f8 #define HAIER_PRODUCT_CE100 0x2009 /* Cinterion (formerly Siemens) products */ @@ -589,6 +591,11 @@ static const struct option_blacklist_info zte_1255_blacklist = { .reserved = BIT(3) | BIT(4), }; +static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +}; + static const struct option_blacklist_info telit_le920_blacklist = { .sendsetup = BIT(0), .reserved = BIT(1) | BIT(5), @@ -1138,6 +1145,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ @@ -1621,6 +1630,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, + { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) }, /* Pirelli */ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) }, diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index a7fe664b6b7..70a098de429 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, if (*tty_flag == TTY_NORMAL) *tty_flag = TTY_FRAME; } - if (lsr & UART_LSR_OE){ + if (lsr & UART_LSR_OE) { port->icount.overrun++; - if (*tty_flag == TTY_NORMAL) - *tty_flag = TTY_OVERRUN; + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } @@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb) if ((len >= 4) && (packet[0] == 0x1b) && (packet[1] == 0x1b) && ((packet[2] == 0x00) || (packet[2] == 0x01))) { - if (packet[2] == 0x00) { + if (packet[2] == 0x00) ssu100_update_lsr(port, packet[3], &flag); - if (flag == TTY_OVERRUN) - tty_insert_flip_char(&port->port, 0, - TTY_OVERRUN); - } if (packet[2] == 0x01) ssu100_update_msr(port, packet[3]); diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 4bc2fc98636..73f125e0cb5 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us) us->iobuf[0] = 0x1; result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, - 0x01, 0x0, us->iobuf, 0x1, USB_CTRL_SET_TIMEOUT); + 0x01, 0x0, us->iobuf, 0x1, 5 * HZ); usb_stor_dbg(us, "-- result is %d\n", result); return 0; @@ -100,7 +100,7 @@ int usb_stor_huawei_e220_init(struct us_data *us) result = usb_stor_control_msg(us, us->send_ctrl_pipe, USB_REQ_SET_FEATURE, USB_TYPE_STANDARD | USB_RECIP_DEVICE, - 0x01, 0x0, NULL, 0x0, 1000); + 0x01, 0x0, NULL, 0x0, 1 * HZ); usb_stor_dbg(us, "Huawei mode set result is %d\n", result); return 0; } diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 8591d89a38e..27e4a580d2e 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -626,6 +626,7 @@ static int config_autodelink_after_power_on(struct us_data *us) return 0; } +#ifdef CONFIG_PM static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); @@ -716,6 +717,7 @@ static void fw5895_init(struct us_data *us) } } } +#endif #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 22c7d4360fa..b1d815eb6d0 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) */ if (result == USB_STOR_XFER_LONG) fake_sense = 1; + + /* + * Sometimes a device will mistakenly skip the data phase + * and go directly to the status phase without sending a + * zero-length packet. 
If we get a 13-byte response here, + * check whether it really is a CSW. + */ + if (result == USB_STOR_XFER_SHORT && + srb->sc_data_direction == DMA_FROM_DEVICE && + transfer_length - scsi_get_resid(srb) == + US_BULK_CS_WRAP_LEN) { + struct scatterlist *sg = NULL; + unsigned int offset = 0; + + if (usb_stor_access_xfer_buf((unsigned char *) bcs, + US_BULK_CS_WRAP_LEN, srb, &sg, + &offset, FROM_XFER_BUF) == + US_BULK_CS_WRAP_LEN && + bcs->Signature == + cpu_to_le32(US_BULK_CS_SIGN)) { + usb_stor_dbg(us, "Device skipped data phase\n"); + scsi_set_resid(srb, transfer_length); + goto skipped_data_phase; + } + } } /* See flow chart on pg 15 of the Bulk Only Transport spec for @@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; + skipped_data_phase: /* check bulk status */ residue = le32_to_cpu(bcs->Residue); usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 8511b54a65d..18a283d6de1 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -54,6 +54,20 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ +UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999, + "Seagate", + "Expansion Desk", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + +/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */ +UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999, + "Seagate", + "Backup Plus", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* https://bbs.archlinux.org/viewtopic.php?id=183190 */ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, "Seagate", @@ -61,6 +75,13 @@ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* https://bbs.archlinux.org/viewtopic.php?id=183190 */ +UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999, + "Seagate", + "Backup+ BK", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, "JMicron", @@ -75,3 +96,17 @@ UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999, "ASM1051", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_UAS), + +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ +UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, + "VIA", + "VL711", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ +UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, + "Hitachi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 69906cacd04..a17f1185066 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1312,6 +1312,7 @@ static int vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg **vs_tpg; @@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ret = -EEXIST; goto out; } + /* + * In order to ensure individual vhost-scsi configfs + * groups cannot be removed while in use by vhost ioctl, + * go ahead and take an explicit se_tpg->tpg_group.cg_item + * dependency now. 
+ */ + se_tpg = &tpg->se_tpg; + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); + if (ret) { + pr_warn("configfs_depend_item() failed: %d\n", ret); + kfree(vs_tpg); + mutex_unlock(&tpg->tv_tpg_mutex); + goto out; + } tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; @@ -1401,6 +1417,7 @@ static int vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct vhost_virtqueue *vq; @@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vs->vs_tpg[target] = NULL; match = true; mutex_unlock(&tpg->tv_tpg_mutex); + /* + * Release se_tpg->tpg_group.cg_item configfs dependency now + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. + */ + se_tpg = &tpg->se_tpg; + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 57b1d44acbf..eb976ee3a02 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -448,8 +448,10 @@ static int __init fb_console_setup(char *this_opt) return 1; while ((options = strsep(&this_opt, ",")) != NULL) { - if (!strncmp(options, "font:", 5)) + if (!strncmp(options, "font:", 5)) { strlcpy(fontname, options + 5, sizeof(fontname)); + continue; + } if (!strncmp(options, "scrollback:", 11)) { options += 11; @@ -457,13 +459,9 @@ static int __init fb_console_setup(char *this_opt) fbcon_softback_size = simple_strtoul(options, &options, 0); if (*options == 'k' || *options == 'K') { fbcon_softback_size *= 1024; - options++; } - if (*options != ',') - return 1; - options++; - } else - return 1; + } + continue; } if (!strncmp(options, "map:", 4)) { @@ -478,8 +476,7 @@ static int __init fb_console_setup(char *this_opt) fbcon_map_override(); } - - return 1; + continue; } if (!strncmp(options, "vc:", 3)) { @@ -491,7 +488,8 @@ static int __init fb_console_setup(char *this_opt) if (*options++ == '-') last_fb_vc = simple_strtoul(options, &options, 10) - 1; fbcon_is_default = 0; - } + continue; + } if (!strncmp(options, "rotate:", 7)) { options += 7; @@ -499,6 +497,7 @@ static int __init fb_console_setup(char *this_opt) initial_rotation = simple_strtoul(options, &options, 0); if (initial_rotation > 3) initial_rotation = 0; + continue; } } return 1; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 6e6aa704fe8..517f565b65d 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -56,7 +56,7 @@ static int cursor_size_lastfrom; static int cursor_size_lastto; static u32 vgacon_xres; static u32 vgacon_yres; -static struct vgastate state; +static struct vgastate vgastate; #define BLANK 0x0020 @@ -400,7 +400,7 @@ static const char *vgacon_startup(void) vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; - state.vgabase = NULL; + vgastate.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ @@ -851,12 +851,12 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table) { int i, j; - vga_w(state.vgabase, VGA_PEL_MSK, 0xff); + vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff); for (i = j = 0; i < 16; i++) { - vga_w(state.vgabase, VGA_PEL_IW, table[i]); - vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); - vga_w(state.vgabase, VGA_PEL_D, 
vc->vc_palette[j++] >> 2); - vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); + vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]); + vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); + vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); + vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); } } @@ -1008,7 +1008,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) switch (blank) { case 0: /* Unblank */ if (vga_vesa_blanked) { - vga_vesa_unblank(&state); + vga_vesa_unblank(&vgastate); vga_vesa_blanked = 0; } if (vga_palette_blanked) { @@ -1022,7 +1022,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) case 1: /* Normal blanking */ case -1: /* Obsolete */ if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { - vga_pal_blank(&state); + vga_pal_blank(&vgastate); vga_palette_blanked = 1; return 0; } @@ -1034,7 +1034,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) return 1; default: /* VESA blanking */ if (vga_video_type == VIDEO_TYPE_VGAC) { - vga_vesa_blank(&state, blank - 1); + vga_vesa_blank(&vgastate, blank - 1); vga_vesa_blanked = blank; } return 0; @@ -1280,7 +1280,7 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigne (charcount != 256 && charcount != 512)) return -EINVAL; - rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512); + rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512); if (rc) return rc; @@ -1299,7 +1299,7 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font) font->charcount = vga_512_chars ? 512 : 256; if (!font->data) return 0; - return vgacon_do_font_op(&state, font->data, 0, vga_512_chars); + return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars); } #else diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 3bf403150a2..9ec81d46fc5 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -27,7 +27,6 @@ #include <linux/regulator/consumer.h> #include <video/videomode.h> -#include <mach/cpu.h> #include <asm/gpio.h> #include <video/atmel_lcdc.h> diff --git a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c index 5ee3b5505f7..91921665b98 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c @@ -301,6 +301,8 @@ static const struct of_device_id tvc_of_match[] = { {}, }; +MODULE_DEVICE_TABLE(of, tvc_of_match); + static struct platform_driver tvc_connector_driver = { .probe = tvc_probe, .remove = __exit_p(tvc_remove), @@ -308,6 +310,7 @@ static struct platform_driver tvc_connector_driver = { .name = "connector-analog-tv", .owner = THIS_MODULE, .of_match_table = tvc_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index 74de2bc50c4..2dfb6e5ff0c 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c @@ -391,6 +391,7 @@ static struct platform_driver dvi_connector_driver = { .name = "connector-dvi", .owner = THIS_MODULE, .of_match_table = dvic_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c index 131c6e26089..7b25967a91e 100644 --- 
a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c @@ -437,6 +437,7 @@ static struct platform_driver hdmi_connector_driver = { .name = "connector-hdmi", .owner = THIS_MODULE, .of_match_table = hdmic_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c index b4e9a42a79e..47ee7cdee1c 100644 --- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c +++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c @@ -298,6 +298,7 @@ static struct platform_driver tfp410_driver = { .name = "tfp410", .owner = THIS_MODULE, .of_match_table = tfp410_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c index c891d8f84cb..c4abd56dd84 100644 --- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c @@ -461,6 +461,7 @@ static struct platform_driver tpd_driver = { .name = "tpd12s015", .owner = THIS_MODULE, .of_match_table = tpd_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c index 3636b61dc9b..a9c3dcf0f6b 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c @@ -327,6 +327,7 @@ static struct platform_driver panel_dpi_driver = { .name = "panel-dpi", .owner = THIS_MODULE, .of_match_table = panel_dpi_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c index d6f14e8717e..899cb1ab523 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c @@ -1378,6 +1378,7 @@ static struct platform_driver dsicm_driver = { .name = "panel-dsi-cm", .owner = THIS_MODULE, .of_match_table = dsicm_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c index cc5b5124e0b..27d4fcfa182 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c @@ -394,6 +394,7 @@ static struct spi_driver lb035q02_spi_driver = { .name = "panel_lgphilips_lb035q02", .owner = THIS_MODULE, .of_match_table = lb035q02_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c index 3595f111aa3..ccf3f4f3c70 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c @@ -424,6 +424,7 @@ static struct spi_driver nec_8048_driver = { .owner = THIS_MODULE, .pm = NEC_8048_PM_OPS, .of_match_table = nec_8048_of_match, + .suppress_bind_attrs = true, }, .probe = nec_8048_probe, .remove = nec_8048_remove, diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c index f1f72ce50a1..234142cc376 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c +++ 
b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c @@ -410,6 +410,7 @@ static struct platform_driver sharp_ls_driver = { .name = "panel-sharp-ls037v7dw01", .owner = THIS_MODULE, .of_match_table = sharp_ls_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 617f8d2f512..337ccc5c0f5 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -904,6 +904,7 @@ static struct spi_driver acx565akm_driver = { .name = "acx565akm", .owner = THIS_MODULE, .of_match_table = acx565akm_of_match, + .suppress_bind_attrs = true, }, .probe = acx565akm_probe, .remove = acx565akm_remove, diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c index 728808bccee..fbba0b8ca87 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c @@ -500,6 +500,7 @@ static struct spi_driver td028ttec1_spi_driver = { .name = "panel-tpo-td028ttec1", .owner = THIS_MODULE, .of_match_table = td028ttec1_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c index de78ab0caaa..5aba76bca25 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c @@ -673,6 +673,7 @@ static struct spi_driver tpo_td043_spi_driver = { .owner = THIS_MODULE, .pm = &tpo_td043_spi_pm, .of_match_table = tpo_td043_of_match, + .suppress_bind_attrs = true, }, .probe = tpo_td043_probe, .remove = tpo_td043_remove, diff --git a/drivers/video/fbdev/omap2/dss/apply.c b/drivers/video/fbdev/omap2/dss/apply.c index 0a0b084ce65..663ccc3bf4e 100644 --- a/drivers/video/fbdev/omap2/dss/apply.c +++ b/drivers/video/fbdev/omap2/dss/apply.c @@ -1132,6 +1132,8 @@ static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr) if (!mp->enabled) goto out; + wait_pending_extra_info_updates(); + if (!mgr_manual_update(mgr)) dispc_mgr_disable_sync(mgr->id); diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c index be053aa8088..0e9a74bb9fc 100644 --- a/drivers/video/fbdev/omap2/dss/dispc.c +++ b/drivers/video/fbdev/omap2/dss/dispc.c @@ -3290,8 +3290,11 @@ static void dispc_dump_regs(struct seq_file *s) DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS); DUMPREG(i, DISPC_OVL_ROW_INC); DUMPREG(i, DISPC_OVL_PIXEL_INC); + if (dss_has_feature(FEAT_PRELOAD)) DUMPREG(i, DISPC_OVL_PRELOAD); + if (dss_has_feature(FEAT_MFLAG)) + DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD); if (i == OMAP_DSS_GFX) { DUMPREG(i, DISPC_OVL_WINDOW_SKIP); @@ -3312,10 +3315,6 @@ static void dispc_dump_regs(struct seq_file *s) } if (dss_has_feature(FEAT_ATTR2)) DUMPREG(i, DISPC_OVL_ATTRIBUTES2); - if (dss_has_feature(FEAT_PRELOAD)) - DUMPREG(i, DISPC_OVL_PRELOAD); - if (dss_has_feature(FEAT_MFLAG)) - DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD); } #undef DISPC_REG @@ -3843,6 +3842,7 @@ static struct platform_driver omap_dispchw_driver = { .owner = THIS_MODULE, .pm = &dispc_pm_ops, .of_match_table = dispc_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/dispc.h b/drivers/video/fbdev/omap2/dss/dispc.h index 78edb449c76..3043d6e0a5f 100644 --- 
a/drivers/video/fbdev/omap2/dss/dispc.h +++ b/drivers/video/fbdev/omap2/dss/dispc.h @@ -101,8 +101,7 @@ DISPC_FIR_COEF_V2_OFFSET(n, i)) #define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ DISPC_PRELOAD_OFFSET(n)) -#define DISPC_OVL_MFLAG_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ - DISPC_MFLAG_THRESHOLD_OFFSET(n)) +#define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n) /* DISPC up/downsampling FIR filter coefficient structure */ struct dispc_coef { diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c index 9368972d696..4a3363dae74 100644 --- a/drivers/video/fbdev/omap2/dss/dpi.c +++ b/drivers/video/fbdev/omap2/dss/dpi.c @@ -720,6 +720,7 @@ static struct platform_driver omap_dpi_driver = { .driver = { .name = "omapdss_dpi", .owner = THIS_MODULE, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c index b6f6ae1d466..0793bc67a27 100644 --- a/drivers/video/fbdev/omap2/dss/dsi.c +++ b/drivers/video/fbdev/omap2/dss/dsi.c @@ -1603,7 +1603,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev, } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) { f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4; - l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */ + l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */ } l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ @@ -5754,6 +5754,7 @@ static struct platform_driver omap_dsihw_driver = { .owner = THIS_MODULE, .pm = &dsi_pm_ops, .of_match_table = dsi_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c index 6daeb7ed44c..14bcd6c43f7 100644 --- a/drivers/video/fbdev/omap2/dss/dss.c +++ b/drivers/video/fbdev/omap2/dss/dss.c @@ -966,6 +966,7 @@ static struct platform_driver omap_dsshw_driver = { .owner = THIS_MODULE, .pm = &dss_pm_ops, .of_match_table = dss_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c index 6a8550cf43e..9a8713ca090 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/dss/hdmi4.c @@ -781,6 +781,7 @@ static struct platform_driver omapdss_hdmihw_driver = { .owner = THIS_MODULE, .pm = &hdmi_pm_ops, .of_match_table = hdmi_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c index 32d02ec34d2..169b764bb9d 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/dss/hdmi5.c @@ -806,6 +806,7 @@ static struct platform_driver omapdss_hdmihw_driver = { .owner = THIS_MODULE, .pm = &hdmi_pm_ops, .of_match_table = hdmi_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c index 54df12a8d74..6d92bb32fe5 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c +++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c @@ -124,16 +124,15 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll) r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */ r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */ - if (fmt->dcofreq) { - /* divider programming for frequency beyond 1000Mhz */ - REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10); + if (fmt->dcofreq) r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */ - } else { + else r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */ - } hdmi_write_reg(pll->base, PLLCTRL_CFG2, r); + REG_FLD_MOD(pll->base, PLLCTRL_CFG3, 
fmt->regsd, 17, 10); + r = hdmi_read_reg(pll->base, PLLCTRL_CFG4); r = FLD_MOD(r, fmt->regm2, 24, 18); r = FLD_MOD(r, fmt->regmf, 17, 0); @@ -144,8 +143,8 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll) /* wait for bit change */ if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO, - 0, 0, 1) != 1) { - DSSERR("PLL GO bit not set\n"); + 0, 0, 0) != 0) { + DSSERR("PLL GO bit not clearing\n"); return -ETIMEDOUT; } diff --git a/drivers/video/fbdev/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c index c8a81a2b879..878273f5883 100644 --- a/drivers/video/fbdev/omap2/dss/rfbi.c +++ b/drivers/video/fbdev/omap2/dss/rfbi.c @@ -1044,6 +1044,7 @@ static struct platform_driver omap_rfbihw_driver = { .name = "omapdss_rfbi", .owner = THIS_MODULE, .pm = &rfbi_pm_ops, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c index 911dcc9173a..4c9c46d4ea6 100644 --- a/drivers/video/fbdev/omap2/dss/sdi.c +++ b/drivers/video/fbdev/omap2/dss/sdi.c @@ -377,6 +377,7 @@ static struct platform_driver omap_sdi_driver = { .driver = { .name = "omapdss_sdi", .owner = THIS_MODULE, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c index 21d81113962..d077d8a75dd 100644 --- a/drivers/video/fbdev/omap2/dss/venc.c +++ b/drivers/video/fbdev/omap2/dss/venc.c @@ -966,6 +966,7 @@ static struct platform_driver omap_venchw_driver = { .owner = THIS_MODULE, .pm = &venc_pm_ops, .of_match_table = venc_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 15872433e0c..ce8a7057075 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -1833,14 +1833,13 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev) if (fbdev == NULL) return; - for (i = 0; i < fbdev->num_fbs; i++) { - struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); - int j; + for (i = 0; i < fbdev->num_overlays; i++) { + struct omap_overlay *ovl = fbdev->overlays[i]; - for (j = 0; j < ofbi->num_overlays; j++) { - struct omap_overlay *ovl = ofbi->overlays[j]; - ovl->disable(ovl); - } + ovl->disable(ovl); + + if (ovl->manager) + ovl->unset_manager(ovl); } for (i = 0; i < fbdev->num_fbs; i++) @@ -2619,7 +2618,7 @@ err0: return r; } -static int __exit omapfb_remove(struct platform_device *pdev) +static int omapfb_remove(struct platform_device *pdev) { struct omapfb2_device *fbdev = platform_get_drvdata(pdev); @@ -2636,7 +2635,7 @@ static int __exit omapfb_remove(struct platform_device *pdev) static struct platform_driver omapfb_driver = { .probe = omapfb_probe, - .remove = __exit_p(omapfb_remove), + .remove = omapfb_remove, .driver = { .name = "omapfb", .owner = THIS_MODULE, @@ -2651,6 +2650,7 @@ module_param_named(mirror, def_mirror, bool, 0); module_platform_driver(omapfb_driver); +MODULE_ALIAS("platform:omapfb"); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); MODULE_DESCRIPTION("OMAP2/3 Framebuffer"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index e3d5bf0a502..d0107d424ee 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -87,6 +87,15 @@ config DA9055_WATCHDOG This driver can also be built as a module. If so, the module will be called da9055_wdt. 
+config DA9063_WATCHDOG + tristate "Dialog DA9063 Watchdog" + depends on MFD_DA9063 + select WATCHDOG_CORE + help + Support for the watchdog in the DA9063 PMIC. + + This driver can be built as a module. The module name is da9063_wdt. + config GPIO_WATCHDOG tristate "Watchdog device controlled through GPIO-line" depends on OF_GPIO @@ -123,6 +132,7 @@ config WM8350_WATCHDOG config XILINX_WATCHDOG tristate "Xilinx Watchdog timer" + depends on HAS_IOMEM select WATCHDOG_CORE help Watchdog driver for the xps_timebase_wdt ip core. @@ -157,6 +167,14 @@ config AT91SAM9X_WATCHDOG Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will reboot your system when the timeout is reached. +config CADENCE_WATCHDOG + tristate "Cadence Watchdog Timer" + depends on ARM + select WATCHDOG_CORE + help + Say Y here if you want to include support for the watchdog + timer in the Xilinx Zynq. + config 21285_WATCHDOG tristate "DC21285 watchdog" depends on FOOTBRIDGE @@ -319,6 +337,17 @@ config ORION_WATCHDOG To compile this driver as a module, choose M here: the module will be called orion_wdt. +config RN5T618_WATCHDOG + tristate "Ricoh RN5T618 watchdog" + depends on MFD_RN5T618 + select WATCHDOG_CORE + help + If you say yes here you get support for watchdog on the Ricoh + RN5T618 PMIC. + + This driver can also be built as a module. If so, the module + will be called rn5t618_wdt. + config SUNXI_WATCHDOG tristate "Allwinner SoCs watchdog support" depends on ARCH_SUNXI @@ -444,7 +473,7 @@ config SIRFSOC_WATCHDOG config TEGRA_WATCHDOG tristate "Tegra watchdog" - depends on ARCH_TEGRA || COMPILE_TEST + depends on (ARCH_TEGRA || COMPILE_TEST) && HAS_IOMEM select WATCHDOG_CORE help Say Y here to include support for the watchdog timer @@ -453,6 +482,29 @@ config TEGRA_WATCHDOG To compile this driver as a module, choose M here: the module will be called tegra_wdt. +config QCOM_WDT + tristate "QCOM watchdog" + depends on HAS_IOMEM + depends on ARCH_QCOM + select WATCHDOG_CORE + help + Say Y here to include Watchdog timer support for the watchdog found + on QCOM chipsets. Currently supported targets are the MSM8960, + APQ8064, and IPQ8064. + + To compile this driver as a module, choose M here: the + module will be called qcom_wdt. + +config MESON_WATCHDOG + tristate "Amlogic Meson SoCs watchdog support" + depends on ARCH_MESON + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Amlogic Meson SoCs. + To compile this driver as a module, choose M here: the + module will be called meson_wdt. 
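The watchdog Kconfig entries added above (DA9063, Cadence, RN5T618, QCOM, Meson) all select WATCHDOG_CORE, so the drivers they enable register a struct watchdog_device with the watchdog core rather than exposing their own character device. As a rough illustration of that shared pattern — a minimal sketch only, not code from this patch; the "acme" names and register offsets are invented, while the watchdog-core and platform-driver calls are the same ones used by the da9063_wdt.c, cadence_wdt.c and meson_wdt.c drivers added later in this diff:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

/* Invented register layout, for illustration only. */
#define ACME_WDT_CTRL	0x0	/* bit 0: watchdog enable */
#define ACME_WDT_KICK	0x4	/* write magic value to pet the watchdog */
#define ACME_WDT_MAGIC	0x5a5a

struct acme_wdt {
	void __iomem *regs;
	struct watchdog_device wdd;
};

static int acme_wdt_start(struct watchdog_device *wdd)
{
	struct acme_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(1, wdt->regs + ACME_WDT_CTRL);
	return 0;
}

static int acme_wdt_stop(struct watchdog_device *wdd)
{
	struct acme_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(0, wdt->regs + ACME_WDT_CTRL);
	return 0;
}

static int acme_wdt_ping(struct watchdog_device *wdd)
{
	struct acme_wdt *wdt = watchdog_get_drvdata(wdd);

	writel(ACME_WDT_MAGIC, wdt->regs + ACME_WDT_KICK);
	return 0;
}

static const struct watchdog_info acme_wdt_info = {
	.identity = "acme-wdt",
	.options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};

static const struct watchdog_ops acme_wdt_ops = {
	.owner = THIS_MODULE,
	.start = acme_wdt_start,
	.stop  = acme_wdt_stop,
	.ping  = acme_wdt_ping,
};

static int acme_wdt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct acme_wdt *wdt;

	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	wdt->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(wdt->regs))
		return PTR_ERR(wdt->regs);

	wdt->wdd.info = &acme_wdt_info;
	wdt->wdd.ops = &acme_wdt_ops;
	wdt->wdd.timeout = 30;

	watchdog_set_drvdata(&wdt->wdd, wdt);
	platform_set_drvdata(pdev, wdt);

	/* This single registration call is what "select WATCHDOG_CORE" provides. */
	return watchdog_register_device(&wdt->wdd);
}

static int acme_wdt_remove(struct platform_device *pdev)
{
	struct acme_wdt *wdt = platform_get_drvdata(pdev);

	watchdog_unregister_device(&wdt->wdd);
	return 0;
}

static struct platform_driver acme_wdt_driver = {
	.probe  = acme_wdt_probe,
	.remove = acme_wdt_remove,
	.driver = {
		.name  = "acme-wdt",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(acme_wdt_driver);

MODULE_LICENSE("GPL");

Each of the new Kconfig entries is then paired with one obj-$(CONFIG_...) line in the Makefile hunk that follows, which builds exactly one such driver object.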
+ # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index de1701470c1..c569ec8f8a7 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o +obj-$(CONFIG_CADENCE_WATCHDOG) += cadence_wdt.o obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o obj-$(CONFIG_21285_WATCHDOG) += wdt285.o @@ -47,6 +48,7 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o +obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o @@ -57,8 +59,10 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o +obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o +obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -173,6 +177,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o # Architecture Independent obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o +obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 08a785398ea..e96b09b135c 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -30,8 +30,6 @@ * occur, and the final time the board will reset. */ -u32 booke_wdt_enabled; -u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT; #ifdef CONFIG_PPC_FSL_BOOK3E #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) @@ -41,27 +39,10 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT; #define WDTP_MASK (TCR_WP_MASK) #endif -/* Checks wdt=x and wdt_period=xx command-line option */ -notrace int __init early_parse_wdt(char *p) -{ - if (p && strncmp(p, "0", 1) != 0) - booke_wdt_enabled = 1; - - return 0; -} -early_param("wdt", early_parse_wdt); - -int __init early_parse_wdt_period(char *p) -{ - unsigned long ret; - if (p) { - if (!kstrtol(p, 0, &ret)) - booke_wdt_period = ret; - } - - return 0; -} -early_param("wdt_period", early_parse_wdt_period); +static bool booke_wdt_enabled; +module_param(booke_wdt_enabled, bool, 0); +static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT; +module_param(booke_wdt_period, int, 0); #ifdef CONFIG_PPC_FSL_BOOK3E @@ -259,5 +240,6 @@ static int __init booke_wdt_init(void) module_init(booke_wdt_init); module_exit(booke_wdt_exit); +MODULE_ALIAS("booke_wdt"); MODULE_DESCRIPTION("PowerPC Book-E watchdog driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c new file mode 100644 index 00000000000..5927c0a98a7 --- /dev/null +++ b/drivers/watchdog/cadence_wdt.c @@ -0,0 +1,516 @@ +/* + * Cadence WDT driver - Used by Xilinx Zynq + * + * Copyright (C) 2010 - 2014 Xilinx, Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/watchdog.h> + +#define CDNS_WDT_DEFAULT_TIMEOUT 10 +/* Supports 1 - 516 sec */ +#define CDNS_WDT_MIN_TIMEOUT 1 +#define CDNS_WDT_MAX_TIMEOUT 516 + +/* Restart key */ +#define CDNS_WDT_RESTART_KEY 0x00001999 + +/* Counter register access key */ +#define CDNS_WDT_REGISTER_ACCESS_KEY 0x00920000 + +/* Counter value divisor */ +#define CDNS_WDT_COUNTER_VALUE_DIVISOR 0x1000 + +/* Clock prescaler value and selection */ +#define CDNS_WDT_PRESCALE_64 64 +#define CDNS_WDT_PRESCALE_512 512 +#define CDNS_WDT_PRESCALE_4096 4096 +#define CDNS_WDT_PRESCALE_SELECT_64 1 +#define CDNS_WDT_PRESCALE_SELECT_512 2 +#define CDNS_WDT_PRESCALE_SELECT_4096 3 + +/* Input clock frequency */ +#define CDNS_WDT_CLK_10MHZ 10000000 +#define CDNS_WDT_CLK_75MHZ 75000000 + +/* Counter maximum value */ +#define CDNS_WDT_COUNTER_MAX 0xFFF + +static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT; +static int nowayout = WATCHDOG_NOWAYOUT; + +module_param(wdt_timeout, int, 0); +MODULE_PARM_DESC(wdt_timeout, + "Watchdog time in seconds. (default=" + __MODULE_STRING(CDNS_WDT_DEFAULT_TIMEOUT) ")"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +/** + * struct cdns_wdt - Watchdog device structure + * @regs: baseaddress of device + * @rst: reset flag + * @clk: struct clk * of a clock source + * @prescaler: for saving prescaler value + * @ctrl_clksel: counter clock prescaler selection + * @io_lock: spinlock for IO register access + * @cdns_wdt_device: watchdog device structure + * @cdns_wdt_notifier: notifier structure + * + * Structure containing parameters specific to cadence watchdog. + */ +struct cdns_wdt { + void __iomem *regs; + bool rst; + struct clk *clk; + u32 prescaler; + u32 ctrl_clksel; + spinlock_t io_lock; + struct watchdog_device cdns_wdt_device; + struct notifier_block cdns_wdt_notifier; +}; + +/* Write access to Registers */ +static inline void cdns_wdt_writereg(struct cdns_wdt *wdt, u32 offset, u32 val) +{ + writel_relaxed(val, wdt->regs + offset); +} + +/*************************Register Map**************************************/ + +/* Register Offsets for the WDT */ +#define CDNS_WDT_ZMR_OFFSET 0x0 /* Zero Mode Register */ +#define CDNS_WDT_CCR_OFFSET 0x4 /* Counter Control Register */ +#define CDNS_WDT_RESTART_OFFSET 0x8 /* Restart Register */ +#define CDNS_WDT_SR_OFFSET 0xC /* Status Register */ + +/* + * Zero Mode Register - This register controls how the time out is indicated + * and also contains the access code to allow writes to the register (0xABC). 
+ */ +#define CDNS_WDT_ZMR_WDEN_MASK 0x00000001 /* Enable the WDT */ +#define CDNS_WDT_ZMR_RSTEN_MASK 0x00000002 /* Enable the reset output */ +#define CDNS_WDT_ZMR_IRQEN_MASK 0x00000004 /* Enable IRQ output */ +#define CDNS_WDT_ZMR_RSTLEN_16 0x00000030 /* Reset pulse of 16 pclk cycles */ +#define CDNS_WDT_ZMR_ZKEY_VAL 0x00ABC000 /* Access key, 0xABC << 12 */ +/* + * Counter Control register - This register controls how fast the timer runs + * and the reset value and also contains the access code to allow writes to + * the register. + */ +#define CDNS_WDT_CCR_CRV_MASK 0x00003FFC /* Counter reset value */ + +/** + * cdns_wdt_stop - Stop the watchdog. + * + * @wdd: watchdog device + * + * Read the contents of the ZMR register, clear the WDEN bit + * in the register and set the access key for successful write. + * + * Return: always 0 + */ +static int cdns_wdt_stop(struct watchdog_device *wdd) +{ + struct cdns_wdt *wdt = watchdog_get_drvdata(wdd); + + spin_lock(&wdt->io_lock); + cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, + CDNS_WDT_ZMR_ZKEY_VAL & (~CDNS_WDT_ZMR_WDEN_MASK)); + spin_unlock(&wdt->io_lock); + + return 0; +} + +/** + * cdns_wdt_reload - Reload the watchdog timer (i.e. pat the watchdog). + * + * @wdd: watchdog device + * + * Write the restart key value (0x00001999) to the restart register. + * + * Return: always 0 + */ +static int cdns_wdt_reload(struct watchdog_device *wdd) +{ + struct cdns_wdt *wdt = watchdog_get_drvdata(wdd); + + spin_lock(&wdt->io_lock); + cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET, + CDNS_WDT_RESTART_KEY); + spin_unlock(&wdt->io_lock); + + return 0; +} + +/** + * cdns_wdt_start - Enable and start the watchdog. + * + * @wdd: watchdog device + * + * The counter value is calculated according to the formula: + * calculated count = (timeout * clock) / prescaler + 1. + * The calculated count is divided by 0x1000 to obtain the field value + * to write to counter control register. + * Clears the contents of prescaler and counter reset value. Sets the + * prescaler to 4096 and the calculated count and access key + * to write to CCR Register. + * Sets the WDT (WDEN bit) and either the Reset signal(RSTEN bit) + * or Interrupt signal(IRQEN) with a specified cycles and the access + * key to write to ZMR Register. + * + * Return: always 0 + */ +static int cdns_wdt_start(struct watchdog_device *wdd) +{ + struct cdns_wdt *wdt = watchdog_get_drvdata(wdd); + unsigned int data = 0; + unsigned short count; + unsigned long clock_f = clk_get_rate(wdt->clk); + + /* + * Counter value divisor to obtain the value of + * counter reset to be written to control register. + */ + count = (wdd->timeout * (clock_f / wdt->prescaler)) / + CDNS_WDT_COUNTER_VALUE_DIVISOR + 1; + + if (count > CDNS_WDT_COUNTER_MAX) + count = CDNS_WDT_COUNTER_MAX; + + spin_lock(&wdt->io_lock); + cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, + CDNS_WDT_ZMR_ZKEY_VAL); + + count = (count << 2) & CDNS_WDT_CCR_CRV_MASK; + + /* Write counter access key first to be able write to register */ + data = count | CDNS_WDT_REGISTER_ACCESS_KEY | wdt->ctrl_clksel; + cdns_wdt_writereg(wdt, CDNS_WDT_CCR_OFFSET, data); + data = CDNS_WDT_ZMR_WDEN_MASK | CDNS_WDT_ZMR_RSTLEN_16 | + CDNS_WDT_ZMR_ZKEY_VAL; + + /* Reset on timeout if specified in device tree. 
*/ + if (wdt->rst) { + data |= CDNS_WDT_ZMR_RSTEN_MASK; + data &= ~CDNS_WDT_ZMR_IRQEN_MASK; + } else { + data &= ~CDNS_WDT_ZMR_RSTEN_MASK; + data |= CDNS_WDT_ZMR_IRQEN_MASK; + } + cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, data); + cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET, + CDNS_WDT_RESTART_KEY); + spin_unlock(&wdt->io_lock); + + return 0; +} + +/** + * cdns_wdt_settimeout - Set a new timeout value for the watchdog device. + * + * @wdd: watchdog device + * @new_time: new timeout value that needs to be set + * Return: 0 on success + * + * Update the watchdog_device timeout with new value which is used when + * cdns_wdt_start is called. + */ +static int cdns_wdt_settimeout(struct watchdog_device *wdd, + unsigned int new_time) +{ + wdd->timeout = new_time; + + return cdns_wdt_start(wdd); +} + +/** + * cdns_wdt_irq_handler - Notifies of watchdog timeout. + * + * @irq: interrupt number + * @dev_id: pointer to a platform device structure + * Return: IRQ_HANDLED + * + * The handler is invoked when the watchdog times out and a + * reset on timeout has not been enabled. + */ +static irqreturn_t cdns_wdt_irq_handler(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + + dev_info(&pdev->dev, + "Watchdog timed out. Internal reset not enabled\n"); + + return IRQ_HANDLED; +} + +/* + * Info structure used to indicate the features supported by the device + * to the upper layers. This is defined in watchdog.h header file. + */ +static struct watchdog_info cdns_wdt_info = { + .identity = "cdns_wdt watchdog", + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +/* Watchdog Core Ops */ +static struct watchdog_ops cdns_wdt_ops = { + .owner = THIS_MODULE, + .start = cdns_wdt_start, + .stop = cdns_wdt_stop, + .ping = cdns_wdt_reload, + .set_timeout = cdns_wdt_settimeout, +}; + +/** + * cdns_wdt_notify_sys - Notifier for reboot or shutdown. + * + * @this: handle to notifier block + * @code: turn off indicator + * @unused: unused + * Return: NOTIFY_DONE + * + * This notifier is invoked whenever the system reboot or shutdown occur + * because we need to disable the WDT before system goes down as WDT might + * reset on the next boot. + */ +static int cdns_wdt_notify_sys(struct notifier_block *this, unsigned long code, + void *unused) +{ + struct cdns_wdt *wdt = container_of(this, struct cdns_wdt, + cdns_wdt_notifier); + if (code == SYS_DOWN || code == SYS_HALT) + cdns_wdt_stop(&wdt->cdns_wdt_device); + + return NOTIFY_DONE; +} + +/************************Platform Operations*****************************/ +/** + * cdns_wdt_probe - Probe call for the device. + * + * @pdev: handle to the platform device structure. + * Return: 0 on success, negative error otherwise. + * + * It does all the memory allocation and registration for the device. 
+ */ +static int cdns_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + int ret, irq; + unsigned long clock_f; + struct cdns_wdt *wdt; + struct watchdog_device *cdns_wdt_device; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + cdns_wdt_device = &wdt->cdns_wdt_device; + cdns_wdt_device->info = &cdns_wdt_info; + cdns_wdt_device->ops = &cdns_wdt_ops; + cdns_wdt_device->timeout = CDNS_WDT_DEFAULT_TIMEOUT; + cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT; + cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wdt->regs)) + return PTR_ERR(wdt->regs); + + /* Register the interrupt */ + wdt->rst = of_property_read_bool(pdev->dev.of_node, "reset-on-timeout"); + irq = platform_get_irq(pdev, 0); + if (!wdt->rst && irq >= 0) { + ret = devm_request_irq(&pdev->dev, irq, cdns_wdt_irq_handler, 0, + pdev->name, pdev); + if (ret) { + dev_err(&pdev->dev, + "cannot register interrupt handler err=%d\n", + ret); + return ret; + } + } + + /* Initialize the members of cdns_wdt structure */ + cdns_wdt_device->parent = &pdev->dev; + + ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "unable to set timeout value\n"); + return ret; + } + + watchdog_set_nowayout(cdns_wdt_device, nowayout); + watchdog_set_drvdata(cdns_wdt_device, wdt); + + wdt->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(wdt->clk)) { + dev_err(&pdev->dev, "input clock not found\n"); + ret = PTR_ERR(wdt->clk); + return ret; + } + + ret = clk_prepare_enable(wdt->clk); + if (ret) { + dev_err(&pdev->dev, "unable to enable clock\n"); + return ret; + } + + clock_f = clk_get_rate(wdt->clk); + if (clock_f <= CDNS_WDT_CLK_75MHZ) { + wdt->prescaler = CDNS_WDT_PRESCALE_512; + wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_512; + } else { + wdt->prescaler = CDNS_WDT_PRESCALE_4096; + wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_4096; + } + + spin_lock_init(&wdt->io_lock); + + wdt->cdns_wdt_notifier.notifier_call = &cdns_wdt_notify_sys; + ret = register_reboot_notifier(&wdt->cdns_wdt_notifier); + if (ret != 0) { + dev_err(&pdev->dev, "cannot register reboot notifier err=%d)\n", + ret); + goto err_clk_disable; + } + + ret = watchdog_register_device(cdns_wdt_device); + if (ret) { + dev_err(&pdev->dev, "Failed to register wdt device\n"); + goto err_clk_disable; + } + platform_set_drvdata(pdev, wdt); + + dev_dbg(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n", + wdt->regs, cdns_wdt_device->timeout, + nowayout ? ", nowayout" : ""); + + return 0; + +err_clk_disable: + clk_disable_unprepare(wdt->clk); + + return ret; +} + +/** + * cdns_wdt_remove - Probe call for the device. + * + * @pdev: handle to the platform device structure. + * Return: 0 on success, otherwise negative error. + * + * Unregister the device after releasing the resources. + */ +static int cdns_wdt_remove(struct platform_device *pdev) +{ + struct cdns_wdt *wdt = platform_get_drvdata(pdev); + + cdns_wdt_stop(&wdt->cdns_wdt_device); + watchdog_unregister_device(&wdt->cdns_wdt_device); + unregister_reboot_notifier(&wdt->cdns_wdt_notifier); + clk_disable_unprepare(wdt->clk); + + return 0; +} + +/** + * cdns_wdt_shutdown - Stop the device. + * + * @pdev: handle to the platform structure. 
+ * + */ +static void cdns_wdt_shutdown(struct platform_device *pdev) +{ + struct cdns_wdt *wdt = platform_get_drvdata(pdev); + + cdns_wdt_stop(&wdt->cdns_wdt_device); + clk_disable_unprepare(wdt->clk); +} + +/** + * cdns_wdt_suspend - Stop the device. + * + * @dev: handle to the device structure. + * Return: 0 always. + */ +static int __maybe_unused cdns_wdt_suspend(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct cdns_wdt *wdt = platform_get_drvdata(pdev); + + cdns_wdt_stop(&wdt->cdns_wdt_device); + clk_disable_unprepare(wdt->clk); + + return 0; +} + +/** + * cdns_wdt_resume - Resume the device. + * + * @dev: handle to the device structure. + * Return: 0 on success, errno otherwise. + */ +static int __maybe_unused cdns_wdt_resume(struct device *dev) +{ + int ret; + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct cdns_wdt *wdt = platform_get_drvdata(pdev); + + ret = clk_prepare_enable(wdt->clk); + if (ret) { + dev_err(dev, "unable to enable clock\n"); + return ret; + } + cdns_wdt_start(&wdt->cdns_wdt_device); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(cdns_wdt_pm_ops, cdns_wdt_suspend, cdns_wdt_resume); + +static struct of_device_id cdns_wdt_of_match[] = { + { .compatible = "cdns,wdt-r1p2", }, + { /* end of table */ } +}; +MODULE_DEVICE_TABLE(of, cdns_wdt_of_match); + +/* Driver Structure */ +static struct platform_driver cdns_wdt_driver = { + .probe = cdns_wdt_probe, + .remove = cdns_wdt_remove, + .shutdown = cdns_wdt_shutdown, + .driver = { + .name = "cdns-wdt", + .owner = THIS_MODULE, + .of_match_table = cdns_wdt_of_match, + .pm = &cdns_wdt_pm_ops, + }, +}; + +module_platform_driver(cdns_wdt_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Watchdog driver for Cadence WDT"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c new file mode 100644 index 00000000000..2cd6b2c2dd2 --- /dev/null +++ b/drivers/watchdog/da9063_wdt.c @@ -0,0 +1,191 @@ +/* + * Watchdog driver for DA9063 PMICs. + * + * Copyright(c) 2012 Dialog Semiconductor Ltd. + * + * Author: Mariusz Wojtasik <mariusz.wojtasik@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/mfd/da9063/registers.h> +#include <linux/mfd/da9063/core.h> +#include <linux/regmap.h> + +/* + * Watchdog selector to timeout in seconds. + * 0: WDT disabled; + * others: timeout = 2048 ms * 2^(TWDSCALE-1). 
+ */ +static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; +#define DA9063_TWDSCALE_DISABLE 0 +#define DA9063_TWDSCALE_MIN 1 +#define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1) +#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN] +#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX] +#define DA9063_WDG_TIMEOUT wdt_timeout[3] + +struct da9063_watchdog { + struct da9063 *da9063; + struct watchdog_device wdtdev; +}; + +static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs) +{ + unsigned int i; + + for (i = DA9063_TWDSCALE_MIN; i <= DA9063_TWDSCALE_MAX; i++) { + if (wdt_timeout[i] >= secs) + return i; + } + + return DA9063_TWDSCALE_MAX; +} + +static int _da9063_wdt_set_timeout(struct da9063 *da9063, unsigned int regval) +{ + return regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D, + DA9063_TWDSCALE_MASK, regval); +} + +static int da9063_wdt_start(struct watchdog_device *wdd) +{ + struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + unsigned int selector; + int ret; + + selector = da9063_wdt_timeout_to_sel(wdt->wdtdev.timeout); + ret = _da9063_wdt_set_timeout(wdt->da9063, selector); + if (ret) + dev_err(wdt->da9063->dev, "Watchdog failed to start (err = %d)\n", + ret); + + return ret; +} + +static int da9063_wdt_stop(struct watchdog_device *wdd) +{ + struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + int ret; + + ret = regmap_update_bits(wdt->da9063->regmap, DA9063_REG_CONTROL_D, + DA9063_TWDSCALE_MASK, DA9063_TWDSCALE_DISABLE); + if (ret) + dev_alert(wdt->da9063->dev, "Watchdog failed to stop (err = %d)\n", + ret); + + return ret; +} + +static int da9063_wdt_ping(struct watchdog_device *wdd) +{ + struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + int ret; + + ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F, + DA9063_WATCHDOG); + if (ret) + dev_alert(wdt->da9063->dev, "Failed to ping the watchdog (err = %d)\n", + ret); + + return ret; +} + +static int da9063_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd); + unsigned int selector; + int ret; + + selector = da9063_wdt_timeout_to_sel(timeout); + ret = _da9063_wdt_set_timeout(wdt->da9063, selector); + if (ret) + dev_err(wdt->da9063->dev, "Failed to set watchdog timeout (err = %d)\n", + ret); + else + wdd->timeout = wdt_timeout[selector]; + + return ret; +} + +static const struct watchdog_info da9063_watchdog_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .identity = "DA9063 Watchdog", +}; + +static const struct watchdog_ops da9063_watchdog_ops = { + .owner = THIS_MODULE, + .start = da9063_wdt_start, + .stop = da9063_wdt_stop, + .ping = da9063_wdt_ping, + .set_timeout = da9063_wdt_set_timeout, +}; + +static int da9063_wdt_probe(struct platform_device *pdev) +{ + int ret; + struct da9063 *da9063; + struct da9063_watchdog *wdt; + + if (!pdev->dev.parent) + return -EINVAL; + + da9063 = dev_get_drvdata(pdev->dev.parent); + if (!da9063) + return -EINVAL; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->da9063 = da9063; + + wdt->wdtdev.info = &da9063_watchdog_info; + wdt->wdtdev.ops = &da9063_watchdog_ops; + wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT; + wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT; + wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT; + + wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS; + + watchdog_set_drvdata(&wdt->wdtdev, wdt); + dev_set_drvdata(&pdev->dev, wdt); + + ret = 
watchdog_register_device(&wdt->wdtdev); + + return ret; +} + +static int da9063_wdt_remove(struct platform_device *pdev) +{ + struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev); + + watchdog_unregister_device(&wdt->wdtdev); + + return 0; +} + +static struct platform_driver da9063_wdt_driver = { + .probe = da9063_wdt_probe, + .remove = da9063_wdt_remove, + .driver = { + .name = DA9063_DRVNAME_WATCHDOG, + }, +}; +module_platform_driver(da9063_wdt_driver); + +MODULE_AUTHOR("Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>"); +MODULE_DESCRIPTION("Watchdog driver for Dialog DA9063"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DA9063_DRVNAME_WATCHDOG); diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 9f210299de2..9e577a64ec9 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -21,6 +21,7 @@ #include <linux/bitops.h> #include <linux/clk.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> @@ -29,9 +30,11 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/notifier.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/uaccess.h> @@ -40,6 +43,7 @@ #define WDOG_CONTROL_REG_OFFSET 0x00 #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04 +#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4 #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08 #define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c #define WDOG_COUNTER_RESTART_KICK_VALUE 0x76 @@ -62,6 +66,7 @@ static struct { unsigned long next_heartbeat; struct timer_list timer; int expect_close; + struct notifier_block restart_handler; } dw_wdt; static inline int dw_wdt_is_enabled(void) @@ -106,7 +111,8 @@ static int dw_wdt_set_top(unsigned top_s) } /* Set the new value in the watchdog. */ - writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, + dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); dw_wdt_set_next_heartbeat(); @@ -119,6 +125,26 @@ static void dw_wdt_keepalive(void) WDOG_COUNTER_RESTART_REG_OFFSET); } +static int dw_wdt_restart_handle(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + u32 val; + + writel(0, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + val = readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); + if (val & WDOG_CONTROL_REG_WDT_EN_MASK) + writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs + + WDOG_COUNTER_RESTART_REG_OFFSET); + else + writel(WDOG_CONTROL_REG_WDT_EN_MASK, + dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); + + /* wait for reset to assert... 
*/ + mdelay(500); + + return NOTIFY_DONE; +} + static void dw_wdt_ping(unsigned long data) { if (time_before(jiffies, dw_wdt.next_heartbeat) || @@ -314,6 +340,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (ret) goto out_disable_clk; + dw_wdt.restart_handler.notifier_call = dw_wdt_restart_handle; + dw_wdt.restart_handler.priority = 128; + ret = register_restart_handler(&dw_wdt.restart_handler); + if (ret) + pr_warn("cannot register restart handler\n"); + dw_wdt_set_next_heartbeat(); setup_timer(&dw_wdt.timer, dw_wdt_ping, 0); mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT); @@ -328,6 +360,8 @@ out_disable_clk: static int dw_wdt_drv_remove(struct platform_device *pdev) { + unregister_restart_handler(&dw_wdt.restart_handler); + misc_deregister(&dw_wdt_miscdev); clk_disable_unprepare(dw_wdt.clk); diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 68c3d379ffa..7e12f88bb4a 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -22,14 +22,17 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/notifier.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/regmap.h> #include <linux/timer.h> #include <linux/watchdog.h> @@ -59,6 +62,7 @@ struct imx2_wdt_device { struct regmap *regmap; struct timer_list timer; /* Pings the watchdog when closed */ struct watchdog_device wdog; + struct notifier_block restart_handler; }; static bool nowayout = WATCHDOG_NOWAYOUT; @@ -77,6 +81,31 @@ static const struct watchdog_info imx2_wdt_info = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, }; +static int imx2_restart_handler(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + unsigned int wcr_enable = IMX2_WDT_WCR_WDE; + struct imx2_wdt_device *wdev = container_of(this, + struct imx2_wdt_device, + restart_handler); + /* Assert SRS signal */ + regmap_write(wdev->regmap, 0, wcr_enable); + /* + * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be + * written twice), we add another two writes to ensure there must be at + * least two writes happen in the same one 32kHz clock period. We save + * the target check here, since the writes shouldn't be a huge burden + * for other platforms. + */ + regmap_write(wdev->regmap, 0, wcr_enable); + regmap_write(wdev->regmap, 0, wcr_enable); + + /* wait for reset to assert... 
*/ + mdelay(500); + + return NOTIFY_DONE; +} + static inline void imx2_wdt_setup(struct watchdog_device *wdog) { struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); @@ -191,12 +220,10 @@ static struct regmap_config imx2_wdt_regmap_config = { static int __init imx2_wdt_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct imx2_wdt_device *wdev; struct watchdog_device *wdog; struct resource *res; void __iomem *base; - bool big_endian; int ret; u32 val; @@ -204,10 +231,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) if (!wdev) return -ENOMEM; - big_endian = of_property_read_bool(np, "big-endian"); - if (big_endian) - imx2_wdt_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) @@ -257,6 +280,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) return ret; } + wdev->restart_handler.notifier_call = imx2_restart_handler; + wdev->restart_handler.priority = 128; + ret = register_restart_handler(&wdev->restart_handler); + if (ret) + dev_err(&pdev->dev, "cannot register restart handler\n"); + dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n", wdog->timeout, nowayout); @@ -268,6 +297,8 @@ static int __exit imx2_wdt_remove(struct platform_device *pdev) struct watchdog_device *wdog = platform_get_drvdata(pdev); struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); + unregister_restart_handler(&wdev->restart_handler); + watchdog_unregister_device(wdog); if (imx2_wdt_is_running(wdev)) { diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c new file mode 100644 index 00000000000..ef6a298e8c4 --- /dev/null +++ b/drivers/watchdog/meson_wdt.c @@ -0,0 +1,236 @@ +/* + * Meson Watchdog Driver + * + * Copyright (c) 2014 Carlo Caione + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/types.h> +#include <linux/watchdog.h> + +#define DRV_NAME "meson_wdt" + +#define MESON_WDT_TC 0x00 +#define MESON_WDT_TC_EN BIT(22) +#define MESON_WDT_TC_TM_MASK 0x3fffff +#define MESON_WDT_DC_RESET (3 << 24) + +#define MESON_WDT_RESET 0x04 + +#define MESON_WDT_TIMEOUT 30 +#define MESON_WDT_MIN_TIMEOUT 1 +#define MESON_WDT_MAX_TIMEOUT (MESON_WDT_TC_TM_MASK / 100000) + +#define MESON_SEC_TO_TC(s) ((s) * 100000) + +static bool nowayout = WATCHDOG_NOWAYOUT; +static unsigned int timeout = MESON_WDT_TIMEOUT; + +struct meson_wdt_dev { + struct watchdog_device wdt_dev; + void __iomem *wdt_base; + struct notifier_block restart_handler; +}; + +static int meson_restart_handle(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + u32 tc_reboot = MESON_WDT_DC_RESET | MESON_WDT_TC_EN; + struct meson_wdt_dev *meson_wdt = container_of(this, + struct meson_wdt_dev, + restart_handler); + + while (1) { + writel(tc_reboot, meson_wdt->wdt_base + MESON_WDT_TC); + mdelay(5); + } + + return NOTIFY_DONE; +} + +static int meson_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev); + + writel(0, meson_wdt->wdt_base + MESON_WDT_RESET); + + return 0; +} + +static void meson_wdt_change_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev); + u32 reg; + + reg = readl(meson_wdt->wdt_base + MESON_WDT_TC); + reg &= ~MESON_WDT_TC_TM_MASK; + reg |= MESON_SEC_TO_TC(timeout); + writel(reg, meson_wdt->wdt_base + MESON_WDT_TC); +} + +static int meson_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + wdt_dev->timeout = timeout; + + meson_wdt_change_timeout(wdt_dev, timeout); + meson_wdt_ping(wdt_dev); + + return 0; +} + +static int meson_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev); + u32 reg; + + reg = readl(meson_wdt->wdt_base + MESON_WDT_TC); + reg &= ~MESON_WDT_TC_EN; + writel(reg, meson_wdt->wdt_base + MESON_WDT_TC); + + return 0; +} + +static int meson_wdt_start(struct watchdog_device *wdt_dev) +{ + struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev); + u32 reg; + + meson_wdt_change_timeout(wdt_dev, meson_wdt->wdt_dev.timeout); + meson_wdt_ping(wdt_dev); + + reg = readl(meson_wdt->wdt_base + MESON_WDT_TC); + reg |= MESON_WDT_TC_EN; + writel(reg, meson_wdt->wdt_base + MESON_WDT_TC); + + return 0; +} + +static const struct watchdog_info meson_wdt_info = { + .identity = DRV_NAME, + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops meson_wdt_ops = { + .owner = THIS_MODULE, + .start = meson_wdt_start, + .stop = meson_wdt_stop, + .ping = meson_wdt_ping, + .set_timeout = meson_wdt_set_timeout, +}; + +static int meson_wdt_probe(struct platform_device *pdev) +{ + struct resource *res; + struct meson_wdt_dev *meson_wdt; + int err; + + meson_wdt = devm_kzalloc(&pdev->dev, sizeof(*meson_wdt), GFP_KERNEL); + if (!meson_wdt) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + meson_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res); + if 
(IS_ERR(meson_wdt->wdt_base)) + return PTR_ERR(meson_wdt->wdt_base); + + meson_wdt->wdt_dev.parent = &pdev->dev; + meson_wdt->wdt_dev.info = &meson_wdt_info; + meson_wdt->wdt_dev.ops = &meson_wdt_ops; + meson_wdt->wdt_dev.timeout = MESON_WDT_TIMEOUT; + meson_wdt->wdt_dev.max_timeout = MESON_WDT_MAX_TIMEOUT; + meson_wdt->wdt_dev.min_timeout = MESON_WDT_MIN_TIMEOUT; + + watchdog_set_drvdata(&meson_wdt->wdt_dev, meson_wdt); + + watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, &pdev->dev); + watchdog_set_nowayout(&meson_wdt->wdt_dev, nowayout); + + meson_wdt_stop(&meson_wdt->wdt_dev); + + err = watchdog_register_device(&meson_wdt->wdt_dev); + if (err) + return err; + + platform_set_drvdata(pdev, meson_wdt); + + meson_wdt->restart_handler.notifier_call = meson_restart_handle; + meson_wdt->restart_handler.priority = 128; + err = register_restart_handler(&meson_wdt->restart_handler); + if (err) + dev_err(&pdev->dev, + "cannot register restart handler (err=%d)\n", err); + + dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)", + meson_wdt->wdt_dev.timeout, nowayout); + + return 0; +} + +static int meson_wdt_remove(struct platform_device *pdev) +{ + struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev); + + unregister_restart_handler(&meson_wdt->restart_handler); + + watchdog_unregister_device(&meson_wdt->wdt_dev); + + return 0; +} + +static void meson_wdt_shutdown(struct platform_device *pdev) +{ + struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev); + + meson_wdt_stop(&meson_wdt->wdt_dev); +} + +static const struct of_device_id meson_wdt_dt_ids[] = { + { .compatible = "amlogic,meson6-wdt" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids); + +static struct platform_driver meson_wdt_driver = { + .probe = meson_wdt_probe, + .remove = meson_wdt_remove, + .shutdown = meson_wdt_shutdown, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = meson_wdt_dt_ids, + }, +}; + +module_platform_driver(meson_wdt_driver); + +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds"); + +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); +MODULE_DESCRIPTION("Meson Watchdog Timer Driver"); diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c index 1e6e28df5d7..b2e1b4cbbdc 100644 --- a/drivers/watchdog/of_xilinx_wdt.c +++ b/drivers/watchdog/of_xilinx_wdt.c @@ -236,7 +236,6 @@ static struct platform_driver xwdt_driver = { .probe = xwdt_probe, .remove = xwdt_remove, .driver = { - .owner = THIS_MODULE, .name = WATCHDOG_NAME, .of_match_table = xwdt_of_match, }, diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c new file mode 100644 index 00000000000..aa85618c4d0 --- /dev/null +++ b/drivers/watchdog/qcom-wdt.c @@ -0,0 +1,224 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/watchdog.h> + +#define WDT_RST 0x0 +#define WDT_EN 0x8 +#define WDT_BITE_TIME 0x24 + +struct qcom_wdt { + struct watchdog_device wdd; + struct clk *clk; + unsigned long rate; + struct notifier_block restart_nb; + void __iomem *base; +}; + +static inline +struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct qcom_wdt, wdd); +} + +static int qcom_wdt_start(struct watchdog_device *wdd) +{ + struct qcom_wdt *wdt = to_qcom_wdt(wdd); + + writel(0, wdt->base + WDT_EN); + writel(1, wdt->base + WDT_RST); + writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME); + writel(1, wdt->base + WDT_EN); + return 0; +} + +static int qcom_wdt_stop(struct watchdog_device *wdd) +{ + struct qcom_wdt *wdt = to_qcom_wdt(wdd); + + writel(0, wdt->base + WDT_EN); + return 0; +} + +static int qcom_wdt_ping(struct watchdog_device *wdd) +{ + struct qcom_wdt *wdt = to_qcom_wdt(wdd); + + writel(1, wdt->base + WDT_RST); + return 0; +} + +static int qcom_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + wdd->timeout = timeout; + return qcom_wdt_start(wdd); +} + +static const struct watchdog_ops qcom_wdt_ops = { + .start = qcom_wdt_start, + .stop = qcom_wdt_stop, + .ping = qcom_wdt_ping, + .set_timeout = qcom_wdt_set_timeout, + .owner = THIS_MODULE, +}; + +static const struct watchdog_info qcom_wdt_info = { + .options = WDIOF_KEEPALIVEPING + | WDIOF_MAGICCLOSE + | WDIOF_SETTIMEOUT, + .identity = KBUILD_MODNAME, +}; + +static int qcom_wdt_restart(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct qcom_wdt *wdt = container_of(nb, struct qcom_wdt, restart_nb); + u32 timeout; + + /* + * Trigger watchdog bite: + * Setup BITE_TIME to be 128ms, and enable WDT. + */ + timeout = 128 * wdt->rate / 1000; + + writel(0, wdt->base + WDT_EN); + writel(1, wdt->base + WDT_RST); + writel(timeout, wdt->base + WDT_BITE_TIME); + writel(1, wdt->base + WDT_EN); + + /* + * Actually make sure the above sequence hits hardware before sleeping. + */ + wmb(); + + msleep(150); + return NOTIFY_DONE; +} + +static int qcom_wdt_probe(struct platform_device *pdev) +{ + struct qcom_wdt *wdt; + struct resource *res; + int ret; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); + + wdt->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(wdt->clk)) { + dev_err(&pdev->dev, "failed to get input clock\n"); + return PTR_ERR(wdt->clk); + } + + ret = clk_prepare_enable(wdt->clk); + if (ret) { + dev_err(&pdev->dev, "failed to setup clock\n"); + return ret; + } + + /* + * We use the clock rate to calculate the max timeout, so ensure it's + * not zero to avoid a divide-by-zero exception. + * + * WATCHDOG_CORE assumes units of seconds, if the WDT is clocked such + * that it would bite before a second elapses it's usefulness is + * limited. Bail if this is the case. 
+ */ + wdt->rate = clk_get_rate(wdt->clk); + if (wdt->rate == 0 || + wdt->rate > 0x10000000U) { + dev_err(&pdev->dev, "invalid clock rate\n"); + ret = -EINVAL; + goto err_clk_unprepare; + } + + wdt->wdd.dev = &pdev->dev; + wdt->wdd.info = &qcom_wdt_info; + wdt->wdd.ops = &qcom_wdt_ops; + wdt->wdd.min_timeout = 1; + wdt->wdd.max_timeout = 0x10000000U / wdt->rate; + + /* + * If 'timeout-sec' unspecified in devicetree, assume a 30 second + * default, unless the max timeout is less than 30 seconds, then use + * the max instead. + */ + wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U); + watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev); + + ret = watchdog_register_device(&wdt->wdd); + if (ret) { + dev_err(&pdev->dev, "failed to register watchdog\n"); + goto err_clk_unprepare; + } + + /* + * WDT restart notifier has priority 0 (use as a last resort) + */ + wdt->restart_nb.notifier_call = qcom_wdt_restart; + ret = register_restart_handler(&wdt->restart_nb); + if (ret) + dev_err(&pdev->dev, "failed to setup restart handler\n"); + + platform_set_drvdata(pdev, wdt); + return 0; + +err_clk_unprepare: + clk_disable_unprepare(wdt->clk); + return ret; +} + +static int qcom_wdt_remove(struct platform_device *pdev) +{ + struct qcom_wdt *wdt = platform_get_drvdata(pdev); + + unregister_restart_handler(&wdt->restart_nb); + watchdog_unregister_device(&wdt->wdd); + clk_disable_unprepare(wdt->clk); + return 0; +} + +static const struct of_device_id qcom_wdt_of_table[] = { + { .compatible = "qcom,kpss-wdt-msm8960", }, + { .compatible = "qcom,kpss-wdt-apq8064", }, + { .compatible = "qcom,kpss-wdt-ipq8064", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_wdt_of_table); + +static struct platform_driver qcom_watchdog_driver = { + .probe = qcom_wdt_probe, + .remove = qcom_wdt_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = qcom_wdt_of_table, + }, +}; +module_platform_driver(qcom_watchdog_driver); + +MODULE_DESCRIPTION("QCOM KPSS Watchdog Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c new file mode 100644 index 00000000000..d1c12278cb6 --- /dev/null +++ b/drivers/watchdog/rn5t618_wdt.c @@ -0,0 +1,198 @@ +/* + * Watchdog driver for Ricoh RN5T618 PMIC + * + * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/device.h> +#include <linux/mfd/rn5t618.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define DRIVER_NAME "rn5t618-wdt" + +static bool nowayout = WATCHDOG_NOWAYOUT; +static unsigned int timeout; + +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds"); + +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct rn5t618_wdt { + struct watchdog_device wdt_dev; + struct rn5t618 *rn5t618; +}; + +/* + * This array encodes the values of WDOGTIM field for the supported + * watchdog expiration times. If the watchdog is not accessed before + * the timer expiration, the PMU generates an interrupt and if the CPU + * doesn't clear it within one second the system is restarted. 
+ */ +static const struct { + u8 reg_val; + unsigned int time; +} rn5t618_wdt_map[] = { + { 0, 1 }, + { 1, 8 }, + { 2, 32 }, + { 3, 128 }, +}; + +static int rn5t618_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int t) +{ + struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev); + int ret, i; + + for (i = 0; i < ARRAY_SIZE(rn5t618_wdt_map); i++) { + if (rn5t618_wdt_map[i].time + 1 >= t) + break; + } + + if (i == ARRAY_SIZE(rn5t618_wdt_map)) + return -EINVAL; + + ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG, + RN5T618_WATCHDOG_WDOGTIM_M, + rn5t618_wdt_map[i].reg_val); + if (!ret) + wdt_dev->timeout = rn5t618_wdt_map[i].time; + + return ret; +} + +static int rn5t618_wdt_start(struct watchdog_device *wdt_dev) +{ + struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev); + int ret; + + ret = rn5t618_wdt_set_timeout(wdt_dev, wdt_dev->timeout); + if (ret) + return ret; + + /* enable repower-on */ + ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_REPCNT, + RN5T618_REPCNT_REPWRON, + RN5T618_REPCNT_REPWRON); + if (ret) + return ret; + + /* enable watchdog */ + ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG, + RN5T618_WATCHDOG_WDOGEN, + RN5T618_WATCHDOG_WDOGEN); + if (ret) + return ret; + + /* enable watchdog interrupt */ + return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIREN, + RN5T618_PWRIRQ_IR_WDOG, + RN5T618_PWRIRQ_IR_WDOG); +} + +static int rn5t618_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev); + + return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG, + RN5T618_WATCHDOG_WDOGEN, 0); +} + +static int rn5t618_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev); + unsigned int val; + int ret; + + /* The counter is restarted after a R/W access to watchdog register */ + ret = regmap_read(wdt->rn5t618->regmap, RN5T618_WATCHDOG, &val); + if (ret) + return ret; + + ret = regmap_write(wdt->rn5t618->regmap, RN5T618_WATCHDOG, val); + if (ret) + return ret; + + /* Clear pending watchdog interrupt */ + return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIRQ, + RN5T618_PWRIRQ_IR_WDOG, 0); +} + +static struct watchdog_info rn5t618_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | + WDIOF_KEEPALIVEPING, + .identity = DRIVER_NAME, +}; + +static struct watchdog_ops rn5t618_wdt_ops = { + .owner = THIS_MODULE, + .start = rn5t618_wdt_start, + .stop = rn5t618_wdt_stop, + .ping = rn5t618_wdt_ping, + .set_timeout = rn5t618_wdt_set_timeout, +}; + +static int rn5t618_wdt_probe(struct platform_device *pdev) +{ + struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent); + struct rn5t618_wdt *wdt; + int min_timeout, max_timeout; + + wdt = devm_kzalloc(&pdev->dev, sizeof(struct rn5t618_wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + min_timeout = rn5t618_wdt_map[0].time; + max_timeout = rn5t618_wdt_map[ARRAY_SIZE(rn5t618_wdt_map) - 1].time; + + wdt->rn5t618 = rn5t618; + wdt->wdt_dev.info = &rn5t618_wdt_info; + wdt->wdt_dev.ops = &rn5t618_wdt_ops; + wdt->wdt_dev.min_timeout = min_timeout; + wdt->wdt_dev.max_timeout = max_timeout; + wdt->wdt_dev.timeout = max_timeout; + wdt->wdt_dev.parent = &pdev->dev; + + watchdog_set_drvdata(&wdt->wdt_dev, wdt); + watchdog_init_timeout(&wdt->wdt_dev, timeout, &pdev->dev); + watchdog_set_nowayout(&wdt->wdt_dev, nowayout); + + platform_set_drvdata(pdev, wdt); + + return watchdog_register_device(&wdt->wdt_dev); +} + +static int rn5t618_wdt_remove(struct platform_device *pdev) +{ + 
struct rn5t618_wdt *wdt = platform_get_drvdata(pdev); + + watchdog_unregister_device(&wdt->wdt_dev); + + return 0; +} + +static struct platform_driver rn5t618_wdt_driver = { + .probe = rn5t618_wdt_probe, + .remove = rn5t618_wdt_remove, + .driver = { + .name = DRIVER_NAME, + }, +}; + +module_platform_driver(rn5t618_wdt_driver); + +MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); +MODULE_DESCRIPTION("RN5T618 watchdog driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 7c6ccd071ba..8532c3e2aea 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -41,6 +41,8 @@ #include <linux/of.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> +#include <linux/reboot.h> +#include <linux/delay.h> #define S3C2410_WTCON 0x00 #define S3C2410_WTDAT 0x04 @@ -128,6 +130,7 @@ struct s3c2410_wdt { unsigned long wtdat_save; struct watchdog_device wdt_device; struct notifier_block freq_transition; + struct notifier_block restart_handler; struct s3c2410_wdt_variant *drv_data; struct regmap *pmureg; }; @@ -155,6 +158,15 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = { .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, }; +static const struct s3c2410_wdt_variant drv_data_exynos7 = { + .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET, + .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET, + .mask_bit = 0, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = 23, /* A57 WDTRESET */ + .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, +}; + static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt", .data = &drv_data_s3c2410 }, @@ -162,6 +174,8 @@ static const struct of_device_id s3c2410_wdt_match[] = { .data = &drv_data_exynos5250 }, { .compatible = "samsung,exynos5420-wdt", .data = &drv_data_exynos5420 }, + { .compatible = "samsung,exynos7-wdt", + .data = &drv_data_exynos7 }, {}, }; MODULE_DEVICE_TABLE(of, s3c2410_wdt_match); @@ -438,6 +452,31 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt) } #endif +static int s3c2410wdt_restart(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + struct s3c2410_wdt *wdt = container_of(this, struct s3c2410_wdt, + restart_handler); + void __iomem *wdt_base = wdt->reg_base; + + /* disable watchdog, to be safe */ + writel(0, wdt_base + S3C2410_WTCON); + + /* put initial values into count and data */ + writel(0x80, wdt_base + S3C2410_WTCNT); + writel(0x80, wdt_base + S3C2410_WTDAT); + + /* set the watchdog to go and reset... */ + writel(S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV16 | + S3C2410_WTCON_RSTEN | S3C2410_WTCON_PRESCALE(0x20), + wdt_base + S3C2410_WTCON); + + /* wait for reset to assert... 
*/ + mdelay(500); + + return NOTIFY_DONE; +} + static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt) { unsigned int rst_stat; @@ -592,6 +631,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, wdt); + wdt->restart_handler.notifier_call = s3c2410wdt_restart; + wdt->restart_handler.priority = 128; + ret = register_restart_handler(&wdt->restart_handler); + if (ret) + pr_err("cannot register restart handler, %d\n", ret); + /* print out a statement of readiness */ wtcon = readl(wdt->reg_base + S3C2410_WTCON); @@ -621,6 +666,8 @@ static int s3c2410wdt_remove(struct platform_device *dev) int ret; struct s3c2410_wdt *wdt = platform_get_drvdata(dev); + unregister_restart_handler(&wdt->restart_handler); + ret = s3c2410wdt_mask_and_disable_reset(wdt, true); if (ret < 0) return ret; diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c index 3804d5e9bae..a62b1b6decf 100644 --- a/drivers/watchdog/stmp3xxx_rtc_wdt.c +++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c @@ -94,9 +94,33 @@ static int stmp3xxx_wdt_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev) +{ + struct watchdog_device *wdd = &stmp3xxx_wdd; + + if (watchdog_active(wdd)) + return wdt_stop(wdd); + + return 0; +} + +static int __maybe_unused stmp3xxx_wdt_resume(struct device *dev) +{ + struct watchdog_device *wdd = &stmp3xxx_wdd; + + if (watchdog_active(wdd)) + return wdt_start(wdd); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stmp3xxx_wdt_pm_ops, + stmp3xxx_wdt_suspend, stmp3xxx_wdt_resume); + static struct platform_driver stmp3xxx_wdt_driver = { .driver = { .name = "stmp3xxx_rtc_wdt", + .pm = &stmp3xxx_wdt_pm_ops, }, .probe = stmp3xxx_wdt_probe, .remove = stmp3xxx_wdt_remove, diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index 480bb557f35..b62301e74e5 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c @@ -23,6 +23,7 @@ #include <linux/moduleparam.h> #include <linux/notifier.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/types.h> @@ -30,15 +31,11 @@ #define WDT_MAX_TIMEOUT 16 #define WDT_MIN_TIMEOUT 1 -#define WDT_MODE_TIMEOUT(n) ((n) << 3) -#define WDT_TIMEOUT_MASK WDT_MODE_TIMEOUT(0x0F) +#define WDT_TIMEOUT_MASK 0x0F -#define WDT_CTRL 0x00 #define WDT_CTRL_RELOAD ((1 << 0) | (0x0a57 << 1)) -#define WDT_MODE 0x04 #define WDT_MODE_EN (1 << 0) -#define WDT_MODE_RST_EN (1 << 1) #define DRV_NAME "sunxi-wdt" #define DRV_VERSION "1.0" @@ -46,15 +43,29 @@ static bool nowayout = WATCHDOG_NOWAYOUT; static unsigned int timeout = WDT_MAX_TIMEOUT; +/* + * This structure stores the register offsets for different variants + * of Allwinner's watchdog hardware. 
+ */ +struct sunxi_wdt_reg { + u8 wdt_ctrl; + u8 wdt_cfg; + u8 wdt_mode; + u8 wdt_timeout_shift; + u8 wdt_reset_mask; + u8 wdt_reset_val; +}; + struct sunxi_wdt_dev { struct watchdog_device wdt_dev; void __iomem *wdt_base; + const struct sunxi_wdt_reg *wdt_regs; struct notifier_block restart_handler; }; /* * wdt_timeout_map maps the watchdog timer interval value in seconds to - * the value of the register WDT_MODE bit 3:6 + * the value of the register WDT_MODE at bits .wdt_timeout_shift ~ +3 * * [timeout seconds] = register value * @@ -82,19 +93,32 @@ static int sunxi_restart_handle(struct notifier_block *this, unsigned long mode, struct sunxi_wdt_dev, restart_handler); void __iomem *wdt_base = sunxi_wdt->wdt_base; + const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs; + u32 val; + + /* Set system reset function */ + val = readl(wdt_base + regs->wdt_cfg); + val &= ~(regs->wdt_reset_mask); + val |= regs->wdt_reset_val; + writel(val, wdt_base + regs->wdt_cfg); - /* Enable timer and set reset bit in the watchdog */ - writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE); + /* Set lowest timeout and enable watchdog */ + val = readl(wdt_base + regs->wdt_mode); + val &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift); + val |= WDT_MODE_EN; + writel(val, wdt_base + regs->wdt_mode); /* * Restart the watchdog. The default (and lowest) interval * value for the watchdog is 0.5s. */ - writel(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL); + writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl); while (1) { mdelay(5); - writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE); + val = readl(wdt_base + regs->wdt_mode); + val |= WDT_MODE_EN; + writel(val, wdt_base + regs->wdt_mode); } return NOTIFY_DONE; } @@ -103,8 +127,9 @@ static int sunxi_wdt_ping(struct watchdog_device *wdt_dev) { struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base = sunxi_wdt->wdt_base; + const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs; - iowrite32(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL); + writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl); return 0; } @@ -114,6 +139,7 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev, { struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base = sunxi_wdt->wdt_base; + const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs; u32 reg; if (wdt_timeout_map[timeout] == 0) @@ -121,10 +147,10 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev, sunxi_wdt->wdt_dev.timeout = timeout; - reg = ioread32(wdt_base + WDT_MODE); - reg &= ~WDT_TIMEOUT_MASK; - reg |= WDT_MODE_TIMEOUT(wdt_timeout_map[timeout]); - iowrite32(reg, wdt_base + WDT_MODE); + reg = readl(wdt_base + regs->wdt_mode); + reg &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift); + reg |= wdt_timeout_map[timeout] << regs->wdt_timeout_shift; + writel(reg, wdt_base + regs->wdt_mode); sunxi_wdt_ping(wdt_dev); @@ -135,8 +161,9 @@ static int sunxi_wdt_stop(struct watchdog_device *wdt_dev) { struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base = sunxi_wdt->wdt_base; + const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs; - iowrite32(0, wdt_base + WDT_MODE); + writel(0, wdt_base + regs->wdt_mode); return 0; } @@ -146,6 +173,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev) u32 reg; struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base = sunxi_wdt->wdt_base; + const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs; int ret; ret = sunxi_wdt_set_timeout(&sunxi_wdt->wdt_dev, 
@@ -153,9 +181,16 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev) if (ret < 0) return ret; - reg = ioread32(wdt_base + WDT_MODE); - reg |= (WDT_MODE_RST_EN | WDT_MODE_EN); - iowrite32(reg, wdt_base + WDT_MODE); + /* Set system reset function */ + reg = readl(wdt_base + regs->wdt_cfg); + reg &= ~(regs->wdt_reset_mask); + reg |= ~(regs->wdt_reset_val); + writel(reg, wdt_base + regs->wdt_cfg); + + /* Enable watchdog */ + reg = readl(wdt_base + regs->wdt_mode); + reg |= WDT_MODE_EN; + writel(reg, wdt_base + regs->wdt_mode); return 0; } @@ -175,9 +210,35 @@ static const struct watchdog_ops sunxi_wdt_ops = { .set_timeout = sunxi_wdt_set_timeout, }; +static const struct sunxi_wdt_reg sun4i_wdt_reg = { + .wdt_ctrl = 0x00, + .wdt_cfg = 0x04, + .wdt_mode = 0x04, + .wdt_timeout_shift = 3, + .wdt_reset_mask = 0x02, + .wdt_reset_val = 0x02, +}; + +static const struct sunxi_wdt_reg sun6i_wdt_reg = { + .wdt_ctrl = 0x10, + .wdt_cfg = 0x14, + .wdt_mode = 0x18, + .wdt_timeout_shift = 4, + .wdt_reset_mask = 0x03, + .wdt_reset_val = 0x01, +}; + +static const struct of_device_id sunxi_wdt_dt_ids[] = { + { .compatible = "allwinner,sun4i-a10-wdt", .data = &sun4i_wdt_reg }, + { .compatible = "allwinner,sun6i-a31-wdt", .data = &sun6i_wdt_reg }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids); + static int sunxi_wdt_probe(struct platform_device *pdev) { struct sunxi_wdt_dev *sunxi_wdt; + const struct of_device_id *device; struct resource *res; int err; @@ -187,6 +248,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sunxi_wdt); + device = of_match_device(sunxi_wdt_dt_ids, &pdev->dev); + if (!device) + return -ENODEV; + + sunxi_wdt->wdt_regs = device->data; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sunxi_wdt->wdt_base)) @@ -242,12 +309,6 @@ static void sunxi_wdt_shutdown(struct platform_device *pdev) sunxi_wdt_stop(&sunxi_wdt->wdt_dev); } -static const struct of_device_id sunxi_wdt_dt_ids[] = { - { .compatible = "allwinner,sun4i-a10-wdt" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids); - static struct platform_driver sunxi_wdt_driver = { .probe = sunxi_wdt_probe, .remove = sunxi_wdt_remove, diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index afa9d6ef353..dee9c6cbe6d 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -428,11 +428,7 @@ static int ts72xx_wdt_probe(struct platform_device *pdev) static int ts72xx_wdt_remove(struct platform_device *pdev) { - int error; - - error = misc_deregister(&ts72xx_wdt_miscdev); - - return error; + return misc_deregister(&ts72xx_wdt_miscdev); } static struct platform_driver ts72xx_wdt_driver = { diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 1e0a317d3dc..3860d02729d 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -167,6 +167,9 @@ static struct page *balloon_next_page(struct page *page) static enum bp_state update_schedule(enum bp_state state) { + if (state == BP_ECANCELED) + return BP_ECANCELED; + if (state == BP_DONE) { balloon_stats.schedule_delay = 1; balloon_stats.retry_count = 1; diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index dd9c249ea31..95ee4302ffb 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -41,24 +41,29 @@ static int xen_add_device(struct device *dev) #endif if (pci_seg_supported) { - struct physdev_pci_device_add add = { - .seg = pci_domain_nr(pci_dev->bus), - .bus = 
pci_dev->bus->number, - .devfn = pci_dev->devfn + struct { + struct physdev_pci_device_add add; + uint32_t pxm; + } add_ext = { + .add.seg = pci_domain_nr(pci_dev->bus), + .add.bus = pci_dev->bus->number, + .add.devfn = pci_dev->devfn }; + struct physdev_pci_device_add *add = &add_ext.add; + #ifdef CONFIG_ACPI acpi_handle handle; #endif #ifdef CONFIG_PCI_IOV if (pci_dev->is_virtfn) { - add.flags = XEN_PCI_DEV_VIRTFN; - add.physfn.bus = physfn->bus->number; - add.physfn.devfn = physfn->devfn; + add->flags = XEN_PCI_DEV_VIRTFN; + add->physfn.bus = physfn->bus->number; + add->physfn.devfn = physfn->devfn; } else #endif if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) - add.flags = XEN_PCI_DEV_EXTFN; + add->flags = XEN_PCI_DEV_EXTFN; #ifdef CONFIG_ACPI handle = ACPI_HANDLE(&pci_dev->dev); @@ -77,8 +82,8 @@ static int xen_add_device(struct device *dev) status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); if (ACPI_SUCCESS(status)) { - add.optarr[0] = pxm; - add.flags |= XEN_PCI_DEV_PXM; + add->optarr[0] = pxm; + add->flags |= XEN_PCI_DEV_PXM; break; } status = acpi_get_parent(handle, &handle); @@ -86,7 +91,7 @@ static int xen_add_device(struct device *dev) } #endif /* CONFIG_ACPI */ - r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); + r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add); if (r != -ENOSYS) return r; pci_seg_supported = false; diff --git a/fs/Kconfig b/fs/Kconfig index db5dc159871..664991afe0c 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -67,6 +67,7 @@ source "fs/quota/Kconfig" source "fs/autofs4/Kconfig" source "fs/fuse/Kconfig" +source "fs/overlayfs/Kconfig" menu "Caches" diff --git a/fs/Makefile b/fs/Makefile index 90c88529892..da0bbb456d3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -104,6 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ +obj-$(CONFIG_OVERLAY_FS) += overlayfs/ obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ @@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt; static const struct file_operations aio_ring_fops; static const struct address_space_operations aio_ctx_aops; +/* Backing dev info for aio fs. 
+ * -no dirty page accounting or writeback happens + */ +static struct backing_dev_info aio_fs_backing_dev_info = { + .name = "aiofs", + .state = 0, + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, +}; + static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) { struct qstr this = QSTR_INIT("[aio]", 5); @@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) inode->i_mapping->a_ops = &aio_ctx_aops; inode->i_mapping->private_data = ctx; + inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; inode->i_size = PAGE_SIZE * nr_pages; path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); @@ -220,6 +230,9 @@ static int __init aio_setup(void) if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); + if (bdi_init(&aio_fs_backing_dev_info)) + panic("Failed to init aio fs backing dev info."); + kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); @@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = { .mmap = aio_ring_mmap, }; -static int aio_set_page_dirty(struct page *page) -{ - return 0; -} - #if IS_ENABLED(CONFIG_MIGRATION) static int aio_migratepage(struct address_space *mapping, struct page *new, struct page *old, enum migrate_mode mode) @@ -357,7 +365,7 @@ out: #endif static const struct address_space_operations aio_ctx_aops = { - .set_page_dirty = aio_set_page_dirty, + .set_page_dirty = __set_page_dirty_no_writeback, #if IS_ENABLED(CONFIG_MIGRATION) .migratepage = aio_migratepage, #endif @@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx) pr_debug("pid(%d) page[%d]->count=%d\n", current->pid, i, page_count(page)); SetPageUptodate(page); - SetPageDirty(page); unlock_page(page); ctx->ring_pages[i] = page; diff --git a/fs/block_dev.c b/fs/block_dev.c index cc9d4114cda..1d9c9f3754f 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1585,7 +1585,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) } EXPORT_SYMBOL_GPL(blkdev_write_iter); -static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) +ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct inode *bd_inode = file->f_mapping->host; @@ -1599,6 +1599,7 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) iov_iter_truncate(to, size); return generic_file_read_iter(iocb, to); } +EXPORT_SYMBOL_GPL(blkdev_read_iter); /* * Try to release a page associated with block device when the system diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d3220d31d3c..dcd9be32ac5 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); - if (*pg_index == (vcnt - 1) && *pg_offset == 0) - memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); kunmap_atomic(kaddr); flush_dcache_page(page_out); @@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, return 1; } + +/* + * When uncompressing data, we need to make sure and zero any parts of + * the biovec that were not filled in by the decompression code. pg_index + * and pg_offset indicate the last page and the last offset of that page + * that have been filled in. This will zero everything remaining in the + * biovec. 
+ */ +void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, + unsigned long pg_index, + unsigned long pg_offset) +{ + while (pg_index < vcnt) { + struct page *page = bvec[pg_index].bv_page; + unsigned long off = bvec[pg_index].bv_offset; + unsigned long len = bvec[pg_index].bv_len; + + if (pg_offset < off) + pg_offset = off; + if (pg_offset < off + len) { + unsigned long bytes = off + len - pg_offset; + char *kaddr; + + kaddr = kmap_atomic(page); + memset(kaddr + pg_offset, 0, bytes); + kunmap_atomic(kaddr); + } + pg_index++; + pg_offset = 0; + } +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 0c803b4fbf9..d181f70caae 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long nr_pages); int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); - +void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, + unsigned long pg_index, + unsigned long pg_offset); struct btrfs_compress_op { struct list_head *(*alloc_workspace)(void); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 19bc6162fb8..150822ee0a0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, { int i; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* lockdep really cares that we take all of these spinlocks - * in the right order. If any of the locks in the path are not - * currently blocking, it is going to complain. So, make really - * really sure by forcing the path to blocking before we clear - * the path blocking. - */ if (held) { btrfs_set_lock_blocking_rw(held, held_rw); if (held_rw == BTRFS_WRITE_LOCK) @@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, held_rw = BTRFS_READ_LOCK_BLOCKING; } btrfs_set_path_blocking(p); -#endif for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { if (p->nodes[i] && p->locks[i]) { @@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, } } -#ifdef CONFIG_DEBUG_LOCK_ALLOC if (held) btrfs_clear_lock_blocking_rw(held, held_rw); -#endif } /* this also releases the path */ @@ -2893,7 +2883,7 @@ cow_done: } p->locks[level] = BTRFS_WRITE_LOCK; } else { - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); @@ -3025,7 +3015,7 @@ again: } level = btrfs_header_level(b); - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d557264ee97..fe69edda11f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3276,7 +3276,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); int btrfs_async_run_delayed_refs(struct btrfs_root *root, unsigned long count, int wait); -int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); +int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 offset, int metadata, u64 *refs, u64 *flags); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1ad0f47ac85..1bf9f897065 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3817,19 +3817,19 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, struct btrfs_super_block *sb = 
fs_info->super_copy; int ret = 0; - if (sb->root_level > BTRFS_MAX_LEVEL) { - printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n", - sb->root_level, BTRFS_MAX_LEVEL); + if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { + printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n", + btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } - if (sb->chunk_root_level > BTRFS_MAX_LEVEL) { - printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n", - sb->chunk_root_level, BTRFS_MAX_LEVEL); + if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { + printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n", + btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } - if (sb->log_root_level > BTRFS_MAX_LEVEL) { - printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n", - sb->log_root_level, BTRFS_MAX_LEVEL); + if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { + printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n", + btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } @@ -3837,15 +3837,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, * The common minimum, we don't know if we can trust the nodesize/sectorsize * items yet, they'll be verified later. Issue just a warning. */ - if (!IS_ALIGNED(sb->root, 4096)) + if (!IS_ALIGNED(btrfs_super_root(sb), 4096)) printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", sb->root); - if (!IS_ALIGNED(sb->chunk_root, 4096)) + if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096)) printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", sb->chunk_root); - if (!IS_ALIGNED(sb->log_root, 4096)) + if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096)) printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", - sb->log_root); + btrfs_super_log_root(sb)); if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) { printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n", @@ -3857,13 +3857,13 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, * Hint to catch really bogus numbers, bitflips or so, more exact checks are * done later */ - if (sb->num_devices > (1UL << 31)) + if (btrfs_super_num_devices(sb) > (1UL << 31)) printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", - sb->num_devices); + btrfs_super_num_devices(sb)); - if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) { + if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) { printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n", - sb->bytenr, BTRFS_SUPER_INFO_OFFSET); + btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); ret = -EINVAL; } @@ -3871,14 +3871,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, * The generation is a global counter, we'll trust it more than the others * but it's still possible that it's the one that's wrong. 
*/ - if (sb->generation < sb->chunk_root_generation) + if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) printk(KERN_WARNING "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n", - sb->generation, sb->chunk_root_generation); - if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1) + btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb)); + if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) + && btrfs_super_cache_generation(sb) != (u64)-1) printk(KERN_WARNING "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n", - sb->generation, sb->cache_generation); + btrfs_super_generation(sb), btrfs_super_cache_generation(sb)); return ret; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d5658957101..47c1ba14108 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -710,8 +710,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) rcu_read_unlock(); } -/* simple helper to search for an existing extent at a given offset */ -int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) +/* simple helper to search for an existing data extent at a given offset */ +int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len) { int ret; struct btrfs_key key; @@ -726,12 +726,6 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) key.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 0, 0); - if (ret > 0) { - btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid == start && - key.type == BTRFS_METADATA_ITEM_KEY) - ret = 0; - } btrfs_free_path(path); return ret; } @@ -786,7 +780,6 @@ search_again: else key.type = BTRFS_EXTENT_ITEM_KEY; -again: ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path, 0, 0); if (ret < 0) @@ -802,13 +795,6 @@ again: key.offset == root->nodesize) ret = 0; } - if (ret) { - key.objectid = bytenr; - key.type = BTRFS_EXTENT_ITEM_KEY; - key.offset = root->nodesize; - btrfs_release_path(path); - goto again; - } } if (ret == 0) { diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 783a94355ef..84a2d186827 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -413,7 +413,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, ret = 0; fail: while (ret < 0 && !list_empty(&tmplist)) { - sums = list_entry(&tmplist, struct btrfs_ordered_sum, list); + sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list); list_del(&sums->list); kfree(sums); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8d2b76e29d3..4399f0c3a4c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -765,23 +765,6 @@ out: return ret; } -/* copy of check_sticky in fs/namei.c() -* It's inline, so penalty for filesystems that don't use sticky bit is -* minimal. -*/ -static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode) -{ - kuid_t fsuid = current_fsuid(); - - if (!(dir->i_mode & S_ISVTX)) - return 0; - if (uid_eq(inode->i_uid, fsuid)) - return 0; - if (uid_eq(dir->i_uid, fsuid)) - return 0; - return !capable(CAP_FOWNER); -} - /* copy of may_delete in fs/namei.c() * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. 
@@ -817,8 +800,7 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) return error; if (IS_APPEND(dir)) return -EPERM; - if (btrfs_check_sticky(dir, victim->d_inode)|| - IS_APPEND(victim->d_inode)|| + if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) return -EPERM; if (isdir) { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 5665d214924..f8229ef1b46 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -128,6 +128,26 @@ again: } /* + * take a spinning read lock. + * returns 1 if we get the read lock and 0 if we don't + * this won't wait for blocking writers + */ +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) +{ + if (atomic_read(&eb->blocking_writers)) + return 0; + + read_lock(&eb->lock); + if (atomic_read(&eb->blocking_writers)) { + read_unlock(&eb->lock); + return 0; + } + atomic_inc(&eb->read_locks); + atomic_inc(&eb->spinning_readers); + return 1; +} + +/* * returns 1 if we get the read lock and 0 if we don't * this won't wait for blocking writers */ @@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) atomic_read(&eb->blocking_readers)) return 0; - if (!write_trylock(&eb->lock)) - return 0; - + write_lock(&eb->lock); if (atomic_read(&eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { write_unlock(&eb->lock); diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index b81e0e9a489..c44a9d5f536 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); + static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 78285f30909..617553cdb7d 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -373,6 +373,8 @@ cont: } done: kunmap(pages_in[page_in_index]); + if (!ret) + btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); return ret; } @@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, goto out; } + /* + * the caller is already checking against PAGE_SIZE, but lets + * move this check closer to the memcpy/memset + */ + destlen = min_t(unsigned long, destlen, PAGE_SIZE); bytes = min_t(unsigned long, destlen, out_len - start_byte); kaddr = kmap_atomic(dest_page); memcpy(kaddr, workspace->buf + start_byte, bytes); + + /* + * btrfs_getblock is doing a zero on the tail of the page too, + * but this will cover anything missing from the decompressed + * data. 
+ */ + if (bytes < destlen) + memset(kaddr+bytes, 0, destlen-bytes); kunmap_atomic(kaddr); out: return ret; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a2b97ef1031..54bd91ece35 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2151,6 +2151,7 @@ static void __exit exit_btrfs_fs(void) extent_map_exit(); extent_io_exit(); btrfs_interface_exit(); + btrfs_end_io_wq_exit(); unregister_filesystem(&btrfs_fs_type); btrfs_exit_sysfs(); btrfs_cleanup_fs_uuids(); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 1475979e571..286213cec86 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -672,7 +672,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * is this extent already allocated in the extent * allocation tree? If so, just add a reference */ - ret = btrfs_lookup_extent(root, ins.objectid, + ret = btrfs_lookup_data_extent(root, ins.objectid, ins.offset); if (ret == 0) { ret = btrfs_inc_extent_ref(trans, root, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 759fa4e2de8..fb22fd8d8fb 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -299,6 +299,8 @@ done: zlib_inflateEnd(&workspace->strm); if (data_in) kunmap(pages_in[page_in_index]); + if (!ret) + btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); return ret; } @@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - unsigned long bytes_left = destlen; + unsigned long bytes_left; unsigned long total_out = 0; + unsigned long pg_offset = 0; char *kaddr; + destlen = min_t(unsigned long, destlen, PAGE_SIZE); + bytes_left = destlen; + workspace->strm.next_in = data_in; workspace->strm.avail_in = srclen; workspace->strm.total_in = 0; @@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, unsigned long buf_start; unsigned long buf_offset; unsigned long bytes; - unsigned long pg_offset = 0; ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) @@ -384,6 +389,17 @@ next: ret = 0; zlib_inflateEnd(&workspace->strm); + + /* + * this should only happen if zlib returned fewer bytes than we + * expected. 
btrfs_get_block is responsible for zeroing from the + * end of the inline extent (destlen) to the end of the page + */ + if (pg_offset < destlen) { + kaddr = kmap_atomic(dest_page); + memset(kaddr + pg_offset, 0, destlen - pg_offset); + kunmap_atomic(kaddr); + } return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index 9614adc7e75..20805db2c98 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -128,21 +128,15 @@ __clear_page_buffers(struct page *page) page_cache_release(page); } - -static int quiet_error(struct buffer_head *bh) -{ - if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) - return 0; - return 1; -} - - -static void buffer_io_error(struct buffer_head *bh) +static void buffer_io_error(struct buffer_head *bh, char *msg) { char b[BDEVNAME_SIZE]; - printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", + + if (!test_bit(BH_Quiet, &bh->b_state)) + printk_ratelimited(KERN_ERR + "Buffer I/O error on dev %s, logical block %llu%s\n", bdevname(bh->b_bdev, b), - (unsigned long long)bh->b_blocknr); + (unsigned long long)bh->b_blocknr, msg); } /* @@ -177,17 +171,10 @@ EXPORT_SYMBOL(end_buffer_read_sync); void end_buffer_write_sync(struct buffer_head *bh, int uptodate) { - char b[BDEVNAME_SIZE]; - if (uptodate) { set_buffer_uptodate(bh); } else { - if (!quiet_error(bh)) { - buffer_io_error(bh); - printk(KERN_WARNING "lost page write due to " - "I/O error on %s\n", - bdevname(bh->b_bdev, b)); - } + buffer_io_error(bh, ", lost sync page write"); set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); } @@ -304,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) set_buffer_uptodate(bh); } else { clear_buffer_uptodate(bh); - if (!quiet_error(bh)) - buffer_io_error(bh); + buffer_io_error(bh, ", async page read"); SetPageError(page); } @@ -353,7 +339,6 @@ still_busy: */ void end_buffer_async_write(struct buffer_head *bh, int uptodate) { - char b[BDEVNAME_SIZE]; unsigned long flags; struct buffer_head *first; struct buffer_head *tmp; @@ -365,12 +350,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (!quiet_error(bh)) { - buffer_io_error(bh); - printk(KERN_WARNING "lost page write due to " - "I/O error on %s\n", - bdevname(bh->b_bdev, b)); - } + buffer_io_error(bh, ", lost async page write"); set_bit(AS_EIO, &page->mapping->flags); set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); @@ -993,7 +973,7 @@ init_page_buffers(struct page *page, struct block_device *bdev, */ static int grow_dev_page(struct block_device *bdev, sector_t block, - pgoff_t index, int size, int sizebits) + pgoff_t index, int size, int sizebits, gfp_t gfp) { struct inode *inode = bdev->bd_inode; struct page *page; @@ -1002,8 +982,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, int ret = 0; /* Will call free_more_memory() */ gfp_t gfp_mask; - gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS; - gfp_mask |= __GFP_MOVABLE; + gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp; + /* * XXX: __getblk_slow() can not really deal with failure and * will endlessly loop on improvised global reclaim. Prefer @@ -1060,7 +1040,7 @@ failed: * that page was dirty, the buffers are set dirty also. 
*/ static int -grow_buffers(struct block_device *bdev, sector_t block, int size) +grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp) { pgoff_t index; int sizebits; @@ -1087,11 +1067,12 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) } /* Create a page with the proper size buffers.. */ - return grow_dev_page(bdev, block, index, size, sizebits); + return grow_dev_page(bdev, block, index, size, sizebits, gfp); } -static struct buffer_head * -__getblk_slow(struct block_device *bdev, sector_t block, int size) +struct buffer_head * +__getblk_slow(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp) { /* Size must be multiple of hard sectorsize */ if (unlikely(size & (bdev_logical_block_size(bdev)-1) || @@ -1113,13 +1094,14 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) if (bh) return bh; - ret = grow_buffers(bdev, block, size); + ret = grow_buffers(bdev, block, size, gfp); if (ret < 0) return NULL; if (ret == 0) free_more_memory(); } } +EXPORT_SYMBOL(__getblk_slow); /* * The relationship between dirty buffers and dirty pages: @@ -1373,24 +1355,25 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size) EXPORT_SYMBOL(__find_get_block); /* - * __getblk will locate (and, if necessary, create) the buffer_head + * __getblk_gfp() will locate (and, if necessary, create) the buffer_head * which corresponds to the passed block_device, block and size. The * returned buffer has its reference count incremented. * - * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() - * attempt is failing. FIXME, perhaps? + * __getblk_gfp() will lock up the machine if grow_dev_page's + * try_to_free_buffers() attempt is failing. FIXME, perhaps? */ struct buffer_head * -__getblk(struct block_device *bdev, sector_t block, unsigned size) +__getblk_gfp(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp) { struct buffer_head *bh = __find_get_block(bdev, block, size); might_sleep(); if (bh == NULL) - bh = __getblk_slow(bdev, block, size); + bh = __getblk_slow(bdev, block, size, gfp); return bh; } -EXPORT_SYMBOL(__getblk); +EXPORT_SYMBOL(__getblk_gfp); /* * Do async read-ahead on a buffer.. @@ -1406,24 +1389,28 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) EXPORT_SYMBOL(__breadahead); /** - * __bread() - reads a specified block and returns the bh + * __bread_gfp() - reads a specified block and returns the bh * @bdev: the block_device to read from * @block: number of block * @size: size (in bytes) to read - * + * @gfp: page allocation flag + * * Reads a specified block, and returns buffer head that contains it. + * The page cache can be allocated from non-movable area + * not to prevent page migration if you set gfp to zero. * It returns NULL if the block was unreadable. */ struct buffer_head * -__bread(struct block_device *bdev, sector_t block, unsigned size) +__bread_gfp(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp) { - struct buffer_head *bh = __getblk(bdev, block, size); + struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); if (likely(bh) && !buffer_uptodate(bh)) bh = __bread_slow(bh); return bh; } -EXPORT_SYMBOL(__bread); +EXPORT_SYMBOL(__bread_gfp); /* * invalidate_bh_lrus() is called rarely - but not only at unmount. 
@@ -2082,6 +2069,7 @@ int generic_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; int i_size_changed = 0; copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); @@ -2101,6 +2089,8 @@ int generic_write_end(struct file *file, struct address_space *mapping, unlock_page(page); page_cache_release(page); + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 659f2ea9e6f..cefca661464 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2638,7 +2638,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, for (i = 0; i < CEPH_CAP_BITS; i++) if ((dirty & (1 << i)) && - flush_tid == ci->i_cap_flush_tid[i]) + (u16)flush_tid == ci->i_cap_flush_tid[i]) cleaned |= 1 << i; dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," diff --git a/fs/dcache.c b/fs/dcache.c index d5a23fd0da9..5bc72b07fde 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -778,6 +778,7 @@ restart: struct dentry *parent = lock_parent(dentry); if (likely(!dentry->d_lockref.count)) { __dentry_kill(dentry); + dput(parent); goto restart; } if (parent) @@ -2673,11 +2674,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) if (!IS_ROOT(new)) { spin_unlock(&inode->i_lock); dput(new); + iput(inode); return ERR_PTR(-EIO); } if (d_ancestor(new, dentry)) { spin_unlock(&inode->i_lock); dput(new); + iput(inode); return ERR_PTR(-EIO); } write_seqlock(&rename_lock); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 1b119d3bf92..c4cd1fd86cc 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -566,6 +566,13 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; + s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1; + + rc = -EINVAL; + if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { + pr_err("eCryptfs: maximum fs stacking depth exceeded\n"); + goto out_free; + } inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild index 389ba8312d5..b47c7b8dc27 100644 --- a/fs/exofs/Kbuild +++ b/fs/exofs/Kbuild @@ -4,7 +4,7 @@ # Copyright (C) 2008 Panasas Inc. All rights reserved. 
# # Authors: -# Boaz Harrosh <bharrosh@panasas.com> +# Boaz Harrosh <ooo@electrozaur.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 diff --git a/fs/exofs/common.h b/fs/exofs/common.h index 3bbd46956d7..7d88ef56621 100644 --- a/fs/exofs/common.h +++ b/fs/exofs/common.h @@ -4,7 +4,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 49f51ab4caa..d7defd55760 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h index fffe86fd7a4..ad9cac670a4 100644 --- a/fs/exofs/exofs.h +++ b/fs/exofs/exofs.h @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/file.c b/fs/exofs/file.c index 71bf8e4fb5d..1a376b42d30 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 3f9cafd7393..f1d3d4eb8c4 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 4731fd991ef..28907460e8f 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c index cfc0205d62c..7bd8ac8dfb2 100644 --- a/fs/exofs/ore.c +++ b/fs/exofs/ore.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This file is part of exofs. * @@ -29,7 +29,7 @@ #include "ore_raid.h" -MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); +MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>"); MODULE_DESCRIPTION("Objects Raid Engine ore.ko"); MODULE_LICENSE("GPL"); diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c index 84529b8a331..27cbdb69764 100644 --- a/fs/exofs/ore_raid.c +++ b/fs/exofs/ore_raid.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2011 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This file is part of the objects raid engine (ore). 
* diff --git a/fs/exofs/ore_raid.h b/fs/exofs/ore_raid.h index cf6375d8212..a6e74677557 100644 --- a/fs/exofs/ore_raid.h +++ b/fs/exofs/ore_raid.h @@ -1,6 +1,6 @@ /* * Copyright (C) from 2011 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This file is part of the objects raid engine (ore). * diff --git a/fs/exofs/super.c b/fs/exofs/super.c index ed73ed8ebbe..95965503afc 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c index 4dd687c3e74..832e2624b80 100644 --- a/fs/exofs/symlink.c +++ b/fs/exofs/symlink.c @@ -2,7 +2,7 @@ * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index 1b4f2f95fc3..5e6a2c0a1f0 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c @@ -1,7 +1,7 @@ /* * Copyright (C) 2012 * Sachin Bhamare <sbhamare@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This file is part of exofs. * diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 7015db0bafd..eb742d0e67f 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1354,13 +1354,6 @@ set_qf_format: "not specified."); return 0; } - } else { - if (sbi->s_jquota_fmt) { - ext3_msg(sb, KERN_ERR, "error: journaled quota format " - "specified with no journaling " - "enabled."); - return 0; - } } #endif return 1; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 581ef40fbe9..83a6f497c4e 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -176,7 +176,7 @@ static unsigned int num_clusters_in_group(struct super_block *sb, } /* Initializes an uninitialized block bitmap */ -static void ext4_init_block_bitmap(struct super_block *sb, +static int ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, ext4_group_t block_group, struct ext4_group_desc *gdp) @@ -192,7 +192,6 @@ static void ext4_init_block_bitmap(struct super_block *sb, /* If checksum is bad mark all blocks used to prevent allocation * essentially implementing a per-group read-only flag. */ if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { - ext4_error(sb, "Checksum bad for group %u", block_group); grp = ext4_get_group_info(sb, block_group); if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) percpu_counter_sub(&sbi->s_freeclusters_counter, @@ -205,7 +204,7 @@ static void ext4_init_block_bitmap(struct super_block *sb, count); } set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); - return; + return -EIO; } memset(bh->b_data, 0, sb->s_blocksize); @@ -243,6 +242,7 @@ static void ext4_init_block_bitmap(struct super_block *sb, sb->s_blocksize * 8, bh->b_data); ext4_block_bitmap_csum_set(sb, block_group, gdp, bh); ext4_group_desc_csum_set(sb, block_group, gdp); + return 0; } /* Return the number of free blocks in a block group. 
It is used when @@ -438,11 +438,15 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) } ext4_lock_group(sb, block_group); if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { - ext4_init_block_bitmap(sb, bh, block_group, desc); + int err; + + err = ext4_init_block_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); + if (err) + ext4_error(sb, "Checksum bad for grp %u", block_group); return bh; } ext4_unlock_group(sb, block_group); @@ -636,8 +640,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, * Account for the allocated meta blocks. We will never * fail EDQUOT for metdata, but we do account for it. */ - if (!(*errp) && - ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) { + if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) { spin_lock(&EXT4_I(inode)->i_block_reservation_lock); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); dquot_alloc_block_nofail(inode, diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c index 3285aa5a706..b610779a958 100644 --- a/fs/ext4/bitmap.c +++ b/fs/ext4/bitmap.c @@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, __u32 provided, calculated; struct ext4_sb_info *sbi = EXT4_SB(sb); - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return 1; provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo); @@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, __u32 csum; struct ext4_sb_info *sbi = EXT4_SB(sb); - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); @@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, struct ext4_sb_info *sbi = EXT4_SB(sb); int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8; - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return 1; provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo); @@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, __u32 csum; struct ext4_sb_info *sbi = EXT4_SB(sb); - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 0bb3f9ea083..c24143ea9c0 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -151,13 +151,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) &file->f_ra, file, index, 1); file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; - bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err); + bh = ext4_bread(NULL, inode, map.m_lblk, 0); + if (IS_ERR(bh)) + return PTR_ERR(bh); } - /* - * We ignore I/O errors on directories so users have a chance - * of recovering data when there's a bad sector - */ if (!bh) { if (!dir_has_error) { EXT4_ERROR_FILE(file, 0, diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b0c225cdb52..c55a1faaed5 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -572,15 +572,15 @@ enum { /* * The bit position of these flags must not overlap with any of the - * EXT4_GET_BLOCKS_*. They are used by ext4_ext_find_extent(), + * EXT4_GET_BLOCKS_*. 
They are used by ext4_find_extent(), * read_extent_tree_block(), ext4_split_extent_at(), * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf(). * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be * caching the extents when reading from the extent tree while a * truncate or punch hole operation is in progress. */ -#define EXT4_EX_NOCACHE 0x0400 -#define EXT4_EX_FORCE_CACHE 0x0800 +#define EXT4_EX_NOCACHE 0x40000000 +#define EXT4_EX_FORCE_CACHE 0x20000000 /* * Flags used by ext4_free_blocks @@ -890,6 +890,7 @@ struct ext4_inode_info { struct ext4_es_tree i_es_tree; rwlock_t i_es_lock; struct list_head i_es_lru; + unsigned int i_es_all_nr; /* protected by i_es_lock */ unsigned int i_es_lru_nr; /* protected by i_es_lock */ unsigned long i_touch_when; /* jiffies of last accessing */ @@ -1174,6 +1175,9 @@ struct ext4_super_block { #define EXT4_MF_MNTDIR_SAMPLED 0x0001 #define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ +/* Number of quota types we support */ +#define EXT4_MAXQUOTAS 2 + /* * fourth extended-fs super-block data in memory */ @@ -1237,7 +1241,7 @@ struct ext4_sb_info { u32 s_min_batch_time; struct block_device *journal_bdev; #ifdef CONFIG_QUOTA - char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */ + char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */ int s_jquota_fmt; /* Format of quota to use */ #endif unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ @@ -1330,8 +1334,7 @@ struct ext4_sb_info { /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; struct list_head s_es_lru; - unsigned long s_es_last_sorted; - struct percpu_counter s_extent_cache_cnt; + struct ext4_es_stats s_es_stats; struct mb_cache *s_mb_cache; spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; @@ -1399,7 +1402,6 @@ enum { EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ EXT4_STATE_NEWENTRY, /* File just added to dir */ - EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */ EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read nolocking */ EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */ @@ -2086,10 +2088,8 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); /* inode.c */ -struct buffer_head *ext4_getblk(handle_t *, struct inode *, - ext4_lblk_t, int, int *); -struct buffer_head *ext4_bread(handle_t *, struct inode *, - ext4_lblk_t, int, int *); +struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int); +struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int); int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); int ext4_get_block(struct inode *inode, sector_t iblock, @@ -2109,6 +2109,7 @@ int do_journal_get_write_access(handle_t *handle, #define CONVERT_INLINE_DATA 2 extern struct inode *ext4_iget(struct super_block *, unsigned long); +extern struct inode *ext4_iget_normal(struct super_block *, unsigned long); extern int ext4_write_inode(struct inode *, struct writeback_control *); extern int ext4_setattr(struct dentry *, struct iattr *); extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, @@ -2332,10 +2333,18 @@ extern int ext4_register_li_request(struct super_block *sb, static inline int ext4_has_group_desc_csum(struct super_block *sb) { return EXT4_HAS_RO_COMPAT_FEATURE(sb, - 
EXT4_FEATURE_RO_COMPAT_GDT_CSUM | - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM); + EXT4_FEATURE_RO_COMPAT_GDT_CSUM) || + (EXT4_SB(sb)->s_chksum_driver != NULL); } +static inline int ext4_has_metadata_csum(struct super_block *sb) +{ + WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && + !EXT4_SB(sb)->s_chksum_driver); + + return (EXT4_SB(sb)->s_chksum_driver != NULL); +} static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) { return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) | @@ -2731,21 +2740,26 @@ extern int ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2); extern int ext4_ext_insert_extent(handle_t *, struct inode *, - struct ext4_ext_path *, + struct ext4_ext_path **, struct ext4_extent *, int); -extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t, - struct ext4_ext_path *, - int flags); +extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t, + struct ext4_ext_path **, + int flags); extern void ext4_ext_drop_refs(struct ext4_ext_path *); extern int ext4_ext_check_inode(struct inode *inode); extern int ext4_find_delalloc_range(struct inode *inode, ext4_lblk_t lblk_start, ext4_lblk_t lblk_end); extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk); +extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path); extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); extern int ext4_ext_precache(struct inode *inode); extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); +extern int ext4_swap_extents(handle_t *handle, struct inode *inode1, + struct inode *inode2, ext4_lblk_t lblk1, + ext4_lblk_t lblk2, ext4_lblk_t count, + int mark_unwritten,int *err); /* move_extent.c */ extern void ext4_double_down_write_data_sem(struct inode *first, @@ -2755,8 +2769,6 @@ extern void ext4_double_up_write_data_sem(struct inode *orig_inode, extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 start_orig, __u64 start_donor, __u64 len, __u64 *moved_len); -extern int mext_next_extent(struct inode *inode, struct ext4_ext_path *path, - struct ext4_extent **extent); /* page-io.c */ extern int __init ext4_init_pageio(void); diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index a867f5ca999..3c938154709 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -123,6 +123,7 @@ find_ext4_extent_tail(struct ext4_extent_header *eh) struct ext4_ext_path { ext4_fsblk_t p_block; __u16 p_depth; + __u16 p_maxdepth; struct ext4_extent *p_ext; struct ext4_extent_idx *p_idx; struct ext4_extent_header *p_hdr; diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 0074e0d23d6..3445035c7e0 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -256,8 +256,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, set_buffer_prio(bh); if (ext4_handle_valid(handle)) { err = jbd2_journal_dirty_metadata(handle, bh); - /* Errors can only happen if there is a bug */ - if (WARN_ON_ONCE(err)) { + /* Errors can only happen due to aborted journal or a nasty bug */ + if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); if (inode == NULL) { diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 17c00ff202f..9c5b49fb281 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -102,9 +102,9 @@ #define 
EXT4_QUOTA_INIT_BLOCKS(sb) 0 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 #endif -#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) -#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) -#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) +#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) +#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) +#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) static inline int ext4_jbd2_credits_xattr(struct inode *inode) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 74292a71b38..0b16fb4c06d 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -73,8 +73,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode, { struct ext4_extent_tail *et; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return 1; et = find_ext4_extent_tail(eh); @@ -88,8 +87,7 @@ static void ext4_extent_block_csum_set(struct inode *inode, { struct ext4_extent_tail *et; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return; et = find_ext4_extent_tail(eh); @@ -98,14 +96,14 @@ static void ext4_extent_block_csum_set(struct inode *inode, static int ext4_split_extent(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, struct ext4_map_blocks *map, int split_flag, int flags); static int ext4_split_extent_at(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, ext4_lblk_t split, int split_flag, int flags); @@ -291,6 +289,20 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check) return size; } +static inline int +ext4_force_split_extent_at(handle_t *handle, struct inode *inode, + struct ext4_ext_path **ppath, ext4_lblk_t lblk, + int nofail) +{ + struct ext4_ext_path *path = *ppath; + int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext); + + return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ? + EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0, + EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | + (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0)); +} + /* * Calculate the number of metadata blocks needed * to allocate @blocks @@ -695,9 +707,11 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, void ext4_ext_drop_refs(struct ext4_ext_path *path) { - int depth = path->p_depth; - int i; + int depth, i; + if (!path) + return; + depth = path->p_depth; for (i = 0; i <= depth; i++, path++) if (path->p_bh) { brelse(path->p_bh); @@ -841,24 +855,32 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) } struct ext4_ext_path * -ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, - struct ext4_ext_path *path, int flags) +ext4_find_extent(struct inode *inode, ext4_lblk_t block, + struct ext4_ext_path **orig_path, int flags) { struct ext4_extent_header *eh; struct buffer_head *bh; - short int depth, i, ppos = 0, alloc = 0; + struct ext4_ext_path *path = orig_path ? 
*orig_path : NULL; + short int depth, i, ppos = 0; int ret; eh = ext_inode_hdr(inode); depth = ext_depth(inode); - /* account possible depth increase */ + if (path) { + ext4_ext_drop_refs(path); + if (depth > path[0].p_maxdepth) { + kfree(path); + *orig_path = path = NULL; + } + } if (!path) { + /* account possible depth increase */ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); - if (!path) + if (unlikely(!path)) return ERR_PTR(-ENOMEM); - alloc = 1; + path[0].p_maxdepth = depth + 1; } path[0].p_hdr = eh; path[0].p_bh = NULL; @@ -876,7 +898,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, bh = read_extent_tree_block(inode, path[ppos].p_block, --i, flags); - if (IS_ERR(bh)) { + if (unlikely(IS_ERR(bh))) { ret = PTR_ERR(bh); goto err; } @@ -910,8 +932,9 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, err: ext4_ext_drop_refs(path); - if (alloc) - kfree(path); + kfree(path); + if (orig_path) + *orig_path = NULL; return ERR_PTR(ret); } @@ -1238,16 +1261,24 @@ cleanup: * just created block */ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, - unsigned int flags, - struct ext4_extent *newext) + unsigned int flags) { struct ext4_extent_header *neh; struct buffer_head *bh; - ext4_fsblk_t newblock; + ext4_fsblk_t newblock, goal = 0; + struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; int err = 0; - newblock = ext4_ext_new_meta_block(handle, inode, NULL, - newext, &err, flags); + /* Try to prepend new index to old one */ + if (ext_depth(inode)) + goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); + if (goal > le32_to_cpu(es->s_first_data_block)) { + flags |= EXT4_MB_HINT_TRY_GOAL; + goal--; + } else + goal = ext4_inode_to_goal_block(inode); + newblock = ext4_new_meta_blocks(handle, inode, goal, flags, + NULL, &err); if (newblock == 0) return err; @@ -1314,9 +1345,10 @@ out: static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, unsigned int mb_flags, unsigned int gb_flags, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, struct ext4_extent *newext) { + struct ext4_ext_path *path = *ppath; struct ext4_ext_path *curp; int depth, i, err = 0; @@ -1340,23 +1372,21 @@ repeat: goto out; /* refill path */ - ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, + path = ext4_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), - path, gb_flags); + ppath, gb_flags); if (IS_ERR(path)) err = PTR_ERR(path); } else { /* tree is full, time to grow in depth */ - err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext); + err = ext4_ext_grow_indepth(handle, inode, mb_flags); if (err) goto out; /* refill path */ - ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, + path = ext4_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), - path, gb_flags); + ppath, gb_flags); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; @@ -1559,7 +1589,7 @@ found_extent: * allocated block. Thus, index entries have to be consistent * with leaves. */ -static ext4_lblk_t +ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path) { int depth; @@ -1802,6 +1832,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle, sizeof(struct ext4_extent_idx); s += sizeof(struct ext4_extent_header); + path[1].p_maxdepth = path[0].p_maxdepth; memcpy(path[0].p_hdr, path[1].p_hdr, s); path[0].p_depth = 0; path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + @@ -1896,9 +1927,10 @@ out: * creating new leaf in the no-space case. 
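As a side note, the reworked ext4_find_extent() above changes the calling convention for extent path lookups: callers now pass a pointer to their path pointer so the helper can reuse, reallocate (when the tree grows deeper) or free it. Below is a small hypothetical sketch, not from the patch, of the intended usage; it assumes the caller already holds i_data_sem and uses only ext4_find_extent() and ext4_ext_drop_refs() as declared in this diff.

/* Hypothetical caller, illustrating the &path reuse convention only. */
static int example_lookup_two_blocks(struct inode *inode,
				     ext4_lblk_t first, ext4_lblk_t second)
{
	struct ext4_ext_path *path = NULL;

	path = ext4_find_extent(inode, first, &path, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);	/* on error the helper freed the path */

	/* The same array is reused; ext4_find_extent() drops the old
	 * references and reallocates it if the depth increased. */
	path = ext4_find_extent(inode, second, &path, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* exactly one drop_refs/kfree pair at the end, as in the patch */
	ext4_ext_drop_refs(path);
	kfree(path);
	return 0;
}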
*/ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, struct ext4_extent *newext, int gb_flags) { + struct ext4_ext_path *path = *ppath; struct ext4_extent_header *eh; struct ext4_extent *ex, *fex; struct ext4_extent *nearex; /* nearest extent */ @@ -1907,6 +1939,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, ext4_lblk_t next; int mb_flags = 0, unwritten; + if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) + mb_flags |= EXT4_MB_DELALLOC_RESERVED; if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); return -EIO; @@ -1925,7 +1959,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, /* * Try to see whether we should rather test the extent on * right from ex, or from the left of ex. This is because - * ext4_ext_find_extent() can return either extent on the + * ext4_find_extent() can return either extent on the * left, or on the right from the searched position. This * will make merging more effective. */ @@ -2008,7 +2042,7 @@ prepend: if (next != EXT_MAX_BLOCKS) { ext_debug("next leaf block - %u\n", next); BUG_ON(npath != NULL); - npath = ext4_ext_find_extent(inode, next, NULL, 0); + npath = ext4_find_extent(inode, next, NULL, 0); if (IS_ERR(npath)) return PTR_ERR(npath); BUG_ON(npath->p_depth != path->p_depth); @@ -2028,9 +2062,9 @@ prepend: * We're gonna add a new leaf in the tree. */ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) - mb_flags = EXT4_MB_USE_RESERVED; + mb_flags |= EXT4_MB_USE_RESERVED; err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, - path, newext); + ppath, newext); if (err) goto cleanup; depth = ext_depth(inode); @@ -2108,10 +2142,8 @@ merge: err = ext4_ext_dirty(handle, inode, path + path->p_depth); cleanup: - if (npath) { - ext4_ext_drop_refs(npath); - kfree(npath); - } + ext4_ext_drop_refs(npath); + kfree(npath); return err; } @@ -2133,13 +2165,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode, /* find extent for this block */ down_read(&EXT4_I(inode)->i_data_sem); - if (path && ext_depth(inode) != depth) { - /* depth was changed. we have to realloc path */ - kfree(path); - path = NULL; - } - - path = ext4_ext_find_extent(inode, block, path, 0); + path = ext4_find_extent(inode, block, &path, 0); if (IS_ERR(path)) { up_read(&EXT4_I(inode)->i_data_sem); err = PTR_ERR(path); @@ -2156,7 +2182,6 @@ static int ext4_fill_fiemap_extents(struct inode *inode, } ex = path[depth].p_ext; next = ext4_ext_next_allocated_block(path); - ext4_ext_drop_refs(path); flags = 0; exists = 0; @@ -2266,11 +2291,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode, block = es.es_lblk + es.es_len; } - if (path) { - ext4_ext_drop_refs(path); - kfree(path); - } - + ext4_ext_drop_refs(path); + kfree(path); return err; } @@ -2826,7 +2848,7 @@ again: ext4_lblk_t ee_block; /* find extent for this block */ - path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); + path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) { ext4_journal_stop(handle); return PTR_ERR(path); @@ -2854,24 +2876,14 @@ again: */ if (end >= ee_block && end < ee_block + ext4_ext_get_actual_len(ex) - 1) { - int split_flag = 0; - - if (ext4_ext_is_unwritten(ex)) - split_flag = EXT4_EXT_MARK_UNWRIT1 | - EXT4_EXT_MARK_UNWRIT2; - /* * Split the extent in two so that 'end' is the last * block in the first new extent. 
Also we should not * fail removing space due to ENOSPC so try to use * reserved block if that happens. */ - err = ext4_split_extent_at(handle, inode, path, - end + 1, split_flag, - EXT4_EX_NOCACHE | - EXT4_GET_BLOCKS_PRE_IO | - EXT4_GET_BLOCKS_METADATA_NOFAIL); - + err = ext4_force_split_extent_at(handle, inode, &path, + end + 1, 1); if (err < 0) goto out; } @@ -2893,7 +2905,7 @@ again: ext4_journal_stop(handle); return -ENOMEM; } - path[0].p_depth = depth; + path[0].p_maxdepth = path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); i = 0; @@ -3013,10 +3025,9 @@ again: out: ext4_ext_drop_refs(path); kfree(path); - if (err == -EAGAIN) { - path = NULL; + path = NULL; + if (err == -EAGAIN) goto again; - } ext4_journal_stop(handle); return err; @@ -3130,11 +3141,12 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) */ static int ext4_split_extent_at(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, ext4_lblk_t split, int split_flag, int flags) { + struct ext4_ext_path *path = *ppath; ext4_fsblk_t newblock; ext4_lblk_t ee_block; struct ext4_extent *ex, newex, orig_ex, zero_ex; @@ -3205,7 +3217,7 @@ static int ext4_split_extent_at(handle_t *handle, if (split_flag & EXT4_EXT_MARK_UNWRIT2) ext4_ext_mark_unwritten(ex2); - err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); + err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { if (split_flag & EXT4_EXT_DATA_VALID1) { @@ -3271,11 +3283,12 @@ fix_extent_len: */ static int ext4_split_extent(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, struct ext4_map_blocks *map, int split_flag, int flags) { + struct ext4_ext_path *path = *ppath; ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len, depth; @@ -3298,7 +3311,7 @@ static int ext4_split_extent(handle_t *handle, EXT4_EXT_MARK_UNWRIT2; if (split_flag & EXT4_EXT_DATA_VALID2) split_flag1 |= EXT4_EXT_DATA_VALID1; - err = ext4_split_extent_at(handle, inode, path, + err = ext4_split_extent_at(handle, inode, ppath, map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; @@ -3309,8 +3322,7 @@ static int ext4_split_extent(handle_t *handle, * Update path is required because previous ext4_split_extent_at() may * result in split of original leaf or extent zeroout. 
*/ - ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); + path = ext4_find_extent(inode, map->m_lblk, ppath, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = ext_depth(inode); @@ -3330,7 +3342,7 @@ static int ext4_split_extent(handle_t *handle, split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | EXT4_EXT_MARK_UNWRIT2); } - err = ext4_split_extent_at(handle, inode, path, + err = ext4_split_extent_at(handle, inode, ppath, map->m_lblk, split_flag1, flags); if (err) goto out; @@ -3364,9 +3376,10 @@ out: static int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, int flags) { + struct ext4_ext_path *path = *ppath; struct ext4_sb_info *sbi; struct ext4_extent_header *eh; struct ext4_map_blocks split_map; @@ -3590,11 +3603,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, } } - allocated = ext4_split_extent(handle, inode, path, - &split_map, split_flag, flags); - if (allocated < 0) - err = allocated; - + err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, + flags); + if (err > 0) + err = 0; out: /* If we have gotten a failure, don't zero out status tree */ if (!err) @@ -3629,9 +3641,10 @@ out: static int ext4_split_convert_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, - struct ext4_ext_path *path, + struct ext4_ext_path **ppath, int flags) { + struct ext4_ext_path *path = *ppath; ext4_lblk_t eof_block; ext4_lblk_t ee_block; struct ext4_extent *ex; @@ -3665,74 +3678,15 @@ static int ext4_split_convert_extents(handle_t *handle, split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); } flags |= EXT4_GET_BLOCKS_PRE_IO; - return ext4_split_extent(handle, inode, path, map, split_flag, flags); + return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); } -static int ext4_convert_initialized_extents(handle_t *handle, - struct inode *inode, - struct ext4_map_blocks *map, - struct ext4_ext_path *path) -{ - struct ext4_extent *ex; - ext4_lblk_t ee_block; - unsigned int ee_len; - int depth; - int err = 0; - - depth = ext_depth(inode); - ex = path[depth].p_ext; - ee_block = le32_to_cpu(ex->ee_block); - ee_len = ext4_ext_get_actual_len(ex); - - ext_debug("%s: inode %lu, logical" - "block %llu, max_blocks %u\n", __func__, inode->i_ino, - (unsigned long long)ee_block, ee_len); - - if (ee_block != map->m_lblk || ee_len > map->m_len) { - err = ext4_split_convert_extents(handle, inode, map, path, - EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); - if (err < 0) - goto out; - ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); - if (IS_ERR(path)) { - err = PTR_ERR(path); - goto out; - } - depth = ext_depth(inode); - ex = path[depth].p_ext; - if (!ex) { - EXT4_ERROR_INODE(inode, "unexpected hole at %lu", - (unsigned long) map->m_lblk); - err = -EIO; - goto out; - } - } - - err = ext4_ext_get_access(handle, inode, path + depth); - if (err) - goto out; - /* first mark the extent as unwritten */ - ext4_ext_mark_unwritten(ex); - - /* note: ext4_ext_correct_indexes() isn't needed here because - * borders are not changed - */ - ext4_ext_try_to_merge(handle, inode, path, ex); - - /* Mark modified extent as dirty */ - err = ext4_ext_dirty(handle, inode, path + path->p_depth); -out: - ext4_ext_show_leaf(inode, path); - return err; -} - - static int ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, - struct ext4_ext_path 
*path) + struct ext4_ext_path **ppath) { + struct ext4_ext_path *path = *ppath; struct ext4_extent *ex; ext4_lblk_t ee_block; unsigned int ee_len; @@ -3761,16 +3715,13 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, inode->i_ino, (unsigned long long)ee_block, ee_len, (unsigned long long)map->m_lblk, map->m_len); #endif - err = ext4_split_convert_extents(handle, inode, map, path, + err = ext4_split_convert_extents(handle, inode, map, ppath, EXT4_GET_BLOCKS_CONVERT); if (err < 0) - goto out; - ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); - if (IS_ERR(path)) { - err = PTR_ERR(path); - goto out; - } + return err; + path = ext4_find_extent(inode, map->m_lblk, ppath, 0); + if (IS_ERR(path)) + return PTR_ERR(path); depth = ext_depth(inode); ex = path[depth].p_ext; } @@ -3963,12 +3914,16 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, } static int -ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode, - struct ext4_map_blocks *map, - struct ext4_ext_path *path, int flags, - unsigned int allocated, ext4_fsblk_t newblock) +convert_initialized_extent(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, + struct ext4_ext_path **ppath, int flags, + unsigned int allocated, ext4_fsblk_t newblock) { - int ret = 0; + struct ext4_ext_path *path = *ppath; + struct ext4_extent *ex; + ext4_lblk_t ee_block; + unsigned int ee_len; + int depth; int err = 0; /* @@ -3978,28 +3933,67 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode, if (map->m_len > EXT_UNWRITTEN_MAX_LEN) map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; - ret = ext4_convert_initialized_extents(handle, inode, map, - path); - if (ret >= 0) { - ext4_update_inode_fsync_trans(handle, inode, 1); - err = check_eofblocks_fl(handle, inode, map->m_lblk, - path, map->m_len); - } else - err = ret; + depth = ext_depth(inode); + ex = path[depth].p_ext; + ee_block = le32_to_cpu(ex->ee_block); + ee_len = ext4_ext_get_actual_len(ex); + + ext_debug("%s: inode %lu, logical" + "block %llu, max_blocks %u\n", __func__, inode->i_ino, + (unsigned long long)ee_block, ee_len); + + if (ee_block != map->m_lblk || ee_len > map->m_len) { + err = ext4_split_convert_extents(handle, inode, map, ppath, + EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); + if (err < 0) + return err; + path = ext4_find_extent(inode, map->m_lblk, ppath, 0); + if (IS_ERR(path)) + return PTR_ERR(path); + depth = ext_depth(inode); + ex = path[depth].p_ext; + if (!ex) { + EXT4_ERROR_INODE(inode, "unexpected hole at %lu", + (unsigned long) map->m_lblk); + return -EIO; + } + } + + err = ext4_ext_get_access(handle, inode, path + depth); + if (err) + return err; + /* first mark the extent as unwritten */ + ext4_ext_mark_unwritten(ex); + + /* note: ext4_ext_correct_indexes() isn't needed here because + * borders are not changed + */ + ext4_ext_try_to_merge(handle, inode, path, ex); + + /* Mark modified extent as dirty */ + err = ext4_ext_dirty(handle, inode, path + path->p_depth); + if (err) + return err; + ext4_ext_show_leaf(inode, path); + + ext4_update_inode_fsync_trans(handle, inode, 1); + err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); + if (err) + return err; map->m_flags |= EXT4_MAP_UNWRITTEN; if (allocated > map->m_len) allocated = map->m_len; map->m_len = allocated; - - return err ? 
err : allocated; + return allocated; } static int ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, - struct ext4_ext_path *path, int flags, + struct ext4_ext_path **ppath, int flags, unsigned int allocated, ext4_fsblk_t newblock) { + struct ext4_ext_path *path = *ppath; int ret = 0; int err = 0; ext4_io_end_t *io = ext4_inode_aio(inode); @@ -4021,8 +4015,8 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, /* get_block() before submit the IO, split the extent */ if (flags & EXT4_GET_BLOCKS_PRE_IO) { - ret = ext4_split_convert_extents(handle, inode, map, - path, flags | EXT4_GET_BLOCKS_CONVERT); + ret = ext4_split_convert_extents(handle, inode, map, ppath, + flags | EXT4_GET_BLOCKS_CONVERT); if (ret <= 0) goto out; /* @@ -4040,7 +4034,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, /* IO end_io complete, convert the filled extent to written */ if (flags & EXT4_GET_BLOCKS_CONVERT) { ret = ext4_convert_unwritten_extents_endio(handle, inode, map, - path); + ppath); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); err = check_eofblocks_fl(handle, inode, map->m_lblk, @@ -4078,7 +4072,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, } /* buffered write, writepage time, convert*/ - ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags); + ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out: @@ -4279,7 +4273,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* find extent for this block */ - path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0); + path = ext4_find_extent(inode, map->m_lblk, NULL, 0); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; @@ -4291,7 +4285,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, /* * consistent leaf must not be empty; * this situation is possible, though, _during_ tree modification; - * this is why assert can't be put in ext4_ext_find_extent() + * this is why assert can't be put in ext4_find_extent() */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " @@ -4331,15 +4325,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, */ if ((!ext4_ext_is_unwritten(ex)) && (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { - allocated = ext4_ext_convert_initialized_extent( - handle, inode, map, path, flags, - allocated, newblock); + allocated = convert_initialized_extent( + handle, inode, map, &path, + flags, allocated, newblock); goto out2; } else if (!ext4_ext_is_unwritten(ex)) goto out; ret = ext4_ext_handle_unwritten_extents( - handle, inode, map, path, flags, + handle, inode, map, &path, flags, allocated, newblock); if (ret < 0) err = ret; @@ -4376,7 +4370,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, /* * If we are doing bigalloc, check to see if the extent returned - * by ext4_ext_find_extent() implies a cluster we can use. + * by ext4_find_extent() implies a cluster we can use. 
*/ if (cluster_offset && ex && get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { @@ -4451,6 +4445,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ar.flags = 0; if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) ar.flags |= EXT4_MB_HINT_NOPREALLOC; + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) + ar.flags |= EXT4_MB_DELALLOC_RESERVED; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; @@ -4486,7 +4482,7 @@ got_allocated_blocks: err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); if (!err) - err = ext4_ext_insert_extent(handle, inode, path, + err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); if (!err && set_unwritten) { @@ -4619,10 +4615,8 @@ out: map->m_pblk = newblock; map->m_len = allocated; out2: - if (path) { - ext4_ext_drop_refs(path); - kfree(path); - } + ext4_ext_drop_refs(path); + kfree(path); trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); @@ -4799,7 +4793,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, max_blocks -= lblk; flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT | - EXT4_GET_BLOCKS_CONVERT_UNWRITTEN; + EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | + EXT4_EX_NOCACHE; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; @@ -4837,15 +4832,21 @@ static long ext4_zero_range(struct file *file, loff_t offset, ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, + flags, mode); + if (ret) + goto out_dio; /* * Remove entire range from the extent status tree. + * + * ext4_es_remove_extent(inode, lblk, max_blocks) is + * NOT sufficient. I'm not sure why this is the case, + * but let's be conservative and remove the extent + * status tree for the entire inode. There should be + * no outstanding delalloc extents thanks to the + * filemap_write_and_wait_range() call above. */ - ret = ext4_es_remove_extent(inode, lblk, max_blocks); - if (ret) - goto out_dio; - - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, - flags, mode); + ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); if (ret) goto out_dio; } @@ -5304,36 +5305,31 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, struct ext4_ext_path *path; int ret = 0, depth; struct ext4_extent *extent; - ext4_lblk_t stop_block, current_block; + ext4_lblk_t stop_block; ext4_lblk_t ex_start, ex_end; /* Let path point to the last extent */ - path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); + path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; extent = path[depth].p_ext; - if (!extent) { - ext4_ext_drop_refs(path); - kfree(path); - return ret; - } + if (!extent) + goto out; stop_block = le32_to_cpu(extent->ee_block) + ext4_ext_get_actual_len(extent); - ext4_ext_drop_refs(path); - kfree(path); /* Nothing to shift, if hole is at the end of file */ if (start >= stop_block) - return ret; + goto out; /* * Don't start shifting extents until we make sure the hole is big * enough to accomodate the shift. 
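As background for the ext4_zero_range() and extent-shifting changes above, here is a user-space sketch (not part of the patch) of the fallocate() operations these kernel paths implement. The file name and sizes are made up; FALLOC_FL_COLLAPSE_RANGE requires the offset and length to be multiples of the filesystem block size, and both flags need filesystem support.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0)
		return 1;

	/* Zero 1 MiB at offset 4 MiB without changing the file size;
	 * ext4_zero_range() satisfies this with unwritten extents. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20))
		perror("zero range");

	/* Remove 1 MiB at offset 8 MiB and shift the tail down; this is
	 * what ext4_collapse_range()/ext4_ext_shift_extents() implement. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 8 << 20, 1 << 20))
		perror("collapse range");

	close(fd);
	return 0;
}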
*/ - path = ext4_ext_find_extent(inode, start - 1, NULL, 0); + path = ext4_find_extent(inode, start - 1, &path, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; @@ -5346,8 +5342,6 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, ex_start = 0; ex_end = 0; } - ext4_ext_drop_refs(path); - kfree(path); if ((start == ex_start && shift > ex_start) || (shift > start - ex_end)) @@ -5355,7 +5349,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, /* It's safe to start updating extents */ while (start < stop_block) { - path = ext4_ext_find_extent(inode, start, NULL, 0); + path = ext4_find_extent(inode, start, &path, 0); if (IS_ERR(path)) return PTR_ERR(path); depth = path->p_depth; @@ -5365,27 +5359,23 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, (unsigned long) start); return -EIO; } - - current_block = le32_to_cpu(extent->ee_block); - if (start > current_block) { + if (start > le32_to_cpu(extent->ee_block)) { /* Hole, move to the next extent */ - ret = mext_next_extent(inode, path, &extent); - if (ret != 0) { - ext4_ext_drop_refs(path); - kfree(path); - if (ret == 1) - ret = 0; - break; + if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { + path[depth].p_ext++; + } else { + start = ext4_ext_next_allocated_block(path); + continue; } } ret = ext4_ext_shift_path_extents(path, shift, inode, handle, &start); - ext4_ext_drop_refs(path); - kfree(path); if (ret) break; } - +out: + ext4_ext_drop_refs(path); + kfree(path); return ret; } @@ -5508,3 +5498,199 @@ out_mutex: mutex_unlock(&inode->i_mutex); return ret; } + +/** + * ext4_swap_extents - Swap extents between two inodes + * + * @inode1: First inode + * @inode2: Second inode + * @lblk1: Start block for first inode + * @lblk2: Start block for second inode + * @count: Number of blocks to swap + * @unwritten: Mark second inode's extents as unwritten after swap + * @erp: Pointer to save error value + * + * This helper routine does exactly what its name promises: it swaps extents. All other + * work such as page-cache locking consistency, bh mapping consistency or + * copying of the extents' data must be performed by the caller.
+ * Locking: + * i_mutex is held for both inodes + * i_data_sem is locked for write for both inodes + * Assumptions: + * All pages from requested range are locked for both inodes + */ +int +ext4_swap_extents(handle_t *handle, struct inode *inode1, + struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, + ext4_lblk_t count, int unwritten, int *erp) +{ + struct ext4_ext_path *path1 = NULL; + struct ext4_ext_path *path2 = NULL; + int replaced_count = 0; + + BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); + BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); + BUG_ON(!mutex_is_locked(&inode1->i_mutex)); + BUG_ON(!mutex_is_locked(&inode2->i_mutex)); + + *erp = ext4_es_remove_extent(inode1, lblk1, count); + if (unlikely(*erp)) + return 0; + *erp = ext4_es_remove_extent(inode2, lblk2, count); + if (unlikely(*erp)) + return 0; + + while (count) { + struct ext4_extent *ex1, *ex2, tmp_ex; + ext4_lblk_t e1_blk, e2_blk; + int e1_len, e2_len, len; + int split = 0; + + path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); + if (unlikely(IS_ERR(path1))) { + *erp = PTR_ERR(path1); + path1 = NULL; + finish: + count = 0; + goto repeat; + } + path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); + if (unlikely(IS_ERR(path2))) { + *erp = PTR_ERR(path2); + path2 = NULL; + goto finish; + } + ex1 = path1[path1->p_depth].p_ext; + ex2 = path2[path2->p_depth].p_ext; + /* Do we have something to swap? */ + if (unlikely(!ex2 || !ex1)) + goto finish; + + e1_blk = le32_to_cpu(ex1->ee_block); + e2_blk = le32_to_cpu(ex2->ee_block); + e1_len = ext4_ext_get_actual_len(ex1); + e2_len = ext4_ext_get_actual_len(ex2); + + /* Hole handling */ + if (!in_range(lblk1, e1_blk, e1_len) || + !in_range(lblk2, e2_blk, e2_len)) { + ext4_lblk_t next1, next2; + + /* if hole after extent, then go to next extent */ + next1 = ext4_ext_next_allocated_block(path1); + next2 = ext4_ext_next_allocated_block(path2); + /* If hole before extent, then shift to that extent */ + if (e1_blk > lblk1) + next1 = e1_blk; + if (e2_blk > lblk2) + next2 = e2_blk; + /* Do we have something to swap */ + if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) + goto finish; + /* Move to the rightmost boundary */ + len = next1 - lblk1; + if (len < next2 - lblk2) + len = next2 - lblk2; + if (len > count) + len = count; + lblk1 += len; + lblk2 += len; + count -= len; + goto repeat; + } + + /* Prepare left boundary */ + if (e1_blk < lblk1) { + split = 1; + *erp = ext4_force_split_extent_at(handle, inode1, + &path1, lblk1, 0); + if (unlikely(*erp)) + goto finish; + } + if (e2_blk < lblk2) { + split = 1; + *erp = ext4_force_split_extent_at(handle, inode2, + &path2, lblk2, 0); + if (unlikely(*erp)) + goto finish; + } + /* ext4_split_extent_at() may result in leaf extent split, + * path must be revalidated. */ + if (split) + goto repeat; + + /* Prepare right boundary */ + len = count; + if (len > e1_blk + e1_len - lblk1) + len = e1_blk + e1_len - lblk1; + if (len > e2_blk + e2_len - lblk2) + len = e2_blk + e2_len - lblk2; + + if (len != e1_len) { + split = 1; + *erp = ext4_force_split_extent_at(handle, inode1, + &path1, lblk1 + len, 0); + if (unlikely(*erp)) + goto finish; + } + if (len != e2_len) { + split = 1; + *erp = ext4_force_split_extent_at(handle, inode2, + &path2, lblk2 + len, 0); + if (*erp) + goto finish; + } + /* ext4_split_extent_at() may result in leaf extent split, + * path must be revalidated. 
*/ + if (split) + goto repeat; + + BUG_ON(e2_len != e1_len); + *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth); + if (unlikely(*erp)) + goto finish; + *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth); + if (unlikely(*erp)) + goto finish; + + /* Both extents are fully inside boundaries. Swap it now */ + tmp_ex = *ex1; + ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2)); + ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex)); + ex1->ee_len = cpu_to_le16(e2_len); + ex2->ee_len = cpu_to_le16(e1_len); + if (unwritten) + ext4_ext_mark_unwritten(ex2); + if (ext4_ext_is_unwritten(&tmp_ex)) + ext4_ext_mark_unwritten(ex1); + + ext4_ext_try_to_merge(handle, inode2, path2, ex2); + ext4_ext_try_to_merge(handle, inode1, path1, ex1); + *erp = ext4_ext_dirty(handle, inode2, path2 + + path2->p_depth); + if (unlikely(*erp)) + goto finish; + *erp = ext4_ext_dirty(handle, inode1, path1 + + path1->p_depth); + /* + * Looks scary, but the second inode already points to the new blocks + * and it was successfully dirtied. Luckily an error at this point can + * only be a journal error, so the full transaction will be + * aborted anyway. + */ + if (unlikely(*erp)) + goto finish; + lblk1 += len; + lblk2 += len; + replaced_count += len; + count -= len; + + repeat: + ext4_ext_drop_refs(path1); + kfree(path1); + ext4_ext_drop_refs(path2); + kfree(path2); + path1 = path2 = NULL; + } + return replaced_count; +} diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 0b7e28e7eaa..94e7855ae71 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -11,6 +11,8 @@ */ #include <linux/rbtree.h> #include <linux/list_sort.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> #include "ext4.h" #include "extents_status.h" @@ -313,19 +315,27 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, */ if (!ext4_es_is_delayed(es)) { EXT4_I(inode)->i_es_lru_nr++; - percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt); + percpu_counter_inc(&EXT4_SB(inode->i_sb)-> + s_es_stats.es_stats_lru_cnt); } + EXT4_I(inode)->i_es_all_nr++; + percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); + + return es; } static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) { + EXT4_I(inode)->i_es_all_nr--; + percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); + + /* Decrease the lru counter when this es is not delayed */ if (!ext4_es_is_delayed(es)) { BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0); EXT4_I(inode)->i_es_lru_nr--; - percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt); + percpu_counter_dec(&EXT4_SB(inode->i_sb)-> + s_es_stats.es_stats_lru_cnt); } kmem_cache_free(ext4_es_cachep, es); @@ -426,7 +436,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode, unsigned short ee_len; int depth, ee_status, es_status; - path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); + path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) return; @@ -499,10 +509,8 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode, } } out: - if (path) { - ext4_ext_drop_refs(path); - kfree(path); - } + ext4_ext_drop_refs(path); + kfree(path); } static void ext4_es_insert_extent_ind_check(struct inode *inode, @@ -731,6 +739,7 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es) { struct ext4_es_tree *tree; + struct ext4_es_stats *stats; struct extent_status *es1 = NULL; struct rb_node *node; int found = 0; @@ 
-767,11 +776,15 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, } out: + stats = &EXT4_SB(inode->i_sb)->s_es_stats; if (found) { BUG_ON(!es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; + stats->es_stats_cache_hits++; + } else { + stats->es_stats_cache_misses++; } read_unlock(&EXT4_I(inode)->i_es_lock); @@ -933,11 +946,16 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei) { struct ext4_inode_info *ei; + struct ext4_es_stats *es_stats; struct list_head *cur, *tmp; LIST_HEAD(skipped); + ktime_t start_time; + u64 scan_time; int nr_shrunk = 0; int retried = 0, skip_precached = 1, nr_skipped = 0; + es_stats = &sbi->s_es_stats; + start_time = ktime_get(); spin_lock(&sbi->s_es_lru_lock); retry: @@ -948,7 +966,8 @@ retry: * If we have already reclaimed all extents from extent * status tree, just stop the loop immediately. */ - if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0) + if (percpu_counter_read_positive( + &es_stats->es_stats_lru_cnt) == 0) break; ei = list_entry(cur, struct ext4_inode_info, i_es_lru); @@ -958,7 +977,7 @@ retry: * time. Normally we try hard to avoid shrinking * precached inodes, but we will as a last resort. */ - if ((sbi->s_es_last_sorted < ei->i_touch_when) || + if ((es_stats->es_stats_last_sorted < ei->i_touch_when) || (skip_precached && ext4_test_inode_state(&ei->vfs_inode, EXT4_STATE_EXT_PRECACHED))) { nr_skipped++; @@ -992,7 +1011,7 @@ retry: if ((nr_shrunk == 0) && nr_skipped && !retried) { retried++; list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); - sbi->s_es_last_sorted = jiffies; + es_stats->es_stats_last_sorted = jiffies; ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru); /* @@ -1010,6 +1029,22 @@ retry: if (locked_ei && nr_shrunk == 0) nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); + scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); + if (likely(es_stats->es_stats_scan_time)) + es_stats->es_stats_scan_time = (scan_time + + es_stats->es_stats_scan_time*3) / 4; + else + es_stats->es_stats_scan_time = scan_time; + if (scan_time > es_stats->es_stats_max_scan_time) + es_stats->es_stats_max_scan_time = scan_time; + if (likely(es_stats->es_stats_shrunk)) + es_stats->es_stats_shrunk = (nr_shrunk + + es_stats->es_stats_shrunk*3) / 4; + else + es_stats->es_stats_shrunk = nr_shrunk; + + trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached, + nr_skipped, retried); return nr_shrunk; } @@ -1020,8 +1055,8 @@ static unsigned long ext4_es_count(struct shrinker *shrink, struct ext4_sb_info *sbi; sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); - nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); - trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr); + nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); + trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); return nr; } @@ -1033,31 +1068,160 @@ static unsigned long ext4_es_scan(struct shrinker *shrink, int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk; - ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); - trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret); + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); + trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); if (!nr_to_scan) return ret; nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); - trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret); + 
trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); return nr_shrunk; } -void ext4_es_register_shrinker(struct ext4_sb_info *sbi) +static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos) { + return *pos ? NULL : SEQ_START_TOKEN; +} + +static void * +ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos) +{ + return NULL; +} + +static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v) +{ + struct ext4_sb_info *sbi = seq->private; + struct ext4_es_stats *es_stats = &sbi->s_es_stats; + struct ext4_inode_info *ei, *max = NULL; + unsigned int inode_cnt = 0; + + if (v != SEQ_START_TOKEN) + return 0; + + /* here we just find an inode that has the max nr. of objects */ + spin_lock(&sbi->s_es_lru_lock); + list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) { + inode_cnt++; + if (max && max->i_es_all_nr < ei->i_es_all_nr) + max = ei; + else if (!max) + max = ei; + } + spin_unlock(&sbi->s_es_lru_lock); + + seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", + percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), + percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt)); + seq_printf(seq, " %lu/%lu cache hits/misses\n", + es_stats->es_stats_cache_hits, + es_stats->es_stats_cache_misses); + if (es_stats->es_stats_last_sorted != 0) + seq_printf(seq, " %u ms last sorted interval\n", + jiffies_to_msecs(jiffies - + es_stats->es_stats_last_sorted)); + if (inode_cnt) + seq_printf(seq, " %d inodes on lru list\n", inode_cnt); + + seq_printf(seq, "average:\n %llu us scan time\n", + div_u64(es_stats->es_stats_scan_time, 1000)); + seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk); + if (inode_cnt) + seq_printf(seq, + "maximum:\n %lu inode (%u objects, %u reclaimable)\n" + " %llu us max scan time\n", + max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr, + div_u64(es_stats->es_stats_max_scan_time, 1000)); + + return 0; +} + +static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations ext4_es_seq_shrinker_info_ops = { + .start = ext4_es_seq_shrinker_info_start, + .next = ext4_es_seq_shrinker_info_next, + .stop = ext4_es_seq_shrinker_info_stop, + .show = ext4_es_seq_shrinker_info_show, +}; + +static int +ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, &ext4_es_seq_shrinker_info_ops); + if (!ret) { + struct seq_file *m = file->private_data; + m->private = PDE_DATA(inode); + } + + return ret; +} + +static int +ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file) +{ + return seq_release(inode, file); +} + +static const struct file_operations ext4_es_seq_shrinker_info_fops = { + .owner = THIS_MODULE, + .open = ext4_es_seq_shrinker_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = ext4_es_seq_shrinker_info_release, +}; + +int ext4_es_register_shrinker(struct ext4_sb_info *sbi) +{ + int err; + INIT_LIST_HEAD(&sbi->s_es_lru); spin_lock_init(&sbi->s_es_lru_lock); - sbi->s_es_last_sorted = 0; + sbi->s_es_stats.es_stats_last_sorted = 0; + sbi->s_es_stats.es_stats_shrunk = 0; + sbi->s_es_stats.es_stats_cache_hits = 0; + sbi->s_es_stats.es_stats_cache_misses = 0; + sbi->s_es_stats.es_stats_scan_time = 0; + sbi->s_es_stats.es_stats_max_scan_time = 0; + err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); + if (err) + return err; + err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL); + if (err) + goto err1; + 
sbi->s_es_shrinker.scan_objects = ext4_es_scan; sbi->s_es_shrinker.count_objects = ext4_es_count; sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; - register_shrinker(&sbi->s_es_shrinker); + err = register_shrinker(&sbi->s_es_shrinker); + if (err) + goto err2; + + if (sbi->s_proc) + proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc, + &ext4_es_seq_shrinker_info_fops, sbi); + + return 0; + +err2: + percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); +err1: + percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); + return err; } void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) { + if (sbi->s_proc) + remove_proc_entry("es_shrinker_info", sbi->s_proc); + percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); + percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); unregister_shrinker(&sbi->s_es_shrinker); } diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index f1b62a41992..efd5f970b50 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h @@ -64,6 +64,17 @@ struct ext4_es_tree { struct extent_status *cache_es; /* recently accessed extent */ }; +struct ext4_es_stats { + unsigned long es_stats_last_sorted; + unsigned long es_stats_shrunk; + unsigned long es_stats_cache_hits; + unsigned long es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_lru_cnt; +}; + extern int __init ext4_init_es(void); extern void ext4_exit_es(void); extern void ext4_es_init_tree(struct ext4_es_tree *tree); @@ -138,7 +149,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es, (pb & ~ES_MASK)); } -extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi); +extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi); extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); extern void ext4_es_lru_add(struct inode *inode); extern void ext4_es_lru_del(struct inode *inode); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index aca7b24a443..8131be8c0af 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -137,10 +137,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos); } + iocb->private = &overwrite; if (o_direct) { blk_start_plug(&plug); - iocb->private = &overwrite; /* check whether we do a DIO overwrite or not */ if (ext4_should_dioread_nolock(inode) && !aio_mutex && diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 5b87fc36aab..ac644c31ca6 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -887,6 +887,10 @@ got: struct buffer_head *block_bitmap_bh; block_bitmap_bh = ext4_read_block_bitmap(sb, group); + if (!block_bitmap_bh) { + err = -EIO; + goto out; + } BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); err = ext4_journal_get_write_access(handle, block_bitmap_bh); if (err) { @@ -1011,8 +1015,7 @@ got: spin_unlock(&sbi->s_next_gen_lock); /* Precompute checksum seed for inode metadata */ - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + if (ext4_has_metadata_csum(sb)) { __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); __le32 gen = cpu_to_le32(inode->i_generation); diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index e75f840000a..36b369697a1 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -318,34 +318,24 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain * as described above and return 0. 
*/ -static int ext4_alloc_branch(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, int indirect_blks, - int *blks, ext4_fsblk_t goal, - ext4_lblk_t *offsets, Indirect *branch) +static int ext4_alloc_branch(handle_t *handle, + struct ext4_allocation_request *ar, + int indirect_blks, ext4_lblk_t *offsets, + Indirect *branch) { - struct ext4_allocation_request ar; struct buffer_head * bh; ext4_fsblk_t b, new_blocks[4]; __le32 *p; int i, j, err, len = 1; - /* - * Set up for the direct block allocation - */ - memset(&ar, 0, sizeof(ar)); - ar.inode = inode; - ar.len = *blks; - ar.logical = iblock; - if (S_ISREG(inode->i_mode)) - ar.flags = EXT4_MB_HINT_DATA; - for (i = 0; i <= indirect_blks; i++) { if (i == indirect_blks) { - ar.goal = goal; - new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err); + new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err); } else - goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode, - goal, 0, NULL, &err); + ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle, + ar->inode, ar->goal, + ar->flags & EXT4_MB_DELALLOC_RESERVED, + NULL, &err); if (err) { i--; goto failed; @@ -354,7 +344,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, if (i == 0) continue; - bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]); + bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]); if (unlikely(!bh)) { err = -ENOMEM; goto failed; @@ -372,7 +362,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, b = new_blocks[i]; if (i == indirect_blks) - len = ar.len; + len = ar->len; for (j = 0; j < len; j++) *p++ = cpu_to_le32(b++); @@ -381,11 +371,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, bh); + err = ext4_handle_dirty_metadata(handle, ar->inode, bh); if (err) goto failed; } - *blks = ar.len; return 0; failed: for (; i >= 0; i--) { @@ -396,10 +385,10 @@ failed: * existing before ext4_alloc_branch() was called. */ if (i > 0 && i != indirect_blks && branch[i].bh) - ext4_forget(handle, 1, inode, branch[i].bh, + ext4_forget(handle, 1, ar->inode, branch[i].bh, branch[i].bh->b_blocknr); - ext4_free_blocks(handle, inode, NULL, new_blocks[i], - (i == indirect_blks) ? ar.len : 1, 0); + ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i], + (i == indirect_blks) ? ar->len : 1, 0); } return err; } @@ -419,9 +408,9 @@ failed: * inode (->i_blocks, etc.). In case of success we end up with the full * chain to new block and return 0. 
*/ -static int ext4_splice_branch(handle_t *handle, struct inode *inode, - ext4_lblk_t block, Indirect *where, int num, - int blks) +static int ext4_splice_branch(handle_t *handle, + struct ext4_allocation_request *ar, + Indirect *where, int num) { int i; int err = 0; @@ -446,9 +435,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, * Update the host buffer_head or inode to point to more just allocated * direct blocks blocks */ - if (num == 0 && blks > 1) { + if (num == 0 && ar->len > 1) { current_block = le32_to_cpu(where->key) + 1; - for (i = 1; i < blks; i++) + for (i = 1; i < ar->len; i++) *(where->p + i) = cpu_to_le32(current_block++); } @@ -465,14 +454,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, */ jbd_debug(5, "splicing indirect only\n"); BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, where->bh); + err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh); if (err) goto err_out; } else { /* * OK, we spliced it into the inode itself on a direct block. */ - ext4_mark_inode_dirty(handle, inode); + ext4_mark_inode_dirty(handle, ar->inode); jbd_debug(5, "splicing direct\n"); } return err; @@ -484,11 +473,11 @@ err_out: * need to revoke the block, which is why we don't * need to set EXT4_FREE_BLOCKS_METADATA. */ - ext4_free_blocks(handle, inode, where[i].bh, 0, 1, + ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1, EXT4_FREE_BLOCKS_FORGET); } - ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), - blks, 0); + ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key), + ar->len, 0); return err; } @@ -525,11 +514,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { + struct ext4_allocation_request ar; int err = -EIO; ext4_lblk_t offsets[4]; Indirect chain[4]; Indirect *partial; - ext4_fsblk_t goal; int indirect_blks; int blocks_to_boundary = 0; int depth; @@ -579,7 +568,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, return -ENOSPC; } - goal = ext4_find_goal(inode, map->m_lblk, partial); + /* Set up for the direct block allocation */ + memset(&ar, 0, sizeof(ar)); + ar.inode = inode; + ar.logical = map->m_lblk; + if (S_ISREG(inode->i_mode)) + ar.flags = EXT4_MB_HINT_DATA; + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) + ar.flags |= EXT4_MB_DELALLOC_RESERVED; + + ar.goal = ext4_find_goal(inode, map->m_lblk, partial); /* the number of blocks need to allocate for [d,t]indirect blocks */ indirect_blks = (chain + depth) - partial - 1; @@ -588,13 +586,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, * Next look up the indirect map to count the totoal number of * direct blocks to allocate for this branch. */ - count = ext4_blks_to_allocate(partial, indirect_blks, - map->m_len, blocks_to_boundary); + ar.len = ext4_blks_to_allocate(partial, indirect_blks, + map->m_len, blocks_to_boundary); + /* * Block out ext4_truncate while we alter the tree */ - err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, - &count, goal, + err = ext4_alloc_branch(handle, &ar, indirect_blks, offsets + (partial - chain), partial); /* @@ -605,14 +603,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, * may need to return -EAGAIN upwards in the worst case. 
--sct */ if (!err) - err = ext4_splice_branch(handle, inode, map->m_lblk, - partial, indirect_blks, count); + err = ext4_splice_branch(handle, &ar, partial, indirect_blks); if (err) goto cleanup; map->m_flags |= EXT4_MAP_NEW; ext4_update_inode_fsync_trans(handle, inode, 1); + count = ar.len; got_it: map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = le32_to_cpu(chain[depth-1].key); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index bea662bd0ca..3ea62695abc 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -594,6 +594,7 @@ retry: if (ret) { unlock_page(page); page_cache_release(page); + page = NULL; ext4_orphan_add(handle, inode); up_write(&EXT4_I(inode)->xattr_sem); sem_held = 0; @@ -613,7 +614,8 @@ retry: if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; - block_commit_write(page, from, to); + if (page) + block_commit_write(page, from, to); out: if (page) { unlock_page(page); @@ -1126,8 +1128,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE, inline_size - EXT4_INLINE_DOTDOT_SIZE); - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); inode->i_size = inode->i_sb->s_blocksize; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3aa26e9117c..3356ab5395f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || - !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + !ext4_has_metadata_csum(inode->i_sb)) return 1; provided = le16_to_cpu(raw->i_checksum_lo); @@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || - !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + !ext4_has_metadata_csum(inode->i_sb)) return; csum = ext4_inode_csum(inode, raw, ei); @@ -224,16 +222,15 @@ void ext4_evict_inode(struct inode *inode) goto no_delete; } - if (!is_bad_inode(inode)) - dquot_initialize(inode); + if (is_bad_inode(inode)) + goto no_delete; + dquot_initialize(inode); if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages_final(&inode->i_data); WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); - if (is_bad_inode(inode)) - goto no_delete; /* * Protect us against freezing - iput() caller didn't have to have any @@ -590,20 +587,12 @@ found: /* * New blocks allocate and/or writing to unwritten extent * will possibly result in updating i_data, so we take - * the write lock of i_data_sem, and call get_blocks() + * the write lock of i_data_sem, and call get_block() * with create == 1 flag. 
*/ down_write(&EXT4_I(inode)->i_data_sem); /* - * if the caller is from delayed allocation writeout path - * we have already reserved fs blocks for allocation - * let the underlying get_block() function know to - * avoid double accounting - */ - if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) - ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); - /* * We need to check for EXT4 here because migrate * could have changed the inode type in between */ @@ -631,8 +620,6 @@ found: (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) ext4_da_update_reserve_space(inode, retval, 1); } - if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) - ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); if (retval > 0) { unsigned int status; @@ -734,11 +721,11 @@ int ext4_get_block(struct inode *inode, sector_t iblock, * `handle' can be NULL if create is zero */ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, - ext4_lblk_t block, int create, int *errp) + ext4_lblk_t block, int create) { struct ext4_map_blocks map; struct buffer_head *bh; - int fatal = 0, err; + int err; J_ASSERT(handle != NULL || create == 0); @@ -747,21 +734,14 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, err = ext4_map_blocks(handle, inode, &map, create ? EXT4_GET_BLOCKS_CREATE : 0); - /* ensure we send some value back into *errp */ - *errp = 0; - - if (create && err == 0) - err = -ENOSPC; /* should never happen */ + if (err == 0) + return create ? ERR_PTR(-ENOSPC) : NULL; if (err < 0) - *errp = err; - if (err <= 0) - return NULL; + return ERR_PTR(err); bh = sb_getblk(inode->i_sb, map.m_pblk); - if (unlikely(!bh)) { - *errp = -ENOMEM; - return NULL; - } + if (unlikely(!bh)) + return ERR_PTR(-ENOMEM); if (map.m_flags & EXT4_MAP_NEW) { J_ASSERT(create != 0); J_ASSERT(handle != NULL); @@ -775,44 +755,44 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); - fatal = ext4_journal_get_create_access(handle, bh); - if (!fatal && !buffer_uptodate(bh)) { + err = ext4_journal_get_create_access(handle, bh); + if (unlikely(err)) { + unlock_buffer(bh); + goto errout; + } + if (!buffer_uptodate(bh)) { memset(bh->b_data, 0, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); - if (!fatal) - fatal = err; - } else { + if (unlikely(err)) + goto errout; + } else BUFFER_TRACE(bh, "not a new buffer"); - } - if (fatal) { - *errp = fatal; - brelse(bh); - bh = NULL; - } return bh; +errout: + brelse(bh); + return ERR_PTR(err); } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, - ext4_lblk_t block, int create, int *err) + ext4_lblk_t block, int create) { struct buffer_head *bh; - bh = ext4_getblk(handle, inode, block, create, err); - if (!bh) + bh = ext4_getblk(handle, inode, block, create); + if (IS_ERR(bh)) return bh; - if (buffer_uptodate(bh)) + if (!bh || buffer_uptodate(bh)) return bh; ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); - *err = -EIO; - return NULL; + return ERR_PTR(-EIO); } int ext4_walk_page_buffers(handle_t *handle, @@ -1536,7 +1516,7 @@ out_unlock: } /* - * This is a special get_blocks_t callback which is used by + * This is a special get_block_t callback which is used by * ext4_da_write_begin(). It will either return mapped block or * reserve space for a single block. 
* @@ -2011,12 +1991,10 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) * in data loss. So use reserved blocks to allocate metadata if * possible. * - * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks - * in question are delalloc blocks. This affects functions in many - * different parts of the allocation call path. This flag exists - * primarily because we don't want to change *many* call functions, so - * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag - * once the inode's allocation semaphore is taken. + * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if + * the blocks in question are delalloc blocks. This indicates + * that the blocks and quotas has already been checked when + * the data was copied into the page cache. */ get_blocks_flags = EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL; @@ -2515,6 +2493,20 @@ static int ext4_nonda_switch(struct super_block *sb) return 0; } +/* We always reserve for an inode update; the superblock could be there too */ +static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) +{ + if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_LARGE_FILE))) + return 1; + + if (pos + len <= 0x7fffffffULL) + return 1; + + /* We might need to update the superblock to set LARGE_FILE */ + return 2; +} + static int ext4_da_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) @@ -2565,7 +2557,8 @@ retry_grab: * of file which has an already mapped buffer. */ retry_journal: - handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); + handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, + ext4_da_write_credits(inode, pos, len)); if (IS_ERR(handle)) { page_cache_release(page); return PTR_ERR(handle); @@ -2658,10 +2651,7 @@ static int ext4_da_write_end(struct file *file, if (copied && new_i_size > EXT4_I(inode)->i_disksize) { if (ext4_has_inline_data(inode) || ext4_da_should_update_i_disksize(page, end)) { - down_write(&EXT4_I(inode)->i_data_sem); - if (new_i_size > EXT4_I(inode)->i_disksize) - EXT4_I(inode)->i_disksize = new_i_size; - up_write(&EXT4_I(inode)->i_data_sem); + ext4_update_i_disksize(inode, new_i_size); /* We need to mark inode dirty even if * new_i_size is less that inode->i_size * bu greater than i_disksize.(hint delalloc) @@ -3936,8 +3926,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) ei->i_extra_isize = 0; /* Precompute checksum seed for inode metadata */ - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + if (ext4_has_metadata_csum(sb)) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); @@ -4127,6 +4116,13 @@ bad_inode: return ERR_PTR(ret); } +struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) +{ + if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) + return ERR_PTR(-EIO); + return ext4_iget(sb, ino); +} + static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) @@ -4226,7 +4222,8 @@ static int ext4_do_update_inode(handle_t *handle, EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); - if (ext4_inode_blocks_set(handle, raw_inode, ei)) { + err = ext4_inode_blocks_set(handle, raw_inode, ei); + if (err) { spin_unlock(&ei->i_raw_lock); goto out_brelse; } @@ -4536,8 +4533,12 @@ int ext4_setattr(struct dentry *dentry, 
struct iattr *attr) ext4_orphan_del(NULL, inode); goto err_out; } - } else + } else { + loff_t oldsize = inode->i_size; + i_size_write(inode, attr->ia_size); + pagecache_isize_extended(inode, oldsize, inode->i_size); + } /* * Blocks are going to be removed from the inode. Wait @@ -4958,7 +4959,12 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) if (val) ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); else { - jbd2_journal_flush(journal); + err = jbd2_journal_flush(journal); + if (err < 0) { + jbd2_journal_unlock_updates(journal); + ext4_inode_resume_unlocked_dio(inode); + return err; + } ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); } ext4_set_aops(inode); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 0f2252ec274..bfda18a1559 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -331,8 +331,7 @@ flags_out: if (!inode_owner_or_capable(inode)) return -EPERM; - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + if (ext4_has_metadata_csum(inode->i_sb)) { ext4_warning(sb, "Setting inode version is not " "supported with metadata_csum enabled."); return -ENOTTY; @@ -532,9 +531,17 @@ group_add_out: } case EXT4_IOC_SWAP_BOOT: + { + int err; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; - return swap_inode_boot_loader(sb, inode); + err = mnt_want_write_file(filp); + if (err) + return err; + err = swap_inode_boot_loader(sb, inode); + mnt_drop_write_file(filp); + return err; + } case EXT4_IOC_RESIZE_FS: { ext4_fsblk_t n_blocks_count; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 748c9136a60..dbfe15c2533 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3155,9 +3155,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, "start %lu, size %lu, fe_logical %lu", (unsigned long) start, (unsigned long) size, (unsigned long) ac->ac_o_ex.fe_logical); + BUG(); } - BUG_ON(start + size <= ac->ac_o_ex.fe_logical && - start > ac->ac_o_ex.fe_logical); BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); /* now prepare goal request */ @@ -4410,14 +4409,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, if (IS_NOQUOTA(ar->inode)) ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; - /* - * For delayed allocation, we could skip the ENOSPC and - * EDQUOT check, as blocks and quotas have been already - * reserved when data being copied into pagecache. - */ - if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) - ar->flags |= EXT4_MB_DELALLOC_RESERVED; - else { + if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { /* Without delayed allocation we need to verify * there is enough free blocks to do block allocation * and verify allocation doesn't exceed the quota limits. 
@@ -4528,8 +4520,7 @@ out: if (inquota && ar->len < inquota) dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); if (!ar->len) { - if (!ext4_test_inode_state(ar->inode, - EXT4_STATE_DELALLOC_RESERVED)) + if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index d3567f27bae..a432634f2e6 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -41,8 +41,7 @@ static int finish_range(handle_t *handle, struct inode *inode, ext4_ext_store_pblock(&newext, lb->first_pblock); /* Locking only for convinience since we are operating on temp inode */ down_write(&EXT4_I(inode)->i_data_sem); - path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0); - + path = ext4_find_extent(inode, lb->first_block, NULL, 0); if (IS_ERR(path)) { retval = PTR_ERR(path); path = NULL; @@ -81,13 +80,11 @@ static int finish_range(handle_t *handle, struct inode *inode, goto err_out; } } - retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0); + retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0); err_out: up_write((&EXT4_I(inode)->i_data_sem)); - if (path) { - ext4_ext_drop_refs(path); - kfree(path); - } + ext4_ext_drop_refs(path); + kfree(path); lb->first_pblock = 0; return retval; } diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 32bce844c2e..8313ca3324e 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp) static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) { - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return 1; return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp); @@ -29,8 +28,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) { - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return; mmp->mmp_checksum = ext4_mmp_csum(sb, mmp); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 671a74b14fd..9f2311bc9c4 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -27,120 +27,26 @@ * @lblock: logical block number to find an extent path * @path: pointer to an extent path pointer (for output) * - * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value + * ext4_find_extent wrapper. Return 0 on success, or a negative error value * on failure. 
*/ static inline int get_ext_path(struct inode *inode, ext4_lblk_t lblock, - struct ext4_ext_path **orig_path) + struct ext4_ext_path **ppath) { - int ret = 0; struct ext4_ext_path *path; - path = ext4_ext_find_extent(inode, lblock, *orig_path, EXT4_EX_NOCACHE); + path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE); if (IS_ERR(path)) - ret = PTR_ERR(path); - else if (path[ext_depth(inode)].p_ext == NULL) - ret = -ENODATA; - else - *orig_path = path; - - return ret; -} - -/** - * copy_extent_status - Copy the extent's initialization status - * - * @src: an extent for getting initialize status - * @dest: an extent to be set the status - */ -static void -copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest) -{ - if (ext4_ext_is_unwritten(src)) - ext4_ext_mark_unwritten(dest); - else - dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest)); -} - -/** - * mext_next_extent - Search for the next extent and set it to "extent" - * - * @inode: inode which is searched - * @path: this will obtain data for the next extent - * @extent: pointer to the next extent we have just gotten - * - * Search the next extent in the array of ext4_ext_path structure (@path) - * and set it to ext4_extent structure (@extent). In addition, the member of - * @path (->p_ext) also points the next extent. Return 0 on success, 1 if - * ext4_ext_path structure refers to the last extent, or a negative error - * value on failure. - */ -int -mext_next_extent(struct inode *inode, struct ext4_ext_path *path, - struct ext4_extent **extent) -{ - struct ext4_extent_header *eh; - int ppos, leaf_ppos = path->p_depth; - - ppos = leaf_ppos; - if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { - /* leaf block */ - *extent = ++path[ppos].p_ext; - path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); - return 0; - } - - while (--ppos >= 0) { - if (EXT_LAST_INDEX(path[ppos].p_hdr) > - path[ppos].p_idx) { - int cur_ppos = ppos; - - /* index block */ - path[ppos].p_idx++; - path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); - if (path[ppos+1].p_bh) - brelse(path[ppos+1].p_bh); - path[ppos+1].p_bh = - sb_bread(inode->i_sb, path[ppos].p_block); - if (!path[ppos+1].p_bh) - return -EIO; - path[ppos+1].p_hdr = - ext_block_hdr(path[ppos+1].p_bh); - - /* Halfway index block */ - while (++cur_ppos < leaf_ppos) { - path[cur_ppos].p_idx = - EXT_FIRST_INDEX(path[cur_ppos].p_hdr); - path[cur_ppos].p_block = - ext4_idx_pblock(path[cur_ppos].p_idx); - if (path[cur_ppos+1].p_bh) - brelse(path[cur_ppos+1].p_bh); - path[cur_ppos+1].p_bh = sb_bread(inode->i_sb, - path[cur_ppos].p_block); - if (!path[cur_ppos+1].p_bh) - return -EIO; - path[cur_ppos+1].p_hdr = - ext_block_hdr(path[cur_ppos+1].p_bh); - } - - path[leaf_ppos].p_ext = *extent = NULL; - - eh = path[leaf_ppos].p_hdr; - if (le16_to_cpu(eh->eh_entries) == 0) - /* empty leaf is found */ - return -ENODATA; - - /* leaf block */ - path[leaf_ppos].p_ext = *extent = - EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); - path[leaf_ppos].p_block = - ext4_ext_pblock(path[leaf_ppos].p_ext); - return 0; - } + return PTR_ERR(path); + if (path[ext_depth(inode)].p_ext == NULL) { + ext4_ext_drop_refs(path); + kfree(path); + *ppath = NULL; + return -ENODATA; } - /* We found the last extent */ - return 1; + *ppath = path; + return 0; } /** @@ -178,417 +84,6 @@ ext4_double_up_write_data_sem(struct inode *orig_inode, } /** - * mext_insert_across_blocks - Insert extents across leaf block - * - * @handle: journal handle - * @orig_inode: original inode - * @o_start: first original extent to 
be changed - * @o_end: last original extent to be changed - * @start_ext: first new extent to be inserted - * @new_ext: middle of new extent to be inserted - * @end_ext: last new extent to be inserted - * - * Allocate a new leaf block and insert extents into it. Return 0 on success, - * or a negative error value on failure. - */ -static int -mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, - struct ext4_extent *o_start, struct ext4_extent *o_end, - struct ext4_extent *start_ext, struct ext4_extent *new_ext, - struct ext4_extent *end_ext) -{ - struct ext4_ext_path *orig_path = NULL; - ext4_lblk_t eblock = 0; - int new_flag = 0; - int end_flag = 0; - int err = 0; - - if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) { - if (o_start == o_end) { - - /* start_ext new_ext end_ext - * donor |---------|-----------|--------| - * orig |------------------------------| - */ - end_flag = 1; - } else { - - /* start_ext new_ext end_ext - * donor |---------|----------|---------| - * orig |---------------|--------------| - */ - o_end->ee_block = end_ext->ee_block; - o_end->ee_len = end_ext->ee_len; - ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); - } - - o_start->ee_len = start_ext->ee_len; - eblock = le32_to_cpu(start_ext->ee_block); - new_flag = 1; - - } else if (start_ext->ee_len && new_ext->ee_len && - !end_ext->ee_len && o_start == o_end) { - - /* start_ext new_ext - * donor |--------------|---------------| - * orig |------------------------------| - */ - o_start->ee_len = start_ext->ee_len; - eblock = le32_to_cpu(start_ext->ee_block); - new_flag = 1; - - } else if (!start_ext->ee_len && new_ext->ee_len && - end_ext->ee_len && o_start == o_end) { - - /* new_ext end_ext - * donor |--------------|---------------| - * orig |------------------------------| - */ - o_end->ee_block = end_ext->ee_block; - o_end->ee_len = end_ext->ee_len; - ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); - - /* - * Set 0 to the extent block if new_ext was - * the first block. - */ - if (new_ext->ee_block) - eblock = le32_to_cpu(new_ext->ee_block); - - new_flag = 1; - } else { - ext4_debug("ext4 move extent: Unexpected insert case\n"); - return -EIO; - } - - if (new_flag) { - err = get_ext_path(orig_inode, eblock, &orig_path); - if (err) - goto out; - - if (ext4_ext_insert_extent(handle, orig_inode, - orig_path, new_ext, 0)) - goto out; - } - - if (end_flag) { - err = get_ext_path(orig_inode, - le32_to_cpu(end_ext->ee_block) - 1, &orig_path); - if (err) - goto out; - - if (ext4_ext_insert_extent(handle, orig_inode, - orig_path, end_ext, 0)) - goto out; - } -out: - if (orig_path) { - ext4_ext_drop_refs(orig_path); - kfree(orig_path); - } - - return err; - -} - -/** - * mext_insert_inside_block - Insert new extent to the extent block - * - * @o_start: first original extent to be moved - * @o_end: last original extent to be moved - * @start_ext: first new extent to be inserted - * @new_ext: middle of new extent to be inserted - * @end_ext: last new extent to be inserted - * @eh: extent header of target leaf block - * @range_to_move: used to decide how to insert extent - * - * Insert extents into the leaf block. The extent (@o_start) is overwritten - * by inserted extents. 
- */ -static void -mext_insert_inside_block(struct ext4_extent *o_start, - struct ext4_extent *o_end, - struct ext4_extent *start_ext, - struct ext4_extent *new_ext, - struct ext4_extent *end_ext, - struct ext4_extent_header *eh, - int range_to_move) -{ - int i = 0; - unsigned long len; - - /* Move the existing extents */ - if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) { - len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) - - (unsigned long)(o_end + 1); - memmove(o_end + 1 + range_to_move, o_end + 1, len); - } - - /* Insert start entry */ - if (start_ext->ee_len) - o_start[i++].ee_len = start_ext->ee_len; - - /* Insert new entry */ - if (new_ext->ee_len) { - o_start[i] = *new_ext; - ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext)); - } - - /* Insert end entry */ - if (end_ext->ee_len) - o_start[i] = *end_ext; - - /* Increment the total entries counter on the extent block */ - le16_add_cpu(&eh->eh_entries, range_to_move); -} - -/** - * mext_insert_extents - Insert new extent - * - * @handle: journal handle - * @orig_inode: original inode - * @orig_path: path indicates first extent to be changed - * @o_start: first original extent to be changed - * @o_end: last original extent to be changed - * @start_ext: first new extent to be inserted - * @new_ext: middle of new extent to be inserted - * @end_ext: last new extent to be inserted - * - * Call the function to insert extents. If we cannot add more extents into - * the leaf block, we call mext_insert_across_blocks() to create a - * new leaf block. Otherwise call mext_insert_inside_block(). Return 0 - * on success, or a negative error value on failure. - */ -static int -mext_insert_extents(handle_t *handle, struct inode *orig_inode, - struct ext4_ext_path *orig_path, - struct ext4_extent *o_start, - struct ext4_extent *o_end, - struct ext4_extent *start_ext, - struct ext4_extent *new_ext, - struct ext4_extent *end_ext) -{ - struct ext4_extent_header *eh; - unsigned long need_slots, slots_range; - int range_to_move, depth, ret; - - /* - * The extents need to be inserted - * start_extent + new_extent + end_extent. - */ - need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) + - (new_ext->ee_len ? 1 : 0); - - /* The number of slots between start and end */ - slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1) - / sizeof(struct ext4_extent); - - /* Range to move the end of extent */ - range_to_move = need_slots - slots_range; - depth = orig_path->p_depth; - orig_path += depth; - eh = orig_path->p_hdr; - - if (depth) { - /* Register to journal */ - BUFFER_TRACE(orig_path->p_bh, "get_write_access"); - ret = ext4_journal_get_write_access(handle, orig_path->p_bh); - if (ret) - return ret; - } - - /* Expansion */ - if (range_to_move > 0 && - (range_to_move > le16_to_cpu(eh->eh_max) - - le16_to_cpu(eh->eh_entries))) { - - ret = mext_insert_across_blocks(handle, orig_inode, o_start, - o_end, start_ext, new_ext, end_ext); - if (ret < 0) - return ret; - } else - mext_insert_inside_block(o_start, o_end, start_ext, new_ext, - end_ext, eh, range_to_move); - - return ext4_ext_dirty(handle, orig_inode, orig_path); -} - -/** - * mext_leaf_block - Move one leaf extent block into the inode. - * - * @handle: journal handle - * @orig_inode: original inode - * @orig_path: path indicates first extent to be changed - * @dext: donor extent - * @from: start offset on the target file - * - * In order to insert extents into the leaf block, we must divide the extent - * in the leaf block into three extents. 
The one is located to be inserted - * extents, and the others are located around it. - * - * Therefore, this function creates structures to save extents of the leaf - * block, and inserts extents by calling mext_insert_extents() with - * created extents. Return 0 on success, or a negative error value on failure. - */ -static int -mext_leaf_block(handle_t *handle, struct inode *orig_inode, - struct ext4_ext_path *orig_path, struct ext4_extent *dext, - ext4_lblk_t *from) -{ - struct ext4_extent *oext, *o_start, *o_end, *prev_ext; - struct ext4_extent new_ext, start_ext, end_ext; - ext4_lblk_t new_ext_end; - int oext_alen, new_ext_alen, end_ext_alen; - int depth = ext_depth(orig_inode); - int ret; - - start_ext.ee_block = end_ext.ee_block = 0; - o_start = o_end = oext = orig_path[depth].p_ext; - oext_alen = ext4_ext_get_actual_len(oext); - start_ext.ee_len = end_ext.ee_len = 0; - - new_ext.ee_block = cpu_to_le32(*from); - ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext)); - new_ext.ee_len = dext->ee_len; - new_ext_alen = ext4_ext_get_actual_len(&new_ext); - new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; - - /* - * Case: original extent is first - * oext |--------| - * new_ext |--| - * start_ext |--| - */ - if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) && - le32_to_cpu(new_ext.ee_block) < - le32_to_cpu(oext->ee_block) + oext_alen) { - start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - - le32_to_cpu(oext->ee_block)); - start_ext.ee_block = oext->ee_block; - copy_extent_status(oext, &start_ext); - } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { - prev_ext = oext - 1; - /* - * We can merge new_ext into previous extent, - * if these are contiguous and same extent type. - */ - if (ext4_can_extents_be_merged(orig_inode, prev_ext, - &new_ext)) { - o_start = prev_ext; - start_ext.ee_len = cpu_to_le16( - ext4_ext_get_actual_len(prev_ext) + - new_ext_alen); - start_ext.ee_block = oext->ee_block; - copy_extent_status(prev_ext, &start_ext); - new_ext.ee_len = 0; - } - } - - /* - * Case: new_ext_end must be less than oext - * oext |-----------| - * new_ext |-------| - */ - if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { - EXT4_ERROR_INODE(orig_inode, - "new_ext_end(%u) should be less than or equal to " - "oext->ee_block(%u) + oext_alen(%d) - 1", - new_ext_end, le32_to_cpu(oext->ee_block), - oext_alen); - ret = -EIO; - goto out; - } - - /* - * Case: new_ext is smaller than original extent - * oext |---------------| - * new_ext |-----------| - * end_ext |---| - */ - if (le32_to_cpu(oext->ee_block) <= new_ext_end && - new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) { - end_ext.ee_len = - cpu_to_le16(le32_to_cpu(oext->ee_block) + - oext_alen - 1 - new_ext_end); - copy_extent_status(oext, &end_ext); - end_ext_alen = ext4_ext_get_actual_len(&end_ext); - ext4_ext_store_pblock(&end_ext, - (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen)); - end_ext.ee_block = - cpu_to_le32(le32_to_cpu(o_end->ee_block) + - oext_alen - end_ext_alen); - } - - ret = mext_insert_extents(handle, orig_inode, orig_path, o_start, - o_end, &start_ext, &new_ext, &end_ext); -out: - return ret; -} - -/** - * mext_calc_swap_extents - Calculate extents for extent swapping. 
- * - * @tmp_dext: the extent that will belong to the original inode - * @tmp_oext: the extent that will belong to the donor inode - * @orig_off: block offset of original inode - * @donor_off: block offset of donor inode - * @max_count: the maximum length of extents - * - * Return 0 on success, or a negative error value on failure. - */ -static int -mext_calc_swap_extents(struct ext4_extent *tmp_dext, - struct ext4_extent *tmp_oext, - ext4_lblk_t orig_off, ext4_lblk_t donor_off, - ext4_lblk_t max_count) -{ - ext4_lblk_t diff, orig_diff; - struct ext4_extent dext_old, oext_old; - - BUG_ON(orig_off != donor_off); - - /* original and donor extents have to cover the same block offset */ - if (orig_off < le32_to_cpu(tmp_oext->ee_block) || - le32_to_cpu(tmp_oext->ee_block) + - ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off) - return -ENODATA; - - if (orig_off < le32_to_cpu(tmp_dext->ee_block) || - le32_to_cpu(tmp_dext->ee_block) + - ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off) - return -ENODATA; - - dext_old = *tmp_dext; - oext_old = *tmp_oext; - - /* When tmp_dext is too large, pick up the target range. */ - diff = donor_off - le32_to_cpu(tmp_dext->ee_block); - - ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff); - le32_add_cpu(&tmp_dext->ee_block, diff); - le16_add_cpu(&tmp_dext->ee_len, -diff); - - if (max_count < ext4_ext_get_actual_len(tmp_dext)) - tmp_dext->ee_len = cpu_to_le16(max_count); - - orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block); - ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff); - - /* Adjust extent length if donor extent is larger than orig */ - if (ext4_ext_get_actual_len(tmp_dext) > - ext4_ext_get_actual_len(tmp_oext) - orig_diff) - tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) - - orig_diff); - - tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext)); - - copy_extent_status(&oext_old, tmp_dext); - copy_extent_status(&dext_old, tmp_oext); - - return 0; -} - -/** * mext_check_coverage - Check that all extents in range has the same type * * @inode: inode in question @@ -619,171 +114,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count, } ret = 1; out: - if (path) { - ext4_ext_drop_refs(path); - kfree(path); - } + ext4_ext_drop_refs(path); + kfree(path); return ret; } /** - * mext_replace_branches - Replace original extents with new extents - * - * @handle: journal handle - * @orig_inode: original inode - * @donor_inode: donor inode - * @from: block offset of orig_inode - * @count: block count to be replaced - * @err: pointer to save return value - * - * Replace original inode extents and donor inode extents page by page. - * We implement this replacement in the following three steps: - * 1. Save the block information of original and donor inodes into - * dummy extents. - * 2. Change the block information of original inode to point at the - * donor inode blocks. - * 3. Change the block information of donor inode to point at the saved - * original inode blocks in the dummy extents. - * - * Return replaced block count. 
- */ -static int -mext_replace_branches(handle_t *handle, struct inode *orig_inode, - struct inode *donor_inode, ext4_lblk_t from, - ext4_lblk_t count, int *err) -{ - struct ext4_ext_path *orig_path = NULL; - struct ext4_ext_path *donor_path = NULL; - struct ext4_extent *oext, *dext; - struct ext4_extent tmp_dext, tmp_oext; - ext4_lblk_t orig_off = from, donor_off = from; - int depth; - int replaced_count = 0; - int dext_alen; - - *err = ext4_es_remove_extent(orig_inode, from, count); - if (*err) - goto out; - - *err = ext4_es_remove_extent(donor_inode, from, count); - if (*err) - goto out; - - /* Get the original extent for the block "orig_off" */ - *err = get_ext_path(orig_inode, orig_off, &orig_path); - if (*err) - goto out; - - /* Get the donor extent for the head */ - *err = get_ext_path(donor_inode, donor_off, &donor_path); - if (*err) - goto out; - depth = ext_depth(orig_inode); - oext = orig_path[depth].p_ext; - tmp_oext = *oext; - - depth = ext_depth(donor_inode); - dext = donor_path[depth].p_ext; - if (unlikely(!dext)) - goto missing_donor_extent; - tmp_dext = *dext; - - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, - donor_off, count); - if (*err) - goto out; - - /* Loop for the donor extents */ - while (1) { - /* The extent for donor must be found. */ - if (unlikely(!dext)) { - missing_donor_extent: - EXT4_ERROR_INODE(donor_inode, - "The extent for donor must be found"); - *err = -EIO; - goto out; - } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { - EXT4_ERROR_INODE(donor_inode, - "Donor offset(%u) and the first block of donor " - "extent(%u) should be equal", - donor_off, - le32_to_cpu(tmp_dext.ee_block)); - *err = -EIO; - goto out; - } - - /* Set donor extent to orig extent */ - *err = mext_leaf_block(handle, orig_inode, - orig_path, &tmp_dext, &orig_off); - if (*err) - goto out; - - /* Set orig extent to donor extent */ - *err = mext_leaf_block(handle, donor_inode, - donor_path, &tmp_oext, &donor_off); - if (*err) - goto out; - - dext_alen = ext4_ext_get_actual_len(&tmp_dext); - replaced_count += dext_alen; - donor_off += dext_alen; - orig_off += dext_alen; - - BUG_ON(replaced_count > count); - /* Already moved the expected blocks */ - if (replaced_count >= count) - break; - - if (orig_path) - ext4_ext_drop_refs(orig_path); - *err = get_ext_path(orig_inode, orig_off, &orig_path); - if (*err) - goto out; - depth = ext_depth(orig_inode); - oext = orig_path[depth].p_ext; - tmp_oext = *oext; - - if (donor_path) - ext4_ext_drop_refs(donor_path); - *err = get_ext_path(donor_inode, donor_off, &donor_path); - if (*err) - goto out; - depth = ext_depth(donor_inode); - dext = donor_path[depth].p_ext; - tmp_dext = *dext; - - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, - donor_off, count - replaced_count); - if (*err) - goto out; - } - -out: - if (orig_path) { - ext4_ext_drop_refs(orig_path); - kfree(orig_path); - } - if (donor_path) { - ext4_ext_drop_refs(donor_path); - kfree(donor_path); - } - - return replaced_count; -} - -/** * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2 * * @inode1: the inode structure * @inode2: the inode structure - * @index: page index + * @index1: page index + * @index2: page index * @page: result page vector * * Grab two locked pages for inode's by inode order */ static int mext_page_double_lock(struct inode *inode1, struct inode *inode2, - pgoff_t index, struct page *page[2]) + pgoff_t index1, pgoff_t index2, struct page *page[2]) { struct address_space *mapping[2]; unsigned fl = 
AOP_FLAG_NOFS; @@ -793,15 +142,18 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2, mapping[0] = inode1->i_mapping; mapping[1] = inode2->i_mapping; } else { + pgoff_t tmp = index1; + index1 = index2; + index2 = tmp; mapping[0] = inode2->i_mapping; mapping[1] = inode1->i_mapping; } - page[0] = grab_cache_page_write_begin(mapping[0], index, fl); + page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); if (!page[0]) return -ENOMEM; - page[1] = grab_cache_page_write_begin(mapping[1], index, fl); + page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); if (!page[1]) { unlock_page(page[0]); page_cache_release(page[0]); @@ -893,25 +245,27 @@ out: * @o_filp: file structure of original file * @donor_inode: donor inode * @orig_page_offset: page index on original file + * @donor_page_offset: page index on donor file * @data_offset_in_page: block index where data swapping starts * @block_len_in_page: the number of blocks to be swapped * @unwritten: orig extent is unwritten or not * @err: pointer to save return value * * Save the data in original inode blocks and replace original inode extents - * with donor inode extents by calling mext_replace_branches(). + * with donor inode extents by calling ext4_swap_extents(). * Finally, write out the saved data in new original inode blocks. Return * replaced block count. */ static int move_extent_per_page(struct file *o_filp, struct inode *donor_inode, - pgoff_t orig_page_offset, int data_offset_in_page, - int block_len_in_page, int unwritten, int *err) + pgoff_t orig_page_offset, pgoff_t donor_page_offset, + int data_offset_in_page, + int block_len_in_page, int unwritten, int *err) { struct inode *orig_inode = file_inode(o_filp); struct page *pagep[2] = {NULL, NULL}; handle_t *handle; - ext4_lblk_t orig_blk_offset; + ext4_lblk_t orig_blk_offset, donor_blk_offset; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; @@ -939,6 +293,9 @@ again: orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; + donor_blk_offset = donor_page_offset * blocks_per_page + + data_offset_in_page; + /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { @@ -959,7 +316,7 @@ again: replaced_size = data_size; *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset, - pagep); + donor_page_offset, pagep); if (unlikely(*err < 0)) goto stop_journal; /* @@ -978,7 +335,7 @@ again: if (*err) goto drop_data_sem; - unwritten &= mext_check_coverage(donor_inode, orig_blk_offset, + unwritten &= mext_check_coverage(donor_inode, donor_blk_offset, block_len_in_page, 1, err); if (*err) goto drop_data_sem; @@ -994,9 +351,10 @@ again: *err = -EBUSY; goto drop_data_sem; } - replaced_count = mext_replace_branches(handle, orig_inode, - donor_inode, orig_blk_offset, - block_len_in_page, err); + replaced_count = ext4_swap_extents(handle, orig_inode, + donor_inode, orig_blk_offset, + donor_blk_offset, + block_len_in_page, 1, err); drop_data_sem: ext4_double_up_write_data_sem(orig_inode, donor_inode); goto unlock_pages; @@ -1014,9 +372,9 @@ data_copy: goto unlock_pages; } ext4_double_down_write_data_sem(orig_inode, donor_inode); - replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, - orig_blk_offset, - block_len_in_page, err); + replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode, + orig_blk_offset, donor_blk_offset, + block_len_in_page, 1, err); 
ext4_double_up_write_data_sem(orig_inode, donor_inode); if (*err) { if (replaced_count) { @@ -1061,9 +419,9 @@ repair_branches: * Try to swap extents to it's original places */ ext4_double_down_write_data_sem(orig_inode, donor_inode); - replaced_count = mext_replace_branches(handle, donor_inode, orig_inode, - orig_blk_offset, - block_len_in_page, &err2); + replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode, + orig_blk_offset, donor_blk_offset, + block_len_in_page, 0, &err2); ext4_double_up_write_data_sem(orig_inode, donor_inode); if (replaced_count != block_len_in_page) { EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset), @@ -1093,10 +451,14 @@ mext_check_arguments(struct inode *orig_inode, struct inode *donor_inode, __u64 orig_start, __u64 donor_start, __u64 *len) { - ext4_lblk_t orig_blocks, donor_blocks; + __u64 orig_eof, donor_eof; unsigned int blkbits = orig_inode->i_blkbits; unsigned int blocksize = 1 << blkbits; + orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits; + donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits; + + if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { ext4_debug("ext4 move extent: suid or sgid is set" " to donor file [ino:orig %lu, donor %lu]\n", @@ -1112,7 +474,7 @@ mext_check_arguments(struct inode *orig_inode, ext4_debug("ext4 move extent: The argument files should " "not be swapfile [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); - return -EINVAL; + return -EBUSY; } /* Ext4 move extent supports only extent based file */ @@ -1132,67 +494,28 @@ mext_check_arguments(struct inode *orig_inode, } /* Start offset should be same */ - if (orig_start != donor_start) { + if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != + (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { ext4_debug("ext4 move extent: orig and donor's start " - "offset are not same [ino:orig %lu, donor %lu]\n", + "offset are not aligned [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if ((orig_start >= EXT_MAX_BLOCKS) || + (donor_start >= EXT_MAX_BLOCKS) || (*len > EXT_MAX_BLOCKS) || + (donor_start + *len >= EXT_MAX_BLOCKS) || (orig_start + *len >= EXT_MAX_BLOCKS)) { ext4_debug("ext4 move extent: Can't handle over [%u] blocks " "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } - - if (orig_inode->i_size > donor_inode->i_size) { - donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits; - /* TODO: eliminate this artificial restriction */ - if (orig_start >= donor_blocks) { - ext4_debug("ext4 move extent: orig start offset " - "[%llu] should be less than donor file blocks " - "[%u] [ino:orig %lu, donor %lu]\n", - orig_start, donor_blocks, - orig_inode->i_ino, donor_inode->i_ino); - return -EINVAL; - } - - /* TODO: eliminate this artificial restriction */ - if (orig_start + *len > donor_blocks) { - ext4_debug("ext4 move extent: End offset [%llu] should " - "be less than donor file blocks [%u]." 
- "So adjust length from %llu to %llu " - "[ino:orig %lu, donor %lu]\n", - orig_start + *len, donor_blocks, - *len, donor_blocks - orig_start, - orig_inode->i_ino, donor_inode->i_ino); - *len = donor_blocks - orig_start; - } - } else { - orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits; - if (orig_start >= orig_blocks) { - ext4_debug("ext4 move extent: start offset [%llu] " - "should be less than original file blocks " - "[%u] [ino:orig %lu, donor %lu]\n", - orig_start, orig_blocks, - orig_inode->i_ino, donor_inode->i_ino); - return -EINVAL; - } - - if (orig_start + *len > orig_blocks) { - ext4_debug("ext4 move extent: Adjust length " - "from %llu to %llu. Because it should be " - "less than original file blocks " - "[ino:orig %lu, donor %lu]\n", - *len, orig_blocks - orig_start, - orig_inode->i_ino, donor_inode->i_ino); - *len = orig_blocks - orig_start; - } - } - + if (orig_eof < orig_start + *len - 1) + *len = orig_eof - orig_start; + if (donor_eof < donor_start + *len - 1) + *len = donor_eof - donor_start; if (!*len) { ext4_debug("ext4 move extent: len should not be 0 " "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, @@ -1208,60 +531,26 @@ mext_check_arguments(struct inode *orig_inode, * * @o_filp: file structure of the original file * @d_filp: file structure of the donor file - * @orig_start: start offset in block for orig - * @donor_start: start offset in block for donor + * @orig_blk: start offset in block for orig + * @donor_blk: start offset in block for donor * @len: the number of blocks to be moved * @moved_len: moved block length * * This function returns 0 and moved block length is set in moved_len * if succeed, otherwise returns error value. * - * Note: ext4_move_extents() proceeds the following order. - * 1:ext4_move_extents() calculates the last block number of moving extent - * function by the start block number (orig_start) and the number of blocks - * to be moved (len) specified as arguments. - * If the {orig, donor}_start points a hole, the extent's start offset - * pointed by ext_cur (current extent), holecheck_path, orig_path are set - * after hole behind. - * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent - * or the ext_cur exceeds the block_end which is last logical block number. - * 3:To get the length of continues area, call mext_next_extent() - * specified with the ext_cur (initial value is holecheck_path) re-cursive, - * until find un-continuous extent, the start logical block number exceeds - * the block_end or the extent points to the last extent. - * 4:Exchange the original inode data with donor inode data - * from orig_page_offset to seq_end_page. - * The start indexes of data are specified as arguments. - * That of the original inode is orig_page_offset, - * and the donor inode is also orig_page_offset - * (To easily handle blocksize != pagesize case, the offset for the - * donor inode is block unit). - * 5:Update holecheck_path and orig_path to points a next proceeding extent, - * then returns to step 2. - * 6:Release holecheck_path, orig_path and set the len to moved_len - * which shows the number of moved blocks. - * The moved_len is useful for the command to calculate the file offset - * for starting next move extent ioctl. - * 7:Return 0 on success, or a negative error value on failure. 
*/ int -ext4_move_extents(struct file *o_filp, struct file *d_filp, - __u64 orig_start, __u64 donor_start, __u64 len, - __u64 *moved_len) +ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, + __u64 donor_blk, __u64 len, __u64 *moved_len) { struct inode *orig_inode = file_inode(o_filp); struct inode *donor_inode = file_inode(d_filp); - struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL; - struct ext4_extent *ext_prev, *ext_cur, *ext_dummy; - ext4_lblk_t block_start = orig_start; - ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0; - ext4_lblk_t rest_blocks; - pgoff_t orig_page_offset = 0, seq_end_page; - int ret, depth, last_extent = 0; + struct ext4_ext_path *path = NULL; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; - int data_offset_in_page; - int block_len_in_page; - int unwritten; + ext4_lblk_t o_end, o_start = orig_blk; + ext4_lblk_t d_start = donor_blk; + int ret; if (orig_inode->i_sb != donor_inode->i_sb) { ext4_debug("ext4 move extent: The argument files " @@ -1303,121 +592,58 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, /* Protect extent tree against block allocations via delalloc */ ext4_double_down_write_data_sem(orig_inode, donor_inode); /* Check the filesystem environment whether move_extent can be done */ - ret = mext_check_arguments(orig_inode, donor_inode, orig_start, - donor_start, &len); + ret = mext_check_arguments(orig_inode, donor_inode, orig_blk, + donor_blk, &len); if (ret) goto out; + o_end = o_start + len; - file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits; - block_end = block_start + len - 1; - if (file_end < block_end) - len -= block_end - file_end; + while (o_start < o_end) { + struct ext4_extent *ex; + ext4_lblk_t cur_blk, next_blk; + pgoff_t orig_page_index, donor_page_index; + int offset_in_page; + int unwritten, cur_len; - ret = get_ext_path(orig_inode, block_start, &orig_path); - if (ret) - goto out; - - /* Get path structure to check the hole */ - ret = get_ext_path(orig_inode, block_start, &holecheck_path); - if (ret) - goto out; - - depth = ext_depth(orig_inode); - ext_cur = holecheck_path[depth].p_ext; - - /* - * Get proper starting location of block replacement if block_start was - * within the hole. - */ - if (le32_to_cpu(ext_cur->ee_block) + - ext4_ext_get_actual_len(ext_cur) - 1 < block_start) { - /* - * The hole exists between extents or the tail of - * original file. - */ - last_extent = mext_next_extent(orig_inode, - holecheck_path, &ext_cur); - if (last_extent < 0) { - ret = last_extent; - goto out; - } - last_extent = mext_next_extent(orig_inode, orig_path, - &ext_dummy); - if (last_extent < 0) { - ret = last_extent; + ret = get_ext_path(orig_inode, o_start, &path); + if (ret) goto out; - } - seq_start = le32_to_cpu(ext_cur->ee_block); - } else if (le32_to_cpu(ext_cur->ee_block) > block_start) - /* The hole exists at the beginning of original file. */ - seq_start = le32_to_cpu(ext_cur->ee_block); - else - seq_start = block_start; - - /* No blocks within the specified range. 
*/ - if (le32_to_cpu(ext_cur->ee_block) > block_end) { - ext4_debug("ext4 move extent: The specified range of file " - "may be the hole\n"); - ret = -EINVAL; - goto out; - } - - /* Adjust start blocks */ - add_blocks = min(le32_to_cpu(ext_cur->ee_block) + - ext4_ext_get_actual_len(ext_cur), block_end + 1) - - max(le32_to_cpu(ext_cur->ee_block), block_start); - - while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) { - seq_blocks += add_blocks; - - /* Adjust tail blocks */ - if (seq_start + seq_blocks - 1 > block_end) - seq_blocks = block_end - seq_start + 1; - - ext_prev = ext_cur; - last_extent = mext_next_extent(orig_inode, holecheck_path, - &ext_cur); - if (last_extent < 0) { - ret = last_extent; - break; - } - add_blocks = ext4_ext_get_actual_len(ext_cur); - - /* - * Extend the length of contiguous block (seq_blocks) - * if extents are contiguous. - */ - if (ext4_can_extents_be_merged(orig_inode, - ext_prev, ext_cur) && - block_end >= le32_to_cpu(ext_cur->ee_block) && - !last_extent) + ex = path[path->p_depth].p_ext; + next_blk = ext4_ext_next_allocated_block(path); + cur_blk = le32_to_cpu(ex->ee_block); + cur_len = ext4_ext_get_actual_len(ex); + /* Check hole before the start pos */ + if (cur_blk + cur_len - 1 < o_start) { + if (next_blk == EXT_MAX_BLOCKS) { + o_start = o_end; + ret = -ENODATA; + goto out; + } + d_start += next_blk - o_start; + o_start = next_blk; continue; - - /* Is original extent is unwritten */ - unwritten = ext4_ext_is_unwritten(ext_prev); - - data_offset_in_page = seq_start % blocks_per_page; - - /* - * Calculate data blocks count that should be swapped - * at the first page. - */ - if (data_offset_in_page + seq_blocks > blocks_per_page) { - /* Swapped blocks are across pages */ - block_len_in_page = - blocks_per_page - data_offset_in_page; - } else { - /* Swapped blocks are in a page */ - block_len_in_page = seq_blocks; + /* Check hole after the start pos */ + } else if (cur_blk > o_start) { + /* Skip hole */ + d_start += cur_blk - o_start; + o_start = cur_blk; + /* Extent inside requested range ?*/ + if (cur_blk >= o_end) + goto out; + } else { /* in_range(o_start, o_blk, o_len) */ + cur_len += cur_blk - o_start; } - - orig_page_offset = seq_start >> - (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); - seq_end_page = (seq_start + seq_blocks - 1) >> - (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); - seq_start = le32_to_cpu(ext_cur->ee_block); - rest_blocks = seq_blocks; - + unwritten = ext4_ext_is_unwritten(ex); + if (o_end - o_start < cur_len) + cur_len = o_end - o_start; + + orig_page_index = o_start >> (PAGE_CACHE_SHIFT - + orig_inode->i_blkbits); + donor_page_index = d_start >> (PAGE_CACHE_SHIFT - + donor_inode->i_blkbits); + offset_in_page = o_start % blocks_per_page; + if (cur_len > blocks_per_page- offset_in_page) + cur_len = blocks_per_page - offset_in_page; /* * Up semaphore to avoid following problems: * a. 
transaction deadlock among ext4_journal_start, @@ -1426,77 +652,29 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, * in move_extent_per_page */ ext4_double_up_write_data_sem(orig_inode, donor_inode); - - while (orig_page_offset <= seq_end_page) { - - /* Swap original branches with new branches */ - block_len_in_page = move_extent_per_page( - o_filp, donor_inode, - orig_page_offset, - data_offset_in_page, - block_len_in_page, - unwritten, &ret); - - /* Count how many blocks we have exchanged */ - *moved_len += block_len_in_page; - if (ret < 0) - break; - if (*moved_len > len) { - EXT4_ERROR_INODE(orig_inode, - "We replaced blocks too much! " - "sum of replaced: %llu requested: %llu", - *moved_len, len); - ret = -EIO; - break; - } - - orig_page_offset++; - data_offset_in_page = 0; - rest_blocks -= block_len_in_page; - if (rest_blocks > blocks_per_page) - block_len_in_page = blocks_per_page; - else - block_len_in_page = rest_blocks; - } - + /* Swap original branches with new branches */ + move_extent_per_page(o_filp, donor_inode, + orig_page_index, donor_page_index, + offset_in_page, cur_len, + unwritten, &ret); ext4_double_down_write_data_sem(orig_inode, donor_inode); if (ret < 0) break; - - /* Decrease buffer counter */ - if (holecheck_path) - ext4_ext_drop_refs(holecheck_path); - ret = get_ext_path(orig_inode, seq_start, &holecheck_path); - if (ret) - break; - depth = holecheck_path->p_depth; - - /* Decrease buffer counter */ - if (orig_path) - ext4_ext_drop_refs(orig_path); - ret = get_ext_path(orig_inode, seq_start, &orig_path); - if (ret) - break; - - ext_cur = holecheck_path[depth].p_ext; - add_blocks = ext4_ext_get_actual_len(ext_cur); - seq_blocks = 0; - + o_start += cur_len; + d_start += cur_len; } + *moved_len = o_start - orig_blk; + if (*moved_len > len) + *moved_len = len; + out: if (*moved_len) { ext4_discard_preallocations(orig_inode); ext4_discard_preallocations(donor_inode); } - if (orig_path) { - ext4_ext_drop_refs(orig_path); - kfree(orig_path); - } - if (holecheck_path) { - ext4_ext_drop_refs(holecheck_path); - kfree(holecheck_path); - } + ext4_ext_drop_refs(path); + kfree(path); ext4_double_up_write_data_sem(orig_inode, donor_inode); ext4_inode_resume_unlocked_dio(orig_inode); ext4_inode_resume_unlocked_dio(donor_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 603e4ebbd0a..426211882f7 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -53,7 +53,7 @@ static struct buffer_head *ext4_append(handle_t *handle, ext4_lblk_t *block) { struct buffer_head *bh; - int err = 0; + int err; if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && ((inode->i_size >> 10) >= @@ -62,9 +62,9 @@ static struct buffer_head *ext4_append(handle_t *handle, *block = inode->i_size >> inode->i_sb->s_blocksize_bits; - bh = ext4_bread(handle, inode, *block, 1, &err); - if (!bh) - return ERR_PTR(err); + bh = ext4_bread(handle, inode, *block, 1); + if (IS_ERR(bh)) + return bh; inode->i_size += inode->i_sb->s_blocksize; EXT4_I(inode)->i_disksize = inode->i_size; BUFFER_TRACE(bh, "get_write_access"); @@ -94,20 +94,20 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, { struct buffer_head *bh; struct ext4_dir_entry *dirent; - int err = 0, is_dx_block = 0; + int is_dx_block = 0; - bh = ext4_bread(NULL, inode, block, 0, &err); - if (!bh) { - if (err == 0) { - ext4_error_inode(inode, __func__, line, block, - "Directory hole found"); - return ERR_PTR(-EIO); - } + bh = ext4_bread(NULL, inode, block, 0); + if (IS_ERR(bh)) { __ext4_warning(inode->i_sb, __func__, 
line, - "error reading directory block " - "(ino %lu, block %lu)", inode->i_ino, + "error %ld reading directory block " + "(ino %lu, block %lu)", PTR_ERR(bh), inode->i_ino, (unsigned long) block); - return ERR_PTR(err); + + return bh; + } + if (!bh) { + ext4_error_inode(inode, __func__, line, block, "Directory hole found"); + return ERR_PTR(-EIO); } dirent = (struct ext4_dir_entry *) bh->b_data; /* Determine whether or not we have an index block */ @@ -124,8 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, "directory leaf block found instead of index block"); return ERR_PTR(-EIO); } - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) || + if (!ext4_has_metadata_csum(inode->i_sb) || buffer_verified(bh)) return bh; @@ -253,8 +252,7 @@ static unsigned dx_node_limit(struct inode *dir); static struct dx_frame *dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, - struct dx_frame *frame, - int *err); + struct dx_frame *frame); static void dx_release(struct dx_frame *frames); static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry map[]); @@ -270,8 +268,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, - struct ext4_dir_entry_2 **res_dir, - int *err); + struct ext4_dir_entry_2 **res_dir); static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode); @@ -340,8 +337,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) { struct ext4_dir_entry_tail *t; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return 1; t = get_dirent_tail(inode, dirent); @@ -362,8 +358,7 @@ static void ext4_dirent_csum_set(struct inode *inode, { struct ext4_dir_entry_tail *t; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return; t = get_dirent_tail(inode, dirent); @@ -438,8 +433,7 @@ static int ext4_dx_csum_verify(struct inode *inode, struct dx_tail *t; int count_offset, limit, count; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return 1; c = get_dx_countlimit(inode, dirent, &count_offset); @@ -468,8 +462,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) struct dx_tail *t; int count_offset, limit, count; - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return; c = get_dx_countlimit(inode, dirent, &count_offset); @@ -557,8 +550,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - EXT4_DIR_REC_LEN(2) - infosize; - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } @@ -567,8 +559,7 @@ static inline unsigned dx_node_limit(struct inode *dir) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct 
dx_tail); return entry_space / sizeof(struct dx_entry); } @@ -641,7 +632,9 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; struct stats stats; printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); - if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue; + bh = ext4_bread(NULL,dir, block, 0); + if (!bh || IS_ERR(bh)) + continue; stats = levels? dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); @@ -669,29 +662,25 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, */ static struct dx_frame * dx_probe(const struct qstr *d_name, struct inode *dir, - struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) + struct dx_hash_info *hinfo, struct dx_frame *frame_in) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; - struct buffer_head *bh; struct dx_frame *frame = frame_in; + struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR); u32 hash; - frame->bh = NULL; - bh = ext4_read_dirblock(dir, 0, INDEX); - if (IS_ERR(bh)) { - *err = PTR_ERR(bh); - goto fail; - } - root = (struct dx_root *) bh->b_data; + frame->bh = ext4_read_dirblock(dir, 0, INDEX); + if (IS_ERR(frame->bh)) + return (struct dx_frame *) frame->bh; + + root = (struct dx_root *) frame->bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", root->info.hash_version); - brelse(bh); - *err = ERR_BAD_DX_DIR; goto fail; } hinfo->hash_version = root->info.hash_version; @@ -705,16 +694,12 @@ dx_probe(const struct qstr *d_name, struct inode *dir, if (root->info.unused_flags & 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", root->info.unused_flags); - brelse(bh); - *err = ERR_BAD_DX_DIR; goto fail; } if ((indirect = root->info.indirect_levels) > 1) { ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", root->info.indirect_levels); - brelse(bh); - *err = ERR_BAD_DX_DIR; goto fail; } @@ -724,27 +709,21 @@ dx_probe(const struct qstr *d_name, struct inode *dir, if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning(dir->i_sb, "dx entry: limit != root limit"); - brelse(bh); - *err = ERR_BAD_DX_DIR; goto fail; } dxtrace(printk("Look up %x", hash)); - while (1) - { + while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning(dir->i_sb, "dx entry: no count or count > limit"); - brelse(bh); - *err = ERR_BAD_DX_DIR; - goto fail2; + goto fail; } p = entries + 1; q = entries + count - 1; - while (p <= q) - { + while (p <= q) { m = p + (q - p)/2; dxtrace(printk(".")); if (dx_get_hash(m) > hash) @@ -753,8 +732,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, p = m + 1; } - if (0) // linear search cross check - { + if (0) { // linear search cross check unsigned n = count - 1; at = entries; while (n--) @@ -771,38 +749,35 @@ dx_probe(const struct qstr *d_name, struct inode *dir, at = p - 1; dxtrace(printk(" %x->%u\n", at == entries? 
0: dx_get_hash(at), dx_get_block(at))); - frame->bh = bh; frame->entries = entries; frame->at = at; - if (!indirect--) return frame; - bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); - if (IS_ERR(bh)) { - *err = PTR_ERR(bh); - goto fail2; + if (!indirect--) + return frame; + frame++; + frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); + if (IS_ERR(frame->bh)) { + ret_err = (struct dx_frame *) frame->bh; + frame->bh = NULL; + goto fail; } - entries = ((struct dx_node *) bh->b_data)->entries; + entries = ((struct dx_node *) frame->bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit (dir)) { ext4_warning(dir->i_sb, "dx entry: limit != node limit"); - brelse(bh); - *err = ERR_BAD_DX_DIR; - goto fail2; + goto fail; } - frame++; - frame->bh = NULL; } -fail2: +fail: while (frame >= frame_in) { brelse(frame->bh); frame--; } -fail: - if (*err == ERR_BAD_DX_DIR) + if (ret_err == ERR_PTR(ERR_BAD_DX_DIR)) ext4_warning(dir->i_sb, "Corrupt dir inode %lu, running e2fsck is " "recommended.", dir->i_ino); - return NULL; + return ret_err; } static void dx_release (struct dx_frame *frames) @@ -988,9 +963,9 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, } hinfo.hash = start_hash; hinfo.minor_hash = 0; - frame = dx_probe(NULL, dir, &hinfo, frames, &err); - if (!frame) - return err; + frame = dx_probe(NULL, dir, &hinfo, frames); + if (IS_ERR(frame)) + return PTR_ERR(frame); /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { @@ -1227,8 +1202,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, buffer */ int num = 0; ext4_lblk_t nblocks; - int i, err = 0; - int namelen; + int i, namelen; *res_dir = NULL; sb = dir->i_sb; @@ -1258,17 +1232,13 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, goto restart; } if (is_dx(dir)) { - bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); + bh = ext4_dx_find_entry(dir, d_name, res_dir); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ - if (err == -ENOENT) - return NULL; - if (err && err != ERR_BAD_DX_DIR) - return ERR_PTR(err); - if (bh) + if (!IS_ERR(bh) || PTR_ERR(bh) != ERR_BAD_DX_DIR) return bh; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); @@ -1298,10 +1268,10 @@ restart: break; } num++; - bh = ext4_getblk(NULL, dir, b++, 0, &err); - if (unlikely(err)) { + bh = ext4_getblk(NULL, dir, b++, 0); + if (unlikely(IS_ERR(bh))) { if (ra_max == 0) - return ERR_PTR(err); + return bh; break; } bh_use[ra_max] = bh; @@ -1366,7 +1336,7 @@ cleanup_and_exit: } static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, - struct ext4_dir_entry_2 **res_dir, int *err) + struct ext4_dir_entry_2 **res_dir) { struct super_block * sb = dir->i_sb; struct dx_hash_info hinfo; @@ -1375,25 +1345,23 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q ext4_lblk_t block; int retval; - if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err))) - return NULL; + frame = dx_probe(d_name, dir, &hinfo, frames); + if (IS_ERR(frame)) + return (struct buffer_head *) frame; do { block = dx_get_block(frame->at); bh = ext4_read_dirblock(dir, block, DIRENT); - if (IS_ERR(bh)) { - *err = PTR_ERR(bh); + if (IS_ERR(bh)) goto errout; - } + retval = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); - if (retval == 1) { /* Success! 
*/ - dx_release(frames); - return bh; - } + if (retval == 1) + goto success; brelse(bh); if (retval == -1) { - *err = ERR_BAD_DX_DIR; + bh = ERR_PTR(ERR_BAD_DX_DIR); goto errout; } @@ -1402,18 +1370,19 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q frames, NULL); if (retval < 0) { ext4_warning(sb, - "error reading index page in directory #%lu", - dir->i_ino); - *err = retval; + "error %d reading index page in directory #%lu", + retval, dir->i_ino); + bh = ERR_PTR(retval); goto errout; } } while (retval == 1); - *err = -ENOENT; + bh = NULL; errout: dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); - dx_release (frames); - return NULL; +success: + dx_release(frames); + return bh; } static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) @@ -1441,7 +1410,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi dentry); return ERR_PTR(-EIO); } - inode = ext4_iget(dir->i_sb, ino); + inode = ext4_iget_normal(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", @@ -1474,7 +1443,7 @@ struct dentry *ext4_get_parent(struct dentry *child) return ERR_PTR(-EIO); } - return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino)); + return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino)); } /* @@ -1533,7 +1502,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) */ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct buffer_head **bh,struct dx_frame *frame, - struct dx_hash_info *hinfo, int *error) + struct dx_hash_info *hinfo) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count, continued; @@ -1548,16 +1517,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, int csum_size = 0; int err = 0, i; - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); bh2 = ext4_append(handle, dir, &newblock); if (IS_ERR(bh2)) { brelse(*bh); *bh = NULL; - *error = PTR_ERR(bh2); - return NULL; + return (struct ext4_dir_entry_2 *) bh2; } BUFFER_TRACE(*bh, "get_write_access"); @@ -1617,8 +1584,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); /* Which block gets the new entry? 
*/ - if (hinfo->hash >= hash2) - { + if (hinfo->hash >= hash2) { swap(*bh, bh2); de = de2; } @@ -1638,8 +1604,7 @@ journal_error: brelse(bh2); *bh = NULL; ext4_std_error(dir->i_sb, err); - *error = err; - return NULL; + return ERR_PTR(err); } int ext4_find_dest_de(struct inode *dir, struct inode *inode, @@ -1718,8 +1683,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, int csum_size = 0; int err; - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (!de) { @@ -1786,8 +1750,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct fake_dirent *fde; int csum_size = 0; - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; @@ -1853,31 +1816,39 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; ext4fs_dirhash(name, namelen, &hinfo); + memset(frames, 0, sizeof(frames)); frame = frames; frame->entries = entries; frame->at = entries; frame->bh = bh; bh = bh2; - ext4_handle_dirty_dx_node(handle, dir, frame->bh); - ext4_handle_dirty_dirent_node(handle, dir, bh); + retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh); + if (retval) + goto out_frames; + retval = ext4_handle_dirty_dirent_node(handle, dir, bh); + if (retval) + goto out_frames; - de = do_split(handle,dir, &bh, frame, &hinfo, &retval); - if (!de) { - /* - * Even if the block split failed, we have to properly write - * out all the changes we did so far. Otherwise we can end up - * with corrupted filesystem. - */ - ext4_mark_inode_dirty(handle, dir); - dx_release(frames); - return retval; + de = do_split(handle,dir, &bh, frame, &hinfo); + if (IS_ERR(de)) { + retval = PTR_ERR(de); + goto out_frames; } dx_release(frames); retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); return retval; +out_frames: + /* + * Even if the block split failed, we have to properly write + * out all the changes we did so far. Otherwise we can end up + * with corrupted filesystem. 
+ */ + ext4_mark_inode_dirty(handle, dir); + dx_release(frames); + return retval; } /* @@ -1904,8 +1875,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, ext4_lblk_t block, blocks; int csum_size = 0; - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(inode->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; @@ -1982,9 +1952,9 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct ext4_dir_entry_2 *de; int err; - frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err); - if (!frame) - return err; + frame = dx_probe(&dentry->d_name, dir, &hinfo, frames); + if (IS_ERR(frame)) + return PTR_ERR(frame); entries = frame->entries; at = frame->at; bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT); @@ -2095,9 +2065,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, goto cleanup; } } - de = do_split(handle, dir, &bh, frame, &hinfo, &err); - if (!de) + de = do_split(handle, dir, &bh, frame, &hinfo); + if (IS_ERR(de)) { + err = PTR_ERR(de); goto cleanup; + } err = add_dirent_to_buf(handle, dentry, inode, de, bh); goto cleanup; @@ -2167,8 +2139,7 @@ static int ext4_delete_entry(handle_t *handle, return err; } - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); BUFFER_TRACE(bh, "get_write_access"); @@ -2387,8 +2358,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir, int csum_size = 0; int err; - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { @@ -2403,10 +2373,6 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir, dir_block = ext4_append(handle, inode, &block); if (IS_ERR(dir_block)) return PTR_ERR(dir_block); - BUFFER_TRACE(dir_block, "get_write_access"); - err = ext4_journal_get_write_access(handle, dir_block); - if (err) - goto out; de = (struct ext4_dir_entry_2 *)dir_block->b_data; ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); set_nlink(inode, 2); @@ -2573,7 +2539,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) int err = 0, rc; bool dirty = false; - if (!sbi->s_journal) + if (!sbi->s_journal || is_bad_inode(inode)) return 0; WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && @@ -3190,6 +3156,39 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) } } +static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent, + int credits, handle_t **h) +{ + struct inode *wh; + handle_t *handle; + int retries = 0; + + /* + * for inode block, sb block, group summaries, + * and inode bitmap + */ + credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + + EXT4_XATTR_TRANS_BLOCKS + 4); +retry: + wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE, + &ent->dentry->d_name, 0, NULL, + EXT4_HT_DIR, credits); + + handle = ext4_journal_current_handle(); + if (IS_ERR(wh)) { + if (handle) + ext4_journal_stop(handle); + if (PTR_ERR(wh) == -ENOSPC && + ext4_should_retry_alloc(ent->dir->i_sb, &retries)) + goto retry; + } else { + *h = handle; + init_special_inode(wh, wh->i_mode, WHITEOUT_DEV); + wh->i_op = &ext4_special_inode_operations; + } + return wh; +} + /* * Anybody can rename anything with this: the permission 
checks are left to the * higher-level routines. @@ -3199,7 +3198,8 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) * This comes from rename(const char *oldpath, const char *newpath) */ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) { handle_t *handle = NULL; struct ext4_renament old = { @@ -3214,6 +3214,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, }; int force_reread; int retval; + struct inode *whiteout = NULL; + int credits; + u8 old_file_type; dquot_initialize(old.dir); dquot_initialize(new.dir); @@ -3252,11 +3255,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) ext4_alloc_da_blocks(old.inode); - handle = ext4_journal_start(old.dir, EXT4_HT_DIR, - (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + - EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); - if (IS_ERR(handle)) - return PTR_ERR(handle); + credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); + if (!(flags & RENAME_WHITEOUT)) { + handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); + if (IS_ERR(handle)) + return PTR_ERR(handle); + } else { + whiteout = ext4_whiteout_for_rename(&old, credits, &handle); + if (IS_ERR(whiteout)) + return PTR_ERR(whiteout); + } if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); @@ -3284,13 +3293,26 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, */ force_reread = (new.dir->i_ino == old.dir->i_ino && ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); + + old_file_type = old.de->file_type; + if (whiteout) { + /* + * Do this before adding a new entry, so the old entry is sure + * to be still pointing to the valid old entry. + */ + retval = ext4_setent(handle, &old, whiteout->i_ino, + EXT4_FT_CHRDEV); + if (retval) + goto end_rename; + ext4_mark_inode_dirty(handle, whiteout); + } if (!new.bh) { retval = ext4_add_entry(handle, new.dentry, old.inode); if (retval) goto end_rename; } else { retval = ext4_setent(handle, &new, - old.inode->i_ino, old.de->file_type); + old.inode->i_ino, old_file_type); if (retval) goto end_rename; } @@ -3305,10 +3327,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, old.inode->i_ctime = ext4_current_time(old.inode); ext4_mark_inode_dirty(handle, old.inode); - /* - * ok, that's it - */ - ext4_rename_delete(handle, &old, force_reread); + if (!whiteout) { + /* + * ok, that's it + */ + ext4_rename_delete(handle, &old, force_reread); + } if (new.inode) { ext4_dec_count(handle, new.inode); @@ -3344,6 +3368,12 @@ end_rename: brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); + if (whiteout) { + if (retval) + drop_nlink(whiteout); + unlock_new_inode(whiteout); + iput(whiteout); + } if (handle) ext4_journal_stop(handle); return retval; @@ -3476,18 +3506,15 @@ static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if (flags & RENAME_EXCHANGE) { return ext4_cross_rename(old_dir, old_dentry, new_dir, new_dentry); } - /* - * Existence checking was done by the VFS, otherwise "RENAME_NOREPLACE" - * is equivalent to regular rename. 
- */ - return ext4_rename(old_dir, old_dentry, new_dir, new_dentry); + + return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } /* diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 1e43b905ff9..ca4588388fc 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1081,7 +1081,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data, break; if (meta_bg == 0) - backup_block = group * bpg + blk_off; + backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; else backup_block = (ext4_group_first_block_no(sb, group) + ext4_bg_has_super(sb, group)); @@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb, { struct buffer_head *bh; - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return 0; bh = ext4_get_bitmap(sb, group_data->inode_bitmap); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 05c159218bc..2c9e6864abd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -70,7 +70,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb, static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); -static int ext4_sync_fs_nojournal(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); @@ -141,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb, static int ext4_superblock_csum_verify(struct super_block *sb, struct ext4_super_block *es) { - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return 1; return es->s_checksum == ext4_superblock_csum(sb, es); @@ -152,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(sb)) return; es->s_checksum = ext4_superblock_csum(sb, es); @@ -820,10 +817,9 @@ static void ext4_put_super(struct super_block *sb) percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); - percpu_counter_destroy(&sbi->s_extent_cache_cnt); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA - for (i = 0; i < MAXQUOTAS; i++) + for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif @@ -885,6 +881,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); INIT_LIST_HEAD(&ei->i_es_lru); + ei->i_es_all_nr = 0; ei->i_es_lru_nr = 0; ei->i_touch_when = 0; ei->i_reserved_data_blocks = 0; @@ -1002,7 +999,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb, * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ - inode = ext4_iget(sb, ino); + inode = ext4_iget_normal(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { @@ -1124,25 +1121,6 @@ static const struct super_operations ext4_sops = { .bdev_try_to_free_page = bdev_try_to_free_page, }; -static const struct super_operations ext4_nojournal_sops = { - .alloc_inode = ext4_alloc_inode, - .destroy_inode = ext4_destroy_inode, - .write_inode = ext4_write_inode, - .dirty_inode = ext4_dirty_inode, - .drop_inode = 
ext4_drop_inode, - .evict_inode = ext4_evict_inode, - .sync_fs = ext4_sync_fs_nojournal, - .put_super = ext4_put_super, - .statfs = ext4_statfs, - .remount_fs = ext4_remount, - .show_options = ext4_show_options, -#ifdef CONFIG_QUOTA - .quota_read = ext4_quota_read, - .quota_write = ext4_quota_write, -#endif - .bdev_try_to_free_page = bdev_try_to_free_page, -}; - static const struct export_operations ext4_export_ops = { .fh_to_dentry = ext4_fh_to_dentry, .fh_to_parent = ext4_fh_to_parent, @@ -1712,13 +1690,6 @@ static int parse_options(char *options, struct super_block *sb, "not specified"); return 0; } - } else { - if (sbi->s_jquota_fmt) { - ext4_msg(sb, KERN_ERR, "journaled quota format " - "specified with no journaling " - "enabled"); - return 0; - } } #endif if (test_opt(sb, DIOREAD_NOLOCK)) { @@ -2016,8 +1987,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, __u16 crc = 0; __le32 le_group = cpu_to_le32(block_group); - if ((sbi->s_es->s_feature_ro_compat & - cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { + if (ext4_has_metadata_csum(sbi->s_sb)) { /* Use new metadata_csum algorithm */ __le16 save_csum; __u32 csum32; @@ -2035,6 +2005,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, } /* old crc16 code */ + if (!(sbi->s_es->s_feature_ro_compat & + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM))) + return 0; + offset = offsetof(struct ext4_group_desc, bg_checksum); crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); @@ -2191,7 +2165,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { - jbd_debug(1, "Errors on filesystem, " + ext4_msg(sb, KERN_INFO, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; } @@ -2207,7 +2181,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ - for (i = 0; i < MAXQUOTAS; i++) { + for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (ret < 0) @@ -2263,7 +2237,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ - for (i = 0; i < MAXQUOTAS; i++) { + for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } @@ -2548,6 +2522,16 @@ static ssize_t sbi_ui_store(struct ext4_attr *a, return count; } +static ssize_t es_ui_show(struct ext4_attr *a, + struct ext4_sb_info *sbi, char *buf) +{ + + unsigned int *ui = (unsigned int *) (((char *) sbi->s_es) + + a->u.offset); + + return snprintf(buf, PAGE_SIZE, "%u\n", *ui); +} + static ssize_t reserved_clusters_show(struct ext4_attr *a, struct ext4_sb_info *sbi, char *buf) { @@ -2601,14 +2585,29 @@ static struct ext4_attr ext4_attr_##_name = { \ .offset = offsetof(struct ext4_sb_info, _elname),\ }, \ } + +#define EXT4_ATTR_OFFSET_ES(_name,_mode,_show,_store,_elname) \ +static struct ext4_attr ext4_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ + .u = { \ + .offset = offsetof(struct ext4_super_block, _elname), \ + }, \ +} + #define EXT4_ATTR(name, mode, show, store) \ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) #define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 
0444, NULL, NULL) #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL) #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store) + +#define EXT4_RO_ATTR_ES_UI(name, elname) \ + EXT4_ATTR_OFFSET_ES(name, 0444, es_ui_show, NULL, elname) #define EXT4_RW_ATTR_SBI_UI(name, elname) \ EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) + #define ATTR_LIST(name) &ext4_attr_##name.attr #define EXT4_DEPRECATED_ATTR(_name, _val) \ static struct ext4_attr ext4_attr_##_name = { \ @@ -2641,6 +2640,9 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.int EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst); EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval); EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst); +EXT4_RO_ATTR_ES_UI(errors_count, s_error_count); +EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time); +EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time); static struct attribute *ext4_attrs[] = { ATTR_LIST(delayed_allocation_blocks), @@ -2664,6 +2666,9 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(warning_ratelimit_burst), ATTR_LIST(msg_ratelimit_interval_ms), ATTR_LIST(msg_ratelimit_burst), + ATTR_LIST(errors_count), + ATTR_LIST(first_error_time), + ATTR_LIST(last_error_time), NULL, }; @@ -2723,9 +2728,25 @@ static void ext4_feat_release(struct kobject *kobj) complete(&ext4_feat->f_kobj_unregister); } +static ssize_t ext4_feat_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "supported\n"); +} + +/* + * We can not use ext4_attr_show/store because it relies on the kobject + * being embedded in the ext4_sb_info structure which is definitely not + * true in this case. 
+ */ +static const struct sysfs_ops ext4_feat_ops = { + .show = ext4_feat_show, + .store = NULL, +}; + static struct kobj_type ext4_feat_ktype = { .default_attrs = ext4_feat_attrs, - .sysfs_ops = &ext4_attr_ops, + .sysfs_ops = &ext4_feat_ops, .release = ext4_feat_release, }; @@ -3179,8 +3200,7 @@ static int set_journal_csum_feature_set(struct super_block *sb) int compat, incompat; struct ext4_sb_info *sbi = EXT4_SB(sb); - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + if (ext4_has_metadata_csum(sb)) { /* journal checksum v3 */ compat = 0; incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; @@ -3190,6 +3210,10 @@ static int set_journal_csum_feature_set(struct super_block *sb) incompat = 0; } + jbd2_journal_clear_features(sbi->s_journal, + JBD2_FEATURE_COMPAT_CHECKSUM, 0, + JBD2_FEATURE_INCOMPAT_CSUM_V3 | + JBD2_FEATURE_INCOMPAT_CSUM_V2); if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, @@ -3202,11 +3226,8 @@ static int set_journal_csum_feature_set(struct super_block *sb) jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else { - jbd2_journal_clear_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | - JBD2_FEATURE_INCOMPAT_CSUM_V3 | - JBD2_FEATURE_INCOMPAT_CSUM_V2); + jbd2_journal_clear_features(sbi->s_journal, 0, 0, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } return ret; @@ -3436,7 +3457,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) logical_sb_block = sb_block; } - if (!(bh = sb_bread(sb, logical_sb_block))) { + if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } @@ -3487,8 +3508,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } /* Precompute checksum seed for all metadata */ - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (ext4_has_metadata_csum(sb)) sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid)); @@ -3506,6 +3526,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif + /* don't forget to enable journal_csum when metadata_csum is enabled. 
*/ + if (ext4_has_metadata_csum(sb)) + set_opt(sb, JOURNAL_CHECKSUM); + if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) @@ -3519,8 +3543,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); - if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) - set_opt(sb, BLOCK_VALIDITY); + /* block_validity enabled by default; disable with noblock_validity */ + set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); @@ -3646,7 +3670,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); - bh = sb_bread(sb, logical_sb_block); + bh = sb_bread_unmovable(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); @@ -3868,7 +3892,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); - sbi->s_group_desc[i] = sb_bread(sb, block); + sbi->s_group_desc[i] = sb_bread_unmovable(sb, block); if (!sbi->s_group_desc[i]) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); @@ -3890,13 +3914,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_err_report.data = (unsigned long) sb; /* Register extent status tree shrinker */ - ext4_es_register_shrinker(sbi); - - err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0, GFP_KERNEL); - if (err) { - ext4_msg(sb, KERN_ERR, "insufficient memory"); + if (ext4_es_register_shrinker(sbi)) goto failed_mount3; - } sbi->s_stripe = ext4_get_stripe_size(sbi); sbi->s_extent_max_zeroout_kb = 32; @@ -3904,11 +3923,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* * set up enough so that it can read an inode */ - if (!test_opt(sb, NOLOAD) && - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) - sb->s_op = &ext4_sops; - else - sb->s_op = &ext4_nojournal_sops; + sb->s_op = &ext4_sops; sb->s_export_op = &ext4_export_ops; sb->s_xattr = ext4_xattr_handlers; #ifdef CONFIG_QUOTA @@ -3932,7 +3947,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) && !(sb->s_flags & MS_RDONLY)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) - goto failed_mount3; + goto failed_mount3a; /* * The first inode we look at is the journal inode. 
Don't try @@ -3941,7 +3956,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (!test_opt(sb, NOLOAD) && EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext4_load_journal(sb, es, journal_devnum)) - goto failed_mount3; + goto failed_mount3a; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { ext4_msg(sb, KERN_ERR, "required journal recovery " @@ -4229,10 +4244,10 @@ failed_mount_wq: jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } -failed_mount3: +failed_mount3a: ext4_es_unregister_shrinker(sbi); +failed_mount3: del_timer_sync(&sbi->s_err_report); - percpu_counter_destroy(&sbi->s_extent_cache_cnt); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: @@ -4247,7 +4262,7 @@ failed_mount: remove_proc_entry(sb->s_id, ext4_proc_root); } #ifdef CONFIG_QUOTA - for (i = 0; i < MAXQUOTAS; i++) + for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); @@ -4375,6 +4390,15 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, goto out_bdev; } + if ((le32_to_cpu(es->s_feature_ro_compat) & + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && + es->s_checksum != ext4_superblock_csum(sb, es)) { + ext4_msg(sb, KERN_ERR, "external journal has " + "corrupt superblock"); + brelse(bh); + goto out_bdev; + } + if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext4_msg(sb, KERN_ERR, "journal UUID does not match"); brelse(bh); @@ -4677,15 +4701,19 @@ static int ext4_sync_fs(struct super_block *sb, int wait) * being sent at the end of the function. But we can skip it if * transaction_commit will do it for us. */ - target = jbd2_get_latest_transaction(sbi->s_journal); - if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && - !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) + if (sbi->s_journal) { + target = jbd2_get_latest_transaction(sbi->s_journal); + if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && + !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) + needs_barrier = true; + + if (jbd2_journal_start_commit(sbi->s_journal, &target)) { + if (wait) + ret = jbd2_log_wait_commit(sbi->s_journal, + target); + } + } else if (wait && test_opt(sb, BARRIER)) needs_barrier = true; - - if (jbd2_journal_start_commit(sbi->s_journal, &target)) { - if (wait) - ret = jbd2_log_wait_commit(sbi->s_journal, target); - } if (needs_barrier) { int err; err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); @@ -4696,19 +4724,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait) return ret; } -static int ext4_sync_fs_nojournal(struct super_block *sb, int wait) -{ - int ret = 0; - - trace_ext4_sync_fs(sb, wait); - flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq); - dquot_writeback_dquots(sb, -1); - if (wait && test_opt(sb, BARRIER)) - ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); - - return ret; -} - /* * LVM calls this function before a (read-only) snapshot is created. This * gives us a chance to flush the journal completely and mark the fs clean. @@ -4727,23 +4742,26 @@ static int ext4_freeze(struct super_block *sb) journal = EXT4_SB(sb)->s_journal; - /* Now we set up the journal barrier. */ - jbd2_journal_lock_updates(journal); + if (journal) { + /* Now we set up the journal barrier. */ + jbd2_journal_lock_updates(journal); - /* - * Don't clear the needs_recovery flag if we failed to flush - * the journal. 
- */ - error = jbd2_journal_flush(journal); - if (error < 0) - goto out; + /* + * Don't clear the needs_recovery flag if we failed to + * flush the journal. + */ + error = jbd2_journal_flush(journal); + if (error < 0) + goto out; + } /* Journal blocked and flushed, clear needs_recovery flag. */ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: - /* we rely on upper layer to stop further updates */ - jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + if (journal) + /* we rely on upper layer to stop further updates */ + jbd2_journal_unlock_updates(journal); return error; } @@ -4774,7 +4792,7 @@ struct ext4_mount_options { u32 s_min_batch_time, s_max_batch_time; #ifdef CONFIG_QUOTA int s_jquota_fmt; - char *s_qf_names[MAXQUOTAS]; + char *s_qf_names[EXT4_MAXQUOTAS]; #endif }; @@ -4804,7 +4822,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) old_opts.s_max_batch_time = sbi->s_max_batch_time; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; - for (i = 0; i < MAXQUOTAS; i++) + for (i = 0; i < EXT4_MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); @@ -4828,6 +4846,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ + test_opt(sb, JOURNAL_CHECKSUM)) { + ext4_msg(sb, KERN_ERR, "changing journal_checksum " + "during remount not supported"); + err = -EINVAL; + goto restore_opts; + } + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " @@ -4965,7 +4991,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) #ifdef CONFIG_QUOTA /* Release old quota file names */ - for (i = 0; i < MAXQUOTAS; i++) + for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) @@ -4994,7 +5020,7 @@ restore_opts: sbi->s_max_batch_time = old_opts.s_max_batch_time; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; - for (i = 0; i < MAXQUOTAS; i++) { + for (i = 0; i < EXT4_MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } @@ -5197,7 +5223,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, { int err; struct inode *qf_inode; - unsigned long qf_inums[MAXQUOTAS] = { + unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; @@ -5225,13 +5251,13 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, static int ext4_enable_quotas(struct super_block *sb) { int type, err = 0; - unsigned long qf_inums[MAXQUOTAS] = { + unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; - for (type = 0; type < MAXQUOTAS; type++) { + for (type = 0; type < EXT4_MAXQUOTAS; type++) { if (qf_inums[type]) { err = ext4_quota_enable(sb, type, QFMT_VFS_V1, DQUOT_USAGE_ENABLED); @@ -5309,7 +5335,6 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); - int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; @@ -5324,9 +5349,9 @@ static ssize_t 
ext4_quota_read(struct super_block *sb, int type, char *data, while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; - bh = ext4_bread(NULL, inode, blk, 0, &err); - if (err) - return err; + bh = ext4_bread(NULL, inode, blk, 0); + if (IS_ERR(bh)) + return PTR_ERR(bh); if (!bh) /* A hole? */ memset(data, 0, tocopy); else @@ -5347,8 +5372,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); - int err = 0; - int offset = off & (sb->s_blocksize - 1); + int err, offset = off & (sb->s_blocksize - 1); struct buffer_head *bh; handle_t *handle = journal_current_handle(); @@ -5369,14 +5393,16 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, return -EIO; } - bh = ext4_bread(handle, inode, blk, 1, &err); + bh = ext4_bread(handle, inode, blk, 1); + if (IS_ERR(bh)) + return PTR_ERR(bh); if (!bh) goto out; BUFFER_TRACE(bh, "get write access"); err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); - goto out; + return err; } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); @@ -5385,8 +5411,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, err = ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); out: - if (err) - return err; if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index e7387337060..1e09fc77395 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -142,8 +142,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode, sector_t block_nr, struct ext4_xattr_header *hdr) { - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && + if (ext4_has_metadata_csum(inode->i_sb) && (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) return 0; return 1; @@ -153,8 +152,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode, sector_t block_nr, struct ext4_xattr_header *hdr) { - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + if (!ext4_has_metadata_csum(inode->i_sb)) return; hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); @@ -190,14 +188,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size) } static int -ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end) +ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end, + void *value_start) { - while (!IS_LAST_ENTRY(entry)) { - struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry); + struct ext4_xattr_entry *e = entry; + + while (!IS_LAST_ENTRY(e)) { + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); if ((void *)next >= end) return -EIO; - entry = next; + e = next; } + + while (!IS_LAST_ENTRY(entry)) { + if (entry->e_value_size != 0 && + (value_start + le16_to_cpu(entry->e_value_offs) < + (void *)e + sizeof(__u32) || + value_start + le16_to_cpu(entry->e_value_offs) + + le32_to_cpu(entry->e_value_size) > end)) + return -EIO; + entry = EXT4_XATTR_NEXT(entry); + } + return 0; } @@ -214,7 +226,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) return -EIO; if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) return -EIO; - error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); + error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, + bh->b_data); if (!error) set_buffer_verified(bh); return error; @@ -331,7 +344,7 @@ 
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, header = IHDR(inode, raw_inode); entry = IFIRST(header); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = ext4_xattr_check_names(entry, end); + error = ext4_xattr_check_names(entry, end, entry); if (error) goto cleanup; error = ext4_xattr_find_entry(&entry, name_index, name, @@ -463,7 +476,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = ext4_xattr_check_names(IFIRST(header), end); + error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); if (error) goto cleanup; error = ext4_xattr_list_entries(dentry, IFIRST(header), @@ -899,14 +912,8 @@ inserted: if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; - /* - * take i_data_sem because we will test - * i_delalloc_reserved_flag in ext4_mb_new_blocks - */ - down_read(&EXT4_I(inode)->i_data_sem); block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); - up_read((&EXT4_I(inode)->i_data_sem)); if (error) goto cleanup; @@ -986,7 +993,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, is->s.here = is->s.first; is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { - error = ext4_xattr_check_names(IFIRST(header), is->s.end); + error = ext4_xattr_check_names(IFIRST(header), is->s.end, + IFIRST(header)); if (error) return error; /* Find the named attribute. */ diff --git a/fs/internal.h b/fs/internal.h index 9477f8f6aef..757ba2abf21 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -47,7 +47,6 @@ extern void __init chrdev_init(void); /* * namei.c */ -extern int __inode_permission(struct inode *, int); extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); @@ -139,12 +138,6 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, extern int rw_verify_area(int, struct file *, const loff_t *, size_t); /* - * splice.c - */ -extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags); - -/* * pipe.c */ extern const struct file_operations pipefifo_fops; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 881b3bd0143..d67a16f2a45 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -29,13 +29,9 @@ #define BEQUIET static int isofs_hashi(const struct dentry *parent, struct qstr *qstr); -static int isofs_hash(const struct dentry *parent, struct qstr *qstr); static int isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name); -static int isofs_dentry_cmp(const struct dentry *parent, - const struct dentry *dentry, - unsigned int len, const char *str, const struct qstr *name); #ifdef CONFIG_JOLIET static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr); @@ -135,10 +131,6 @@ static const struct super_operations isofs_sops = { static const struct dentry_operations isofs_dentry_ops[] = { { - .d_hash = isofs_hash, - .d_compare = isofs_dentry_cmp, - }, - { .d_hash = isofs_hashi, .d_compare = isofs_dentry_cmpi, }, @@ -182,27 +174,6 @@ struct iso9660_options{ * Compute the hash for the isofs name corresponding to 
the dentry. */ static int -isofs_hash_common(struct qstr *qstr, int ms) -{ - const char *name; - int len; - - len = qstr->len; - name = qstr->name; - if (ms) { - while (len && name[len-1] == '.') - len--; - } - - qstr->hash = full_name_hash(name, len); - - return 0; -} - -/* - * Compute the hash for the isofs name corresponding to the dentry. - */ -static int isofs_hashi_common(struct qstr *qstr, int ms) { const char *name; @@ -258,32 +229,40 @@ static int isofs_dentry_cmp_common( } static int -isofs_hash(const struct dentry *dentry, struct qstr *qstr) -{ - return isofs_hash_common(qstr, 0); -} - -static int isofs_hashi(const struct dentry *dentry, struct qstr *qstr) { return isofs_hashi_common(qstr, 0); } static int -isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry, +isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { - return isofs_dentry_cmp_common(len, str, name, 0, 0); + return isofs_dentry_cmp_common(len, str, name, 0, 1); } +#ifdef CONFIG_JOLIET +/* + * Compute the hash for the isofs name corresponding to the dentry. + */ static int -isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, - unsigned int len, const char *str, const struct qstr *name) +isofs_hash_common(struct qstr *qstr, int ms) { - return isofs_dentry_cmp_common(len, str, name, 0, 1); + const char *name; + int len; + + len = qstr->len; + name = qstr->name; + if (ms) { + while (len && name[len-1] == '.') + len--; + } + + qstr->hash = full_name_hash(name, len); + + return 0; } -#ifdef CONFIG_JOLIET static int isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) { @@ -930,7 +909,8 @@ root_found: if (opt.check == 'r') table++; - s->s_d_op = &isofs_dentry_ops[table]; + if (table) + s->s_d_op = &isofs_dentry_ops[table - 1]; /* get the root dentry */ s->s_root = d_make_root(inode); diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 95295640d9c..7b543e6b652 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -18,25 +18,10 @@ static int isofs_cmp(struct dentry *dentry, const char *compare, int dlen) { struct qstr qstr; - - if (!compare) - return 1; - - /* check special "." and ".." files */ - if (dlen == 1) { - /* "." 
*/ - if (compare[0] == 0) { - if (!dentry->d_name.len) - return 0; - compare = "."; - } else if (compare[0] == 1) { - compare = ".."; - dlen = 2; - } - } - qstr.name = compare; qstr.len = dlen; + if (likely(!dentry->d_op)) + return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen); return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr); } @@ -146,7 +131,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, (!(de->flags[-sbi->s_high_sierra] & 1))) && (sbi->s_showassoc || (!(de->flags[-sbi->s_high_sierra] & 4)))) { - match = (isofs_cmp(dentry, dpnt, dlen) == 0); + if (dpnt && (dlen > 1 || dpnt[0] > 1)) + match = (isofs_cmp(dentry, dpnt, dlen) == 0); } if (match) { isofs_normalize_block_and_offset(de, diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 06fe11e0abf..aab8549591e 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -886,7 +886,7 @@ journal_t * journal_init_inode (struct inode *inode) goto out_err; } - bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); + bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) { printk(KERN_ERR "%s: Cannot get buffer for journal superblock\n", diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index 8898bbd2b61..dcead636c33 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c @@ -93,6 +93,7 @@ #include <linux/bio.h> #endif #include <linux/log2.h> +#include <linux/hash.h> static struct kmem_cache *revoke_record_cache; static struct kmem_cache *revoke_table_cache; @@ -129,15 +130,11 @@ static void flush_descriptor(journal_t *, struct journal_head *, int, int); /* Utility functions to maintain the revoke table */ -/* Borrowed from buffer.c: this is a tried and tested block hash function */ static inline int hash(journal_t *journal, unsigned int block) { struct jbd_revoke_table_s *table = journal->j_revoke; - int hash_shift = table->hash_shift; - return ((block << (hash_shift - 6)) ^ - (block >> 13) ^ - (block << (hash_shift - 12))) & (table->hash_size - 1); + return hash_32(block, table->hash_shift); } static int insert_revoke_hash(journal_t *journal, unsigned int blocknr, diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 7f34f471616..988b32ed4c8 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -96,15 +96,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh) if (jh->b_transaction == NULL && !buffer_locked(bh) && !buffer_dirty(bh) && !buffer_write_io_error(bh)) { - /* - * Get our reference so that bh cannot be freed before - * we unlock it - */ - get_bh(bh); JBUFFER_TRACE(jh, "remove from checkpoint list"); ret = __jbd2_journal_remove_checkpoint(jh) + 1; - BUFFER_TRACE(bh, "release"); - __brelse(bh); } return ret; } @@ -122,8 +115,6 @@ void __jbd2_log_wait_for_space(journal_t *journal) nblocks = jbd2_space_needed(journal); while (jbd2_log_space_left(journal) < nblocks) { - if (journal->j_flags & JBD2_ABORT) - return; write_unlock(&journal->j_state_lock); mutex_lock(&journal->j_checkpoint_mutex); @@ -139,6 +130,10 @@ void __jbd2_log_wait_for_space(journal_t *journal) * trace for forensic evidence. */ write_lock(&journal->j_state_lock); + if (journal->j_flags & JBD2_ABORT) { + mutex_unlock(&journal->j_checkpoint_mutex); + return; + } spin_lock(&journal->j_list_lock); nblocks = jbd2_space_needed(journal); space_left = jbd2_log_space_left(journal); @@ -183,58 +178,6 @@ void __jbd2_log_wait_for_space(journal_t *journal) } } -/* - * Clean up transaction's list of buffers submitted for io. 
- * We wait for any pending IO to complete and remove any clean - * buffers. Note that we take the buffers in the opposite ordering - * from the one in which they were submitted for IO. - * - * Return 0 on success, and return <0 if some buffers have failed - * to be written out. - * - * Called with j_list_lock held. - */ -static int __wait_cp_io(journal_t *journal, transaction_t *transaction) -{ - struct journal_head *jh; - struct buffer_head *bh; - tid_t this_tid; - int released = 0; - int ret = 0; - - this_tid = transaction->t_tid; -restart: - /* Did somebody clean up the transaction in the meanwhile? */ - if (journal->j_checkpoint_transactions != transaction || - transaction->t_tid != this_tid) - return ret; - while (!released && transaction->t_checkpoint_io_list) { - jh = transaction->t_checkpoint_io_list; - bh = jh2bh(jh); - get_bh(bh); - if (buffer_locked(bh)) { - spin_unlock(&journal->j_list_lock); - wait_on_buffer(bh); - /* the journal_head may have gone by now */ - BUFFER_TRACE(bh, "brelse"); - __brelse(bh); - spin_lock(&journal->j_list_lock); - goto restart; - } - if (unlikely(buffer_write_io_error(bh))) - ret = -EIO; - - /* - * Now in whatever state the buffer currently is, we know that - * it has been written out and so we can drop it from the list - */ - released = __jbd2_journal_remove_checkpoint(jh); - __brelse(bh); - } - - return ret; -} - static void __flush_batch(journal_t *journal, int *batch_count) { @@ -255,81 +198,6 @@ __flush_batch(journal_t *journal, int *batch_count) } /* - * Try to flush one buffer from the checkpoint list to disk. - * - * Return 1 if something happened which requires us to abort the current - * scan of the checkpoint list. Return <0 if the buffer has failed to - * be written out. - * - * Called with j_list_lock held and drops it if 1 is returned - */ -static int __process_buffer(journal_t *journal, struct journal_head *jh, - int *batch_count, transaction_t *transaction) -{ - struct buffer_head *bh = jh2bh(jh); - int ret = 0; - - if (buffer_locked(bh)) { - get_bh(bh); - spin_unlock(&journal->j_list_lock); - wait_on_buffer(bh); - /* the journal_head may have gone by now */ - BUFFER_TRACE(bh, "brelse"); - __brelse(bh); - ret = 1; - } else if (jh->b_transaction != NULL) { - transaction_t *t = jh->b_transaction; - tid_t tid = t->t_tid; - - transaction->t_chp_stats.cs_forced_to_close++; - spin_unlock(&journal->j_list_lock); - if (unlikely(journal->j_flags & JBD2_UNMOUNT)) - /* - * The journal thread is dead; so starting and - * waiting for a commit to finish will cause - * us to wait for a _very_ long time. - */ - printk(KERN_ERR "JBD2: %s: " - "Waiting for Godot: block %llu\n", - journal->j_devname, - (unsigned long long) bh->b_blocknr); - jbd2_log_start_commit(journal, tid); - jbd2_log_wait_commit(journal, tid); - ret = 1; - } else if (!buffer_dirty(bh)) { - ret = 1; - if (unlikely(buffer_write_io_error(bh))) - ret = -EIO; - get_bh(bh); - BUFFER_TRACE(bh, "remove from checkpoint"); - __jbd2_journal_remove_checkpoint(jh); - spin_unlock(&journal->j_list_lock); - __brelse(bh); - } else { - /* - * Important: we are about to write the buffer, and - * possibly block, while still holding the journal lock. - * We cannot afford to let the transaction logic start - * messing around with this buffer before we write it to - * disk, as that would break recoverability. 
- */ - BUFFER_TRACE(bh, "queue"); - get_bh(bh); - J_ASSERT_BH(bh, !buffer_jwrite(bh)); - journal->j_chkpt_bhs[*batch_count] = bh; - __buffer_relink_io(jh); - transaction->t_chp_stats.cs_written++; - (*batch_count)++; - if (*batch_count == JBD2_NR_BATCH) { - spin_unlock(&journal->j_list_lock); - __flush_batch(journal, batch_count); - ret = 1; - } - } - return ret; -} - -/* * Perform an actual checkpoint. We take the first transaction on the * list of transactions to be checkpointed and send all its buffers * to disk. We submit larger chunks of data at once. @@ -339,9 +207,11 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, */ int jbd2_log_do_checkpoint(journal_t *journal) { - transaction_t *transaction; - tid_t this_tid; - int result; + struct journal_head *jh; + struct buffer_head *bh; + transaction_t *transaction; + tid_t this_tid; + int result, batch_count = 0; jbd_debug(1, "Start checkpoint\n"); @@ -374,45 +244,117 @@ restart: * done (maybe it's a new transaction, but it fell at the same * address). */ - if (journal->j_checkpoint_transactions == transaction && - transaction->t_tid == this_tid) { - int batch_count = 0; - struct journal_head *jh; - int retry = 0, err; - - while (!retry && transaction->t_checkpoint_list) { - jh = transaction->t_checkpoint_list; - retry = __process_buffer(journal, jh, &batch_count, - transaction); - if (retry < 0 && !result) - result = retry; - if (!retry && (need_resched() || - spin_needbreak(&journal->j_list_lock))) { - spin_unlock(&journal->j_list_lock); - retry = 1; - break; - } + if (journal->j_checkpoint_transactions != transaction || + transaction->t_tid != this_tid) + goto out; + + /* checkpoint all of the transaction's buffers */ + while (transaction->t_checkpoint_list) { + jh = transaction->t_checkpoint_list; + bh = jh2bh(jh); + + if (buffer_locked(bh)) { + spin_unlock(&journal->j_list_lock); + get_bh(bh); + wait_on_buffer(bh); + /* the journal_head may have gone by now */ + BUFFER_TRACE(bh, "brelse"); + __brelse(bh); + goto retry; } + if (jh->b_transaction != NULL) { + transaction_t *t = jh->b_transaction; + tid_t tid = t->t_tid; - if (batch_count) { - if (!retry) { - spin_unlock(&journal->j_list_lock); - retry = 1; - } - __flush_batch(journal, &batch_count); + transaction->t_chp_stats.cs_forced_to_close++; + spin_unlock(&journal->j_list_lock); + if (unlikely(journal->j_flags & JBD2_UNMOUNT)) + /* + * The journal thread is dead; so + * starting and waiting for a commit + * to finish will cause us to wait for + * a _very_ long time. + */ + printk(KERN_ERR + "JBD2: %s: Waiting for Godot: block %llu\n", + journal->j_devname, (unsigned long long) bh->b_blocknr); + + jbd2_log_start_commit(journal, tid); + jbd2_log_wait_commit(journal, tid); + goto retry; + } + if (!buffer_dirty(bh)) { + if (unlikely(buffer_write_io_error(bh)) && !result) + result = -EIO; + BUFFER_TRACE(bh, "remove from checkpoint"); + if (__jbd2_journal_remove_checkpoint(jh)) + /* The transaction was released; we're done */ + goto out; + continue; } + /* + * Important: we are about to write the buffer, and + * possibly block, while still holding the journal + * lock. We cannot afford to let the transaction + * logic start messing around with this buffer before + * we write it to disk, as that would break + * recoverability. 
+ */ + BUFFER_TRACE(bh, "queue"); + get_bh(bh); + J_ASSERT_BH(bh, !buffer_jwrite(bh)); + journal->j_chkpt_bhs[batch_count++] = bh; + __buffer_relink_io(jh); + transaction->t_chp_stats.cs_written++; + if ((batch_count == JBD2_NR_BATCH) || + need_resched() || + spin_needbreak(&journal->j_list_lock)) + goto unlock_and_flush; + } - if (retry) { + if (batch_count) { + unlock_and_flush: + spin_unlock(&journal->j_list_lock); + retry: + if (batch_count) + __flush_batch(journal, &batch_count); spin_lock(&journal->j_list_lock); goto restart; + } + + /* + * Now we issued all of the transaction's buffers, let's deal + * with the buffers that are out for I/O. + */ +restart2: + /* Did somebody clean up the transaction in the meanwhile? */ + if (journal->j_checkpoint_transactions != transaction || + transaction->t_tid != this_tid) + goto out; + + while (transaction->t_checkpoint_io_list) { + jh = transaction->t_checkpoint_io_list; + bh = jh2bh(jh); + if (buffer_locked(bh)) { + spin_unlock(&journal->j_list_lock); + get_bh(bh); + wait_on_buffer(bh); + /* the journal_head may have gone by now */ + BUFFER_TRACE(bh, "brelse"); + __brelse(bh); + spin_lock(&journal->j_list_lock); + goto restart2; } + if (unlikely(buffer_write_io_error(bh)) && !result) + result = -EIO; + /* - * Now we have cleaned up the first transaction's checkpoint - * list. Let's clean up the second one + * Now in whatever state the buffer currently is, we + * know that it has been written out and so we can + * drop it from the list */ - err = __wait_cp_io(journal, transaction); - if (!result) - result = err; + if (__jbd2_journal_remove_checkpoint(jh)) + break; } out: spin_unlock(&journal->j_list_lock); @@ -478,18 +420,16 @@ int jbd2_cleanup_journal_tail(journal_t *journal) * Find all the written-back checkpoint buffers in the given list and * release them. * - * Called with the journal locked. * Called with j_list_lock held. - * Returns number of buffers reaped (for debug) + * Returns 1 if we freed the transaction, 0 otherwise. */ - -static int journal_clean_one_cp_list(struct journal_head *jh, int *released) +static int journal_clean_one_cp_list(struct journal_head *jh) { struct journal_head *last_jh; struct journal_head *next_jh = jh; - int ret, freed = 0; + int ret; + int freed = 0; - *released = 0; if (!jh) return 0; @@ -498,13 +438,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released) jh = next_jh; next_jh = jh->b_cpnext; ret = __try_to_free_cp_buf(jh); - if (ret) { - freed++; - if (ret == 2) { - *released = 1; - return freed; - } - } + if (!ret) + return freed; + if (ret == 2) + return 1; + freed = 1; /* * This function only frees up some memory * if possible so we dont have an obligation @@ -523,49 +461,49 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released) * * Find all the written-back checkpoint buffers in the journal and release them. * - * Called with the journal locked. * Called with j_list_lock held. 
- * Returns number of buffers reaped (for debug) */ - -int __jbd2_journal_clean_checkpoint_list(journal_t *journal) +void __jbd2_journal_clean_checkpoint_list(journal_t *journal) { transaction_t *transaction, *last_transaction, *next_transaction; - int ret = 0; - int released; + int ret; transaction = journal->j_checkpoint_transactions; if (!transaction) - goto out; + return; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; - ret += journal_clean_one_cp_list(transaction-> - t_checkpoint_list, &released); + ret = journal_clean_one_cp_list(transaction->t_checkpoint_list); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. Bail out if * preemption requested: */ if (need_resched()) - goto out; - if (released) + return; + if (ret) continue; /* * It is essential that we are as careful as in the case of * t_checkpoint_list with removing the buffer from the list as * we can possibly see not yet submitted buffers on io_list */ - ret += journal_clean_one_cp_list(transaction-> - t_checkpoint_io_list, &released); + ret = journal_clean_one_cp_list(transaction-> + t_checkpoint_io_list); if (need_resched()) - goto out; + return; + /* + * Stop scanning if we couldn't free the transaction. This + * avoids pointless scanning of transactions which still + * weren't checkpointed. + */ + if (!ret) + return; } while (transaction != last_transaction); -out: - return ret; } /* diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 19d74d86d99..e4dc74713a4 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1237,7 +1237,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode) goto out_err; } - bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); + bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) { printk(KERN_ERR "%s: Cannot get buffer for journal superblock\n", @@ -1522,14 +1522,6 @@ static int journal_get_superblock(journal_t *journal) goto out; } - if (jbd2_journal_has_csum_v2or3(journal) && - JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) { - /* Can't have checksum v1 and v2 on at the same time! */ - printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " - "at the same time!\n"); - goto out; - } - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) && JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) { /* Can't have checksum v2 and v3 at the same time! */ @@ -1538,6 +1530,14 @@ static int journal_get_superblock(journal_t *journal) goto out; } + if (jbd2_journal_has_csum_v2or3(journal) && + JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) { + /* Can't have checksum v1 and v2 on at the same time! 
*/ + printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 " + "at the same time!\n"); + goto out; + } + if (!jbd2_verify_csum_type(journal, sb)) { printk(KERN_ERR "JBD2: Unknown checksum type\n"); goto out; diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 9b329b55ffe..bcbef08a4d8 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal, !jbd2_descr_block_csum_verify(journal, bh->b_data)) { err = -EIO; + brelse(bh); goto failed; } diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index d5e95a175c9..c6cbaef2bda 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -92,6 +92,7 @@ #include <linux/init.h> #include <linux/bio.h> #include <linux/log2.h> +#include <linux/hash.h> #endif static struct kmem_cache *jbd2_revoke_record_cache; @@ -130,16 +131,9 @@ static void flush_descriptor(journal_t *, struct buffer_head *, int, int); /* Utility functions to maintain the revoke table */ -/* Borrowed from buffer.c: this is a tried and tested block hash function */ static inline int hash(journal_t *journal, unsigned long long block) { - struct jbd2_revoke_table_s *table = journal->j_revoke; - int hash_shift = table->hash_shift; - int hash = (int)block ^ (int)((block >> 31) >> 1); - - return ((hash << (hash_shift - 6)) ^ - (hash >> 13) ^ - (hash << (hash_shift - 12))) & (table->hash_size - 1); + return hash_64(block, journal->j_revoke->hash_shift); } static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr, diff --git a/fs/namei.c b/fs/namei.c index 43927d14db6..db5fe86319e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -416,6 +416,7 @@ int __inode_permission(struct inode *inode, int mask) return security_inode_permission(inode, mask); } +EXPORT_SYMBOL(__inode_permission); /** * sb_permission - Check superblock-level permissions @@ -2383,22 +2384,17 @@ kern_path_mountpoint(int dfd, const char *name, struct path *path, } EXPORT_SYMBOL(kern_path_mountpoint); -/* - * It's inline, so penalty for filesystems that don't use sticky bit is - * minimal. 
- */ -static inline int check_sticky(struct inode *dir, struct inode *inode) +int __check_sticky(struct inode *dir, struct inode *inode) { kuid_t fsuid = current_fsuid(); - if (!(dir->i_mode & S_ISVTX)) - return 0; if (uid_eq(inode->i_uid, fsuid)) return 0; if (uid_eq(dir->i_uid, fsuid)) return 0; return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); } +EXPORT_SYMBOL(__check_sticky); /* * Check whether we can remove a link victim from directory dir, check @@ -2501,7 +2497,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) } mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); - mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2); return NULL; } EXPORT_SYMBOL(lock_rename); @@ -3064,9 +3060,12 @@ finish_open_created: error = may_open(&nd->path, acc_mode, open_flag); if (error) goto out; - file->f_path.mnt = nd->path.mnt; - error = finish_open(file, nd->path.dentry, NULL, opened); - if (error) { + + BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ + error = vfs_open(&nd->path, file, current_cred()); + if (!error) { + *opened |= FILE_OPENED; + } else { if (error == -EOPENSTALE) goto stale_open; goto out; @@ -3155,7 +3154,8 @@ static int do_tmpfile(int dfd, struct filename *pathname, if (error) goto out2; audit_inode(pathname, nd->path.dentry, 0); - error = may_open(&nd->path, op->acc_mode, op->open_flag); + /* Don't check for other permissions, the inode was just created */ + error = may_open(&nd->path, MAY_OPEN, op->open_flag); if (error) goto out2; file->f_path.mnt = nd->path.mnt; @@ -4210,12 +4210,16 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, bool should_retry = false; int error; - if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; - if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE)) + if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && + (flags & RENAME_EXCHANGE)) return -EINVAL; + if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) + return -EPERM; + retry: from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags); if (IS_ERR(from)) { @@ -4347,6 +4351,20 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } +int vfs_whiteout(struct inode *dir, struct dentry *dentry) +{ + int error = may_create(dir, dentry); + if (error) + return error; + + if (!dir->i_op->mknod) + return -EPERM; + + return dir->i_op->mknod(dir, dentry, + S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); +} +EXPORT_SYMBOL(vfs_whiteout); + int readlink_copy(char __user *buffer, int buflen, const char *link) { int len = PTR_ERR(link); diff --git a/fs/namespace.c b/fs/namespace.c index fbba8b17330..5b66b2b3624 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1686,6 +1686,33 @@ void drop_collected_mounts(struct vfsmount *mnt) namespace_unlock(); } +/** + * clone_private_mount - create a private clone of a path + * + * This creates a new vfsmount, which will be the clone of @path. The new will + * not be attached anywhere in the namespace and will be private (i.e. changes + * to the originating mount won't be propagated into this). + * + * Release with mntput(). 
+ */ +struct vfsmount *clone_private_mount(struct path *path) +{ + struct mount *old_mnt = real_mount(path->mnt); + struct mount *new_mnt; + + if (IS_MNT_UNBINDABLE(old_mnt)) + return ERR_PTR(-EINVAL); + + down_read(&namespace_sem); + new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); + up_read(&namespace_sem); + if (IS_ERR(new_mnt)) + return ERR_CAST(new_mnt); + + return &new_mnt->mnt; +} +EXPORT_SYMBOL_GPL(clone_private_mount); + int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 5228f201d3d..4f46f7a0528 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -378,7 +378,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) loff_t offset = header->args.offset; size_t count = header->args.count; struct page **pages = header->args.pages; - int pg_index = pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; + int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; unsigned int pg_len; struct blk_plug plug; int i; diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c index e966c023b1b..acbf9ca4018 100644 --- a/fs/nfs/blocklayout/rpc_pipefs.c +++ b/fs/nfs/blocklayout/rpc_pipefs.c @@ -65,17 +65,18 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); + mutex_lock(&nn->bl_mutex); bl_pipe_msg.bl_wq = &nn->bl_wq; b->simple.len += 4; /* single volume */ if (b->simple.len > PAGE_SIZE) - return -EIO; + goto out_unlock; memset(msg, 0, sizeof(*msg)); msg->len = sizeof(*bl_msg) + b->simple.len; msg->data = kzalloc(msg->len, gfp_mask); if (!msg->data) - goto out; + goto out_free_data; bl_msg = msg->data; bl_msg->type = BL_DEVICE_MOUNT, @@ -87,7 +88,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, rc = rpc_queue_upcall(nn->bl_device_pipe, msg); if (rc < 0) { remove_wait_queue(&nn->bl_wq, &wq); - goto out; + goto out_free_data; } set_current_state(TASK_UNINTERRUPTIBLE); @@ -97,12 +98,14 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, if (reply->status != BL_DEVICE_REQUEST_PROC) { printk(KERN_WARNING "%s failed to decode device: %d\n", __func__, reply->status); - goto out; + goto out_free_data; } dev = MKDEV(reply->major, reply->minor); -out: +out_free_data: kfree(msg->data); +out_unlock: + mutex_unlock(&nn->bl_mutex); return dev; } @@ -232,6 +235,7 @@ static int nfs4blocklayout_net_init(struct net *net) struct nfs_net *nn = net_generic(net, nfs_net_id); struct dentry *dentry; + mutex_init(&nn->bl_mutex); init_waitqueue_head(&nn->bl_wq); nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); if (IS_ERR(nn->bl_device_pipe)) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 5853f53db73..7f3f6064134 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -125,6 +125,8 @@ again: continue; if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) continue; + if (!nfs4_valid_open_stateid(state)) + continue; if (!nfs4_stateid_match(&state->stateid, stateid)) continue; get_nfs_open_context(ctx); @@ -193,7 +195,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation * { int res = 0; - res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync); + if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) + res = nfs4_proc_delegreturn(inode, + delegation->cred, + &delegation->stateid, + issync); 
nfs_free_delegation(delegation); return res; } @@ -380,11 +386,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation { struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; struct nfs_inode *nfsi = NFS_I(inode); - int err; + int err = 0; if (delegation == NULL) return 0; do { + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) + break; err = nfs_delegation_claim_opens(inode, &delegation->stateid); if (!issync || err != -EAGAIN) break; @@ -605,10 +613,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl rcu_read_unlock(); } +static void nfs_revoke_delegation(struct inode *inode) +{ + struct nfs_delegation *delegation; + rcu_read_lock(); + delegation = rcu_dereference(NFS_I(inode)->delegation); + if (delegation != NULL) { + set_bit(NFS_DELEGATION_REVOKED, &delegation->flags); + nfs_mark_return_delegation(NFS_SERVER(inode), delegation); + } + rcu_read_unlock(); +} + void nfs_remove_bad_delegation(struct inode *inode) { struct nfs_delegation *delegation; + nfs_revoke_delegation(inode); delegation = nfs_inode_detach_delegation(inode); if (delegation) { nfs_inode_find_state_and_recover(inode, &delegation->stateid); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 5c1cce39297..e3c20a3ccc9 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -31,6 +31,7 @@ enum { NFS_DELEGATION_RETURN_IF_CLOSED, NFS_DELEGATION_REFERENCED, NFS_DELEGATION_RETURNING, + NFS_DELEGATION_REVOKED, }; int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 06e8cfcbb67..6e62155abf2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1527,6 +1527,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, case -ENOENT: d_drop(dentry); d_add(dentry, NULL); + nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); break; case -EISDIR: case -ENOTDIR: diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 20cffc83046..10bf07280f4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -266,6 +266,7 @@ static void nfs_direct_req_free(struct kref *kref) { struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); + nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); if (dreq->l_ctx != NULL) nfs_put_lock_context(dreq->l_ctx); if (dreq->ctx != NULL) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 46fab1cb455..7afb52f6a25 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -145,9 +145,6 @@ static int filelayout_async_handle_error(struct rpc_task *task, case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: - if (state == NULL) - break; - nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: if (state == NULL) break; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6388a59f2ad..00689a8a85e 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -626,7 +626,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; - int err; + int err = 0; trace_nfs_getattr_enter(inode); /* Flush out writes to the server in order to update c/mtime. 
*/ diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index ef221fb8a18..f0e06e4acbe 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -19,6 +19,7 @@ struct nfs_net { struct rpc_pipe *bl_device_pipe; struct bl_dev_msg bl_mount_reply; wait_queue_head_t bl_wq; + struct mutex bl_mutex; struct list_head nfs_client_list; struct list_head nfs_volume_list; #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 405bd95c1f5..69dc20a743f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -370,11 +370,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: - if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) { - nfs_remove_bad_delegation(inode); - exception->retry = 1; - break; - } if (state == NULL) break; ret = nfs4_schedule_stateid_recovery(server, state); @@ -1654,7 +1649,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs_inode_find_state_and_recover(state->inode, stateid); nfs4_schedule_stateid_recovery(server, state); - return 0; + return -EAGAIN; case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: set_bit(NFS_DELEGATED_STATE, &state->flags); @@ -2109,46 +2104,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta return ret; } +static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) +{ + nfs_remove_bad_delegation(state->inode); + write_seqlock(&state->seqlock); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); + write_sequnlock(&state->seqlock); + clear_bit(NFS_DELEGATED_STATE, &state->flags); +} + +static void nfs40_clear_delegation_stateid(struct nfs4_state *state) +{ + if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) + nfs_finish_clear_delegation_stateid(state); +} + +static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) +{ + /* NFSv4.0 doesn't allow for delegation recovery on open expire */ + nfs40_clear_delegation_stateid(state); + return nfs4_open_expired(sp, state); +} + #if defined(CONFIG_NFS_V4_1) -static void nfs41_clear_delegation_stateid(struct nfs4_state *state) +static void nfs41_check_delegation_stateid(struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); - nfs4_stateid *stateid = &state->stateid; + nfs4_stateid stateid; struct nfs_delegation *delegation; - struct rpc_cred *cred = NULL; - int status = -NFS4ERR_BAD_STATEID; - - /* If a state reset has been done, test_stateid is unneeded */ - if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) - return; + struct rpc_cred *cred; + int status; /* Get the delegation credential for use by test/free_stateid */ rcu_read_lock(); delegation = rcu_dereference(NFS_I(state->inode)->delegation); - if (delegation != NULL && - nfs4_stateid_match(&delegation->stateid, stateid)) { - cred = get_rpccred(delegation->cred); - rcu_read_unlock(); - status = nfs41_test_stateid(server, stateid, cred); - trace_nfs4_test_delegation_stateid(state, NULL, status); - } else + if (delegation == NULL) { rcu_read_unlock(); + return; + } + + nfs4_stateid_copy(&stateid, &delegation->stateid); + cred = get_rpccred(delegation->cred); + rcu_read_unlock(); + status = nfs41_test_stateid(server, &stateid, cred); + trace_nfs4_test_delegation_stateid(state, NULL, status); if (status != NFS_OK) { /* Free the stateid unless the server explicitly * informs us the stateid is unrecognized. 
*/ if (status != -NFS4ERR_BAD_STATEID) - nfs41_free_stateid(server, stateid, cred); - nfs_remove_bad_delegation(state->inode); - - write_seqlock(&state->seqlock); - nfs4_stateid_copy(&state->stateid, &state->open_stateid); - write_sequnlock(&state->seqlock); - clear_bit(NFS_DELEGATED_STATE, &state->flags); + nfs41_free_stateid(server, &stateid, cred); + nfs_finish_clear_delegation_stateid(state); } - if (cred != NULL) - put_rpccred(cred); + put_rpccred(cred); } /** @@ -2192,7 +2201,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st { int status; - nfs41_clear_delegation_stateid(state); + nfs41_check_delegation_stateid(state); status = nfs41_check_open_stateid(state); if (status != NFS_OK) status = nfs4_open_expired(sp, state); @@ -2231,19 +2240,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); ret = _nfs4_proc_open(opendata); - if (ret != 0) { - if (ret == -ENOENT) { - dentry = opendata->dentry; - if (dentry->d_inode) - d_delete(dentry); - else if (d_unhashed(dentry)) - d_add(dentry, NULL); - - nfs_set_verifier(dentry, - nfs_save_change_attribute(opendata->dir->d_inode)); - } + if (ret != 0) goto out; - } state = nfs4_opendata_to_nfs4_state(opendata); ret = PTR_ERR(state); @@ -4841,9 +4839,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: - if (state == NULL) - break; - nfs_remove_bad_delegation(state->inode); case -NFS4ERR_OPENMODE: if (state == NULL) break; @@ -8341,7 +8336,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, - .recover_open = nfs4_open_expired, + .recover_open = nfs40_open_expired, .recover_lock = nfs4_lock_expired, .establish_clid = nfs4_init_clientid, }; @@ -8408,8 +8403,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 - | NFS_CAP_ATOMIC_OPEN_V1 - | NFS_CAP_SEEK, + | NFS_CAP_ATOMIC_OPEN_V1, .init_client = nfs41_init_client, .shutdown_client = nfs41_shutdown_client, .match_stateid = nfs41_match_stateid, @@ -8431,7 +8425,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 - | NFS_CAP_ATOMIC_OPEN_V1, + | NFS_CAP_ATOMIC_OPEN_V1 + | NFS_CAP_SEEK, .init_client = nfs41_init_client, .shutdown_client = nfs41_shutdown_client, .match_stateid = nfs41_match_stateid, diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index c6e4bda6300..9e5bc42180e 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -5,7 +5,7 @@ * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index c89357c7a91..919efd4a1a2 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -5,7 +5,7 @@ * All rights reserved. 
* * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h index 3a0828d5733..2641dbad345 100644 --- a/fs/nfs/objlayout/objlayout.h +++ b/fs/nfs/objlayout/objlayout.h @@ -6,7 +6,7 @@ * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c index b3918f7ac34..f093c7ec983 100644 --- a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c +++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c @@ -5,7 +5,7 @@ * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 12493846a2d..f83b02dc916 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -715,8 +715,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) nfs_release_request(req); - else - WARN_ON_ONCE(1); } static void diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index ed2b1151b17..7cbdf1b2e4a 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) { if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); - dprintk("%s slot is busy\n", __func__); - return false; + /* Race breaker */ + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { + dprintk("%s slot is busy\n", __func__); + return false; + } + rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); } return true; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index cdeb3cfd6f3..0beb023f25a 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1272,7 +1272,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp) */ if (argp->opcnt == resp->opcnt) return false; - + if (next->opnum == OP_ILLEGAL) + return false; nextd = OPDESC(next); /* * Rest of 2.6.3.1.1: certain operations will return WRONGSEC @@ -1589,7 +1590,8 @@ static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { - return NFS4_MAX_SESSIONID_LEN + 20; + return (op_encode_hdr_size + + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32); } static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) @@ -1893,6 +1895,7 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_func = (nfsd4op_func)nfsd4_sequence, .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, .op_name = "OP_SEQUENCE", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize, }, [OP_DESTROY_CLIENTID] = { .op_func = (nfsd4op_func)nfsd4_destroy_clientid, diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 747f3b95bd1..33a46a8dfaf 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -335,12 +335,15 @@ void nfsd_lockd_shutdown(void); (NFSD4_SUPPORTED_ATTRS_WORD2 | 
FATTR4_WORD2_SUPPATTR_EXCLCREAT) #ifdef CONFIG_NFSD_V4_SECURITY_LABEL -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ - (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL) +#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL #else -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 +#define NFSD4_2_SECURITY_ATTRS 0 #endif +#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ + (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ + NFSD4_2_SECURITY_ATTRS) + static inline u32 nfsd_suppattrs0(u32 minorversion) { return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 9d3e9c50066..89326acd456 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -229,8 +229,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, &fsnotify_mark_srcu); } + /* + * We need to merge inode & vfsmount mark lists so that inode mark + * ignore masks are properly reflected for mount mark notifications. + * That's why this traversal is so complicated... + */ while (inode_node || vfsmount_node) { - inode_group = vfsmount_group = NULL; + inode_group = NULL; + inode_mark = NULL; + vfsmount_group = NULL; + vfsmount_mark = NULL; if (inode_node) { inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu), @@ -244,21 +252,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, vfsmount_group = vfsmount_mark->group; } - if (inode_group > vfsmount_group) { - /* handle inode */ - ret = send_to_group(to_tell, inode_mark, NULL, mask, - data, data_is, cookie, file_name); - /* we didn't use the vfsmount_mark */ - vfsmount_group = NULL; - } else if (vfsmount_group > inode_group) { - ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, - data, data_is, cookie, file_name); - inode_group = NULL; - } else { - ret = send_to_group(to_tell, inode_mark, vfsmount_mark, - mask, data, data_is, cookie, - file_name); + if (inode_group && vfsmount_group) { + int cmp = fsnotify_compare_groups(inode_group, + vfsmount_group); + if (cmp > 0) { + inode_group = NULL; + inode_mark = NULL; + } else if (cmp < 0) { + vfsmount_group = NULL; + vfsmount_mark = NULL; + } } + ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask, + data, data_is, cookie, file_name); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index 9c0898c4cfe..3b68b0ae0a9 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -12,6 +12,10 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group); /* protects reads of inode and vfsmount marks list */ extern struct srcu_struct fsnotify_mark_srcu; +/* compare two groups for sorting of marks lists */ +extern int fsnotify_compare_groups(struct fsnotify_group *a, + struct fsnotify_group *b); + extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark, __u32 mask); /* add a mark to an inode */ diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c index 9ce062218de..dfbf5447eea 100644 --- a/fs/notify/inode_mark.c +++ b/fs/notify/inode_mark.c @@ -194,6 +194,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark, { struct fsnotify_mark *lmark, *last = NULL; int ret = 0; + int cmp; mark->flags |= FSNOTIFY_MARK_FLAG_INODE; @@ -219,11 +220,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark, goto out; } - if (mark->group->priority < lmark->group->priority) - continue; - - if ((mark->group->priority == lmark->group->priority) && - (mark->group < lmark->group)) + cmp = fsnotify_compare_groups(lmark->group, mark->group); + if 
(cmp < 0) continue; hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list); @@ -288,20 +286,25 @@ void fsnotify_unmount_inodes(struct list_head *list) spin_unlock(&inode->i_lock); /* In case the dropping of a reference would nuke next_i. */ - if ((&next_i->i_sb_list != list) && - atomic_read(&next_i->i_count)) { + while (&next_i->i_sb_list != list) { spin_lock(&next_i->i_lock); - if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) { + if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) && + atomic_read(&next_i->i_count)) { __iget(next_i); need_iput = next_i; + spin_unlock(&next_i->i_lock); + break; } spin_unlock(&next_i->i_lock); + next_i = list_entry(next_i->i_sb_list.next, + struct inode, i_sb_list); } /* - * We can safely drop inode_sb_list_lock here because we hold - * references on both inode and next_i. Also no new inodes - * will be added since the umount has begun. + * We can safely drop inode_sb_list_lock here because either + * we actually hold references on both inode and next_i or + * end of list. Also no new inodes will be added since the + * umount has begun. */ spin_unlock(&inode_sb_list_lock); diff --git a/fs/notify/mark.c b/fs/notify/mark.c index d90deaa08e7..34c38fabf51 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -210,6 +210,42 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas } /* + * Sorting function for lists of fsnotify marks. + * + * Fanotify supports different notification classes (reflected as priority of + * notification group). Events shall be passed to notification groups in + * decreasing priority order. To achieve this marks in notification lists for + * inodes and vfsmounts are sorted so that priorities of corresponding groups + * are descending. + * + * Furthermore correct handling of the ignore mask requires processing inode + * and vfsmount marks of each group together. Using the group address as + * further sort criterion provides a unique sorting order and thus we can + * merge inode and vfsmount lists of marks in linear time and find groups + * present in both lists. + * + * A return value of 1 signifies that b has priority over a. + * A return value of 0 signifies that the two marks have to be handled together. + * A return value of -1 signifies that a has priority over b. + */ +int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b) +{ + if (a == b) + return 0; + if (!a) + return 1; + if (!b) + return -1; + if (a->priority < b->priority) + return 1; + if (a->priority > b->priority) + return -1; + if (a < b) + return 1; + return -1; +} + +/* * Attach an initialized mark to a given group and fs object. * These marks may be used for the fsnotify backend to determine which * event types should be delivered to which group. 
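The fsnotify_compare_groups() helper added above orders groups by descending priority and breaks ties by group address; the reworked loop in fsnotify() earlier in this series relies on that total order to merge the inode and vfsmount mark lists in a single linear pass, so that a group present in both lists has its two marks handled together. The following is only a rough standalone sketch of that merge, in plain userspace C: struct group, compare_groups(), the integer ids and the sample lists are illustrative stand-ins (not the kernel structures or API), kept solely to show the ordering contract (1: b first, -1: a first, 0: handle together).

#include <stdio.h>
#include <stddef.h>

/*
 * Simplified stand-in for struct fsnotify_group: just the two sort keys.
 * In the kernel the tie-breaker is the group's address; a plain id plays
 * that role here.
 */
struct group {
	unsigned int priority;
	unsigned long id;
};

/*
 * Same contract as fsnotify_compare_groups():
 *   1  -> b has priority over a
 *  -1  -> a has priority over b
 *   0  -> same group, handle its inode and vfsmount marks together
 */
static int compare_groups(const struct group *a, const struct group *b)
{
	if (a && b && a->priority == b->priority && a->id == b->id)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	return a->id < b->id ? 1 : -1;
}

int main(void)
{
	/* Both lists are already sorted by the comparator above. */
	struct group inode_list[]    = { {2, 0x10}, {1, 0x30}, {0, 0x50} };
	struct group vfsmount_list[] = { {2, 0x10}, {0, 0x40} };
	size_t ni = sizeof(inode_list) / sizeof(inode_list[0]);
	size_t nj = sizeof(vfsmount_list) / sizeof(vfsmount_list[0]);
	size_t i = 0, j = 0;

	/* Linear merge, mirroring the reworked delivery loop in fsnotify(). */
	while (i < ni || j < nj) {
		const struct group *gi = i < ni ? &inode_list[i] : NULL;
		const struct group *gj = j < nj ? &vfsmount_list[j] : NULL;
		int cmp = compare_groups(gi, gj);

		if (cmp > 0)
			gi = NULL;	/* vfsmount group is delivered first */
		else if (cmp < 0)
			gj = NULL;	/* inode group is delivered first */

		printf("deliver to group: inode mark %s, vfsmount mark %s\n",
		       gi ? "yes" : "no", gj ? "yes" : "no");

		if (gi)
			i++;
		if (gj)
			j++;
	}
	return 0;
}

Run as-is, the sketch prints one "deliver" line per group in descending priority order, and only the group present in both sample lists gets "yes" in both columns — the property the ignore-mask handling in fsnotify() depends on.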
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c index ac851e8376b..faefa72a11e 100644 --- a/fs/notify/vfsmount_mark.c +++ b/fs/notify/vfsmount_mark.c @@ -153,6 +153,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark, struct mount *m = real_mount(mnt); struct fsnotify_mark *lmark, *last = NULL; int ret = 0; + int cmp; mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; @@ -178,11 +179,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark, goto out; } - if (mark->group->priority < lmark->group->priority) - continue; - - if ((mark->group->priority == lmark->group->priority) && - (mark->group < lmark->group)) + cmp = fsnotify_compare_groups(lmark->group, mark->group); + if (cmp < 0) continue; hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list); diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 97de0fbd9f7..a9604400406 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -925,7 +925,7 @@ static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec, size_t veclen, size_t total) { int ret; - struct msghdr msg; + struct msghdr msg = {.msg_flags = 0,}; if (sock == NULL) { ret = -EINVAL; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 8add6f1030d..b931e04e338 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -158,7 +158,7 @@ bail_add: * NOTE: This dentry already has ->d_op set from * ocfs2_get_parent() and ocfs2_get_dentry() */ - if (ret) + if (!IS_ERR_OR_NULL(ret)) dentry = ret; status = ocfs2_dentry_attach_lock(dentry, inode, diff --git a/fs/open.c b/fs/open.c index d6fd3acde13..de92c13b58b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -823,8 +823,7 @@ struct file *dentry_open(const struct path *path, int flags, f = get_empty_filp(); if (!IS_ERR(f)) { f->f_flags = flags; - f->f_path = *path; - error = do_dentry_open(f, NULL, cred); + error = vfs_open(path, f, cred); if (!error) { /* from now on we need fput() to dispose of f */ error = open_check_o_direct(f); @@ -841,6 +840,26 @@ struct file *dentry_open(const struct path *path, int flags, } EXPORT_SYMBOL(dentry_open); +/** + * vfs_open - open the file at the given path + * @path: path to open + * @filp: newly allocated file with f_flag initialized + * @cred: credentials to use + */ +int vfs_open(const struct path *path, struct file *filp, + const struct cred *cred) +{ + struct inode *inode = path->dentry->d_inode; + + if (inode->i_op->dentry_open) + return inode->i_op->dentry_open(path->dentry, filp, cred); + else { + filp->f_path = *path; + return do_dentry_open(filp, NULL, cred); + } +} +EXPORT_SYMBOL(vfs_open); + static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) { int lookup_flags = 0; diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig new file mode 100644 index 00000000000..34355818a2e --- /dev/null +++ b/fs/overlayfs/Kconfig @@ -0,0 +1,10 @@ +config OVERLAY_FS + tristate "Overlay filesystem support" + help + An overlay filesystem combines two filesystems - an 'upper' filesystem + and a 'lower' filesystem. When a name exists in both filesystems, the + object in the 'upper' filesystem is visible while the object in the + 'lower' filesystem is either hidden or, in the case of directories, + merged with the 'upper' object. + + For more information see Documentation/filesystems/overlayfs.txt diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile new file mode 100644 index 00000000000..900daed3e91 --- /dev/null +++ b/fs/overlayfs/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the overlay filesystem. 
+# + +obj-$(CONFIG_OVERLAY_FS) += overlay.o + +overlay-objs := super.o inode.o dir.o readdir.o copy_up.o diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c new file mode 100644 index 00000000000..ea10a871910 --- /dev/null +++ b/fs/overlayfs/copy_up.c @@ -0,0 +1,414 @@ +/* + * + * Copyright (C) 2011 Novell Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/splice.h> +#include <linux/xattr.h> +#include <linux/security.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/namei.h> +#include "overlayfs.h" + +#define OVL_COPY_UP_CHUNK_SIZE (1 << 20) + +int ovl_copy_xattr(struct dentry *old, struct dentry *new) +{ + ssize_t list_size, size; + char *buf, *name, *value; + int error; + + if (!old->d_inode->i_op->getxattr || + !new->d_inode->i_op->getxattr) + return 0; + + list_size = vfs_listxattr(old, NULL, 0); + if (list_size <= 0) { + if (list_size == -EOPNOTSUPP) + return 0; + return list_size; + } + + buf = kzalloc(list_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + error = -ENOMEM; + value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL); + if (!value) + goto out; + + list_size = vfs_listxattr(old, buf, list_size); + if (list_size <= 0) { + error = list_size; + goto out_free_value; + } + + for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { + size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX); + if (size <= 0) { + error = size; + goto out_free_value; + } + error = vfs_setxattr(new, name, value, size, 0); + if (error) + goto out_free_value; + } + +out_free_value: + kfree(value); +out: + kfree(buf); + return error; +} + +static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) +{ + struct file *old_file; + struct file *new_file; + loff_t old_pos = 0; + loff_t new_pos = 0; + int error = 0; + + if (len == 0) + return 0; + + old_file = ovl_path_open(old, O_RDONLY); + if (IS_ERR(old_file)) + return PTR_ERR(old_file); + + new_file = ovl_path_open(new, O_WRONLY); + if (IS_ERR(new_file)) { + error = PTR_ERR(new_file); + goto out_fput; + } + + /* FIXME: copy up sparse files efficiently */ + while (len) { + size_t this_len = OVL_COPY_UP_CHUNK_SIZE; + long bytes; + + if (len < this_len) + this_len = len; + + if (signal_pending_state(TASK_KILLABLE, current)) { + error = -EINTR; + break; + } + + bytes = do_splice_direct(old_file, &old_pos, + new_file, &new_pos, + this_len, SPLICE_F_MOVE); + if (bytes <= 0) { + error = bytes; + break; + } + WARN_ON(old_pos != new_pos); + + len -= bytes; + } + + fput(new_file); +out_fput: + fput(old_file); + return error; +} + +static char *ovl_read_symlink(struct dentry *realdentry) +{ + int res; + char *buf; + struct inode *inode = realdentry->d_inode; + mm_segment_t old_fs; + + res = -EINVAL; + if (!inode->i_op->readlink) + goto err; + + res = -ENOMEM; + buf = (char *) __get_free_page(GFP_KERNEL); + if (!buf) + goto err; + + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ + res = inode->i_op->readlink(realdentry, + (char __user *)buf, PAGE_SIZE - 1); + set_fs(old_fs); + if (res < 0) { + free_page((unsigned long) buf); + goto err; + } + buf[res] = '\0'; + + return buf; + +err: + return ERR_PTR(res); +} + +static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat) +{ + struct iattr attr = { + 
.ia_valid = + ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET, + .ia_atime = stat->atime, + .ia_mtime = stat->mtime, + }; + + return notify_change(upperdentry, &attr, NULL); +} + +int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) +{ + int err = 0; + + if (!S_ISLNK(stat->mode)) { + struct iattr attr = { + .ia_valid = ATTR_MODE, + .ia_mode = stat->mode, + }; + err = notify_change(upperdentry, &attr, NULL); + } + if (!err) { + struct iattr attr = { + .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = stat->uid, + .ia_gid = stat->gid, + }; + err = notify_change(upperdentry, &attr, NULL); + } + if (!err) + ovl_set_timestamps(upperdentry, stat); + + return err; + +} + +static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, + struct dentry *dentry, struct path *lowerpath, + struct kstat *stat, struct iattr *attr, + const char *link) +{ + struct inode *wdir = workdir->d_inode; + struct inode *udir = upperdir->d_inode; + struct dentry *newdentry = NULL; + struct dentry *upper = NULL; + umode_t mode = stat->mode; + int err; + + newdentry = ovl_lookup_temp(workdir, dentry); + err = PTR_ERR(newdentry); + if (IS_ERR(newdentry)) + goto out; + + upper = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) + goto out1; + + /* Can't properly set mode on creation because of the umask */ + stat->mode &= S_IFMT; + err = ovl_create_real(wdir, newdentry, stat, link, NULL, true); + stat->mode = mode; + if (err) + goto out2; + + if (S_ISREG(stat->mode)) { + struct path upperpath; + ovl_path_upper(dentry, &upperpath); + BUG_ON(upperpath.dentry != NULL); + upperpath.dentry = newdentry; + + err = ovl_copy_up_data(lowerpath, &upperpath, stat->size); + if (err) + goto out_cleanup; + } + + err = ovl_copy_xattr(lowerpath->dentry, newdentry); + if (err) + goto out_cleanup; + + mutex_lock(&newdentry->d_inode->i_mutex); + err = ovl_set_attr(newdentry, stat); + if (!err && attr) + err = notify_change(newdentry, attr, NULL); + mutex_unlock(&newdentry->d_inode->i_mutex); + if (err) + goto out_cleanup; + + err = ovl_do_rename(wdir, newdentry, udir, upper, 0); + if (err) + goto out_cleanup; + + ovl_dentry_update(dentry, newdentry); + newdentry = NULL; + + /* + * Non-directores become opaque when copied up. + */ + if (!S_ISDIR(stat->mode)) + ovl_dentry_set_opaque(dentry, true); +out2: + dput(upper); +out1: + dput(newdentry); +out: + return err; + +out_cleanup: + ovl_cleanup(wdir, newdentry); + goto out; +} + +/* + * Copy up a single dentry + * + * Directory renames only allowed on "pure upper" (already created on + * upper filesystem, never copied up). Directories which are on lower or + * are merged may not be renamed. For these -EXDEV is returned and + * userspace has to deal with it. This means, when copying up a + * directory we can rely on it and ancestors being stable. + * + * Non-directory renames start with copy up of source if necessary. The + * actual rename will only proceed once the copy up was successful. Copy + * up uses upper parent i_mutex for exclusion. Since rename can change + * d_parent it is possible that the copy up will lock the old parent. At + * that point the file will have already been copied up anyway. 
+ */ +int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, + struct path *lowerpath, struct kstat *stat, + struct iattr *attr) +{ + struct dentry *workdir = ovl_workdir(dentry); + int err; + struct kstat pstat; + struct path parentpath; + struct dentry *upperdir; + struct dentry *upperdentry; + const struct cred *old_cred; + struct cred *override_cred; + char *link = NULL; + + ovl_path_upper(parent, &parentpath); + upperdir = parentpath.dentry; + + err = vfs_getattr(&parentpath, &pstat); + if (err) + return err; + + if (S_ISLNK(stat->mode)) { + link = ovl_read_symlink(lowerpath->dentry); + if (IS_ERR(link)) + return PTR_ERR(link); + } + + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_free_link; + + override_cred->fsuid = stat->uid; + override_cred->fsgid = stat->gid; + /* + * CAP_SYS_ADMIN for copying up extended attributes + * CAP_DAC_OVERRIDE for create + * CAP_FOWNER for chmod, timestamp update + * CAP_FSETID for chmod + * CAP_CHOWN for chown + * CAP_MKNOD for mknod + */ + cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + cap_raise(override_cred->cap_effective, CAP_FOWNER); + cap_raise(override_cred->cap_effective, CAP_FSETID); + cap_raise(override_cred->cap_effective, CAP_CHOWN); + cap_raise(override_cred->cap_effective, CAP_MKNOD); + old_cred = override_creds(override_cred); + + err = -EIO; + if (lock_rename(workdir, upperdir) != NULL) { + pr_err("overlayfs: failed to lock workdir+upperdir\n"); + goto out_unlock; + } + upperdentry = ovl_dentry_upper(dentry); + if (upperdentry) { + unlock_rename(workdir, upperdir); + err = 0; + /* Raced with another copy-up? Do the setattr here */ + if (attr) { + mutex_lock(&upperdentry->d_inode->i_mutex); + err = notify_change(upperdentry, attr, NULL); + mutex_unlock(&upperdentry->d_inode->i_mutex); + } + goto out_put_cred; + } + + err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, + stat, attr, link); + if (!err) { + /* Restore timestamps on parent (best effort) */ + ovl_set_timestamps(upperdir, &pstat); + } +out_unlock: + unlock_rename(workdir, upperdir); +out_put_cred: + revert_creds(old_cred); + put_cred(override_cred); + +out_free_link: + if (link) + free_page((unsigned long) link); + + return err; +} + +int ovl_copy_up(struct dentry *dentry) +{ + int err; + + err = 0; + while (!err) { + struct dentry *next; + struct dentry *parent; + struct path lowerpath; + struct kstat stat; + enum ovl_path_type type = ovl_path_type(dentry); + + if (type != OVL_PATH_LOWER) + break; + + next = dget(dentry); + /* find the topmost dentry not yet copied up */ + for (;;) { + parent = dget_parent(next); + + type = ovl_path_type(parent); + if (type != OVL_PATH_LOWER) + break; + + dput(next); + next = parent; + } + + ovl_path_lower(next, &lowerpath); + err = vfs_getattr(&lowerpath, &stat); + if (!err) + err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); + + dput(parent); + dput(next); + } + + return err; +} diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c new file mode 100644 index 00000000000..8ffc4b980f1 --- /dev/null +++ b/fs/overlayfs/dir.c @@ -0,0 +1,928 @@ +/* + * + * Copyright (C) 2011 Novell Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/xattr.h> +#include <linux/security.h> +#include <linux/cred.h> +#include "overlayfs.h" + +void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) +{ + int err; + + dget(wdentry); + if (S_ISDIR(wdentry->d_inode->i_mode)) + err = ovl_do_rmdir(wdir, wdentry); + else + err = ovl_do_unlink(wdir, wdentry); + dput(wdentry); + + if (err) { + pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n", + wdentry, err); + } +} + +struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) +{ + struct dentry *temp; + char name[20]; + + snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry); + + temp = lookup_one_len(name, workdir, strlen(name)); + if (!IS_ERR(temp) && temp->d_inode) { + pr_err("overlayfs: workdir/%s already exists\n", name); + dput(temp); + temp = ERR_PTR(-EIO); + } + + return temp; +} + +/* caller holds i_mutex on workdir */ +static struct dentry *ovl_whiteout(struct dentry *workdir, + struct dentry *dentry) +{ + int err; + struct dentry *whiteout; + struct inode *wdir = workdir->d_inode; + + whiteout = ovl_lookup_temp(workdir, dentry); + if (IS_ERR(whiteout)) + return whiteout; + + err = ovl_do_whiteout(wdir, whiteout); + if (err) { + dput(whiteout); + whiteout = ERR_PTR(err); + } + + return whiteout; +} + +int ovl_create_real(struct inode *dir, struct dentry *newdentry, + struct kstat *stat, const char *link, + struct dentry *hardlink, bool debug) +{ + int err; + + if (newdentry->d_inode) + return -ESTALE; + + if (hardlink) { + err = ovl_do_link(hardlink, dir, newdentry, debug); + } else { + switch (stat->mode & S_IFMT) { + case S_IFREG: + err = ovl_do_create(dir, newdentry, stat->mode, debug); + break; + + case S_IFDIR: + err = ovl_do_mkdir(dir, newdentry, stat->mode, debug); + break; + + case S_IFCHR: + case S_IFBLK: + case S_IFIFO: + case S_IFSOCK: + err = ovl_do_mknod(dir, newdentry, + stat->mode, stat->rdev, debug); + break; + + case S_IFLNK: + err = ovl_do_symlink(dir, newdentry, link, debug); + break; + + default: + err = -EPERM; + } + } + if (!err && WARN_ON(!newdentry->d_inode)) { + /* + * Not quite sure if non-instantiated dentry is legal or not. + * VFS doesn't seem to care so check and warn here. + */ + err = -ENOENT; + } + return err; +} + +static int ovl_set_opaque(struct dentry *upperdentry) +{ + return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); +} + +static void ovl_remove_opaque(struct dentry *upperdentry) +{ + int err; + + err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); + if (err) { + pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", + upperdentry->d_name.name, err); + } +} + +static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + int err; + enum ovl_path_type type; + struct path realpath; + + type = ovl_path_real(dentry, &realpath); + err = vfs_getattr(&realpath, stat); + if (err) + return err; + + stat->dev = dentry->d_sb->s_dev; + stat->ino = dentry->d_inode->i_ino; + + /* + * It's probably not worth it to count subdirs to get the + * correct link count. nlink=1 seems to pacify 'find' and + * other utilities. 
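+	 * (A merged directory is backed by both an upper and a lower
+	 * directory, so neither layer's link count is correct for the
+	 * overlay view on its own.)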
+ */ + if (type == OVL_PATH_MERGE) + stat->nlink = 1; + + return 0; +} + +static int ovl_create_upper(struct dentry *dentry, struct inode *inode, + struct kstat *stat, const char *link, + struct dentry *hardlink) +{ + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); + struct inode *udir = upperdir->d_inode; + struct dentry *newdentry; + int err; + + mutex_lock_nested(&udir->i_mutex, I_MUTEX_PARENT); + newdentry = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(newdentry); + if (IS_ERR(newdentry)) + goto out_unlock; + err = ovl_create_real(udir, newdentry, stat, link, hardlink, false); + if (err) + goto out_dput; + + ovl_dentry_version_inc(dentry->d_parent); + ovl_dentry_update(dentry, newdentry); + ovl_copyattr(newdentry->d_inode, inode); + d_instantiate(dentry, inode); + newdentry = NULL; +out_dput: + dput(newdentry); +out_unlock: + mutex_unlock(&udir->i_mutex); + return err; +} + +static int ovl_lock_rename_workdir(struct dentry *workdir, + struct dentry *upperdir) +{ + /* Workdir should not be the same as upperdir */ + if (workdir == upperdir) + goto err; + + /* Workdir should not be subdir of upperdir and vice versa */ + if (lock_rename(workdir, upperdir) != NULL) + goto err_unlock; + + return 0; + +err_unlock: + unlock_rename(workdir, upperdir); +err: + pr_err("overlayfs: failed to lock workdir+upperdir\n"); + return -EIO; +} + +static struct dentry *ovl_clear_empty(struct dentry *dentry, + struct list_head *list) +{ + struct dentry *workdir = ovl_workdir(dentry); + struct inode *wdir = workdir->d_inode; + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); + struct inode *udir = upperdir->d_inode; + struct path upperpath; + struct dentry *upper; + struct dentry *opaquedir; + struct kstat stat; + int err; + + err = ovl_lock_rename_workdir(workdir, upperdir); + if (err) + goto out; + + ovl_path_upper(dentry, &upperpath); + err = vfs_getattr(&upperpath, &stat); + if (err) + goto out_unlock; + + err = -ESTALE; + if (!S_ISDIR(stat.mode)) + goto out_unlock; + upper = upperpath.dentry; + if (upper->d_parent->d_inode != udir) + goto out_unlock; + + opaquedir = ovl_lookup_temp(workdir, dentry); + err = PTR_ERR(opaquedir); + if (IS_ERR(opaquedir)) + goto out_unlock; + + err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true); + if (err) + goto out_dput; + + err = ovl_copy_xattr(upper, opaquedir); + if (err) + goto out_cleanup; + + err = ovl_set_opaque(opaquedir); + if (err) + goto out_cleanup; + + mutex_lock(&opaquedir->d_inode->i_mutex); + err = ovl_set_attr(opaquedir, &stat); + mutex_unlock(&opaquedir->d_inode->i_mutex); + if (err) + goto out_cleanup; + + err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE); + if (err) + goto out_cleanup; + + ovl_cleanup_whiteouts(upper, list); + ovl_cleanup(wdir, upper); + unlock_rename(workdir, upperdir); + + /* dentry's upper doesn't match now, get rid of it */ + d_drop(dentry); + + return opaquedir; + +out_cleanup: + ovl_cleanup(wdir, opaquedir); +out_dput: + dput(opaquedir); +out_unlock: + unlock_rename(workdir, upperdir); +out: + return ERR_PTR(err); +} + +static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry) +{ + int err; + struct dentry *ret = NULL; + LIST_HEAD(list); + + err = ovl_check_empty_dir(dentry, &list); + if (err) + ret = ERR_PTR(err); + else { + /* + * If no upperdentry then skip clearing whiteouts. + * + * Can race with copy-up, since we don't hold the upperdir + * mutex. 
Doesn't matter, since copy-up can't create a + * non-empty directory from an empty one. + */ + if (ovl_dentry_upper(dentry)) + ret = ovl_clear_empty(dentry, &list); + } + + ovl_cache_free(&list); + + return ret; +} + +static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, + struct kstat *stat, const char *link, + struct dentry *hardlink) +{ + struct dentry *workdir = ovl_workdir(dentry); + struct inode *wdir = workdir->d_inode; + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); + struct inode *udir = upperdir->d_inode; + struct dentry *upper; + struct dentry *newdentry; + int err; + + err = ovl_lock_rename_workdir(workdir, upperdir); + if (err) + goto out; + + newdentry = ovl_lookup_temp(workdir, dentry); + err = PTR_ERR(newdentry); + if (IS_ERR(newdentry)) + goto out_unlock; + + upper = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) + goto out_dput; + + err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true); + if (err) + goto out_dput2; + + if (S_ISDIR(stat->mode)) { + err = ovl_set_opaque(newdentry); + if (err) + goto out_cleanup; + + err = ovl_do_rename(wdir, newdentry, udir, upper, + RENAME_EXCHANGE); + if (err) + goto out_cleanup; + + ovl_cleanup(wdir, upper); + } else { + err = ovl_do_rename(wdir, newdentry, udir, upper, 0); + if (err) + goto out_cleanup; + } + ovl_dentry_version_inc(dentry->d_parent); + ovl_dentry_update(dentry, newdentry); + ovl_copyattr(newdentry->d_inode, inode); + d_instantiate(dentry, inode); + newdentry = NULL; +out_dput2: + dput(upper); +out_dput: + dput(newdentry); +out_unlock: + unlock_rename(workdir, upperdir); +out: + return err; + +out_cleanup: + ovl_cleanup(wdir, newdentry); + goto out_dput2; +} + +static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev, + const char *link, struct dentry *hardlink) +{ + int err; + struct inode *inode; + struct kstat stat = { + .mode = mode, + .rdev = rdev, + }; + + err = -ENOMEM; + inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata); + if (!inode) + goto out; + + err = ovl_copy_up(dentry->d_parent); + if (err) + goto out_iput; + + if (!ovl_dentry_is_opaque(dentry)) { + err = ovl_create_upper(dentry, inode, &stat, link, hardlink); + } else { + const struct cred *old_cred; + struct cred *override_cred; + + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_iput; + + /* + * CAP_SYS_ADMIN for setting opaque xattr + * CAP_DAC_OVERRIDE for create in workdir, rename + * CAP_FOWNER for removing whiteout from sticky dir + */ + cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + cap_raise(override_cred->cap_effective, CAP_FOWNER); + old_cred = override_creds(override_cred); + + err = ovl_create_over_whiteout(dentry, inode, &stat, link, + hardlink); + + revert_creds(old_cred); + put_cred(override_cred); + } + + if (!err) + inode = NULL; +out_iput: + iput(inode); +out: + return err; +} + +static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, + const char *link) +{ + int err; + + err = ovl_want_write(dentry); + if (!err) { + err = ovl_create_or_link(dentry, mode, rdev, link, NULL); + ovl_drop_write(dentry); + } + + return err; +} + +static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode, + bool excl) +{ + return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); +} + +static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 
+{ + return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); +} + +static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, + dev_t rdev) +{ + /* Don't allow creation of "whiteout" on overlay */ + if (S_ISCHR(mode) && rdev == WHITEOUT_DEV) + return -EPERM; + + return ovl_create_object(dentry, mode, rdev, NULL); +} + +static int ovl_symlink(struct inode *dir, struct dentry *dentry, + const char *link) +{ + return ovl_create_object(dentry, S_IFLNK, 0, link); +} + +static int ovl_link(struct dentry *old, struct inode *newdir, + struct dentry *new) +{ + int err; + struct dentry *upper; + + err = ovl_want_write(old); + if (err) + goto out; + + err = ovl_copy_up(old); + if (err) + goto out_drop_write; + + upper = ovl_dentry_upper(old); + err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper); + +out_drop_write: + ovl_drop_write(old); +out: + return err; +} + +static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) +{ + struct dentry *workdir = ovl_workdir(dentry); + struct inode *wdir = workdir->d_inode; + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); + struct inode *udir = upperdir->d_inode; + struct dentry *whiteout; + struct dentry *upper; + struct dentry *opaquedir = NULL; + int err; + + if (is_dir) { + opaquedir = ovl_check_empty_and_clear(dentry); + err = PTR_ERR(opaquedir); + if (IS_ERR(opaquedir)) + goto out; + } + + err = ovl_lock_rename_workdir(workdir, upperdir); + if (err) + goto out_dput; + + whiteout = ovl_whiteout(workdir, dentry); + err = PTR_ERR(whiteout); + if (IS_ERR(whiteout)) + goto out_unlock; + + upper = ovl_dentry_upper(dentry); + if (!upper) { + upper = lookup_one_len(dentry->d_name.name, upperdir, + dentry->d_name.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) + goto kill_whiteout; + + err = ovl_do_rename(wdir, whiteout, udir, upper, 0); + dput(upper); + if (err) + goto kill_whiteout; + } else { + int flags = 0; + + if (opaquedir) + upper = opaquedir; + err = -ESTALE; + if (upper->d_parent != upperdir) + goto kill_whiteout; + + if (is_dir) + flags |= RENAME_EXCHANGE; + + err = ovl_do_rename(wdir, whiteout, udir, upper, flags); + if (err) + goto kill_whiteout; + + if (is_dir) + ovl_cleanup(wdir, upper); + } + ovl_dentry_version_inc(dentry->d_parent); +out_d_drop: + d_drop(dentry); + dput(whiteout); +out_unlock: + unlock_rename(workdir, upperdir); +out_dput: + dput(opaquedir); +out: + return err; + +kill_whiteout: + ovl_cleanup(wdir, whiteout); + goto out_d_drop; +} + +static int ovl_remove_upper(struct dentry *dentry, bool is_dir) +{ + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); + struct inode *dir = upperdir->d_inode; + struct dentry *upper = ovl_dentry_upper(dentry); + int err; + + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); + err = -ESTALE; + if (upper->d_parent == upperdir) { + /* Don't let d_delete() think it can reset d_inode */ + dget(upper); + if (is_dir) + err = vfs_rmdir(dir, upper); + else + err = vfs_unlink(dir, upper, NULL); + dput(upper); + ovl_dentry_version_inc(dentry->d_parent); + } + + /* + * Keeping this dentry hashed would mean having to release + * upperpath/lowerpath, which could only be done if we are the + * sole user of this dentry. Too tricky... Just unhash for + * now. 
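+	 * The next lookup of this name goes through ovl_lookup() again
+	 * and sees the updated upper layer.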
+ */ + d_drop(dentry); + mutex_unlock(&dir->i_mutex); + + return err; +} + +static inline int ovl_check_sticky(struct dentry *dentry) +{ + struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode; + struct inode *inode = ovl_dentry_real(dentry)->d_inode; + + if (check_sticky(dir, inode)) + return -EPERM; + + return 0; +} + +static int ovl_do_remove(struct dentry *dentry, bool is_dir) +{ + enum ovl_path_type type; + int err; + + err = ovl_check_sticky(dentry); + if (err) + goto out; + + err = ovl_want_write(dentry); + if (err) + goto out; + + err = ovl_copy_up(dentry->d_parent); + if (err) + goto out_drop_write; + + type = ovl_path_type(dentry); + if (type == OVL_PATH_PURE_UPPER) { + err = ovl_remove_upper(dentry, is_dir); + } else { + const struct cred *old_cred; + struct cred *override_cred; + + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_drop_write; + + /* + * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir + * CAP_DAC_OVERRIDE for create in workdir, rename + * CAP_FOWNER for removing whiteout from sticky dir + * CAP_FSETID for chmod of opaque dir + * CAP_CHOWN for chown of opaque dir + */ + cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + cap_raise(override_cred->cap_effective, CAP_FOWNER); + cap_raise(override_cred->cap_effective, CAP_FSETID); + cap_raise(override_cred->cap_effective, CAP_CHOWN); + old_cred = override_creds(override_cred); + + err = ovl_remove_and_whiteout(dentry, is_dir); + + revert_creds(old_cred); + put_cred(override_cred); + } +out_drop_write: + ovl_drop_write(dentry); +out: + return err; +} + +static int ovl_unlink(struct inode *dir, struct dentry *dentry) +{ + return ovl_do_remove(dentry, false); +} + +static int ovl_rmdir(struct inode *dir, struct dentry *dentry) +{ + return ovl_do_remove(dentry, true); +} + +static int ovl_rename2(struct inode *olddir, struct dentry *old, + struct inode *newdir, struct dentry *new, + unsigned int flags) +{ + int err; + enum ovl_path_type old_type; + enum ovl_path_type new_type; + struct dentry *old_upperdir; + struct dentry *new_upperdir; + struct dentry *olddentry; + struct dentry *newdentry; + struct dentry *trap; + bool old_opaque; + bool new_opaque; + bool new_create = false; + bool cleanup_whiteout = false; + bool overwrite = !(flags & RENAME_EXCHANGE); + bool is_dir = S_ISDIR(old->d_inode->i_mode); + bool new_is_dir = false; + struct dentry *opaquedir = NULL; + const struct cred *old_cred = NULL; + struct cred *override_cred = NULL; + + err = -EINVAL; + if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) + goto out; + + flags &= ~RENAME_NOREPLACE; + + err = ovl_check_sticky(old); + if (err) + goto out; + + /* Don't copy up directory trees */ + old_type = ovl_path_type(old); + err = -EXDEV; + if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir) + goto out; + + if (new->d_inode) { + err = ovl_check_sticky(new); + if (err) + goto out; + + if (S_ISDIR(new->d_inode->i_mode)) + new_is_dir = true; + + new_type = ovl_path_type(new); + err = -EXDEV; + if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) + goto out; + + err = 0; + if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { + if (ovl_dentry_lower(old)->d_inode == + ovl_dentry_lower(new)->d_inode) + goto out; + } + if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) { + if (ovl_dentry_upper(old)->d_inode == + ovl_dentry_upper(new)->d_inode) + goto out; + } + } else { + if 
(ovl_dentry_is_opaque(new)) + new_type = OVL_PATH_UPPER; + else + new_type = OVL_PATH_PURE_UPPER; + } + + err = ovl_want_write(old); + if (err) + goto out; + + err = ovl_copy_up(old); + if (err) + goto out_drop_write; + + err = ovl_copy_up(new->d_parent); + if (err) + goto out_drop_write; + if (!overwrite) { + err = ovl_copy_up(new); + if (err) + goto out_drop_write; + } + + old_opaque = old_type != OVL_PATH_PURE_UPPER; + new_opaque = new_type != OVL_PATH_PURE_UPPER; + + if (old_opaque || new_opaque) { + err = -ENOMEM; + override_cred = prepare_creds(); + if (!override_cred) + goto out_drop_write; + + /* + * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir + * CAP_DAC_OVERRIDE for create in workdir + * CAP_FOWNER for removing whiteout from sticky dir + * CAP_FSETID for chmod of opaque dir + * CAP_CHOWN for chown of opaque dir + */ + cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + cap_raise(override_cred->cap_effective, CAP_FOWNER); + cap_raise(override_cred->cap_effective, CAP_FSETID); + cap_raise(override_cred->cap_effective, CAP_CHOWN); + old_cred = override_creds(override_cred); + } + + if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { + opaquedir = ovl_check_empty_and_clear(new); + err = PTR_ERR(opaquedir); + if (IS_ERR(opaquedir)) { + opaquedir = NULL; + goto out_revert_creds; + } + } + + if (overwrite) { + if (old_opaque) { + if (new->d_inode || !new_opaque) { + /* Whiteout source */ + flags |= RENAME_WHITEOUT; + } else { + /* Switch whiteouts */ + flags |= RENAME_EXCHANGE; + } + } else if (is_dir && !new->d_inode && new_opaque) { + flags |= RENAME_EXCHANGE; + cleanup_whiteout = true; + } + } + + old_upperdir = ovl_dentry_upper(old->d_parent); + new_upperdir = ovl_dentry_upper(new->d_parent); + + trap = lock_rename(new_upperdir, old_upperdir); + + olddentry = ovl_dentry_upper(old); + newdentry = ovl_dentry_upper(new); + if (newdentry) { + if (opaquedir) { + newdentry = opaquedir; + opaquedir = NULL; + } else { + dget(newdentry); + } + } else { + new_create = true; + newdentry = lookup_one_len(new->d_name.name, new_upperdir, + new->d_name.len); + err = PTR_ERR(newdentry); + if (IS_ERR(newdentry)) + goto out_unlock; + } + + err = -ESTALE; + if (olddentry->d_parent != old_upperdir) + goto out_dput; + if (newdentry->d_parent != new_upperdir) + goto out_dput; + if (olddentry == trap) + goto out_dput; + if (newdentry == trap) + goto out_dput; + + if (is_dir && !old_opaque && new_opaque) { + err = ovl_set_opaque(olddentry); + if (err) + goto out_dput; + } + if (!overwrite && new_is_dir && old_opaque && !new_opaque) { + err = ovl_set_opaque(newdentry); + if (err) + goto out_dput; + } + + if (old_opaque || new_opaque) { + err = ovl_do_rename(old_upperdir->d_inode, olddentry, + new_upperdir->d_inode, newdentry, + flags); + } else { + /* No debug for the plain case */ + BUG_ON(flags & ~RENAME_EXCHANGE); + err = vfs_rename(old_upperdir->d_inode, olddentry, + new_upperdir->d_inode, newdentry, + NULL, flags); + } + + if (err) { + if (is_dir && !old_opaque && new_opaque) + ovl_remove_opaque(olddentry); + if (!overwrite && new_is_dir && old_opaque && !new_opaque) + ovl_remove_opaque(newdentry); + goto out_dput; + } + + if (is_dir && old_opaque && !new_opaque) + ovl_remove_opaque(olddentry); + if (!overwrite && new_is_dir && !old_opaque && new_opaque) + ovl_remove_opaque(newdentry); + + if (old_opaque != new_opaque) { + ovl_dentry_set_opaque(old, new_opaque); + if (!overwrite) + 
ovl_dentry_set_opaque(new, old_opaque); + } + + if (cleanup_whiteout) + ovl_cleanup(old_upperdir->d_inode, newdentry); + + ovl_dentry_version_inc(old->d_parent); + ovl_dentry_version_inc(new->d_parent); + +out_dput: + dput(newdentry); +out_unlock: + unlock_rename(new_upperdir, old_upperdir); +out_revert_creds: + if (old_opaque || new_opaque) { + revert_creds(old_cred); + put_cred(override_cred); + } +out_drop_write: + ovl_drop_write(old); +out: + dput(opaquedir); + return err; +} + +const struct inode_operations ovl_dir_inode_operations = { + .lookup = ovl_lookup, + .mkdir = ovl_mkdir, + .symlink = ovl_symlink, + .unlink = ovl_unlink, + .rmdir = ovl_rmdir, + .rename2 = ovl_rename2, + .link = ovl_link, + .setattr = ovl_setattr, + .create = ovl_create, + .mknod = ovl_mknod, + .permission = ovl_permission, + .getattr = ovl_dir_getattr, + .setxattr = ovl_setxattr, + .getxattr = ovl_getxattr, + .listxattr = ovl_listxattr, + .removexattr = ovl_removexattr, +}; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c new file mode 100644 index 00000000000..07d74b24913 --- /dev/null +++ b/fs/overlayfs/inode.c @@ -0,0 +1,434 @@ +/* + * + * Copyright (C) 2011 Novell Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/xattr.h> +#include "overlayfs.h" + +static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, + bool no_data) +{ + int err; + struct dentry *parent; + struct kstat stat; + struct path lowerpath; + + parent = dget_parent(dentry); + err = ovl_copy_up(parent); + if (err) + goto out_dput_parent; + + ovl_path_lower(dentry, &lowerpath); + err = vfs_getattr(&lowerpath, &stat); + if (err) + goto out_dput_parent; + + if (no_data) + stat.size = 0; + + err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr); + +out_dput_parent: + dput(parent); + return err; +} + +int ovl_setattr(struct dentry *dentry, struct iattr *attr) +{ + int err; + struct dentry *upperdentry; + + err = ovl_want_write(dentry); + if (err) + goto out; + + upperdentry = ovl_dentry_upper(dentry); + if (upperdentry) { + mutex_lock(&upperdentry->d_inode->i_mutex); + err = notify_change(upperdentry, attr, NULL); + mutex_unlock(&upperdentry->d_inode->i_mutex); + } else { + err = ovl_copy_up_last(dentry, attr, false); + } + ovl_drop_write(dentry); +out: + return err; +} + +static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct path realpath; + + ovl_path_real(dentry, &realpath); + return vfs_getattr(&realpath, stat); +} + +int ovl_permission(struct inode *inode, int mask) +{ + struct ovl_entry *oe; + struct dentry *alias = NULL; + struct inode *realinode; + struct dentry *realdentry; + bool is_upper; + int err; + + if (S_ISDIR(inode->i_mode)) { + oe = inode->i_private; + } else if (mask & MAY_NOT_BLOCK) { + return -ECHILD; + } else { + /* + * For non-directories find an alias and get the info + * from there. 
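+	 * (A non-directory inode can have several dentry aliases, e.g.
+	 * because of hard links, and only directories have the ovl_entry
+	 * stashed in i_private by ovl_new_inode(), so any alias is good
+	 * enough here.)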
+ */ + alias = d_find_any_alias(inode); + if (WARN_ON(!alias)) + return -ENOENT; + + oe = alias->d_fsdata; + } + + realdentry = ovl_entry_real(oe, &is_upper); + + /* Careful in RCU walk mode */ + realinode = ACCESS_ONCE(realdentry->d_inode); + if (!realinode) { + WARN_ON(!(mask & MAY_NOT_BLOCK)); + err = -ENOENT; + goto out_dput; + } + + if (mask & MAY_WRITE) { + umode_t mode = realinode->i_mode; + + /* + * Writes will always be redirected to upper layer, so + * ignore lower layer being read-only. + * + * If the overlay itself is read-only then proceed + * with the permission check, don't return EROFS. + * This will only happen if this is the lower layer of + * another overlayfs. + * + * If upper fs becomes read-only after the overlay was + * constructed return EROFS to prevent modification of + * upper layer. + */ + err = -EROFS; + if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) && + (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) + goto out_dput; + } + + err = __inode_permission(realinode, mask); +out_dput: + dput(alias); + return err; +} + + +struct ovl_link_data { + struct dentry *realdentry; + void *cookie; +}; + +static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + void *ret; + struct dentry *realdentry; + struct inode *realinode; + + realdentry = ovl_dentry_real(dentry); + realinode = realdentry->d_inode; + + if (WARN_ON(!realinode->i_op->follow_link)) + return ERR_PTR(-EPERM); + + ret = realinode->i_op->follow_link(realdentry, nd); + if (IS_ERR(ret)) + return ret; + + if (realinode->i_op->put_link) { + struct ovl_link_data *data; + + data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL); + if (!data) { + realinode->i_op->put_link(realdentry, nd, ret); + return ERR_PTR(-ENOMEM); + } + data->realdentry = realdentry; + data->cookie = ret; + + return data; + } else { + return NULL; + } +} + +static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c) +{ + struct inode *realinode; + struct ovl_link_data *data = c; + + if (!data) + return; + + realinode = data->realdentry->d_inode; + realinode->i_op->put_link(data->realdentry, nd, data->cookie); + kfree(data); +} + +static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) +{ + struct path realpath; + struct inode *realinode; + + ovl_path_real(dentry, &realpath); + realinode = realpath.dentry->d_inode; + + if (!realinode->i_op->readlink) + return -EINVAL; + + touch_atime(&realpath); + + return realinode->i_op->readlink(realpath.dentry, buf, bufsiz); +} + + +static bool ovl_is_private_xattr(const char *name) +{ + return strncmp(name, "trusted.overlay.", 14) == 0; +} + +int ovl_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + int err; + struct dentry *upperdentry; + + err = ovl_want_write(dentry); + if (err) + goto out; + + err = -EPERM; + if (ovl_is_private_xattr(name)) + goto out_drop_write; + + err = ovl_copy_up(dentry); + if (err) + goto out_drop_write; + + upperdentry = ovl_dentry_upper(dentry); + err = vfs_setxattr(upperdentry, name, value, size, flags); + +out_drop_write: + ovl_drop_write(dentry); +out: + return err; +} + +static bool ovl_need_xattr_filter(struct dentry *dentry, + enum ovl_path_type type) +{ + return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode); +} + +ssize_t ovl_getxattr(struct dentry *dentry, const char *name, + void *value, size_t size) +{ + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); + + if (ovl_need_xattr_filter(dentry, type) && 
ovl_is_private_xattr(name)) + return -ENODATA; + + return vfs_getxattr(realpath.dentry, name, value, size); +} + +ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) +{ + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); + ssize_t res; + int off; + + res = vfs_listxattr(realpath.dentry, list, size); + if (res <= 0 || size == 0) + return res; + + if (!ovl_need_xattr_filter(dentry, type)) + return res; + + /* filter out private xattrs */ + for (off = 0; off < res;) { + char *s = list + off; + size_t slen = strlen(s) + 1; + + BUG_ON(off + slen > res); + + if (ovl_is_private_xattr(s)) { + res -= slen; + memmove(s, s + slen, res - off); + } else { + off += slen; + } + } + + return res; +} + +int ovl_removexattr(struct dentry *dentry, const char *name) +{ + int err; + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); + + err = ovl_want_write(dentry); + if (err) + goto out; + + err = -ENODATA; + if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) + goto out_drop_write; + + if (type == OVL_PATH_LOWER) { + err = vfs_getxattr(realpath.dentry, name, NULL, 0); + if (err < 0) + goto out_drop_write; + + err = ovl_copy_up(dentry); + if (err) + goto out_drop_write; + + ovl_path_upper(dentry, &realpath); + } + + err = vfs_removexattr(realpath.dentry, name); +out_drop_write: + ovl_drop_write(dentry); +out: + return err; +} + +static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, + struct dentry *realdentry) +{ + if (type != OVL_PATH_LOWER) + return false; + + if (special_file(realdentry->d_inode->i_mode)) + return false; + + if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC)) + return false; + + return true; +} + +static int ovl_dentry_open(struct dentry *dentry, struct file *file, + const struct cred *cred) +{ + int err; + struct path realpath; + enum ovl_path_type type; + bool want_write = false; + + type = ovl_path_real(dentry, &realpath); + if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) { + want_write = true; + err = ovl_want_write(dentry); + if (err) + goto out; + + if (file->f_flags & O_TRUNC) + err = ovl_copy_up_last(dentry, NULL, true); + else + err = ovl_copy_up(dentry); + if (err) + goto out_drop_write; + + ovl_path_upper(dentry, &realpath); + } + + err = vfs_open(&realpath, file, cred); +out_drop_write: + if (want_write) + ovl_drop_write(dentry); +out: + return err; +} + +static const struct inode_operations ovl_file_inode_operations = { + .setattr = ovl_setattr, + .permission = ovl_permission, + .getattr = ovl_getattr, + .setxattr = ovl_setxattr, + .getxattr = ovl_getxattr, + .listxattr = ovl_listxattr, + .removexattr = ovl_removexattr, + .dentry_open = ovl_dentry_open, +}; + +static const struct inode_operations ovl_symlink_inode_operations = { + .setattr = ovl_setattr, + .follow_link = ovl_follow_link, + .put_link = ovl_put_link, + .readlink = ovl_readlink, + .getattr = ovl_getattr, + .setxattr = ovl_setxattr, + .getxattr = ovl_getxattr, + .listxattr = ovl_listxattr, + .removexattr = ovl_removexattr, +}; + +struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, + struct ovl_entry *oe) +{ + struct inode *inode; + + inode = new_inode(sb); + if (!inode) + return NULL; + + mode &= S_IFMT; + + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_flags |= S_NOATIME | S_NOCMTIME; + + switch (mode) { + case S_IFDIR: + inode->i_private = oe; + inode->i_op = &ovl_dir_inode_operations; + inode->i_fop = &ovl_dir_operations; + break; + 
+ case S_IFLNK: + inode->i_op = &ovl_symlink_inode_operations; + break; + + case S_IFREG: + case S_IFSOCK: + case S_IFBLK: + case S_IFCHR: + case S_IFIFO: + inode->i_op = &ovl_file_inode_operations; + break; + + default: + WARN(1, "illegal file type: %i\n", mode); + iput(inode); + inode = NULL; + } + + return inode; + +} diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h new file mode 100644 index 00000000000..814bed33dd0 --- /dev/null +++ b/fs/overlayfs/overlayfs.h @@ -0,0 +1,191 @@ +/* + * + * Copyright (C) 2011 Novell Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/kernel.h> + +struct ovl_entry; + +enum ovl_path_type { + OVL_PATH_PURE_UPPER, + OVL_PATH_UPPER, + OVL_PATH_MERGE, + OVL_PATH_LOWER, +}; + +extern const char *ovl_opaque_xattr; + +static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) +{ + int err = vfs_rmdir(dir, dentry); + pr_debug("rmdir(%pd2) = %i\n", dentry, err); + return err; +} + +static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry) +{ + int err = vfs_unlink(dir, dentry, NULL); + pr_debug("unlink(%pd2) = %i\n", dentry, err); + return err; +} + +static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry, bool debug) +{ + int err = vfs_link(old_dentry, dir, new_dentry, NULL); + if (debug) { + pr_debug("link(%pd2, %pd2) = %i\n", + old_dentry, new_dentry, err); + } + return err; +} + +static inline int ovl_do_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool debug) +{ + int err = vfs_create(dir, dentry, mode, true); + if (debug) + pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err); + return err; +} + +static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry, + umode_t mode, bool debug) +{ + int err = vfs_mkdir(dir, dentry, mode); + if (debug) + pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err); + return err; +} + +static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev, bool debug) +{ + int err = vfs_mknod(dir, dentry, mode, dev); + if (debug) { + pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", + dentry, mode, dev, err); + } + return err; +} + +static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry, + const char *oldname, bool debug) +{ + int err = vfs_symlink(dir, dentry, oldname); + if (debug) + pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err); + return err; +} + +static inline int ovl_do_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + int err = vfs_setxattr(dentry, name, value, size, flags); + pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n", + dentry, name, (int) size, (char *) value, flags, err); + return err; +} + +static inline int ovl_do_removexattr(struct dentry *dentry, const char *name) +{ + int err = vfs_removexattr(dentry, name); + pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err); + return err; +} + +static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry, + struct inode *newdir, struct dentry *newdentry, + unsigned int flags) +{ + int err; + + pr_debug("rename2(%pd2, %pd2, 0x%x)\n", + olddentry, newdentry, flags); + + err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags); + + if (err) { + pr_debug("...rename2(%pd2, %pd2, ...) 
= %i\n", + olddentry, newdentry, err); + } + return err; +} + +static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) +{ + int err = vfs_whiteout(dir, dentry); + pr_debug("whiteout(%pd2) = %i\n", dentry, err); + return err; +} + +enum ovl_path_type ovl_path_type(struct dentry *dentry); +u64 ovl_dentry_version_get(struct dentry *dentry); +void ovl_dentry_version_inc(struct dentry *dentry); +void ovl_path_upper(struct dentry *dentry, struct path *path); +void ovl_path_lower(struct dentry *dentry, struct path *path); +enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); +struct dentry *ovl_dentry_upper(struct dentry *dentry); +struct dentry *ovl_dentry_lower(struct dentry *dentry); +struct dentry *ovl_dentry_real(struct dentry *dentry); +struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper); +struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); +void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); +struct dentry *ovl_workdir(struct dentry *dentry); +int ovl_want_write(struct dentry *dentry); +void ovl_drop_write(struct dentry *dentry); +bool ovl_dentry_is_opaque(struct dentry *dentry); +void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque); +bool ovl_is_whiteout(struct dentry *dentry); +void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); +struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags); +struct file *ovl_path_open(struct path *path, int flags); + +struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry, + struct kstat *stat, const char *link); + +/* readdir.c */ +extern const struct file_operations ovl_dir_operations; +int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list); +void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list); +void ovl_cache_free(struct list_head *list); + +/* inode.c */ +int ovl_setattr(struct dentry *dentry, struct iattr *attr); +int ovl_permission(struct inode *inode, int mask); +int ovl_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +ssize_t ovl_getxattr(struct dentry *dentry, const char *name, + void *value, size_t size); +ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); +int ovl_removexattr(struct dentry *dentry, const char *name); + +struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, + struct ovl_entry *oe); +static inline void ovl_copyattr(struct inode *from, struct inode *to) +{ + to->i_uid = from->i_uid; + to->i_gid = from->i_gid; +} + +/* dir.c */ +extern const struct inode_operations ovl_dir_inode_operations; +struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); +int ovl_create_real(struct inode *dir, struct dentry *newdentry, + struct kstat *stat, const char *link, + struct dentry *hardlink, bool debug); +void ovl_cleanup(struct inode *dir, struct dentry *dentry); + +/* copy_up.c */ +int ovl_copy_up(struct dentry *dentry); +int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, + struct path *lowerpath, struct kstat *stat, + struct iattr *attr); +int ovl_copy_xattr(struct dentry *old, struct dentry *new); +int ovl_set_attr(struct dentry *upper, struct kstat *stat); diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c new file mode 100644 index 00000000000..ab1e3dcbed9 --- /dev/null +++ b/fs/overlayfs/readdir.c @@ -0,0 +1,586 @@ +/* + * + * Copyright (C) 2011 Novell Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/namei.h> +#include <linux/file.h> +#include <linux/xattr.h> +#include <linux/rbtree.h> +#include <linux/security.h> +#include <linux/cred.h> +#include "overlayfs.h" + +struct ovl_cache_entry { + unsigned int len; + unsigned int type; + u64 ino; + struct list_head l_node; + struct rb_node node; + bool is_whiteout; + bool is_cursor; + char name[]; +}; + +struct ovl_dir_cache { + long refcount; + u64 version; + struct list_head entries; +}; + +struct ovl_readdir_data { + struct dir_context ctx; + bool is_merge; + struct rb_root root; + struct list_head *list; + struct list_head middle; + int count; + int err; +}; + +struct ovl_dir_file { + bool is_real; + bool is_upper; + struct ovl_dir_cache *cache; + struct ovl_cache_entry cursor; + struct file *realfile; + struct file *upperfile; +}; + +static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) +{ + return container_of(n, struct ovl_cache_entry, node); +} + +static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, + const char *name, int len) +{ + struct rb_node *node = root->rb_node; + int cmp; + + while (node) { + struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); + + cmp = strncmp(name, p->name, len); + if (cmp > 0) + node = p->node.rb_right; + else if (cmp < 0 || len < p->len) + node = p->node.rb_left; + else + return p; + } + + return NULL; +} + +static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, + u64 ino, unsigned int d_type) +{ + struct ovl_cache_entry *p; + size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); + + p = kmalloc(size, GFP_KERNEL); + if (p) { + memcpy(p->name, name, len); + p->name[len] = '\0'; + p->len = len; + p->type = d_type; + p->ino = ino; + p->is_whiteout = false; + p->is_cursor = false; + } + + return p; +} + +static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, + const char *name, int len, u64 ino, + unsigned int d_type) +{ + struct rb_node **newp = &rdd->root.rb_node; + struct rb_node *parent = NULL; + struct ovl_cache_entry *p; + + while (*newp) { + int cmp; + struct ovl_cache_entry *tmp; + + parent = *newp; + tmp = ovl_cache_entry_from_node(*newp); + cmp = strncmp(name, tmp->name, len); + if (cmp > 0) + newp = &tmp->node.rb_right; + else if (cmp < 0 || len < tmp->len) + newp = &tmp->node.rb_left; + else + return 0; + } + + p = ovl_cache_entry_new(name, len, ino, d_type); + if (p == NULL) + return -ENOMEM; + + list_add_tail(&p->l_node, rdd->list); + rb_link_node(&p->node, parent, newp); + rb_insert_color(&p->node, &rdd->root); + + return 0; +} + +static int ovl_fill_lower(struct ovl_readdir_data *rdd, + const char *name, int namelen, + loff_t offset, u64 ino, unsigned int d_type) +{ + struct ovl_cache_entry *p; + + p = ovl_cache_entry_find(&rdd->root, name, namelen); + if (p) { + list_move_tail(&p->l_node, &rdd->middle); + } else { + p = ovl_cache_entry_new(name, namelen, ino, d_type); + if (p == NULL) + rdd->err = -ENOMEM; + else + list_add_tail(&p->l_node, &rdd->middle); + } + + return rdd->err; +} + +void ovl_cache_free(struct list_head *list) +{ + struct ovl_cache_entry *p; + struct ovl_cache_entry *n; + + list_for_each_entry_safe(p, n, list, l_node) + kfree(p); + + INIT_LIST_HEAD(list); +} + +static void ovl_cache_put(struct ovl_dir_file *od, struct 
dentry *dentry) +{ + struct ovl_dir_cache *cache = od->cache; + + list_del_init(&od->cursor.l_node); + WARN_ON(cache->refcount <= 0); + cache->refcount--; + if (!cache->refcount) { + if (ovl_dir_cache(dentry) == cache) + ovl_set_dir_cache(dentry, NULL); + + ovl_cache_free(&cache->entries); + kfree(cache); + } +} + +static int ovl_fill_merge(void *buf, const char *name, int namelen, + loff_t offset, u64 ino, unsigned int d_type) +{ + struct ovl_readdir_data *rdd = buf; + + rdd->count++; + if (!rdd->is_merge) + return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); + else + return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type); +} + +static inline int ovl_dir_read(struct path *realpath, + struct ovl_readdir_data *rdd) +{ + struct file *realfile; + int err; + + realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY); + if (IS_ERR(realfile)) + return PTR_ERR(realfile); + + rdd->ctx.pos = 0; + do { + rdd->count = 0; + rdd->err = 0; + err = iterate_dir(realfile, &rdd->ctx); + if (err >= 0) + err = rdd->err; + } while (!err && rdd->count); + fput(realfile); + + return err; +} + +static void ovl_dir_reset(struct file *file) +{ + struct ovl_dir_file *od = file->private_data; + struct ovl_dir_cache *cache = od->cache; + struct dentry *dentry = file->f_path.dentry; + enum ovl_path_type type = ovl_path_type(dentry); + + if (cache && ovl_dentry_version_get(dentry) != cache->version) { + ovl_cache_put(od, dentry); + od->cache = NULL; + } + WARN_ON(!od->is_real && type != OVL_PATH_MERGE); + if (od->is_real && type == OVL_PATH_MERGE) + od->is_real = false; +} + +static int ovl_dir_mark_whiteouts(struct dentry *dir, + struct ovl_readdir_data *rdd) +{ + struct ovl_cache_entry *p; + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) { + ovl_cache_free(rdd->list); + return -ENOMEM; + } + + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + mutex_lock(&dir->d_inode->i_mutex); + list_for_each_entry(p, rdd->list, l_node) { + if (p->is_cursor) + continue; + + if (p->type != DT_CHR) + continue; + + dentry = lookup_one_len(p->name, dir, p->len); + if (IS_ERR(dentry)) + continue; + + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + mutex_unlock(&dir->d_inode->i_mutex); + + revert_creds(old_cred); + put_cred(override_cred); + + return 0; +} + +static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) +{ + int err; + struct path lowerpath; + struct path upperpath; + struct ovl_readdir_data rdd = { + .ctx.actor = ovl_fill_merge, + .list = list, + .root = RB_ROOT, + .is_merge = false, + }; + + ovl_path_lower(dentry, &lowerpath); + ovl_path_upper(dentry, &upperpath); + + if (upperpath.dentry) { + err = ovl_dir_read(&upperpath, &rdd); + if (err) + goto out; + + if (lowerpath.dentry) { + err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); + if (err) + goto out; + } + } + if (lowerpath.dentry) { + /* + * Insert lowerpath entries before upperpath ones, this allows + * offsets to be reasonably constant + */ + list_add(&rdd.middle, rdd.list); + rdd.is_merge = true; + err = ovl_dir_read(&lowerpath, &rdd); + list_del(&rdd.middle); + } +out: + return err; +} + +static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) +{ + struct ovl_cache_entry *p; + loff_t off = 0; + + list_for_each_entry(p, &od->cache->entries, l_node) { + if (p->is_cursor) + continue; + if (off >= pos) + 
break; + off++; + } + list_move_tail(&od->cursor.l_node, &p->l_node); +} + +static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) +{ + int res; + struct ovl_dir_cache *cache; + + cache = ovl_dir_cache(dentry); + if (cache && ovl_dentry_version_get(dentry) == cache->version) { + cache->refcount++; + return cache; + } + ovl_set_dir_cache(dentry, NULL); + + cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); + if (!cache) + return ERR_PTR(-ENOMEM); + + cache->refcount = 1; + INIT_LIST_HEAD(&cache->entries); + + res = ovl_dir_read_merged(dentry, &cache->entries); + if (res) { + ovl_cache_free(&cache->entries); + kfree(cache); + return ERR_PTR(res); + } + + cache->version = ovl_dentry_version_get(dentry); + ovl_set_dir_cache(dentry, cache); + + return cache; +} + +static int ovl_iterate(struct file *file, struct dir_context *ctx) +{ + struct ovl_dir_file *od = file->private_data; + struct dentry *dentry = file->f_path.dentry; + + if (!ctx->pos) + ovl_dir_reset(file); + + if (od->is_real) + return iterate_dir(od->realfile, ctx); + + if (!od->cache) { + struct ovl_dir_cache *cache; + + cache = ovl_cache_get(dentry); + if (IS_ERR(cache)) + return PTR_ERR(cache); + + od->cache = cache; + ovl_seek_cursor(od, ctx->pos); + } + + while (od->cursor.l_node.next != &od->cache->entries) { + struct ovl_cache_entry *p; + + p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node); + /* Skip cursors */ + if (!p->is_cursor) { + if (!p->is_whiteout) { + if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) + break; + } + ctx->pos++; + } + list_move(&od->cursor.l_node, &p->l_node); + } + return 0; +} + +static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) +{ + loff_t res; + struct ovl_dir_file *od = file->private_data; + + mutex_lock(&file_inode(file)->i_mutex); + if (!file->f_pos) + ovl_dir_reset(file); + + if (od->is_real) { + res = vfs_llseek(od->realfile, offset, origin); + file->f_pos = od->realfile->f_pos; + } else { + res = -EINVAL; + + switch (origin) { + case SEEK_CUR: + offset += file->f_pos; + break; + case SEEK_SET: + break; + default: + goto out_unlock; + } + if (offset < 0) + goto out_unlock; + + if (offset != file->f_pos) { + file->f_pos = offset; + if (od->cache) + ovl_seek_cursor(od, offset); + } + res = offset; + } +out_unlock: + mutex_unlock(&file_inode(file)->i_mutex); + + return res; +} + +static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + struct ovl_dir_file *od = file->private_data; + struct dentry *dentry = file->f_path.dentry; + struct file *realfile = od->realfile; + + /* + * Need to check if we started out being a lower dir, but got copied up + */ + if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { + struct inode *inode = file_inode(file); + + realfile = lockless_dereference(od->upperfile); + if (!realfile) { + struct path upperpath; + + ovl_path_upper(dentry, &upperpath); + realfile = ovl_path_open(&upperpath, O_RDONLY); + smp_mb__before_spinlock(); + mutex_lock(&inode->i_mutex); + if (!od->upperfile) { + if (IS_ERR(realfile)) { + mutex_unlock(&inode->i_mutex); + return PTR_ERR(realfile); + } + od->upperfile = realfile; + } else { + /* somebody has beaten us to it */ + if (!IS_ERR(realfile)) + fput(realfile); + realfile = od->upperfile; + } + mutex_unlock(&inode->i_mutex); + } + } + + return vfs_fsync_range(realfile, start, end, datasync); +} + +static int ovl_dir_release(struct inode *inode, struct file *file) +{ + struct ovl_dir_file *od = file->private_data; + + if 
(od->cache) { + mutex_lock(&inode->i_mutex); + ovl_cache_put(od, file->f_path.dentry); + mutex_unlock(&inode->i_mutex); + } + fput(od->realfile); + if (od->upperfile) + fput(od->upperfile); + kfree(od); + + return 0; +} + +static int ovl_dir_open(struct inode *inode, struct file *file) +{ + struct path realpath; + struct file *realfile; + struct ovl_dir_file *od; + enum ovl_path_type type; + + od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); + if (!od) + return -ENOMEM; + + type = ovl_path_real(file->f_path.dentry, &realpath); + realfile = ovl_path_open(&realpath, file->f_flags); + if (IS_ERR(realfile)) { + kfree(od); + return PTR_ERR(realfile); + } + INIT_LIST_HEAD(&od->cursor.l_node); + od->realfile = realfile; + od->is_real = (type != OVL_PATH_MERGE); + od->is_upper = (type != OVL_PATH_LOWER); + od->cursor.is_cursor = true; + file->private_data = od; + + return 0; +} + +const struct file_operations ovl_dir_operations = { + .read = generic_read_dir, + .open = ovl_dir_open, + .iterate = ovl_iterate, + .llseek = ovl_dir_llseek, + .fsync = ovl_dir_fsync, + .release = ovl_dir_release, +}; + +int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) +{ + int err; + struct ovl_cache_entry *p; + + err = ovl_dir_read_merged(dentry, list); + if (err) + return err; + + err = 0; + + list_for_each_entry(p, list, l_node) { + if (p->is_whiteout) + continue; + + if (p->name[0] == '.') { + if (p->len == 1) + continue; + if (p->len == 2 && p->name[1] == '.') + continue; + } + err = -ENOTEMPTY; + break; + } + + return err; +} + +void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list) +{ + struct ovl_cache_entry *p; + + mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD); + list_for_each_entry(p, list, l_node) { + struct dentry *dentry; + + if (!p->is_whiteout) + continue; + + dentry = lookup_one_len(p->name, upper, p->len); + if (IS_ERR(dentry)) { + pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n", + upper->d_name.name, p->len, p->name, + (int) PTR_ERR(dentry)); + continue; + } + ovl_cleanup(upper->d_inode, dentry); + dput(dentry); + } + mutex_unlock(&upper->d_inode->i_mutex); +} diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c new file mode 100644 index 00000000000..f16d318b71f --- /dev/null +++ b/fs/overlayfs/super.c @@ -0,0 +1,833 @@ +/* + * + * Copyright (C) 2011 Novell Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/xattr.h> +#include <linux/security.h> +#include <linux/mount.h> +#include <linux/slab.h> +#include <linux/parser.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/statfs.h> +#include <linux/seq_file.h> +#include "overlayfs.h" + +MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); +MODULE_DESCRIPTION("Overlay filesystem"); +MODULE_LICENSE("GPL"); + +#define OVERLAYFS_SUPER_MAGIC 0x794c7630 + +struct ovl_config { + char *lowerdir; + char *upperdir; + char *workdir; +}; + +/* private information held for overlayfs's superblock */ +struct ovl_fs { + struct vfsmount *upper_mnt; + struct vfsmount *lower_mnt; + struct dentry *workdir; + long lower_namelen; + /* pathnames of lower and upper dirs, for show_options */ + struct ovl_config config; +}; + +struct ovl_dir_cache; + +/* private information held for every overlayfs dentry */ +struct ovl_entry { + struct dentry *__upperdentry; + struct dentry *lowerdentry; + struct ovl_dir_cache *cache; + union { + struct { + u64 version; + bool opaque; + }; + struct rcu_head rcu; + }; +}; + +const char *ovl_opaque_xattr = "trusted.overlay.opaque"; + + +enum ovl_path_type ovl_path_type(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + if (oe->__upperdentry) { + if (oe->lowerdentry) { + if (S_ISDIR(dentry->d_inode->i_mode)) + return OVL_PATH_MERGE; + else + return OVL_PATH_UPPER; + } else { + if (oe->opaque) + return OVL_PATH_UPPER; + else + return OVL_PATH_PURE_UPPER; + } + } else { + return OVL_PATH_LOWER; + } +} + +static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) +{ + return lockless_dereference(oe->__upperdentry); +} + +void ovl_path_upper(struct dentry *dentry, struct path *path) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + + path->mnt = ofs->upper_mnt; + path->dentry = ovl_upperdentry_dereference(oe); +} + +enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) +{ + + enum ovl_path_type type = ovl_path_type(dentry); + + if (type == OVL_PATH_LOWER) + ovl_path_lower(dentry, path); + else + ovl_path_upper(dentry, path); + + return type; +} + +struct dentry *ovl_dentry_upper(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + return ovl_upperdentry_dereference(oe); +} + +struct dentry *ovl_dentry_lower(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + return oe->lowerdentry; +} + +struct dentry *ovl_dentry_real(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + struct dentry *realdentry; + + realdentry = ovl_upperdentry_dereference(oe); + if (!realdentry) + realdentry = oe->lowerdentry; + + return realdentry; +} + +struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) +{ + struct dentry *realdentry; + + realdentry = ovl_upperdentry_dereference(oe); + if (realdentry) { + *is_upper = true; + } else { + realdentry = oe->lowerdentry; + *is_upper = false; + } + return realdentry; +} + +struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + return oe->cache; +} + +void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + oe->cache = cache; +} + +void ovl_path_lower(struct dentry *dentry, struct path *path) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct ovl_entry *oe = dentry->d_fsdata; + + path->mnt = ofs->lower_mnt; + path->dentry = 
oe->lowerdentry; +} + +int ovl_want_write(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + return mnt_want_write(ofs->upper_mnt); +} + +void ovl_drop_write(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + mnt_drop_write(ofs->upper_mnt); +} + +struct dentry *ovl_workdir(struct dentry *dentry) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + return ofs->workdir; +} + +bool ovl_dentry_is_opaque(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + return oe->opaque; +} + +void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque) +{ + struct ovl_entry *oe = dentry->d_fsdata; + oe->opaque = opaque; +} + +void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex)); + WARN_ON(oe->__upperdentry); + BUG_ON(!upperdentry->d_inode); + /* + * Make sure upperdentry is consistent before making it visible to + * ovl_upperdentry_dereference(). + */ + smp_wmb(); + oe->__upperdentry = upperdentry; +} + +void ovl_dentry_version_inc(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); + oe->version++; +} + +u64 ovl_dentry_version_get(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); + return oe->version; +} + +bool ovl_is_whiteout(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + + return inode && IS_WHITEOUT(inode); +} + +static bool ovl_is_opaquedir(struct dentry *dentry) +{ + int res; + char val; + struct inode *inode = dentry->d_inode; + + if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) + return false; + + res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); + if (res == 1 && val == 'y') + return true; + + return false; +} + +static void ovl_dentry_release(struct dentry *dentry) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + if (oe) { + dput(oe->__upperdentry); + dput(oe->lowerdentry); + kfree_rcu(oe, rcu); + } +} + +static const struct dentry_operations ovl_dentry_operations = { + .d_release = ovl_dentry_release, +}; + +static struct ovl_entry *ovl_alloc_entry(void) +{ + return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); +} + +static inline struct dentry *ovl_lookup_real(struct dentry *dir, + struct qstr *name) +{ + struct dentry *dentry; + + mutex_lock(&dir->d_inode->i_mutex); + dentry = lookup_one_len(name->name, dir, name->len); + mutex_unlock(&dir->d_inode->i_mutex); + + if (IS_ERR(dentry)) { + if (PTR_ERR(dentry) == -ENOENT) + dentry = NULL; + } else if (!dentry->d_inode) { + dput(dentry); + dentry = NULL; + } + return dentry; +} + +struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +{ + struct ovl_entry *oe; + struct dentry *upperdir; + struct dentry *lowerdir; + struct dentry *upperdentry = NULL; + struct dentry *lowerdentry = NULL; + struct inode *inode = NULL; + int err; + + err = -ENOMEM; + oe = ovl_alloc_entry(); + if (!oe) + goto out; + + upperdir = ovl_dentry_upper(dentry->d_parent); + lowerdir = ovl_dentry_lower(dentry->d_parent); + + if (upperdir) { + upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); + err = PTR_ERR(upperdentry); + if (IS_ERR(upperdentry)) + goto out_put_dir; + + if (lowerdir && upperdentry) { + if (ovl_is_whiteout(upperdentry)) { + dput(upperdentry); + upperdentry = NULL; + oe->opaque = true; + } else if 
(ovl_is_opaquedir(upperdentry)) { + oe->opaque = true; + } + } + } + if (lowerdir && !oe->opaque) { + lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); + err = PTR_ERR(lowerdentry); + if (IS_ERR(lowerdentry)) + goto out_dput_upper; + } + + if (lowerdentry && upperdentry && + (!S_ISDIR(upperdentry->d_inode->i_mode) || + !S_ISDIR(lowerdentry->d_inode->i_mode))) { + dput(lowerdentry); + lowerdentry = NULL; + oe->opaque = true; + } + + if (lowerdentry || upperdentry) { + struct dentry *realdentry; + + realdentry = upperdentry ? upperdentry : lowerdentry; + err = -ENOMEM; + inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, + oe); + if (!inode) + goto out_dput; + ovl_copyattr(realdentry->d_inode, inode); + } + + oe->__upperdentry = upperdentry; + oe->lowerdentry = lowerdentry; + + dentry->d_fsdata = oe; + d_add(dentry, inode); + + return NULL; + +out_dput: + dput(lowerdentry); +out_dput_upper: + dput(upperdentry); +out_put_dir: + kfree(oe); +out: + return ERR_PTR(err); +} + +struct file *ovl_path_open(struct path *path, int flags) +{ + return dentry_open(path, flags, current_cred()); +} + +static void ovl_put_super(struct super_block *sb) +{ + struct ovl_fs *ufs = sb->s_fs_info; + + dput(ufs->workdir); + mntput(ufs->upper_mnt); + mntput(ufs->lower_mnt); + + kfree(ufs->config.lowerdir); + kfree(ufs->config.upperdir); + kfree(ufs->config.workdir); + kfree(ufs); +} + +/** + * ovl_statfs + * @sb: The overlayfs super block + * @buf: The struct kstatfs to fill in with stats + * + * Get the filesystem statistics. As writes always target the upper layer + * filesystem pass the statfs to the same filesystem. + */ +static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; + struct dentry *root_dentry = dentry->d_sb->s_root; + struct path path; + int err; + + ovl_path_upper(root_dentry, &path); + + err = vfs_statfs(&path, buf); + if (!err) { + buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen); + buf->f_type = OVERLAYFS_SUPER_MAGIC; + } + + return err; +} + +/** + * ovl_show_options + * + * Prints the mount options for a given superblock. + * Returns zero; does not fail. 
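For orientation, the lowerdir, upperdir and workdir names stored in ovl_config above are the options a user supplies at mount time, and "overlay" is the filesystem type registered by ovl_fs_type later in this file. A hedged user-space sketch of such a mount (the directory paths are hypothetical, error handling minimal):

/* Hypothetical example: assembling an overlay mount from user space. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

	/* "overlay" is the type string registered by this module */
	if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}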
+ */ +static int ovl_show_options(struct seq_file *m, struct dentry *dentry) +{ + struct super_block *sb = dentry->d_sb; + struct ovl_fs *ufs = sb->s_fs_info; + + seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); + seq_printf(m, ",upperdir=%s", ufs->config.upperdir); + seq_printf(m, ",workdir=%s", ufs->config.workdir); + return 0; +} + +static const struct super_operations ovl_super_operations = { + .put_super = ovl_put_super, + .statfs = ovl_statfs, + .show_options = ovl_show_options, +}; + +enum { + OPT_LOWERDIR, + OPT_UPPERDIR, + OPT_WORKDIR, + OPT_ERR, +}; + +static const match_table_t ovl_tokens = { + {OPT_LOWERDIR, "lowerdir=%s"}, + {OPT_UPPERDIR, "upperdir=%s"}, + {OPT_WORKDIR, "workdir=%s"}, + {OPT_ERR, NULL} +}; + +static char *ovl_next_opt(char **s) +{ + char *sbegin = *s; + char *p; + + if (sbegin == NULL) + return NULL; + + for (p = sbegin; *p; p++) { + if (*p == '\\') { + p++; + if (!*p) + break; + } else if (*p == ',') { + *p = '\0'; + *s = p + 1; + return sbegin; + } + } + *s = NULL; + return sbegin; +} + +static int ovl_parse_opt(char *opt, struct ovl_config *config) +{ + char *p; + + while ((p = ovl_next_opt(&opt)) != NULL) { + int token; + substring_t args[MAX_OPT_ARGS]; + + if (!*p) + continue; + + token = match_token(p, ovl_tokens, args); + switch (token) { + case OPT_UPPERDIR: + kfree(config->upperdir); + config->upperdir = match_strdup(&args[0]); + if (!config->upperdir) + return -ENOMEM; + break; + + case OPT_LOWERDIR: + kfree(config->lowerdir); + config->lowerdir = match_strdup(&args[0]); + if (!config->lowerdir) + return -ENOMEM; + break; + + case OPT_WORKDIR: + kfree(config->workdir); + config->workdir = match_strdup(&args[0]); + if (!config->workdir) + return -ENOMEM; + break; + + default: + return -EINVAL; + } + } + return 0; +} + +#define OVL_WORKDIR_NAME "work" + +static struct dentry *ovl_workdir_create(struct vfsmount *mnt, + struct dentry *dentry) +{ + struct inode *dir = dentry->d_inode; + struct dentry *work; + int err; + bool retried = false; + + err = mnt_want_write(mnt); + if (err) + return ERR_PTR(err); + + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); +retry: + work = lookup_one_len(OVL_WORKDIR_NAME, dentry, + strlen(OVL_WORKDIR_NAME)); + + if (!IS_ERR(work)) { + struct kstat stat = { + .mode = S_IFDIR | 0, + }; + + if (work->d_inode) { + err = -EEXIST; + if (retried) + goto out_dput; + + retried = true; + ovl_cleanup(dir, work); + dput(work); + goto retry; + } + + err = ovl_create_real(dir, work, &stat, NULL, NULL, true); + if (err) + goto out_dput; + } +out_unlock: + mutex_unlock(&dir->i_mutex); + mnt_drop_write(mnt); + + return work; + +out_dput: + dput(work); + work = ERR_PTR(err); + goto out_unlock; +} + +static void ovl_unescape(char *s) +{ + char *d = s; + + for (;; s++, d++) { + if (*s == '\\') + s++; + *d = *s; + if (!*s) + break; + } +} + +static int ovl_mount_dir(const char *name, struct path *path) +{ + int err; + char *tmp = kstrdup(name, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + + ovl_unescape(tmp); + err = kern_path(tmp, LOOKUP_FOLLOW, path); + if (err) { + pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); + err = -EINVAL; + } + kfree(tmp); + return err; +} + +static bool ovl_is_allowed_fs_type(struct dentry *root) +{ + const struct dentry_operations *dop = root->d_op; + + /* + * We don't support: + * - automount filesystems + * - filesystems with revalidate (FIXME for lower layer) + * - filesystems with case insensitive names + */ + if (dop && + (dop->d_manage || dop->d_automount || + dop->d_revalidate || 
dop->d_weak_revalidate || + dop->d_compare || dop->d_hash)) { + return false; + } + return true; +} + +/* Workdir should not be subdir of upperdir and vice versa */ +static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) +{ + bool ok = false; + + if (workdir != upperdir) { + ok = (lock_rename(workdir, upperdir) == NULL); + unlock_rename(workdir, upperdir); + } + return ok; +} + +static int ovl_fill_super(struct super_block *sb, void *data, int silent) +{ + struct path lowerpath; + struct path upperpath; + struct path workpath; + struct inode *root_inode; + struct dentry *root_dentry; + struct ovl_entry *oe; + struct ovl_fs *ufs; + struct kstatfs statfs; + int err; + + err = -ENOMEM; + ufs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL); + if (!ufs) + goto out; + + err = ovl_parse_opt((char *) data, &ufs->config); + if (err) + goto out_free_config; + + /* FIXME: workdir is not needed for a R/O mount */ + err = -EINVAL; + if (!ufs->config.upperdir || !ufs->config.lowerdir || + !ufs->config.workdir) { + pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); + goto out_free_config; + } + + err = -ENOMEM; + oe = ovl_alloc_entry(); + if (oe == NULL) + goto out_free_config; + + err = ovl_mount_dir(ufs->config.upperdir, &upperpath); + if (err) + goto out_free_oe; + + err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); + if (err) + goto out_put_upperpath; + + err = ovl_mount_dir(ufs->config.workdir, &workpath); + if (err) + goto out_put_lowerpath; + + err = -EINVAL; + if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || + !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || + !S_ISDIR(workpath.dentry->d_inode->i_mode)) { + pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); + goto out_put_workpath; + } + + if (upperpath.mnt != workpath.mnt) { + pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); + goto out_put_workpath; + } + if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { + pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); + goto out_put_workpath; + } + + if (!ovl_is_allowed_fs_type(upperpath.dentry)) { + pr_err("overlayfs: filesystem of upperdir is not supported\n"); + goto out_put_workpath; + } + + if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { + pr_err("overlayfs: filesystem of lowerdir is not supported\n"); + goto out_put_workpath; + } + + err = vfs_statfs(&lowerpath, &statfs); + if (err) { + pr_err("overlayfs: statfs failed on lowerpath\n"); + goto out_put_workpath; + } + ufs->lower_namelen = statfs.f_namelen; + + sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, + lowerpath.mnt->mnt_sb->s_stack_depth) + 1; + + err = -EINVAL; + if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { + pr_err("overlayfs: maximum fs stacking depth exceeded\n"); + goto out_put_workpath; + } + + ufs->upper_mnt = clone_private_mount(&upperpath); + err = PTR_ERR(ufs->upper_mnt); + if (IS_ERR(ufs->upper_mnt)) { + pr_err("overlayfs: failed to clone upperpath\n"); + goto out_put_workpath; + } + + ufs->lower_mnt = clone_private_mount(&lowerpath); + err = PTR_ERR(ufs->lower_mnt); + if (IS_ERR(ufs->lower_mnt)) { + pr_err("overlayfs: failed to clone lowerpath\n"); + goto out_put_upper_mnt; + } + + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); + err = PTR_ERR(ufs->workdir); + if (IS_ERR(ufs->workdir)) { + pr_err("overlayfs: failed to create directory %s/%s\n", + ufs->config.workdir, OVL_WORKDIR_NAME); + goto out_put_lower_mnt; + } + + /* + * Make lower_mnt R/O. 
That way fchmod/fchown on lower file + * will fail instead of modifying lower fs. + */ + ufs->lower_mnt->mnt_flags |= MNT_READONLY; + + /* If the upper fs is r/o, we mark overlayfs r/o too */ + if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) + sb->s_flags |= MS_RDONLY; + + sb->s_d_op = &ovl_dentry_operations; + + err = -ENOMEM; + root_inode = ovl_new_inode(sb, S_IFDIR, oe); + if (!root_inode) + goto out_put_workdir; + + root_dentry = d_make_root(root_inode); + if (!root_dentry) + goto out_put_workdir; + + mntput(upperpath.mnt); + mntput(lowerpath.mnt); + path_put(&workpath); + + oe->__upperdentry = upperpath.dentry; + oe->lowerdentry = lowerpath.dentry; + + root_dentry->d_fsdata = oe; + + sb->s_magic = OVERLAYFS_SUPER_MAGIC; + sb->s_op = &ovl_super_operations; + sb->s_root = root_dentry; + sb->s_fs_info = ufs; + + return 0; + +out_put_workdir: + dput(ufs->workdir); +out_put_lower_mnt: + mntput(ufs->lower_mnt); +out_put_upper_mnt: + mntput(ufs->upper_mnt); +out_put_workpath: + path_put(&workpath); +out_put_lowerpath: + path_put(&lowerpath); +out_put_upperpath: + path_put(&upperpath); +out_free_oe: + kfree(oe); +out_free_config: + kfree(ufs->config.lowerdir); + kfree(ufs->config.upperdir); + kfree(ufs->config.workdir); + kfree(ufs); +out: + return err; +} + +static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *raw_data) +{ + return mount_nodev(fs_type, flags, raw_data, ovl_fill_super); +} + +static struct file_system_type ovl_fs_type = { + .owner = THIS_MODULE, + .name = "overlay", + .mount = ovl_mount, + .kill_sb = kill_anon_super, +}; +MODULE_ALIAS_FS("overlay"); + +static int __init ovl_init(void) +{ + return register_filesystem(&ovl_fs_type); +} + +static void __exit ovl_exit(void) +{ + unregister_filesystem(&ovl_fs_type); +} + +module_init(ovl_init); +module_exit(ovl_exit); diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8b663b2d956..6b4527216a7 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -634,7 +634,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type) dqstats_inc(DQST_LOOKUPS); err = sb->dq_op->write_dquot(dquot); if (!ret && err) - err = ret; + ret = err; dqput(dquot); spin_lock(&dq_list_lock); } diff --git a/fs/splice.c b/fs/splice.c index f5cb9ba8451..75c6058eabf 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1330,6 +1330,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, return ret; } +EXPORT_SYMBOL(do_splice_direct); static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 92e8f99a585..281002689d6 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1338,7 +1338,10 @@ xfs_free_file_space( goto out; } - +/* + * Preallocate and zero a range of a file. This mechanism has the allocation + * semantics of fallocate and in addition converts data in the range to zeroes. + */ int xfs_zero_file_space( struct xfs_inode *ip, @@ -1346,65 +1349,30 @@ xfs_zero_file_space( xfs_off_t len) { struct xfs_mount *mp = ip->i_mount; - uint granularity; - xfs_off_t start_boundary; - xfs_off_t end_boundary; + uint blksize; int error; trace_xfs_zero_file_space(ip); - granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); + blksize = 1 << mp->m_sb.sb_blocklog; /* - * Round the range of extents we are going to convert inwards. If the - * offset is aligned, then it doesn't get changed so we zero from the - * start of the block offset points to. 
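The reworked xfs_zero_file_space() whose new comment follows is the kernel side of zero-range preallocation; from user space it is normally exercised through fallocate(2) with FALLOC_FL_ZERO_RANGE. A hedged sketch, assuming the file lives on XFS and a made-up path:

/* Hedged example: ask the filesystem to convert a byte range to zeroes
 * while keeping it allocated; on XFS this reaches xfs_zero_file_space(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/xfs/file", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* zero bytes [1 MiB, 1 MiB + 64 KiB), leaving the blocks allocated */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 1 << 20, 64 << 10) != 0)
		perror("fallocate");
	return 0;
}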
+ * Punch a hole and prealloc the range. We use hole punch rather than + * unwritten extent conversion for two reasons: + * + * 1.) Hole punch handles partial block zeroing for us. + * + * 2.) If prealloc returns ENOSPC, the file range is still zero-valued + * by virtue of the hole punch. */ - start_boundary = round_up(offset, granularity); - end_boundary = round_down(offset + len, granularity); - - ASSERT(start_boundary >= offset); - ASSERT(end_boundary <= offset + len); - - if (start_boundary < end_boundary - 1) { - /* - * Writeback the range to ensure any inode size updates due to - * appending writes make it to disk (otherwise we could just - * punch out the delalloc blocks). - */ - error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, - start_boundary, end_boundary - 1); - if (error) - goto out; - truncate_pagecache_range(VFS_I(ip), start_boundary, - end_boundary - 1); - - /* convert the blocks */ - error = xfs_alloc_file_space(ip, start_boundary, - end_boundary - start_boundary - 1, - XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT); - if (error) - goto out; - - /* We've handled the interior of the range, now for the edges */ - if (start_boundary != offset) { - error = xfs_iozero(ip, offset, start_boundary - offset); - if (error) - goto out; - } - - if (end_boundary != offset + len) - error = xfs_iozero(ip, end_boundary, - offset + len - end_boundary); - - } else { - /* - * It's either a sub-granularity range or the range spanned lies - * partially across two adjacent blocks. - */ - error = xfs_iozero(ip, offset, len); - } + error = xfs_free_file_space(ip, offset, len); + if (error) + goto out; + error = xfs_alloc_file_space(ip, round_down(offset, blksize), + round_up(offset + len, blksize) - + round_down(offset, blksize), + XFS_BMAPI_PREALLOC); out: return error; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index f1deb961a29..894924a5129 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk( XFS_WANT_CORRUPTED_RETURN(stat == 1); /* Check if the record contains the inode in request */ - if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) - return -EINVAL; + if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) { + *icount = 0; + return 0; + } idx = agino - irec->ir_startino + 1; if (idx < XFS_INODES_PER_CHUNK && @@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk( #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) +struct xfs_bulkstat_agichunk { + char __user **ac_ubuffer;/* pointer into user's buffer */ + int ac_ubleft; /* bytes left in user's buffer */ + int ac_ubelem; /* spaces used in user's buffer */ +}; + /* * Process inodes in chunk with a pointer to a formatter function * that will iget the inode and fill in the appropriate structure. 
*/ -int +static int xfs_bulkstat_ag_ichunk( struct xfs_mount *mp, xfs_agnumber_t agno, struct xfs_inobt_rec_incore *irbp, bulkstat_one_pf formatter, size_t statstruct_size, - struct xfs_bulkstat_agichunk *acp) + struct xfs_bulkstat_agichunk *acp, + xfs_agino_t *last_agino) { - xfs_ino_t lastino = acp->ac_lastino; char __user **ubufp = acp->ac_ubuffer; - int ubleft = acp->ac_ubleft; - int ubelem = acp->ac_ubelem; - int chunkidx, clustidx; + int chunkidx; int error = 0; - xfs_agino_t agino; + xfs_agino_t agino = irbp->ir_startino; - for (agino = irbp->ir_startino, chunkidx = clustidx = 0; - XFS_BULKSTAT_UBLEFT(ubleft) && - irbp->ir_freecount < XFS_INODES_PER_CHUNK; - chunkidx++, clustidx++, agino++) { - int fmterror; /* bulkstat formatter result */ + for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK; + chunkidx++, agino++) { + int fmterror; int ubused; - xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino); - ASSERT(chunkidx < XFS_INODES_PER_CHUNK); + /* inode won't fit in buffer, we are done */ + if (acp->ac_ubleft < statstruct_size) + break; /* Skip if this inode is free */ - if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { - lastino = ino; + if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) continue; - } - - /* - * Count used inodes as free so we can tell when the - * chunk is used up. - */ - irbp->ir_freecount++; /* Get the inode and fill in a single buffer */ ubused = statstruct_size; - error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror); - if (fmterror == BULKSTAT_RV_NOTHING) { - if (error && error != -ENOENT && error != -EINVAL) { - ubleft = 0; - break; - } - lastino = ino; - continue; - } - if (fmterror == BULKSTAT_RV_GIVEUP) { - ubleft = 0; + error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino), + *ubufp, acp->ac_ubleft, &ubused, &fmterror); + + if (fmterror == BULKSTAT_RV_GIVEUP || + (error && error != -ENOENT && error != -EINVAL)) { + acp->ac_ubleft = 0; ASSERT(error); break; } - if (*ubufp) - *ubufp += ubused; - ubleft -= ubused; - ubelem++; - lastino = ino; + + /* be careful not to leak error if at end of chunk */ + if (fmterror == BULKSTAT_RV_NOTHING || error) { + error = 0; + continue; + } + + *ubufp += ubused; + acp->ac_ubleft -= ubused; + acp->ac_ubelem++; } - acp->ac_lastino = lastino; - acp->ac_ubleft = ubleft; - acp->ac_ubelem = ubelem; + /* + * Post-update *last_agino. At this point, agino will always point one + * inode past the last inode we processed successfully. Hence we + * subtract that inode when setting the *last_agino cursor so that we + * return the correct cookie to userspace. On the next bulkstat call, + * the inode under the lastino cookie will be skipped as we have already + * processed it here.
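The cookie semantics spelled out in the comment above are what a user-space bulkstat loop depends on: each call resumes after the inode the cookie points at. A hedged sketch of such a loop; the XFS_IOC_FSBULKSTAT ioctl and the xfs_fsop_bulkreq/xfs_bstat layouts are assumed from xfsprogs' <xfs/xfs_fs.h> and are not part of this patch:

/* Hypothetical user-space consumer of the bulkstat cursor. 'fd' is any
 * open file descriptor on the XFS filesystem being scanned. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* assumption: xfsprogs headers installed */

static void dump_all_inodes(int fd)
{
	struct xfs_bstat buf[64];
	__u64 lastino = 0;		/* resume cookie, 0 = start of fs */
	__s32 ocount = 0;
	struct xfs_fsop_bulkreq req = {
		.lastip  = &lastino,	/* updated by the kernel on each call */
		.icount  = 64,
		.ubuffer = buf,
		.ocount  = &ocount,
	};

	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && ocount > 0) {
		for (int i = 0; i < ocount; i++)
			printf("ino %llu\n",
			       (unsigned long long)buf[i].bs_ino);
		/* the next iteration skips the inode *lastip points at */
	}
}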
+ */ + *last_agino = agino - 1; return error; } @@ -353,45 +356,33 @@ xfs_bulkstat( xfs_agino_t agino; /* inode # in allocation group */ xfs_agnumber_t agno; /* allocation group number */ xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ - int end_of_ag; /* set if we've seen the ag end */ - int error; /* error code */ - int fmterror;/* bulkstat formatter result */ - int i; /* loop index */ - int icount; /* count of inodes good in irbuf */ size_t irbsize; /* size of irec buffer in bytes */ - xfs_ino_t ino; /* inode number (filesystem) */ - xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ - xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ - xfs_ino_t lastino; /* last inode number returned */ int nirbuf; /* size of irbuf */ - int rval; /* return value error code */ - int tmp; /* result value from btree calls */ int ubcount; /* size of user's buffer */ - int ubleft; /* bytes left in user's buffer */ - char __user *ubufp; /* pointer into user's buffer */ - int ubelem; /* spaces used in user's buffer */ + struct xfs_bulkstat_agichunk ac; + int error = 0; /* * Get the last inode value, see if there's nothing to do. */ - ino = (xfs_ino_t)*lastinop; - lastino = ino; - agno = XFS_INO_TO_AGNO(mp, ino); - agino = XFS_INO_TO_AGINO(mp, ino); + agno = XFS_INO_TO_AGNO(mp, *lastinop); + agino = XFS_INO_TO_AGINO(mp, *lastinop); if (agno >= mp->m_sb.sb_agcount || - ino != XFS_AGINO_TO_INO(mp, agno, agino)) { + *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) { *done = 1; *ubcountp = 0; return 0; } ubcount = *ubcountp; /* statstruct's */ - ubleft = ubcount * statstruct_size; /* bytes */ - *ubcountp = ubelem = 0; + ac.ac_ubuffer = &ubuffer; + ac.ac_ubleft = ubcount * statstruct_size; /* bytes */; + ac.ac_ubelem = 0; + + *ubcountp = 0; *done = 0; - fmterror = 0; - ubufp = ubuffer; + irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); if (!irbuf) return -ENOMEM; @@ -402,9 +393,13 @@ xfs_bulkstat( * Loop over the allocation groups, starting from the last * inode returned; 0 means start of the allocation group. */ - rval = 0; - while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { - cond_resched(); + while (agno < mp->m_sb.sb_agcount) { + struct xfs_inobt_rec_incore *irbp = irbuf; + struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf; + bool end_of_ag = false; + int icount = 0; + int stat; + error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); if (error) break; @@ -414,10 +409,6 @@ xfs_bulkstat( */ cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO); - irbp = irbuf; - irbufend = irbuf + nirbuf; - end_of_ag = 0; - icount = 0; if (agino > 0) { /* * In the middle of an allocation group, we need to get @@ -427,22 +418,23 @@ xfs_bulkstat( error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r); if (error) - break; + goto del_cursor; if (icount) { irbp->ir_startino = r.ir_startino; irbp->ir_freecount = r.ir_freecount; irbp->ir_free = r.ir_free; irbp++; - agino = r.ir_startino + XFS_INODES_PER_CHUNK; } /* Increment to the next record */ - error = xfs_btree_increment(cur, 0, &tmp); + error = xfs_btree_increment(cur, 0, &stat); } else { /* Start of ag. 
Lookup the first inode chunk */ - error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat); + } + if (error || stat == 0) { + end_of_ag = true; + goto del_cursor; } - if (error) - break; /* * Loop through inode btree records in this ag, @@ -451,10 +443,10 @@ xfs_bulkstat( while (irbp < irbufend && icount < ubcount) { struct xfs_inobt_rec_incore r; - error = xfs_inobt_get_rec(cur, &r, &i); - if (error || i == 0) { - end_of_ag = 1; - break; + error = xfs_inobt_get_rec(cur, &r, &stat); + if (error || stat == 0) { + end_of_ag = true; + goto del_cursor; } /* @@ -469,77 +461,79 @@ xfs_bulkstat( irbp++; icount += XFS_INODES_PER_CHUNK - r.ir_freecount; } - /* - * Set agino to after this chunk and bump the cursor. - */ - agino = r.ir_startino + XFS_INODES_PER_CHUNK; - error = xfs_btree_increment(cur, 0, &tmp); + error = xfs_btree_increment(cur, 0, &stat); + if (error || stat == 0) { + end_of_ag = true; + goto del_cursor; + } cond_resched(); } + /* - * Drop the btree buffers and the agi buffer. - * We can't hold any of the locks these represent - * when calling iget. + * Drop the btree buffers and the agi buffer as we can't hold any + * of the locks these represent when calling iget. If there is a + * pending error, then we are done. */ +del_cursor: xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_buf_relse(agbp); + if (error) + break; /* - * Now format all the good inodes into the user's buffer. + * Now format all the good inodes into the user's buffer. The + * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer + * for the next loop iteration. */ irbufend = irbp; for (irbp = irbuf; - irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) { - struct xfs_bulkstat_agichunk ac; - - ac.ac_lastino = lastino; - ac.ac_ubuffer = &ubuffer; - ac.ac_ubleft = ubleft; - ac.ac_ubelem = ubelem; + irbp < irbufend && ac.ac_ubleft >= statstruct_size; + irbp++) { error = xfs_bulkstat_ag_ichunk(mp, agno, irbp, - formatter, statstruct_size, &ac); + formatter, statstruct_size, &ac, + &agino); if (error) - rval = error; - - lastino = ac.ac_lastino; - ubleft = ac.ac_ubleft; - ubelem = ac.ac_ubelem; + break; cond_resched(); } + /* - * Set up for the next loop iteration. + * If we've run out of space or had a formatting error, we + * are now done */ - if (XFS_BULKSTAT_UBLEFT(ubleft)) { - if (end_of_ag) { - agno++; - agino = 0; - } else - agino = XFS_INO_TO_AGINO(mp, lastino); - } else + if (ac.ac_ubleft < statstruct_size || error) break; + + if (end_of_ag) { + agno++; + agino = 0; + } } /* * Done, we're either out of filesystem or space to put the data. */ kmem_free(irbuf); - *ubcountp = ubelem; + *ubcountp = ac.ac_ubelem; + /* - * Found some inodes, return them now and return the error next time. + * We found some inodes, so clear the error status and return them. + * The lastino pointer will point directly at the inode that triggered + * any error that occurred, so on the next call the error will be + * triggered again and propagated to userspace as there will be no + * formatted inodes in the buffer. */ - if (ubelem) - rval = 0; - if (agno >= mp->m_sb.sb_agcount) { - /* - * If we ran out of filesystem, mark lastino as off - * the end of the filesystem, so the next call - * will return immediately. - */ - *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0); + if (ac.ac_ubelem) + error = 0; + + /* + * If we ran out of filesystem, lastino will point off the end of + * the filesystem so the next call will return immediately. 
+ */ + *lastinop = XFS_AGINO_TO_INO(mp, agno, agino); + if (agno >= mp->m_sb.sb_agcount) *done = 1; - } else - *lastinop = (xfs_ino_t)lastino; - return rval; + return error; } int diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index aaed08022eb..6ea8b3912fa 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, int *ubused, int *stat); -struct xfs_bulkstat_agichunk { - xfs_ino_t ac_lastino; /* last inode returned */ - char __user **ac_ubuffer;/* pointer into user's buffer */ - int ac_ubleft; /* bytes left in user's buffer */ - int ac_ubelem; /* spaces used in user's buffer */ -}; - -int -xfs_bulkstat_ag_ichunk( - struct xfs_mount *mp, - xfs_agnumber_t agno, - struct xfs_inobt_rec_incore *irbp, - bulkstat_one_pf formatter, - size_t statstruct_size, - struct xfs_bulkstat_agichunk *acp); - /* * Values for stat return value. */ diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index f97804bdf1f..7461327e14e 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -52,6 +52,7 @@ #define METHOD_NAME__CBA "_CBA" #define METHOD_NAME__CID "_CID" #define METHOD_NAME__CRS "_CRS" +#define METHOD_NAME__DDN "_DDN" #define METHOD_NAME__HID "_HID" #define METHOD_NAME__INI "_INI" #define METHOD_NAME__PLD "_PLD" diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 57ee0528aac..f34a0835aa4 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -433,6 +433,7 @@ int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); int acpi_bus_update_power(acpi_handle handle, int *state_p); +int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); #ifdef CONFIG_PM diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 9fc1d71c82b..ab2acf629a6 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20140828 +#define ACPI_CA_VERSION 0x20140926 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index ac03ec81d34..7000e66f768 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -721,7 +721,7 @@ typedef u32 acpi_event_type; * | | | +--- Enabled for wake? * | | +----- Set? * | +------- Has a handler? 
- * +----------- <Reserved> + * +------------- <Reserved> */ typedef u32 acpi_event_status; @@ -729,7 +729,7 @@ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 -#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 +#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08 /* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index e973540cd15..2dd405c9be7 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -74,7 +74,6 @@ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ - {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index ddaef8620b2..b690cdba163 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -62,8 +62,8 @@ #define IMX6QDL_CLK_USDHC3_SEL 50 #define IMX6QDL_CLK_USDHC4_SEL 51 #define IMX6QDL_CLK_ENFC_SEL 52 -#define IMX6QDL_CLK_EMI_SEL 53 -#define IMX6QDL_CLK_EMI_SLOW_SEL 54 +#define IMX6QDL_CLK_EIM_SEL 53 +#define IMX6QDL_CLK_EIM_SLOW_SEL 54 #define IMX6QDL_CLK_VDO_AXI_SEL 55 #define IMX6QDL_CLK_VPU_AXI_SEL 56 #define IMX6QDL_CLK_CKO1_SEL 57 @@ -106,8 +106,8 @@ #define IMX6QDL_CLK_USDHC4_PODF 94 #define IMX6QDL_CLK_ENFC_PRED 95 #define IMX6QDL_CLK_ENFC_PODF 96 -#define IMX6QDL_CLK_EMI_PODF 97 -#define IMX6QDL_CLK_EMI_SLOW_PODF 98 +#define IMX6QDL_CLK_EIM_PODF 97 +#define IMX6QDL_CLK_EIM_SLOW_PODF 98 #define IMX6QDL_CLK_VPU_AXI_PODF 99 #define IMX6QDL_CLK_CKO1_PODF 100 #define IMX6QDL_CLK_AXI 101 diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h index a929f86d0dd..d72b5b35f15 100644 --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -60,7 +60,7 @@ #define ESC1_CLK_SRC 43 #define HDMI_CLK_SRC 44 #define VSYNC_CLK_SRC 45 -#define RBCPR_CLK_SRC 46 +#define MMSS_RBCPR_CLK_SRC 46 #define RBBMTIMER_CLK_SRC 47 #define MAPLE_CLK_SRC 48 #define VDP_CLK_SRC 49 diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index d6b56b21539..801c0ac50c4 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -21,24 +21,24 @@ #define VF610_CLK_FASK_CLK_SEL 8 #define VF610_CLK_AUDIO_EXT 9 #define VF610_CLK_ENET_EXT 10 -#define VF610_CLK_PLL1_MAIN 11 +#define VF610_CLK_PLL1_SYS 11 #define VF610_CLK_PLL1_PFD1 12 #define VF610_CLK_PLL1_PFD2 13 #define VF610_CLK_PLL1_PFD3 14 #define VF610_CLK_PLL1_PFD4 15 -#define VF610_CLK_PLL2_MAIN 16 +#define VF610_CLK_PLL2_BUS 16 #define VF610_CLK_PLL2_PFD1 17 #define VF610_CLK_PLL2_PFD2 18 #define VF610_CLK_PLL2_PFD3 19 #define VF610_CLK_PLL2_PFD4 20 -#define VF610_CLK_PLL3_MAIN 21 +#define VF610_CLK_PLL3_USB_OTG 21 #define VF610_CLK_PLL3_PFD1 22 #define VF610_CLK_PLL3_PFD2 23 #define VF610_CLK_PLL3_PFD3 24 #define VF610_CLK_PLL3_PFD4 25 -#define VF610_CLK_PLL4_MAIN 
26 -#define VF610_CLK_PLL5_MAIN 27 -#define VF610_CLK_PLL6_MAIN 28 +#define VF610_CLK_PLL4_AUDIO 26 +#define VF610_CLK_PLL5_ENET 27 +#define VF610_CLK_PLL6_VIDEO 28 #define VF610_CLK_PLL3_MAIN_DIV 29 #define VF610_CLK_PLL4_MAIN_DIV 30 #define VF610_CLK_PLL6_MAIN_DIV 31 @@ -166,9 +166,32 @@ #define VF610_CLK_DMAMUX3 153 #define VF610_CLK_FLEXCAN0_EN 154 #define VF610_CLK_FLEXCAN1_EN 155 -#define VF610_CLK_PLL7_MAIN 156 +#define VF610_CLK_PLL7_USB_HOST 156 #define VF610_CLK_USBPHY0 157 #define VF610_CLK_USBPHY1 158 -#define VF610_CLK_END 159 +#define VF610_CLK_LVDS1_IN 159 +#define VF610_CLK_ANACLK1 160 +#define VF610_CLK_PLL1_BYPASS_SRC 161 +#define VF610_CLK_PLL2_BYPASS_SRC 162 +#define VF610_CLK_PLL3_BYPASS_SRC 163 +#define VF610_CLK_PLL4_BYPASS_SRC 164 +#define VF610_CLK_PLL5_BYPASS_SRC 165 +#define VF610_CLK_PLL6_BYPASS_SRC 166 +#define VF610_CLK_PLL7_BYPASS_SRC 167 +#define VF610_CLK_PLL1 168 +#define VF610_CLK_PLL2 169 +#define VF610_CLK_PLL3 170 +#define VF610_CLK_PLL4 171 +#define VF610_CLK_PLL5 172 +#define VF610_CLK_PLL6 173 +#define VF610_CLK_PLL7 174 +#define VF610_PLL1_BYPASS 175 +#define VF610_PLL2_BYPASS 176 +#define VF610_PLL3_BYPASS 177 +#define VF610_PLL4_BYPASS 178 +#define VF610_PLL5_BYPASS 179 +#define VF610_PLL6_BYPASS 180 +#define VF610_PLL7_BYPASS 181 +#define VF610_CLK_END 182 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h index 3d33794e4f3..7448edff472 100644 --- a/include/dt-bindings/pinctrl/dra.h +++ b/include/dt-bindings/pinctrl/dra.h @@ -40,8 +40,8 @@ /* Active pin states */ #define PIN_OUTPUT (0 | PULL_DIS) -#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) -#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define PIN_OUTPUT_PULLDOWN (0) #define PIN_INPUT (INPUT_EN | PULL_DIS) #define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) #define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b7926bb9b44..407a12f663e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -432,6 +432,7 @@ static inline bool acpi_driver_match_device(struct device *dev, int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); +struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) #else /* !CONFIG_ACPI */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 36dffeccebd..e58fe7df8b9 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -90,7 +90,7 @@ extern unsigned compat_dir_class[]; extern unsigned compat_chattr_class[]; extern unsigned compat_signal_class[]; -extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall); +extern int audit_classify_compat_syscall(int abi, unsigned syscall); /* audit_names->type values */ #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a..5d858e02997 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -18,8 +18,11 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
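The replacement GENMASK()/GENMASK_ULL() bodies that follow build the mask from two full-width shifts, which also makes an all-bits mask such as GENMASK_ULL(63, 0) well defined (the old form would have shifted a 64-bit constant by 64). A quick stand-alone check of the example quoted above:

/* Illustrative check only; mirrors the new macro body shown below. */
#include <stdio.h>

#define BITS_PER_LONG_LONG 64
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* (~0ULL << 21) = 0xffffffffffe00000
	 * (~0ULL >> 24) = 0x000000ffffffffff
	 * AND           = 0x000000ffffe00000, i.e. bits 39..21 set */
	printf("%#llx\n", GENMASK_ULL(39, 21));
	printf("%#llx\n", GENMASK_ULL(63, 0));	/* 0xffffffffffffffff */
	return 0;
}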
*/ -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0207a78a8d8..aac0f9ea952 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1136,8 +1136,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) \ - ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED)) +#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); @@ -1583,13 +1582,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq, struct request *r1, struct request *r2) { - return 0; + return true; } static inline bool blk_integrity_merge_bio(struct request_queue *rq, struct request *r, struct bio *b) { - return 0; + return true; } static inline bool blk_integrity_is_initialized(struct gendisk *g) { diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 4e2bd4c95b6..0995c2de816 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); extern unsigned long free_all_bootmem(void); +extern void reset_node_managed_pages(pg_data_t *pgdat); extern void reset_all_zones_managed_pages(void); extern void free_bootmem_node(pg_data_t *pgdat, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 324329ceea1..73b45225a7c 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); -struct buffer_head *__getblk(struct block_device *bdev, sector_t block, - unsigned size); +struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp); void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); -struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); +struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); @@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh) static inline struct buffer_head * sb_bread(struct super_block *sb, sector_t block) { - return __bread(sb->s_bdev, block, sb->s_blocksize); + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); +} + +static inline struct buffer_head * +sb_bread_unmovable(struct super_block *sb, sector_t block) +{ + return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0); } static inline void @@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block) static inline struct 
buffer_head * sb_getblk(struct super_block *sb, sector_t block) { - return __getblk(sb->s_bdev, block, sb->s_blocksize); + return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); } static inline struct buffer_head * @@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh) __lock_buffer(bh); } +static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, 0); +} + +static inline struct buffer_head *__getblk(struct block_device *bdev, + sector_t block, + unsigned size) +{ + return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); +} + +/** + * __bread() - reads a specified block and returns the bh + * @bdev: the block_device to read from + * @block: number of block + * @size: size (in bytes) to read + * + * Reads a specified block, and returns buffer head that contains it. + * The page cache is allocated from movable area so that it can be migrated. + * It returns NULL if the block was unreadable. + */ +static inline struct buffer_head * +__bread(struct block_device *bdev, sector_t block, unsigned size) +{ + return __bread_gfp(bdev, block, size, __GFP_MOVABLE); +} + extern int __set_page_dirty_buffers(struct page *page); #else /* CONFIG_BLOCK */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7..b37ea95bc34 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -99,6 +99,12 @@ inval_skb: return 1; } +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index be21af149f1..2839c639f09 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -352,7 +352,6 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; -extern const struct clk_ops clk_divider_ro_ops; struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 653f0e2b6ca..abcafaa20b8 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_suspend(void); extern void clocksource_resume(void); -extern struct clocksource * __init __weak clocksource_default_clock(void); +extern struct clocksource * __init clocksource_default_clock(void); extern void clocksource_mark_unstable(struct clocksource *cs); extern u64 diff --git a/include/linux/cma.h b/include/linux/cma.h index 0430ed05d3b..a93438beb33 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -18,12 +18,12 @@ struct cma; extern phys_addr_t cma_get_base(struct cma *cma); extern unsigned long cma_get_size(struct cma *cma); -extern int __init cma_declare_contiguous(phys_addr_t size, - phys_addr_t base, phys_addr_t limit, +extern int __init cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, struct cma **res_cma); -extern int cma_init_reserved_mem(phys_addr_t size, - phys_addr_t base, int order_per_bit, +extern int 
cma_init_reserved_mem(phys_addr_t base, + phys_addr_t size, int order_per_bit, struct cma **res_cma); extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); extern bool cma_release(struct cma *cma, struct page *pages, int count); diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 2507fd2a1eb..d1a558239b1 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -71,7 +71,6 @@ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 * * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. - * Fixed in GCC 4.8.2 and later versions. * * (asm goto is automatically volatile - the naming reflects this.) */ diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h index cdd1cc202d5..c8c56595254 100644 --- a/include/linux/compiler-gcc5.h +++ b/include/linux/compiler-gcc5.h @@ -53,7 +53,6 @@ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 * * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. - * Fixed in GCC 4.8.2 and later versions. * * (asm goto is automatically volatile - the naming reflects this.) */ diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h new file mode 100644 index 00000000000..0414009e2c3 --- /dev/null +++ b/include/linux/cpufreq-dt.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2014 Marvell + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CPUFREQ_DT_H__ +#define __CPUFREQ_DT_H__ + +struct cpufreq_dt_platform_data { + /* + * True when each CPU has its own clock to control its + * frequency, false when all CPUs are controlled by a single + * clock. 
+ */ + bool independent_clocks; +}; + +#endif /* __CPUFREQ_DT_H__ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 138336b6bb0..503b085b783 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -219,6 +219,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { char name[CPUFREQ_NAME_LEN]; u8 flags; + void *driver_data; /* needed by all drivers */ int (*init) (struct cpufreq_policy *policy); @@ -312,6 +313,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); const char *cpufreq_get_current_driver(void); +void *cpufreq_get_driver_data(void); static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 72ab536ad3d..3849fce7ecf 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -14,14 +14,13 @@ extern unsigned long long elfcorehdr_addr; extern unsigned long long elfcorehdr_size; -extern int __weak elfcorehdr_alloc(unsigned long long *addr, - unsigned long long *size); -extern void __weak elfcorehdr_free(unsigned long long addr); -extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); -extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); -extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, - unsigned long from, unsigned long pfn, - unsigned long size, pgprot_t prot); +extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); +extern void elfcorehdr_free(unsigned long long addr); +extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); +extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); +extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, + unsigned long from, unsigned long pfn, + unsigned long size, pgprot_t prot); extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); diff --git a/include/linux/efi.h b/include/linux/efi.h index 45cb4ffdea6..0949f9c7e87 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -92,6 +92,7 @@ typedef struct { #define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ #define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ #define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ +#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */ #define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ #define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ @@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data); +typedef efi_status_t +efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data); + typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); typedef void efi_reset_system_t (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data); @@ -821,6 +826,7 @@ extern struct efi { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; + efi_set_variable_nonblocking_t *set_variable_nonblocking; efi_query_variable_info_t 
*query_variable_info; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; @@ -886,6 +892,13 @@ extern bool efi_poweroff_required(void); (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ (md) = (void *)(md) + (m)->desc_size) +/* + * Format an EFI memory descriptor's type and attributes to a user-provided + * character buffer, as per snprintf(), and return the buffer. + */ +char * __init efi_md_typeattr_format(char *buf, size_t size, + const efi_memory_desc_t *md); + /** * efi_range_is_wc - check the WC bit on an address range * @start: starting kvirt address @@ -1034,6 +1047,7 @@ struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; + efi_set_variable_nonblocking_t *set_variable_nonblocking; efi_query_variable_store_t *query_variable_store; }; @@ -1227,4 +1241,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, unsigned long *load_addr, unsigned long *load_size); +efi_status_t efi_parse_options(char *cmdline); + +bool efi_runtime_disabled(void); #endif /* _LINUX_EFI_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index a957d4366c2..9ab779e8a63 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -223,6 +223,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define ATTR_TIMES_SET (1 << 16) /* + * Whiteout is represented by a char device. The following constants define the + * mode and device number to use. + */ +#define WHITEOUT_MODE 0 +#define WHITEOUT_DEV 0 + +/* * This is the Inode Attributes structure, used for notify_change(). It * uses the above definitions as flags, to know which values have changed. * Also, in this manner, a Filesystem can look at only the values it cares @@ -254,6 +261,12 @@ struct iattr { */ #include <linux/quota.h> +/* + * Maximum number of layers of fs stack. Needs to be limited to + * prevent kernel stack overflow + */ +#define FILESYSTEM_MAX_STACK_DEPTH 2 + /** * enum positive_aop_returns - aop return codes with specific semantics * @@ -626,11 +639,13 @@ static inline int inode_unhashed(struct inode *inode) * 2: child/target * 3: xattr * 4: second non-directory - * The last is for certain operations (such as rename) which lock two + * 5: second parent (when locking independent directories in rename) + * + * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two * non-directories at once. 
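A whiteout as defined by the WHITEOUT_MODE/WHITEOUT_DEV constants above is simply a character device node with device number 0, so it can also be recognized from user space when inspecting an upper-layer directory. A hedged illustration (the helper below is hypothetical, not a kernel API):

/* Returns true if 'path' looks like an overlayfs whiteout entry:
 * a character device whose device number is 0 (WHITEOUT_DEV). */
#include <stdbool.h>
#include <sys/stat.h>

static bool looks_like_whiteout(const char *path)
{
	struct stat st;

	if (lstat(path, &st) != 0)
		return false;
	return S_ISCHR(st.st_mode) && st.st_rdev == 0;
}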
* * The locking order between these classes is - * parent -> child -> normal -> xattr -> second non-directory + * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory */ enum inode_i_mutex_lock_class { @@ -638,7 +653,8 @@ enum inode_i_mutex_lock_class I_MUTEX_PARENT, I_MUTEX_CHILD, I_MUTEX_XATTR, - I_MUTEX_NONDIR2 + I_MUTEX_NONDIR2, + I_MUTEX_PARENT2, }; void lock_two_nondirectories(struct inode *, struct inode*); @@ -1266,6 +1282,11 @@ struct super_block { struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; struct list_lru s_inode_lru ____cacheline_aligned_in_smp; struct rcu_head rcu; + + /* + * Indicates how deep in a filesystem stack this SB is + */ + int s_stack_depth; }; extern struct timespec current_fs_time(struct super_block *sb); @@ -1398,6 +1419,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); +extern int vfs_whiteout(struct inode *, struct dentry *); /* * VFS dentry helper functions. @@ -1528,6 +1550,9 @@ struct inode_operations { umode_t create_mode, int *opened); int (*tmpfile) (struct inode *, struct dentry *, umode_t); int (*set_acl)(struct inode *, struct posix_acl *, int); + + /* WARNING: probably going away soon, do not use! */ + int (*dentry_open)(struct dentry *, struct file *, const struct cred *); } ____cacheline_aligned; ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, @@ -1625,6 +1650,9 @@ struct super_operations { #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) +#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ + (inode)->i_rdev == WHITEOUT_DEV) + /* * Inode state bits. 
Protected by inode->i_lock * @@ -2040,6 +2068,7 @@ extern struct file *file_open_name(struct filename *, int, umode_t); extern struct file *filp_open(const char *, int, umode_t); extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int); +extern int vfs_open(const struct path *, struct file *, const struct cred *); extern struct file * dentry_open(const struct path *, int, const struct cred *); extern int filp_close(struct file *, fl_owner_t id); @@ -2253,7 +2282,9 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *, struct inode **); extern int inode_permission(struct inode *, int); +extern int __inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int); +extern int __check_sticky(struct inode *dir, struct inode *inode); static inline bool execute_ok(struct inode *inode) { @@ -2438,6 +2469,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); /* fs/block_dev.c */ +extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync); @@ -2452,6 +2484,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len, unsigned int flags); + extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); @@ -2737,6 +2772,14 @@ static inline int is_sxid(umode_t mode) return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } +static inline int check_sticky(struct inode *dir, struct inode *inode) +{ + if (!(dir->i_mode & S_ISVTX)) + return 0; + + return __check_sticky(dir, inode); +} + static inline void inode_has_no_xattr(struct inode *inode) { if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index 8bbd7bc1043..03fa332ad2a 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -72,7 +72,7 @@ struct iio_event_data { #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) -#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) +#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0068708161f..0a21fbefdfb 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) - return htonl(~((1<<(32-logmask))-1)); + return htonl(~((1U<<(32-logmask))-1)); return 0; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0dae71e9971..704b9a599b2 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1042,7 +1042,7 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ -int __jbd2_journal_clean_checkpoint_list(journal_t 
*journal); +void __jbd2_journal_clean_checkpoint_list(journal_t *journal); int __jbd2_journal_remove_checkpoint(struct journal_head *); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 40728cf1c45..3d770f5564b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -403,6 +403,7 @@ int vsscanf(const char *, const char *, va_list); extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); +extern bool parse_option_str(const char *str, const char *option); extern int core_kernel_text(unsigned long addr); extern int core_kernel_data(unsigned long addr); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8422b4ed688..b9376cd5a18 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -/* - * Lock/unlock the current runqueue - to extract task statistics: - */ -extern unsigned long long task_delta_exec(struct task_struct *); - extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 6b06d378f3d..e465bb15912 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -283,7 +283,7 @@ struct kgdb_io { extern struct kgdb_arch arch_kgdb_ops; -extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); +extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); #ifdef CONFIG_SERIAL_KGDB_NMI extern int kgdb_register_nmi_console(void); diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 6b394f0b514..eeb30798571 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -6,7 +6,8 @@ #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); -extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); +extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags); #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm) __khugepaged_exit(mm); } -static inline int khugepaged_enter(struct vm_area_struct *vma) +static inline int khugepaged_enter(struct vm_area_struct *vma, + unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) if ((khugepaged_always() || - (khugepaged_req_madv() && - vma->vm_flags & VM_HUGEPAGE)) && - !(vma->vm_flags & VM_NOHUGEPAGE)) + (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && + !(vm_flags & VM_NOHUGEPAGE)) if (__khugepaged_enter(vma->vm_mm)) return -ENOMEM; return 0; @@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) static inline void khugepaged_exit(struct mm_struct *mm) { } -static inline int khugepaged_enter(struct vm_area_struct *vma) +static inline int khugepaged_enter(struct vm_area_struct *vma, + unsigned long vm_flags) { return 0; } -static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) +static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags) { return 0; } diff --git 
a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 28be31f4925..a6059bdf7b0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); -bool kvm_is_mmio_pfn(pfn_t pfn); +bool kvm_is_reserved_pfn(pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; @@ -1080,6 +1080,7 @@ void kvm_device_get(struct kvm_device *dev); void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); +void kvm_unregister_device_ops(u32 type); extern struct kvm_device_ops kvm_mpic_ops; extern struct kvm_device_ops kvm_xics_ops; diff --git a/include/linux/leds.h b/include/linux/leds.h index e4368647219..a57611d0c94 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -13,8 +13,8 @@ #define __LINUX_LEDS_H_INCLUDED #include <linux/list.h> -#include <linux/spinlock.h> #include <linux/rwsem.h> +#include <linux/spinlock.h> #include <linux/timer.h> #include <linux/workqueue.h> @@ -31,8 +31,8 @@ enum led_brightness { struct led_classdev { const char *name; - int brightness; - int max_brightness; + enum led_brightness brightness; + enum led_brightness max_brightness; int flags; /* Lower 16 bits reflect status */ @@ -140,6 +140,16 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev, */ extern void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness); +/** + * led_update_brightness - update LED brightness + * @led_cdev: the LED to query + * + * Get an LED's current brightness and update led_cdev->brightness + * member with the obtained value. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_update_brightness(struct led_classdev *led_cdev); /* * LED Triggers diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h new file mode 100644 index 00000000000..307d9cab202 --- /dev/null +++ b/include/linux/mailbox_client.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar <jassisinghbrar@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CLIENT_H +#define __MAILBOX_CLIENT_H + +#include <linux/of.h> +#include <linux/device.h> + +struct mbox_chan; + +/** + * struct mbox_client - User of a mailbox + * @dev: The client device + * @tx_block: If the mbox_send_message should block until data is + * transmitted. + * @tx_tout: Max block period in ms before TX is assumed failure + * @knows_txdone: If the client could run the TX state machine. Usually + * if the client receives some ACK packet for transmission. + * Unused if the controller already has TX_Done/RTR IRQ. 
+ * @rx_callback: Atomic callback to provide client the data received + * @tx_done: Atomic callback to tell client of data transmission + */ +struct mbox_client { + struct device *dev; + bool tx_block; + unsigned long tx_tout; + bool knows_txdone; + + void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_done)(struct mbox_client *cl, void *mssg, int r); +}; + +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); +int mbox_send_message(struct mbox_chan *chan, void *mssg); +void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ +bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ + +#endif /* __MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h new file mode 100644 index 00000000000..d4cf96f07cf --- /dev/null +++ b/include/linux/mailbox_controller.h @@ -0,0 +1,133 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CONTROLLER_H +#define __MAILBOX_CONTROLLER_H + +#include <linux/of.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/device.h> +#include <linux/completion.h> + +struct mbox_chan; + +/** + * struct mbox_chan_ops - methods to control mailbox channels + * @send_data: The API asks the MBOX controller driver, in atomic + * context try to transmit a message on the bus. Returns 0 if + * data is accepted for transmission, -EBUSY while rejecting + * if the remote hasn't yet read the last data sent. Actual + * transmission of data is reported by the controller via + * mbox_chan_txdone (if it has some TX ACK irq). It must not + * sleep. + * @startup: Called when a client requests the chan. The controller + * could ask clients for additional parameters of communication + * to be provided via client's chan_data. This call may + * block. After this call the Controller must forward any + * data received on the chan by calling mbox_chan_received_data. + * The controller may do stuff that need to sleep. + * @shutdown: Called when a client relinquishes control of a chan. + * This call may block too. The controller must not forward + * any received data anymore. + * The controller may do stuff that need to sleep. + * @last_tx_done: If the controller sets 'txdone_poll', the API calls + * this to poll status of last TX. The controller must + * give priority to IRQ method over polling and never + * set both txdone_poll and txdone_irq. Only in polling + * mode 'send_data' is expected to return -EBUSY. + * The controller may do stuff that need to sleep/block. + * Used only if txdone_poll:=true && txdone_irq:=false + * @peek_data: Atomic check for any received data. Return true if controller + * has some data to push to the client. False otherwise. + */ +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *chan, void *data); + int (*startup)(struct mbox_chan *chan); + void (*shutdown)(struct mbox_chan *chan); + bool (*last_tx_done)(struct mbox_chan *chan); + bool (*peek_data)(struct mbox_chan *chan); +}; + +/** + * struct mbox_controller - Controller of a class of communication channels + * @dev: Device backing this controller + * @ops: Operators that work on each communication chan + * @chans: Array of channels + * @num_chans: Number of channels in the 'chans' array. 
+ * @txdone_irq: Indicates if the controller can report to API when + * the last transmitted data was read by the remote. + * Eg, if it has some TX ACK irq. + * @txdone_poll: If the controller can read but not report the TX + * done. Ex, some register shows the TX status but + * no interrupt rises. Ignored if 'txdone_irq' is set. + * @txpoll_period: If 'txdone_poll' is in effect, the API polls for + * last TX's status after these many millisecs + * @of_xlate: Controller driver specific mapping of channel via DT + * @poll: API private. Used to poll for TXDONE on all channels. + * @node: API private. To hook into list of controllers. + */ +struct mbox_controller { + struct device *dev; + struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned txpoll_period; + struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, + const struct of_phandle_args *sp); + /* Internal to API */ + struct timer_list poll; + struct list_head node; +}; + +/* + * The length of circular buffer for queuing messages from a client. + * 'msg_count' tracks the number of buffered messages while 'msg_free' + * is the index where the next message would be buffered. + * We shouldn't need it too big because every transfer is interrupt + * triggered and if we have lots of data to transfer, the interrupt + * latencies are going to be the bottleneck, not the buffer length. + * Besides, mbox_send_message could be called from atomic context and + * the client could also queue another message from the notifier 'tx_done' + * of the last transfer done. + * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN" + * print, it needs to be taken from config option or somesuch. + */ +#define MBOX_TX_QUEUE_LEN 20 + +/** + * struct mbox_chan - s/w representation of a communication chan + * @mbox: Pointer to the parent/provider of this channel + * @txdone_method: Way to detect TXDone chosen by the API + * @cl: Pointer to the current owner of this channel + * @tx_complete: Transmission completion + * @active_req: Currently active request hook + * @msg_count: No. 
of mssg currently queued + * @msg_free: Index of next available mssg slot + * @msg_data: Hook for data packet + * @lock: Serialise access to the channel + * @con_priv: Hook for controller driver to attach private data + */ +struct mbox_chan { + struct mbox_controller *mbox; + unsigned txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned msg_count, msg_free; + void *msg_data[MBOX_TX_QUEUE_LEN]; + spinlock_t lock; /* Serialise access to the channel */ + void *con_priv; +}; + +int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */ +void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ +void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ +void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ + +#endif /* __MAILBOX_CONTROLLER_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 19df5d85741..6b75640ef5a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void) return false; } -void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, - unsigned long *flags); - -extern atomic_t memcg_moving; - -static inline void mem_cgroup_begin_update_page_stat(struct page *page, - bool *locked, unsigned long *flags) -{ - if (mem_cgroup_disabled()) - return; - rcu_read_lock(); - *locked = false; - if (atomic_read(&memcg_moving)) - __mem_cgroup_begin_update_page_stat(page, locked, flags); -} - -void __mem_cgroup_end_update_page_stat(struct page *page, - unsigned long *flags); -static inline void mem_cgroup_end_update_page_stat(struct page *page, - bool *locked, unsigned long *flags) -{ - if (mem_cgroup_disabled()) - return; - if (*locked) - __mem_cgroup_end_update_page_stat(page, flags); - rcu_read_unlock(); -} - -void mem_cgroup_update_page_stat(struct page *page, - enum mem_cgroup_stat_index idx, - int val); - -static inline void mem_cgroup_inc_page_stat(struct page *page, +struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, + unsigned long *flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, + unsigned long flags); +void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx, int val); + +static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { - mem_cgroup_update_page_stat(page, idx, 1); + mem_cgroup_update_page_stat(memcg, idx, 1); } -static inline void mem_cgroup_dec_page_stat(struct page *page, +static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { - mem_cgroup_update_page_stat(page, idx, -1); + mem_cgroup_update_page_stat(memcg, idx, -1); } unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, @@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } -static inline void mem_cgroup_begin_update_page_stat(struct page *page, +static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags) { + return NULL; } -static inline void mem_cgroup_end_update_page_stat(struct page *page, - bool *locked, unsigned long *flags) +static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, + bool locked, unsigned long flags) { } @@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait) return false; } -static inline void 
mem_cgroup_inc_page_stat(struct page *page, +static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { } -static inline void mem_cgroup_dec_page_stat(struct page *page, +static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { } diff --git a/include/linux/memory.h b/include/linux/memory.h index bb7384e3c3d..8b8d8d12348 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -35,7 +35,7 @@ struct memory_block { }; int arch_get_memory_phys_device(unsigned long start_pfn); -unsigned long __weak memory_block_size_bytes(void); +unsigned long memory_block_size_bytes(void); /* These states are exposed to userspace as text strings in sysfs */ #define MEM_ONLINE (1<<0) /* exposed to userspace */ diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index fc17d56581b..582e67f3405 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -330,6 +330,13 @@ enum max77693_irq_source { MAX77693_IRQ_GROUP_NR, }; +#define SRC_IRQ_CHARGER BIT(0) +#define SRC_IRQ_TOP BIT(1) +#define SRC_IRQ_FLASH BIT(2) +#define SRC_IRQ_MUIC BIT(3) +#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \ + | SRC_IRQ_FLASH | SRC_IRQ_MUIC) + #define LED_IRQ_FLED2_OPEN BIT(0) #define LED_IRQ_FLED2_SHORT BIT(1) #define LED_IRQ_FLED1_OPEN BIT(2) diff --git a/include/linux/mm.h b/include/linux/mm.h index 02d11ee7f19..b46461116cd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1176,6 +1176,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int truncate_inode_page(struct address_space *mapping, struct page *page); int generic_error_remove_page(struct address_space *mapping, struct page *page); @@ -1234,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); void account_page_dirtied(struct page *page, struct address_space *mapping); -void account_page_writeback(struct page *page); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 48bf12ef662..ffe66e381c0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -431,6 +431,15 @@ struct zone { */ int nr_migrate_reserve_block; +#ifdef CONFIG_MEMORY_ISOLATION + /* + * Number of isolated pageblock. It is used to solve incorrect + * freepage counting problem due to racy retrieving migratetype + * of pageblock. Protected by zone->lock. 
+ */ + unsigned long nr_isolate_pageblock; +#endif + #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; diff --git a/include/linux/mount.h b/include/linux/mount.h index 9262e4bf0cc..c2c561dc011 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -81,6 +81,9 @@ extern struct vfsmount *mntget(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(struct path *path); extern int __mnt_is_readonly(struct vfsmount *mnt); +struct path; +extern struct vfsmount *clone_private_mount(struct path *path); + struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 9e6294f32ba..046a0a2e4c4 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -187,32 +187,17 @@ struct spi_nor { /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure - * @id: the spi_device_id provided by the driver + * @name: the chip type name * @mode: the read mode supported by the driver * * The drivers can use this fuction to scan the SPI NOR. * In the scanning, it will try to get all the necessary information to * fill the mtd_info{} and the spi_nor{}. * - * The board may assigns a spi_device_id with @id which be used to compared with - * the spi_device_id detected by the scanning. + * The chip type name can be provided through the @name parameter. * * Return: 0 for success, others for failure. */ -int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, - enum read_mode mode); -extern const struct spi_device_id spi_nor_ids[]; - -/** - * spi_nor_match_id() - find the spi_device_id by the name - * @name: the name of the spi_device_id - * - * The drivers use this function to find the spi_device_id - * specified by the @name. - * - * Return: returns the right spi_device_id pointer on success, - * and returns NULL on failure. 
- */ -const struct spi_device_id *spi_nor_match_id(char *name); +int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode); #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 983876f24ae..47ebb4fafd8 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1224,11 +1224,22 @@ struct nfs41_free_stateid_res { unsigned int status; }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ + kfree(cinfo->buckets); +} + #else struct pnfs_ds_commit_info { }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ +} + #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 diff --git a/include/linux/of.h b/include/linux/of.h index 6545e7aec7b..29f0adc5f3e 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -267,14 +267,12 @@ extern int of_property_read_u64(const struct device_node *np, extern int of_property_read_string(struct device_node *np, const char *propname, const char **out_string); -extern int of_property_read_string_index(struct device_node *np, - const char *propname, - int index, const char **output); extern int of_property_match_string(struct device_node *np, const char *propname, const char *string); -extern int of_property_count_strings(struct device_node *np, - const char *propname); +extern int of_property_read_string_helper(struct device_node *np, + const char *propname, + const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); extern int of_device_is_available(const struct device_node *device); @@ -486,15 +484,9 @@ static inline int of_property_read_string(struct device_node *np, return -ENOSYS; } -static inline int of_property_read_string_index(struct device_node *np, - const char *propname, int index, - const char **out_string) -{ - return -ENOSYS; -} - -static inline int of_property_count_strings(struct device_node *np, - const char *propname) +static inline int of_property_read_string_helper(struct device_node *np, + const char *propname, + const char **out_strs, size_t sz, int index) { return -ENOSYS; } @@ -668,6 +660,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np, } /** + * of_property_read_string_array() - Read an array of strings from a multiple + * strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_strs: output array of string pointers. + * @sz: number of array elements to read. + * + * Search for a property in a device tree node and retrieve a list of + * terminated string values (pointer to data, not a copy) in that property. + * + * If @out_strs is NULL, the number of strings in the property is returned. + */ +static inline int of_property_read_string_array(struct device_node *np, + const char *propname, const char **out_strs, + size_t sz) +{ + return of_property_read_string_helper(np, propname, out_strs, sz, 0); +} + +/** + * of_property_count_strings() - Find and return the number of strings from a + * multiple strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device tree node and retrieve the number of null + * terminated string contain in it. 
Returns the number of strings on + * success, -EINVAL if the property does not exist, -ENODATA if property + * does not have a value, and -EILSEQ if the string is not null-terminated + * within the length of the property data. + */ +static inline int of_property_count_strings(struct device_node *np, + const char *propname) +{ + return of_property_read_string_helper(np, propname, NULL, 0, 0); +} + +/** + * of_property_read_string_index() - Find and read a string from a multiple + * strings property. + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the string in the list of strings + * @out_string: pointer to null terminated return string, modified only if + * return value is 0. + * + * Search for a property in a device tree node and retrieve a null + * terminated string value (pointer to data, not a copy) in the list of strings + * contained in that property. + * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if + * property does not have a value, and -EILSEQ if the string is not + * null-terminated within the length of the property data. + * + * The out_string pointer is modified only if a valid string can be decoded. + */ +static inline int of_property_read_string_index(struct device_node *np, + const char *propname, + int index, const char **output) +{ + int rc = of_property_read_string_helper(np, propname, output, 1, index); + return rc < 0 ? rc : 0; +} + +/** * of_property_read_bool - Findfrom a property * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 5b5efae0913..ad2f6705437 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -16,7 +16,7 @@ struct reserved_mem { }; struct reserved_mem_ops { - void (*device_init)(struct reserved_mem *rmem, + int (*device_init)(struct reserved_mem *rmem, struct device *dev); void (*device_release)(struct reserved_mem *rmem, struct device *dev); @@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) #ifdef CONFIG_OF_RESERVED_MEM -void of_reserved_mem_device_init(struct device *dev); +int of_reserved_mem_device_init(struct device *dev); void of_reserved_mem_device_release(struct device *dev); void fdt_init_reserved_mem(void); void fdt_reserved_mem_save_node(unsigned long node, const char *uname, phys_addr_t base, phys_addr_t size); #else -static inline void of_reserved_mem_device_init(struct device *dev) { } +static inline int of_reserved_mem_device_init(struct device *dev) +{ + return -ENOSYS; +} static inline void of_reserved_mem_device_release(struct device *pdev) { } static inline void fdt_init_reserved_mem(void) { } diff --git a/include/linux/oom.h b/include/linux/oom.h index 647395a1a55..e8d6e105872 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p) extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); + +extern int oom_kills_count(void); +extern void note_oom_kill(void); extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, unsigned int points, unsigned long totalpages, struct mem_cgroup *memcg, nodemask_t *nodemask, diff --git a/include/linux/page-isolation.h 
b/include/linux/page-isolation.h index 3fff8e77406..2dc1e1697b4 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -2,6 +2,10 @@ #define __LINUX_PAGEISOLATION_H #ifdef CONFIG_MEMORY_ISOLATION +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return zone->nr_isolate_pageblock; +} static inline bool is_migrate_isolate_page(struct page *page) { return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; @@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype) return migratetype == MIGRATE_ISOLATE; } #else +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return false; +} static inline bool is_migrate_isolate_page(struct page *page) { return false; diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 64dacb7288a..24c7728ca68 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) if (pci_is_root_bus(pbus)) dev = pbus->bridge; - else + else { + /* If pbus is a virtual bus, there is no bridge to it */ + if (!pbus->self) + return NULL; + dev = &pbus->self->dev; + } return ACPI_HANDLE(dev); } diff --git a/include/linux/pci.h b/include/linux/pci.h index 5be8db45e36..4c8ac5fcc22 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -331,6 +331,7 @@ struct pci_dev { unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d5c89e0dd0e..51ce60c35f4 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, /* paired with smp_store_release() in percpu_ref_reinit() */ smp_read_barrier_depends(); - if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. 
+ */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h index 5161f63ec1c..5161f63ec1c 100644 --- a/include/linux/mailbox.h +++ b/include/linux/pl320-ipc.h diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 73e938b7e93..2e0e06daf8c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -72,8 +72,10 @@ struct generic_pm_domain { bool max_off_time_changed; bool cached_power_down_ok; struct gpd_cpuidle_data *cpuidle_data; - void (*attach_dev)(struct device *dev); - void (*detach_dev)(struct device *dev); + int (*attach_dev)(struct generic_pm_domain *domain, + struct device *dev); + void (*detach_dev)(struct generic_pm_domain *domain, + struct device *dev); }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -104,7 +106,7 @@ struct generic_pm_domain_data { struct notifier_block nb; struct mutex lock; unsigned int refcount; - bool need_restore; + int need_restore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 9ab4bf7c464..636e8283450 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -15,6 +15,7 @@ enum { PM_QOS_CPU_DMA_LATENCY, PM_QOS_NETWORK_LATENCY, PM_QOS_NETWORK_THROUGHPUT, + PM_QOS_MEMORY_BANDWIDTH, /* insert new class ID */ PM_QOS_NUM_CLASSES, @@ -32,6 +33,7 @@ enum pm_qos_flags_status { #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 +#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) @@ -69,7 +71,8 @@ struct dev_pm_qos_request { enum pm_qos_type { PM_QOS_UNITIALIZED, PM_QOS_MAX, /* return the largest value */ - PM_QOS_MIN /* return the smallest value */ + PM_QOS_MIN, /* return the smallest value */ + PM_QOS_SUM /* return the sum */ }; /* diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h index fe25876c1a5..17d7d0d20ec 100644 --- a/include/linux/pnfs_osd_xdr.h +++ b/include/linux/pnfs_osd_xdr.h @@ -5,7 +5,7 @@ * All rights reserved. * * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index 07e7945a1ff..e97fc656a05 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -253,9 +253,6 @@ struct charger_manager { struct device *dev; struct charger_desc *desc; - struct power_supply *fuel_gauge; - struct power_supply **charger_stat; - #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd_batt; #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 3ed04967302..096dbced02a 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -200,6 +200,12 @@ struct power_supply { void (*external_power_changed)(struct power_supply *psy); void (*set_charged)(struct power_supply *psy); + /* + * Set if thermal zone should not be created for this power supply. + * For example for virtual supplies forwarding calls to actual + * sensors or other supplies. 
+ */ + bool no_thermal; /* For APM emulation, think legacy userspace. */ int use_for_apm; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index a4a819ffb2d..53ff1a752d7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -617,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void) #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** + * lockless_dereference() - safely load a pointer for later dereference + * @p: The pointer to load + * + * Similar to rcu_dereference(), but for situations where the pointed-to + * object's lifetime is managed by something other than RCU. That + * "something other" might be reference counting or simple immortality. + */ +#define lockless_dereference(p) \ +({ \ + typeof(p) _________p1 = ACCESS_ONCE(p); \ + smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ + (_________p1); \ +}) + +/** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index d347c805f92..4cfaaf22230 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -35,6 +35,8 @@ #ifndef __LINUX_REGULATOR_CONSUMER_H_ #define __LINUX_REGULATOR_CONSUMER_H_ +#include <linux/err.h> + struct device; struct notifier_block; struct regmap; @@ -99,6 +101,8 @@ struct regmap; * Data passed is "struct pre_voltage_change_data" * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. * Data passed is old voltage cast to (void *). + * PRE_DISABLE Regulator is about to be disabled + * ABORT_DISABLE Regulator disable failed for some reason * * NOTE: These events can be OR'ed together when passed into handler. */ @@ -113,6 +117,8 @@ struct regmap; #define REGULATOR_EVENT_DISABLE 0x80 #define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 +#define REGULATOR_EVENT_PRE_DISABLE 0x400 +#define REGULATOR_EVENT_ABORT_DISABLE 0x800 /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index fc0ee0ce832..28da08e4671 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -301,6 +301,9 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is * insufficient. + * @ena_gpio_initialized: GPIO controlling regulator enable was properly + * initialized, meaning that >= 0 is a valid gpio + * identifier and < 0 is a non existent gpio. * @ena_gpio: GPIO controlling regulator enable. * @ena_gpio_invert: Sense for GPIO enable control. 
* @ena_gpio_flags: Flags to use when calling gpio_request_one() @@ -312,6 +315,7 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; + bool ena_gpio_initialized; int ena_gpio; unsigned int ena_gpio_invert:1; unsigned int ena_gpio_flags; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 49a4d6f5910..e2c13cd863b 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -int ring_buffer_wait(struct ring_buffer *buffer, int cpu); +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a59d9343c25..6c8b6f604e7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -557,7 +557,9 @@ struct sk_buff { /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ + /* private: */ __u32 headers_start[0]; + /* public: */ /* if you move pkt_type around you also must adapt those constants */ #ifdef __BIG_ENDIAN_BITFIELD @@ -642,7 +644,9 @@ struct sk_buff { __u16 network_header; __u16 mac_header; + /* private: */ __u32 headers_end[0]; + /* public: */ /* These elements must be at the end, see alloc_skb() for details. */ sk_buff_data_t tail; @@ -795,15 +799,19 @@ struct sk_buff_fclones { * @skb: buffer * * Returns true is skb is a fast clone, and its clone is not freed. + * Some drivers call skb_orphan() in their ndo_start_xmit(), + * so we also check that this didnt happen. */ -static inline bool skb_fclone_busy(const struct sk_buff *skb) +static inline bool skb_fclone_busy(const struct sock *sk, + const struct sk_buff *skb) { const struct sk_buff_fclones *fclones; fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - fclones->skb2.fclone == SKB_FCLONE_CLONE; + fclones->skb2.fclone == SKB_FCLONE_CLONE && + fclones->skb2.sk == sk; } static inline struct sk_buff *alloc_skb_fclone(unsigned int size, diff --git a/include/linux/socket.h b/include/linux/socket.h index ec538fc287a..bb9b8364007 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -256,7 +256,7 @@ struct ucred { #define MSG_EOF MSG_FIN #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ -#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file +#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file descriptor received through SCM_RIGHTS */ #if defined(CONFIG_COMPAT) diff --git a/include/linux/string.h b/include/linux/string.h index e6edfe51575..2e22a2e58f3 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); #endif extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, - const void *from, size_t available); + const void *from, size_t available); /** * strstarts - does @str start with @prefix? @@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix) return strncmp(str, prefix, strlen(prefix)) == 0; } -extern size_t memweight(const void *ptr, size_t bytes); +size_t memweight(const void *ptr, size_t bytes); +void memzero_explicit(void *s, size_t count); /** * kbasename - return the last part of a pathname. 
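The string.h hunk above adds a memzero_explicit() declaration next to memweight(). Unlike a plain memset(), which the compiler may drop as a dead store when the buffer is never read again, memzero_explicit() is guaranteed to perform the clear, which is what you want when scrubbing key material off the stack. A minimal sketch of the intended usage pattern follows; the example_derive_key()/example_use_key() helpers are placeholders for illustration only, not functions added by this patch:

#include <linux/string.h>	/* memzero_explicit() */
#include <linux/types.h>

/* Illustrative helpers, not part of this patch. */
extern int example_derive_key(const u8 *secret, size_t len, u8 *out, size_t outlen);
extern int example_use_key(const u8 *key, size_t len);

static int derive_and_use_key(const u8 *secret, size_t len)
{
	u8 tmp[64];
	int err;

	err = example_derive_key(secret, len, tmp, sizeof(tmp));
	if (err)
		return err;

	err = example_use_key(tmp, sizeof(tmp));

	/*
	 * memset(tmp, 0, sizeof(tmp)) here would be a dead store from the
	 * compiler's point of view (tmp is never read again) and could be
	 * optimised away; memzero_explicit() cannot, so the derived key does
	 * not linger on the stack.
	 */
	memzero_explicit(tmp, sizeof(tmp));
	return err;
}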
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 0305cde21a7..ef90838b36a 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -44,6 +44,10 @@ #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ ((long)t-2732+5)/10 : ((long)t-2732-5)/10) #define CELSIUS_TO_KELVIN(t) ((t)*10+2732) +#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100) +#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732) +#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) +#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) /* Adding event notification support elements */ #define THERMAL_GENL_FAMILY_NAME "thermal_event" diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 1ad4724458d..baa81718d98 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -63,7 +63,17 @@ struct uio_port { #define MAX_UIO_PORT_REGIONS 5 -struct uio_device; +struct uio_device { + struct module *owner; + struct device *dev; + int minor; + atomic_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; + struct uio_info *info; + struct kobject *map_dir; + struct kobject *portio_dir; +}; /** * struct uio_info - UIO device capabilities diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4f844c6b03e..60beb5dc797 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -98,11 +98,11 @@ struct uprobes_state { struct xol_area *xol_area; }; -extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); -extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); -extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); -extern bool __weak is_trap_insn(uprobe_opcode_t *insn); -extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); +extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); +extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); +extern bool is_swbp_insn(uprobe_opcode_t *insn); +extern bool is_trap_insn(uprobe_opcode_t *insn); +extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); @@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); -extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); -extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, +extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); +extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len); #else /* !CONFIG_UPROBES */ struct uprobes_state { diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 26088feb660..d9a4905e01d 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -78,6 +78,7 @@ struct usbnet { # define 
EVENT_NO_RUNTIME_PM 9 # define EVENT_RX_KILL 10 # define EVENT_LINK_CHANGE 11 +# define EVENT_SET_RX_MODE 12 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -159,6 +160,9 @@ struct driver_info { /* called by minidriver when receiving indication */ void (*indication)(struct usbnet *dev, void *ind, int indlen); + /* rx mode change (device changes address list filtering) */ + void (*set_rx_mode)(struct usbnet *dev); + /* for new devices, use the descriptor-reading code instead */ int in; /* rx endpoint */ int out; /* tx endpoint */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 2a3038ee17a..395b70e0ecc 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -97,13 +97,8 @@ struct watchdog_device { #define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ }; -#ifdef CONFIG_WATCHDOG_NOWAYOUT -#define WATCHDOG_NOWAYOUT 1 -#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT) -#else -#define WATCHDOG_NOWAYOUT 0 -#define WATCHDOG_NOWAYOUT_INIT_STATUS 0 -#endif +#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) +#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT) /* Use the following function to check whether or not the watchdog is active */ static inline bool watchdog_active(struct watchdog_device *wdd) diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index d9fa68f26c4..2a25dec3021 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -34,7 +34,6 @@ * @list: used to maintain a list of currently available transports * @name: the human-readable name of the transport * @maxsize: transport provided maximum packet size - * @pref: Preferences of this transport * @def: set if this transport should be considered the default * @create: member function to create a new connection on this transport * @close: member function to discard a connection on this transport diff --git a/include/net/inet_common.h b/include/net/inet_common.h index fe7994c48b7..b2828a06a5a 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net); +int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); static inline void inet_ctl_sock_destroy(struct sock *sk) { diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 97f47201243..4292929392b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -671,6 +671,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } +void ipv6_proxy_select_ident(struct sk_buff *skb); + int ip6_dst_hoplimit(struct dst_entry *dst); static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index e8427193c77..03e928a5522 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -1,6 +1,8 @@ #ifndef _IPV4_NF_REJECT_H #define _IPV4_NF_REJECT_H +#include <linux/skbuff.h> +#include <net/ip.h> #include <net/icmp.h> static inline void nf_send_unreach(struct sk_buff *skb_in, int code) @@ -10,4 +12,12 @@ static inline void nf_send_unreach(struct sk_buff *skb_in, int code) void nf_send_reset(struct sk_buff *oldskb, int hook); +const struct tcphdr 
*nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *_oth, int hook); +struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int ttl); +void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, + const struct tcphdr *oth); + #endif /* _IPV4_NF_REJECT_H */ diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 48e18810a9b..23216d48abf 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -15,4 +15,14 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); +const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *otcph, + unsigned int *otcplen, int hook); +struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int hoplimit); +void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + const struct tcphdr *oth, unsigned int otcplen); + #endif /* _IPV6_NF_REJECT_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3d7292392fa..3ae969e3acf 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -396,14 +396,12 @@ struct nft_rule { /** * struct nft_trans - nf_tables object update in transaction * - * @rcu_head: rcu head to defer release of transaction data * @list: used internally * @msg_type: message type * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { - struct rcu_head rcu_head; struct list_head list; int msg_type; struct nft_ctx ctx; @@ -530,6 +528,9 @@ enum nft_chain_type { NFT_CHAIN_T_MAX }; +int nft_chain_validate_dependency(const struct nft_chain *chain, + enum nft_chain_type type); + struct nft_stats { u64 bytes; u64 pkts; diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h index c72729f954f..e2a518b60e1 100644 --- a/include/net/netfilter/nft_masq.h +++ b/include/net/netfilter/nft_masq.h @@ -13,4 +13,7 @@ int nft_masq_init(const struct nft_ctx *ctx, int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr); +int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data); + #endif /* _NFT_MASQ_H_ */ diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index a47790bcaa3..2a50a70ef58 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, return iptunnel_handle_offloads(skb, udp_csum, type); } +static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr)); + skb_shinfo(skb)->gso_type |= uh->check ? 
+ SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; +} + static inline void udp_tunnel_encap_enable(struct socket *sock) { #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d5f59f3fc35..57cccd0052e 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -8,6 +8,12 @@ #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +/* VXLAN protocol header */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + struct vxlan_sock; typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); @@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); +static inline bool vxlan_gso_check(struct sk_buff *skb) +{ + if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && + (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return false; + + return true; +} + /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) /* IPv6 header + UDP + VXLAN + Ethernet header */ diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index b2e85fdd2ae..a09cca82908 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h index 6ca3265a4dc..7a8d2cd3032 100644 --- a/include/scsi/osd_ore.h +++ b/include/scsi/osd_ore.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2011 - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * * Public Declarations of the ORE API * diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h index a2594afe05c..e0ca835e7bf 100644 --- a/include/scsi/osd_protocol.h +++ b/include/scsi/osd_protocol.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify @@ -496,7 +496,7 @@ struct osd_timestamp { */ struct osd_key_identifier { - u8 id[7]; /* if you know why 7 please email bharrosh@panasas.com */ + u8 id[7]; /* if you know why 7 please email ooo@electrozaur.com */ } __packed; /* for osd_capability.format */ diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h index f96151c9c9e..7abeb0f0db3 100644 --- a/include/scsi/osd_sec.h +++ b/include/scsi/osd_sec.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h index 91db543a550..d52aa93a0b2 100644 --- a/include/scsi/osd_sense.h +++ b/include/scsi/osd_sense.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. 
* * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h index bd0be7ed4bc..48e8a165e13 100644 --- a/include/scsi/osd_types.h +++ b/include/scsi/osd_types.h @@ -4,7 +4,7 @@ * Copyright (C) 2008 Panasas Inc. All rights reserved. * * Authors: - * Boaz Harrosh <bharrosh@panasas.com> + * Boaz Harrosh <ooo@electrozaur.com> * Benny Halevy <bhalevy@panasas.com> * * This program is free software; you can redistribute it and/or modify diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index e6458356070..56ed843969c 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -67,8 +67,9 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) if (!sdev->tagged_supported) return; - if (!shost_use_blk_mq(sdev->host) && - !blk_queue_tagged(sdev->request_queue)) + if (shost_use_blk_mq(sdev->host)) + queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue); + else if (!blk_queue_tagged(sdev->request_queue)) blk_queue_init_tags(sdev->request_queue, depth, sdev->host->bqt); @@ -81,8 +82,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) **/ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) { - if (!shost_use_blk_mq(sdev->host) && - blk_queue_tagged(sdev->request_queue)) + if (blk_queue_tagged(sdev->request_queue)) blk_queue_free_tags(sdev->request_queue); scsi_adjust_queue_depth(sdev, 0, depth); } diff --git a/include/sound/pcm.h b/include/sound/pcm.h index e862497f755..8bb00a27e21 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -184,6 +184,8 @@ struct snd_pcm_ops { #define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) #define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) #define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) +#define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE) +#define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE) #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index 2883a7a6f9f..98f2ade0266 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime { /* state and update */ enum snd_soc_dpcm_update runtime_update; enum snd_soc_dpcm_state state; + + int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ }; /* can this BE stop and free */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 9ec9864ecf3..23c518a0340 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -108,6 +108,8 @@ #define DA_EMULATE_ALUA 0 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ #define DA_ENFORCE_PR_ISIDS 1 +/* Force SPC-3 PR Activate Persistence across Target Power Loss */ +#define DA_FORCE_PR_APTPL 0 #define DA_STATUS_MAX_SECTORS_MIN 16 #define DA_STATUS_MAX_SECTORS_MAX 8192 /* By default don't report non-rotating (solid state) medium */ @@ -680,6 +682,7 @@ struct se_dev_attrib { enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; int enforce_pr_isids; + int force_pr_aptpl; int is_nonrot; int emulate_rest_reord; u32 hw_block_size; @@ -903,4 +906,18 @@ struct se_wwn { struct config_group fabric_stat_group; }; +static inline void atomic_inc_mb(atomic_t *v) +{ + 
smp_mb__before_atomic(); + atomic_inc(v); + smp_mb__after_atomic(); +} + +static inline void atomic_dec_mb(atomic_t *v) +{ + smp_mb__before_atomic(); + atomic_dec(v); + smp_mb__after_atomic(); +} + #endif /* TARGET_CORE_BASE_H */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d4f70a7fe87..ff4bd1b3524 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -2369,7 +2369,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit, show_extent_status(__entry->found ? __entry->status : 0)) ); -TRACE_EVENT(ext4_es_shrink_enter, +DECLARE_EVENT_CLASS(ext4__es_shrink_enter, TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), TP_ARGS(sb, nr_to_scan, cache_cnt), @@ -2391,26 +2391,38 @@ TRACE_EVENT(ext4_es_shrink_enter, __entry->nr_to_scan, __entry->cache_cnt) ); -TRACE_EVENT(ext4_es_shrink_exit, - TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt), +DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_count, + TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), - TP_ARGS(sb, shrunk_nr, cache_cnt), + TP_ARGS(sb, nr_to_scan, cache_cnt) +); + +DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_scan_enter, + TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), + + TP_ARGS(sb, nr_to_scan, cache_cnt) +); + +TRACE_EVENT(ext4_es_shrink_scan_exit, + TP_PROTO(struct super_block *sb, int nr_shrunk, int cache_cnt), + + TP_ARGS(sb, nr_shrunk, cache_cnt), TP_STRUCT__entry( __field( dev_t, dev ) - __field( int, shrunk_nr ) + __field( int, nr_shrunk ) __field( int, cache_cnt ) ), TP_fast_assign( __entry->dev = sb->s_dev; - __entry->shrunk_nr = shrunk_nr; + __entry->nr_shrunk = nr_shrunk; __entry->cache_cnt = cache_cnt; ), - TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d", + TP_printk("dev %d,%d nr_shrunk %d cache_cnt %d", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->shrunk_nr, __entry->cache_cnt) + __entry->nr_shrunk, __entry->cache_cnt) ); TRACE_EVENT(ext4_collapse_range, @@ -2438,6 +2450,37 @@ TRACE_EVENT(ext4_collapse_range, __entry->offset, __entry->len) ); +TRACE_EVENT(ext4_es_shrink, + TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, + int skip_precached, int nr_skipped, int retried), + + TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, nr_shrunk ) + __field( unsigned long long, scan_time ) + __field( int, skip_precached ) + __field( int, nr_skipped ) + __field( int, retried ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->nr_shrunk = nr_shrunk; + __entry->scan_time = div_u64(scan_time, 1000); + __entry->skip_precached = skip_precached; + __entry->nr_skipped = nr_skipped; + __entry->retried = retried; + ), + + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d " + "nr_skipped %d retried %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk, + __entry->scan_time, __entry->skip_precached, + __entry->nr_skipped, __entry->retried) +); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 9b56f37148c..e335e7d8c6c 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -660,18 +660,18 @@ TRACE_EVENT(rcu_torture_read, /* * Tracepoint for _rcu_barrier() execution. The string "s" describes * the _rcu_barrier phase: - * "Begin": rcu_barrier_callback() started. - * "Check": rcu_barrier_callback() checking for piggybacking. 
- * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit. - * "Inc1": rcu_barrier_callback() piggyback check counter incremented. - * "Offline": rcu_barrier_callback() found offline CPU - * "OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU. - * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks. - * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks. + * "Begin": _rcu_barrier() started. + * "Check": _rcu_barrier() checking for piggybacking. + * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. + * "Inc1": _rcu_barrier() piggyback check counter incremented. + * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU + * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU. + * "OnlineQ": _rcu_barrier() found online CPU with callbacks. + * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "LastCB": An rcu_barrier_callback() invoked the last callback. - * "Inc2": rcu_barrier_callback() piggyback check counter incremented. + * "Inc2": _rcu_barrier() piggyback check counter incremented. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument * is the count of remaining callbacks, and "done" is the piggybacking count. */ diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h new file mode 100644 index 00000000000..0f4f95d63c0 --- /dev/null +++ b/include/trace/events/thermal.h @@ -0,0 +1,83 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thermal + +#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_THERMAL_H + +#include <linux/thermal.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(thermal_temperature, + + TP_PROTO(struct thermal_zone_device *tz), + + TP_ARGS(tz), + + TP_STRUCT__entry( + __string(thermal_zone, tz->type) + __field(int, id) + __field(int, temp_prev) + __field(int, temp) + ), + + TP_fast_assign( + __assign_str(thermal_zone, tz->type); + __entry->id = tz->id; + __entry->temp_prev = tz->last_temperature; + __entry->temp = tz->temperature; + ), + + TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d", + __get_str(thermal_zone), __entry->id, __entry->temp_prev, + __entry->temp) +); + +TRACE_EVENT(cdev_update, + + TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target), + + TP_ARGS(cdev, target), + + TP_STRUCT__entry( + __string(type, cdev->type) + __field(unsigned long, target) + ), + + TP_fast_assign( + __assign_str(type, cdev->type); + __entry->target = target; + ), + + TP_printk("type=%s target=%lu", __get_str(type), __entry->target) +); + +TRACE_EVENT(thermal_zone_trip, + + TP_PROTO(struct thermal_zone_device *tz, int trip, + enum thermal_trip_type trip_type), + + TP_ARGS(tz, trip, trip_type), + + TP_STRUCT__entry( + __string(thermal_zone, tz->type) + __field(int, id) + __field(int, trip) + __field(enum thermal_trip_type, trip_type) + ), + + TP_fast_assign( + __assign_str(thermal_zone, tz->type); + __entry->id = tz->id; + __entry->trip = trip; + __entry->trip_type = trip_type; + ), + + TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d", + __get_str(thermal_zone), __entry->id, __entry->trip, + __entry->trip_type) +); + +#endif /* _TRACE_THERMAL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 6cad97485ba..4c94f31a8c9 100644 --- a/include/uapi/linux/Kbuild +++ 
b/include/uapi/linux/Kbuild @@ -125,6 +125,7 @@ header-y += filter.h header-y += firewire-cdev.h header-y += firewire-constants.h header-y += flat.h +header-y += fou.h header-y += fs.h header-y += fsl_hypervisor.h header-y += fuse.h @@ -141,6 +142,7 @@ header-y += hid.h header-y += hiddev.h header-y += hidraw.h header-y += hpet.h +header-y += hsr_netlink.h header-y += hyperv.h header-y += hysdn_if.h header-y += i2c-dev.h @@ -251,6 +253,7 @@ header-y += mii.h header-y += minix_fs.h header-y += mman.h header-y += mmtimer.h +header-y += mpls.h header-y += mqueue.h header-y += mroute.h header-y += mroute6.h @@ -374,6 +377,7 @@ header-y += swab.h header-y += synclink.h header-y += sysctl.h header-y += sysinfo.h +header-y += target_core_user.h header-y += taskstats.h header-y += tcp.h header-y += tcp_metrics.h @@ -423,6 +427,7 @@ header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h header-y += virtio_rng.h +header-y += vm_sockets.h header-y += vt.h header-y += wait.h header-y += wanrouter.h diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index ca1a11bb444..3735fa0a678 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -37,6 +37,7 @@ #define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ #define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ +#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ struct fstrim_range { __u64 start; diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 39f621a9fe8..da17e456908 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <linux/in6.h> #define SYSFS_BRIDGE_ATTR "bridge" #define SYSFS_BRIDGE_FDB "brforward" diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 1874ebe9ac1..a1d7e931ab7 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -739,6 +739,13 @@ struct input_keymap_entry { #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ +#define KEY_KBDINPUTASSIST_PREV 0x260 +#define KEY_KBDINPUTASSIST_NEXT 0x261 +#define KEY_KBDINPUTASSIST_PREVGROUP 0x262 +#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263 +#define KEY_KBDINPUTASSIST_ACCEPT 0x264 +#define KEY_KBDINPUTASSIST_CANCEL 0x265 + #define BTN_TRIGGER_HAPPY 0x2c0 #define BTN_TRIGGER_HAPPY1 0x2c0 #define BTN_TRIGGER_HAPPY2 0x2c1 diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 9269de25487..9d845404d87 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -364,7 +364,7 @@ struct perf_event_mmap_page { /* * Bits needed to read the hw events in user-space. * - * u32 seq, time_mult, time_shift, idx, width; + * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; * u64 cyc, time_offset; * s64 pmc = 0; @@ -383,11 +383,11 @@ struct perf_event_mmap_page { * time_shift = pc->time_shift; * } * - * idx = pc->index; + * index = pc->index; * count = pc->offset; - * if (pc->cap_usr_rdpmc && idx) { + * if (pc->cap_user_rdpmc && index) { * width = pc->pmc_width; - * pmc = rdpmc(idx - 1); + * pmc = rdpmc(index - 1); * } * * barrier(); @@ -415,7 +415,7 @@ struct perf_event_mmap_page { }; /* - * If cap_usr_rdpmc this field provides the bit-width of the value + * If cap_user_rdpmc this field provides the bit-width of the value * read using the rdpmc() or equivalent instruction.
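As a hedged sketch of the self-monitoring read sequence this comment documents (rdpmc() stands in for the architecture's counter-read instruction, e.g. inline asm or a compiler builtin on x86; the sign-extension step is the one described by the comment text that follows):

static u64 mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
	u32 seq, index, width;
	u64 count;
	s64 pmc;

	do {
		seq = pc->lock;
		barrier();

		index = pc->index;
		count = pc->offset;
		pmc = 0;
		if (pc->cap_user_rdpmc && index) {
			width = pc->pmc_width;
			pmc = rdpmc(index - 1);	/* raw counter value */
			pmc <<= 64 - width;	/* sign-extend to 64 bit */
			pmc >>= 64 - width;
		}

		barrier();
	} while (pc->lock != seq);

	return count + pmc;
}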
This can be used * to sign extend the result like: * @@ -439,10 +439,10 @@ struct perf_event_mmap_page { * * Where time_offset,time_mult,time_shift and cyc are read in the * seqcount loop described above. This delta can then be added to - * enabled and possible running (if idx), improving the scaling: + * enabled and possible running (if index), improving the scaling: * * enabled += delta; - * if (idx) + * if (index) * running += delta; * * quot = count / running; diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index 34f9d7387d1..b932be9f5c5 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -13,7 +13,7 @@ #define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ #define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ #define CLONE_THREAD 0x00010000 /* Same thread group? */ -#define CLONE_NEWNS 0x00020000 /* New namespace group? */ +#define CLONE_NEWNS 0x00020000 /* New mount namespace group */ #define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ #define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ #define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h new file mode 100644 index 00000000000..7dcfbe6771b --- /dev/null +++ b/include/uapi/linux/target_core_user.h @@ -0,0 +1,142 @@ +#ifndef __TARGET_CORE_USER_H +#define __TARGET_CORE_USER_H + +/* This header will be used by application too */ + +#include <linux/types.h> +#include <linux/uio.h> + +#ifndef __packed +#define __packed __attribute__((packed)) +#endif + +#define TCMU_VERSION "1.0" + +/* + * Ring Design + * ----------- + * + * The mmaped area is divided into three parts: + * 1) The mailbox (struct tcmu_mailbox, below) + * 2) The command ring + * 3) Everything beyond the command ring (data) + * + * The mailbox tells userspace the offset of the command ring from the + * start of the shared memory region, and how big the command ring is. + * + * The kernel passes SCSI commands to userspace by putting a struct + * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking + * userspace via uio's interrupt mechanism. + * + * tcmu_cmd_entry contains a header. If the header type is PAD, + * userspace should skip hdr->length bytes (mod cmdr_size) to find the + * next cmd_entry. + * + * Otherwise, the entry will contain offsets into the mmaped area that + * contain the cdb and data buffers -- the latter accessible via the + * iov array. iov addresses are also offsets into the shared area. + * + * When userspace is completed handling the command, set + * entry->rsp.scsi_status, fill in rsp.sense_buffer if appropriate, + * and also set mailbox->cmd_tail equal to the old cmd_tail plus + * hdr->length, mod cmdr_size. If cmd_tail doesn't equal cmd_head, it + * should process the next packet the same way, and so on. + */ + +#define TCMU_MAILBOX_VERSION 1 +#define ALIGN_SIZE 64 /* Should be enough for most CPUs */ + +struct tcmu_mailbox { + __u16 version; + __u16 flags; + __u32 cmdr_off; + __u32 cmdr_size; + + __u32 cmd_head; + + /* Updated by user. On its own cacheline */ + __u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE))); + +} __packed; + +enum tcmu_opcode { + TCMU_OP_PAD = 0, + TCMU_OP_CMD, +}; + +/* + * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode. 
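As a hedged sketch of the userspace consumer loop that the ring-design comment above describes (handle_cmd() is a hypothetical helper; the tcmu_hdr_* accessors are the ones defined just below):

static void process_ring(void *base)		/* base: start of the mmap'ed area */
{
	struct tcmu_mailbox *mb = base;
	char *cmdr = (char *)base + mb->cmdr_off;

	while (mb->cmd_tail != mb->cmd_head) {
		struct tcmu_cmd_entry *ent =
			(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);

		if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
			/* req.cdb_off and req.iov[] hold offsets into "base" */
			handle_cmd(base, ent);		/* hypothetical */
			ent->rsp.scsi_status = 0;	/* GOOD */
		}
		/* TCMU_OP_PAD entries are skipped purely by their length */
		mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(&ent->hdr))
			       % mb->cmdr_size;
	}
}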
+ */ +struct tcmu_cmd_entry_hdr { + __u32 len_op; +} __packed; + +#define TCMU_OP_MASK 0x7 + +static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr) +{ + return hdr->len_op & TCMU_OP_MASK; +} + +static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op) +{ + hdr->len_op &= ~TCMU_OP_MASK; + hdr->len_op |= (op & TCMU_OP_MASK); +} + +static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr) +{ + return hdr->len_op & ~TCMU_OP_MASK; +} + +static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len) +{ + hdr->len_op &= TCMU_OP_MASK; + hdr->len_op |= len; +} + +/* Currently the same as SCSI_SENSE_BUFFERSIZE */ +#define TCMU_SENSE_BUFFERSIZE 96 + +struct tcmu_cmd_entry { + struct tcmu_cmd_entry_hdr hdr; + + uint16_t cmd_id; + uint16_t __pad1; + + union { + struct { + uint64_t cdb_off; + uint64_t iov_cnt; + struct iovec iov[0]; + } req; + struct { + uint8_t scsi_status; + uint8_t __pad1; + uint16_t __pad2; + uint32_t __pad3; + char sense_buffer[TCMU_SENSE_BUFFERSIZE]; + } rsp; + }; + +} __packed; + +#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t) + +enum tcmu_genl_cmd { + TCMU_CMD_UNSPEC, + TCMU_CMD_ADDED_DEVICE, + TCMU_CMD_REMOVED_DEVICE, + __TCMU_CMD_MAX, +}; +#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1) + +enum tcmu_genl_attr { + TCMU_ATTR_UNSPEC, + TCMU_ATTR_DEVICE, + TCMU_ATTR_MINOR, + __TCMU_ATTR_MAX, +}; +#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1) + +#endif diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h index 6a0764c89fc..6c8f159e416 100644 --- a/include/uapi/linux/v4l2-dv-timings.h +++ b/include/uapi/linux/v4l2-dv-timings.h @@ -21,8 +21,17 @@ #ifndef _V4L2_DV_TIMINGS_H #define _V4L2_DV_TIMINGS_H +#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6)) +/* Sadly gcc versions older than 4.6 have a bug in how they initialize + anonymous unions where they require additional curly brackets. + This violates the C1x standard. This workaround adds the curly brackets + if needed. */ #define V4L2_INIT_BT_TIMINGS(_width, args...) \ { .bt = { _width , ## args } } +#else +#define V4L2_INIT_BT_TIMINGS(_width, args...) \ + .bt = { _width , ## args } +#endif /* CEA-861-E timings (i.e. 
standard HDTV timings) */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 6ee586728df..941d32f007d 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ #define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ #define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */ -#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE +#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ +#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ +#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE diff --git a/init/Kconfig b/init/Kconfig index 3ee28ae02cc..2081a4d3d91 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1341,6 +1341,10 @@ config SYSCTL_ARCH_UNALIGN_ALLOW config HAVE_PCSPKR_PLATFORM bool +# interpreter that classic socket filters depend on +config BPF + bool + menuconfig EXPERT bool "Configure standard kernel features (expert users)" # Unhide debug options, to make the on-by-default options visible @@ -1521,6 +1525,16 @@ config EVENTFD If unsure, say Y. +# syscall, maps, verifier +config BPF_SYSCALL + bool "Enable bpf() system call" if EXPERT + select ANON_INODES + select BPF + default n + help + Enable the bpf() system call that allows to manipulate eBPF + programs and maps via file descriptors. + config SHMEM bool "Use full shmem filesystem" if EXPERT default y diff --git a/init/main.c b/init/main.c index 800a0daede7..321d0ceb26d 100644 --- a/init/main.c +++ b/init/main.c @@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void) static_command_line, __start___param, __stop___param - __start___param, -1, -1, &unknown_bootoption); - if (after_dashes) + if (!IS_ERR_OR_NULL(after_dashes)) parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, set_init_arg); diff --git a/kernel/Makefile b/kernel/Makefile index dc5c77544fd..17ea6d4a9a2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -86,7 +86,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_CPU_PM) += cpu_pm.o -obj-$(CONFIG_NET) += bpf/ +obj-$(CONFIG_BPF) += bpf/ obj-$(CONFIG_PERF_EVENTS) += events/ diff --git a/kernel/audit.c b/kernel/audit.c index 80983df92cd..cebb11db4d3 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); audit_log_task_info(ab, current); - audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", + audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", audit_feature_names[which], !!old_feature, !!new_feature, !!old_lock, !!new_lock, res); audit_log_end(ab); diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index e242e3a9864..80f29e01557 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count) chunk->owners[i].index = i; } fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); + chunk->mark.mask = 
FS_IN_IGNORED; return chunk; } diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 45427239f37..0daf7f6ae7d 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,5 +1,5 @@ -obj-y := core.o syscall.o verifier.o - +obj-y := core.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o ifdef CONFIG_TEST_BPF -obj-y += test_stub.o +obj-$(CONFIG_BPF_SYSCALL) += test_stub.o endif diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f0c30c59b31..d6594e457a2 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -655,3 +655,12 @@ void bpf_prog_free(struct bpf_prog *fp) schedule_work(&aux->work); } EXPORT_SYMBOL_GPL(bpf_prog_free); + +/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call + * skb_copy_bits(), so provide a weak definition of it for NET-less config. + */ +int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, + int len) +{ + return -EFAULT; +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 801f5f3b930..9f81818f294 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1409,7 +1409,8 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur) if (memcmp(&old->regs[i], &cur->regs[i], sizeof(old->regs[0])) != 0) { if (old->regs[i].type == NOT_INIT || - old->regs[i].type == UNKNOWN_VALUE) + (old->regs[i].type == UNKNOWN_VALUE && + cur->regs[i].type != NOT_INIT)) continue; return false; } diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index 5664985c46a..937ecdfdf25 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -107,46 +107,6 @@ void context_tracking_user_enter(void) } NOKPROBE_SYMBOL(context_tracking_user_enter); -#ifdef CONFIG_PREEMPT -/** - * preempt_schedule_context - preempt_schedule called by tracing - * - * The tracing infrastructure uses preempt_enable_notrace to prevent - * recursion and tracing preempt enabling caused by the tracing - * infrastructure itself. But as tracing can happen in areas coming - * from userspace or just about to enter userspace, a preempt enable - * can occur before user_exit() is called. This will cause the scheduler - * to be called when the system is still in usermode. - * - * To prevent this, the preempt_enable_notrace will use this function - * instead of preempt_schedule() to exit user context if needed before - * calling the scheduler. - */ -asmlinkage __visible void __sched notrace preempt_schedule_context(void) -{ - enum ctx_state prev_ctx; - - if (likely(!preemptible())) - return; - - /* - * Need to disable preemption in case user_exit() is traced - * and the tracer calls preempt_enable_notrace() causing - * an infinite recursion. - */ - preempt_disable_notrace(); - prev_ctx = exception_enter(); - preempt_enable_no_resched_notrace(); - - preempt_schedule(); - - preempt_disable_notrace(); - exception_exit(prev_ctx); - preempt_enable_notrace(); -} -EXPORT_SYMBOL_GPL(preempt_schedule_context); -#endif /* CONFIG_PREEMPT */ - /** * context_tracking_user_exit - Inform the context tracking that the CPU is * exiting userspace mode and entering the kernel. diff --git a/kernel/cpu.c b/kernel/cpu.c index 356450f09c1..90a3d017b90 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -64,6 +64,8 @@ static struct { * an ongoing cpu hotplug operation. */ int refcount; + /* And allows lockless put_online_cpus(). 
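A hedged, simplified sketch of the idea behind the lockless put_online_cpus() mentioned here (the puts_pending counter and the real trylock fallback follow in the hunks below): a reader that cannot take the hotplug mutex records its put in an atomic counter instead of sleeping, and the writer folds the pending puts back into refcount before testing it.

	/* reader side, simplified */
	if (!mutex_trylock(&cpu_hotplug.lock)) {
		atomic_inc(&cpu_hotplug.puts_pending);	/* deferred decrement */
		return;
	}
	cpu_hotplug.refcount--;
	mutex_unlock(&cpu_hotplug.lock);

	/* writer side (cpu_hotplug_begin), with the lock held */
	cpu_hotplug.refcount -= atomic_xchg(&cpu_hotplug.puts_pending, 0);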
*/ + atomic_t puts_pending; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -113,7 +115,11 @@ void put_online_cpus(void) { if (cpu_hotplug.active_writer == current) return; - mutex_lock(&cpu_hotplug.lock); + if (!mutex_trylock(&cpu_hotplug.lock)) { + atomic_inc(&cpu_hotplug.puts_pending); + cpuhp_lock_release(); + return; + } if (WARN_ON(!cpu_hotplug.refcount)) cpu_hotplug.refcount++; /* try to fix things up */ @@ -155,6 +161,12 @@ void cpu_hotplug_begin(void) cpuhp_lock_acquire(); for (;;) { mutex_lock(&cpu_hotplug.lock); + if (atomic_read(&cpu_hotplug.puts_pending)) { + int delta; + + delta = atomic_xchg(&cpu_hotplug.puts_pending, 0); + cpu_hotplug.refcount -= delta; + } if (likely(!cpu_hotplug.refcount)) break; __set_current_state(TASK_UNINTERRUPTIBLE); diff --git a/kernel/events/core.c b/kernel/events/core.c index 1425d07018d..1cd5eef1fcd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group if (!task) { /* - * Per cpu events are removed via an smp call and - * the removal is always successful. + * Per cpu events are removed via an smp call. The removal can + * fail if the CPU is currently offline, but in that case we + * already called __perf_remove_from_context from + * perf_event_exit_cpu. */ cpu_function_call(event->cpu, __perf_remove_from_context, &re); return; @@ -6071,11 +6073,6 @@ static int perf_swevent_init(struct perf_event *event) return 0; } -static int perf_swevent_event_idx(struct perf_event *event) -{ - return 0; -} - static struct pmu perf_swevent = { .task_ctx_nr = perf_sw_context, @@ -6085,8 +6082,6 @@ static struct pmu perf_swevent = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, - - .event_idx = perf_swevent_event_idx, }; #ifdef CONFIG_EVENT_TRACING @@ -6204,8 +6199,6 @@ static struct pmu perf_tracepoint = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, - - .event_idx = perf_swevent_event_idx, }; static inline void perf_tp_register(void) @@ -6431,8 +6424,6 @@ static struct pmu perf_cpu_clock = { .start = cpu_clock_event_start, .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, - - .event_idx = perf_swevent_event_idx, }; /* @@ -6511,8 +6502,6 @@ static struct pmu perf_task_clock = { .start = task_clock_event_start, .stop = task_clock_event_stop, .read = task_clock_event_read, - - .event_idx = perf_swevent_event_idx, }; static void perf_pmu_nop_void(struct pmu *pmu) @@ -6542,7 +6531,7 @@ static void perf_pmu_cancel_txn(struct pmu *pmu) static int perf_event_idx_default(struct perf_event *event) { - return event->hw.idx + 1; + return 0; } /* @@ -8130,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu) static void __perf_event_exit_context(void *__info) { - struct remove_event re = { .detach_group = false }; + struct remove_event re = { .detach_group = true }; struct perf_event_context *ctx = __info; perf_pmu_rotate_stop(ctx->pmu); diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 1559fb0b929..9803a6600d4 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -605,11 +605,6 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags) bp->hw.state = PERF_HES_STOPPED; } -static int hw_breakpoint_event_idx(struct perf_event *bp) -{ - return 0; -} - static struct pmu perf_breakpoint = { .task_ctx_nr = perf_sw_context, /* could eventually get its own */ @@ -619,8 +614,6 @@ static struct pmu 
perf_breakpoint = { .start = hw_breakpoint_start, .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, - - .event_idx = hw_breakpoint_event_idx, }; int __init init_hw_breakpoint(void) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 1d0af8a2c64..ed8f2cde34c 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void) if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { utask->state = UTASK_SSTEP_TRAPPED; set_tsk_thread_flag(t, TIF_UPROBE); - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); } } diff --git a/kernel/freezer.c b/kernel/freezer.c index aa6a8aadb91..a8900a3bc27 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p) if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) return false; + if (test_thread_flag(TIF_MEMDIE)) + return false; + if (pm_nosig_freezing || cgroup_freezing(p)) return true; @@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p) { unsigned long flags; - /* - * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to - * be visible to @p as waking up implies wmb. Waking up inside - * freezer_lock also prevents wakeups from leaking outside - * refrigerator. - */ spin_lock_irqsave(&freezer_lock, flags); if (frozen(p)) wake_up_process(p); diff --git a/kernel/futex.c b/kernel/futex.c index f3a3a071283..63678b573d6 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -143,9 +143,8 @@ * * Where (A) orders the waiters increment and the futex value read through * atomic operations (see hb_waiters_inc) and where (B) orders the write - * to futex and the waiters read -- this is done by the barriers in - * get_futex_key_refs(), through either ihold or atomic_inc, depending on the - * futex type. + * to futex and the waiters read -- this is done by the barriers for both + * shared and private futexes in get_futex_key_refs(). * * This yields the following case (where X:=waiters, Y:=futex): * @@ -344,13 +343,20 @@ static void get_futex_key_refs(union futex_key *key) futex_get_mm(key); /* implies MB (B) */ break; default: + /* + * Private futexes do not hold reference on an inode or + * mm, therefore the only purpose of calling get_futex_key_refs + * is because we need the barrier for the lockless waiter check. + */ smp_mb(); /* explicit MB (B) */ } } /* * Drop a reference to the resource addressed by a key. - * The hash bucket spinlock must not be held. + * The hash bucket spinlock must not be held. This is + * a no-op for private futexes, see comment in the get + * counterpart. */ static void drop_futex_key_refs(union futex_key *key) { @@ -641,8 +647,14 @@ static struct futex_pi_state * alloc_pi_state(void) return pi_state; } +/* + * Must be called with the hb lock held. + */ static void free_pi_state(struct futex_pi_state *pi_state) { + if (!pi_state) + return; + if (!atomic_dec_and_test(&pi_state->refcount)) return; @@ -1521,15 +1533,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, } retry: - if (pi_state != NULL) { - /* - * We will have to lookup the pi_state again, so free this one - * to keep the accounting correct. 
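The (A)/(B) pairing that the futex.c comments above rely on can be pictured as follows (an illustration, not kernel code): either the waker observes the waiter count or the waiter observes the new futex value, so a wakeup is never lost.

	/* waiter (futex_wait) */
	hb_waiters_inc(hb);		/* write X (waiter count) */
	smp_mb();			/* barrier (A) */
	if (*uaddr != expected_val)	/* read Y (futex word) */
		return;			/* value changed, don't sleep */
	/* queue and sleep */

	/* waker (futex_wake), after userspace has stored the new value to Y */
	smp_mb();			/* barrier (B), from get_futex_key_refs() */
	if (!hb_waiters_pending(hb))	/* read X (waiter count) */
		return;			/* nobody queued, skip the wakeup */
	/* walk the hash bucket and wake waiters */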
- */ - free_pi_state(pi_state); - pi_state = NULL; - } - ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); if (unlikely(ret != 0)) goto out; @@ -1619,6 +1622,8 @@ retry_private: case 0: break; case -EFAULT: + free_pi_state(pi_state); + pi_state = NULL; double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); @@ -1634,6 +1639,8 @@ retry_private: * exit to complete. * - The user space value changed. */ + free_pi_state(pi_state); + pi_state = NULL; double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); @@ -1710,6 +1717,7 @@ retry_private: } out_unlock: + free_pi_state(pi_state); double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); @@ -1727,8 +1735,6 @@ out_put_keys: out_put_key1: put_futex_key(&key1); out: - if (pi_state != NULL) - free_pi_state(pi_state); return ret ? ret : task_count; } diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index cf66c5c8458..3b7408759bd 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -35,7 +35,7 @@ config GCOV_KERNEL config GCOV_PROFILE_ALL bool "Profile entire Kernel" depends on GCOV_KERNEL - depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM + depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64 default n ---help--- This options activates profiling for the entire kernel. diff --git a/kernel/kmod.c b/kernel/kmod.c index 8637e041a24..80f7a6d0051 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...) EXPORT_SYMBOL(__request_module); #endif /* CONFIG_MODULES */ +static void call_usermodehelper_freeinfo(struct subprocess_info *info) +{ + if (info->cleanup) + (*info->cleanup)(info); + kfree(info); +} + +static void umh_complete(struct subprocess_info *sub_info) +{ + struct completion *comp = xchg(&sub_info->complete, NULL); + /* + * See call_usermodehelper_exec(). If xchg() returns NULL + * we own sub_info, the UMH_KILLABLE caller has gone away + * or the caller used UMH_NO_WAIT. + */ + if (comp) + complete(comp); + else + call_usermodehelper_freeinfo(sub_info); +} + /* * This is the task which runs the usermode application */ static int ____call_usermodehelper(void *data) { struct subprocess_info *sub_info = data; + int wait = sub_info->wait & ~UMH_KILLABLE; struct cred *new; int retval; @@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data) retval = -ENOMEM; new = prepare_kernel_cred(current); if (!new) - goto fail; + goto out; spin_lock(&umh_sysctl_lock); new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset); @@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data) retval = sub_info->init(sub_info, new); if (retval) { abort_creds(new); - goto fail; + goto out; } } @@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data) retval = do_execve(getname_kernel(sub_info->path), (const char __user *const __user *)sub_info->argv, (const char __user *const __user *)sub_info->envp); +out: + sub_info->retval = retval; + /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */ + if (wait != UMH_WAIT_PROC) + umh_complete(sub_info); if (!retval) return 0; - - /* Exec failed? 
*/ -fail: - sub_info->retval = retval; do_exit(0); } @@ -258,26 +281,6 @@ static int call_helper(void *data) return ____call_usermodehelper(data); } -static void call_usermodehelper_freeinfo(struct subprocess_info *info) -{ - if (info->cleanup) - (*info->cleanup)(info); - kfree(info); -} - -static void umh_complete(struct subprocess_info *sub_info) -{ - struct completion *comp = xchg(&sub_info->complete, NULL); - /* - * See call_usermodehelper_exec(). If xchg() returns NULL - * we own sub_info, the UMH_KILLABLE caller has gone away. - */ - if (comp) - complete(comp); - else - call_usermodehelper_freeinfo(sub_info); -} - /* Keventd can't block, but this (a child) can. */ static int wait_for_helper(void *data) { @@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work) kmod_thread_locker = NULL; } - switch (wait) { - case UMH_NO_WAIT: - call_usermodehelper_freeinfo(sub_info); - break; - - case UMH_WAIT_PROC: - if (pid > 0) - break; - /* FALLTHROUGH */ - case UMH_WAIT_EXEC: - if (pid < 0) - sub_info->retval = pid; + if (pid < 0) { + sub_info->retval = pid; umh_complete(sub_info); } } @@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) goto out; } - sub_info->complete = &done; + /* + * Set the completion pointer only if there is a waiter. + * This makes it possible to use umh_complete to free + * the data structure in case of UMH_NO_WAIT. + */ + sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done; sub_info->wait = wait; queue_work(khelper_wq, &sub_info->work); diff --git a/kernel/panic.c b/kernel/panic.c index d09dc5c32c6..cf80672b792 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -244,6 +244,7 @@ static const struct tnt tnts[] = { * 'I' - Working around severe firmware bug. * 'O' - Out-of-tree module has been loaded. * 'E' - Unsigned module has been loaded. + * 'L' - A soft lockup has previously occurred. * * The string is overwritten by the next call to print_tainted(). */ diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index a9dfa79b6ba..1f35a3478f3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode) error = dpm_suspend_start(PMSG_QUIESCE); if (!error) { error = resume_target_kernel(platform_mode); - dpm_resume_end(PMSG_RECOVER); + /* + * The above should either succeed and jump to the new kernel, + * or return with an error. Otherwise things are just + * undefined, so let's be paranoid. + */ + BUG_ON(!error); } + dpm_resume_end(PMSG_RECOVER); pm_restore_gfp_mask(); resume_console(); pm_restore_console(); diff --git a/kernel/power/process.c b/kernel/power/process.c index 7b323221b9e..5a6ec8678b9 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only) while (true) { todo = 0; read_lock(&tasklist_lock); - do_each_thread(g, p) { + for_each_process_thread(g, p) { if (p == current || !freeze_task(p)) continue; if (!freezer_should_skip(p)) todo++; - } while_each_thread(g, p); + } read_unlock(&tasklist_lock); if (!user_only) { @@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only) if (!wakeup) { read_lock(&tasklist_lock); - do_each_thread(g, p) { + for_each_process_thread(g, p) { if (p != current && !freezer_should_skip(p) && freezing(p) && !frozen(p)) sched_show_task(p); - } while_each_thread(g, p); + } read_unlock(&tasklist_lock); } } else { @@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only) return todo ? 
-EBUSY : 0; } +static bool __check_frozen_processes(void) +{ + struct task_struct *g, *p; + + for_each_process_thread(g, p) + if (p != current && !freezer_should_skip(p) && !frozen(p)) + return false; + + return true; +} + +/* + * Returns true if all freezable tasks (except for current) are frozen already + */ +static bool check_frozen_processes(void) +{ + bool ret; + + read_lock(&tasklist_lock); + ret = __check_frozen_processes(); + read_unlock(&tasklist_lock); + return ret; +} + /** * freeze_processes - Signal user space processes to enter the refrigerator. * The current thread will not be frozen. The same process that calls @@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only) int freeze_processes(void) { int error; + int oom_kills_saved; error = __usermodehelper_disable(UMH_FREEZING); if (error) @@ -132,11 +157,25 @@ int freeze_processes(void) pm_wakeup_clear(); printk("Freezing user space processes ... "); pm_freezing = true; + oom_kills_saved = oom_kills_count(); error = try_to_freeze_tasks(true); if (!error) { - printk("done."); __usermodehelper_set_disable_depth(UMH_DISABLED); oom_killer_disable(); + + /* + * There might have been an OOM kill while we were + * freezing tasks and the killed task might be still + * on the way out so we have to double check for race. + */ + if (oom_kills_count() != oom_kills_saved && + !check_frozen_processes()) { + __usermodehelper_set_disable_depth(UMH_ENABLED); + printk("OOM in progress."); + error = -EBUSY; + } else { + printk("done."); + } } printk("\n"); BUG_ON(in_atomic()); @@ -191,11 +230,11 @@ void thaw_processes(void) thaw_workqueues(); read_lock(&tasklist_lock); - do_each_thread(g, p) { + for_each_process_thread(g, p) { /* No other threads should have PF_SUSPEND_TASK set */ WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); __thaw_task(p); - } while_each_thread(g, p); + } read_unlock(&tasklist_lock); WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); @@ -218,10 +257,10 @@ void thaw_kernel_threads(void) thaw_workqueues(); read_lock(&tasklist_lock); - do_each_thread(g, p) { + for_each_process_thread(g, p) { if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) __thaw_task(p); - } while_each_thread(g, p); + } read_unlock(&tasklist_lock); schedule(); diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 884b7705886..5f4c006c4b1 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -105,11 +105,27 @@ static struct pm_qos_object network_throughput_pm_qos = { }; +static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier); +static struct pm_qos_constraints memory_bw_constraints = { + .list = PLIST_HEAD_INIT(memory_bw_constraints.list), + .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE, + .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE, + .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE, + .type = PM_QOS_SUM, + .notifiers = &memory_bandwidth_notifier, +}; +static struct pm_qos_object memory_bandwidth_pm_qos = { + .constraints = &memory_bw_constraints, + .name = "memory_bandwidth", +}; + + static struct pm_qos_object *pm_qos_array[] = { &null_pm_qos, &cpu_dma_pm_qos, &network_lat_pm_qos, - &network_throughput_pm_qos + &network_throughput_pm_qos, + &memory_bandwidth_pm_qos, }; static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, @@ -130,6 +146,9 @@ static const struct file_operations pm_qos_power_fops = { /* unlocked internal variant */ static inline int pm_qos_get_value(struct pm_qos_constraints *c) { + struct plist_node *node; + int total_value = 0; + if (plist_head_empty(&c->list)) return 
c->no_constraint_value; @@ -140,6 +159,12 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c) case PM_QOS_MAX: return plist_last(&c->list)->prio; + case PM_QOS_SUM: + plist_for_each(node, &c->list) + total_value += node->prio; + + return total_value; + default: /* runtime check for not using enum */ BUG(); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4ca9a33ff62..c347e3ce3a5 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state) static int platform_suspend_prepare_late(suspend_state_t state) { - return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ? + return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ? freeze_ops->prepare() : 0; } @@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state) static void platform_resume_early(suspend_state_t state) { - if (state == PM_SUSPEND_FREEZE && freeze_ops->restore) + if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore) freeze_ops->restore(); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 133e4722309..9815447d22e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3299,11 +3299,16 @@ static void _rcu_barrier(struct rcu_state *rsp) continue; rdp = per_cpu_ptr(rsp->rda, cpu); if (rcu_is_nocb_cpu(cpu)) { - _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, - rsp->n_barrier_done); - atomic_inc(&rsp->barrier_cpu_count); - __call_rcu(&rdp->barrier_head, rcu_barrier_callback, - rsp, cpu, 0); + if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { + _rcu_barrier_trace(rsp, "OfflineNoCB", cpu, + rsp->n_barrier_done); + } else { + _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, + rsp->n_barrier_done); + atomic_inc(&rsp->barrier_cpu_count); + __call_rcu(&rdp->barrier_head, + rcu_barrier_callback, rsp, cpu, 0); + } } else if (ACCESS_ONCE(rdp->qlen)) { _rcu_barrier_trace(rsp, "OnlineQ", cpu, rsp->n_barrier_done); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index d03764652d9..bbdc45d8d74 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -587,6 +587,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu); static void print_cpu_stall_info_end(void); static void zero_cpu_stall_ticks(struct rcu_data *rdp); static void increment_cpu_stall_ticks(void); +static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp); static void rcu_init_one_nocb(struct rcu_node *rnp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 387dd459934..c1d7f27bd38 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2050,6 +2050,33 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) } /* + * Does the specified CPU need an RCU callback for the specified flavor + * of rcu_barrier()? + */ +static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_head *rhp; + + /* No-CBs CPUs might have callbacks on any of three lists. */ + rhp = ACCESS_ONCE(rdp->nocb_head); + if (!rhp) + rhp = ACCESS_ONCE(rdp->nocb_gp_head); + if (!rhp) + rhp = ACCESS_ONCE(rdp->nocb_follower_head); + + /* Having no rcuo kthread but CBs after scheduler starts is bad! */ + if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) { + /* RCU callback enqueued before CPU first came online??? 
*/ + pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n", + cpu, rhp->func); + WARN_ON_ONCE(1); + } + + return !!rhp; +} + +/* * Enqueue the specified string of rcu_head structures onto the specified * CPU's no-CBs lists. The CPU is specified by rdp, the head of the * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy @@ -2642,6 +2669,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ +static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) +{ + WARN_ON_ONCE(1); /* Should be dead code. */ + return false; +} + static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 44999505e1b..24beb9bb4c3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat); EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* - * Return any ns on the sched_clock that have not yet been accounted in - * @p in case that task is currently running. - * - * Called with task_rq_lock() held on @rq. - */ -static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) -{ - u64 ns = 0; - - /* - * Must be ->curr _and_ ->on_rq. If dequeued, we would - * project cycles that may never be accounted to this - * thread, breaking clock_gettime(). - */ - if (task_current(rq, p) && task_on_rq_queued(p)) { - update_rq_clock(rq); - ns = rq_clock_task(rq) - p->se.exec_start; - if ((s64)ns < 0) - ns = 0; - } - - return ns; -} - -unsigned long long task_delta_exec(struct task_struct *p) -{ - unsigned long flags; - struct rq *rq; - u64 ns = 0; - - rq = task_rq_lock(p, &flags); - ns = do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); - - return ns; -} - -/* * Return accounted runtime for the task. * In case the task is currently running, return the runtime plus current's * pending runtime that have not been accounted yet. @@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) { unsigned long flags; struct rq *rq; - u64 ns = 0; + u64 ns; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) /* @@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p) #endif rq = task_rq_lock(p, &flags); - ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); + /* + * Must be ->curr _and_ ->on_rq. If dequeued, we would + * project cycles that may never be accounted to this + * thread, breaking clock_gettime(). + */ + if (task_current(rq, p) && task_on_rq_queued(p)) { + update_rq_clock(rq); + p->sched_class->update_curr(rq); + } + ns = p->se.sum_exec_runtime; task_rq_unlock(rq, p, &flags); return ns; @@ -2951,6 +2922,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) } NOKPROBE_SYMBOL(preempt_schedule); EXPORT_SYMBOL(preempt_schedule); + +#ifdef CONFIG_CONTEXT_TRACKING +/** + * preempt_schedule_context - preempt_schedule called by tracing + * + * The tracing infrastructure uses preempt_enable_notrace to prevent + * recursion and tracing preempt enabling caused by the tracing + * infrastructure itself. But as tracing can happen in areas coming + * from userspace or just about to enter userspace, a preempt enable + * can occur before user_exit() is called. This will cause the scheduler + * to be called when the system is still in usermode. + * + * To prevent this, the preempt_enable_notrace will use this function + * instead of preempt_schedule() to exit user context if needed before + * calling the scheduler. 
+ */ +asmlinkage __visible void __sched notrace preempt_schedule_context(void) +{ + enum ctx_state prev_ctx; + + if (likely(!preemptible())) + return; + + do { + __preempt_count_add(PREEMPT_ACTIVE); + /* + * Needs preempt disabled in case user_exit() is traced + * and the tracer calls preempt_enable_notrace() causing + * an infinite recursion. + */ + prev_ctx = exception_enter(); + __schedule(); + exception_exit(prev_ctx); + + __preempt_count_sub(PREEMPT_ACTIVE); + barrier(); + } while (need_resched()); +} +EXPORT_SYMBOL_GPL(preempt_schedule_context); +#endif /* CONFIG_CONTEXT_TRACKING */ + #endif /* CONFIG_PREEMPT */ /* @@ -6327,6 +6339,10 @@ static void sched_init_numa(void) if (!sched_debug()) break; } + + if (!level) + return; + /* * 'level' contains the number of unique distances, excluding the * identity distance node_distance(i,i). @@ -7403,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgrp_id, - lockdep_is_held(&tsk->sighand->siglock)), + /* + * All callers are synchronized by task_rq_lock(); we do not use RCU + * which is pointless here. Thus, we pass "true" to task_css_check() + * to prevent lockdep warnings. + */ + tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), struct task_group, css); tg = autogroup_task_group(tsk, tg); tsk->sched_task_group = tg; @@ -7833,6 +7853,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) sched_offline_group(tg); } +static void cpu_cgroup_fork(struct task_struct *task) +{ + sched_move_task(task); +} + static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { @@ -8205,6 +8230,7 @@ struct cgroup_subsys cpu_cgrp_subsys = { .css_free = cpu_cgroup_css_free, .css_online = cpu_cgroup_css_online, .css_offline = cpu_cgroup_css_offline, + .fork = cpu_cgroup_fork, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .exit = cpu_cgroup_exit, diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 256e577faf1..28fa9d9e920 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -518,12 +518,20 @@ again: } /* - * We need to take care of a possible races here. In fact, the - * task might have changed its scheduling policy to something - * different from SCHED_DEADLINE or changed its reservation - * parameters (through sched_setattr()). + * We need to take care of several possible races here: + * + * - the task might have changed its scheduling policy + * to something different than SCHED_DEADLINE + * - the task might have changed its reservation parameters + * (through sched_setattr()) + * - the task might have been boosted by someone else and + * might be in the boosting/deboosting path + * + * In all this cases we bail out, as the task is already + * in the runqueue or is going to be enqueued back anyway. */ - if (!dl_task(p) || dl_se->dl_new) + if (!dl_task(p) || dl_se->dl_new || + dl_se->dl_boosted || !dl_se->dl_throttled) goto unlock; sched_clock_tick(); @@ -532,7 +540,7 @@ again: dl_se->dl_yielded = 0; if (task_on_rq_queued(p)) { enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); - if (task_has_dl_policy(rq->curr)) + if (dl_task(rq->curr)) check_preempt_curr_dl(rq, p, 0); else resched_curr(rq); @@ -847,8 +855,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) * smaller than our one... OTW we keep our runtime and * deadline. 
*/ - if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) + if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) { pi_se = &pi_task->dl; + } else if (!dl_prio(p->normal_prio)) { + /* + * Special case in which we have a !SCHED_DEADLINE task + * that is going to be deboosted, but exceedes its + * runtime while doing so. No point in replenishing + * it, as it's going to return back to its original + * scheduling class after this. + */ + BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH); + return; + } /* * If p is throttled, we do nothing. In fact, if it exhausted @@ -1607,8 +1626,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) /* Only reschedule if pushing failed */ check_resched = 0; #endif /* CONFIG_SMP */ - if (check_resched && task_has_dl_policy(rq->curr)) - check_preempt_curr_dl(rq, p, 0); + if (check_resched) { + if (dl_task(rq->curr)) + check_preempt_curr_dl(rq, p, 0); + else + resched_curr(rq); + } } } @@ -1678,4 +1701,6 @@ const struct sched_class dl_sched_class = { .prio_changed = prio_changed_dl, .switched_from = switched_from_dl, .switched_to = switched_to_dl, + + .update_curr = update_curr_dl, }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0b069bf3e70..ef2b104b254 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq) account_cfs_rq_runtime(cfs_rq, delta_exec); } +static void update_curr_fair(struct rq *rq) +{ + update_curr(cfs_rq_of(&rq->curr->se)); +} + static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { @@ -828,11 +833,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p) static unsigned int task_scan_min(struct task_struct *p) { + unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size); unsigned int scan, floor; unsigned int windows = 1; - if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW) - windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size; + if (scan_size < MAX_SCAN_WINDOW) + windows = MAX_SCAN_WINDOW / scan_size; floor = 1000 / windows; scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); @@ -1164,9 +1170,26 @@ static void task_numa_compare(struct task_numa_env *env, long moveimp = imp; rcu_read_lock(); - cur = ACCESS_ONCE(dst_rq->curr); - if (cur->pid == 0) /* idle */ + + raw_spin_lock_irq(&dst_rq->lock); + cur = dst_rq->curr; + /* + * No need to move the exiting task, and this ensures that ->curr + * wasn't reaped and thus get_task_struct() in task_numa_assign() + * is safe under RCU read lock. + * Note that rcu_read_lock() itself can't protect from the final + * put_task_struct() after the last schedule(). + */ + if ((cur->flags & PF_EXITING) || is_idle_task(cur)) cur = NULL; + raw_spin_unlock_irq(&dst_rq->lock); + + /* + * Because we have preemption enabled we can get migrated around and + * end try selecting ourselves (current == env->p) as a swap candidate. 
+ */ + if (cur == env->p) + goto unlock; /* * "imp" is the fault differential for the source task between the @@ -1520,7 +1543,7 @@ static void update_task_scan_period(struct task_struct *p, * scanning faster if shared accesses dominate as it may * simply bounce migrations uselessly */ - ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared)); + ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1)); diff = (diff * ratio) / NUMA_PERIOD_SLOTS; } @@ -7938,6 +7961,8 @@ const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, + .update_curr = update_curr_fair, + #ifdef CONFIG_FAIR_GROUP_SCHED .task_move_group = task_move_group_fair, #endif diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 67ad4e7f506..c65dac8c97c 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task return 0; } +static void update_curr_idle(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU idle tasks: */ @@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = { .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, + .update_curr = update_curr_idle, }; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d024e6ce30b..20bca398084 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = { .prio_changed = prio_changed_rt, .switched_to = switched_to_rt, + + .update_curr = update_curr_rt, }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 24156c8434d..2df8ef067cc 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1135,6 +1135,8 @@ struct sched_class { unsigned int (*get_rr_interval) (struct rq *rq, struct task_struct *task); + void (*update_curr) (struct rq *rq); + #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_move_group) (struct task_struct *p, int on_rq); #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 67426e529f5..79ffec45a6a 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task) return 0; } +static void update_curr_stop(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU stop tasks: */ @@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = { .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, + .update_curr = update_curr_stop, }; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4aada6d9fe7..15f2511a1b7 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -387,7 +387,8 @@ static struct ctl_table kern_table[] = { .data = &sysctl_numa_balancing_scan_size, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, }, { .procname = "numa_balancing", diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 9c94c19f130..55449909f11 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, * Also omit the add if it would overflow the u64 boundary. 
*/ if ((~0ULL - clc > rnd) && - (!ismax || evt->mult <= (1U << evt->shift))) + (!ismax || evt->mult <= (1ULL << evt->shift))) clc += rnd; do_div(clc, evt->mult); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 492b986195d..a16b67859e2 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: - *sample = cputime.sum_exec_runtime + task_delta_exec(p); + *sample = cputime.sum_exec_runtime; break; } return 0; diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 42b463ad90f..31ea01f42e1 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, goto out; } } else { + memset(&event.sigev_value, 0, sizeof(event.sigev_value)); event.sigev_notify = SIGEV_SIGNAL; event.sigev_signo = SIGALRM; event.sigev_value.sival_int = new_timer->it_id; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fb186b9ddf5..31c90fec415 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1925,8 +1925,16 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) * when we are adding another op to the rec or removing the * current one. Thus, if the op is being added, we can * ignore it because it hasn't attached itself to the rec - * yet. That means we just need to find the op that has a - * trampoline and is not beeing added. + * yet. + * + * If an ops is being modified (hooking to different functions) + * then we don't care about the new functions that are being + * added, just the old ones (that are probably being removed). + * + * If we are adding an ops to a function that already is using + * a trampoline, it needs to be removed (trampolines are only + * for single ops connected), then an ops that is not being + * modified also needs to be checked. */ do_for_each_ftrace_op(op, ftrace_ops_list) { @@ -1940,17 +1948,23 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) if (op->flags & FTRACE_OPS_FL_ADDING) continue; + /* - * If the ops is not being added and has a trampoline, - * then it must be the one that we want! + * If the ops is being modified and is in the old + * hash, then it is probably being removed from this + * function. */ - if (hash_contains_ip(ip, op->func_hash)) - return op; - - /* If the ops is being modified, it may be in the old hash. */ if ((op->flags & FTRACE_OPS_FL_MODIFYING) && hash_contains_ip(ip, &op->old_hash)) return op; + /* + * If the ops is not being added or modified, and it's + * in its normal filter hash, then this must be the one + * we want! 
+ */ + if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && + hash_contains_ip(ip, op->func_hash)) + return op; } while_for_each_ftrace_op(op); @@ -2293,10 +2307,13 @@ static void ftrace_run_update_code(int command) FTRACE_WARN_ON(ret); } -static void ftrace_run_modify_code(struct ftrace_ops *ops, int command) +static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, + struct ftrace_hash *old_hash) { ops->flags |= FTRACE_OPS_FL_MODIFYING; + ops->old_hash.filter_hash = old_hash; ftrace_run_update_code(command); + ops->old_hash.filter_hash = NULL; ops->flags &= ~FTRACE_OPS_FL_MODIFYING; } @@ -3340,7 +3357,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = static int ftrace_probe_registered; -static void __enable_ftrace_function_probe(void) +static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) { int ret; int i; @@ -3348,7 +3365,8 @@ static void __enable_ftrace_function_probe(void) if (ftrace_probe_registered) { /* still need to update the function call sites */ if (ftrace_enabled) - ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS); + ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, + old_hash); return; } @@ -3477,13 +3495,14 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, } while_for_each_ftrace_rec(); ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); + + __enable_ftrace_function_probe(old_hash); + if (!ret) free_ftrace_hash_rcu(old_hash); else count = ret; - __enable_ftrace_function_probe(); - out_unlock: mutex_unlock(&ftrace_lock); out: @@ -3764,10 +3783,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return add_hash_entry(hash, ip); } -static void ftrace_ops_update_code(struct ftrace_ops *ops) +static void ftrace_ops_update_code(struct ftrace_ops *ops, + struct ftrace_hash *old_hash) { if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) - ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS); + ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); } static int @@ -3813,7 +3833,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, old_hash = *orig_hash; ret = ftrace_hash_move(ops, enable, orig_hash, hash); if (!ret) { - ftrace_ops_update_code(ops); + ftrace_ops_update_code(ops, old_hash); free_ftrace_hash_rcu(old_hash); } mutex_unlock(&ftrace_lock); @@ -4058,7 +4078,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file) ret = ftrace_hash_move(iter->ops, filter_hash, orig_hash, iter->hash); if (!ret) { - ftrace_ops_update_code(iter->ops); + ftrace_ops_update_code(iter->ops, old_hash); free_ftrace_hash_rcu(old_hash); } mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2d75c94ae87..a56e07c8d15 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work) * ring_buffer_wait - wait for input to the ring buffer * @buffer: buffer to wait on * @cpu: the cpu buffer to wait on + * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS * * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon * as data is added to any of the @buffer's cpu buffers. Otherwise * it will wait for data to be added to a specific cpu buffer. 
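A hedged sketch of how callers can use the @full flag documented above once the signature below gains it; the error handling is illustrative only:

	/* wake up as soon as anything is written to cpu 2's buffer */
	ret = ring_buffer_wait(buffer, 2, false);
	if (ret == -EINTR)
		return ret;			/* interrupted by a signal */

	/* splice-style readers: sleep until a whole page is ready */
	ret = ring_buffer_wait(buffer, 2, true);

	/* @full is ignored when waiting across all cpu buffers */
	ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, false);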
*/ -int ring_buffer_wait(struct ring_buffer *buffer, int cpu) +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full) { - struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); DEFINE_WAIT(wait); struct rb_irq_work *work; + int ret = 0; /* * Depending on what the caller is waiting for, either any @@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu) } - prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); + while (true) { + prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); - /* - * The events can happen in critical sections where - * checking a work queue can cause deadlocks. - * After adding a task to the queue, this flag is set - * only to notify events to try to wake up the queue - * using irq_work. - * - * We don't clear it even if the buffer is no longer - * empty. The flag only causes the next event to run - * irq_work to do the work queue wake up. The worse - * that can happen if we race with !trace_empty() is that - * an event will cause an irq_work to try to wake up - * an empty queue. - * - * There's no reason to protect this flag either, as - * the work queue and irq_work logic will do the necessary - * synchronization for the wake ups. The only thing - * that is necessary is that the wake up happens after - * a task has been queued. It's OK for spurious wake ups. - */ - work->waiters_pending = true; + /* + * The events can happen in critical sections where + * checking a work queue can cause deadlocks. + * After adding a task to the queue, this flag is set + * only to notify events to try to wake up the queue + * using irq_work. + * + * We don't clear it even if the buffer is no longer + * empty. The flag only causes the next event to run + * irq_work to do the work queue wake up. The worse + * that can happen if we race with !trace_empty() is that + * an event will cause an irq_work to try to wake up + * an empty queue. + * + * There's no reason to protect this flag either, as + * the work queue and irq_work logic will do the necessary + * synchronization for the wake ups. The only thing + * that is necessary is that the wake up happens after + * a task has been queued. It's OK for spurious wake ups. 
+ */ + work->waiters_pending = true; + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) + break; + + if (cpu != RING_BUFFER_ALL_CPUS && + !ring_buffer_empty_cpu(buffer, cpu)) { + unsigned long flags; + bool pagebusy; + + if (!full) + break; + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + + if (!pagebusy) + break; + } - if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || - (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) schedule(); + } finish_wait(&work->waiters, &wait); - return 0; + + return ret; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8a528392b1f..92f4a6cee17 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) } #endif /* CONFIG_TRACER_MAX_TRACE */ -static int wait_on_pipe(struct trace_iterator *iter) +static int wait_on_pipe(struct trace_iterator *iter, bool full) { /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; - return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, + full); } #ifdef CONFIG_FTRACE_STARTUP_TEST @@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp) mutex_unlock(&iter->mutex); - ret = wait_on_pipe(iter); + ret = wait_on_pipe(iter, false); mutex_lock(&iter->mutex); if (ret) return ret; - - if (signal_pending(current)) - return -EINTR; } return 1; @@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, goto out_unlock; } mutex_unlock(&trace_types_lock); - ret = wait_on_pipe(iter); + ret = wait_on_pipe(iter, false); mutex_lock(&trace_types_lock); if (ret) { size = ret; goto out_unlock; } - if (signal_pending(current)) { - size = -EINTR; - goto out_unlock; - } goto again; } size = 0; @@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, }; struct buffer_ref *ref; int entries, size, i; - ssize_t ret; + ssize_t ret = 0; mutex_lock(&trace_types_lock); @@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); - if (!ref) + if (!ref) { + ret = -ENOMEM; break; + } ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (!ref->page) { + ret = -ENOMEM; kfree(ref); break; } @@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, /* did we read anything? 
*/ if (!spd.nr_pages) { + if (ret) + goto out; + if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { ret = -EAGAIN; goto out; } mutex_unlock(&trace_types_lock); - ret = wait_on_pipe(iter); + ret = wait_on_pipe(iter, true); mutex_lock(&trace_types_lock); if (ret) goto out; - if (signal_pending(current)) { - ret = -EINTR; - goto out; - } + goto again; } diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 4dc8b79c5f7..29228c4d569 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) int size; syscall_nr = trace_get_syscall_nr(current, regs); - if (syscall_nr < 0) + if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ @@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) int syscall_nr; syscall_nr = trace_get_syscall_nr(current, regs); - if (syscall_nr < 0) + if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ @@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) int size; syscall_nr = trace_get_syscall_nr(current, regs); - if (syscall_nr < 0) + if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; @@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) int size; syscall_nr = trace_get_syscall_nr(current, regs); - if (syscall_nr < 0) + if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; diff --git a/lib/Makefile b/lib/Makefile index 7512dc978f1..0211d2bd5e1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -10,7 +10,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ - sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ + sha1.o md5.o irq_regs.o argv_split.o \ proportions.o flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o hash.o rhashtable.o + percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o diff --git a/lib/bitmap.c b/lib/bitmap.c index cd250a2e14c..b499ab6ada2 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst, lower = src[off + k]; if (left && off + k == lim - 1) lower &= mask; - dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; + dst[k] = lower >> rem; + if (rem) + dst[k] |= upper << (BITS_PER_LONG - rem); if (left && k == lim - 1) dst[k] &= mask; } @@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst, upper = src[k]; if (left && k == lim - 1) upper &= (1UL << left) - 1; - dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; + dst[k + off] = upper << rem; + if (rem) + dst[k + off] |= lower >> (BITS_PER_LONG - rem); if (left && k + off == lim - 1) 
dst[k + off] &= (1UL << left) - 1; } diff --git a/lib/cmdline.c b/lib/cmdline.c index 76a712e6e20..8f13cf73c2e 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -160,3 +160,32 @@ unsigned long long memparse(const char *ptr, char **retptr) return ret; } EXPORT_SYMBOL(memparse); + +/** + * parse_option_str - Parse a string and check whether an option is set + * @str: String to be parsed + * @option: option name + * + * This function parses a string containing a comma-separated list of + * strings like a=b,c. + * + * Returns true if the option is present in the string, false otherwise. + */ +bool parse_option_str(const char *str, const char *option) +{ + while (*str) { + if (!strncmp(str, option, strlen(option))) { + str += strlen(option); + if (!*str || *str == ',') + return true; + } + + while (*str && *str != ',') + str++; + + if (*str == ',') + str++; + } + + return false; +} diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 081be3ba9ea..624a0b7c05e 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags) ht->shift++; /* For each new bucket, search the corresponding old bucket - * for the first entry that hashes to the new bucket, and + * for the first entry that hashes to the new bucket, and * link the new bucket to that entry. Since all the entries * which will end up in the new bucket appear in the same * old bucket, this constructs an entirely valid new hash @@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags) } /* Publish the new table pointer. Lookups may now traverse - * the new table, but they will not benefit from any - * additional efficiency until later steps unzip the buckets. + * the new table, but they will not benefit from any + * additional efficiency until later steps unzip the buckets. */ rcu_assign_pointer(ht->tbl, new_tbl); @@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) ht->shift--; - /* Link each bucket in the new table to the first bucket + /* Link each bucket in the new table to the first bucket * in the old table that contains entries which will hash * to the new bucket. */ for (i = 0; i < ntbl->size; i++) { ntbl->buckets[i] = tbl->buckets[i]; - /* Link each bucket in the new table to the first bucket + /* Link each bucket in the new table to the first bucket * in the old table that contains entries which will hash * to the new bucket. */ diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 9cdf62f8acc..c9f2e8c6ccc 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, } table->orig_nents -= sg_size; - if (!skip_first_chunk) { - free_fn(sgl, alloc_size); + if (skip_first_chunk) skip_first_chunk = false; - } + else + free_fn(sgl, alloc_size); sgl = next; } diff --git a/lib/string.c b/lib/string.c index 2fc20aa06f8..10063300b83 100644 --- a/lib/string.c +++ b/lib/string.c @@ -598,6 +598,22 @@ void *memset(void *s, int c, size_t count) EXPORT_SYMBOL(memset); #endif +/** + * memzero_explicit - Fill a region of memory (e.g. sensitive + * keying data) with 0s. + * @s: Pointer to the start of the area. + * @count: The size of the area. + * + * memzero_explicit() doesn't need an arch-specific version as + * it just wraps memset() internally.
+ */ +void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + OPTIMIZER_HIDE_VAR(s); +} +EXPORT_SYMBOL(memzero_explicit); + #ifndef __HAVE_ARCH_MEMCPY /** * memcpy - Copy one area of memory to another diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index b3cbe19f71b..fcad8322ef3 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -68,11 +68,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) * to be released by the balloon driver. */ if (trylock_page(page)) { +#ifdef CONFIG_BALLOON_COMPACTION if (!PagePrivate(page)) { /* raced with isolation */ unlock_page(page); continue; } +#endif spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_delete(page); __count_vm_event(BALLOON_DEFLATE); diff --git a/mm/bootmem.c b/mm/bootmem.c index 8a000cebb0d..477be696511 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) static int reset_managed_pages_done __initdata; -static inline void __init reset_node_managed_pages(pg_data_t *pgdat) +void reset_node_managed_pages(pg_data_t *pgdat) { struct zone *z; - if (reset_managed_pages_done) - return; - for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) z->managed_pages = 0; } @@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void) { struct pglist_data *pgdat; + if (reset_managed_pages_done) + return; + for_each_online_pgdat(pgdat) reset_node_managed_pages(pgdat); + reset_managed_pages_done = 1; } @@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma) err: kfree(cma->bitmap); + cma->count = 0; return -EINVAL; } @@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base, phys_addr_t highmem_start = __pa(high_memory); int ret = 0; - pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n", - __func__, (unsigned long)size, (unsigned long)base, - (unsigned long)limit, (unsigned long)alignment); + pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", + __func__, &size, &base, &limit, &alignment); if (cma_area_count == ARRAY_SIZE(cma_areas)) { pr_err("Not enough slots for CMA reserved regions!\n"); @@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base, size = ALIGN(size, alignment); limit &= ~(alignment - 1); + if (!base) + fixed = false; + /* size should be aligned with order_per_bit */ if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) return -EINVAL; /* - * adjust limit to avoid crossing low/high memory boundary for - * automatically allocated regions + * If allocating at a fixed base the request region must not cross the + * low/high memory boundary. */ - if (((limit == 0 || limit > memblock_end) && - (memblock_end - size < highmem_start && - memblock_end > highmem_start)) || - (!fixed && limit > highmem_start && limit - size < highmem_start)) { - limit = highmem_start; - } - - if (fixed && base < highmem_start && base+size > highmem_start) { + if (fixed && base < highmem_start && base + size > highmem_start) { ret = -EINVAL; - pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n", - (unsigned long)base, (unsigned long)highmem_start); + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); goto err; } + /* + * If the limit is unspecified or above the memblock end, its effective + * value will be the memblock end. Set it explicitly to simplify further + * checks. 
+ */ + if (limit == 0 || limit > memblock_end) + limit = memblock_end; + /* Reserve memory */ - if (base && fixed) { + if (fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { ret = -EBUSY; goto err; } } else { - phys_addr_t addr = memblock_alloc_range(size, alignment, base, - limit); + phys_addr_t addr = 0; + + /* + * All pages in the reserved area must come from the same zone. + * If the requested region crosses the low/high memory boundary, + * try allocating from high memory first and fall back to low + * memory in case of failure. + */ + if (base < highmem_start && limit > highmem_start) { + addr = memblock_alloc_range(size, alignment, + highmem_start, limit); + limit = highmem_start; + } + if (!addr) { - ret = -ENOMEM; - goto err; - } else { - base = addr; + addr = memblock_alloc_range(size, alignment, base, + limit); + if (!addr) { + ret = -ENOMEM; + goto err; + } } + + base = addr; } ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma); if (ret) goto err; - pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M, - (unsigned long)base); + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M, + &base); return 0; err: diff --git a/mm/compaction.c b/mm/compaction.c index edba18aed17..f9792ba3537 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc, block_end_pfn = min(block_end_pfn, end_pfn); + /* + * pfn could pass the block_end_pfn if isolated freepage + * is more than pageblock order. In this case, we adjust + * scanning range to right one. + */ + if (pfn >= block_end_pfn) { + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); + block_end_pfn = min(block_end_pfn, end_pfn); + } + if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) break; @@ -784,6 +794,9 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, cc->nr_migratepages = 0; break; } + + if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) + break; } acct_isolated(cc->zone, cc); @@ -1026,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, } acct_isolated(zone, cc); - /* Record where migration scanner will be restarted */ - cc->migrate_pfn = low_pfn; + /* + * Record where migration scanner will be restarted. If we end up in + * the same pageblock as the free scanner, make the scanners fully + * meet so that compact_finished() terminates compaction. + */ + cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn; return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 74c78aa8bc2..de984159cf0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -200,7 +200,7 @@ retry: preempt_disable(); if (cmpxchg(&huge_zero_page, NULL, zero_page)) { preempt_enable(); - __free_page(zero_page); + __free_pages(zero_page, compound_order(zero_page)); goto retry; } @@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { struct page *zero_page = xchg(&huge_zero_page, NULL); BUG_ON(zero_page == NULL); - __free_page(zero_page); + __free_pages(zero_page, compound_order(zero_page)); return HPAGE_PMD_NR; } @@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_FALLBACK; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - if (unlikely(khugepaged_enter(vma))) + if (unlikely(khugepaged_enter(vma, vma->vm_flags))) return VM_FAULT_OOM; if (!(flags & FAULT_FLAG_WRITE) && transparent_hugepage_use_zero_page()) { @@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma, * register it here without waiting a page fault that * may not happen any time soon. */ - if (unlikely(khugepaged_enter_vma_merge(vma))) + if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) return -ENOMEM; break; case MADV_NOHUGEPAGE: @@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm) return 0; } -int khugepaged_enter_vma_merge(struct vm_area_struct *vma) +int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags) { unsigned long hstart, hend; if (!vma->anon_vma) @@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; - VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); + VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) - return khugepaged_enter(vma); + return khugepaged_enter(vma, vm_flags); return 0; } diff --git a/mm/internal.h b/mm/internal.h index 829304090b9..a4f90ba7068 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); /* * in mm/page_alloc.c */ + +/* + * Locate the struct page for both the matching buddy in our + * pair (buddy1) and the combined O(n+1) page they form (page). 
+ * + * 1) Any buddy B1 will have an order O twin B2 which satisfies + * the following equation: + * B2 = B1 ^ (1 << O) + * For example, if the starting buddy (buddy2) is #8 its order + * 1 buddy is #10: + * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 + * + * 2) Any buddy B will have an order O+1 parent P which + * satisfies the following equation: + * P = B & ~(1 << O) + * + * Assumption: *_mem_map is contiguous at least up to MAX_ORDER + */ +static inline unsigned long +__find_buddy_index(unsigned long page_idx, unsigned int order) +{ + return page_idx ^ (1 << order); +} + +extern int __isolate_free_page(struct page *page, unsigned int order); extern void __free_pages_bootmem(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE diff --git a/mm/iov_iter.c b/mm/iov_iter.c index eafcf60f6b8..e34a3cb6aad 100644 --- a/mm/iov_iter.c +++ b/mm/iov_iter.c @@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) if (i->nr_segs == 1) return i->count; else if (i->type & ITER_BVEC) - return min(i->count, i->iov->iov_len - i->iov_offset); - else return min(i->count, i->bvec->bv_len - i->iov_offset); + else + return min(i->count, i->iov->iov_len - i->iov_offset); } EXPORT_SYMBOL(iov_iter_single_seg_count); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 23976fd885f..d6ac0e33e15 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1536,12 +1536,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg) * start move here. */ -/* for quick checking without looking up memcg */ -atomic_t memcg_moving __read_mostly; - static void mem_cgroup_start_move(struct mem_cgroup *memcg) { - atomic_inc(&memcg_moving); atomic_inc(&memcg->moving_account); synchronize_rcu(); } @@ -1552,10 +1548,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg) * Now, mem_cgroup_clear_mc() may call this function with NULL. * We check NULL in callee rather than caller. */ - if (memcg) { - atomic_dec(&memcg_moving); + if (memcg) atomic_dec(&memcg->moving_account); - } } /* @@ -2204,41 +2198,52 @@ cleanup: return true; } -/* - * Used to update mapped file or writeback or other statistics. +/** + * mem_cgroup_begin_page_stat - begin a page state statistics transaction + * @page: page that is going to change accounted state + * @locked: &memcg->move_lock slowpath was taken + * @flags: IRQ-state flags for &memcg->move_lock * - * Notes: Race condition + * This function must mark the beginning of an accounted page state + * change to prevent double accounting when the page is concurrently + * being moved to another memcg: * - * Charging occurs during page instantiation, while the page is - * unmapped and locked in page migration, or while the page table is - * locked in THP migration. No race is possible. + * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags); + * if (TestClearPageState(page)) + * mem_cgroup_update_page_stat(memcg, state, -1); + * mem_cgroup_end_page_stat(memcg, locked, flags); * - * Uncharge happens to pages with zero references, no race possible. + * The RCU lock is held throughout the transaction. The fast path can + * get away without acquiring the memcg->move_lock (@locked is false) + * because page moving starts with an RCU grace period. * - * Charge moving between groups is protected by checking mm->moving - * account and taking the move_lock in the slowpath. 
+ * The RCU lock also protects the memcg from being freed when the page + * state that is going to change is the only thing preventing the page + * from being uncharged. E.g. end-writeback clearing PageWriteback(), + * which allows migration to go ahead and uncharge the page before the + * account transaction might be complete. */ - -void __mem_cgroup_begin_update_page_stat(struct page *page, - bool *locked, unsigned long *flags) +struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, + bool *locked, + unsigned long *flags) { struct mem_cgroup *memcg; struct page_cgroup *pc; + rcu_read_lock(); + + if (mem_cgroup_disabled()) + return NULL; + pc = lookup_page_cgroup(page); again: memcg = pc->mem_cgroup; if (unlikely(!memcg || !PageCgroupUsed(pc))) - return; - /* - * If this memory cgroup is not under account moving, we don't - * need to take move_lock_mem_cgroup(). Because we already hold - * rcu_read_lock(), any calls to move_account will be delayed until - * rcu_read_unlock(). - */ - VM_BUG_ON(!rcu_read_lock_held()); + return NULL; + + *locked = false; if (atomic_read(&memcg->moving_account) <= 0) - return; + return memcg; move_lock_mem_cgroup(memcg, flags); if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) { @@ -2246,36 +2251,40 @@ again: goto again; } *locked = true; + + return memcg; } -void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags) +/** + * mem_cgroup_end_page_stat - finish a page state statistics transaction + * @memcg: the memcg that was accounted against + * @locked: value received from mem_cgroup_begin_page_stat() + * @flags: value received from mem_cgroup_begin_page_stat() + */ +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, + unsigned long flags) { - struct page_cgroup *pc = lookup_page_cgroup(page); + if (memcg && locked) + move_unlock_mem_cgroup(memcg, &flags); - /* - * It's guaranteed that pc->mem_cgroup never changes while - * lock is held because a routine modifies pc->mem_cgroup - * should take move_lock_mem_cgroup(). - */ - move_unlock_mem_cgroup(pc->mem_cgroup, flags); + rcu_read_unlock(); } -void mem_cgroup_update_page_stat(struct page *page, +/** + * mem_cgroup_update_page_stat - update page state statistics + * @memcg: memcg to account against + * @idx: page state item to account + * @val: number of pages (positive or negative) + * + * See mem_cgroup_begin_page_stat() for locking requirements. 
+ */ +void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val) { - struct mem_cgroup *memcg; - struct page_cgroup *pc = lookup_page_cgroup(page); - unsigned long uninitialized_var(flags); - - if (mem_cgroup_disabled()) - return; - VM_BUG_ON(!rcu_read_lock_held()); - memcg = pc->mem_cgroup; - if (unlikely(!memcg || !PageCgroupUsed(pc))) - return; - this_cpu_add(memcg->stat->count[idx], val); + if (memcg) + this_cpu_add(memcg->stat->count[idx], val); } /* diff --git a/mm/memory.c b/mm/memory.c index 1cc6bfbd872..3e503831e04 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1147,6 +1147,7 @@ again: print_bad_pte(vma, addr, ptent, page); if (unlikely(!__tlb_remove_page(tlb, page))) { force_flush = 1; + addr += PAGE_SIZE; break; } continue; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 29d8693d0c6..1bf4807cb21 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -31,6 +31,7 @@ #include <linux/stop_machine.h> #include <linux/hugetlb.h> #include <linux/memblock.h> +#include <linux/bootmem.h> #include <asm/tlbflush.h> @@ -1066,6 +1067,16 @@ out: } #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ +static void reset_node_present_pages(pg_data_t *pgdat) +{ + struct zone *z; + + for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) + z->present_pages = 0; + + pgdat->node_present_pages = 0; +} + /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) { @@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) build_all_zonelists(pgdat, NULL); mutex_unlock(&zonelists_mutex); + /* + * zone->managed_pages is set to an approximate value in + * free_area_init_core(), which will cause + * /sys/devices/system/node/nodeX/meminfo to show wrong data. + * So reset it to 0 before any memory is onlined. + */ + reset_node_managed_pages(pgdat); + + /* + * When memory is hot-added, all the memory is in offline state. So + * clear all zones' present_pages because they will be updated in + * online_pages() and offline_pages().
+ */ + reset_node_present_pages(pgdat); + return pgdat; } @@ -1912,7 +1938,6 @@ void try_offline_node(int nid) unsigned long start_pfn = pgdat->node_start_pfn; unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; unsigned long pfn; - struct page *pgdat_page = virt_to_page(pgdat); int i; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { @@ -1941,10 +1966,6 @@ void try_offline_node(int nid) node_set_offline(nid); unregister_one_node(nid); - if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page)) - /* node data is allocated from boot memory */ - return; - /* free waittable in each zone */ for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; diff --git a/mm/mmap.c b/mm/mmap.c index 7f855206e7f..87e82b38453 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1080,7 +1080,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, end, prev->vm_pgoff, NULL); if (err) return NULL; - khugepaged_enter_vma_merge(prev); + khugepaged_enter_vma_merge(prev, vm_flags); return prev; } @@ -1099,7 +1099,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, next->vm_pgoff - pglen, NULL); if (err) return NULL; - khugepaged_enter_vma_merge(area); + khugepaged_enter_vma_merge(area, vm_flags); return area; } @@ -2208,7 +2208,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } } vma_unlock_anon_vma(vma); - khugepaged_enter_vma_merge(vma); + khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(vma->vm_mm); return error; } @@ -2277,7 +2277,7 @@ int expand_downwards(struct vm_area_struct *vma, } } vma_unlock_anon_vma(vma); - khugepaged_enter_vma_merge(vma); + khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(vma->vm_mm); return error; } diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 7c7ab32ee50..90b50468333 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void) static int reset_managed_pages_done __initdata; -static inline void __init reset_node_managed_pages(pg_data_t *pgdat) +void reset_node_managed_pages(pg_data_t *pgdat) { struct zone *z; - if (reset_managed_pages_done) - return; for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) z->managed_pages = 0; } @@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void) { struct pglist_data *pgdat; + if (reset_managed_pages_done) + return; + for_each_online_pgdat(pgdat) reset_node_managed_pages(pgdat); + reset_managed_pages_done = 1; } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index bbf405a3a18..5340f6b9131 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, dump_tasks(memcg, nodemask); } +/* + * Number of OOM killer invocations (including memcg OOM killer). + * Primarily used by PM freezer to check for potential races with + * OOM killed frozen task. + */ +static atomic_t oom_kills = ATOMIC_INIT(0); + +int oom_kills_count(void) +{ + return atomic_read(&oom_kills); +} + +void note_oom_kill(void) +{ + atomic_inc(&oom_kills); +} + #define K(x) ((x) << (PAGE_SHIFT-10)) /* * Must be called while holding a reference to p, which will be released upon diff --git a/mm/page-writeback.c b/mm/page-writeback.c index ff24c9d8311..19ceae87522 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2116,23 +2116,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) EXPORT_SYMBOL(account_page_dirtied); /* - * Helper function for set_page_writeback family. 
- * - * The caller must hold mem_cgroup_begin/end_update_page_stat() lock - * while calling this function. - * See test_set_page_writeback for example. - * - * NOTE: Unlike account_page_dirtied this does not rely on being atomic - * wrt interrupts. - */ -void account_page_writeback(struct page *page) -{ - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); - inc_zone_page_state(page, NR_WRITEBACK); -} -EXPORT_SYMBOL(account_page_writeback); - -/* * For address_spaces which do not use buffers. Just tag the page as dirty in * its radix tree. * @@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io); int test_clear_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); - int ret; - bool locked; unsigned long memcg_flags; + struct mem_cgroup *memcg; + bool locked; + int ret; - mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); + memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags); if (mapping) { struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long flags; @@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page) ret = TestClearPageWriteback(page); } if (ret) { - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); + mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); dec_zone_page_state(page, NR_WRITEBACK); inc_zone_page_state(page, NR_WRITTEN); } - mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); + mem_cgroup_end_page_stat(memcg, locked, memcg_flags); return ret; } int __test_set_page_writeback(struct page *page, bool keep_write) { struct address_space *mapping = page_mapping(page); - int ret; - bool locked; unsigned long memcg_flags; + struct mem_cgroup *memcg; + bool locked; + int ret; - mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); + memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags); if (mapping) { struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long flags; @@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write) } else { ret = TestSetPageWriteback(page); } - if (!ret) - account_page_writeback(page); - mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); + if (!ret) { + mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); + inc_zone_page_state(page, NR_WRITEBACK); + } + mem_cgroup_end_page_stat(memcg, locked, memcg_flags); return ret; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 736d8e1b638..616a2c956b4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -467,29 +467,6 @@ static inline void rmv_page_order(struct page *page) } /* - * Locate the struct page for both the matching buddy in our - * pair (buddy1) and the combined O(n+1) page they form (page). 
- * - * 1) Any buddy B1 will have an order O twin B2 which satisfies - * the following equation: - * B2 = B1 ^ (1 << O) - * For example, if the starting buddy (buddy2) is #8 its order - * 1 buddy is #10: - * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 - * - * 2) Any buddy B will have an order O+1 parent P which - * satisfies the following equation: - * P = B & ~(1 << O) - * - * Assumption: *_mem_map is contiguous at least up to MAX_ORDER - */ -static inline unsigned long -__find_buddy_index(unsigned long page_idx, unsigned int order) -{ - return page_idx ^ (1 << order); -} - -/* * This function checks whether a page is free && is the buddy * we can do coalesce a page and its buddy if * (a) the buddy is not in a hole && @@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page, unsigned long combined_idx; unsigned long uninitialized_var(buddy_idx); struct page *buddy; + int max_order = MAX_ORDER; VM_BUG_ON(!zone_is_initialized(zone)); @@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page, return; VM_BUG_ON(migratetype == -1); + if (is_migrate_isolate(migratetype)) { + /* + * We restrict max order of merging to prevent merge + * between freepages on isolate pageblock and normal + * pageblock. Without this, pageblock isolation + * could cause incorrect freepage accounting. + */ + max_order = min(MAX_ORDER, pageblock_order + 1); + } else { + __mod_zone_freepage_state(zone, 1 << order, migratetype); + } - page_idx = pfn & ((1 << MAX_ORDER) - 1); + page_idx = pfn & ((1 << max_order) - 1); VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); - while (order < MAX_ORDER-1) { + while (order < max_order - 1) { buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) @@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page, */ if (page_is_guard(buddy)) { clear_page_guard_flag(buddy); - set_page_private(page, 0); - __mod_zone_freepage_state(zone, 1 << order, - migratetype); + set_page_private(buddy, 0); + if (!is_migrate_isolate(migratetype)) { + __mod_zone_freepage_state(zone, 1 << order, + migratetype); + } } else { list_del(&buddy->lru); zone->free_area[order].nr_free--; @@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* must delete as __free_one_page list manipulates */ list_del(&page->lru); mt = get_freepage_migratetype(page); + if (unlikely(has_isolate_pageblock(zone))) + mt = get_pageblock_migratetype(page); + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, page_to_pfn(page), zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); - if (likely(!is_migrate_isolate_page(page))) { - __mod_zone_page_state(zone, NR_FREE_PAGES, 1); - if (is_migrate_cma(mt)) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); - } } while (--to_free && --batch_free && !list_empty(list)); } spin_unlock(&zone->lock); @@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone, if (nr_scanned) __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); + if (unlikely(has_isolate_pageblock(zone) || + is_migrate_isolate(migratetype))) { + migratetype = get_pfnblock_migratetype(page, pfn); + } __free_one_page(page, pfn, zone, order, migratetype); - if (unlikely(!is_migrate_isolate(migratetype))) - __mod_zone_freepage_state(zone, 1 << order, migratetype); spin_unlock(&zone->lock); } @@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order) } EXPORT_SYMBOL_GPL(split_page); -static int 
__isolate_free_page(struct page *page, unsigned int order) +int __isolate_free_page(struct page *page, unsigned int order) { unsigned long watermark; struct zone *zone; @@ -2252,6 +2243,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, } /* + * PM-freezer should be notified that there might be an OOM killer on + * its way to kill and wake somebody up. This is too early and we might + * end up not killing anything but false positives are acceptable. + * See freeze_processes. + */ + note_oom_kill(); + + /* * Go through the zonelist yet one more time, keep very high watermark * here, this is only to catch a parallel oom killing, we must fail if * we're still under heavy pressure. @@ -6400,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end, /* Make sure the range is really isolated. */ if (test_pages_isolated(outer_start, end, false)) { - pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n", - outer_start, end); + pr_info("%s: [%lx, %lx) PFNs busy\n", + __func__, outer_start, end); ret = -EBUSY; goto done; } - /* Grab isolated pages from freelists. */ outer_end = isolate_freepages_range(&cc, outer_start, end); if (!outer_end) { diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 3708264d283..5331c2bd85a 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr) sizeof(struct page_cgroup) * PAGES_PER_SECTION; BUG_ON(PageReserved(page)); + kmemleak_free(addr); free_pages_exact(addr, table_size); } } diff --git a/mm/page_isolation.c b/mm/page_isolation.c index d1473b2e948..c8778f7e208 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -60,6 +60,7 @@ out: int migratetype = get_pageblock_migratetype(page); set_pageblock_migratetype(page, MIGRATE_ISOLATE); + zone->nr_isolate_pageblock++; nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); __mod_zone_freepage_state(zone, -nr_pages, migratetype); @@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; unsigned long flags, nr_pages; + struct page *isolated_page = NULL; + unsigned int order; + unsigned long page_idx, buddy_idx; + struct page *buddy; zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) goto out; - nr_pages = move_freepages_block(zone, page, migratetype); - __mod_zone_freepage_state(zone, nr_pages, migratetype); + + /* + * Because freepage with more than pageblock_order on isolated + * pageblock is restricted to merge due to freepage counting problem, + * it is possible that there is free buddy page. + * move_freepages_block() doesn't care of merge so we need other + * approach in order to merge them. Isolation and free will make + * these pages to be merged. + */ + if (PageBuddy(page)) { + order = page_order(page); + if (order >= pageblock_order) { + page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); + buddy_idx = __find_buddy_index(page_idx, order); + buddy = page + (buddy_idx - page_idx); + + if (!is_migrate_isolate_page(buddy)) { + __isolate_free_page(page, order); + set_page_refcounted(page); + isolated_page = page; + } + } + } + + /* + * If we isolate freepage with more than pageblock_order, there + * should be no freepage in the range, so we could avoid costly + * pageblock scanning for freepage moving. 
+ */ + if (!isolated_page) { + nr_pages = move_freepages_block(zone, page, migratetype); + __mod_zone_freepage_state(zone, nr_pages, migratetype); + } set_pageblock_migratetype(page, migratetype); + zone->nr_isolate_pageblock--; out: spin_unlock_irqrestore(&zone->lock, flags); + if (isolated_page) + __free_pages(isolated_page, order); } static inline struct page * diff --git a/mm/rmap.c b/mm/rmap.c index 116a5053415..19886fb2f13 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page, */ void page_add_file_rmap(struct page *page) { - bool locked; + struct mem_cgroup *memcg; unsigned long flags; + bool locked; - mem_cgroup_begin_update_page_stat(page, &locked, &flags); + memcg = mem_cgroup_begin_page_stat(page, &locked, &flags); if (atomic_inc_and_test(&page->_mapcount)) { __inc_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); + mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); } - mem_cgroup_end_update_page_stat(page, &locked, &flags); + mem_cgroup_end_page_stat(memcg, locked, flags); +} + +static void page_remove_file_rmap(struct page *page) +{ + struct mem_cgroup *memcg; + unsigned long flags; + bool locked; + + memcg = mem_cgroup_begin_page_stat(page, &locked, &flags); + + /* page still mapped by someone else? */ + if (!atomic_add_negative(-1, &page->_mapcount)) + goto out; + + /* Hugepages are not counted in NR_FILE_MAPPED for now. */ + if (unlikely(PageHuge(page))) + goto out; + + /* + * We use the irq-unsafe __{inc|mod}_zone_page_stat because + * these counters are not modified in interrupt context, and + * pte lock(a spinlock) is held, which implies preemption disabled. + */ + __dec_zone_page_state(page, NR_FILE_MAPPED); + mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); + + if (unlikely(PageMlocked(page))) + clear_page_mlock(page); +out: + mem_cgroup_end_page_stat(memcg, locked, flags); } /** @@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page) */ void page_remove_rmap(struct page *page) { - bool anon = PageAnon(page); - bool locked; - unsigned long flags; - - /* - * The anon case has no mem_cgroup page_stat to update; but may - * uncharge_page() below, where the lock ordering can deadlock if - * we hold the lock against page_stat move: so avoid it on anon. - */ - if (!anon) - mem_cgroup_begin_update_page_stat(page, &locked, &flags); + if (!PageAnon(page)) { + page_remove_file_rmap(page); + return; + } /* page still mapped by someone else? */ if (!atomic_add_negative(-1, &page->_mapcount)) - goto out; + return; + + /* Hugepages are not counted in NR_ANON_PAGES for now. */ + if (unlikely(PageHuge(page))) + return; /* - * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED - * and not charged by memcg for now. - * * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and - * these counters are not modified in interrupt context, and * pte lock(a spinlock) is held, which implies preemption disabled. 
*/ - if (unlikely(PageHuge(page))) - goto out; - if (anon) { - if (PageTransHuge(page)) - __dec_zone_page_state(page, - NR_ANON_TRANSPARENT_HUGEPAGES); - __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, - -hpage_nr_pages(page)); - } else { - __dec_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); - mem_cgroup_end_update_page_stat(page, &locked, &flags); - } + if (PageTransHuge(page)) + __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); + + __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, + -hpage_nr_pages(page)); + if (unlikely(PageMlocked(page))) clear_page_mlock(page); + /* * It would be tidy to reset the PageAnon mapping here, * but that might overwrite a racing page_add_anon_rmap @@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page) * Leaving it set also helps swapoff to reinstate ptes * faster for those pages still in swapcache. */ - return; -out: - if (!anon) - mem_cgroup_end_update_page_stat(page, &locked, &flags); } /* diff --git a/mm/shmem.c b/mm/shmem.c index cd6fc7590e5..185836ba53e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2345,6 +2345,32 @@ static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, stru return 0; } +static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) +{ + struct dentry *whiteout; + int error; + + whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); + if (!whiteout) + return -ENOMEM; + + error = shmem_mknod(old_dir, whiteout, + S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); + dput(whiteout); + if (error) + return error; + + /* + * Cheat and hash the whiteout while the old dentry is still in + * place, instead of playing games with FS_RENAME_DOES_D_MOVE. + * + * d_lookup() will consistently find one of them at this point, + * not sure which one, but that isn't even important. 
+ */ + d_rehash(whiteout); + return 0; +} + /* * The VFS layer already does all the dentry stuff for rename, * we just have to decrement the usage count for the target if @@ -2356,7 +2382,7 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc struct inode *inode = old_dentry->d_inode; int they_are_dirs = S_ISDIR(inode->i_mode); - if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if (flags & RENAME_EXCHANGE) @@ -2365,6 +2391,14 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc if (!simple_empty(new_dentry)) return -ENOTEMPTY; + if (flags & RENAME_WHITEOUT) { + int error; + + error = shmem_whiteout(old_dir, old_dentry); + if (error) + return error; + } + if (new_dentry->d_inode) { (void) shmem_unlink(new_dir, new_dentry); if (they_are_dirs) { diff --git a/mm/slab_common.c b/mm/slab_common.c index 3a6e0cfdf03..dcdab81bd24 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -93,16 +93,6 @@ static int kmem_cache_sanity_check(const char *name, size_t size) s->object_size); continue; } - -#if !defined(CONFIG_SLUB) - if (!strcmp(s->name, name)) { - pr_err("%s (%s): Cache name already exists.\n", - __func__, name); - dump_stack(); - s = NULL; - return -EINVAL; - } -#endif } WARN_ON(strchr(name, ' ')); /* It confuses parsers */ @@ -269,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align, if (s->size - size >= sizeof(void *)) continue; + if (IS_ENABLED(CONFIG_SLAB) && align && + (align > s->align || s->align % align)) + continue; + return s; } return NULL; diff --git a/mm/truncate.c b/mm/truncate.c index 96d167372d8..f1e4d605236 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -20,6 +20,7 @@ #include <linux/buffer_head.h> /* grr. try_to_release_page, do_invalidatepage */ #include <linux/cleancache.h> +#include <linux/rmap.h> #include "internal.h" static void clear_exceptional_entry(struct address_space *mapping, @@ -714,17 +715,73 @@ EXPORT_SYMBOL(truncate_pagecache); * necessary) to @newsize. It will be typically be called from the filesystem's * setattr function when ATTR_SIZE is passed in. * - * Must be called with inode_mutex held and before all filesystem specific - * block truncation has been performed. + * Must be called with a lock serializing truncates and writes (generally + * i_mutex but e.g. xfs uses a different lock) and before all filesystem + * specific block truncation has been performed. */ void truncate_setsize(struct inode *inode, loff_t newsize) { + loff_t oldsize = inode->i_size; + i_size_write(inode, newsize); + if (newsize > oldsize) + pagecache_isize_extended(inode, oldsize, newsize); truncate_pagecache(inode, newsize); } EXPORT_SYMBOL(truncate_setsize); /** + * pagecache_isize_extended - update pagecache after extension of i_size + * @inode: inode for which i_size was extended + * @from: original inode size + * @to: new inode size + * + * Handle extension of inode size either caused by extending truncate or by + * write starting after current i_size. We mark the page straddling current + * i_size RO so that page_mkwrite() is called on the nearest write access to + * the page. This way filesystem can be sure that page_mkwrite() is called on + * the page before user writes to the page via mmap after the i_size has been + * changed. + * + * The function must be called after i_size is updated so that page fault + * coming after we unlock the page will already see the new i_size. 
+ * The function must be called while we still hold i_mutex - this not only + * makes sure i_size is stable but also that userspace cannot observe new + * i_size value before we are prepared to store mmap writes at new inode size. + */ +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) +{ + int bsize = 1 << inode->i_blkbits; + loff_t rounded_from; + struct page *page; + pgoff_t index; + + WARN_ON(to > inode->i_size); + + if (from >= to || bsize == PAGE_CACHE_SIZE) + return; + /* Page straddling @from will not have any hole block created? */ + rounded_from = round_up(from, bsize); + if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) + return; + + index = from >> PAGE_CACHE_SHIFT; + page = find_lock_page(inode->i_mapping, index); + /* Page not cached? Nothing to do */ + if (!page) + return; + /* + * See clear_page_dirty_for_io() for details why set_page_dirty() + * is needed. + */ + if (page_mkclean(page)) + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); +} +EXPORT_SYMBOL(pagecache_isize_extended); + +/** * truncate_pagecache_range - unmap and remove pagecache that is hole-punched * @inode: inode * @lstart: offset of beginning of hole diff --git a/net/Kconfig b/net/Kconfig index 6272420a721..99815b5454b 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -6,7 +6,7 @@ menuconfig NET bool "Networking support" select NLATTR select GENERIC_NET_UTILS - select ANON_INODES + select BPF ---help--- Unless you really know what you are doing, you should say Y here. The reason is that some programs need kernel networking support even diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 992ec49a96a..44cb786b925 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) kfree_skb(skb); } +EXPORT_SYMBOL_GPL(br_deliver); /* called with rcu_read_lock */ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 648d79ccf46..c465876c786 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br, return; if (port) { - __skb_push(skb, sizeof(struct ethhdr)); skb->dev = port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, - dev_queue_xmit); + br_dev_queue_push_xmit); } else { br_multicast_select_own_querier(br, ip, skb); netif_rx(skb); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 1bada53bb19..1a4f32c09ad 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) static int br_parse_ip_options(struct sk_buff *skb) { - struct ip_options *opt; const struct iphdr *iph; struct net_device *dev = skb->dev; u32 len; @@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb) goto inhdr_error; iph = ip_hdr(skb); - opt = &(IPCB(skb)->opt); /* Basic sanity checks */ if (iph->ihl < 5 || iph->version != 4) @@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb) } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); - if (iph->ihl == 5) - return 0; - - opt->optlen = iph->ihl*4 - sizeof(struct iphdr); - if (ip_options_compile(dev_net(dev), opt, skb)) - goto inhdr_error; - - /* Check correct handling of SRR option */ - if (unlikely(opt->srr)) { - struct in_device *in_dev = 
__in_dev_get_rcu(dev); - if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) - goto drop; - - if (ip_options_rcv_srr(skb)) - goto drop; - } - + /* We should really parse IP options here but until + * somebody who actually uses IP options complains to + * us we'll just silently ignore the options because + * we're lazy! + */ return 0; inhdr_error: diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 2ff9706647f..e5ec470b851 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, + [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, }; diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index da17a5eab8b..074c557ab50 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = { .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_BRIDGE, .owner = THIS_MODULE, - .hook_mask = (1 << NF_BR_LOCAL_IN) | + .hook_mask = (1 << NF_BR_PRE_ROUTING) | + (1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | - (1 << NF_BR_LOCAL_OUT), + (1 << NF_BR_LOCAL_OUT) | + (1 << NF_BR_POST_ROUTING), }; static int __init nf_tables_bridge_init(void) diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index a76479535df..48da2c54a69 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -16,6 +16,239 @@ #include <net/netfilter/nft_reject.h> #include <net/netfilter/ipv4/nf_reject.h> #include <net/netfilter/ipv6/nf_reject.h> +#include <linux/ip.h> +#include <net/ip.h> +#include <net/ip6_checksum.h> +#include <linux/netfilter_bridge.h> +#include "../br_private.h" + +static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, + struct sk_buff *nskb) +{ + struct ethhdr *eth; + + eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN); + skb_reset_mac_header(nskb); + ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest); + ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); + eth->h_proto = eth_hdr(oldskb)->h_proto; + skb_pull(nskb, ETH_HLEN); +} + +static int nft_reject_iphdr_validate(struct sk_buff *oldskb) +{ + struct iphdr *iph; + u32 len; + + if (!pskb_may_pull(oldskb, sizeof(struct iphdr))) + return 0; + + iph = ip_hdr(oldskb); + if (iph->ihl < 5 || iph->version != 4) + return 0; + + len = ntohs(iph->tot_len); + if (oldskb->len < len) + return 0; + else if (len < (iph->ihl*4)) + return 0; + + if (!pskb_may_pull(oldskb, iph->ihl*4)) + return 0; + + return 1; +} + +static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _oth; + + if (!nft_reject_iphdr_validate(oldskb)) + return; + + oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); + if (!oth) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, + sysctl_ip_default_ttl); + nf_reject_ip_tcphdr_put(nskb, oldskb, oth); + niph->ttl = sysctl_ip_default_ttl; + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + + 
nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, + u8 code) +{ + struct sk_buff *nskb; + struct iphdr *niph; + struct icmphdr *icmph; + unsigned int len; + void *payload; + __wsum csum; + + if (!nft_reject_iphdr_validate(oldskb)) + return; + + /* IP header checks: fragment. */ + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) + return; + + /* RFC says return as much as we can without exceeding 576 bytes. */ + len = min_t(unsigned int, 536, oldskb->len); + + if (!pskb_may_pull(oldskb, len)) + return; + + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0)) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) + + LL_MAX_HEADER + len, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP, + sysctl_ip_default_ttl); + + skb_reset_transport_header(nskb); + icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr)); + memset(icmph, 0, sizeof(*icmph)); + icmph->type = ICMP_DEST_UNREACH; + icmph->code = code; + + payload = skb_put(nskb, len); + memcpy(payload, skb_network_header(oldskb), len); + + csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0); + icmph->checksum = csum_fold(csum); + + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb) +{ + struct ipv6hdr *hdr; + u32 pkt_len; + + if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr))) + return 0; + + hdr = ipv6_hdr(oldskb); + if (hdr->version != 6) + return 0; + + pkt_len = ntohs(hdr->payload_len); + if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len) + return 0; + + return 1; +} + +static void nft_reject_br_send_v6_tcp_reset(struct net *net, + struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct tcphdr *oth; + struct tcphdr _oth; + unsigned int otcplen; + struct ipv6hdr *nip6h; + + if (!nft_reject_ip6hdr_validate(oldskb)) + return; + + oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook); + if (!oth) + return; + + nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, + net->ipv6.devconf_all->hop_limit); + nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); + nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static void nft_reject_br_send_v6_unreach(struct net *net, + struct sk_buff *oldskb, int hook, + u8 code) +{ + struct sk_buff *nskb; + struct ipv6hdr *nip6h; + struct icmp6hdr *icmp6h; + unsigned int len; + void *payload; + + if (!nft_reject_ip6hdr_validate(oldskb)) + return; + + /* Include "As much of invoking packet as possible without the ICMPv6 + * packet exceeding the minimum IPv6 MTU" in the ICMP payload. 
+ */ + len = min_t(unsigned int, 1220, oldskb->len); + + if (!pskb_may_pull(oldskb, len)) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + + LL_MAX_HEADER + len, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6, + net->ipv6.devconf_all->hop_limit); + + skb_reset_transport_header(nskb); + icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr)); + memset(icmp6h, 0, sizeof(*icmp6h)); + icmp6h->icmp6_type = ICMPV6_DEST_UNREACH; + icmp6h->icmp6_code = code; + + payload = skb_put(nskb, len); + memcpy(payload, skb_network_header(oldskb), len); + nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); + + icmp6h->icmp6_cksum = + csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, + nskb->len - sizeof(struct ipv6hdr), + IPPROTO_ICMPV6, + csum_partial(icmp6h, + nskb->len - sizeof(struct ipv6hdr), + 0)); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} static void nft_reject_bridge_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], @@ -23,35 +256,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, { struct nft_reject *priv = nft_expr_priv(expr); struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); + const unsigned char *dest = eth_hdr(pkt->skb)->h_dest; + + if (is_broadcast_ether_addr(dest) || + is_multicast_ether_addr(dest)) + goto out; switch (eth_hdr(pkt->skb)->h_proto) { case htons(ETH_P_IP): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach(pkt->skb, priv->icmp_code); + nft_reject_br_send_v4_unreach(pkt->skb, + pkt->ops->hooknum, + priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nf_send_reset(pkt->skb, pkt->ops->hooknum); + nft_reject_br_send_v4_tcp_reset(pkt->skb, + pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nf_send_unreach(pkt->skb, - nft_reject_icmp_code(priv->icmp_code)); + nft_reject_br_send_v4_unreach(pkt->skb, + pkt->ops->hooknum, + nft_reject_icmp_code(priv->icmp_code)); break; } break; case htons(ETH_P_IPV6): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach6(net, pkt->skb, priv->icmp_code, - pkt->ops->hooknum); + nft_reject_br_send_v6_unreach(net, pkt->skb, + pkt->ops->hooknum, + priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); + nft_reject_br_send_v6_tcp_reset(net, pkt->skb, + pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nf_send_unreach6(net, pkt->skb, - nft_reject_icmpv6_code(priv->icmp_code), - pkt->ops->hooknum); + nft_reject_br_send_v6_unreach(net, pkt->skb, + pkt->ops->hooknum, + nft_reject_icmpv6_code(priv->icmp_code)); break; } break; @@ -59,15 +303,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, /* No explicit way to reject this protocol, drop it. 
*/ break; } +out: data[NFT_REG_VERDICT].verdict = NF_DROP; } +static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) +{ + struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + + switch (basechain->ops[0].hooknum) { + case NF_BR_PRE_ROUTING: + case NF_BR_LOCAL_IN: + break; + default: + return -EOPNOTSUPP; + } + } + return 0; +} + static int nft_reject_bridge_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_reject *priv = nft_expr_priv(expr); - int icmp_code; + int icmp_code, err; + + err = nft_reject_bridge_validate_hooks(ctx->chain); + if (err < 0) + return err; if (tb[NFTA_REJECT_TYPE] == NULL) return -EINVAL; @@ -116,6 +383,13 @@ nla_put_failure: return -1; } +static int nft_reject_bridge_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_reject_bridge_validate_hooks(ctx->chain); +} + static struct nft_expr_type nft_reject_bridge_type; static const struct nft_expr_ops nft_reject_bridge_ops = { .type = &nft_reject_bridge_type, @@ -123,6 +397,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = { .eval = nft_reject_bridge_eval, .init = nft_reject_bridge_init, .dump = nft_reject_bridge_dump, + .validate = nft_reject_bridge_validate, }; static struct nft_expr_type nft_reject_bridge_type __read_mostly = { diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index de6662b14e1..7e38b729696 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac, struct ceph_crypto_key old_key; void *ticket_buf = NULL; void *tp, *tpend; + void **ptp; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; @@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac, goto out; } tp = ticket_buf; - dlen = ceph_decode_32(&tp); + ptp = &tp; + tpend = *ptp + dlen; } else { /* unencrypted */ - ceph_decode_32_safe(p, end, dlen, bad); - ticket_buf = kmalloc(dlen, GFP_NOFS); - if (!ticket_buf) { - ret = -ENOMEM; - goto out; - } - tp = ticket_buf; - ceph_decode_need(p, end, dlen, bad); - ceph_decode_copy(p, ticket_buf, dlen); + ptp = p; + tpend = end; } - tpend = tp + dlen; + ceph_decode_32_safe(ptp, tpend, dlen, bad); dout(" ticket blob is %d bytes\n", dlen); - ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); - blob_struct_v = ceph_decode_8(&tp); - new_secret_id = ceph_decode_64(&tp); - ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); + ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); + blob_struct_v = ceph_decode_8(ptp); + new_secret_id = ceph_decode_64(ptp); + ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); if (ret) goto out; diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 62fc5e7a9ac..790fe89d90c 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) static const u8 *aes_iv = (u8 *)CEPH_AES_IV; +/* + * Should be used for buffers allocated with ceph_kvmalloc(). + * Currently these are encrypt out-buffer (ceph_buffer) and decrypt + * in-buffer (msg front). + * + * Dispose of @sgt with teardown_sgtable(). + * + * @prealloc_sg is to avoid memory allocation inside sg_alloc_table() + * in cases where a single sg is sufficient. No attempt to reduce the + * number of sgs by squeezing physically contiguous pages together is + * made though, for simplicity. 
+ */ +static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, + const void *buf, unsigned int buf_len) +{ + struct scatterlist *sg; + const bool is_vmalloc = is_vmalloc_addr(buf); + unsigned int off = offset_in_page(buf); + unsigned int chunk_cnt = 1; + unsigned int chunk_len = PAGE_ALIGN(off + buf_len); + int i; + int ret; + + if (buf_len == 0) { + memset(sgt, 0, sizeof(*sgt)); + return -EINVAL; + } + + if (is_vmalloc) { + chunk_cnt = chunk_len >> PAGE_SHIFT; + chunk_len = PAGE_SIZE; + } + + if (chunk_cnt > 1) { + ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS); + if (ret) + return ret; + } else { + WARN_ON(chunk_cnt != 1); + sg_init_table(prealloc_sg, 1); + sgt->sgl = prealloc_sg; + sgt->nents = sgt->orig_nents = 1; + } + + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { + struct page *page; + unsigned int len = min(chunk_len - off, buf_len); + + if (is_vmalloc) + page = vmalloc_to_page(buf); + else + page = virt_to_page(buf); + + sg_set_page(sg, page, len, off); + + off = 0; + buf += len; + buf_len -= len; + } + WARN_ON(buf_len != 0); + + return 0; +} + +static void teardown_sgtable(struct sg_table *sgt) +{ + if (sgt->orig_nents > 1) + sg_free_table(sgt); +} + static int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { - struct scatterlist sg_in[2], sg_out[1]; + struct scatterlist sg_in[2], prealloc_sg; + struct sg_table sg_out; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; @@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len, *dst_len = src_len + zero_padding; - crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], src, src_len); sg_set_buf(&sg_in[1], pad, zero_padding); - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst, *dst_len); + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); + if (ret) + goto out_tfm; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); + /* print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, key, key_len, 1); @@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, pad, zero_padding, 1); */ - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src_len + zero_padding); - crypto_free_blkcipher(tfm); - if (ret < 0) + if (ret < 0) { pr_err("ceph_aes_crypt failed %d\n", ret); + goto out_sg; + } /* print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_out); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, @@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, const void *src1, size_t src1_len, const void *src2, size_t src2_len) { - struct scatterlist sg_in[3], sg_out[1]; + struct scatterlist sg_in[3], prealloc_sg; + struct sg_table sg_out; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; @@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, *dst_len = src1_len + src2_len + zero_padding; - crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 3); 
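setup_sgtable() above has to cope with both kmalloc'ed and vmalloc'ed buffers: vmalloc memory is not physically contiguous, so it is mapped into the scatterlist one page at a time, while the single-sg case avoids an allocation entirely via the preallocated entry. A minimal, hypothetical caller — illustration only; the real users are the ceph_aes_* routines in this file — would look roughly like:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	/* Sketch: encrypt into a possibly vmalloc'ed destination buffer. */
	static int example_encrypt(struct crypto_blkcipher *tfm,
				   void *dst, size_t dst_len,
				   const void *src, size_t src_len)
	{
		struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
		struct scatterlist sg_in[1], prealloc_sg;
		struct sg_table sg_out;
		int ret;

		sg_init_table(sg_in, 1);
		sg_set_buf(&sg_in[0], src, src_len); /* src assumed kmalloc'ed */

		ret = setup_sgtable(&sg_out, &prealloc_sg, dst, dst_len);
		if (ret)
			return ret;

		ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src_len);

		teardown_sgtable(&sg_out);
		return ret;
	}

The real callers also handle block-size zero padding and key/IV setup before invoking the cipher; that is omitted here for brevity.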
sg_set_buf(&sg_in[0], src1, src1_len); sg_set_buf(&sg_in[1], src2, src2_len); sg_set_buf(&sg_in[2], pad, zero_padding); - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst, *dst_len); + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); + if (ret) + goto out_tfm; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); + /* print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, key, key_len, 1); @@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, pad, zero_padding, 1); */ - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src1_len + src2_len + zero_padding); - crypto_free_blkcipher(tfm); - if (ret < 0) + if (ret < 0) { pr_err("ceph_aes_crypt2 failed %d\n", ret); + goto out_sg; + } /* print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_out); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { - struct scatterlist sg_in[1], sg_out[2]; + struct sg_table sg_in; + struct scatterlist sg_out[2], prealloc_sg; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; @@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len, if (IS_ERR(tfm)) return PTR_ERR(tfm); - crypto_blkcipher_setkey((void *)tfm, key, key_len); - sg_init_table(sg_in, 1); sg_init_table(sg_out, 2); - sg_set_buf(sg_in, src, src_len); sg_set_buf(&sg_out[0], dst, *dst_len); sg_set_buf(&sg_out[1], pad, sizeof(pad)); + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); + if (ret) + goto out_tfm; + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); /* @@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, src, src_len, 1); */ - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); - crypto_free_blkcipher(tfm); + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); - return ret; + goto out_sg; } if (src_len <= *dst_len) @@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_in); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_decrypt2(const void *key, int key_len, @@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len) { - struct scatterlist sg_in[1], sg_out[3]; + struct sg_table sg_in; + struct scatterlist sg_out[3], prealloc_sg; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; @@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len, if (IS_ERR(tfm)) return PTR_ERR(tfm); - sg_init_table(sg_in, 1); - sg_set_buf(sg_in, src, src_len); sg_init_table(sg_out, 3); sg_set_buf(&sg_out[0], dst1, *dst1_len); sg_set_buf(&sg_out[1], dst2, *dst2_len); 
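Further down, the net/ceph/messenger.c hunk marks the ceph socket and its worker as memalloc-capable (sk_set_memalloc(), __GFP_MEMALLOC on sk_allocation, PF_MEMALLOC around con_work()) so that network writeback can still make progress under memory pressure. The worker-side pattern it uses is roughly the following sketch — a hypothetical work handler, not the ceph code itself:

	#include <linux/sched.h>
	#include <linux/workqueue.h>

	/* Sketch: let a work item dip into memory reserves, then restore flags. */
	static void example_memalloc_work(struct work_struct *work)
	{
		unsigned long pflags = current->flags;

		current->flags |= PF_MEMALLOC;

		/* ... socket I/O that must not deadlock waiting on reclaim ... */

		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	}

Saving and restoring only the PF_MEMALLOC bit, rather than clearing it unconditionally, keeps the flag intact if the worker was already running in a memalloc context.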
sg_set_buf(&sg_out[2], pad, sizeof(pad)); + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); + if (ret) + goto out_tfm; crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); /* @@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, src, src_len, 1); */ - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); - crypto_free_blkcipher(tfm); + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); - return ret; + goto out_sg; } if (src_len <= *dst1_len) @@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len, dst2, *dst2_len, 1); */ - return 0; +out_sg: + teardown_sgtable(&sg_in); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 559c9f619c2..8d1653caffd 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) IPPROTO_TCP, &sock); if (ret) return ret; - sock->sk->sk_allocation = GFP_NOFS; + sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC; #ifdef CONFIG_LOCKDEP lockdep_set_class(&sock->sk->sk_lock, &socket_class); @@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con) return ret; } + + sk_set_memalloc(sock->sk); + con->sock = sock; return 0; } @@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work) { struct ceph_connection *con = container_of(work, struct ceph_connection, work.work); + unsigned long pflags = current->flags; bool fault; + current->flags |= PF_MEMALLOC; + mutex_lock(&con->mutex); while (true) { int ret; @@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work) con_fault_finish(con); con->ops->put(con); + + tsk_restore_flags(current, pflags, PF_MEMALLOC); } /* diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f3fc54eac09..6f164289bde 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd) static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__remove_osd %p\n", osd); - BUG_ON(!list_empty(&osd->o_requests)); - BUG_ON(!list_empty(&osd->o_linger_requests)); + WARN_ON(!list_empty(&osd->o_requests)); + WARN_ON(!list_empty(&osd->o_linger_requests)); rb_erase(&osd->o_node, &osdc->osds); list_del_init(&osd->o_osd_lru); @@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, if (list_empty(&req->r_osd_item)) req->r_osd = NULL; } + + list_del_init(&req->r_req_lru_item); /* can be on notarget */ ceph_osdc_put_request(req); } @@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc, if (req->r_osd) { __cancel_request(req); list_del_init(&req->r_osd_item); + list_del_init(&req->r_linger_osd_item); req->r_osd = NULL; } diff --git a/net/core/dev.c b/net/core/dev.c index b793e3521a3..945bbd00135 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { + if (unlikely(skb->pfmemalloc)) { + consume_skb(skb); + return; + } __skb_pull(skb, skb_headlen(skb)); /* restore the reserve we had after netdev_alloc_skb_ip_align() */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); diff --git 
a/net/core/ethtool.c b/net/core/ethtool.c index 1600aa24d36..06dfb293e5a 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1036,7 +1036,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) { const struct ethtool_ops *ops = dev->ethtool_ops; - if (!ops->get_eeprom || !ops->get_eeprom_len) + if (!ops->get_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) return -EOPNOTSUPP; return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, @@ -1052,7 +1053,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) u8 *data; int ret = 0; - if (!ops->set_eeprom || !ops->get_eeprom_len) + if (!ops->set_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) return -EOPNOTSUPP; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a6882686ca3..b9b7dfaf202 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2685,13 +2685,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) int idx = 0; u32 portid = NETLINK_CB(cb->skb).portid; u32 seq = cb->nlh->nlmsg_seq; - struct nlattr *extfilt; u32 filter_mask = 0; - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), - IFLA_EXT_MASK); - if (extfilt) - filter_mask = nla_get_u32(extfilt); + if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { + struct nlattr *extfilt; + + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), + IFLA_EXT_MASK); + if (extfilt) { + if (nla_len(extfilt) < sizeof(filter_mask)) + return -EINVAL; + + filter_mask = nla_get_u32(extfilt); + } + } rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -2798,6 +2805,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_len(attr) < sizeof(flags)) + return -EINVAL; + have_flags = true; flags = nla_get_u16(attr); break; @@ -2868,6 +2878,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_len(attr) < sizeof(flags)) + return -EINVAL; + have_flags = true; flags = nla_get_u16(attr); break; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 61059a05ec9..32e31c29963 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb) case SKB_FCLONE_CLONE: fclones = container_of(skb, struct sk_buff_fclones, skb2); - /* Warning : We must perform the atomic_dec_and_test() before - * setting skb->fclone back to SKB_FCLONE_FREE, otherwise - * skb_clone() could set clone_ref to 2 before our decrement. - * Anyway, if we are going to free the structure, no need to - * rewrite skb->fclone. + /* The clone portion is available for + * fast-cloning again. */ - if (atomic_dec_and_test(&fclones->fclone_ref)) { + skb->fclone = SKB_FCLONE_FREE; + + if (atomic_dec_and_test(&fclones->fclone_ref)) kmem_cache_free(skbuff_fclone_cache, fclones); - } else { - /* The clone portion is available for - * fast-cloning again. - */ - skb->fclone = SKB_FCLONE_FREE; - } break; } } @@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (skb->fclone == SKB_FCLONE_ORIG && n->fclone == SKB_FCLONE_FREE) { n->fclone = SKB_FCLONE_CLONE; - /* As our fastclone was free, clone_ref must be 1 at this point. 
- * We could use atomic_inc() here, but it is faster - * to set the final value. - */ - atomic_set(&fclones->fclone_ref, 2); + atomic_inc(&fclones->fclone_ref); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; @@ -4070,15 +4059,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int thlen = 0; - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - return tcp_hdrlen(skb) + shinfo->gso_size; + if (skb->encapsulation) { + thlen = skb_inner_transport_header(skb) - + skb_transport_header(skb); + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + thlen += inner_tcp_hdrlen(skb); + } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { + thlen = tcp_hdrlen(skb); + } /* UFO sets gso_size to the size of the fragmentation * payload, i.e. the size of the L4 (UDP) header is already * accounted for. */ - return shinfo->gso_size; + return thlen + shinfo->gso_size; } EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); diff --git a/net/core/tso.c b/net/core/tso.c index 8c3203c585b..630b30b4fb5 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -1,6 +1,7 @@ #include <linux/export.h> #include <net/ip.h> #include <net/tso.h> +#include <asm/unaligned.h> /* Calculate expected number of TX descriptors */ int tso_count_descs(struct sk_buff *skb) @@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, iph->id = htons(tso->ip_id); iph->tot_len = htons(size + hdr_len - mac_hdr_len); tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); - tcph->seq = htonl(tso->tcp_seq); + put_unaligned_be32(tso->tcp_seq, &tcph->seq); tso->ip_id++; if (!is_last) { diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index ca11d283bbe..93ea80196f0 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (!app) return -EMSGSIZE; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), &itr->app); if (err) { - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } @@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); /* get peer info if available */ @@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) } /* local app */ - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; @@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { @@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) return 0; dcb_unlock: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_put_failure: return err; } @@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio = itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1789,7 +1789,7 @@ 
int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { if (new->priority) @@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (new->priority) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio |= 1 << itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and abort if found */ if (dcb_app_lookup(new, dev->ifindex, new->priority)) { err = -EEXIST; @@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and remove it. */ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { list_del(&itr->list); @@ -1890,7 +1890,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) err = 0; } - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1902,12 +1902,12 @@ static void dcb_flushapp(void) struct dcb_app_type *app; struct dcb_app_type *tmp; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { list_del(&app->list); kfree(app); } - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); } static int __init dcbnl_init(void) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 22f34cf4cb2..6317b41c99b 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, dst->rcv = brcm_netdev_ops.rcv; break; #endif - default: + case DSA_TAG_PROTO_NONE: break; + default: + ret = -ENOPROTOOPT; + goto out; } dst->tag_protocol = drv->tag_protocol; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6d1817449c3..ab03e00ffe8 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, /* We could not connect to a designated PHY, so use the switch internal * MDIO bus instead */ - if (!p->phy) + if (!p->phy) { p->phy = ds->slave_mii_bus->phy_map[p->port]; - else + phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); + } else { pr_info("attached PHY at address %d [%s]\n", p->phy->addr, p->phy->drv->name); + } } int dsa_slave_suspend(struct net_device *slave_dev) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 92db7a69f2b..e67da4e6c32 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1246,7 +1246,7 
@@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) - features = skb->dev->hw_enc_features & netif_skb_features(skb); + features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += ihl; skb_reset_transport_header(skb); @@ -1386,6 +1386,17 @@ out: return pp; } +int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) +{ + if (sk->sk_family == AF_INET) + return ip_recv_error(sk, msg, len, addr_len); +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); +#endif + return -EINVAL; +} + static int inet_gro_complete(struct sk_buff *skb, int nhoff) { __be16 newlen = htons(skb->len - nhoff); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index f2e15738534..8f7bd56955b 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) else res->tclassid = 0; #endif + + if (err == -ESRCH) + err = -ENETUNREACH; + return err; } EXPORT_SYMBOL_GPL(__fib_lookup); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 32e78924e24..606c520ffd5 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff) int err = -ENOSYS; const struct net_offload **offloads; + udp_tunnel_gro_complete(skb, nhoff); + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 065cd94c640..dedb21e9991 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c @@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); + skb_set_inner_protocol(skb, htons(ETH_P_TEB)); + return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, tos, ttl, df, src_port, dst_port, xnet); } @@ -364,6 +366,7 @@ late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { destroy_workqueue(geneve_wq); + unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index ccda09628de..bb5947b0ce2 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, greh = (struct gre_base_hdr *)skb_transport_header(skb); - ghl = skb_inner_network_header(skb) - skb_transport_header(skb); + ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); if (unlikely(ghl < sizeof(*greh))) goto out; @@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->mac_len = skb_inner_network_offset(skb); /* segment inner packet. 
*/ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + enc_features = skb->dev->hw_enc_features & features; segs = skb_mac_gso_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index fb70e3ecc3e..bb15d0e03d4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) return scount; } -#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) - -static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) +static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) { struct sk_buff *skb; struct rtable *rt; @@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct flowi4 fl4; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu; while (1) { skb = alloc_skb(size + hlen + tlen, @@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) return NULL; } skb->priority = TC_PRIO_CONTROL; - igmp_skb_size(skb) = size; rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 0, 0, @@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_dst_set(skb, &rt->dst); skb->dev = dev; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); skb_reset_network_header(skb); @@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? 
skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 9eb89f3f0ee..19419b60cb3 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -146,7 +146,6 @@ evict_again: atomic_inc(&fq->refcnt); spin_unlock(&hb->chain_lock); del_timer_sync(&fq->timer); - WARN_ON(atomic_read(&fq->refcnt) != 1); inet_frag_put(fq, f); goto evict_again; } @@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) struct inet_frag_bucket *hb; hb = get_frag_bucket_locked(fq, f); - hlist_del(&fq->list); + if (!(fq->flags & INET_FRAG_EVICTED)) + hlist_del(&fq->list); spin_unlock(&hb->chain_lock); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 88e5ef2c7f5..bc6471d4abc 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) */ features = netif_skb_features(skb); segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); - if (IS_ERR(segs)) { + if (IS_ERR_OR_NULL(segs)) { kfree_skb(skb); return -ENOMEM; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index c373a9ad455..9daf2177dc0 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; -#if defined(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IPV6) if (allow_ipv6 && cmsg->cmsg_level == SOL_IPV6 && cmsg->cmsg_type == IPV6_PKTINFO) { diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 3e861011e4a..1a7e979e80b 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { .validate = vti_tunnel_validate, .newlink = vti_newlink, .changelink = vti_changelink, + .dellink = ip_tunnel_dellink, .get_size = vti_get_size, .fill_info = vti_fill_info, }; diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index b023b4eb1a9..1baaa83dfe5 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -6,48 +6,45 @@ * published by the Free Software Foundation. */ +#include <linux/module.h> #include <net/ip.h> #include <net/tcp.h> #include <net/route.h> #include <net/dst.h> #include <linux/netfilter_ipv4.h> +#include <net/netfilter/ipv4/nf_reject.h> -/* Send RST reply */ -void nf_send_reset(struct sk_buff *oldskb, int hook) +const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *_oth, int hook) { - struct sk_buff *nskb; - const struct iphdr *oiph; - struct iphdr *niph; const struct tcphdr *oth; - struct tcphdr _otcph, *tcph; /* IP header checks: fragment. */ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) - return; + return NULL; oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), - sizeof(_otcph), &_otcph); + sizeof(struct tcphdr), _oth); if (oth == NULL) - return; + return NULL; /* No RST for RST. 
*/ if (oth->rst) - return; - - if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) - return; + return NULL; /* Check checksum */ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) - return; - oiph = ip_hdr(oldskb); + return NULL; - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + - LL_MAX_HEADER, GFP_ATOMIC); - if (!nskb) - return; + return oth; +} +EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get); - skb_reserve(nskb, LL_MAX_HEADER); +struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int ttl) +{ + struct iphdr *niph, *oiph = ip_hdr(oldskb); skb_reset_network_header(nskb); niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); @@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) niph->tos = 0; niph->id = 0; niph->frag_off = htons(IP_DF); - niph->protocol = IPPROTO_TCP; + niph->protocol = protocol; niph->check = 0; niph->saddr = oiph->daddr; niph->daddr = oiph->saddr; + niph->ttl = ttl; + + nskb->protocol = htons(ETH_P_IP); + + return niph; +} +EXPORT_SYMBOL_GPL(nf_reject_iphdr_put); + +void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, + const struct tcphdr *oth) +{ + struct iphdr *niph = ip_hdr(nskb); + struct tcphdr *tcph; skb_reset_transport_header(nskb); tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); @@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) tcph->dest = oth->source; tcph->doff = sizeof(struct tcphdr) / 4; - if (oth->ack) + if (oth->ack) { tcph->seq = oth->ack_seq; - else { + } else { tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + oldskb->len - ip_hdrlen(oldskb) - (oth->doff << 2)); @@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) nskb->ip_summed = CHECKSUM_PARTIAL; nskb->csum_start = (unsigned char *)tcph - nskb->head; nskb->csum_offset = offsetof(struct tcphdr, check); +} +EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put); + +/* Send RST reply */ +void nf_send_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct iphdr *oiph; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _oth; + + oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); + if (!oth) + return; + + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) + return; + + oiph = ip_hdr(oldskb); + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; /* ip_route_me_harder expects skb->dst to be set */ skb_dst_set_noref(nskb, skb_dst(oldskb)); - nskb->protocol = htons(ETH_P_IP); + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, + ip4_dst_hoplimit(skb_dst(nskb))); + nf_reject_ip_tcphdr_put(nskb, oldskb, oth); + if (ip_route_me_harder(nskb, RTN_UNSPEC)) goto free_nskb; - niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); - /* "Never happens" */ if (nskb->len > dst_mtu(skb_dst(nskb))) goto free_nskb; @@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) kfree_skb(nskb); } EXPORT_SYMBOL_GPL(nf_send_reset); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index 1c636d6b5b5..665de06561c 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, struct nf_nat_range range; unsigned int verdict; + memset(&range, 0, sizeof(range)); range.flags = priv->flags; verdict = 
nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, @@ -39,6 +40,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = { .eval = nft_masq_ipv4_eval, .init = nft_masq_init, .dump = nft_masq_dump, + .validate = nft_masq_validate, }; static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 57f7c980413..5d740cccf69 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) &ipv6_hdr(skb)->daddr)) continue; #endif + } else { + continue; } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) @@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - if (flags & MSG_ERRQUEUE) { - if (family == AF_INET) { - return ip_recv_error(sk, msg, len, addr_len); -#if IS_ENABLED(CONFIG_IPV6) - } else if (family == AF_INET6) { - return pingv6_ops.ipv6_recv_error(sk, msg, len, - addr_len); -#endif - } - } + if (flags & MSG_ERRQUEUE) + return inet_recv_error(sk, msg, len, addr_len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2d4ae469b47..6a2155b0260 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1798,6 +1798,7 @@ local_input: no_route: RT_CACHE_STAT_INC(in_no_route); res.type = RTN_UNREACHABLE; + res.fi = NULL; goto local_input; /* diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1bec4e76d88..38c2bcb8dd5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, u32 urg_hole = 0; if (unlikely(flags & MSG_ERRQUEUE)) - return ip_recv_error(sk, msg, len, addr_len); + return inet_recv_error(sk, msg, len, addr_len); if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && (sk->sk_state == TCP_ESTABLISHED)) @@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt); #endif #ifdef CONFIG_TCP_MD5SIG -static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; +static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); static DEFINE_MUTEX(tcp_md5sig_mutex); - -static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); - - if (p->md5_desc.tfm) - crypto_free_hash(p->md5_desc.tfm); - } - free_percpu(pool); -} +static bool tcp_md5sig_pool_populated = false; static void __tcp_alloc_md5sig_pool(void) { int cpu; - struct tcp_md5sig_pool __percpu *pool; - - pool = alloc_percpu(struct tcp_md5sig_pool); - if (!pool) - return; for_each_possible_cpu(cpu) { - struct crypto_hash *hash; - - hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR_OR_NULL(hash)) - goto out_free; + if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { + struct crypto_hash *hash; - per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; + hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR_OR_NULL(hash)) + return; + per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; + } } - /* before setting tcp_md5sig_pool, we must commit all writes - * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() + /* before setting tcp_md5sig_pool_populated, we must commit all writes + * to memory. 
See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool = pool; - return; -out_free: - __tcp_free_md5sig_pool(pool); + tcp_md5sig_pool_populated = true; } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool)) { + if (unlikely(!tcp_md5sig_pool_populated)) { mutex_lock(&tcp_md5sig_mutex); - if (!tcp_md5sig_pool) + if (!tcp_md5sig_pool_populated) __tcp_alloc_md5sig_pool(); mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool != NULL; + return tcp_md5sig_pool_populated; } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); */ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { - struct tcp_md5sig_pool __percpu *p; - local_bh_disable(); - p = ACCESS_ONCE(tcp_md5sig_pool); - if (p) - return raw_cpu_ptr(p); + if (tcp_md5sig_pool_populated) { + /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ + smp_rmb(); + return this_cpu_ptr(&tcp_md5sig_pool); + } local_bh_enable(); return NULL; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e..d107ee246a1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) /* Undo procedures. */ +/* We can clear retrans_stamp when there are no retransmissions in the + * window. It would seem that it is trivially available for us in + * tp->retrans_out, however, that kind of assumptions doesn't consider + * what will happen if errors occur when sending retransmission for the + * second time. ...It could the that such segment has only + * TCPCB_EVER_RETRANS set at the present time. It seems that checking + * the head skb is enough except for some reneging corner cases that + * are not worth the effort. + * + * Main reason for all this complexity is the fact that connection dying + * time now depends on the validity of the retrans_stamp, in particular, + * that successive retransmissions of a segment must not advance + * retrans_stamp under any conditions. + */ +static bool tcp_any_retrans_done(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + + if (tp->retrans_out) + return true; + + skb = tcp_write_queue_head(sk); + if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) + return true; + + return false; +} + #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { @@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); + if (!tcp_any_retrans_done(sk)) + tp->retrans_stamp = 0; return true; } tcp_set_ca_state(sk, TCP_CA_Open); @@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) return false; } -/* We can clear retrans_stamp when there are no retransmissions in the - * window. It would seem that it is trivially available for us in - * tp->retrans_out, however, that kind of assumptions doesn't consider - * what will happen if errors occur when sending retransmission for the - * second time. ...It could the that such segment has only - * TCPCB_EVER_RETRANS set at the present time. It seems that checking - * the head skb is enough except for some reneging corner cases that - * are not worth the effort. 
- * - * Main reason for all this complexity is the fact that connection dying - * time now depends on the validity of the retrans_stamp, in particular, - * that successive retransmissions of a segment must not advance - * retrans_stamp under any conditions. - */ -static bool tcp_any_retrans_done(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - if (tp->retrans_out) - return true; - - skb = tcp_write_queue_head(sk); - if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) - return true; - - return false; -} - /* Undo during loss recovery after partial ACK or using F-RTO. */ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { @@ -5229,7 +5231,7 @@ slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; - if (!th->ack && !th->rst) + if (!th->ack && !th->rst && !th->syn) goto discard; /* @@ -5648,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, goto discard; } - if (!th->ack && !th->rst) + if (!th->ack && !th->rst && !th->syn) goto discard; if (!tcp_validate_incoming(sk, skb, th, 0)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 94d1a7757ff..147be202429 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; - inet_set_txhash(sk); - inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; @@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (err) goto failure; + inet_set_txhash(sk); + rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { @@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) if (th->rst) return; - if (skb_rtable(skb)->rt_type != RTN_LOCAL) + /* If sk not NULL, it means we did a successful lookup and incoming + * route had to be correct. prequeue might have dropped our dst. + */ + if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3af21296d96..a3d453b9474 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) static bool skb_still_in_host_queue(const struct sock *sk, const struct sk_buff *skb) { - if (unlikely(skb_fclone_busy(skb))) { + if (unlikely(skb_fclone_busy(sk, skb))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); return true; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 507310ef4b5..6480cea7aa5 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, skb->encap_hdr_csum = 1; /* segment inner packet. 
*/ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + enc_features = skb->dev->hw_enc_features & features; segs = gso_inner_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 725c763270a..0169ccf5aa4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) } write_unlock_bh(&idev->lock); + inet6_ifinfo_notify(RTM_NEWLINK, idev); addrconf_verify_rtnl(); return 0; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 12c3c8ef384..0e32d2e1bdb 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb) skb->protocol = gre_proto; /* WCCP version 1 and 2 protocol decoding. - * - Change protocol to IP + * - Change protocol to IPv6 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { - skb->protocol = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IPV6); if ((*(h + offset) & 0xF0) != 0x40) offset += 4; } @@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) else dev->flags &= ~IFF_POINTOPOINT; - dev->iflink = p->link; - /* Precalculate GRE options length */ if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { if (t->parms.o_flags&GRE_CSUM) @@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) u64_stats_init(&ip6gre_tunnel_stats->syncp); } + dev->iflink = tunnel->parms.link; return 0; } @@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev) if (!dev->tstats) return -ENOMEM; + dev->iflink = tunnel->parms.link; + return 0; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 91014d32488..01e12d0d8fc 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int nhoff; if (unlikely(skb_shinfo(skb)->gso_type & - ~(SKB_GSO_UDP | + ~(SKB_GSO_TCPV4 | + SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | @@ -90,7 +91,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) - features = skb->dev->hw_enc_features & netif_skb_features(skb); + features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6h = ipv6_hdr(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9409887fb66..9cb94cfa0ae 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev) int err; t = netdev_priv(dev); - err = ip6_tnl_dev_init(dev); - if (err < 0) - goto out; err = register_netdevice(dev); if (err < 0) @@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) static const struct net_device_ops ip6_tnl_netdev_ops = { + .ndo_init = ip6_tnl_dev_init, .ndo_uninit = ip6_tnl_dev_uninit, .ndo_start_xmit = ip6_tnl_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, @@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - int err = ip6_tnl_dev_init_gen(dev); - - if (err) - return err; t->parms.proto = IPPROTO_IPV6; dev_hold(dev); - ip6_tnl_link_config(t); - rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; } diff --git 
a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index b04ed72c454..8db6c98fe21 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, uh->source = src_port; uh->len = htons(skb->len); - uh->check = 0; memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_set(skb, dst); - udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, - &sk->sk_v6_daddr, skb->len); + udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len); __skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d440bb58552..bcda14de7f8 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) struct vti6_net *ip6n = net_generic(net, vti6_net_id); int err; - err = vti6_dev_init(dev); - if (err < 0) - goto out; - err = register_netdevice(dev); if (err < 0) goto out; @@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) } static const struct net_device_ops vti6_netdev_ops = { + .ndo_init = vti6_dev_init, .ndo_uninit = vti6_dev_uninit, .ndo_start_xmit = vti6_tnl_xmit, .ndo_do_ioctl = vti6_ioctl, @@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); - int err = vti6_dev_init_gen(dev); - - if (err) - return err; t->parms.proto = IPPROTO_IPV6; dev_hold(dev); - vti6_link_config(t); - rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; } @@ -914,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, return vti6_tnl_create2(dev); } +static void vti6_dellink(struct net_device *dev, struct list_head *head) +{ + struct net *net = dev_net(dev); + struct vti6_net *ip6n = net_generic(net, vti6_net_id); + + if (dev != ip6n->fb_tnl_dev) + unregister_netdevice_queue(dev, head); +} + static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -989,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = { .setup = vti6_dev_setup, .validate = vti6_validate, .newlink = vti6_newlink, + .dellink = vti6_dellink, .changelink = vti6_changelink, .get_size = vti6_get_size, .fill_info = vti6_fill_info, @@ -1029,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net) if (!ip6n->fb_tnl_dev) goto err_alloc_dev; dev_net_set(ip6n->fb_tnl_dev, net); + ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops; err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); if (err < 0) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c..1a01d79b869 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1439,6 +1439,10 @@ reg_pernet_fail: void ip6_mr_cleanup(void) { + rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE); +#ifdef CONFIG_IPV6_PIMSM_V2 + inet6_del_protocol(&pim6_protocol, IPPROTO_PIM); +#endif unregister_netdevice_notifier(&ip6_mr_notifier); unregister_pernet_subsys(&ip6mr_net_ops); kmem_cache_destroy(mrt_cachep); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9648de2b674..ed2c4e400b4 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, hdr->daddr = *daddr; } -static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) +static 
struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) { struct net_device *dev = idev->dev; struct net *net = dev_net(dev); @@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) const struct in6_addr *saddr; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu + hlen + tlen; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - size += hlen + tlen; /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); @@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) return NULL; skb->priority = TC_PRIO_CONTROL; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { @@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted, int crsend) diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5f5f0438d74..015eb8a8076 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -5,121 +5,109 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + +#include <linux/module.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/ip6_fib.h> #include <net/ip6_checksum.h> #include <linux/netfilter_ipv6.h> +#include <net/netfilter/ipv6/nf_reject.h> -void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *otcph, + unsigned int *otcplen, int hook) { - struct sk_buff *nskb; - struct tcphdr otcph, *tcph; - unsigned int otcplen, hh_len; - int tcphoff, needs_ack; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); - struct ipv6hdr *ip6h; -#define DEFAULT_TOS_VALUE 0x0U - const __u8 tclass = DEFAULT_TOS_VALUE; - struct dst_entry *dst = NULL; u8 proto; __be16 frag_off; - struct flowi6 fl6; - - if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || - (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { - pr_debug("addr is not unicast.\n"); - return; - } + int tcphoff; proto = oip6h->nexthdr; - tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), + &proto, &frag_off); if ((tcphoff < 0) || (tcphoff > oldskb->len)) { pr_debug("Cannot get TCP header.\n"); - return; + return NULL; } - otcplen = oldskb->len - tcphoff; + *otcplen = oldskb->len - tcphoff; /* IP header checks: fragment, too short. */ - if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { - pr_debug("proto(%d) != IPPROTO_TCP, " - "or too short. 
otcplen = %d\n", - proto, otcplen); - return; + if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) { + pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n", + proto, *otcplen); + return NULL; } - if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) - BUG(); + otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr), + otcph); + if (otcph == NULL) + return NULL; /* No RST for RST. */ - if (otcph.rst) { + if (otcph->rst) { pr_debug("RST is set\n"); - return; + return NULL; } /* Check checksum. */ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { pr_debug("TCP checksum is invalid\n"); - return; + return NULL; } - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_TCP; - fl6.saddr = oip6h->daddr; - fl6.daddr = oip6h->saddr; - fl6.fl6_sport = otcph.dest; - fl6.fl6_dport = otcph.source; - security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); - dst = ip6_route_output(net, NULL, &fl6); - if (dst == NULL || dst->error) { - dst_release(dst); - return; - } - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); - if (IS_ERR(dst)) - return; - - hh_len = (dst->dev->hard_header_len + 15)&~15; - nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) - + sizeof(struct tcphdr) + dst->trailer_len, - GFP_ATOMIC); - - if (!nskb) { - net_dbg_ratelimited("cannot alloc skb\n"); - dst_release(dst); - return; - } - - skb_dst_set(nskb, dst); + return otcph; +} +EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get); - skb_reserve(nskb, hh_len + dst->header_len); +struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int hoplimit) +{ + struct ipv6hdr *ip6h; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); +#define DEFAULT_TOS_VALUE 0x0U + const __u8 tclass = DEFAULT_TOS_VALUE; skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); ip6h = ipv6_hdr(nskb); ip6_flow_hdr(ip6h, tclass, 0); - ip6h->hop_limit = ip6_dst_hoplimit(dst); - ip6h->nexthdr = IPPROTO_TCP; + ip6h->hop_limit = hoplimit; + ip6h->nexthdr = protocol; ip6h->saddr = oip6h->daddr; ip6h->daddr = oip6h->saddr; + nskb->protocol = htons(ETH_P_IPV6); + + return ip6h; +} +EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put); + +void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + const struct tcphdr *oth, unsigned int otcplen) +{ + struct tcphdr *tcph; + int needs_ack; + skb_reset_transport_header(nskb); tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; - tcph->source = otcph.dest; - tcph->dest = otcph.source; + tcph->source = oth->dest; + tcph->dest = oth->source; - if (otcph.ack) { + if (oth->ack) { needs_ack = 0; - tcph->seq = otcph.ack_seq; + tcph->seq = oth->ack_seq; tcph->ack_seq = 0; } else { needs_ack = 1; - tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin - + otcplen - (otcph.doff<<2)); + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + otcplen - (oth->doff<<2)); tcph->seq = 0; } @@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) sizeof(struct tcphdr), IPPROTO_TCP, csum_partial(tcph, sizeof(struct tcphdr), 0)); +} +EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); + +void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct tcphdr _otcph; + const struct tcphdr *otcph; + unsigned int otcplen, hh_len; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); + struct ipv6hdr *ip6h; + struct dst_entry 
*dst = NULL; + struct flowi6 fl6; + + if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || + (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { + pr_debug("addr is not unicast.\n"); + return; + } + + otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook); + if (!otcph) + return; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_TCP; + fl6.saddr = oip6h->daddr; + fl6.daddr = oip6h->saddr; + fl6.fl6_sport = otcph->dest; + fl6.fl6_dport = otcph->source; + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); + dst = ip6_route_output(net, NULL, &fl6); + if (dst == NULL || dst->error) { + dst_release(dst); + return; + } + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); + if (IS_ERR(dst)) + return; + + hh_len = (dst->dev->hard_header_len + 15)&~15; + nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) + + sizeof(struct tcphdr) + dst->trailer_len, + GFP_ATOMIC); + + if (!nskb) { + net_dbg_ratelimited("cannot alloc skb\n"); + dst_release(dst); + return; + } + + skb_dst_set(nskb, dst); + + skb_reserve(nskb, hh_len + dst->header_len); + ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, + ip6_dst_hoplimit(dst)); + nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); nf_ct_attach(nskb, oldskb); @@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) ip6_local_out(nskb); } EXPORT_SYMBOL_GPL(nf_send_reset6); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 556262f4076..529c119cbb1 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, struct nf_nat_range range; unsigned int verdict; + memset(&range, 0, sizeof(range)); range.flags = priv->flags; verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); @@ -39,6 +40,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = { .eval = nft_masq_ipv6_eval, .init = nft_masq_init, .dump = nft_masq_dump, + .validate = nft_masq_validate, }; static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index fc24c390af0..97f41a3e68d 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -3,11 +3,45 @@ * not configured or static. These functions are needed by GSO/GRO implementation. */ #include <linux/export.h> +#include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_fib.h> #include <net/addrconf.h> #include <net/secure_seq.h> +/* This function exists only for tap drivers that must support broken + * clients requesting UFO without specifying an IPv6 fragment ID. + * + * This is similar to ipv6_select_ident() but we use an independent hash + * seed to limit information leakage. + * + * The network header must be set before calling this. 
+ */ +void ipv6_proxy_select_ident(struct sk_buff *skb) +{ + static u32 ip6_proxy_idents_hashrnd __read_mostly; + struct in6_addr buf[2]; + struct in6_addr *addrs; + u32 hash, id; + + addrs = skb_header_pointer(skb, + skb_network_offset(skb) + + offsetof(struct ipv6hdr, saddr), + sizeof(buf), buf); + if (!addrs) + return; + + net_get_random_once(&ip6_proxy_idents_hashrnd, + sizeof(ip6_proxy_idents_hashrnd)); + + hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); + hash = __ipv6_addr_jhash(&addrs[0], hash); + + id = ip_idents_reserve(hash, 1); + skb_shinfo(skb)->ip6_frag_id = htonl(id); +} +EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 58e5b471012..a24557a1c1d 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) struct sit_net *sitn = net_generic(net, sit_net_id); int err; - err = ipip6_tunnel_init(dev); - if (err < 0) - goto out; - ipip6_tunnel_clone_6rd(dev, sitn); + memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); + memcpy(dev->broadcast, &t->parms.iph.daddr, 4); if ((__force u16)t->parms.i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; @@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) if (err < 0) goto out; - strcpy(t->parms.name, dev->name); + ipip6_tunnel_clone_6rd(dev, sitn); + dev->rtnl_link_ops = &sit_link_ops; dev_hold(dev); @@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) } static const struct net_device_ops ipip6_netdev_ops = { + .ndo_init = ipip6_tunnel_init, .ndo_uninit = ipip6_tunnel_uninit, .ndo_start_xmit = sit_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, @@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev) tunnel->dev = dev; tunnel->net = dev_net(dev); - - memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); - memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); + strcpy(tunnel->parms.name, dev->name); ipip6_tunnel_bind_dev(dev); dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); @@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) tunnel->dev = dev; tunnel->net = dev_net(dev); - strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->protocol = IPPROTO_IPV6; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 831495529b8..dc495ae2ead 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk->sk_v6_daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; - ip6_set_txhash(sk); - /* * TCP over IPv4 */ @@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (err) goto late_failure; + ip6_set_txhash(sk); + if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32, @@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) if (th->rst) return; - if (!ipv6_unicast_destination(skb)) + /* If sk not NULL, it means we did a successful lookup and incoming + * route had to be correct. prequeue might have dropped our dst. 
+ */ + if (!sk && !ipv6_unicast_destination(skb)) return; #ifdef CONFIG_TCP_MD5SIG diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ac49f84fe2c..5f983644373 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_DCCP: if (!onlyproto && (nh + offset + 4 < skb->data || pskb_may_pull(skb, nh + offset + 4 - skb->data))) { - __be16 *ports = (__be16 *)exthdr; + __be16 *ports; + nh = skb_network_header(skb); + ports = (__be16 *)(nh + offset); fl6->fl6_sport = ports[!!reverse]; fl6->fl6_dport = ports[!reverse]; } @@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_ICMPV6: if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { - u8 *icmp = (u8 *)exthdr; + u8 *icmp; + nh = skb_network_header(skb); + icmp = (u8 *)(nh + offset); fl6->fl6_icmp_type = icmp[0]; fl6->fl6_icmp_code = icmp[1]; } @@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_MH: if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; - mh = (struct ip6_mh *)exthdr; + nh = skb_network_header(skb); + mh = (struct ip6_mh *)(nh + offset); fl6->fl6_mh_type = mh->ip6mh_type; } fl6->flowi6_proto = nexthdr; diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7..1b095ca37aa 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; + bool locked = true; lock_sock(sk); /* put the autobinding in */ @@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, if (sock_flag(sk, SOCK_ZAPPED)) goto out; + release_sock(sk); + locked = false; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) { @@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, out_free: skb_free_datagram(sk, skb); out: - release_sock(sk); + if (locked) + release_sock(sk); return rc; } diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 92fafd485de..3f3a6cbdceb 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1064,8 +1064,6 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; - if (sk->sk_prot->disconnect(sk, flags)) - sock->state = SS_DISCONNECTING; err = sock_error(sk); if (!err) err = -ECONNRESET; diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index ec24378caaa..09d9caaec59 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, __aligned(__alignof__(struct aead_request)); struct aead_request *aead_req = (void *) aead_req_data; + if (data_len == 0) + return -EINVAL; + memset(aead_req, 0, sizeof(aead_req_data)); sg_init_one(&pt, data, data_len); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb6a1502b6d..343da1e3502 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3458,7 +3458,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (chanctx_conf) { - *chandef = chanctx_conf->def; + *chandef = sdata->vif.bss_conf.chandef; ret = 0; } else if (local->open_count > 0 && local->open_count == local->monitors && diff --git 
a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 56b53571c80..509bc157ce5 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, memset(&params, 0, sizeof(params)); memset(&csa_ie, 0, sizeof(csa_ie)); - err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, + err = ieee80211_parse_ch_switch_ie(sdata, elems, ifibss->chandef.chan->band, sta_flags, ifibss->bssid, &csa_ie); /* can't switch to destination channel, fail */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c2aaec4dfcf..8c68da30595 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, * ieee80211_parse_ch_switch_ie - parses channel switch IEs * @sdata: the sdata of the interface which has received the frame * @elems: parsed 802.11 elements received with the frame - * @beacon: indicates if the frame was a beacon or probe response * @current_band: indicates the current band * @sta_flags: contains information about own capabilities and restrictions * to decide which channel switch announcements can be accepted. Only the @@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, * Return: 0 on success, <0 on error and >0 if there is nothing to parse. */ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *elems, bool beacon, + struct ieee802_11_elems *elems, enum ieee80211_band current_band, u32 sta_flags, u8 *bssid, struct ieee80211_csa_ie *csa_ie); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index af237223a8c..653f5eb07a2 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, int i, flushed; struct ps_data *ps; struct cfg80211_chan_def chandef; + bool cancel_scan; clear_bit(SDATA_STATE_RUNNING, &sdata->state); - if (rcu_access_pointer(local->scan_sdata) == sdata) + cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; + if (cancel_scan) ieee80211_scan_cancel(local); /* @@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, list_del(&sdata->u.vlan.list); mutex_unlock(&local->mtx); RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); + /* see comment in the default case below */ + ieee80211_free_keys(sdata, true); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: @@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, /* * When we get here, the interface is marked down. * Free the remaining keys, if there are any - * (shouldn't be, except maybe in WDS mode?) + * (which can happen in AP mode if userspace sets + * keys before the interface is operating, and maybe + * also in WDS mode) * * Force the key freeing to always synchronize_net() * to wait for the RX path in case it is using this - * interface enqueuing frames * at this very time on + * interface enqueuing frames at this very time on * another CPU. 
*/ ieee80211_free_keys(sdata, true); - - /* fall through */ - case NL80211_IFTYPE_AP: skb_queue_purge(&sdata->skb_queue); } @@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, ieee80211_recalc_ps(local, -1); + if (cancel_scan) + flush_delayed_work(&local->scan_work); + if (local->open_count == 0) { ieee80211_stop_device(local); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index e9f99c1e3fa..0c8b2a77d31 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, memset(&params, 0, sizeof(params)); memset(&csa_ie, 0, sizeof(csa_ie)); - err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, + err = ieee80211_parse_ch_switch_ie(sdata, elems, band, sta_flags, sdata->vif.addr, &csa_ie); if (err < 0) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2de88704278..93af0f1c9d9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, current_band = cbss->channel->band; memset(&csa_ie, 0, sizeof(csa_ie)); - res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, + res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, ifmgd->flags, ifmgd->associated->bssid, &csa_ie); if (res < 0) @@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); else mod_timer(&ifmgd->chswitch_timer, - TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); + TU_TO_EXP_TIME((csa_ie.count - 1) * + cbss->beacon_interval)); } static bool diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 8fdadfd94ba..6081329784d 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif, */ if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { u32 basic_rates = vif->bss_conf.basic_rates; - s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0; + s8 baserate = basic_rates ? 
ffs(basic_rates) - 1 : 0; rate = &sband->bitrates[rates[0].idx]; diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index edde723f9f0..2acab1bcaa4 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -62,14 +62,14 @@ minstrel_stats_open(struct inode *inode, struct file *file) unsigned int i, tp, prob, eprob; char *p; - ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL); + ms = kmalloc(2048, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; - p += sprintf(p, "rate throughput ewma prob this prob " - "this succ/attempt success attempts\n"); + p += sprintf(p, "rate tpt eprob *prob" + " *ok(*cum) ok( cum)\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; struct minstrel_rate_stats *mrs = &mi->r[i].stats; @@ -86,8 +86,8 @@ minstrel_stats_open(struct inode *inode, struct file *file) prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); eprob = MINSTREL_TRUNC(mrs->probability * 1000); - p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " - " %3u(%3u) %8llu %8llu\n", + p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u" + " %4u(%4u) %9llu(%9llu)\n", tp / 10, tp % 10, eprob / 10, eprob % 10, prob / 10, prob % 10, @@ -102,6 +102,8 @@ minstrel_stats_open(struct inode *inode, struct file *file) mi->sample_packets); ms->len = p - ms->buf; + WARN_ON(ms->len + sizeof(*ms) > 2048); + return 0; } diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index df90ce2db00..408fd8ab4ee 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; cur_prob = mi->groups[cur_group].rates[cur_idx].probability; - tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; - tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; - tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; - tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; - - while (j > 0 && (cur_thr > tmp_thr || - (cur_thr == tmp_thr && cur_prob > tmp_prob))) { - j--; + do { tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; - } + if (cur_thr < tmp_thr || + (cur_thr == tmp_thr && cur_prob <= tmp_prob)) + break; + j--; + } while (j > 0); if (j < MAX_THR_RATES - 1) { memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index a72ad46f2a0..d537bec9375 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -63,8 +63,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) prob = MINSTREL_TRUNC(mr->cur_prob * 1000); eprob = MINSTREL_TRUNC(mr->probability * 1000); - p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " - "%3u %3u(%3u) %8llu %8llu\n", + p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u " + "%3u %4u(%4u) %9llu(%9llu)\n", tp / 10, tp % 10, eprob / 10, eprob % 10, prob / 10, prob % 10, @@ -96,14 +96,15 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) return ret; } - ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL); + ms = kmalloc(8192, GFP_KERNEL); if (!ms) return -ENOMEM; file->private_data = ms; p = ms->buf; - p += sprintf(p, "type rate throughput ewma prob " - "this prob retry this succ/attempt success attempts\n"); + p += 
sprintf(p, "type rate tpt eprob *prob " + "ret *ok(*cum) ok( cum)\n"); + p = minstrel_ht_stats_dump(mi, max_mcs, p); for (i = 0; i < max_mcs; i++) @@ -118,6 +119,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); ms->len = p - ms->buf; + WARN_ON(ms->len + sizeof(*ms) > 8192); + return nonseekable_open(inode, file); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index b04ca4049c9..a37f9af634c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) sc = le16_to_cpu(hdr->seq_ctrl); frag = sc & IEEE80211_SCTL_FRAG; - if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || - is_multicast_ether_addr(hdr->addr1))) { - /* not fragmented */ + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) + goto out; + + if (is_multicast_ether_addr(hdr->addr1)) { + rx->local->dot11MulticastReceivedFrameCount++; goto out; } + I802_DEBUG_INC(rx->local->rx_handlers_fragments); if (skb_linearize(rx->skb)) @@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) out: if (rx->sta) rx->sta->rx_packets++; - if (is_multicast_ether_addr(hdr->addr1)) - rx->local->dot11MulticastReceivedFrameCount++; - else - ieee80211_led_rx(rx->local); + ieee80211_led_rx(rx->local); return RX_CONTINUE; } diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 6ab00907008..efeba56c913 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -22,7 +22,7 @@ #include "wme.h" int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, - struct ieee802_11_elems *elems, bool beacon, + struct ieee802_11_elems *elems, enum ieee80211_band current_band, u32 sta_flags, u8 *bssid, struct ieee80211_csa_ie *csa_ie) @@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, return -EINVAL; } - if (!beacon && sec_chan_offs) { + if (sec_chan_offs) { secondary_channel_offset = sec_chan_offs->sec_chan_offs; - } else if (beacon && ht_oper) { - secondary_channel_offset = - ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { - /* If it's not a beacon, HT is enabled and the IE not present, - * it's 20 MHz, 802.11-2012 8.5.2.6: - * This element [the Secondary Channel Offset Element] is - * present when switching to a 40 MHz channel. It may be - * present when switching to a 20 MHz channel (in which - * case the secondary channel offset is set to SCN). - */ + /* If the secondary channel offset IE is not present, + * we can't know what's the post-CSA offset, so the + * best we can do is use 20MHz. + */ secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 42f68cb8957..bcda2ac7d84 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -336,6 +336,7 @@ struct ieee80211_tx_latency_stat { * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for * AP only. * @cipher_scheme: optional cipher scheme for this station + * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed */ struct sta_info { /* General information, mostly static */ diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 0a3c171be53..6dec088c2d0 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile @@ -1,4 +1,4 @@ # # Makefile for MPLS. 
# -obj-y += mpls_gso.o +obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index e28ed2ef5b0..e3545f21a09 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, __skb_push(skb, skb->mac_len); /* Segment inner packet. */ - mpls_features = skb->dev->mpls_features & netif_skb_features(skb); + mpls_features = skb->dev->mpls_features & features; segs = skb_mac_gso_segment(skb, mpls_features); @@ -59,8 +59,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, * above pulled. It will be re-pushed after returning * skb_mac_gso_segment(), an indirect caller of this function. */ - __skb_push(skb, skb->data - skb_mac_header(skb)); - + __skb_pull(skb, skb->data - skb_mac_header(skb)); out: return segs; } diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 912e5a05b79..d259da3ce67 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); - if (index > inst->ip_set_max) + if (index >= inst->ip_set_max) return IPSET_INVALID_ID; nfnl_lock(NFNL_SUBSYS_IPSET); @@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ struct ip_set_req_version *req_version = data; + + if (*len < sizeof(struct ip_set_req_version)) { + ret = -EINVAL; + goto done; + } + if (req_version->version != IPSET_PROTOCOL) { ret = -EPROTO; goto done; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 91f17c1eb8a..bd90bf8107d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, local))) { IP_VS_DBG_RL("We are crossing local and non-local addresses" - " daddr=%pI4\n", &dest->addr.ip); + " daddr=%pI4\n", &daddr); goto err_put; } @@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, local))) { IP_VS_DBG_RL("We are crossing local and non-local addresses" - " daddr=%pI6\n", &dest->addr.in6); + " daddr=%pI6\n", daddr); goto err_put; } @@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) goto error; + if (skb->sk) + skb_set_owner_w(new_skb, skb->sk); consume_skb(skb); skb = new_skb; } diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 44d1ea32570..d87b6423ffb 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { { /* REPLY */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ -/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 }, +/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 }, /* * sNO -> sIV Never reached. * sSS -> sS2 Simultaneous open @@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { * sFW -> sIV * sCW -> sIV * sLA -> sIV - * sTW -> sIV Reopened connection, but server may not do it. 
+ * sTW -> sSS Reopened connection, but server may have switched role * sCL -> sIV */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 556a0dfa4ab..66e8425dbfe 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, basechain->stats = stats; } else { stats = netdev_alloc_pcpu_stats(struct nft_stats); - if (IS_ERR(stats)) { + if (stats == NULL) { module_put(type->owner); kfree(basechain); - return PTR_ERR(stats); + return -ENOMEM; } rcu_assign_pointer(basechain->stats, stats); } @@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans) } } -/* Schedule objects for release via rcu to make sure no packets are accesing - * removed rules. - */ -static void nf_tables_commit_release_rcu(struct rcu_head *rt) +static void nf_tables_commit_release(struct nft_trans *trans) { - struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); - switch (trans->msg_type) { case NFT_MSG_DELTABLE: nf_tables_table_destroy(&trans->ctx); @@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb) } } + synchronize_rcu(); + list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); - trans->ctx.nla = NULL; - call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu); + nf_tables_commit_release(trans); } nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); @@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb) return 0; } -/* Schedule objects for release via rcu to make sure no packets are accesing - * aborted rules. - */ -static void nf_tables_abort_release_rcu(struct rcu_head *rt) +static void nf_tables_abort_release(struct nft_trans *trans) { - struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head); - switch (trans->msg_type) { case NFT_MSG_NEWTABLE: nf_tables_table_destroy(&trans->ctx); @@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb) } } + synchronize_rcu(); + list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); - trans->ctx.nla = NULL; - call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu); + nf_tables_abort_release(trans); } return 0; @@ -3744,6 +3736,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { .abort = nf_tables_abort, }; +int nft_chain_validate_dependency(const struct nft_chain *chain, + enum nft_chain_type type) +{ + const struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + if (basechain->type->type != type) + return -EOPNOTSUPP; + } + return 0; +} +EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); + /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 6c5a915cfa7..13c2e17bbe2 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = { [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, + [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES, + [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT, }; void nfnl_lock(__u8 subsys_id) @@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff 
*skb) static int nfnetlink_bind(int group) { const struct nfnetlink_subsystem *ss; - int type = nfnl_group2type[group]; + int type; + + if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) + return -EINVAL; + + type = nfnl_group2type[group]; rcu_read_lock(); ss = nfnetlink_get_subsys(type); @@ -514,6 +521,9 @@ static int __init nfnetlink_init(void) { int i; + for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++) + BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE); + for (i=0; i<NFNL_SUBSYS_COUNT; i++) mutex_init(&table[i].mutex); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b1e3a057941..5f1be5ba355 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -43,7 +43,8 @@ #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ -#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ +/* max packet size is limited by 16-bit struct nfattr nfa_len field */ +#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) #define PRINTR(x, args...) do { if (net_ratelimit()) \ printk(x, ## args); } while (0); @@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, case NFULNL_COPY_PACKET: inst->copy_mode = mode; + if (range == 0) + range = NFULNL_COPY_RANGE_MAX; inst->copy_range = min_t(unsigned int, range, NFULNL_COPY_RANGE_MAX); break; @@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, return skb; } -static int +static void __nfulnl_send(struct nfulnl_instance *inst) { - int status = -1; - if (inst->qlen > 1) { struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, NLMSG_DONE, sizeof(struct nfgenmsg), 0); - if (!nlh) + if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", + inst->skb->len, skb_tailroom(inst->skb))) { + kfree_skb(inst->skb); goto out; + } } - status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); - + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, + MSG_DONTWAIT); +out: inst->qlen = 0; inst->skb = NULL; -out: - return status; } static void @@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net, + nla_total_size(sizeof(u_int32_t)) /* gid */ + nla_total_size(plen) /* prefix */ + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) - + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); + + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) + + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ if (in && skb_mac_header_was_set(skb)) { size += nla_total_size(skb->dev->hard_header_len) @@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net, break; case NFULNL_COPY_PACKET: - if (inst->copy_range == 0 - || inst->copy_range > skb->len) + if (inst->copy_range > skb->len) data_len = skb->len; else data_len = inst->copy_range; @@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net, goto unlock_and_release; } - if (inst->skb && - size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { + if (inst->skb && size > skb_tailroom(inst->skb)) { /* either the queue len is too high or we don't have * enough room in the skb left. flush to userspace. 
*/ __nfulnl_flush(inst); diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index a82077d9f59..7c60ccd61a3 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) * returned by nf_queue. For instance, callers rely on -ECANCELED to * mean 'ignore this hook'. */ - if (IS_ERR(segs)) + if (IS_ERR_OR_NULL(segs)) goto out_err; queued = 0; err = 0; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7e2683c8a44..265e190f221 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -19,9 +19,24 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> -#include <asm/uaccess.h> /* for set_fs */ #include <net/netfilter/nf_tables.h> +static int nft_compat_chain_validate_dependency(const char *tablename, + const struct nft_chain *chain) +{ + const struct nft_base_chain *basechain; + + if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) + return 0; + + basechain = nft_base_chain(chain); + if (strcmp(tablename, "nat") == 0 && + basechain->type->type != NFT_CHAIN_T_NAT) + return -EINVAL; + + return 0; +} + union nft_entry { struct ipt_entry e4; struct ip6t_entry e6; @@ -74,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, struct xt_target *target, void *info, union nft_entry *entry, u8 proto, bool inv) { - par->net = &init_net; + par->net = ctx->net; par->table = ctx->table->name; switch (ctx->afi->family) { case AF_INET: @@ -95,6 +110,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, const struct nf_hook_ops *ops = &basechain->ops[0]; par->hook_mask = 1 << ops->hooknum; + } else { + par->hook_mask = 0; } par->family = ctx->afi->family; } @@ -151,6 +168,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; + ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); + if (ret < 0) + goto err; + target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -216,6 +237,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, { struct xt_target *target = expr->ops->data; unsigned int hook_mask = 0; + int ret; if (ctx->chain->flags & NFT_BASE_CHAIN) { const struct nft_base_chain *basechain = @@ -223,11 +245,13 @@ static int nft_target_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (hook_mask & target->hooks) - return 0; + if (!(hook_mask & target->hooks)) + return -EINVAL; - /* This target is being called from an invalid chain */ - return -EINVAL; + ret = nft_compat_chain_validate_dependency(target->table, + ctx->chain); + if (ret < 0) + return ret; } return 0; } @@ -272,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, struct xt_match *match, void *info, union nft_entry *entry, u8 proto, bool inv) { - par->net = &init_net; + par->net = ctx->net; par->table = ctx->table->name; switch (ctx->afi->family) { case AF_INET: @@ -293,6 +317,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; par->hook_mask = 1 << ops->hooknum; + } else { + par->hook_mask = 0; } par->family = ctx->afi->family; } @@ -320,6 +346,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union 
nft_entry e = {}; int ret; + ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); + if (ret < 0) + goto err; + match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -379,6 +409,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, { struct xt_match *match = expr->ops->data; unsigned int hook_mask = 0; + int ret; if (ctx->chain->flags & NFT_BASE_CHAIN) { const struct nft_base_chain *basechain = @@ -386,11 +417,13 @@ static int nft_match_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (hook_mask & match->hooks) - return 0; + if (!(hook_mask & match->hooks)) + return -EINVAL; - /* This match is being called from an invalid chain */ - return -EINVAL; + ret = nft_compat_chain_validate_dependency(match->table, + ctx->chain); + if (ret < 0) + return ret; } return 0; } @@ -611,7 +644,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, family = ctx->afi->family; /* Re-use the existing target if it's already loaded. */ - list_for_each_entry(nft_target, &nft_match_list, head) { + list_for_each_entry(nft_target, &nft_target_list, head) { struct xt_target *target = nft_target->ops.data; if (strcmp(target->name, tg_name) == 0 && diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 6637bab0056..d1ffd5eb3a9 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_masq *priv = nft_expr_priv(expr); + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; if (tb[NFTA_MASQ_FLAGS] == NULL) return 0; @@ -55,5 +60,12 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_masq_dump); +int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); +} +EXPORT_SYMBOL_GPL(nft_masq_validate); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 799550b476f..afe2b0b45ec 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, u32 family; int err; - if (tb[NFTA_NAT_TYPE] == NULL) + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + if (tb[NFTA_NAT_TYPE] == NULL || + (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && + tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) return -EINVAL; switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { @@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->family = family; if (tb[NFTA_NAT_REG_ADDR_MIN]) { - priv->sreg_addr_min = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_ADDR_MIN])); + priv->sreg_addr_min = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN])); + err = nft_validate_input_register(priv->sreg_addr_min); if (err < 0) return err; - } - if (tb[NFTA_NAT_REG_ADDR_MAX]) { - priv->sreg_addr_max = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_ADDR_MAX])); - err = nft_validate_input_register(priv->sreg_addr_max); - if (err < 0) - return err; - } else - priv->sreg_addr_max = priv->sreg_addr_min; + if (tb[NFTA_NAT_REG_ADDR_MAX]) { + priv->sreg_addr_max = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX])); + + err = nft_validate_input_register(priv->sreg_addr_max); + 
if (err < 0) + return err; + } else { + priv->sreg_addr_max = priv->sreg_addr_min; + } + } if (tb[NFTA_NAT_REG_PROTO_MIN]) { - priv->sreg_proto_min = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_PROTO_MIN])); + priv->sreg_proto_min = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN])); + err = nft_validate_input_register(priv->sreg_proto_min); if (err < 0) return err; - } - if (tb[NFTA_NAT_REG_PROTO_MAX]) { - priv->sreg_proto_max = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_PROTO_MAX])); - err = nft_validate_input_register(priv->sreg_proto_max); - if (err < 0) - return err; - } else - priv->sreg_proto_max = priv->sreg_proto_min; + if (tb[NFTA_NAT_REG_PROTO_MAX]) { + priv->sreg_proto_max = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX])); + + err = nft_validate_input_register(priv->sreg_proto_max); + if (err < 0) + return err; + } else { + priv->sreg_proto_max = priv->sreg_proto_min; + } + } if (tb[NFTA_NAT_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); @@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) goto nla_put_failure; - if (nla_put_be32(skb, - NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min))) - goto nla_put_failure; - if (nla_put_be32(skb, - NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) - goto nla_put_failure; + + if (priv->sreg_addr_min) { + if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN, + htonl(priv->sreg_addr_min)) || + nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX, + htonl(priv->sreg_addr_max))) + goto nla_put_failure; + } + if (priv->sreg_proto_min) { if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, - htonl(priv->sreg_proto_min))) - goto nla_put_failure; - if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, + htonl(priv->sreg_proto_min)) || + nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max))) goto nla_put_failure; } @@ -205,6 +219,13 @@ nla_put_failure: return -1; } +static int nft_nat_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); +} + static struct nft_expr_type nft_nat_type; static const struct nft_expr_ops nft_nat_ops = { .type = &nft_nat_type, @@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = { .eval = nft_nat_eval, .init = nft_nat_init, .dump = nft_nat_dump, + .validate = nft_nat_validate, }; static struct nft_expr_type nft_nat_type __read_mostly = { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7a186e74b1b..0007b818039 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void netlink_skb_destructor(struct sk_buff *skb); +/* nl_table locking explained: + * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock + * combined with an RCU read-side lock. Insertion and removal are protected + * with nl_sk_hash_lock while using RCU list modification primitives and may + * run in parallel to nl_table_lock protected lookups. Destruction of the + * Netlink socket may only occur *after* nl_table_lock has been acquired + * either during or after the socket has been removed from the list. + */ DEFINE_RWLOCK(nl_table_lock); EXPORT_SYMBOL_GPL(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); @@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock); static int lockdep_nl_sk_hash_is_held(void) { #ifdef CONFIG_LOCKDEP - return (debug_locks) ? 
lockdep_is_held(&nl_sk_hash_lock) : 1; -#else - return 1; + if (debug_locks) + return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); #endif + return 1; } static ATOMIC_NOTIFIER_HEAD(netlink_chain); @@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) struct netlink_table *table = &nl_table[protocol]; struct sock *sk; + read_lock(&nl_table_lock); rcu_read_lock(); sk = __netlink_lookup(table, portid, net); if (sk) sock_hold(sk); rcu_read_unlock(); + read_unlock(&nl_table_lock); return sk; } @@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock) } netlink_table_ungrab(); - /* Wait for readers to complete */ - synchronize_net(); - kfree(nlk->groups); nlk->groups = NULL; @@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock) retry: cond_resched(); + netlink_table_grab(); rcu_read_lock(); if (__netlink_lookup(table, portid, net)) { /* Bind collision, search negative portid values. */ @@ -1288,9 +1296,11 @@ retry: if (rover > -4097) rover = -4097; rcu_read_unlock(); + netlink_table_ungrab(); goto retry; } rcu_read_unlock(); + netlink_table_ungrab(); err = netlink_insert(sk, net, portid); if (err == -EADDRINUSE) @@ -1430,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups, return; for (undo = 0; undo < group; undo++) - if (test_bit(group, &groups)) + if (test_bit(undo, &groups)) nlk->netlink_unbind(undo); } @@ -1482,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) { - netlink_unbind(nlk->ngroups - 1, groups, nlk); + netlink_unbind(nlk->ngroups, groups, nlk); return err; } } @@ -2499,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, nl_table[unit].module = module; if (cfg) { nl_table[unit].bind = cfg->bind; + nl_table[unit].unbind = cfg->unbind; nl_table[unit].flags = cfg->flags; if (cfg->compare) nl_table[unit].compare = cfg->compare; @@ -2921,14 +2932,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) } static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) + __acquires(nl_table_lock) __acquires(RCU) { + read_lock(&nl_table_lock); rcu_read_lock(); return *pos ? 
netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct rhashtable *ht; struct netlink_sock *nlk; struct nl_seq_iter *iter; struct net *net; @@ -2943,19 +2956,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) iter = seq->private; nlk = v; - rht_for_each_entry_rcu(nlk, nlk->node.next, node) + i = iter->link; + ht = &nl_table[i].hash; + rht_for_each_entry(nlk, nlk->node.next, ht, node) if (net_eq(sock_net((struct sock *)nlk), net)) return nlk; - i = iter->link; j = iter->hash_idx + 1; do { - struct rhashtable *ht = &nl_table[i].hash; const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); for (; j < tbl->size; j++) { - rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { + rht_for_each_entry(nlk, tbl->buckets[j], ht, node) { if (net_eq(sock_net((struct sock *)nlk), net)) { iter->link = i; iter->hash_idx = j; @@ -2971,9 +2984,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void netlink_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) + __releases(RCU) __releases(nl_table_lock) { rcu_read_unlock(); + read_unlock(&nl_table_lock); } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee3..8c4229b11c3 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, { int transport_len = skb->len - skb_transport_offset(skb); - if (l4_proto == IPPROTO_TCP) { + if (l4_proto == NEXTHDR_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, addr, new_addr, 1); - } else if (l4_proto == IPPROTO_UDP) { + } else if (l4_proto == NEXTHDR_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); @@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, uh->check = CSUM_MANGLED_0; } } + } else if (l4_proto == NEXTHDR_ICMP) { + if (likely(transport_len >= sizeof(struct icmp6hdr))) + inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, + skb, addr, new_addr, 1); } } @@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, case OVS_ACTION_ATTR_SAMPLE: err = sample(dp, skb, key, a); - if (unlikely(err)) /* skb already freed. */ - return err; break; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2e31d9e7f4d..f9e556b5608 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, segs = __skb_gso_segment(skb, NETIF_F_SG, false); if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; /* Queue all of the segments. */ skb = segs; @@ -1263,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void) return msgsize; } -/* Called with ovs_mutex or RCU read lock. */ +/* Called with ovs_mutex. 
*/ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, u32 portid, u32 seq, u32 flags, u8 cmd) { @@ -1553,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) if (!reply) return -ENOMEM; - rcu_read_lock(); + ovs_lock(); dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); if (IS_ERR(dp)) { err = PTR_ERR(dp); @@ -1562,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, info->snd_seq, 0, OVS_DP_CMD_NEW); BUG_ON(err < 0); - rcu_read_unlock(); + ovs_unlock(); return genlmsg_reply(reply, info); err_unlock_free: - rcu_read_unlock(); + ovs_unlock(); kfree_skb(reply); return err; } @@ -1579,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int skip = cb->args[0]; int i = 0; - rcu_read_lock(); - list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { + ovs_lock(); + list_for_each_entry(dp, &ovs_net->dps, list_node) { if (i >= skip && ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, @@ -1588,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) break; i++; } - rcu_read_unlock(); + ovs_unlock(); cb->args[0] = i; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 939bcb32100..089b195c064 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match, if (match->key->eth.type == htons(ETH_P_ARP) || match->key->eth.type == htons(ETH_P_RARP)) { key_expected |= 1 << OVS_KEY_ATTR_ARP; - if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + if (match->mask && (match->mask->key.tp.src == htons(0xff))) mask_allowed |= 1 << OVS_KEY_ATTR_ARP; } @@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } + + if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { + OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n", + ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); + return -EINVAL; + } + SW_FLOW_KEY_PUT(match, ipv6.label, ipv6_key->ipv6_label, is_mask); SW_FLOW_KEY_PUT(match, ip.proto, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 87d20f48ff0..07c04a841ba 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync) __unregister_prot_hook(sk, sync); } -static inline __pure struct page *pgv_to_page(void *addr) +static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2cf61b3e633..76f402e05bd 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { if (qdisc_is_percpu_stats(sch)) { sch->cpu_bstats = - alloc_percpu(struct gnet_stats_basic_cpu); + netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); if (!sch->cpu_bstats) goto err_out4; diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 33d7a98a7a9..b783a446d88 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = q->params.limit; setup_timer(&q->adapt_timer, pie_timer, 
(unsigned long)sch); - mod_timer(&q->adapt_timer, jiffies + HZ / 2); if (opt) { int err = pie_change(sch, opt); @@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) return err; } + mod_timer(&q->adapt_timer, jiffies + HZ / 2); return 0; } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0e8529113dc..fb7976aee61 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, list_add(&cur_key->key_list, sh_keys); cur_key->key = key; - sctp_auth_key_hold(key); - return 0; nomem: if (!replace) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ab734be8cb2..9f32741abb1 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2609,6 +2609,9 @@ do_addr_param: addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); + if (af == NULL) + break; + af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index afb292cd797..53ed8d3f889 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred) char *string = NULL; struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx; + unsigned int len; struct xdr_netobj *acceptor; rcu_read_lock(); @@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred) if (!ctx) goto out; - acceptor = &ctx->gc_acceptor; + len = ctx->gc_acceptor.len; + rcu_read_unlock(); /* no point if there's no string */ - if (!acceptor->len) - goto out; - - string = kmalloc(acceptor->len + 1, GFP_KERNEL); + if (!len) + return NULL; +realloc: + string = kmalloc(len + 1, GFP_KERNEL); if (!string) + return NULL; + + rcu_read_lock(); + ctx = rcu_dereference(gss_cred->gc_ctx); + + /* did the ctx disappear or was it replaced by one with no acceptor? */ + if (!ctx || !ctx->gc_acceptor.len) { + kfree(string); + string = NULL; goto out; + } + + acceptor = &ctx->gc_acceptor; + + /* + * Did we find a new acceptor that's longer than the original? Allocate + * a longer buffer and try again. + */ + if (len < acceptor->len) { + len = acceptor->len; + rcu_read_unlock(); + kfree(string); + goto realloc; + } memcpy(string, acceptor->data, acceptor->len); string[acceptor->len] = '\0'; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3f959c68188..f9c052d508f 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) xid = *p++; calldir = *p; - if (bc_xprt) - req = xprt_lookup_rqst(bc_xprt, xid); - - if (!req) { - printk(KERN_NOTICE - "%s: Got unrecognized reply: " - "calldir 0x%x xpt_bc_xprt %p xid %08x\n", - __func__, ntohl(calldir), - bc_xprt, ntohl(xid)); + if (!bc_xprt) return -EAGAIN; - } + spin_lock_bh(&bc_xprt->transport_lock); + req = xprt_lookup_rqst(bc_xprt, xid); + if (!req) + goto unlock_notfound; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); /* @@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) dst = &req->rq_private_buf.head[0]; src = &rqstp->rq_arg.head[0]; if (dst->iov_len < src->iov_len) - return -EAGAIN; /* whatever; just giving up. */ + goto unlock_eagain; /* whatever; just giving up. 
*/ memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); rqstp->rq_arg.len = 0; + spin_unlock_bh(&bc_xprt->transport_lock); return 0; +unlock_notfound: + printk(KERN_NOTICE + "%s: Got unrecognized reply: " + "calldir 0x%x xpt_bc_xprt %p xid %08x\n", + __func__, ntohl(calldir), + bc_xprt, ntohl(xid)); +unlock_eagain: + spin_unlock_bh(&bc_xprt->transport_lock); + return -EAGAIN; } static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) diff --git a/net/tipc/node.c b/net/tipc/node.c index 90cee4a6fce..5781634e957 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -219,11 +219,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns) void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; - u32 addr = n_ptr->addr; n_ptr->working_links++; - tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, - l_ptr->bearer_id, addr); + n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; + n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; + pr_info("Established link <%s> on network plane %c\n", l_ptr->name, l_ptr->net_plane); @@ -284,10 +284,10 @@ static void node_select_active_links(struct tipc_node *n_ptr) void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active; - u32 addr = n_ptr->addr; n_ptr->working_links--; - tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); + n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN; + n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; if (!tipc_link_is_active(l_ptr)) { pr_info("Lost standby link <%s> on network plane %c\n", @@ -552,28 +552,30 @@ void tipc_node_unlock(struct tipc_node *node) LIST_HEAD(conn_sks); struct sk_buff_head waiting_sks; u32 addr = 0; - unsigned int flags = node->action_flags; + int flags = node->action_flags; + u32 link_id = 0; - if (likely(!node->action_flags)) { + if (likely(!flags)) { spin_unlock_bh(&node->lock); return; } + addr = node->addr; + link_id = node->link_id; __skb_queue_head_init(&waiting_sks); - if (node->action_flags & TIPC_WAKEUP_USERS) { + + if (flags & TIPC_WAKEUP_USERS) skb_queue_splice_init(&node->waiting_sks, &waiting_sks); - node->action_flags &= ~TIPC_WAKEUP_USERS; - } - if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { + + if (flags & TIPC_NOTIFY_NODE_DOWN) { list_replace_init(&node->nsub, &nsub_list); list_replace_init(&node->conn_sks, &conn_sks); - node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; } - if (node->action_flags & TIPC_NOTIFY_NODE_UP) { - node->action_flags &= ~TIPC_NOTIFY_NODE_UP; - addr = node->addr; - } - node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; + node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | + TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP | + TIPC_NOTIFY_LINK_DOWN | + TIPC_WAKEUP_BCAST_USERS); + spin_unlock_bh(&node->lock); while (!skb_queue_empty(&waiting_sks)) @@ -588,6 +590,14 @@ void tipc_node_unlock(struct tipc_node *node) if (flags & TIPC_WAKEUP_BCAST_USERS) tipc_bclink_wakeup_users(); - if (addr) + if (flags & TIPC_NOTIFY_NODE_UP) tipc_named_node_up(addr); + + if (flags & TIPC_NOTIFY_LINK_UP) + tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, + TIPC_NODE_SCOPE, link_id, addr); + + if (flags & TIPC_NOTIFY_LINK_DOWN) + tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, + link_id, addr); } diff --git a/net/tipc/node.h b/net/tipc/node.h index 67513c3c852..04e91458bb2 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -53,6 +53,7 @@ * 
TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down * TIPC_NOTIFY_NODE_DOWN: notify node is down * TIPC_NOTIFY_NODE_UP: notify node is up + * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type */ enum { TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), @@ -60,7 +61,9 @@ enum { TIPC_NOTIFY_NODE_DOWN = (1 << 3), TIPC_NOTIFY_NODE_UP = (1 << 4), TIPC_WAKEUP_USERS = (1 << 5), - TIPC_WAKEUP_BCAST_USERS = (1 << 6) + TIPC_WAKEUP_BCAST_USERS = (1 << 6), + TIPC_NOTIFY_LINK_UP = (1 << 7), + TIPC_NOTIFY_LINK_DOWN = (1 << 8) }; /** @@ -100,6 +103,7 @@ struct tipc_node_bclink { * @working_links: number of working links to node (both active and standby) * @link_cnt: number of links to node * @signature: node instance identifier + * @link_id: local and remote bearer ids of changing link, if any * @nsub: list of "node down" subscriptions monitoring node * @rcu: rcu struct for tipc_node */ @@ -116,6 +120,7 @@ struct tipc_node { int link_cnt; int working_links; u32 signature; + u32 link_id; struct list_head nsub; struct sk_buff_head waiting_sks; struct list_head conn_sks; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 75275c5cf92..51bddc236a1 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1776,7 +1776,7 @@ int tipc_sk_rcv(struct sk_buff *buf) sk = &tsk->sk; /* Queue message */ - bh_lock_sock(sk); + spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) { rc = filter_rcv(sk, buf); @@ -1787,7 +1787,7 @@ int tipc_sk_rcv(struct sk_buff *buf) if (sk_add_backlog(sk, buf, limit)) rc = -TIPC_ERR_OVERLOAD; } - bh_unlock_sock(sk); + spin_unlock_bh(&sk->sk_lock.slock); tipc_sk_put(tsk); if (likely(!rc)) return 0; @@ -2673,7 +2673,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) case SIOCGETLINKNAME: if (copy_from_user(&lnr, argp, sizeof(lnr))) return -EFAULT; - if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, + if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, lnr.linkname, TIPC_MAX_LINK_NAME)) { if (copy_to_user(argp, &lnr, sizeof(lnr))) return -EFAULT; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index cb9f5a44ffa..5839c85075f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5927,6 +5927,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) int err; bool need_new_beacon = false; int len, i; + u32 cs_count; if (!rdev->ops->channel_switch || !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) @@ -5963,7 +5964,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) return -EINVAL; - params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); + /* Even though the attribute is u32, the specification says + * u8, so let's make sure we don't overflow. 
+ */ + cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); + if (cs_count > 255) + return -EINVAL; + + params.count = cs_count; if (!need_new_beacon) goto skip_beacons; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 499d6c18a8c..7c532856b39 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb) kfree_skb(skb); if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; do { struct sk_buff *nskb = segs->next; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4c4e457e788..88bf289abdc 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) struct xfrm_policy *pol = xdst->pols[0]; struct xfrm_policy_queue *pq = &pol->polq; - if (unlikely(skb_fclone_busy(skb))) { + if (unlikely(skb_fclone_busy(sk, skb))) { kfree_skb(skb); return 0; } diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index f44ef11f65a..eb4bec0ad8a 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -209,6 +209,17 @@ static struct bpf_test tests[] = { .result = REJECT, }, { + "program doesn't init R0 before exit in all branches", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, + }, + { "stack out of bounds", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 9685af330de..c5ee1a7c5e8 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -319,9 +319,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, { const struct evm_ima_xattr_data *xattr_data = xattr_value; - if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0) - && (xattr_data->type == EVM_XATTR_HMAC)) - return -EPERM; + if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { + if (!xattr_value_len) + return -EINVAL; + if (xattr_data->type != EVM_IMA_XATTR_DIGSIG) + return -EPERM; + } return evm_protect_xattr(dentry, xattr_name, xattr_value, xattr_value_len); } diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 922685483bd..7c8f41e618b 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -378,6 +378,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, result = ima_protect_xattr(dentry, xattr_name, xattr_value, xattr_value_len); if (result == 1) { + if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST)) + return -EINVAL; ima_reset_appraise_flags(dentry->d_inode, (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 
1 : 0); result = 0; diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index c0379d13dbe..9d1c2ebfe12 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h @@ -61,6 +61,7 @@ enum evm_ima_xattr_type { EVM_XATTR_HMAC, EVM_IMA_XATTR_DIGSIG, IMA_XATTR_DIGEST_NG, + IMA_XATTR_LAST }; struct evm_ima_xattr_data { diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e66314138b3..c603b20356a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4725,9 +4725,10 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); if (err) { if (err == -EINVAL) { - WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:" - " protocol=%hu nlmsg_type=%hu sclass=%hu\n", - sk->sk_protocol, nlh->nlmsg_type, sksec->sclass); + printk(KERN_WARNING + "SELinux: unrecognized netlink message:" + " protocol=%hu nlmsg_type=%hu sclass=%hu\n", + sk->sk_protocol, nlh->nlmsg_type, sksec->sclass); if (!selinux_enforcing || security_get_allow_unknown()) err = 0; } diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 42ded997b22..c6ff94ab1ad 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = { FORMAT(DSD_U8), FORMAT(DSD_U16_LE), FORMAT(DSD_U32_LE), + FORMAT(DSD_U16_BE), + FORMAT(DSD_U32_BE), }; const char *snd_pcm_format_name(snd_pcm_format_t format) diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c index 102e8fd1d45..2d957ba6355 100644 --- a/sound/core/pcm_compat.c +++ b/sound/core/pcm_compat.c @@ -210,6 +210,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream, if (err < 0) return err; + if (clear_user(src, sizeof(*src))) + return -EFAULT; if (put_user(status.state, &src->state) || compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) || compat_put_timespec(&status.tstamp, &src->tstamp) || diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c index ae7a0feb3b7..ebe8444de6c 100644 --- a/sound/core/pcm_misc.c +++ b/sound/core/pcm_misc.c @@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = { .width = 32, .phys = 32, .le = 1, .signd = 0, .silence = { 0x69, 0x69, 0x69, 0x69 }, }, + [SNDRV_PCM_FORMAT_DSD_U16_BE] = { + .width = 16, .phys = 16, .le = 0, .signd = 0, + .silence = { 0x69, 0x69 }, + }, + [SNDRV_PCM_FORMAT_DSD_U32_BE] = { + .width = 32, .phys = 32, .le = 0, .signd = 0, + .silence = { 0x69, 0x69, 0x69, 0x69 }, + }, /* FIXME: the following three formats are not defined properly yet */ [SNDRV_PCM_FORMAT_MPEG] = { .le = -1, .signd = -1, diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index bfe1cf6b492..166d59cdc86 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -781,16 +781,15 @@ static int snd_pcm_action_group(struct action_ops *ops, { struct snd_pcm_substream *s = NULL; struct snd_pcm_substream *s1; - int res = 0; + int res = 0, depth = 1; snd_pcm_group_for_each_entry(s, substream) { if (do_lock && s != substream) { if (s->pcm->nonatomic) - mutex_lock_nested(&s->self_group.mutex, - SINGLE_DEPTH_NESTING); + mutex_lock_nested(&s->self_group.mutex, depth); else - spin_lock_nested(&s->self_group.lock, - SINGLE_DEPTH_NESTING); + spin_lock_nested(&s->self_group.lock, depth); + depth++; } res = ops->pre_action(s, state); if (res < 0) @@ -906,8 +905,7 @@ static int snd_pcm_action_lock_mutex(struct action_ops *ops, down_read(&snd_pcm_link_rwsem); if 
(snd_pcm_stream_linked(substream)) { mutex_lock(&substream->group->mutex); - mutex_lock_nested(&substream->self_group.mutex, - SINGLE_DEPTH_NESTING); + mutex_lock(&substream->self_group.mutex); res = snd_pcm_action_group(ops, substream, state, 1); mutex_unlock(&substream->self_group.mutex); mutex_unlock(&substream->group->mutex); @@ -3311,7 +3309,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { #ifndef ARCH_HAS_DMA_MMAP_COHERENT /* This should be defined / handled globally! */ -#ifdef CONFIG_ARM +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) #define ARCH_HAS_DMA_MMAP_COHERENT #endif #endif diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c index 45a0eed6d5b..3b052ed0fbf 100644 --- a/sound/firewire/bebob/bebob_focusrite.c +++ b/sound/firewire/bebob/bebob_focusrite.c @@ -27,12 +27,14 @@ #define SAFFIRE_CLOCK_SOURCE_INTERNAL 0 #define SAFFIRE_CLOCK_SOURCE_SPDIF 1 -/* '1' is absent, why... */ +/* clock sources as returned from register of Saffire Pro 10 and 26 */ #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0 +#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */ #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2 -#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 -#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 +#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 /* not used on s.pro. 10 */ +#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 /* not used on s.pro. 10 */ #define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5 +#define SAFFIREPRO_CLOCK_SOURCE_COUNT 6 /* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */ #define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4 @@ -101,13 +103,34 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value) &data, sizeof(__be32), 0); } +static char *const saffirepro_10_clk_src_labels[] = { + SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock" +}; static char *const saffirepro_26_clk_src_labels[] = { SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock" }; - -static char *const saffirepro_10_clk_src_labels[] = { - SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock" +/* Value maps between registers and labels for SaffirePro 10/26. */ +static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = { + /* SaffirePro 10 */ + [0] = { + [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0, + [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */ + [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1, + [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = -1, /* not supported */ + [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = -1, /* not supported */ + [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 2, + }, + /* SaffirePro 26 */ + [1] = { + [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0, + [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */ + [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1, + [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = 2, + [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = 3, + [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 4, + } }; + static int saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate) { @@ -138,24 +161,35 @@ saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate) return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id); } + +/* + * query hardware for current clock source, return our internally + * used clock index in *id, depending on hardware. 
+ */ static int saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) { int err; - u32 value; + u32 value; /* clock source read from hw register */ + const signed char *map; err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value); if (err < 0) goto end; - if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) { - if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK) - *id = 2; - else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF) - *id = 1; - } else if (value > 1) { - *id = value - 1; + /* depending on hardware, use a different mapping */ + if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) + map = saffirepro_clk_maps[0]; + else + map = saffirepro_clk_maps[1]; + + /* In a case that this driver cannot handle the value of register. */ + if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) { + err = -EIO; + goto end; } + + *id = (unsigned int)map[value]; end: return err; } diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index ef4d0c9f657..1aab0a32870 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -129,12 +129,24 @@ snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal) /* 1.The device has its own operation to switch source of clock */ if (clk_spec) { err = clk_spec->get(bebob, &id); - if (err < 0) + if (err < 0) { dev_err(&bebob->unit->device, "fail to get clock source: %d\n", err); - else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL, - strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0) + goto end; + } + + if (id >= clk_spec->num) { + dev_err(&bebob->unit->device, + "clock source %d out of range 0..%d\n", + id, clk_spec->num - 1); + err = -EIO; + goto end; + } + + if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL, + strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0) *internal = true; + goto end; } diff --git a/sound/firewire/bebob/bebob_terratec.c b/sound/firewire/bebob/bebob_terratec.c index 0e4c0bfc463..9940611f2e1 100644 --- a/sound/firewire/bebob/bebob_terratec.c +++ b/sound/firewire/bebob/bebob_terratec.c @@ -24,7 +24,12 @@ phase88_rack_clk_src_get(struct snd_bebob *bebob, unsigned int *id) if (err < 0) goto end; - *id = (enable_ext & 0x01) | ((enable_word & 0x01) << 1); + if (enable_ext == 0) + *id = 0; + else if (enable_word == 0) + *id = 1; + else + *id = 2; end: return err; } diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c index 7bfdf9c5141..1610c38337a 100644 --- a/sound/pci/ad1889.c +++ b/sound/pci/ad1889.c @@ -681,7 +681,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe /* WARQ is at offset 12 */ tmp = (reg & AD_DS_WSMC_WARQ) ? - (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4; + ((((reg & AD_DS_WSMC_WARQ) >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp, @@ -693,7 +693,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe /* SYRQ is at offset 4 */ tmp = (reg & AD_DS_WSMC_SYRQ) ? - (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4; + ((((reg & AD_DS_WSMC_SYRQ) >> 4) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp, @@ -709,7 +709,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe /* ACRQ is at offset 4 */ tmp = (reg & AD_DS_RAMC_ACRQ) ? - (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4; + ((((reg & AD_DS_RAMC_ACRQ) >> 4) & 0x01) ? 
12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp, @@ -720,7 +720,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe /* RERQ is at offset 12 */ tmp = (reg & AD_DS_RAMC_RERQ) ? - (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4; + ((((reg & AD_DS_RAMC_RERQ) >> 12) & 0x01) ? 12 : 18) : 4; tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp, diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index cfcca4c30d4..48b6c5a3884 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -219,6 +219,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," "{Intel, LPT_LP}," "{Intel, WPT_LP}," "{Intel, SPT}," + "{Intel, SPT_LP}," "{Intel, HPT}," "{Intel, PBG}," "{Intel, SCH}," @@ -297,7 +298,8 @@ enum { /* quirks for ATI/AMD HDMI */ #define AZX_DCAPS_PRESET_ATI_HDMI \ - (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) + (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\ + AZX_DCAPS_NO_MSI64) /* quirks for Nvidia */ #define AZX_DCAPS_PRESET_NVIDIA \ @@ -374,6 +376,8 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool #ifdef CONFIG_SND_DMA_SGBUF if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) { struct snd_sg_buf *sgbuf = dmab->private_data; + if (chip->driver_type == AZX_DRIVER_CMEDIA) + return; /* deal with only CORB/RIRB buffers */ if (on) set_pages_array_wc(sgbuf->page_table, sgbuf->pages); else @@ -1483,6 +1487,7 @@ static int azx_first_init(struct azx *chip) struct snd_card *card = chip->card; int err; unsigned short gcap; + unsigned int dma_bits = 64; #if BITS_PER_LONG != 64 /* Fix up base address on ULI M5461 */ @@ -1506,9 +1511,14 @@ static int azx_first_init(struct azx *chip) return -ENXIO; } - if (chip->msi) + if (chip->msi) { + if (chip->driver_caps & AZX_DCAPS_NO_MSI64) { + dev_dbg(card->dev, "Disabling 64bit MSI\n"); + pci->no_64bit_msi = true; + } if (pci_enable_msi(pci) < 0) chip->msi = 0; + } if (azx_acquire_irq(chip, 0) < 0) return -EBUSY; @@ -1519,9 +1529,14 @@ static int azx_first_init(struct azx *chip) gcap = azx_readw(chip, GCAP); dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); + /* AMD devices support 40 or 48bit DMA, take the safe one */ + if (chip->pci->vendor == PCI_VENDOR_ID_AMD) + dma_bits = 40; + /* disable SB600 64bit support for safety */ if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { struct pci_dev *p_smbus; + dma_bits = 40; p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); @@ -1551,9 +1566,11 @@ static int azx_first_init(struct azx *chip) } /* allow 64bit DMA address if supported by H/W */ - if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) - pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); - else { + if (!(gcap & AZX_GCAP_64OK)) + dma_bits = 32; + if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) { + pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits)); + } else { pci_set_dma_mask(pci, DMA_BIT_MASK(32)); pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); } @@ -1769,7 +1786,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream, #ifdef CONFIG_X86 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); struct azx *chip = apcm->chip; - if (!azx_snoop(chip)) + if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA) area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); #endif } @@ -2002,6 +2019,9 @@ static const struct pci_device_id 
azx_ids[] = { /* Sunrise Point */ { PCI_DEVICE(0x8086, 0xa170), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + /* Sunrise Point-LP */ + { PCI_DEVICE(0x8086, 0x9d70), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0a0c), .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 7eb44e78e14..62658f2f8c9 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -419,7 +419,7 @@ struct snd_hda_pin_quirk { .subvendor = _subvendor,\ .name = _name,\ .value = _value,\ - .pins = (const struct hda_pintbl[]) { _pins } \ + .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \ } #else @@ -427,7 +427,7 @@ struct snd_hda_pin_quirk { { .codec = _codec,\ .subvendor = _subvendor,\ .value = _value,\ - .pins = (const struct hda_pintbl[]) { _pins } \ + .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \ } #endif diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h index 949cd437eeb..5016014e57f 100644 --- a/sound/pci/hda/hda_priv.h +++ b/sound/pci/hda/hda_priv.h @@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ +#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ /* HD Audio class code */ #define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 71e4bad0634..e9ebc7bd752 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -43,6 +43,7 @@ struct conexant_spec { unsigned int num_eapds; hda_nid_t eapds[4]; bool dynamic_eapd; + hda_nid_t mute_led_eapd; unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */ @@ -163,6 +164,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled) cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled); } +/* turn on/off EAPD according to Master switch (inversely!) for mute LED */ +static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled) +{ + struct hda_codec *codec = private_data; + struct conexant_spec *spec = codec->spec; + + snd_hda_codec_write(codec, spec->mute_led_eapd, 0, + AC_VERB_SET_EAPD_BTLENABLE, + enabled ? 
0x00 : 0x02); +} + static int cx_auto_build_controls(struct hda_codec *codec) { int err; @@ -223,6 +235,7 @@ enum { CXT_FIXUP_TOSHIBA_P105, CXT_FIXUP_HP_530, CXT_FIXUP_CAP_MIX_AMP_5047, + CXT_FIXUP_MUTE_LED_EAPD, }; /* for hda_fixup_thinkpad_acpi() */ @@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec, } } +static void cxt_fixup_mute_led_eapd(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct conexant_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_eapd = 0x1b; + spec->dynamic_eapd = 1; + spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led; + } +} + /* * Fix max input level on mixer widget to 0dB * (originally it has 0x2b steps with 0dB offset 0x14) @@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_cap_mix_amp_5047, }, + [CXT_FIXUP_MUTE_LED_EAPD] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_mute_led_eapd, + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -762,6 +791,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), @@ -780,6 +810,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, + { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, {} }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 39862e98551..9dc9cf8c90e 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1583,19 +1583,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) } } - if (pin_eld->eld_valid && !eld->eld_valid) { - update_eld = true; + if (pin_eld->eld_valid != eld->eld_valid) eld_changed = true; - } + + if (pin_eld->eld_valid && !eld->eld_valid) + update_eld = true; + if (update_eld) { bool old_eld_valid = pin_eld->eld_valid; pin_eld->eld_valid = eld->eld_valid; - eld_changed = pin_eld->eld_size != eld->eld_size || + if (pin_eld->eld_size != eld->eld_size || memcmp(pin_eld->eld_buffer, eld->eld_buffer, - eld->eld_size) != 0; - if (eld_changed) + eld->eld_size) != 0) { memcpy(pin_eld->eld_buffer, eld->eld_buffer, eld->eld_size); + eld_changed = true; + } pin_eld->eld_size = eld->eld_size; pin_eld->info = eld->info; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bc86c36b4bf..14f16be3f37 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -288,21 +288,91 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res) snd_hda_jack_unsol_event(codec, res >> 2); } -/* additional initialization for ALC888 variants */ -static void alc888_coef_init(struct hda_codec *codec) +/* Change EAPD to verb control */ +static void alc_fill_eapd_coef(struct hda_codec *codec) { - if (alc_get_coef0(codec) == 0x20) - /* alc888S-VC */ - alc_write_coef_idx(codec, 7, 0x830); - else - /* alc888-VB */ - 
alc_write_coef_idx(codec, 7, 0x3030); + int coef; + + coef = alc_get_coef0(codec); + + switch (codec->vendor_id) { + case 0x10ec0262: + alc_update_coef_idx(codec, 0x7, 0, 1<<5); + break; + case 0x10ec0267: + case 0x10ec0268: + alc_update_coef_idx(codec, 0x7, 0, 1<<13); + break; + case 0x10ec0269: + if ((coef & 0x00f0) == 0x0010) + alc_update_coef_idx(codec, 0xd, 0, 1<<14); + if ((coef & 0x00f0) == 0x0020) + alc_update_coef_idx(codec, 0x4, 1<<15, 0); + if ((coef & 0x00f0) == 0x0030) + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; + case 0x10ec0280: + case 0x10ec0284: + case 0x10ec0290: + case 0x10ec0292: + alc_update_coef_idx(codec, 0x4, 1<<15, 0); + break; + case 0x10ec0233: + case 0x10ec0255: + case 0x10ec0282: + case 0x10ec0283: + case 0x10ec0286: + case 0x10ec0288: + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; + case 0x10ec0285: + case 0x10ec0293: + alc_update_coef_idx(codec, 0xa, 1<<13, 0); + break; + case 0x10ec0662: + if ((coef & 0x00f0) == 0x0030) + alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */ + break; + case 0x10ec0272: + case 0x10ec0273: + case 0x10ec0663: + case 0x10ec0665: + case 0x10ec0670: + case 0x10ec0671: + case 0x10ec0672: + alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */ + break; + case 0x10ec0668: + alc_update_coef_idx(codec, 0x7, 3<<13, 0); + break; + case 0x10ec0867: + alc_update_coef_idx(codec, 0x4, 1<<10, 0); + break; + case 0x10ec0888: + if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030) + alc_update_coef_idx(codec, 0x7, 1<<5, 0); + break; + case 0x10ec0892: + alc_update_coef_idx(codec, 0x7, 1<<5, 0); + break; + case 0x10ec0899: + case 0x10ec0900: + alc_update_coef_idx(codec, 0x7, 1<<1, 0); + break; + } } -/* additional initialization for ALC889 variants */ -static void alc889_coef_init(struct hda_codec *codec) +/* additional initialization for ALC888 variants */ +static void alc888_coef_init(struct hda_codec *codec) { - alc_update_coef_idx(codec, 7, 0, 0x2010); + switch (alc_get_coef0(codec) & 0x00f0) { + /* alc888-VA */ + case 0x00: + /* alc888-VB */ + case 0x10: + alc_update_coef_idx(codec, 7, 0, 0x2030); /* Turn EAPD to High */ + break; + } } /* turn on/off EAPD control (only if available) */ @@ -343,6 +413,7 @@ static void alc_eapd_shutup(struct hda_codec *codec) /* generic EAPD initialization */ static void alc_auto_init_amp(struct hda_codec *codec, int type) { + alc_fill_eapd_coef(codec); alc_auto_setup_eapd(codec, true); switch (type) { case ALC_INIT_GPIO1: @@ -359,25 +430,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) case 0x10ec0260: alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010); break; - case 0x10ec0262: case 0x10ec0880: case 0x10ec0882: case 0x10ec0883: case 0x10ec0885: - case 0x10ec0887: - /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ - case 0x10ec0900: - alc889_coef_init(codec); + alc_update_coef_idx(codec, 7, 0, 0x2030); break; case 0x10ec0888: alc888_coef_init(codec); break; -#if 0 /* XXX: This may cause the silent output on speaker on some machines */ - case 0x10ec0267: - case 0x10ec0268: - alc_update_coef_idx(codec, 7, 0, 0x3000); - break; -#endif /* XXX */ } break; } @@ -1710,7 +1771,7 @@ static void alc889_fixup_coef(struct hda_codec *codec, { if (action != HDA_FIXUP_ACT_INIT) return; - alc889_coef_init(codec); + alc_update_coef_idx(codec, 7, 0, 0x2030); } /* toggle speaker-output according to the hp-jack state */ @@ -2675,7 +2736,7 @@ static void alc269_shutup(struct hda_codec *codec) static struct coef_fw alc282_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power 
Down Control */ - WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */ + UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ @@ -2786,7 +2847,7 @@ static void alc282_shutup(struct hda_codec *codec) static struct coef_fw alc283_coefs[] = { WRITE_COEF(0x03, 0x0002), /* Power Down Control */ - WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */ + UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */ WRITE_COEF(0x07, 0x0200), /* DMIC control */ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ @@ -2817,6 +2878,7 @@ static struct coef_fw alc283_coefs[] = { UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */ UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */ WRITE_COEF(0x37, 0xfc06), /* Class D amp control */ + UPDATE_COEF(0x1b, 0x8000, 0), /* HP JD control */ {} }; @@ -2884,6 +2946,9 @@ static void alc283_shutup(struct hda_codec *codec) alc_write_coef_idx(codec, 0x43, 0x9004); + /*depop hp during suspend*/ + alc_write_coef_idx(codec, 0x06, 0x2100); + snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); @@ -3346,6 +3411,27 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec, } } +static void alc280_fixup_hp_gpio4(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + /* Like hp_gpio_mic1_led, but also needs GPIO4 low to enable headphone amp */ + struct alc_spec *spec = codec->spec; + static const struct hda_verb gpio_init[] = { + { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 }, + { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 }, + {} + }; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook; + spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook; + spec->gpio_led = 0; + spec->cap_mute_led_nid = 0x18; + snd_hda_add_verbs(codec, gpio_init); + codec->power_filter = led_power_filter; + } +} + static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4213,6 +4299,7 @@ enum { ALC283_FIXUP_BXBT2807_MIC, ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, + ALC280_FIXUP_HP_GPIO4, }; static const struct hda_fixup alc269_fixups[] = { @@ -4433,6 +4520,8 @@ static const struct hda_fixup alc269_fixups[] = { [ALC269_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode, + .chained = true, + .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED }, [ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = { .type = HDA_FIXUP_FUNC, @@ -4622,6 +4711,8 @@ static const struct hda_fixup alc269_fixups[] = { [ALC255_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_alc255, + .chained = true, + .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED }, [ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = { .type = HDA_FIXUP_FUNC, @@ -4657,8 +4748,6 @@ static const struct hda_fixup alc269_fixups[] = { [ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_dell_wmi, - .chained_before = true, - .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE }, [ALC282_FIXUP_ASPIRE_V5_PINS] = { .type = HDA_FIXUP_PINS, @@ -4676,7 +4765,10 @@ static const struct hda_fixup alc269_fixups[] = { { }, }, }, - + [ALC280_FIXUP_HP_GPIO4] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc280_fixup_hp_gpio4, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -4693,10 +4785,8 @@ static const struct snd_pci_quirk 
alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED), SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), - SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED), SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -4724,21 +4814,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x8004, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), /* ALC290 */ SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2258, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), @@ -4747,7 +4831,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), - SND_PCI_QUIRK(0x103c, 0x2277, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), @@ -4800,7 +4883,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), - SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -4980,6 +5063,19 @@ static const struct 
snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x17, 0x40000000}, {0x1d, 0x40700001}, {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, + {0x12, 0x90a60130}, + {0x13, 0x40000000}, + {0x14, 0x90170110}, + {0x15, 0x0421101f}, + {0x16, 0x411111f0}, + {0x17, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x04a11020}, + {0x1b, 0x411111f0}, + {0x1d, 0x40748605}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED, {0x12, 0x90a60140}, {0x13, 0x40000000}, @@ -5190,9 +5286,6 @@ static void alc269_fill_coef(struct hda_codec *codec) } } - /* Class D */ - alc_update_coef_idx(codec, 0xd, 0, 1<<14); - /* HP */ alc_update_coef_idx(codec, 0x4, 0, 1<<11); } @@ -5610,9 +5703,9 @@ static void alc662_led_gpio1_mute_hook(void *private_data, int enabled) unsigned int oldval = spec->gpio_led; if (enabled) - spec->gpio_led &= ~0x01; - else spec->gpio_led |= 0x01; + else + spec->gpio_led &= ~0x01; if (spec->gpio_led != oldval) snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, spec->gpio_led); @@ -5647,6 +5740,35 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec, } } +static struct coef_fw alc668_coefs[] = { + WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), + WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), + WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b, 0x0), + WRITE_COEF(0x0c, 0x7cf7), WRITE_COEF(0x0d, 0x1080), WRITE_COEF(0x0e, 0x7f7f), + WRITE_COEF(0x0f, 0xcccc), WRITE_COEF(0x10, 0xddcc), WRITE_COEF(0x11, 0x0001), + WRITE_COEF(0x13, 0x0), WRITE_COEF(0x14, 0x2aa0), WRITE_COEF(0x17, 0xa940), + WRITE_COEF(0x19, 0x0), WRITE_COEF(0x1a, 0x0), WRITE_COEF(0x1b, 0x0), + WRITE_COEF(0x1c, 0x0), WRITE_COEF(0x1d, 0x0), WRITE_COEF(0x1e, 0x7418), + WRITE_COEF(0x1f, 0x0804), WRITE_COEF(0x20, 0x4200), WRITE_COEF(0x21, 0x0468), + WRITE_COEF(0x22, 0x8ccc), WRITE_COEF(0x23, 0x0250), WRITE_COEF(0x24, 0x7418), + WRITE_COEF(0x27, 0x0), WRITE_COEF(0x28, 0x8ccc), WRITE_COEF(0x2a, 0xff00), + WRITE_COEF(0x2b, 0x8000), WRITE_COEF(0xa7, 0xff00), WRITE_COEF(0xa8, 0x8000), + WRITE_COEF(0xaa, 0x2e17), WRITE_COEF(0xab, 0xa0c0), WRITE_COEF(0xac, 0x0), + WRITE_COEF(0xad, 0x0), WRITE_COEF(0xae, 0x2ac6), WRITE_COEF(0xaf, 0xa480), + WRITE_COEF(0xb0, 0x0), WRITE_COEF(0xb1, 0x0), WRITE_COEF(0xb2, 0x0), + WRITE_COEF(0xb3, 0x0), WRITE_COEF(0xb4, 0x0), WRITE_COEF(0xb5, 0x1040), + WRITE_COEF(0xb6, 0xd697), WRITE_COEF(0xb7, 0x902b), WRITE_COEF(0xb8, 0xd697), + WRITE_COEF(0xb9, 0x902b), WRITE_COEF(0xba, 0xb8ba), WRITE_COEF(0xbb, 0xaaab), + WRITE_COEF(0xbc, 0xaaaf), WRITE_COEF(0xbd, 0x6aaa), WRITE_COEF(0xbe, 0x1c02), + WRITE_COEF(0xc0, 0x00ff), WRITE_COEF(0xc1, 0x0fa6), + {} +}; + +static void alc668_restore_default_value(struct hda_codec *codec) +{ + alc_process_coef_fw(codec, alc668_coefs); +} + enum { ALC662_FIXUP_ASPIRE, ALC662_FIXUP_LED_GPIO1, @@ -5919,6 +6041,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), @@ -6072,29 +6195,6 @@ static const 
struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { {} }; -static void alc662_fill_coef(struct hda_codec *codec) -{ - int coef; - - coef = alc_get_coef0(codec); - - switch (codec->vendor_id) { - case 0x10ec0662: - if ((coef & 0x00f0) == 0x0030) - alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */ - break; - case 0x10ec0272: - case 0x10ec0273: - case 0x10ec0663: - case 0x10ec0665: - case 0x10ec0670: - case 0x10ec0671: - case 0x10ec0672: - alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */ - break; - } -} - /* */ static int patch_alc662(struct hda_codec *codec) @@ -6113,8 +6213,11 @@ static int patch_alc662(struct hda_codec *codec) alc_fix_pll_init(codec, 0x20, 0x04, 15); - spec->init_hook = alc662_fill_coef; - alc662_fill_coef(codec); + switch (codec->vendor_id) { + case 0x10ec0668: + spec->init_hook = alc668_restore_default_value; + break; + } snd_hda_pick_fixup(codec, alc662_fixup_models, alc662_fixup_tbl, alc662_fixups); diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 0e9623368ab..7d5d6444a83 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -49,7 +49,6 @@ source "sound/soc/mxs/Kconfig" source "sound/soc/pxa/Kconfig" source "sound/soc/rockchip/Kconfig" source "sound/soc/samsung/Kconfig" -source "sound/soc/s6000/Kconfig" source "sound/soc/sh/Kconfig" source "sound/soc/sirf/Kconfig" source "sound/soc/spear/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 534714a1ca4..d88edfced8c 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -26,7 +26,6 @@ obj-$(CONFIG_SND_SOC) += kirkwood/ obj-$(CONFIG_SND_SOC) += pxa/ obj-$(CONFIG_SND_SOC) += rockchip/ obj-$(CONFIG_SND_SOC) += samsung/ -obj-$(CONFIG_SND_SOC) += s6000/ obj-$(CONFIG_SND_SOC) += sh/ obj-$(CONFIG_SND_SOC) += sirf/ obj-$(CONFIG_SND_SOC) += spear/ diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c index 5518ebd6947..91f60282fd2 100644 --- a/sound/soc/codecs/adau1761.c +++ b/sound/soc/codecs/adau1761.c @@ -405,6 +405,7 @@ static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = { 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("ALC Clock", ADAU1761_CLK_ENABLE0, 5, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1, 0, 0, NULL, 0), @@ -436,6 +437,9 @@ static const struct snd_soc_dapm_route adau1761_dapm_routes[] = { { "Right Playback Mixer", NULL, "Slew Clock" }, { "Left Playback Mixer", NULL, "Slew Clock" }, + { "Left Input Mixer", NULL, "ALC Clock" }, + { "Right Input Mixer", NULL, "ALC Clock" }, + { "Digital Clock 0", NULL, "SYSCLK" }, { "Digital Clock 1", NULL, "SYSCLK" }, }; diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c index cee51ae177c..c40428f25ba 100644 --- a/sound/soc/codecs/cs42l51-i2c.c +++ b/sound/soc/codecs/cs42l51-i2c.c @@ -46,6 +46,7 @@ static struct i2c_driver cs42l51_i2c_driver = { .driver = { .name = "cs42l51", .owner = THIS_MODULE, + .of_match_table = cs42l51_of_match, }, .probe = cs42l51_i2c_probe, .remove = cs42l51_i2c_remove, diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index 09488d97de6..669c38fc303 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -558,11 +558,13 @@ error: } EXPORT_SYMBOL_GPL(cs42l51_probe); -static const struct of_device_id cs42l51_of_match[] = { +const struct of_device_id cs42l51_of_match[] = { { .compatible = "cirrus,cs42l51", }, { } }; MODULE_DEVICE_TABLE(of, cs42l51_of_match); +EXPORT_SYMBOL_GPL(cs42l51_of_match); + MODULE_AUTHOR("Arnaud 
Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h index 8c55bf384bc..0ca805492ac 100644 --- a/sound/soc/codecs/cs42l51.h +++ b/sound/soc/codecs/cs42l51.h @@ -22,6 +22,7 @@ struct device; extern const struct regmap_config cs42l51_regmap; int cs42l51_probe(struct device *dev, struct regmap *regmap); +extern const struct of_device_id cs42l51_of_match[]; #define CS42L51_CHIP_ID 0x1B #define CS42L51_CHIP_REV_A 0x00 diff --git a/sound/soc/codecs/es8328-i2c.c b/sound/soc/codecs/es8328-i2c.c index aae410d122e..2d05b5d3a6c 100644 --- a/sound/soc/codecs/es8328-i2c.c +++ b/sound/soc/codecs/es8328-i2c.c @@ -19,7 +19,7 @@ #include "es8328.h" static const struct i2c_device_id es8328_id[] = { - { "everest,es8328", 0 }, + { "es8328", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, es8328_id); diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index d519294f57c..1229554f146 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -1941,13 +1941,13 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai, * 0x02 (when master clk is 20MHz to 40MHz).. * 0x03 (when master clk is 40MHz to 60MHz).. */ - if ((freq >= 10000000) && (freq < 20000000)) { + if ((freq >= 10000000) && (freq <= 20000000)) { snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, M98090_PSCLK_DIV1); - } else if ((freq >= 20000000) && (freq < 40000000)) { + } else if ((freq > 20000000) && (freq <= 40000000)) { snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, M98090_PSCLK_DIV2); - } else if ((freq >= 40000000) && (freq < 60000000)) { + } else if ((freq > 40000000) && (freq <= 60000000)) { snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, M98090_PSCLK_DIV4); } else { diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 3fb83bf0976..d16331e0b64 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -139,6 +139,7 @@ static const struct reg_default rt5645_reg[] = { { 0x76, 0x000a }, { 0x77, 0x0c00 }, { 0x78, 0x0000 }, + { 0x79, 0x0123 }, { 0x80, 0x0000 }, { 0x81, 0x0000 }, { 0x82, 0x0000 }, @@ -334,6 +335,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg) case RT5645_DMIC_CTRL2: case RT5645_TDM_CTRL_1: case RT5645_TDM_CTRL_2: + case RT5645_TDM_CTRL_3: case RT5645_GLB_CLK: case RT5645_PLL_CTRL1: case RT5645_PLL_CTRL2: diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index ba9d9b4d485..9bd8b4f6330 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -100,18 +100,18 @@ static const struct reg_default rt5670_reg[] = { { 0x4c, 0x5380 }, { 0x4f, 0x0073 }, { 0x52, 0x00d3 }, - { 0x53, 0xf0f0 }, + { 0x53, 0xf000 }, { 0x61, 0x0000 }, { 0x62, 0x0001 }, { 0x63, 0x00c3 }, { 0x64, 0x0000 }, - { 0x65, 0x0000 }, + { 0x65, 0x0001 }, { 0x66, 0x0000 }, { 0x6f, 0x8000 }, { 0x70, 0x8000 }, { 0x71, 0x8000 }, { 0x72, 0x8000 }, - { 0x73, 0x1110 }, + { 0x73, 0x7770 }, { 0x74, 0x0e00 }, { 0x75, 0x1505 }, { 0x76, 0x0015 }, @@ -125,21 +125,21 @@ static const struct reg_default rt5670_reg[] = { { 0x83, 0x0000 }, { 0x84, 0x0000 }, { 0x85, 0x0000 }, - { 0x86, 0x0008 }, + { 0x86, 0x0004 }, { 0x87, 0x0000 }, { 0x88, 0x0000 }, { 0x89, 0x0000 }, { 0x8a, 0x0000 }, { 0x8b, 0x0000 }, - { 0x8c, 0x0007 }, + { 0x8c, 0x0003 }, { 0x8d, 0x0000 }, { 0x8e, 0x0004 }, { 0x8f, 0x1100 }, { 0x90, 0x0646 }, { 0x91, 0x0c06 }, { 0x93, 0x0000 }, - { 0x94, 0x0000 }, - { 0x95, 0x0000 }, + { 0x94, 0x1270 }, + { 0x95, 0x1000 }, { 0x97, 
0x0000 }, { 0x98, 0x0000 }, { 0x99, 0x0000 }, @@ -150,11 +150,11 @@ static const struct reg_default rt5670_reg[] = { { 0x9e, 0x0400 }, { 0xae, 0x7000 }, { 0xaf, 0x0000 }, - { 0xb0, 0x6000 }, + { 0xb0, 0x7000 }, { 0xb1, 0x0000 }, { 0xb2, 0x0000 }, { 0xb3, 0x001f }, - { 0xb4, 0x2206 }, + { 0xb4, 0x220c }, { 0xb5, 0x1f00 }, { 0xb6, 0x0000 }, { 0xb7, 0x0000 }, @@ -171,25 +171,25 @@ static const struct reg_default rt5670_reg[] = { { 0xcf, 0x1813 }, { 0xd0, 0x0690 }, { 0xd1, 0x1c17 }, - { 0xd3, 0xb320 }, + { 0xd3, 0xa220 }, { 0xd4, 0x0000 }, { 0xd6, 0x0400 }, { 0xd9, 0x0809 }, { 0xda, 0x0000 }, { 0xdb, 0x0001 }, { 0xdc, 0x0049 }, - { 0xdd, 0x0009 }, + { 0xdd, 0x0024 }, { 0xe6, 0x8000 }, { 0xe7, 0x0000 }, - { 0xec, 0xb300 }, + { 0xec, 0xa200 }, { 0xed, 0x0000 }, - { 0xee, 0xb300 }, + { 0xee, 0xa200 }, { 0xef, 0x0000 }, { 0xf8, 0x0000 }, { 0xf9, 0x0000 }, { 0xfa, 0x8010 }, { 0xfb, 0x0033 }, - { 0xfc, 0x0080 }, + { 0xfc, 0x0100 }, }; static bool rt5670_volatile_register(struct device *dev, unsigned int reg) @@ -1877,6 +1877,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = { { "DAC1 MIXR", "DAC1 Switch", "DAC1 R Mux" }, { "DAC1 MIXR", NULL, "DAC Stereo1 Filter" }, + { "DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll }, + { "DAC Mono Left Filter", NULL, "PLL1", is_sys_clk_from_pll }, + { "DAC Mono Right Filter", NULL, "PLL1", is_sys_clk_from_pll }, + { "DAC MIX", NULL, "DAC1 MIXL" }, { "DAC MIX", NULL, "DAC1 MIXR" }, @@ -1926,14 +1930,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = { { "DAC L1", NULL, "DAC L1 Power" }, { "DAC L1", NULL, "Stereo DAC MIXL" }, - { "DAC L1", NULL, "PLL1", is_sys_clk_from_pll }, { "DAC R1", NULL, "DAC R1 Power" }, { "DAC R1", NULL, "Stereo DAC MIXR" }, - { "DAC R1", NULL, "PLL1", is_sys_clk_from_pll }, { "DAC L2", NULL, "Mono DAC MIXL" }, - { "DAC L2", NULL, "PLL1", is_sys_clk_from_pll }, { "DAC R2", NULL, "Mono DAC MIXR" }, - { "DAC R2", NULL, "PLL1", is_sys_clk_from_pll }, { "OUT MIXL", "BST1 Switch", "BST1" }, { "OUT MIXL", "INL Switch", "INL VOL" }, diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 6bb77d76561..dab9b15304a 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1299,8 +1299,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) /* enable small pop, introduce 400ms delay in turning off */ snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL, - SGTL5000_SMALL_POP, - SGTL5000_SMALL_POP); + SGTL5000_SMALL_POP, 1); /* disable short cut detector */ snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0); diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index 2f8c88931f6..bd7a344bf8c 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -275,7 +275,7 @@ #define SGTL5000_BIAS_CTRL_MASK 0x000e #define SGTL5000_BIAS_CTRL_SHIFT 1 #define SGTL5000_BIAS_CTRL_WIDTH 3 -#define SGTL5000_SMALL_POP 0x0001 +#define SGTL5000_SMALL_POP 0 /* * SGTL5000_CHIP_MIC_CTRL diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index f412a9911a7..67124783558 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1355,6 +1355,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) file, blocks, pos - firmware->size); out_fw: + regmap_async_complete(regmap); release_firmware(firmware); wm_adsp_buf_free(&buf_list); out: diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 3b145313f93..9deabdd2b1a 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -684,12 +684,38 @@ static 
bool fsl_asrc_writeable_reg(struct device *dev, unsigned int reg) } } +static struct reg_default fsl_asrc_reg[] = { + { REG_ASRCTR, 0x0000 }, { REG_ASRIER, 0x0000 }, + { REG_ASRCNCR, 0x0000 }, { REG_ASRCFG, 0x0000 }, + { REG_ASRCSR, 0x0000 }, { REG_ASRCDR1, 0x0000 }, + { REG_ASRCDR2, 0x0000 }, { REG_ASRSTR, 0x0000 }, + { REG_ASRRA, 0x0000 }, { REG_ASRRB, 0x0000 }, + { REG_ASRRC, 0x0000 }, { REG_ASRPM1, 0x0000 }, + { REG_ASRPM2, 0x0000 }, { REG_ASRPM3, 0x0000 }, + { REG_ASRPM4, 0x0000 }, { REG_ASRPM5, 0x0000 }, + { REG_ASRTFR1, 0x0000 }, { REG_ASRCCR, 0x0000 }, + { REG_ASRDIA, 0x0000 }, { REG_ASRDOA, 0x0000 }, + { REG_ASRDIB, 0x0000 }, { REG_ASRDOB, 0x0000 }, + { REG_ASRDIC, 0x0000 }, { REG_ASRDOC, 0x0000 }, + { REG_ASRIDRHA, 0x0000 }, { REG_ASRIDRLA, 0x0000 }, + { REG_ASRIDRHB, 0x0000 }, { REG_ASRIDRLB, 0x0000 }, + { REG_ASRIDRHC, 0x0000 }, { REG_ASRIDRLC, 0x0000 }, + { REG_ASR76K, 0x0A47 }, { REG_ASR56K, 0x0DF3 }, + { REG_ASRMCRA, 0x0000 }, { REG_ASRFSTA, 0x0000 }, + { REG_ASRMCRB, 0x0000 }, { REG_ASRFSTB, 0x0000 }, + { REG_ASRMCRC, 0x0000 }, { REG_ASRFSTC, 0x0000 }, + { REG_ASRMCR1A, 0x0000 }, { REG_ASRMCR1B, 0x0000 }, + { REG_ASRMCR1C, 0x0000 }, +}; + static const struct regmap_config fsl_asrc_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = REG_ASRMCR1C, + .reg_defaults = fsl_asrc_reg, + .num_reg_defaults = ARRAY_SIZE(fsl_asrc_reg), .readable_reg = fsl_asrc_readable_reg, .volatile_reg = fsl_asrc_volatile_reg, .writeable_reg = fsl_asrc_writeable_reg, @@ -792,7 +818,7 @@ static int fsl_asrc_probe(struct platform_device *pdev) return -ENOMEM; asrc_priv->pdev = pdev; - strcpy(asrc_priv->name, np->name); + strncpy(asrc_priv->name, np->name, sizeof(asrc_priv->name) - 1); /* Get the addresses and IRQ */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index 8bcdfda09d7..a645e296199 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -734,7 +734,7 @@ static int fsl_esai_probe(struct platform_device *pdev) return -ENOMEM; esai_priv->pdev = pdev; - strcpy(esai_priv->name, np->name); + strncpy(esai_priv->name, np->name, sizeof(esai_priv->name) - 1); /* Get the addresses and IRQ */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c index 33fc5c3abf5..4df867cbb92 100644 --- a/sound/soc/intel/sst-haswell-pcm.c +++ b/sound/soc/intel/sst-haswell-pcm.c @@ -691,9 +691,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd) } #define HSW_FORMATS \ - (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \ - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S8) + (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE) static struct snd_soc_dai_driver hsw_dais[] = { { diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index f373e37f830..c74ba37f862 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -154,8 +154,10 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on) while (val) { regmap_read(i2s->regmap, I2S_CLR, &val); retry--; - if (!retry) + if (!retry) { dev_warn(i2s->dev, "fail to clear\n"); + break; + } } } } diff --git a/sound/soc/s6000/Kconfig b/sound/soc/s6000/Kconfig deleted file mode 100644 index f244a2566f2..00000000000 --- a/sound/soc/s6000/Kconfig +++ /dev/null @@ -1,26 +0,0 @@ -config SND_S6000_SOC - tristate "SoC Audio for the Stretch s6000 family" - depends on 
XTENSA_VARIANT_S6000 || COMPILE_TEST - depends on HAS_IOMEM - select SND_S6000_SOC_PCM if XTENSA_VARIANT_S6000 - help - Say Y or M if you want to add support for codecs attached to - s6000 family chips. You will also need to select the platform - to support below. - -config SND_S6000_SOC_PCM - tristate - -config SND_S6000_SOC_I2S - tristate - -config SND_S6000_SOC_S6IPCAM - bool "SoC Audio support for Stretch 6105 IP Camera" - depends on SND_S6000_SOC=y - depends on I2C=y - depends on XTENSA_PLATFORM_S6105 || COMPILE_TEST - select SND_S6000_SOC_I2S - select SND_SOC_TLV320AIC3X - help - Say Y if you want to add support for SoC audio on the - Stretch s6105 IP Camera Reference Design. diff --git a/sound/soc/s6000/Makefile b/sound/soc/s6000/Makefile deleted file mode 100644 index 0f0ae2a012a..00000000000 --- a/sound/soc/s6000/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# s6000 Platform Support -snd-soc-s6000-objs := s6000-pcm.o -snd-soc-s6000-i2s-objs := s6000-i2s.o - -obj-$(CONFIG_SND_S6000_SOC_PCM) += snd-soc-s6000.o -obj-$(CONFIG_SND_S6000_SOC_I2S) += snd-soc-s6000-i2s.o - -# s6105 Machine Support -snd-soc-s6ipcam-objs := s6105-ipcam.o - -obj-$(CONFIG_SND_S6000_SOC_S6IPCAM) += snd-soc-s6ipcam.o diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c deleted file mode 100644 index 1c8d01166e5..00000000000 --- a/sound/soc/s6000/s6000-i2s.c +++ /dev/null @@ -1,617 +0,0 @@ -/* - * ALSA SoC I2S Audio Layer for the Stretch S6000 family - * - * Author: Daniel Gloeckner, <dg@emlix.com> - * Copyright: (C) 2009 emlix GmbH <info@emlix.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/slab.h> - -#include <sound/core.h> -#include <sound/pcm.h> -#include <sound/pcm_params.h> -#include <sound/initval.h> -#include <sound/soc.h> - -#include "s6000-i2s.h" -#include "s6000-pcm.h" - -struct s6000_i2s_dev { - dma_addr_t sifbase; - u8 __iomem *scbbase; - unsigned int wide; - unsigned int channel_in; - unsigned int channel_out; - unsigned int lines_in; - unsigned int lines_out; - struct s6000_pcm_dma_params dma_params; -}; - -#define S6_I2S_INTERRUPT_STATUS 0x00 -#define S6_I2S_INT_OVERRUN 1 -#define S6_I2S_INT_UNDERRUN 2 -#define S6_I2S_INT_ALIGNMENT 4 -#define S6_I2S_INTERRUPT_ENABLE 0x04 -#define S6_I2S_INTERRUPT_RAW 0x08 -#define S6_I2S_INTERRUPT_CLEAR 0x0C -#define S6_I2S_INTERRUPT_SET 0x10 -#define S6_I2S_MODE 0x20 -#define S6_I2S_DUAL 0 -#define S6_I2S_WIDE 1 -#define S6_I2S_TX_DEFAULT 0x24 -#define S6_I2S_DATA_CFG(c) (0x40 + 0x10 * (c)) -#define S6_I2S_IN 0 -#define S6_I2S_OUT 1 -#define S6_I2S_UNUSED 2 -#define S6_I2S_INTERFACE_CFG(c) (0x44 + 0x10 * (c)) -#define S6_I2S_DIV_MASK 0x001fff -#define S6_I2S_16BIT 0x000000 -#define S6_I2S_20BIT 0x002000 -#define S6_I2S_24BIT 0x004000 -#define S6_I2S_32BIT 0x006000 -#define S6_I2S_BITS_MASK 0x006000 -#define S6_I2S_MEM_16BIT 0x000000 -#define S6_I2S_MEM_32BIT 0x008000 -#define S6_I2S_MEM_MASK 0x008000 -#define S6_I2S_CHANNELS_SHIFT 16 -#define S6_I2S_CHANNELS_MASK 0x030000 -#define S6_I2S_SCK_IN 0x000000 -#define S6_I2S_SCK_OUT 0x040000 -#define S6_I2S_SCK_DIR 0x040000 -#define S6_I2S_WS_IN 0x000000 -#define S6_I2S_WS_OUT 0x080000 -#define S6_I2S_WS_DIR 0x080000 -#define S6_I2S_LEFT_FIRST 0x000000 
-#define S6_I2S_RIGHT_FIRST 0x100000 -#define S6_I2S_FIRST 0x100000 -#define S6_I2S_CUR_SCK 0x200000 -#define S6_I2S_CUR_WS 0x400000 -#define S6_I2S_ENABLE(c) (0x48 + 0x10 * (c)) -#define S6_I2S_DISABLE_IF 0x02 -#define S6_I2S_ENABLE_IF 0x03 -#define S6_I2S_IS_BUSY 0x04 -#define S6_I2S_DMA_ACTIVE 0x08 -#define S6_I2S_IS_ENABLED 0x10 - -#define S6_I2S_NUM_LINES 4 - -#define S6_I2S_SIF_PORT0 0x0000000 -#define S6_I2S_SIF_PORT1 0x0000080 /* docs say 0x0000010 */ - -static inline void s6_i2s_write_reg(struct s6000_i2s_dev *dev, int reg, u32 val) -{ - writel(val, dev->scbbase + reg); -} - -static inline u32 s6_i2s_read_reg(struct s6000_i2s_dev *dev, int reg) -{ - return readl(dev->scbbase + reg); -} - -static inline void s6_i2s_mod_reg(struct s6000_i2s_dev *dev, int reg, - u32 mask, u32 val) -{ - val ^= s6_i2s_read_reg(dev, reg) & ~mask; - s6_i2s_write_reg(dev, reg, val); -} - -static void s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel) -{ - int i, j, cur, prev; - - /* - * Wait for WCLK to toggle 5 times before enabling the channel - * s6000 Family Datasheet 3.6.4: - * "At least two cycles of WS must occur between commands - * to disable or enable the interface" - */ - j = 0; - prev = ~S6_I2S_CUR_WS; - for (i = 1000000; --i && j < 6; ) { - cur = s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(channel)) - & S6_I2S_CUR_WS; - if (prev != cur) { - prev = cur; - j++; - } - } - if (j < 6) - printk(KERN_WARNING "s6000-i2s: timeout waiting for WCLK\n"); - - s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_ENABLE_IF); -} - -static void s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel) -{ - s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_DISABLE_IF); -} - -static void s6000_i2s_start(struct snd_pcm_substream *substream) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); - int channel; - - channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? - dev->channel_out : dev->channel_in; - - s6000_i2s_start_channel(dev, channel); -} - -static void s6000_i2s_stop(struct snd_pcm_substream *substream) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); - int channel; - - channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
- dev->channel_out : dev->channel_in; - - s6000_i2s_stop_channel(dev, channel); -} - -static int s6000_i2s_trigger(struct snd_pcm_substream *substream, int cmd, - int after) -{ - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ^ !after) - s6000_i2s_start(substream); - break; - case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - if (!after) - s6000_i2s_stop(substream); - } - return 0; -} - -static unsigned int s6000_i2s_int_sources(struct s6000_i2s_dev *dev) -{ - unsigned int pending; - pending = s6_i2s_read_reg(dev, S6_I2S_INTERRUPT_RAW); - pending &= S6_I2S_INT_ALIGNMENT | - S6_I2S_INT_UNDERRUN | - S6_I2S_INT_OVERRUN; - s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR, pending); - - return pending; -} - -static unsigned int s6000_i2s_check_xrun(struct snd_soc_dai *cpu_dai) -{ - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); - unsigned int errors; - unsigned int ret; - - errors = s6000_i2s_int_sources(dev); - if (likely(!errors)) - return 0; - - ret = 0; - if (errors & S6_I2S_INT_ALIGNMENT) - printk(KERN_ERR "s6000-i2s: WCLK misaligned\n"); - if (errors & S6_I2S_INT_UNDERRUN) - ret |= 1 << SNDRV_PCM_STREAM_PLAYBACK; - if (errors & S6_I2S_INT_OVERRUN) - ret |= 1 << SNDRV_PCM_STREAM_CAPTURE; - return ret; -} - -static void s6000_i2s_wait_disabled(struct s6000_i2s_dev *dev) -{ - int channel; - int n = 50; - for (channel = 0; channel < 2; channel++) { - while (--n >= 0) { - int v = s6_i2s_read_reg(dev, S6_I2S_ENABLE(channel)); - if ((v & S6_I2S_IS_ENABLED) - || !(v & (S6_I2S_DMA_ACTIVE | S6_I2S_IS_BUSY))) - break; - udelay(20); - } - } - if (n < 0) - printk(KERN_WARNING "s6000-i2s: timeout disabling interfaces"); -} - -static int s6000_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, - unsigned int fmt) -{ - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); - u32 w; - - switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { - case SND_SOC_DAIFMT_CBM_CFM: - w = S6_I2S_SCK_IN | S6_I2S_WS_IN; - break; - case SND_SOC_DAIFMT_CBS_CFM: - w = S6_I2S_SCK_OUT | S6_I2S_WS_IN; - break; - case SND_SOC_DAIFMT_CBM_CFS: - w = S6_I2S_SCK_IN | S6_I2S_WS_OUT; - break; - case SND_SOC_DAIFMT_CBS_CFS: - w = S6_I2S_SCK_OUT | S6_I2S_WS_OUT; - break; - default: - return -EINVAL; - } - - switch (fmt & SND_SOC_DAIFMT_INV_MASK) { - case SND_SOC_DAIFMT_NB_NF: - w |= S6_I2S_LEFT_FIRST; - break; - case SND_SOC_DAIFMT_NB_IF: - w |= S6_I2S_RIGHT_FIRST; - break; - default: - return -EINVAL; - } - - s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(0), - S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w); - s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(1), - S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w); - - return 0; -} - -static int s6000_i2s_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div) -{ - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); - - if (!div || (div & 1) || div > (S6_I2S_DIV_MASK + 1) * 2) - return -EINVAL; - - s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(div_id), - S6_I2S_DIV_MASK, div / 2 - 1); - return 0; -} - -static int s6000_i2s_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); - int interf; - u32 w = 0; - - if (dev->wide) - interf = 0; - else { - w |= (((params_channels(params) - 2) / 2) - << S6_I2S_CHANNELS_SHIFT) & S6_I2S_CHANNELS_MASK; - interf = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - ? 
dev->channel_out : dev->channel_in; - } - - switch (params_format(params)) { - case SNDRV_PCM_FORMAT_S16_LE: - w |= S6_I2S_16BIT | S6_I2S_MEM_16BIT; - break; - case SNDRV_PCM_FORMAT_S32_LE: - w |= S6_I2S_32BIT | S6_I2S_MEM_32BIT; - break; - default: - printk(KERN_WARNING "s6000-i2s: unsupported PCM format %x\n", - params_format(params)); - return -EINVAL; - } - - if (s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(interf)) - & S6_I2S_IS_ENABLED) { - printk(KERN_ERR "s6000-i2s: interface already enabled\n"); - return -EBUSY; - } - - s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(interf), - S6_I2S_CHANNELS_MASK|S6_I2S_MEM_MASK|S6_I2S_BITS_MASK, - w); - - return 0; -} - -static int s6000_i2s_dai_probe(struct snd_soc_dai *dai) -{ - struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); - struct s6000_snd_platform_data *pdata = dai->dev->platform_data; - - if (!pdata) - return -EINVAL; - - dai->capture_dma_data = &dev->dma_params; - dai->playback_dma_data = &dev->dma_params; - - dev->wide = pdata->wide; - dev->channel_in = pdata->channel_in; - dev->channel_out = pdata->channel_out; - dev->lines_in = pdata->lines_in; - dev->lines_out = pdata->lines_out; - - s6_i2s_write_reg(dev, S6_I2S_MODE, - dev->wide ? S6_I2S_WIDE : S6_I2S_DUAL); - - if (dev->wide) { - int i; - - if (dev->lines_in + dev->lines_out > S6_I2S_NUM_LINES) - return -EINVAL; - - dev->channel_in = 0; - dev->channel_out = 1; - dai->driver->capture.channels_min = 2 * dev->lines_in; - dai->driver->capture.channels_max = dai->driver->capture.channels_min; - dai->driver->playback.channels_min = 2 * dev->lines_out; - dai->driver->playback.channels_max = dai->driver->playback.channels_min; - - for (i = 0; i < dev->lines_out; i++) - s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_OUT); - - for (; i < S6_I2S_NUM_LINES - dev->lines_in; i++) - s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), - S6_I2S_UNUSED); - - for (; i < S6_I2S_NUM_LINES; i++) - s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_IN); - } else { - unsigned int cfg[2] = {S6_I2S_UNUSED, S6_I2S_UNUSED}; - - if (dev->lines_in > 1 || dev->lines_out > 1) - return -EINVAL; - - dai->driver->capture.channels_min = 2 * dev->lines_in; - dai->driver->capture.channels_max = 8 * dev->lines_in; - dai->driver->playback.channels_min = 2 * dev->lines_out; - dai->driver->playback.channels_max = 8 * dev->lines_out; - - if (dev->lines_in) - cfg[dev->channel_in] = S6_I2S_IN; - if (dev->lines_out) - cfg[dev->channel_out] = S6_I2S_OUT; - - s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(0), cfg[0]); - s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(1), cfg[1]); - } - - if (dev->lines_out) { - if (dev->lines_in) { - if (!dev->dma_params.dma_out) - return -ENODEV; - } else { - dev->dma_params.dma_out = dev->dma_params.dma_in; - dev->dma_params.dma_in = 0; - } - } - dev->dma_params.sif_in = dev->sifbase + (dev->channel_in ? - S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0); - dev->dma_params.sif_out = dev->sifbase + (dev->channel_out ? 
- S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0); - dev->dma_params.same_rate = pdata->same_rate | pdata->wide; - return 0; -} - -#define S6000_I2S_RATES SNDRV_PCM_RATE_CONTINUOUS -#define S6000_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) - -static const struct snd_soc_dai_ops s6000_i2s_dai_ops = { - .set_fmt = s6000_i2s_set_dai_fmt, - .set_clkdiv = s6000_i2s_set_clkdiv, - .hw_params = s6000_i2s_hw_params, -}; - -static struct snd_soc_dai_driver s6000_i2s_dai = { - .probe = s6000_i2s_dai_probe, - .playback = { - .channels_min = 2, - .channels_max = 8, - .formats = S6000_I2S_FORMATS, - .rates = S6000_I2S_RATES, - .rate_min = 0, - .rate_max = 1562500, - }, - .capture = { - .channels_min = 2, - .channels_max = 8, - .formats = S6000_I2S_FORMATS, - .rates = S6000_I2S_RATES, - .rate_min = 0, - .rate_max = 1562500, - }, - .ops = &s6000_i2s_dai_ops, -}; - -static const struct snd_soc_component_driver s6000_i2s_component = { - .name = "s6000-i2s", -}; - -static int s6000_i2s_probe(struct platform_device *pdev) -{ - struct s6000_i2s_dev *dev; - struct resource *scbmem, *sifmem, *region, *dma1, *dma2; - u8 __iomem *mmio; - int ret; - - scbmem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!scbmem) { - dev_err(&pdev->dev, "no mem resource?\n"); - ret = -ENODEV; - goto err_release_none; - } - - region = request_mem_region(scbmem->start, resource_size(scbmem), - pdev->name); - if (!region) { - dev_err(&pdev->dev, "I2S SCB region already claimed\n"); - ret = -EBUSY; - goto err_release_none; - } - - mmio = ioremap(scbmem->start, resource_size(scbmem)); - if (!mmio) { - dev_err(&pdev->dev, "can't ioremap SCB region\n"); - ret = -ENOMEM; - goto err_release_scb; - } - - sifmem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!sifmem) { - dev_err(&pdev->dev, "no second mem resource?\n"); - ret = -ENODEV; - goto err_release_map; - } - - region = request_mem_region(sifmem->start, resource_size(sifmem), - pdev->name); - if (!region) { - dev_err(&pdev->dev, "I2S SIF region already claimed\n"); - ret = -EBUSY; - goto err_release_map; - } - - dma1 = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (!dma1) { - dev_err(&pdev->dev, "no dma resource?\n"); - ret = -ENODEV; - goto err_release_sif; - } - - region = request_mem_region(dma1->start, resource_size(dma1), - pdev->name); - if (!region) { - dev_err(&pdev->dev, "I2S DMA region already claimed\n"); - ret = -EBUSY; - goto err_release_sif; - } - - dma2 = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (dma2) { - region = request_mem_region(dma2->start, resource_size(dma2), - pdev->name); - if (!region) { - dev_err(&pdev->dev, - "I2S DMA region already claimed\n"); - ret = -EBUSY; - goto err_release_dma1; - } - } - - dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL); - if (!dev) { - ret = -ENOMEM; - goto err_release_dma2; - } - dev_set_drvdata(&pdev->dev, dev); - - dev->sifbase = sifmem->start; - dev->scbbase = mmio; - - s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0); - s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR, - S6_I2S_INT_ALIGNMENT | - S6_I2S_INT_UNDERRUN | - S6_I2S_INT_OVERRUN); - - s6000_i2s_stop_channel(dev, 0); - s6000_i2s_stop_channel(dev, 1); - s6000_i2s_wait_disabled(dev); - - dev->dma_params.check_xrun = s6000_i2s_check_xrun; - dev->dma_params.trigger = s6000_i2s_trigger; - dev->dma_params.dma_in = dma1->start; - dev->dma_params.dma_out = dma2 ? 
dma2->start : 0; - dev->dma_params.irq = platform_get_irq(pdev, 0); - if (dev->dma_params.irq < 0) { - dev_err(&pdev->dev, "no irq resource?\n"); - ret = -ENODEV; - goto err_release_dev; - } - - s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, - S6_I2S_INT_ALIGNMENT | - S6_I2S_INT_UNDERRUN | - S6_I2S_INT_OVERRUN); - - ret = snd_soc_register_component(&pdev->dev, &s6000_i2s_component, - &s6000_i2s_dai, 1); - if (ret) - goto err_release_dev; - - return 0; - -err_release_dev: - kfree(dev); -err_release_dma2: - if (dma2) - release_mem_region(dma2->start, resource_size(dma2)); -err_release_dma1: - release_mem_region(dma1->start, resource_size(dma1)); -err_release_sif: - release_mem_region(sifmem->start, resource_size(sifmem)); -err_release_map: - iounmap(mmio); -err_release_scb: - release_mem_region(scbmem->start, resource_size(scbmem)); -err_release_none: - return ret; -} - -static int s6000_i2s_remove(struct platform_device *pdev) -{ - struct s6000_i2s_dev *dev = dev_get_drvdata(&pdev->dev); - struct resource *region; - void __iomem *mmio = dev->scbbase; - - snd_soc_unregister_component(&pdev->dev); - - s6000_i2s_stop_channel(dev, 0); - s6000_i2s_stop_channel(dev, 1); - - s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0); - kfree(dev); - - region = platform_get_resource(pdev, IORESOURCE_DMA, 0); - release_mem_region(region->start, resource_size(region)); - - region = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (region) - release_mem_region(region->start, resource_size(region)); - - region = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(region->start, resource_size(region)); - - iounmap(mmio); - region = platform_get_resource(pdev, IORESOURCE_IO, 0); - release_mem_region(region->start, resource_size(region)); - - return 0; -} - -static struct platform_driver s6000_i2s_driver = { - .probe = s6000_i2s_probe, - .remove = s6000_i2s_remove, - .driver = { - .name = "s6000-i2s", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(s6000_i2s_driver); - -MODULE_AUTHOR("Daniel Gloeckner"); -MODULE_DESCRIPTION("Stretch s6000 family I2S SoC Interface"); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/s6000/s6000-i2s.h b/sound/soc/s6000/s6000-i2s.h deleted file mode 100644 index 86aa1921c89..00000000000 --- a/sound/soc/s6000/s6000-i2s.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * ALSA SoC I2S Audio Layer for the Stretch s6000 family - * - * Author: Daniel Gloeckner, <dg@emlix.com> - * Copyright: (C) 2009 emlix GmbH <info@emlix.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _S6000_I2S_H -#define _S6000_I2S_H - -struct s6000_snd_platform_data { - int lines_in; - int lines_out; - int channel_in; - int channel_out; - int wide; - int same_rate; -}; -#endif diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c deleted file mode 100644 index fb8461e1b1f..00000000000 --- a/sound/soc/s6000/s6000-pcm.c +++ /dev/null @@ -1,521 +0,0 @@ -/* - * ALSA PCM interface for the Stetch s6000 family - * - * Author: Daniel Gloeckner, <dg@emlix.com> - * Copyright: (C) 2009 emlix GmbH <info@emlix.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/dma-mapping.h> -#include <linux/interrupt.h> - -#include <sound/core.h> -#include <sound/pcm.h> -#include <sound/pcm_params.h> -#include <sound/soc.h> - -#include <asm/dma.h> -#include <variant/dmac.h> - -#include "s6000-pcm.h" - -#define S6_PCM_PREALLOCATE_SIZE (96 * 1024) -#define S6_PCM_PREALLOCATE_MAX (2048 * 1024) - -static struct snd_pcm_hardware s6000_pcm_hardware = { - .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | - SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX), - .buffer_bytes_max = 0x7ffffff0, - .period_bytes_min = 16, - .period_bytes_max = 0xfffff0, - .periods_min = 2, - .periods_max = 1024, /* no limit */ - .fifo_size = 0, -}; - -struct s6000_runtime_data { - spinlock_t lock; - int period; /* current DMA period */ -}; - -static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream) -{ - struct snd_pcm_runtime *runtime = substream->runtime; - struct s6000_runtime_data *prtd = runtime->private_data; - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - int channel; - unsigned int period_size; - unsigned int dma_offset; - dma_addr_t dma_pos; - dma_addr_t src, dst; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - period_size = snd_pcm_lib_period_bytes(substream); - dma_offset = prtd->period * period_size; - dma_pos = runtime->dma_addr + dma_offset; - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - src = dma_pos; - dst = par->sif_out; - channel = par->dma_out; - } else { - src = par->sif_in; - dst = dma_pos; - channel = par->dma_in; - } - - if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel), - DMA_INDEX_CHNL(channel))) - return; - - if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) { - printk(KERN_ERR "s6000-pcm: fifo full\n"); - return; - } - - if (WARN_ON(period_size & 15)) - return; - s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel), - src, dst, period_size); - - prtd->period++; - if (unlikely(prtd->period >= runtime->periods)) - prtd->period = 0; -} - -static irqreturn_t s6000_pcm_irq(int irq, void *data) -{ - struct snd_pcm *pcm = data; - struct snd_soc_pcm_runtime *runtime = pcm->private_data; - struct s6000_runtime_data *prtd; - unsigned int has_xrun; - int i, ret = IRQ_NONE; - - for (i = 0; i < 2; ++i) { - struct snd_pcm_substream *substream = pcm->streams[i].substream; - struct s6000_pcm_dma_params *params = - snd_soc_dai_get_dma_data(runtime->cpu_dai, substream); - u32 channel; - unsigned int pending; - - if (substream == SNDRV_PCM_STREAM_PLAYBACK) - channel = params->dma_out; - else - channel = params->dma_in; - - has_xrun = params->check_xrun(runtime->cpu_dai); - - if (!channel) - continue; - - if (unlikely(has_xrun & (1 << i)) && - substream->runtime && - snd_pcm_running(substream)) { - dev_dbg(pcm->dev, "xrun\n"); - snd_pcm_stream_lock(substream); - snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); - snd_pcm_stream_unlock(substream); - ret = IRQ_HANDLED; - } - - pending = s6dmac_int_sources(DMA_MASK_DMAC(channel), - DMA_INDEX_CHNL(channel)); - - if (pending & 1) { - ret = IRQ_HANDLED; - if (likely(substream->runtime && - snd_pcm_running(substream))) { - snd_pcm_period_elapsed(substream); - dev_dbg(pcm->dev, "period elapsed %x %x\n", - s6dmac_cur_src(DMA_MASK_DMAC(channel), - DMA_INDEX_CHNL(channel)), - 
s6dmac_cur_dst(DMA_MASK_DMAC(channel), - DMA_INDEX_CHNL(channel))); - prtd = substream->runtime->private_data; - spin_lock(&prtd->lock); - s6000_pcm_enqueue_dma(substream); - spin_unlock(&prtd->lock); - } - } - - if (unlikely(pending & ~7)) { - if (pending & (1 << 3)) - printk(KERN_WARNING - "s6000-pcm: DMA %x Underflow\n", - channel); - if (pending & (1 << 4)) - printk(KERN_WARNING - "s6000-pcm: DMA %x Overflow\n", - channel); - if (pending & 0x1e0) - printk(KERN_WARNING - "s6000-pcm: DMA %x Master Error " - "(mask %x)\n", - channel, pending >> 5); - - } - } - - return ret; -} - -static int s6000_pcm_start(struct snd_pcm_substream *substream) -{ - struct s6000_runtime_data *prtd = substream->runtime->private_data; - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - unsigned long flags; - int srcinc; - u32 dma; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - spin_lock_irqsave(&prtd->lock, flags); - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - srcinc = 1; - dma = par->dma_out; - } else { - srcinc = 0; - dma = par->dma_in; - } - s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma), - 1 /* priority 1 (0 is max) */, - 0 /* peripheral requests w/o xfer length mode */, - srcinc /* source address increment */, - srcinc^1 /* destination address increment */, - 0 /* chunksize 0 (skip impossible on this dma) */, - 0 /* source skip after chunk (impossible) */, - 0 /* destination skip after chunk (impossible) */, - 4 /* 16 byte burst size */, - -1 /* don't conserve bandwidth */, - 0 /* low watermark irq descriptor threshold */, - 0 /* disable hardware timestamps */, - 1 /* enable channel */); - - s6000_pcm_enqueue_dma(substream); - s6000_pcm_enqueue_dma(substream); - - spin_unlock_irqrestore(&prtd->lock, flags); - - return 0; -} - -static int s6000_pcm_stop(struct snd_pcm_substream *substream) -{ - struct s6000_runtime_data *prtd = substream->runtime->private_data; - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - unsigned long flags; - u32 channel; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - channel = par->dma_out; - else - channel = par->dma_in; - - s6dmac_set_terminal_count(DMA_MASK_DMAC(channel), - DMA_INDEX_CHNL(channel), 0); - - spin_lock_irqsave(&prtd->lock, flags); - - s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel)); - - spin_unlock_irqrestore(&prtd->lock, flags); - - return 0; -} - -static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) -{ - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - int ret; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - ret = par->trigger(substream, cmd, 0); - if (ret < 0) - return ret; - - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - ret = s6000_pcm_start(substream); - break; - case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ret = s6000_pcm_stop(substream); - break; - default: - ret = -EINVAL; - } - if (ret < 0) - return ret; - - return par->trigger(substream, cmd, 1); -} - -static int s6000_pcm_prepare(struct snd_pcm_substream *substream) -{ - struct s6000_runtime_data *prtd = substream->runtime->private_data; - - prtd->period = 0; - - return 0; -} - -static snd_pcm_uframes_t 
s6000_pcm_pointer(struct snd_pcm_substream *substream) -{ - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - struct snd_pcm_runtime *runtime = substream->runtime; - struct s6000_runtime_data *prtd = runtime->private_data; - unsigned long flags; - unsigned int offset; - dma_addr_t count; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - spin_lock_irqsave(&prtd->lock, flags); - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out), - DMA_INDEX_CHNL(par->dma_out)); - else - count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in), - DMA_INDEX_CHNL(par->dma_in)); - - count -= runtime->dma_addr; - - spin_unlock_irqrestore(&prtd->lock, flags); - - offset = bytes_to_frames(runtime, count); - if (unlikely(offset >= runtime->buffer_size)) - offset = 0; - - return offset; -} - -static int s6000_pcm_open(struct snd_pcm_substream *substream) -{ - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - struct snd_pcm_runtime *runtime = substream->runtime; - struct s6000_runtime_data *prtd; - int ret; - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware); - - ret = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16); - if (ret < 0) - return ret; - ret = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16); - if (ret < 0) - return ret; - ret = snd_pcm_hw_constraint_integer(runtime, - SNDRV_PCM_HW_PARAM_PERIODS); - if (ret < 0) - return ret; - - if (par->same_rate) { - int rate; - spin_lock(&par->lock); /* needed? */ - rate = par->rate; - spin_unlock(&par->lock); - if (rate != -1) { - ret = snd_pcm_hw_constraint_minmax(runtime, - SNDRV_PCM_HW_PARAM_RATE, - rate, rate); - if (ret < 0) - return ret; - } - } - - prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL); - if (prtd == NULL) - return -ENOMEM; - - spin_lock_init(&prtd->lock); - - runtime->private_data = prtd; - - return 0; -} - -static int s6000_pcm_close(struct snd_pcm_substream *substream) -{ - struct snd_pcm_runtime *runtime = substream->runtime; - struct s6000_runtime_data *prtd = runtime->private_data; - - kfree(prtd); - - return 0; -} - -static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *hw_params) -{ - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par; - int ret; - ret = snd_pcm_lib_malloc_pages(substream, - params_buffer_bytes(hw_params)); - if (ret < 0) { - printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n"); - return ret; - } - - par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - if (par->same_rate) { - spin_lock(&par->lock); - if (par->rate == -1 || - !(par->in_use & ~(1 << substream->stream))) { - par->rate = params_rate(hw_params); - par->in_use |= 1 << substream->stream; - } else if (params_rate(hw_params) != par->rate) { - snd_pcm_lib_free_pages(substream); - par->in_use &= ~(1 << substream->stream); - ret = -EBUSY; - } - spin_unlock(&par->lock); - } - return ret; -} - -static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) -{ - struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = - snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); - - spin_lock(&par->lock); - par->in_use &= ~(1 << substream->stream); - if (!par->in_use) - par->rate = 
-1; - spin_unlock(&par->lock); - - return snd_pcm_lib_free_pages(substream); -} - -static struct snd_pcm_ops s6000_pcm_ops = { - .open = s6000_pcm_open, - .close = s6000_pcm_close, - .ioctl = snd_pcm_lib_ioctl, - .hw_params = s6000_pcm_hw_params, - .hw_free = s6000_pcm_hw_free, - .trigger = s6000_pcm_trigger, - .prepare = s6000_pcm_prepare, - .pointer = s6000_pcm_pointer, -}; - -static void s6000_pcm_free(struct snd_pcm *pcm) -{ - struct snd_soc_pcm_runtime *runtime = pcm->private_data; - struct s6000_pcm_dma_params *params = - snd_soc_dai_get_dma_data(runtime->cpu_dai, - pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); - - free_irq(params->irq, pcm); - snd_pcm_lib_preallocate_free_for_all(pcm); -} - -static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime) -{ - struct snd_card *card = runtime->card->snd_card; - struct snd_pcm *pcm = runtime->pcm; - struct s6000_pcm_dma_params *params; - int res; - - params = snd_soc_dai_get_dma_data(runtime->cpu_dai, - pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); - - res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); - if (res) - return res; - - if (params->dma_in) { - s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in), - DMA_INDEX_CHNL(params->dma_in)); - s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in), - DMA_INDEX_CHNL(params->dma_in)); - } - - if (params->dma_out) { - s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out), - DMA_INDEX_CHNL(params->dma_out)); - s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out), - DMA_INDEX_CHNL(params->dma_out)); - } - - res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED, - "s6000-audio", pcm); - if (res) { - printk(KERN_ERR "s6000-pcm couldn't get IRQ\n"); - return res; - } - - res = snd_pcm_lib_preallocate_pages_for_all(pcm, - SNDRV_DMA_TYPE_DEV, - card->dev, - S6_PCM_PREALLOCATE_SIZE, - S6_PCM_PREALLOCATE_MAX); - if (res) - printk(KERN_WARNING "s6000-pcm: preallocation failed\n"); - - spin_lock_init(¶ms->lock); - params->in_use = 0; - params->rate = -1; - return 0; -} - -static struct snd_soc_platform_driver s6000_soc_platform = { - .ops = &s6000_pcm_ops, - .pcm_new = s6000_pcm_new, - .pcm_free = s6000_pcm_free, -}; - -static int s6000_soc_platform_probe(struct platform_device *pdev) -{ - return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform); -} - -static int s6000_soc_platform_remove(struct platform_device *pdev) -{ - snd_soc_unregister_platform(&pdev->dev); - return 0; -} - -static struct platform_driver s6000_pcm_driver = { - .driver = { - .name = "s6000-pcm-audio", - .owner = THIS_MODULE, - }, - - .probe = s6000_soc_platform_probe, - .remove = s6000_soc_platform_remove, -}; - -module_platform_driver(s6000_pcm_driver); - -MODULE_AUTHOR("Daniel Gloeckner"); -MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module"); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/s6000/s6000-pcm.h b/sound/soc/s6000/s6000-pcm.h deleted file mode 100644 index 09d9b883e58..00000000000 --- a/sound/soc/s6000/s6000-pcm.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * ALSA PCM interface for the Stretch s6000 family - * - * Author: Daniel Gloeckner, <dg@emlix.com> - * Copyright: (C) 2009 emlix GmbH <info@emlix.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef _S6000_PCM_H -#define _S6000_PCM_H - -struct snd_soc_dai; -struct snd_pcm_substream; - -struct s6000_pcm_dma_params { - unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai); - int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after); - dma_addr_t sif_in; - dma_addr_t sif_out; - u32 dma_in; - u32 dma_out; - int irq; - int same_rate; - - spinlock_t lock; - int in_use; - int rate; -}; - -#endif diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c deleted file mode 100644 index 3510c01f8a6..00000000000 --- a/sound/soc/s6000/s6105-ipcam.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * ASoC driver for Stretch s6105 IP camera platform - * - * Author: Daniel Gloeckner, <dg@emlix.com> - * Copyright: (C) 2009 emlix GmbH <info@emlix.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/timer.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/i2c.h> -#include <sound/core.h> -#include <sound/pcm.h> -#include <sound/soc.h> - -#include "s6000-pcm.h" -#include "s6000-i2s.h" - -#define S6105_CAM_CODEC_CLOCK 12288000 - -static int s6105_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_dai *cpu_dai = rtd->cpu_dai; - int ret = 0; - - /* set codec DAI configuration */ - ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | - SND_SOC_DAIFMT_CBM_CFM); - if (ret < 0) - return ret; - - /* set cpu DAI configuration */ - ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM | - SND_SOC_DAIFMT_NB_NF); - if (ret < 0) - return ret; - - /* set the codec system clock */ - ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK, - SND_SOC_CLOCK_OUT); - if (ret < 0) - return ret; - - return 0; -} - -static struct snd_soc_ops s6105_ops = { - .hw_params = s6105_hw_params, -}; - -/* s6105 machine dapm widgets */ -static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = { - SND_SOC_DAPM_LINE("Audio Out Differential", NULL), - SND_SOC_DAPM_LINE("Audio Out Stereo", NULL), - SND_SOC_DAPM_LINE("Audio In", NULL), -}; - -/* s6105 machine audio_mapnections to the codec pins */ -static const struct snd_soc_dapm_route audio_map[] = { - /* Audio Out connected to HPLOUT, HPLCOM, HPROUT */ - {"Audio Out Differential", NULL, "HPLOUT"}, - {"Audio Out Differential", NULL, "HPLCOM"}, - {"Audio Out Stereo", NULL, "HPLOUT"}, - {"Audio Out Stereo", NULL, "HPROUT"}, - - /* Audio In connected to LINE1L, LINE1R */ - {"LINE1L", NULL, "Audio In"}, - {"LINE1R", NULL, "Audio In"}, -}; - -static int output_type_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - uinfo->count = 1; - uinfo->value.enumerated.items = 2; - if (uinfo->value.enumerated.item) { - uinfo->value.enumerated.item = 1; - strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT"); - } else { - strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM"); - } - return 0; -} - -static int output_type_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - ucontrol->value.enumerated.item[0] = kcontrol->private_value; - return 0; -} - -static int output_type_put(struct snd_kcontrol *kcontrol, - struct 
snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_card *card = kcontrol->private_data; - struct snd_soc_dapm_context *dapm = &card->dapm; - unsigned int val = (ucontrol->value.enumerated.item[0] != 0); - char *differential = "Audio Out Differential"; - char *stereo = "Audio Out Stereo"; - - if (kcontrol->private_value == val) - return 0; - kcontrol->private_value = val; - snd_soc_dapm_disable_pin(dapm, val ? differential : stereo); - snd_soc_dapm_sync(dapm); - snd_soc_dapm_enable_pin(dapm, val ? stereo : differential); - snd_soc_dapm_sync(dapm); - - return 1; -} - -static const struct snd_kcontrol_new audio_out_mux = { - .iface = SNDRV_CTL_ELEM_IFACE_MIXER, - .name = "Master Output Mux", - .index = 0, - .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, - .info = output_type_info, - .get = output_type_get, - .put = output_type_put, - .private_value = 1 /* default to stereo */ -}; - -/* Logic for a aic3x as connected on the s6105 ip camera ref design */ -static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd) -{ - struct snd_soc_card *card = rtd->card; - - /* must correspond to audio_out_mux.private_value initializer */ - snd_soc_dapm_disable_pin(&card->dapm, "Audio Out Differential"); - - snd_ctl_add(card->snd_card, snd_ctl_new1(&audio_out_mux, card)); - - return 0; -} - -/* s6105 digital audio interface glue - connects codec <--> CPU */ -static struct snd_soc_dai_link s6105_dai = { - .name = "TLV320AIC31", - .stream_name = "AIC31", - .cpu_dai_name = "s6000-i2s", - .codec_dai_name = "tlv320aic3x-hifi", - .platform_name = "s6000-pcm-audio", - .codec_name = "tlv320aic3x-codec.0-001a", - .init = s6105_aic3x_init, - .ops = &s6105_ops, -}; - -/* s6105 audio machine driver */ -static struct snd_soc_card snd_soc_card_s6105 = { - .name = "Stretch IP Camera", - .owner = THIS_MODULE, - .dai_link = &s6105_dai, - .num_links = 1, - - .dapm_widgets = aic3x_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets), - .dapm_routes = audio_map, - .num_dapm_routes = ARRAY_SIZE(audio_map), - .fully_routed = true, -}; - -static struct s6000_snd_platform_data s6105_snd_data __initdata = { - .wide = 0, - .channel_in = 0, - .channel_out = 1, - .lines_in = 1, - .lines_out = 1, - .same_rate = 1, -}; - -static struct platform_device *s6105_snd_device; - -/* temporary i2c device creation until this can be moved into the machine - * support file. 
-*/ -static struct i2c_board_info i2c_device[] = { - { I2C_BOARD_INFO("tlv320aic33", 0x18), } -}; - -static int __init s6105_init(void) -{ - int ret; - - i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device)); - - s6105_snd_device = platform_device_alloc("soc-audio", -1); - if (!s6105_snd_device) - return -ENOMEM; - - platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105); - platform_device_add_data(s6105_snd_device, &s6105_snd_data, - sizeof(s6105_snd_data)); - - ret = platform_device_add(s6105_snd_device); - if (ret) - platform_device_put(s6105_snd_device); - - return ret; -} - -static void __exit s6105_exit(void) -{ - platform_device_unregister(s6105_snd_device); -} - -module_init(s6105_init); -module_exit(s6105_exit); - -MODULE_AUTHOR("Daniel Gloeckner"); -MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver"); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c index 0acf5d0eed5..72118a77dd5 100644 --- a/sound/soc/samsung/snow.c +++ b/sound/soc/samsung/snow.c @@ -110,6 +110,7 @@ static const struct of_device_id snow_of_match[] = { { .compatible = "google,snow-audio-max98095", }, {}, }; +MODULE_DEVICE_TABLE(of, snow_of_match); static struct platform_driver snow_driver = { .driver = { diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 66fddec9543..88e5df474cc 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -1711,8 +1711,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = { static struct snd_pcm_hardware fsi_pcm_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_PAUSE, + SNDRV_PCM_INFO_MMAP_VALID, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 32, .period_bytes_max = 8192, diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 1922ec57d10..70042197f9e 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -886,8 +886,7 @@ static int rsnd_dai_probe(struct platform_device *pdev, static struct snd_pcm_hardware rsnd_pcm_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_PAUSE, + SNDRV_PCM_INFO_MMAP_VALID, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 32, .period_bytes_max = 8192, diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 4c8f8a23a0e..b60ff56ebc0 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -884,7 +884,7 @@ static struct snd_soc_dai *snd_soc_find_dai( list_for_each_entry(component, &component_list, list) { if (dlc->of_node && component->dev->of_node != dlc->of_node) continue; - if (dlc->name && strcmp(dev_name(component->dev), dlc->name)) + if (dlc->name && strcmp(component->name, dlc->name)) continue; list_for_each_entry(dai, &component->dai_list, list) { if (dlc->dai_name && strcmp(dai->name, dlc->dai_name)) diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 002311afdea..57277dd79e1 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1522,13 +1522,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream) dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture); } +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd); + +/* Set FE's runtime_update state; the state is protected via PCM stream lock + * for avoiding the race with trigger callback. + * If the state is unset and a trigger is pending while the previous operation, + * process the pending trigger action here. 
+ */ +static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe, + int stream, enum snd_soc_dpcm_update state) +{ + struct snd_pcm_substream *substream = + snd_soc_dpcm_get_substream(fe, stream); + + snd_pcm_stream_lock_irq(substream); + if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) { + dpcm_fe_dai_do_trigger(substream, + fe->dpcm[stream].trigger_pending - 1); + fe->dpcm[stream].trigger_pending = 0; + } + fe->dpcm[stream].runtime_update = state; + snd_pcm_stream_unlock_irq(substream); +} + static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) { struct snd_soc_pcm_runtime *fe = fe_substream->private_data; struct snd_pcm_runtime *runtime = fe_substream->runtime; int stream = fe_substream->stream, ret = 0; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); ret = dpcm_be_dai_startup(fe, fe_substream->stream); if (ret < 0) { @@ -1550,13 +1573,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) dpcm_set_fe_runtime(fe_substream); snd_pcm_limit_hw_rates(runtime); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return 0; unwind: dpcm_be_dai_startup_unwind(fe, fe_substream->stream); be_err: - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return ret; } @@ -1603,7 +1626,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *fe = substream->private_data; int stream = substream->stream; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); /* shutdown the BEs */ dpcm_be_dai_shutdown(fe, substream->stream); @@ -1617,7 +1640,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP); fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return 0; } @@ -1665,7 +1688,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) int err, stream = substream->stream; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name); @@ -1680,7 +1703,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) err = dpcm_be_dai_hw_free(fe, stream); fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); mutex_unlock(&fe->card->mutex); return 0; @@ -1773,7 +1796,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, int ret, stream = substream->stream; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); memcpy(&fe->dpcm[substream->stream].hw_params, params, sizeof(struct snd_pcm_hw_params)); @@ -1796,7 +1819,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; out: - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, 
stream, SND_SOC_DPCM_UPDATE_NO); mutex_unlock(&fe->card->mutex); return ret; } @@ -1910,7 +1933,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, } EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); -static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *fe = substream->private_data; int stream = substream->stream, ret; @@ -1984,6 +2007,23 @@ out: return ret; } +static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_soc_pcm_runtime *fe = substream->private_data; + int stream = substream->stream; + + /* if FE's runtime_update is already set, we're in race; + * process this trigger later at exit + */ + if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) { + fe->dpcm[stream].trigger_pending = cmd + 1; + return 0; /* delayed, assuming it's successful */ + } + + /* we're alone, let's trigger */ + return dpcm_fe_dai_do_trigger(substream, cmd); +} + int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_soc_dpcm *dpcm; @@ -2027,7 +2067,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); /* there is no point preparing this FE if there are no BEs */ if (list_empty(&fe->dpcm[stream].be_clients)) { @@ -2054,7 +2094,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; out: - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); mutex_unlock(&fe->card->mutex); return ret; @@ -2201,11 +2241,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream) { int ret; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); ret = dpcm_run_update_startup(fe, stream); if (ret < 0) dev_err(fe->dev, "ASoC: failed to startup some BEs\n"); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return ret; } @@ -2214,11 +2254,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream) { int ret; - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); ret = dpcm_run_update_shutdown(fe, stream); if (ret < 0) dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n"); - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return ret; } diff --git a/sound/usb/card.c b/sound/usb/card.c index 7ecd0e8a5c5..f61ebb17cc6 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -591,18 +591,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, { struct snd_card *card; struct list_head *p; + bool was_shutdown; if (chip == (void *)-1L) return; card = chip->card; down_write(&chip->shutdown_rwsem); + was_shutdown = chip->shutdown; chip->shutdown = 1; up_write(&chip->shutdown_rwsem); mutex_lock(®ister_mutex); - chip->num_interfaces--; - if (chip->num_interfaces <= 0) { + if (!was_shutdown) { struct snd_usb_endpoint *ep; snd_card_disconnect(card); @@ -622,6 +623,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, list_for_each(p, 
&chip->mixer_list) { snd_usb_mixer_disconnect(p); } + } + + chip->num_interfaces--; + if (chip->num_interfaces <= 0) { usb_chip[chip->index] = NULL; mutex_unlock(®ister_mutex); snd_card_free_when_closed(card); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 2e4a9dbc51f..6e354d32685 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2033,10 +2033,11 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, cval->res = 1; cval->initialized = 1; - if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) - cval->control = UAC2_CX_CLOCK_SELECTOR; - else + if (state->mixer->protocol == UAC_VERSION_1) cval->control = 0; + else /* UAC_VERSION_2 */ + cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ? + UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR; namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL); if (!namelist) { diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index f119a41ed9a..8c9bf4b7aaf 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, if (mixer->chip->shutdown) ret = -ENODEV; else - ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, + ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, wIndex, - &tmp, sizeof(tmp), 1000); + &tmp, sizeof(tmp)); up_read(&mixer->chip->shutdown_rwsem); if (ret < 0) { @@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, return changed; } +static void kctl_private_value_free(struct snd_kcontrol *kctl) +{ + kfree((void *)kctl->private_value); +} + static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, int validx, int bUnitID) { @@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, return -ENOMEM; } + kctl->private_free = kctl_private_value_free; err = snd_ctl_add(mixer->chip->card, kctl); if (err < 0) return err; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 223c47b33ba..c657752a420 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL), } }, { + USB_DEVICE(0x0499, 0x1509), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + /* .vendor_name = "Yamaha", */ + /* .product_name = "Steinberg UR22", */ + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 3, + .type = QUIRK_MIDI_YAMAHA + }, + { + .ifnum = 4, + .type = QUIRK_IGNORE_INTERFACE + }, + { + .ifnum = -1 + } + } + } +}, +{ USB_DEVICE(0x0499, 0x150a), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { /* .vendor_name = "Yamaha", */ diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index d2aa45a8d89..60dfe0d2877 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1146,6 +1146,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); + + /* Marantz/Denon devices with USB DAC functionality need a delay + * after each class compliant request + */ + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) { + + switch 
(le16_to_cpu(dev->descriptor.idProduct)) { + case 0x3005: /* Marantz HD-DAC1 */ + case 0x3006: /* Marantz SA-14S1 */ + mdelay(20); + break; + } + } } /* @@ -1179,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, /* iFi Audio micro/nano iDSD */ case USB_ID(0x20b1, 0x3008): if (fp->altsetting == 2) - return SNDRV_PCM_FMTBIT_DSD_U32_LE; + return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ case USB_ID(0x20b1, 0x2009): if (fp->altsetting == 3) - return SNDRV_PCM_FMTBIT_DSD_U32_LE; + return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; default: break; diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 8c5c11ca8c5..25114c9a680 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -1142,6 +1142,11 @@ static int data_init(int argc, const char **argv) int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { + int ret = hists__init(); + + if (ret < 0) + return ret; + perf_config(perf_default_config, NULL); argc = parse_options(argc, argv, options, diff_usage, 0); diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 04412b4770a..7af26acf06d 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -375,7 +375,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) OPT_CALLBACK('x', "exec", NULL, "executable|path", "target executable name or path", opt_set_target), OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, - "Disable symbol demangling"), + "Enable symbol demangling"), OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, "Enable kernel symbol demangling"), OPT_END() diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h index 937e4324ad9..a3b13d7dc1d 100644 --- a/tools/perf/perf-sys.h +++ b/tools/perf/perf-sys.h @@ -13,7 +13,7 @@ #define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#define CPUINFO_PROC "model name" +#define CPUINFO_PROC {"model name"} #ifndef __NR_perf_event_open # define __NR_perf_event_open 336 #endif @@ -30,7 +30,7 @@ #define wmb() asm volatile("sfence" ::: "memory") #define rmb() asm volatile("lfence" ::: "memory") #define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#define CPUINFO_PROC "model name" +#define CPUINFO_PROC {"model name"} #ifndef __NR_perf_event_open # define __NR_perf_event_open 298 #endif @@ -47,14 +47,14 @@ #define mb() asm volatile ("sync" ::: "memory") #define wmb() asm volatile ("sync" ::: "memory") #define rmb() asm volatile ("sync" ::: "memory") -#define CPUINFO_PROC "cpu" +#define CPUINFO_PROC {"cpu"} #endif #ifdef __s390__ #define mb() asm volatile("bcr 15,0" ::: "memory") #define wmb() asm volatile("bcr 15,0" ::: "memory") #define rmb() asm volatile("bcr 15,0" ::: "memory") -#define CPUINFO_PROC "vendor_id" +#define CPUINFO_PROC {"vendor_id"} #endif #ifdef __sh__ @@ -67,14 +67,14 @@ # define wmb() asm volatile("" ::: "memory") # define rmb() asm volatile("" ::: "memory") #endif -#define CPUINFO_PROC "cpu type" +#define CPUINFO_PROC {"cpu type"} #endif #ifdef __hppa__ #define mb() asm volatile("" ::: "memory") #define wmb() asm volatile("" ::: "memory") #define rmb() asm volatile("" ::: "memory") -#define CPUINFO_PROC "cpu" +#define CPUINFO_PROC {"cpu"} #endif #ifdef __sparc__ @@ -87,14 +87,14 @@ #endif #define wmb() asm volatile("":::"memory") #define rmb() asm volatile("":::"memory") -#define CPUINFO_PROC 
"cpu" +#define CPUINFO_PROC {"cpu"} #endif #ifdef __alpha__ #define mb() asm volatile("mb" ::: "memory") #define wmb() asm volatile("wmb" ::: "memory") #define rmb() asm volatile("mb" ::: "memory") -#define CPUINFO_PROC "cpu model" +#define CPUINFO_PROC {"cpu model"} #endif #ifdef __ia64__ @@ -102,7 +102,7 @@ #define wmb() asm volatile ("mf" ::: "memory") #define rmb() asm volatile ("mf" ::: "memory") #define cpu_relax() asm volatile ("hint @pause" ::: "memory") -#define CPUINFO_PROC "model name" +#define CPUINFO_PROC {"model name"} #endif #ifdef __arm__ @@ -113,7 +113,7 @@ #define mb() ((void(*)(void))0xffff0fa0)() #define wmb() ((void(*)(void))0xffff0fa0)() #define rmb() ((void(*)(void))0xffff0fa0)() -#define CPUINFO_PROC "Processor" +#define CPUINFO_PROC {"model name", "Processor"} #endif #ifdef __aarch64__ @@ -133,28 +133,28 @@ : "memory") #define wmb() mb() #define rmb() mb() -#define CPUINFO_PROC "cpu model" +#define CPUINFO_PROC {"cpu model"} #endif #ifdef __arc__ #define mb() asm volatile("" ::: "memory") #define wmb() asm volatile("" ::: "memory") #define rmb() asm volatile("" ::: "memory") -#define CPUINFO_PROC "Processor" +#define CPUINFO_PROC {"Processor"} #endif #ifdef __metag__ #define mb() asm volatile("" ::: "memory") #define wmb() asm volatile("" ::: "memory") #define rmb() asm volatile("" ::: "memory") -#define CPUINFO_PROC "CPU" +#define CPUINFO_PROC {"CPU"} #endif #ifdef __xtensa__ #define mb() asm volatile("memw" ::: "memory") #define wmb() asm volatile("memw" ::: "memory") #define rmb() asm volatile("" ::: "memory") -#define CPUINFO_PROC "core ID" +#define CPUINFO_PROC {"core ID"} #endif #ifdef __tile__ @@ -162,7 +162,7 @@ #define wmb() asm volatile ("mf" ::: "memory") #define rmb() asm volatile ("mf" ::: "memory") #define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory") -#define CPUINFO_PROC "model name" +#define CPUINFO_PROC {"model name"} #endif #define barrier() asm volatile ("" ::: "memory") diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ce0de00399d..26f5b2fe5dc 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -579,16 +579,12 @@ static int write_version(int fd, struct perf_header *h __maybe_unused, return do_write_string(fd, perf_version_string); } -static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, - struct perf_evlist *evlist __maybe_unused) +static int __write_cpudesc(int fd, const char *cpuinfo_proc) { -#ifndef CPUINFO_PROC -#define CPUINFO_PROC NULL -#endif FILE *file; char *buf = NULL; char *s, *p; - const char *search = CPUINFO_PROC; + const char *search = cpuinfo_proc; size_t len = 0; int ret = -1; @@ -638,6 +634,25 @@ done: return ret; } +static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ +#ifndef CPUINFO_PROC +#define CPUINFO_PROC {"model name", } +#endif + const char *cpuinfo_procs[] = CPUINFO_PROC; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { + int ret; + ret = __write_cpudesc(fd, cpuinfo_procs[i]); + if (ret >= 0) + return ret; + } + return -1; +} + + static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist __maybe_unused) { diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 4906cd81cb5..9402885a77f 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -373,6 +373,9 @@ struct sort_entry sort_cpu = { static int64_t sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || 
!right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return _sort__dso_cmp(left->branch_info->from.map, right->branch_info->from.map); } @@ -380,13 +383,19 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return _hist_entry__dso_snprintf(he->branch_info->from.map, - bf, size, width); + if (he->branch_info) + return _hist_entry__dso_snprintf(he->branch_info->from.map, + bf, size, width); + else + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); } static int64_t sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return _sort__dso_cmp(left->branch_info->to.map, right->branch_info->to.map); } @@ -394,8 +403,11 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return _hist_entry__dso_snprintf(he->branch_info->to.map, - bf, size, width); + if (he->branch_info) + return _hist_entry__dso_snprintf(he->branch_info->to.map, + bf, size, width); + else + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); } static int64_t @@ -404,6 +416,12 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) struct addr_map_symbol *from_l = &left->branch_info->from; struct addr_map_symbol *from_r = &right->branch_info->from; + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + from_l = &left->branch_info->from; + from_r = &right->branch_info->from; + if (!from_l->sym && !from_r->sym) return _sort__addr_cmp(from_l->addr, from_r->addr); @@ -413,8 +431,13 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) static int64_t sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) { - struct addr_map_symbol *to_l = &left->branch_info->to; - struct addr_map_symbol *to_r = &right->branch_info->to; + struct addr_map_symbol *to_l, *to_r; + + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + to_l = &left->branch_info->to; + to_r = &right->branch_info->to; if (!to_l->sym && !to_r->sym) return _sort__addr_cmp(to_l->addr, to_r->addr); @@ -425,19 +448,27 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - struct addr_map_symbol *from = &he->branch_info->from; - return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, - he->level, bf, size, width); + if (he->branch_info) { + struct addr_map_symbol *from = &he->branch_info->from; + return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, + he->level, bf, size, width); + } + + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); } static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - struct addr_map_symbol *to = &he->branch_info->to; - return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, - he->level, bf, size, width); + if (he->branch_info) { + struct addr_map_symbol *to = &he->branch_info->to; + return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, + he->level, bf, size, width); + } + + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); } 
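The branch-stack sort hunks above guard every comparator and snprintf helper with a branch_info NULL check and fall back to cmp_null(), which is defined elsewhere in tools/perf/util/sort.c and not shown in this diff. As a rough sketch of the behaviour those call sites rely on (an assumption about the helper's shape, not necessarily the exact perf implementation), a NULL-aware comparator treats two missing values as equal and sorts a missing value consistently against a present one:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: stands in for perf's cmp_null() helper. */
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;	/* neither entry carries branch_info: equal */
	if (!l)
		return -1;	/* only the left entry is missing it */
	return 1;		/* only the right entry is missing it */
}

int main(void)
{
	int present = 0;

	printf("%lld %lld %lld\n",
	       (long long)cmp_null(NULL, NULL),		/* 0  */
	       (long long)cmp_null(NULL, &present),	/* -1 */
	       (long long)cmp_null(&present, NULL));	/* 1  */
	return 0;
}

With an ordering like this, entries without branch data group together instead of crashing the sort, and the snprintf helpers print the matching "N/A" placeholder.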
struct sort_entry sort_dso_from = { @@ -471,11 +502,13 @@ struct sort_entry sort_sym_to = { static int64_t sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) { - const unsigned char mp = left->branch_info->flags.mispred != - right->branch_info->flags.mispred; - const unsigned char p = left->branch_info->flags.predicted != - right->branch_info->flags.predicted; + unsigned char mp, p; + + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; + p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; return mp || p; } @@ -483,10 +516,12 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width){ static const char *out = "N/A"; - if (he->branch_info->flags.predicted) - out = "N"; - else if (he->branch_info->flags.mispred) - out = "Y"; + if (he->branch_info) { + if (he->branch_info->flags.predicted) + out = "N"; + else if (he->branch_info->flags.mispred) + out = "Y"; + } return repsep_snprintf(bf, size, "%-*.*s", width, width, out); } @@ -989,6 +1024,9 @@ struct sort_entry sort_mem_dcacheline = { static int64_t sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return left->branch_info->flags.abort != right->branch_info->flags.abort; } @@ -996,10 +1034,15 @@ sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - static const char *out = "."; + static const char *out = "N/A"; + + if (he->branch_info) { + if (he->branch_info->flags.abort) + out = "A"; + else + out = "."; + } - if (he->branch_info->flags.abort) - out = "A"; return repsep_snprintf(bf, size, "%-*s", width, out); } @@ -1013,6 +1056,9 @@ struct sort_entry sort_abort = { static int64_t sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return left->branch_info->flags.in_tx != right->branch_info->flags.in_tx; } @@ -1020,10 +1066,14 @@ sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - static const char *out = "."; + static const char *out = "N/A"; - if (he->branch_info->flags.in_tx) - out = "T"; + if (he->branch_info) { + if (he->branch_info->flags.in_tx) + out = "T"; + else + out = "."; + } return repsep_snprintf(bf, size, "%-*s", width, out); } diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 2b7b2d91c01..c41411726c7 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -117,6 +117,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp, if (!new) return -ENOMEM; list_add(&new->list, &thread->comm_list); + + if (exec) + unwind__flush_access(thread); } thread->comm_set = true; diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index e060386165c..4d45c0dfe34 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -539,11 +539,23 @@ int unwind__prepare_access(struct thread *thread) return -ENOMEM; } + unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL); thread__set_priv(thread, addr_space); 
return 0; } +void unwind__flush_access(struct thread *thread) +{ + unw_addr_space_t addr_space; + + if (callchain_param.record_mode != CALLCHAIN_DWARF) + return; + + addr_space = thread__priv(thread); + unw_flush_cache(addr_space, 0, 0); +} + void unwind__finish_access(struct thread *thread) { unw_addr_space_t addr_space; diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h index c17c4855bdb..f50b737235e 100644 --- a/tools/perf/util/unwind.h +++ b/tools/perf/util/unwind.h @@ -23,6 +23,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, #ifdef HAVE_LIBUNWIND_SUPPORT int libunwind__arch_reg_id(int regnum); int unwind__prepare_access(struct thread *thread); +void unwind__flush_access(struct thread *thread); void unwind__finish_access(struct thread *thread); #else static inline int unwind__prepare_access(struct thread *thread __maybe_unused) @@ -30,6 +31,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused) return 0; } +static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} static inline void unwind__finish_access(struct thread *thread __maybe_unused) {} #endif #else @@ -49,6 +51,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused) return 0; } +static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} static inline void unwind__finish_access(struct thread *thread __maybe_unused) {} #endif /* HAVE_DWARF_UNWIND_SUPPORT */ #endif /* __UNWIND_H */ diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c index 60b58cd1841..7ccb073f831 100644 --- a/tools/power/acpi/os_specific/service_layers/osunixxf.c +++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c @@ -122,6 +122,14 @@ static void os_enter_line_edit_mode(void) { struct termios local_term_attributes; + term_attributes_were_set = 0; + + /* STDIN must be a terminal */ + + if (!isatty(STDIN_FILENO)) { + return; + } + /* Get and keep the original attributes */ if (tcgetattr(STDIN_FILENO, &original_term_attributes)) { diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c index 53cee781e24..24d32968802 100644 --- a/tools/power/acpi/tools/acpidump/apdump.c +++ b/tools/power/acpi/tools/acpidump/apdump.c @@ -146,7 +146,7 @@ u32 ap_get_table_length(struct acpi_table_header *table) if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table); - return (rsdp->length); + return (acpi_tb_get_rsdp_length(rsdp)); } /* Normal ACPI table */ diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index a8f81c78285..515247601df 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -82,7 +82,7 @@ parse_opts() { # opts } # Parameters -DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' '` +DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' ' | head -1` TRACING_DIR=$DEBUGFS_DIR/tracing TOP_DIR=`absdir $0` TEST_DIR=$TOP_DIR/test.d diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 57b9c2b7c4f..6f6733331d9 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring) struct tpacket2_hdr *header = ring; int count = 0; - while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) { + while (count < 
RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) { count++; header = ring + (count * getpagesize()); } diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 3aaca49de32..aacdb59f30d 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1933,7 +1933,7 @@ out: int kvm_vgic_create(struct kvm *kvm) { - int i, vcpu_lock_idx = -1, ret = 0; + int i, vcpu_lock_idx = -1, ret; struct kvm_vcpu *vcpu; mutex_lock(&kvm->lock); @@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm) * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure * that no other VCPUs are run while we create the vgic. */ + ret = -EBUSY; kvm_for_each_vcpu(i, vcpu, kvm) { if (!mutex_trylock(&vcpu->mutex)) goto out_unlock; @@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm) } kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu->arch.has_run_once) { - ret = -EBUSY; + if (vcpu->arch.has_run_once) goto out_unlock; - } } + ret = 0; spin_lock_init(&kvm->arch.vgic.lock); kvm->arch.vgic.in_kernel = true; diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index e51d9f9b995..c1e6ae989a4 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, - unsigned long size) + unsigned long npages) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(slot, gfn); - end_gfn = gfn + (size >> PAGE_SHIFT); + end_gfn = gfn + npages; gfn += 1; if (is_error_noslot_pfn(pfn)) @@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. */ - pfn = kvm_pin_pages(slot, gfn, page_size); + pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); if (is_error_noslot_pfn(pfn)) { gfn += 1; continue; @@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); - kvm_unpin_pages(kvm, pfn, page_size); + kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); goto unmap_pages; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 384eaa7b02f..3cee7b16705 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting); static bool largepages_enabled = true; -bool kvm_is_mmio_pfn(pfn_t pfn) +bool kvm_is_reserved_pfn(pfn_t pfn) { if (pfn_valid(pfn)) - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); + return PageReserved(pfn_to_page(pfn)); return true; } @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, else if ((vma->vm_flags & VM_PFNMAP)) { pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - BUG_ON(!kvm_is_mmio_pfn(pfn)); + BUG_ON(!kvm_is_reserved_pfn(pfn)); } else { if (async && vma_is_valid(vma, write_fault)) *async = true; @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn) if (is_error_noslot_pfn(pfn)) return KVM_ERR_PTR_BAD_PAGE; - if (kvm_is_mmio_pfn(pfn)) { + if (kvm_is_reserved_pfn(pfn)) { WARN_ON(1); return KVM_ERR_PTR_BAD_PAGE; } @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean); void kvm_release_pfn_clean(pfn_t pfn) { - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn)) + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) put_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); @@ -1477,7 +1477,7 @@ static void 
kvm_release_pfn_dirty(pfn_t pfn) void kvm_set_pfn_dirty(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) { + if (!kvm_is_reserved_pfn(pfn)) { struct page *page = pfn_to_page(pfn); if (!PageReserved(page)) SetPageDirty(page); @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); void kvm_set_pfn_accessed(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) + if (!kvm_is_reserved_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); void kvm_get_pfn(pfn_t pfn) { - if (!kvm_is_mmio_pfn(pfn)) + if (!kvm_is_reserved_pfn(pfn)) get_page(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_get_pfn); @@ -2354,6 +2354,12 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) return 0; } +void kvm_unregister_device_ops(u32 type) +{ + if (kvm_device_ops_table[type] != NULL) + kvm_device_ops_table[type] = NULL; +} + static int kvm_ioctl_create_device(struct kvm *kvm, struct kvm_create_device *cd) { @@ -3328,5 +3334,6 @@ void kvm_exit(void) kvm_arch_exit(); kvm_irqfd_exit(); free_cpumask_var(cpus_hardware_enabled); + kvm_vfio_ops_exit(); } EXPORT_SYMBOL_GPL(kvm_exit); diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c index 281e7cf2b8e..620e37f741b 100644 --- a/virt/kvm/vfio.c +++ b/virt/kvm/vfio.c @@ -283,3 +283,8 @@ int kvm_vfio_ops_init(void) { return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO); } + +void kvm_vfio_ops_exit(void) +{ + kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO); +} diff --git a/virt/kvm/vfio.h b/virt/kvm/vfio.h index 92eac75d6b6..ab88c7dc051 100644 --- a/virt/kvm/vfio.h +++ b/virt/kvm/vfio.h @@ -3,11 +3,15 @@ #ifdef CONFIG_KVM_VFIO int kvm_vfio_ops_init(void); +void kvm_vfio_ops_exit(void); #else static inline int kvm_vfio_ops_init(void) { return 0; } +static inline void kvm_vfio_ops_exit(void) +{ +} #endif #endif
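The closing virt/kvm hunks pair the existing kvm_register_device_ops() with a new kvm_unregister_device_ops(), and kvm_exit() now calls kvm_vfio_ops_exit() so the VFIO slot in the device-ops table is cleared when the module goes away. A minimal standalone sketch of that register/unregister pairing (simplified stand-ins only, not the KVM sources):

#include <stdio.h>
#include <stddef.h>

#define DEV_TYPE_MAX	8
#define DEV_TYPE_VFIO	1	/* hypothetical type id for this sketch */

struct device_ops {
	const char *name;
};

/* Miniature of a device-ops table: one slot per device type. */
static struct device_ops *ops_table[DEV_TYPE_MAX];

static int register_ops(struct device_ops *ops, unsigned int type)
{
	if (type >= DEV_TYPE_MAX || ops_table[type] != NULL)
		return -1;		/* out of range or slot already taken */
	ops_table[type] = ops;
	return 0;
}

static void unregister_ops(unsigned int type)
{
	if (type < DEV_TYPE_MAX)
		ops_table[type] = NULL;	/* free the slot for a later re-registration */
}

int main(void)
{
	struct device_ops vfio_ops = { .name = "vfio" };

	if (register_ops(&vfio_ops, DEV_TYPE_VFIO))	/* module init */
		return 1;
	unregister_ops(DEV_TYPE_VFIO);			/* module exit */
	printf("slot cleared: %s\n", ops_table[DEV_TYPE_VFIO] ? "no" : "yes");
	return 0;
}

Without the unregister step, a module following this pattern would hit the "slot already taken" path if it were unloaded and loaded again; the new exit hook avoids that kind of asymmetry.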