From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by finch.gentoo.org (Postfix) with ESMTPS id 9256C1382C5 for ; Thu, 22 Mar 2018 12:58:38 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1]) by pigeon.gentoo.org (Postfix) with SMTP id 7DC1CE0855; Thu, 22 Mar 2018 12:58:37 +0000 (UTC)
Received: from smtp.gentoo.org (smtp.gentoo.org [140.211.166.183]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by pigeon.gentoo.org (Postfix) with ESMTPS id 130A3E0855 for ; Thu, 22 Mar 2018 12:58:35 +0000 (UTC)
Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by smtp.gentoo.org (Postfix) with ESMTPS id 7E7C2335C36 for ; Thu, 22 Mar 2018 12:58:34 +0000 (UTC)
Received: from localhost.localdomain (localhost [IPv6:::1]) by oystercatcher.gentoo.org (Postfix) with ESMTP id B23F524C for ; Thu, 22 Mar 2018 12:58:32 +0000 (UTC)
From: "Mike Pagano"
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano"
Message-ID: <1521723503.142a01c7a24d5d52764bfa9750c4adaa5454ea7f.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1088_linux-4.9.89.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: 142a01c7a24d5d52764bfa9750c4adaa5454ea7f
X-VCS-Branch: 4.9
Date: Thu, 22 Mar 2018 12:58:32 +0000 (UTC)
Precedence: bulk
List-Post: 
List-Help: 
List-Unsubscribe: 
List-Subscribe: 
List-Id: Gentoo Linux mail
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Archives-Salt: 0318d32d-3f0d-441a-9783-d5edc1c7f9e4
X-Archives-Hash: d6ea84176cfd2f92820d05798afae553

commit: 142a01c7a24d5d52764bfa9750c4adaa5454ea7f
Author: Mike Pagano gentoo org>
AuthorDate: Thu Mar 22 12:58:23 2018 +0000
Commit: Mike Pagano gentoo org>
CommitDate: Thu Mar 22 12:58:23 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=142a01c7

Linux patch 4.9.89

 0000_README             |    4 +
 1088_linux-4.9.89.patch | 6967 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6971 insertions(+)

diff --git a/0000_README b/0000_README
index 6fb8490..bb64a79 100644
--- a/0000_README
+++ b/0000_README
@@ -395,6 +395,10 @@ Patch: 1087_linux-4.9.88.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.88
 
+Patch: 1088_linux-4.9.89.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.89
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1088_linux-4.9.89.patch b/1088_linux-4.9.89.patch new file mode 100644 index 0000000..73289f2 --- /dev/null +++ b/1088_linux-4.9.89.patch @@ -0,0 +1,6967 @@ +diff --git a/Makefile b/Makefile +index 1512ebceffda..16dca98900e7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 88 ++SUBLEVEL = 89 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts +index 30e2f8770aaf..42b62f54e4b7 100644 +--- a/arch/arm/boot/dts/am335x-pepper.dts ++++ b/arch/arm/boot/dts/am335x-pepper.dts +@@ -139,7 +139,7 @@ + &audio_codec { + status = "okay"; + +- gpio-reset = <&gpio1 16 GPIO_ACTIVE_LOW>; ++ reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; + AVDD-supply = <&ldo3_reg>; + IOVDD-supply = <&ldo3_reg>; + DRVDD-supply = <&ldo3_reg>; +diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi +index 12c981e51134..9a0599f711ff 100644 +--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi ++++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi +@@ -1,6 +1,6 @@ + / { + aliases { +- ethernet = ðernet; ++ ethernet0 = ðernet; + }; + }; + +diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi +index 3f0a56ebcf1f..dc7ae776db5f 100644 +--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi ++++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi +@@ -1,6 +1,6 @@ + / { + aliases { +- ethernet = ðernet; ++ ethernet0 = ðernet; + }; + }; + +diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts +index 41ecd6d465a7..75a60633efff 100644 +--- a/arch/arm/boot/dts/exynos4412-trats2.dts ++++ b/arch/arm/boot/dts/exynos4412-trats2.dts +@@ -408,7 +408,7 @@ + reg = <0>; + vdd3-supply = <&lcd_vdd3_reg>; + vci-supply = <&ldo25_reg>; +- reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; ++ reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>; + power-on-delay= <50>; + reset-delay = <100>; + init-delay = <100>; +diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts +index 10d088df0c35..4a962a26482d 100644 +--- a/arch/arm/boot/dts/moxart-uc7112lx.dts ++++ b/arch/arm/boot/dts/moxart-uc7112lx.dts +@@ -6,7 +6,7 @@ + */ + + /dts-v1/; +-/include/ "moxart.dtsi" ++#include "moxart.dtsi" + + / { + model = "MOXA UC-7112-LX"; +diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi +index 1fd27ed65a01..64f2f44235d0 100644 +--- a/arch/arm/boot/dts/moxart.dtsi ++++ b/arch/arm/boot/dts/moxart.dtsi +@@ -6,6 +6,7 @@ + */ + + /include/ "skeleton.dtsi" ++#include + + / { + compatible = "moxa,moxart"; +@@ -36,8 +37,8 @@ + ranges; + + intc: interrupt-controller@98800000 { +- compatible = "moxa,moxart-ic"; +- reg = <0x98800000 0x38>; ++ compatible = "moxa,moxart-ic", "faraday,ftintc010"; ++ reg = <0x98800000 0x100>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-mask = <0x00080000>; +@@ -59,7 +60,7 @@ + timer: timer@98400000 { + compatible = "moxa,moxart-timer"; + reg = <0x98400000 0x42>; +- interrupts = <19 1>; ++ interrupts = <19 IRQ_TYPE_EDGE_FALLING>; + clocks = <&clk_apb>; + }; + +@@ -80,7 +81,7 @@ + dma: dma@90500000 { + compatible = "moxa,moxart-dma"; + reg = <0x90500080 0x40>; +- interrupts = <24 0>; ++ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; + #dma-cells = <1>; + }; + +@@ -93,7 +94,7 @@ + sdhci: sdhci@98e00000 { + compatible = "moxa,moxart-sdhci"; + reg = <0x98e00000 0x5C>; +- interrupts = <5 0>; ++ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_apb>; + 
dmas = <&dma 5>, + <&dma 5>; +@@ -120,7 +121,7 @@ + mac0: mac@90900000 { + compatible = "moxa,moxart-mac"; + reg = <0x90900000 0x90>; +- interrupts = <25 0>; ++ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; + phy-handle = <ðphy0>; + phy-mode = "mii"; + status = "disabled"; +@@ -129,7 +130,7 @@ + mac1: mac@92000000 { + compatible = "moxa,moxart-mac"; + reg = <0x92000000 0x90>; +- interrupts = <27 0>; ++ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; + phy-handle = <ðphy1>; + phy-mode = "mii"; + status = "disabled"; +@@ -138,7 +139,7 @@ + uart0: uart@98200000 { + compatible = "ns16550a"; + reg = <0x98200000 0x20>; +- interrupts = <31 8>; ++ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <14745600>; +diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts +index 4d448f145ed1..6003b29c0fc0 100644 +--- a/arch/arm/boot/dts/omap3-n900.dts ++++ b/arch/arm/boot/dts/omap3-n900.dts +@@ -510,7 +510,7 @@ + tlv320aic3x: tlv320aic3x@18 { + compatible = "ti,tlv320aic3x"; + reg = <0x18>; +- gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */ ++ reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */ + ai3x-gpio-func = < + 0 /* AIC3X_GPIO1_FUNC_DISABLED */ + 5 /* AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT */ +@@ -527,7 +527,7 @@ + tlv320aic3x_aux: tlv320aic3x@19 { + compatible = "ti,tlv320aic3x"; + reg = <0x19>; +- gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */ ++ reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */ + + AVDD-supply = <&vmmc2>; + DRVDD-supply = <&vmmc2>; +diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi +index fb9ef9ca120e..959e3edf367b 100644 +--- a/arch/arm/boot/dts/r7s72100.dtsi ++++ b/arch/arm/boot/dts/r7s72100.dtsi +@@ -112,7 +112,7 @@ + #clock-cells = <1>; + compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks"; + reg = <0xfcfe0430 4>; +- clocks = <&p0_clk>; ++ clocks = <&b_clk>; + clock-indices = ; + clock-output-names = "ether"; + }; +diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi +index b6c6410ca384..262a51205aee 100644 +--- a/arch/arm/boot/dts/r8a7790.dtsi ++++ b/arch/arm/boot/dts/r8a7790.dtsi +@@ -1437,8 +1437,11 @@ + compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks"; + reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; + clocks = <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, ++ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, + <&p_clk>, + <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, + <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, +diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts +index f8a7d090fd01..12841e9bab98 100644 +--- a/arch/arm/boot/dts/r8a7791-koelsch.dts ++++ b/arch/arm/boot/dts/r8a7791-koelsch.dts +@@ -279,7 +279,7 @@ + x2_clk: x2-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; +- clock-frequency = <148500000>; ++ clock-frequency = <74250000>; + }; + + x13_clk: x13-clock { +diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi +index 162b55c665a3..59405ebdce01 100644 +--- a/arch/arm/boot/dts/r8a7791.dtsi ++++ 
b/arch/arm/boot/dts/r8a7791.dtsi +@@ -74,9 +74,8 @@ + next-level-cache = <&L2_CA15>; + }; + +- L2_CA15: cache-controller@0 { ++ L2_CA15: cache-controller-0 { + compatible = "cache"; +- reg = <0>; + power-domains = <&sysc R8A7791_PD_CA15_SCU>; + cache-unified; + cache-level = <2>; +@@ -1438,8 +1437,11 @@ + compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks"; + reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; + clocks = <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, ++ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, + <&p_clk>, + <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, + <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, +diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi +index 713141d38b3e..0b50c6766867 100644 +--- a/arch/arm/boot/dts/r8a7792.dtsi ++++ b/arch/arm/boot/dts/r8a7792.dtsi +@@ -58,9 +58,8 @@ + next-level-cache = <&L2_CA15>; + }; + +- L2_CA15: cache-controller@0 { ++ L2_CA15: cache-controller-0 { + compatible = "cache"; +- reg = <0>; + cache-unified; + cache-level = <2>; + power-domains = <&sysc R8A7792_PD_CA15_SCU>; +diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi +index 8d02aacf2892..e9625cb3bbaa 100644 +--- a/arch/arm/boot/dts/r8a7793.dtsi ++++ b/arch/arm/boot/dts/r8a7793.dtsi +@@ -65,9 +65,8 @@ + power-domains = <&sysc R8A7793_PD_CA15_CPU1>; + }; + +- L2_CA15: cache-controller@0 { ++ L2_CA15: cache-controller-0 { + compatible = "cache"; +- reg = <0>; + power-domains = <&sysc R8A7793_PD_CA15_SCU>; + cache-unified; + cache-level = <2>; +@@ -1235,8 +1234,11 @@ + compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks"; + reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; + clocks = <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, +- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, ++ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>, ++ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>, + <&p_clk>, + <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>, + <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>, +diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts +index cf880ac06f4b..8874451fb914 100644 +--- a/arch/arm/boot/dts/r8a7794-silk.dts ++++ b/arch/arm/boot/dts/r8a7794-silk.dts +@@ -425,7 +425,7 @@ + status = "okay"; + + clocks = <&mstp7_clks R8A7794_CLK_DU0>, +- <&mstp7_clks R8A7794_CLK_DU0>, ++ <&mstp7_clks R8A7794_CLK_DU1>, + <&x2_clk>, <&x3_clk>; + clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1"; + +diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi +index 7e860d3737ff..d8f4ca85ed3f 100644 +--- a/arch/arm/boot/dts/r8a7794.dtsi ++++ b/arch/arm/boot/dts/r8a7794.dtsi +@@ -56,9 +56,8 @@ + next-level-cache = <&L2_CA7>; + }; + +- L2_CA7: cache-controller@0 { ++ L2_CA7: 
cache-controller-0 { + compatible = "cache"; +- reg = <0>; + power-domains = <&sysc R8A7794_PD_CA7_SCU>; + cache-unified; + cache-level = <2>; +@@ -917,7 +916,7 @@ + interrupts = , + ; + clocks = <&mstp7_clks R8A7794_CLK_DU0>, +- <&mstp7_clks R8A7794_CLK_DU0>; ++ <&mstp7_clks R8A7794_CLK_DU1>; + clock-names = "du.0", "du.1"; + status = "disabled"; + +@@ -1262,19 +1261,21 @@ + clocks = <&mp_clk>, <&hp_clk>, + <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>, + <&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, +- <&zx_clk>; ++ <&zx_clk>, <&zx_clk>; + #clock-cells = <1>; + clock-indices = < + R8A7794_CLK_EHCI R8A7794_CLK_HSUSB + R8A7794_CLK_HSCIF2 R8A7794_CLK_SCIF5 + R8A7794_CLK_SCIF4 R8A7794_CLK_HSCIF1 R8A7794_CLK_HSCIF0 + R8A7794_CLK_SCIF3 R8A7794_CLK_SCIF2 R8A7794_CLK_SCIF1 +- R8A7794_CLK_SCIF0 R8A7794_CLK_DU0 ++ R8A7794_CLK_SCIF0 ++ R8A7794_CLK_DU1 R8A7794_CLK_DU0 + >; + clock-output-names = + "ehci", "hsusb", + "hscif2", "scif5", "scif4", "hscif1", "hscif0", +- "scif3", "scif2", "scif1", "scif0", "du0"; ++ "scif3", "scif2", "scif1", "scif0", ++ "du1", "du0"; + }; + mstp8_clks: mstp8_clks@e6150990 { + compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks"; +diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig +index 79de828e49ad..e32b0550a338 100644 +--- a/arch/arm/configs/bcm2835_defconfig ++++ b/arch/arm/configs/bcm2835_defconfig +@@ -1,6 +1,5 @@ + # CONFIG_LOCALVERSION_AUTO is not set + CONFIG_SYSVIPC=y +-CONFIG_FHANDLE=y + CONFIG_NO_HZ=y + CONFIG_HIGH_RES_TIMERS=y + CONFIG_BSD_PROCESS_ACCT=y +@@ -32,6 +31,7 @@ CONFIG_PREEMPT_VOLUNTARY=y + CONFIG_AEABI=y + CONFIG_KSM=y + CONFIG_CLEANCACHE=y ++CONFIG_CMA=y + CONFIG_SECCOMP=y + CONFIG_KEXEC=y + CONFIG_CRASH_DUMP=y +@@ -52,6 +52,7 @@ CONFIG_MAC80211=y + CONFIG_DEVTMPFS=y + CONFIG_DEVTMPFS_MOUNT=y + # CONFIG_STANDALONE is not set ++CONFIG_DMA_CMA=y + CONFIG_SCSI=y + CONFIG_BLK_DEV_SD=y + CONFIG_SCSI_CONSTANTS=y +@@ -62,7 +63,6 @@ CONFIG_USB_NET_SMSC95XX=y + CONFIG_ZD1211RW=y + CONFIG_INPUT_EVDEV=y + # CONFIG_LEGACY_PTYS is not set +-# CONFIG_DEVKMEM is not set + CONFIG_SERIAL_AMBA_PL011=y + CONFIG_SERIAL_AMBA_PL011_CONSOLE=y + CONFIG_TTY_PRINTK=y +diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig +index a0e66d8200c5..403db76e3497 100644 +--- a/arch/arm/mach-bcm/Kconfig ++++ b/arch/arm/mach-bcm/Kconfig +@@ -199,6 +199,7 @@ config ARCH_BRCMSTB + select BRCMSTB_L2_IRQ + select BCM7120_L2_IRQ + select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE ++ select ZONE_DMA if ARM_LPAE + select SOC_BRCMSTB + select SOC_BUS + help +diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi +index 9217da983525..53d03cb144e4 100644 +--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi ++++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi +@@ -36,9 +36,8 @@ + enable-method = "psci"; + }; + +- L2_CA57: cache-controller@0 { ++ L2_CA57: cache-controller-0 { + compatible = "cache"; +- reg = <0>; + power-domains = <&sysc R8A7796_PD_CA57_SCU>; + cache-unified; + cache-level = <2>; +diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c +index d8227f289d7f..9a4aed652736 100644 +--- a/arch/mips/kernel/mips-r2-to-r6-emul.c ++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c +@@ -1096,10 +1096,20 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) + } + break; + +- case beql_op: +- case bnel_op: + case blezl_op: + case bgtzl_op: ++ /* ++ * For BLEZL and BGTZL, rt field must be set to 0. 
If this ++ * is not the case, this may be an encoding of a MIPS R6 ++ * instruction, so return to CPU execution if this occurs ++ */ ++ if (MIPSInst_RT(inst)) { ++ err = SIGILL; ++ break; ++ } ++ /* fall through */ ++ case beql_op: ++ case bnel_op: + if (delay_slot(regs)) { + err = SIGILL; + break; +@@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) + __this_cpu_write((mipsr2bremustats).bgezl, 0); + __this_cpu_write((mipsr2bremustats).bltzll, 0); + __this_cpu_write((mipsr2bremustats).bgezll, 0); ++ __this_cpu_write((mipsr2bremustats).bltzall, 0); ++ __this_cpu_write((mipsr2bremustats).bgezall, 0); + __this_cpu_write((mipsr2bremustats).bltzal, 0); + __this_cpu_write((mipsr2bremustats).bgezal, 0); + __this_cpu_write((mipsr2bremustats).beql, 0); +diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c +index 49a2e2226fee..248603739198 100644 +--- a/arch/mips/net/bpf_jit.c ++++ b/arch/mips/net/bpf_jit.c +@@ -526,7 +526,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) + u32 sflags, tmp_flags; + + /* Adjust the stack pointer */ +- emit_stack_offset(-align_sp(offset), ctx); ++ if (offset) ++ emit_stack_offset(-align_sp(offset), ctx); + + tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; + /* sflags is essentially a bitmap */ +@@ -578,7 +579,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx, + emit_load_stack_reg(r_ra, r_sp, real_off, ctx); + + /* Restore the sp and discard the scrach memory */ +- emit_stack_offset(align_sp(offset), ctx); ++ if (offset) ++ emit_stack_offset(align_sp(offset), ctx); + } + + static unsigned int get_stack_depth(struct jit_ctx *ctx) +@@ -625,8 +627,14 @@ static void build_prologue(struct jit_ctx *ctx) + if (ctx->flags & SEEN_X) + emit_jit_reg_move(r_X, r_zero, ctx); + +- /* Do not leak kernel data to userspace */ +- if (bpf_needs_clear_a(&ctx->skf->insns[0])) ++ /* ++ * Do not leak kernel data to userspace, we only need to clear ++ * r_A if it is ever used. In fact if it is never used, we ++ * will not save/restore it, so clearing it in this case would ++ * corrupt the state of the caller. ++ */ ++ if (bpf_needs_clear_a(&ctx->skf->insns[0]) && ++ (ctx->flags & SEEN_A)) + emit_jit_reg_move(r_A, r_zero, ctx); + } + +diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S +index 5d2e0c8d29c0..88a2075305d1 100644 +--- a/arch/mips/net/bpf_jit_asm.S ++++ b/arch/mips/net/bpf_jit_asm.S +@@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive) + is_offset_in_header(2, half) + /* Offset within header boundaries */ + PTR_ADDU t1, $r_skb_data, offset +- .set reorder +- lh $r_A, 0(t1) +- .set noreorder ++ lhu $r_A, 0(t1) + #ifdef CONFIG_CPU_LITTLE_ENDIAN + # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) +- wsbh t0, $r_A +- seh $r_A, t0 ++ wsbh $r_A, $r_A + # else +- sll t0, $r_A, 24 +- andi t1, $r_A, 0xff00 +- sra t0, t0, 16 +- srl t1, t1, 8 ++ sll t0, $r_A, 8 ++ srl t1, $r_A, 8 ++ andi t0, t0, 0xff00 + or $r_A, t0, t1 + # endif + #endif +@@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive) + is_offset_in_header(1, byte) + /* Offset within header boundaries */ + PTR_ADDU t1, $r_skb_data, offset +- lb $r_A, 0(t1) ++ lbu $r_A, 0(t1) + jr $r_ra + move $r_ret, zero + END(sk_load_byte) +@@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive) + * (void *to) is returned in r_s0 + * + */ ++#ifdef CONFIG_CPU_LITTLE_ENDIAN ++#define DS_OFFSET(SIZE) (4 * SZREG) ++#else ++#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) ++#endif + #define bpf_slow_path_common(SIZE) \ + /* Quick check. 
Are we within reasonable boundaries? */ \ + LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ +@@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive) + PTR_LA t0, skb_copy_bits; \ + PTR_S $r_ra, (5 * SZREG)($r_sp); \ + /* Assign low slot to a2 */ \ +- move a2, $r_sp; \ ++ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ + jalr t0; \ + /* Reset our destination slot (DS but it's ok) */ \ + INT_S zero, (4 * SZREG)($r_sp); \ +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c +index 025afe5f17a7..8a0822125f8b 100644 +--- a/arch/parisc/kernel/cache.c ++++ b/arch/parisc/kernel/cache.c +@@ -542,7 +542,8 @@ void flush_cache_mm(struct mm_struct *mm) + rp3440, etc. So, avoid it if the mm isn't too big. */ + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + mm_total_size(mm) >= parisc_cache_flush_threshold) { +- flush_tlb_all(); ++ if (mm->context) ++ flush_tlb_all(); + flush_cache_all(); + return; + } +@@ -570,6 +571,8 @@ void flush_cache_mm(struct mm_struct *mm) + pfn = pte_pfn(*ptep); + if (!pfn_valid(pfn)) + continue; ++ if (unlikely(mm->context)) ++ flush_tlb_page(vma, addr); + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } + } +@@ -596,26 +599,46 @@ flush_user_icache_range(unsigned long start, unsigned long end) + void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) + { ++ pgd_t *pgd; ++ unsigned long addr; ++ + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + end - start >= parisc_cache_flush_threshold) { +- flush_tlb_range(vma, start, end); ++ if (vma->vm_mm->context) ++ flush_tlb_range(vma, start, end); + flush_cache_all(); + return; + } + +- flush_user_dcache_range_asm(start, end); +- if (vma->vm_flags & VM_EXEC) +- flush_user_icache_range_asm(start, end); +- flush_tlb_range(vma, start, end); ++ if (vma->vm_mm->context == mfsp(3)) { ++ flush_user_dcache_range_asm(start, end); ++ if (vma->vm_flags & VM_EXEC) ++ flush_user_icache_range_asm(start, end); ++ flush_tlb_range(vma, start, end); ++ return; ++ } ++ ++ pgd = vma->vm_mm->pgd; ++ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { ++ unsigned long pfn; ++ pte_t *ptep = get_ptep(pgd, addr); ++ if (!ptep) ++ continue; ++ pfn = pte_pfn(*ptep); ++ if (pfn_valid(pfn)) { ++ if (unlikely(vma->vm_mm->context)) ++ flush_tlb_page(vma, addr); ++ __flush_cache_page(vma, addr, PFN_PHYS(pfn)); ++ } ++ } + } + + void + flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) + { +- BUG_ON(!vma->vm_mm->context); +- + if (pfn_valid(pfn)) { +- flush_tlb_page(vma, vmaddr); ++ if (likely(vma->vm_mm->context)) ++ flush_tlb_page(vma, vmaddr); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + } + } +diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h +index 2015b072422c..b4ab1f497335 100644 +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -30,6 +30,7 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags); + int patch_instruction(unsigned int *addr, unsigned int instr); + + int instr_is_relative_branch(unsigned int instr); ++int instr_is_relative_link_branch(unsigned int instr); + int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); + unsigned long branch_target(const unsigned int *instr); + unsigned int translate_branch(const unsigned int *dest, +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c +index 183368e008cf..99407cf12ad5 100644 +--- a/arch/powerpc/kernel/module_64.c ++++ 
b/arch/powerpc/kernel/module_64.c +@@ -494,7 +494,17 @@ static bool is_early_mcount_callsite(u32 *instruction) + restore r2. */ + static int restore_r2(u32 *instruction, struct module *me) + { +- if (is_early_mcount_callsite(instruction - 1)) ++ u32 *prev_insn = instruction - 1; ++ ++ if (is_early_mcount_callsite(prev_insn)) ++ return 1; ++ ++ /* ++ * Make sure the branch isn't a sibling call. Sibling calls aren't ++ * "link" branches and they don't return, so they don't need the r2 ++ * restore afterwards. ++ */ ++ if (!instr_is_relative_link_branch(*prev_insn)) + return 1; + + if (*instruction != PPC_INST_NOP) { +diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c +index d5edbeb8eb82..753d591f1b52 100644 +--- a/arch/powerpc/lib/code-patching.c ++++ b/arch/powerpc/lib/code-patching.c +@@ -95,6 +95,11 @@ int instr_is_relative_branch(unsigned int instr) + return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); + } + ++int instr_is_relative_link_branch(unsigned int instr) ++{ ++ return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK); ++} ++ + static unsigned long branch_iform_target(const unsigned int *instr) + { + signed long imm; +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index d0b137d96df1..9376e8e53bfa 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -294,7 +294,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, + * can result in fault, which will cause a deadlock when called with + * mmap_sem held + */ +- if (user_mode(regs)) ++ if (!is_exec && user_mode(regs)) + store_update_sp = store_updates_sp(regs); + + if (user_mode(regs)) +diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c +index a5d3ecdabc44..035dfb65df4b 100644 +--- a/arch/powerpc/mm/hugetlbpage.c ++++ b/arch/powerpc/mm/hugetlbpage.c +@@ -765,6 +765,24 @@ static int __init add_huge_page_size(unsigned long long size) + if ((mmu_psize = shift_to_mmu_psize(shift)) < 0) + return -EINVAL; + ++#ifdef CONFIG_PPC_BOOK3S_64 ++ /* ++ * We need to make sure that for different page sizes reported by ++ * firmware we only add hugetlb support for page sizes that can be ++ * supported by linux page table layout. ++ * For now we have ++ * Radix: 2M ++ * Hash: 16M and 16G ++ */ ++ if (radix_enabled()) { ++ if (mmu_psize != MMU_PAGE_2M) ++ return -EINVAL; ++ } else { ++ if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G) ++ return -EINVAL; ++ } ++#endif ++ + BUG_ON(mmu_psize_defs[mmu_psize].shift != shift); + + /* Return if huge page size has already been setup */ +diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c +index 050badc0ebd3..0b50019505a5 100644 +--- a/arch/powerpc/mm/tlb_nohash.c ++++ b/arch/powerpc/mm/tlb_nohash.c +@@ -751,7 +751,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, + * avoid going over total available memory just in case... 
+ */ + #ifdef CONFIG_PPC_FSL_BOOK3E +- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { ++ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { + unsigned long linear_sz; + unsigned int num_cams; + +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c +index 29d87444a655..62578989c74d 100644 +--- a/arch/s390/kernel/early.c ++++ b/arch/s390/kernel/early.c +@@ -372,7 +372,7 @@ static int __init topology_setup(char *str) + + rc = kstrtobool(str, &enabled); + if (!rc && !enabled) +- S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY; ++ S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY; + return rc; + } + early_param("topology", topology_setup); +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index ed7a1d2c4235..a2485311164b 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -302,6 +302,7 @@ + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ + #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index d0dabeae0505..f928ad9b143f 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -183,7 +183,10 @@ + * otherwise we'll run out of registers. We don't care about CET + * here, anyway. + */ +-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ ++# define CALL_NOSPEC \ ++ ALTERNATIVE( \ ++ ANNOTATE_RETPOLINE_SAFE \ ++ "call *%[thunk_target]\n", \ + " jmp 904f;\n" \ + " .align 16\n" \ + "901: call 903f;\n" \ +diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h +index 2cb1cc253d51..fc62ba8dce93 100644 +--- a/arch/x86/include/asm/reboot.h ++++ b/arch/x86/include/asm/reboot.h +@@ -15,6 +15,7 @@ struct machine_ops { + }; + + extern struct machine_ops machine_ops; ++extern int crashing_cpu; + + void native_machine_crash_shutdown(struct pt_regs *regs); + void native_machine_shutdown(void); +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 768042530af2..8fb1d6522f8e 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -64,7 +64,7 @@ void check_mpx_erratum(struct cpuinfo_x86 *c) + /* + * Early microcode releases for the Spectre v2 mitigation were broken. 
+ * Information taken from; +- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf ++ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf + * - https://kb.vmware.com/s/article/52345 + * - Microcode revisions observed in the wild + * - Release note from 20180108 microcode release +@@ -82,7 +82,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, +- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index 684d9fd191e0..7bbd50fa72ad 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -48,6 +48,7 @@ + #include + #include + #include ++#include + + #include "mce-internal.h" + +@@ -1081,9 +1082,22 @@ void do_machine_check(struct pt_regs *regs, long error_code) + * on Intel. + */ + int lmce = 1; ++ int cpu = smp_processor_id(); + +- /* If this CPU is offline, just bail out. */ +- if (cpu_is_offline(smp_processor_id())) { ++ /* ++ * Cases where we avoid rendezvous handler timeout: ++ * 1) If this CPU is offline. ++ * ++ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to ++ * skip those CPUs which remain looping in the 1st kernel - see ++ * crash_nmi_callback(). ++ * ++ * Note: there still is a small window between kexec-ing and the new, ++ * kdump kernel establishing a new #MC handler where a broadcasted MCE ++ * might not get handled properly. ++ */ ++ if (cpu_is_offline(cpu) || ++ (crashing_cpu != -1 && crashing_cpu != cpu)) { + u64 mcgstatus; + + mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); +@@ -1681,30 +1695,35 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) + return 0; + } + +-static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) ++/* ++ * Init basic CPU features needed for early decoding of MCEs. 
++ */ ++static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) + { +- switch (c->x86_vendor) { +- case X86_VENDOR_INTEL: +- mce_intel_feature_init(c); +- mce_adjust_timer = cmci_intel_adjust_timer; +- break; +- +- case X86_VENDOR_AMD: { ++ if (c->x86_vendor == X86_VENDOR_AMD) { + mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); + mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); + mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); + +- /* +- * Install proper ops for Scalable MCA enabled processors +- */ + if (mce_flags.smca) { + msr_ops.ctl = smca_ctl_reg; + msr_ops.status = smca_status_reg; + msr_ops.addr = smca_addr_reg; + msr_ops.misc = smca_misc_reg; + } +- mce_amd_feature_init(c); ++ } ++} ++ ++static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) ++{ ++ switch (c->x86_vendor) { ++ case X86_VENDOR_INTEL: ++ mce_intel_feature_init(c); ++ mce_adjust_timer = cmci_intel_adjust_timer; ++ break; + ++ case X86_VENDOR_AMD: { ++ mce_amd_feature_init(c); + break; + } + +@@ -1790,6 +1809,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) + + machine_check_vector = do_machine_check; + ++ __mcheck_cpu_init_early(c); + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(c); + __mcheck_cpu_init_clear_banks(); +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index b55d07b9d530..b8d3f1b60331 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -199,6 +199,8 @@ int can_boost(kprobe_opcode_t *opcodes, void *addr) + return (opcode != 0x62 && opcode != 0x67); + case 0x70: + return 0; /* can't boost conditional jump */ ++ case 0x90: ++ return opcode != 0x9a; /* can't boost call far */ + case 0xc0: + /* can't boost software-interruptions */ + return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; +@@ -407,6 +409,8 @@ static int arch_copy_kprobe(struct kprobe *p) + { + int ret; + ++ set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1); ++ + /* Copy an instruction with recovering if other optprobe modifies it.*/ + ret = __copy_instruction(p->ainsn.insn, p->addr); + if (!ret) +@@ -421,6 +425,8 @@ static int arch_copy_kprobe(struct kprobe *p) + else + p->ainsn.boostable = -1; + ++ set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1); ++ + /* Check whether the instruction modifies Interrupt Flag or not */ + p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); + +diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c +index dc20da1c78f0..fa671b90c374 100644 +--- a/arch/x86/kernel/kprobes/opt.c ++++ b/arch/x86/kernel/kprobes/opt.c +@@ -371,6 +371,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, + } + + buf = (u8 *)op->optinsn.insn; ++ set_memory_rw((unsigned long)buf & PAGE_MASK, 1); + + /* Copy instructions into the out-of-line buffer */ + ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); +@@ -393,6 +394,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, + synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, + (u8 *)op->kp.addr + op->optinsn.size); + ++ set_memory_ro((unsigned long)buf & PAGE_MASK, 1); ++ + flush_icache_range((unsigned long) buf, + (unsigned long) buf + TMPL_END_IDX + + op->optinsn.size + RELATIVEJUMP_SIZE); +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index ce020a69bba9..03f21dbfaa9d 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -769,10 +769,11 @@ void machine_crash_shutdown(struct pt_regs *regs) + #endif + + ++/* This is the CPU performing the emergency 
shutdown work. */ ++int crashing_cpu = -1; ++ + #if defined(CONFIG_SMP) + +-/* This keeps a track of which one is crashing cpu. */ +-static int crashing_cpu; + static nmi_shootdown_cb shootdown_callback; + + static atomic_t waiting_for_crash_ipi; +diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c +index 2bbd27f89802..f9da471a7707 100644 +--- a/arch/x86/kernel/setup_percpu.c ++++ b/arch/x86/kernel/setup_percpu.c +@@ -287,4 +287,25 @@ void __init setup_per_cpu_areas(void) + + /* Setup cpu initialized, callin, callout masks */ + setup_cpu_local_masks(); ++ ++#ifdef CONFIG_X86_32 ++ /* ++ * Sync back kernel address range again. We already did this in ++ * setup_arch(), but percpu data also needs to be available in ++ * the smpboot asm. We can't reliably pick up percpu mappings ++ * using vmalloc_fault(), because exception dispatch needs ++ * percpu data. ++ */ ++ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++ ++ /* ++ * sync back low identity map too. It is used for example ++ * in the 32-bit EFI stub. ++ */ ++ clone_pgd_range(initial_page_table, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); ++#endif + } +diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c +index 1119414ab419..1d4e7fd3e66d 100644 +--- a/arch/x86/kernel/sys_x86_64.c ++++ b/arch/x86/kernel/sys_x86_64.c +@@ -16,6 +16,7 @@ + #include + #include + ++#include + #include + #include + +@@ -100,7 +101,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + static void find_start_end(unsigned long flags, unsigned long *begin, + unsigned long *end) + { +- if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) { ++ if (!in_compat_syscall() && (flags & MAP_32BIT)) { + /* This is usually used needed to map code in small + model, so it needs to be in the first 31bit. Limit + it to that. This means we need to move the +@@ -175,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + return addr; + + /* for MAP_32BIT mappings we force the legacy mmap base */ +- if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) ++ if (!in_compat_syscall() && (flags & MAP_32BIT)) + goto bottomup; + + /* requesting a specific address */ +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c +index 8a1d63591399..961831bf74b1 100644 +--- a/arch/x86/kernel/vm86_32.c ++++ b/arch/x86/kernel/vm86_32.c +@@ -719,7 +719,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) + return; + + check_vip: +- if (VEFLAGS & X86_EFLAGS_VIP) { ++ if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == ++ (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { + save_v86_state(regs, VM86_STI); + return; + } +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 24d2a3ee743f..8c99f2fbae80 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -5449,6 +5449,12 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu) + avic_handle_ldr_update(vcpu); + } + ++static void svm_setup_mce(struct kvm_vcpu *vcpu) ++{ ++ /* [63:9] are reserved. 
*/ ++ vcpu->arch.mcg_cap &= 0x1ff; ++} ++ + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { + .cpu_has_kvm_support = has_svm, + .disabled_by_bios = is_disabled, +@@ -5564,6 +5570,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { + .pmu_ops = &amd_pmu_ops, + .deliver_posted_interrupt = svm_deliver_avic_intr, + .update_pi_irte = svm_update_pi_irte, ++ .setup_mce = svm_setup_mce, + }; + + static int __init svm_init(void) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 4b19ec1da22d..3aaaf305420d 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -3070,7 +3070,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, + return -EINVAL; + + if (events->exception.injected && +- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) ++ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR || ++ is_guest_mode(vcpu))) + return -EINVAL; + + /* INITs are latched while in SMM */ +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c +index 74dea7f14c20..ae23c996e3a8 100644 +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -343,7 +343,7 @@ static noinline int vmalloc_fault(unsigned long address) + if (!pmd_k) + return -1; + +- if (pmd_huge(*pmd_k)) ++ if (pmd_large(*pmd_k)) + return 0; + + pte_k = pte_offset_kernel(pmd_k, address); +@@ -463,7 +463,7 @@ static noinline int vmalloc_fault(unsigned long address) + if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) + BUG(); + +- if (pud_huge(*pud)) ++ if (pud_large(*pud)) + return 0; + + pmd = pmd_offset(pud, address); +@@ -474,7 +474,7 @@ static noinline int vmalloc_fault(unsigned long address) + if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) + BUG(); + +- if (pmd_huge(*pmd)) ++ if (pmd_large(*pmd)) + return 0; + + pte_ref = pte_offset_kernel(pmd_ref, address); +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c +index b08ccbb9393a..6cd839c1f507 100644 +--- a/block/blk-cgroup.c ++++ b/block/blk-cgroup.c +@@ -1078,10 +1078,8 @@ int blkcg_init_queue(struct request_queue *q) + if (preloaded) + radix_tree_preload_end(); + +- if (IS_ERR(blkg)) { +- blkg_free(new_blkg); ++ if (IS_ERR(blkg)) + return PTR_ERR(blkg); +- } + + q->root_blkg = blkg; + q->root_rl.blkg = blkg; +diff --git a/block/blk-throttle.c b/block/blk-throttle.c +index a3ea8260c94c..3a4c9a3c1427 100644 +--- a/block/blk-throttle.c ++++ b/block/blk-throttle.c +@@ -499,6 +499,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg) + static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, + unsigned long expires) + { ++ unsigned long max_expire = jiffies + 8 * throtl_slice; ++ ++ /* ++ * Since we are adjusting the throttle limit dynamically, the sleep ++ * time calculated according to previous limit might be invalid. It's ++ * possible the cgroup sleep time is very long and no other cgroups ++ * have IO running so notify the limit changes. Make sure the cgroup ++ * doesn't sleep too long to avoid the missed notification. ++ */ ++ if (time_after(expires, max_expire)) ++ expires = max_expire; + mod_timer(&sq->pending_timer, expires); + throtl_log(sq, "schedule timer. 
delay=%lu jiffies=%lu", + expires - jiffies, jiffies); +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index 0f7d28a98b9a..a7cc5b7be598 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -871,6 +871,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, + } + } + wmb(); ++ if (intel_private.driver->chipset_flush) ++ intel_private.driver->chipset_flush(); + } + EXPORT_SYMBOL(intel_gtt_insert_sg_entries); + +diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c +index 9d9af446bafc..37e05d6e010a 100644 +--- a/drivers/clk/meson/gxbb.c ++++ b/drivers/clk/meson/gxbb.c +@@ -572,7 +572,7 @@ static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6); + static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7); + static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8); + static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9); +-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10); ++static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10); + static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11); + static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12); + static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13); +@@ -623,7 +623,7 @@ static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9); + static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11); + static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12); + static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15); +-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22); ++static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22); + static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25); + static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26); + static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29); +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c +index 5c4e193164d4..8dd71345b5d0 100644 +--- a/drivers/clk/qcom/gcc-msm8916.c ++++ b/drivers/clk/qcom/gcc-msm8916.c +@@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = { + + static struct clk_rcg2 codec_digcodec_clk_src = { + .cmd_rcgr = 0x1c09c, ++ .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll1_emclk_sleep_map, + .freq_tbl = ftbl_codec_clk, +diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c +index ca97e1151797..3b171bef913a 100644 +--- a/drivers/clk/qcom/mmcc-msm8996.c ++++ b/drivers/clk/qcom/mmcc-msm8996.c +@@ -2984,7 +2984,7 @@ static struct gdsc vfe1_gdsc = { + .cxcs = (unsigned int []){ 0x36ac }, + .cxc_count = 1, + .pd = { +- .name = "vfe0", ++ .name = "vfe1", + }, + .parent = &camss_gdsc.pd, + .pwrsts = PWRSTS_OFF_ON, +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c +index d1651a50c349..21726a270fc4 100644 +--- a/drivers/dma/imx-sdma.c ++++ b/drivers/dma/imx-sdma.c +@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan) + return 0; + } + ++static int sdma_disable_channel_with_delay(struct dma_chan *chan) ++{ ++ sdma_disable_channel(chan); ++ ++ /* ++ * According to NXP R&D team a delay of one BD SDMA cost time ++ * (maximum is 1ms) should be added after disable of the channel ++ * bit, to ensure SDMA core has really been stopped after SDMA ++ * clients call .device_terminate_all. 
++ */ ++ mdelay(1); ++ ++ return 0; ++} ++ + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) + { + struct sdma_engine *sdma = sdmac->sdma; +@@ -1828,7 +1843,7 @@ static int sdma_probe(struct platform_device *pdev) + sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; + sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; + sdma->dma_device.device_config = sdma_config; +- sdma->dma_device.device_terminate_all = sdma_disable_channel; ++ sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; + sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +index 58d3e2b39b5b..61262a7a5c3a 100644 +--- a/drivers/edac/altera_edac.c ++++ b/drivers/edac/altera_edac.c +@@ -1020,13 +1020,23 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, + return ret; + } + ++static int socfpga_is_a10(void) ++{ ++ return of_machine_is_compatible("altr,socfpga-arria10"); ++} ++ + static int validate_parent_available(struct device_node *np); + static const struct of_device_id altr_edac_a10_device_of_match[]; + static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat) + { + int irq; +- struct device_node *child, *np = of_find_compatible_node(NULL, NULL, +- "altr,socfpga-a10-ecc-manager"); ++ struct device_node *child, *np; ++ ++ if (!socfpga_is_a10()) ++ return -ENODEV; ++ ++ np = of_find_compatible_node(NULL, NULL, ++ "altr,socfpga-a10-ecc-manager"); + if (!np) { + edac_printk(KERN_ERR, EDAC_DEVICE, "ECC Manager not found\n"); + return -ENODEV; +@@ -1542,8 +1552,12 @@ static const struct edac_device_prv_data a10_sdmmceccb_data = { + static int __init socfpga_init_sdmmc_ecc(void) + { + int rc = -ENODEV; +- struct device_node *child = of_find_compatible_node(NULL, NULL, +- "altr,socfpga-sdmmc-ecc"); ++ struct device_node *child; ++ ++ if (!socfpga_is_a10()) ++ return -ENODEV; ++ ++ child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc"); + if (!child) { + edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n"); + return -ENODEV; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index c82b04b24bf9..e9311eb7b8d9 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) + /* don't do anything if sink is not display port, i.e., + * passive dp->(dvi|hdmi) adaptor + */ +- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { +- int saved_dpms = connector->dpms; +- /* Only turn off the display if it's physically disconnected */ +- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); +- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { +- /* Don't try to start link training before we +- * have the dpcd */ +- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) +- return; +- +- /* set it to OFF so that drm_helper_connector_dpms() +- * won't return immediately since the current state +- * is ON at this point. 
+- */ +- connector->dpms = DRM_MODE_DPMS_OFF; +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); +- } +- connector->dpms = saved_dpms; ++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && ++ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && ++ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { ++ /* Don't start link training before we have the DPCD */ ++ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) ++ return; ++ ++ /* Turn the connector off and back on immediately, which ++ * will trigger link training ++ */ ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + } + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +index 083e2b429872..15a2d8f3725d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +@@ -533,6 +533,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, + return ERR_PTR(-ENOENT); + } + ++ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ ++ if (obj->import_attach) { ++ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ + amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); + if (amdgpu_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +index a7ea9a3b454e..d5e4748e3300 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) + struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); + + if (robj) { +- if (robj->gem_base.import_attach) +- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); + amdgpu_mn_unregister(robj); + amdgpu_bo_unref(&robj); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index f3efb1c5dae9..5afe72778518 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -94,6 +94,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) + + amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); + ++ if (bo->gem_base.import_attach) ++ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); + drm_gem_object_release(&bo->gem_base); + amdgpu_bo_unref(&bo->parent); + if (!list_empty(&bo->shadow_list)) { +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +index 1e5064749959..8c6e47c5507f 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +@@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, + return ret; + } + ++static void kfd_topology_kobj_release(struct kobject *kobj) ++{ ++ kfree(kobj); ++} ++ + static const struct sysfs_ops sysprops_ops = { + .show = sysprops_show, + }; + + static struct kobj_type sysprops_type = { ++ .release = kfd_topology_kobj_release, + .sysfs_ops = &sysprops_ops, + }; + +@@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = { + }; + + static struct kobj_type iolink_type = { ++ .release = kfd_topology_kobj_release, + .sysfs_ops = &iolink_ops, + }; + +@@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = { + }; + + static struct kobj_type mem_type = { ++ .release = kfd_topology_kobj_release, + .sysfs_ops = &mem_ops, + }; + +@@ -625,6 +633,7 
@@ static const struct sysfs_ops cache_ops = { + }; + + static struct kobj_type cache_type = { ++ .release = kfd_topology_kobj_release, + .sysfs_ops = &cache_ops, + }; + +@@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = { + }; + + static struct kobj_type node_type = { ++ .release = kfd_topology_kobj_release, + .sysfs_ops = &node_ops, + }; + +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index c6b281aa762f..6b31e0474271 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -3347,8 +3347,7 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name); + * @edid: EDID to parse + * + * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The +- * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to +- * fill in. ++ * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. + */ + void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) + { +@@ -3426,6 +3425,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) + } + eld[5] |= total_sad_count << 4; + ++ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || ++ connector->connector_type == DRM_MODE_CONNECTOR_eDP) ++ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; ++ else ++ eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; ++ + eld[DRM_ELD_BASELINE_ELD_LEN] = + DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); + +diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c +index 48a6167f5e7b..00c815a7c414 100644 +--- a/drivers/gpu/drm/drm_irq.c ++++ b/drivers/gpu/drm/drm_irq.c +@@ -1202,9 +1202,9 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe) + if (atomic_dec_and_test(&vblank->refcount)) { + if (drm_vblank_offdelay == 0) + return; +- else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0) ++ else if (drm_vblank_offdelay < 0) + vblank_disable_fn((unsigned long)vblank); +- else ++ else if (!dev->vblank_disable_immediate) + mod_timer(&vblank->disable_timer, + jiffies + ((drm_vblank_offdelay * HZ)/1000)); + } +@@ -1819,6 +1819,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) + wake_up(&vblank->queue); + drm_handle_vblank_events(dev, pipe); + ++ /* With instant-off, we defer disabling the interrupt until after ++ * we finish processing the following vblank. The disable has to ++ * be last (after drm_handle_vblank_events) so that the timestamp ++ * is always accurate. ++ */ ++ if (dev->vblank_disable_immediate && ++ drm_vblank_offdelay > 0 && ++ !atomic_read(&vblank->refcount)) ++ vblank_disable_fn((unsigned long)vblank); ++ + spin_unlock_irqrestore(&dev->event_lock, irqflags); + + return true; +diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c +index 2cd879a4ae15..cdbb6e625f05 100644 +--- a/drivers/gpu/drm/qxl/qxl_fb.c ++++ b/drivers/gpu/drm/qxl/qxl_fb.c +@@ -387,9 +387,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { + + int qxl_fbdev_init(struct qxl_device *qdev) + { ++ int ret = 0; ++ ++#ifdef CONFIG_DRM_FBDEV_EMULATION + struct qxl_fbdev *qfbdev; + int bpp_sel = 32; /* TODO: parameter from somewhere? 
*/ +- int ret; + + qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); + if (!qfbdev) +@@ -423,6 +425,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) + drm_fb_helper_fini(&qfbdev->helper); + free: + kfree(qfbdev); ++#endif ++ + return ret; + } + +@@ -438,6 +442,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev) + + void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) + { ++ if (!qdev->mode_info.qfbdev) ++ return; ++ + drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); + } + +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index cdb8cb568c15..ca1caf405832 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -1352,6 +1352,12 @@ radeon_user_framebuffer_create(struct drm_device *dev, + return ERR_PTR(-ENOENT); + } + ++ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ ++ if (obj->import_attach) { ++ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); ++ return ERR_PTR(-EINVAL); ++ } ++ + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +index 3322b157106d..1c4d95dea887 100644 +--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c ++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +@@ -512,6 +512,13 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc) + rcar_du_crtc_stop(rcrtc); + rcar_du_crtc_put(rcrtc); + ++ spin_lock_irq(&crtc->dev->event_lock); ++ if (crtc->state->event) { ++ drm_crtc_send_vblank_event(crtc, crtc->state->event); ++ crtc->state->event = NULL; ++ } ++ spin_unlock_irq(&crtc->dev->event_lock); ++ + rcrtc->outputs = 0; + } + +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +index c7eba305c488..6e3c4acb16ac 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +@@ -503,7 +503,7 @@ static int vop_enable(struct drm_crtc *crtc) + ret = pm_runtime_get_sync(vop->dev); + if (ret < 0) { + dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); +- goto err_put_pm_runtime; ++ return ret; + } + + ret = clk_enable(vop->hclk); +@@ -1348,10 +1348,16 @@ static int vop_initial(struct vop *vop) + return PTR_ERR(vop->dclk); + } + ++ ret = pm_runtime_get_sync(vop->dev); ++ if (ret < 0) { ++ dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); ++ return ret; ++ } ++ + ret = clk_prepare(vop->dclk); + if (ret < 0) { + dev_err(vop->dev, "failed to prepare dclk\n"); +- return ret; ++ goto err_put_pm_runtime; + } + + /* Enable both the hclk and aclk to setup the vop */ +@@ -1411,6 +1417,8 @@ static int vop_initial(struct vop *vop) + + vop->is_enabled = false; + ++ pm_runtime_put_sync(vop->dev); ++ + return 0; + + err_disable_aclk: +@@ -1419,6 +1427,8 @@ static int vop_initial(struct vop *vop) + clk_disable_unprepare(vop->hclk); + err_unprepare_dclk: + clk_unprepare(vop->dclk); ++err_put_pm_runtime: ++ pm_runtime_put_sync(vop->dev); + return ret; + } + +@@ -1519,12 +1529,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data) + if (!vop->regsbak) + return -ENOMEM; + +- ret = vop_initial(vop); +- if (ret < 0) { +- dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret); +- return ret; +- } +- + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "cannot find irq for vop\n"); +@@ -1551,8 +1555,17 @@ static int 
vop_bind(struct device *dev, struct device *master, void *data) + + pm_runtime_enable(&pdev->dev); + ++ ret = vop_initial(vop); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret); ++ goto err_disable_pm_runtime; ++ } ++ + return 0; + ++err_disable_pm_runtime: ++ pm_runtime_disable(&pdev->dev); ++ vop_destroy_crtc(vop); + err_enable_irq: + enable_irq(vop->irq); /* To balance out the disable_irq above */ + return ret; +diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c +index 4a192210574f..caba0311c86c 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c ++++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -136,5 +137,9 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm) + + drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs); + ++ /* Set crtc.port to output port node of the tcon */ ++ scrtc->crtc.port = of_graph_get_port_by_id(drv->tcon->dev->of_node, ++ 1); ++ + return scrtc; + } +diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c +index 1feec34ca9dd..9e77fc034e0a 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_drv.c ++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c +@@ -145,7 +145,7 @@ static int sun4i_drv_bind(struct device *dev) + ret = component_bind_all(drm->dev, drm); + if (ret) { + dev_err(drm->dev, "Couldn't bind all pipelines components\n"); +- goto free_drm; ++ goto cleanup_mode_config; + } + + /* Create our layers */ +@@ -153,7 +153,7 @@ static int sun4i_drv_bind(struct device *dev) + if (IS_ERR(drv->layers)) { + dev_err(drm->dev, "Couldn't create the planes\n"); + ret = PTR_ERR(drv->layers); +- goto free_drm; ++ goto cleanup_mode_config; + } + + /* Create our CRTC */ +@@ -161,7 +161,7 @@ static int sun4i_drv_bind(struct device *dev) + if (!drv->crtc) { + dev_err(drm->dev, "Couldn't create the CRTC\n"); + ret = -EINVAL; +- goto free_drm; ++ goto cleanup_mode_config; + } + drm->irq_enabled = true; + +@@ -173,7 +173,7 @@ static int sun4i_drv_bind(struct device *dev) + if (IS_ERR(drv->fbdev)) { + dev_err(drm->dev, "Couldn't create our framebuffer\n"); + ret = PTR_ERR(drv->fbdev); +- goto free_drm; ++ goto cleanup_mode_config; + } + + /* Enable connectors polling */ +@@ -181,10 +181,16 @@ static int sun4i_drv_bind(struct device *dev) + + ret = drm_dev_register(drm, 0); + if (ret) +- goto free_drm; ++ goto finish_poll; + + return 0; + ++finish_poll: ++ drm_kms_helper_poll_fini(drm); ++ sun4i_framebuffer_free(drm); ++cleanup_mode_config: ++ drm_mode_config_cleanup(drm); ++ drm_vblank_cleanup(drm); + free_drm: + drm_dev_unref(drm); + return ret; +diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c +index c6afb2448655..f2975a1525be 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c ++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c +@@ -336,12 +336,11 @@ static int sun4i_tcon_init_clocks(struct device *dev, + } + } + +- return sun4i_dclk_create(dev, tcon); ++ return 0; + } + + static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) + { +- sun4i_dclk_free(tcon); + clk_disable_unprepare(tcon->clk); + } + +@@ -506,22 +505,28 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, + return ret; + } + ++ ret = sun4i_tcon_init_clocks(dev, tcon); ++ if (ret) { ++ dev_err(dev, "Couldn't init our TCON clocks\n"); ++ goto err_assert_reset; ++ } ++ + ret = sun4i_tcon_init_regmap(dev, tcon); + if (ret) { + dev_err(dev, "Couldn't init our TCON regmap\n"); +- goto err_assert_reset; ++ 
goto err_free_clocks; + } + +- ret = sun4i_tcon_init_clocks(dev, tcon); ++ ret = sun4i_dclk_create(dev, tcon); + if (ret) { +- dev_err(dev, "Couldn't init our TCON clocks\n"); +- goto err_assert_reset; ++ dev_err(dev, "Couldn't create our TCON dot clock\n"); ++ goto err_free_clocks; + } + + ret = sun4i_tcon_init_irq(dev, tcon); + if (ret) { + dev_err(dev, "Couldn't init our TCON interrupts\n"); +- goto err_free_clocks; ++ goto err_free_dotclock; + } + + ret = sun4i_rgb_init(drm); +@@ -530,6 +535,8 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, + + return 0; + ++err_free_dotclock: ++ sun4i_dclk_free(tcon); + err_free_clocks: + sun4i_tcon_free_clocks(tcon); + err_assert_reset: +@@ -542,6 +549,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master, + { + struct sun4i_tcon *tcon = dev_get_drvdata(dev); + ++ sun4i_dclk_free(tcon); + sun4i_tcon_free_clocks(tcon); + } + +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c +index d09276ec7e90..52a2a1a75682 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo.c ++++ b/drivers/gpu/drm/ttm/ttm_bo.c +@@ -1209,18 +1209,20 @@ int ttm_bo_init(struct ttm_bo_device *bdev, + if (likely(!ret)) + ret = ttm_bo_validate(bo, placement, interruptible, false); + +- if (!resv) { ++ if (!resv) + ttm_bo_unreserve(bo); + +- } else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { ++ if (unlikely(ret)) { ++ ttm_bo_unref(&bo); ++ return ret; ++ } ++ ++ if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + spin_lock(&bo->glob->lru_lock); + ttm_bo_add_to_lru(bo); + spin_unlock(&bo->glob->lru_lock); + } + +- if (unlikely(ret)) +- ttm_bo_unref(&bo); +- + return ret; + } + EXPORT_SYMBOL(ttm_bo_init); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +index d2d93959b119..aec6e9eef489 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +@@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, + set.y = 0; + set.mode = NULL; + set.fb = NULL; +- set.num_connectors = 1; ++ set.num_connectors = 0; + set.connectors = &par->con; + ret = drm_mode_set_config_internal(&set); + if (ret) { +@@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv) + flush_delayed_work(&par->local_work); + + mutex_lock(&par->bo_mutex); ++ drm_modeset_lock_all(vmw_priv->dev); + (void) vmw_fb_kms_detach(par, true, false); ++ drm_modeset_unlock_all(vmw_priv->dev); + mutex_unlock(&par->bo_mutex); + + return 0; +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c +index 0cd4f7216239..5eea6fe0d7bd 100644 +--- a/drivers/hid/hid-elo.c ++++ b/drivers/hid/hid-elo.c +@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev, + { + struct input_dev *input = hidinput->input; + ++ /* ++ * ELO devices have one Button usage in GenDesk field, which makes ++ * hid-input map it to BTN_LEFT; that confuses userspace, which then ++ * considers the device to be a mouse/touchpad instead of touchscreen. ++ */ ++ clear_bit(BTN_LEFT, input->keybit); + set_bit(BTN_TOUCH, input->keybit); + set_bit(ABS_PRESSURE, input->absbit); + input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index fb9ace1cef8b..40233315d5f5 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -1149,18 +1149,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct + + /* + * Ignore out-of-range values as per HID specification, +- * section 5.10 and 6.2.25. 
++ * section 5.10 and 6.2.25, when NULL state bit is present. ++ * When it's not, clamp the value to match Microsoft's input ++ * driver as mentioned in "Required HID usages for digitizers": ++ * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp + * + * The logical_minimum < logical_maximum check is done so that we + * don't unintentionally discard values sent by devices which + * don't specify logical min and max. + */ + if ((field->flags & HID_MAIN_ITEM_VARIABLE) && +- (field->logical_minimum < field->logical_maximum) && +- (value < field->logical_minimum || +- value > field->logical_maximum)) { +- dbg_hid("Ignoring out-of-range value %x\n", value); +- return; ++ (field->logical_minimum < field->logical_maximum)) { ++ if (field->flags & HID_MAIN_ITEM_NULL_STATE && ++ (value < field->logical_minimum || ++ value > field->logical_maximum)) { ++ dbg_hid("Ignoring out-of-range value %x\n", value); ++ return; ++ } ++ value = clamp(value, ++ field->logical_minimum, ++ field->logical_maximum); + } + + /* +diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c +index 3baa4f4a8c5e..d659a02647d4 100644 +--- a/drivers/hwmon/pmbus/adm1275.c ++++ b/drivers/hwmon/pmbus/adm1275.c +@@ -101,8 +101,8 @@ static const struct coefficients adm1075_coefficients[] = { + [0] = { 27169, 0, -1 }, /* voltage */ + [1] = { 806, 20475, -1 }, /* current, irange25 */ + [2] = { 404, 20475, -1 }, /* current, irange50 */ +- [3] = { 0, -1, 8549 }, /* power, irange25 */ +- [4] = { 0, -1, 4279 }, /* power, irange50 */ ++ [3] = { 8549, 0, -1 }, /* power, irange25 */ ++ [4] = { 4279, 0, -1 }, /* power, irange50 */ + }; + + static const struct coefficients adm1275_coefficients[] = { +diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c +index 629e031b7456..09142e99e915 100644 +--- a/drivers/hwtracing/coresight/of_coresight.c ++++ b/drivers/hwtracing/coresight/of_coresight.c +@@ -149,7 +149,7 @@ struct coresight_platform_data *of_get_coresight_platform_data( + continue; + + /* The local out port number */ +- pdata->outports[i] = endpoint.id; ++ pdata->outports[i] = endpoint.port; + + /* + * Get a handle on the remote port and parent +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c +index 4682909b021b..3be62ef154d1 100644 +--- a/drivers/infiniband/hw/hfi1/chip.c ++++ b/drivers/infiniband/hw/hfi1/chip.c +@@ -9489,8 +9489,11 @@ static int test_qsfp_read(struct hfi1_pportdata *ppd) + int ret; + u8 status; + +- /* report success if not a QSFP */ +- if (ppd->port_type != PORT_TYPE_QSFP) ++ /* ++ * Report success if not a QSFP or, if it is a QSFP, but the cable is ++ * not present ++ */ ++ if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) + return 0; + + /* read byte 2, the status byte */ +diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c +index 5a5778729e37..76bb51309a78 100644 +--- a/drivers/input/keyboard/qt1070.c ++++ b/drivers/input/keyboard/qt1070.c +@@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = { + }; + MODULE_DEVICE_TABLE(i2c, qt1070_id); + ++#ifdef CONFIG_OF ++static const struct of_device_id qt1070_of_match[] = { ++ { .compatible = "qt1070", }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, qt1070_of_match); ++#endif ++ + static struct i2c_driver qt1070_driver = { + .driver = { + .name = "qt1070", ++ .of_match_table = of_match_ptr(qt1070_of_match), + .pm = &qt1070_pm_ops, + }, + .id_table = qt1070_id, +diff --git 
a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c +index 5d0cd51c6f41..a4b7b4c3d27b 100644 +--- a/drivers/input/touchscreen/tsc2007.c ++++ b/drivers/input/touchscreen/tsc2007.c +@@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client, + + tsc2007_stop(ts); + ++ /* power down the chip (TSC2007_SETUP does not ACK on I2C) */ ++ err = tsc2007_xfer(ts, PWRDOWN); ++ if (err < 0) { ++ dev_err(&client->dev, ++ "Failed to setup chip: %d\n", err); ++ return err; /* usually, chip does not respond */ ++ } ++ + err = input_register_device(input_dev); + if (err) { + dev_err(&client->dev, +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c +index e23001bfcfee..f106fd9782bf 100644 +--- a/drivers/iommu/iova.c ++++ b/drivers/iommu/iova.c +@@ -138,7 +138,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, + break; /* found a free slot */ + } + adjust_limit_pfn: +- limit_pfn = curr_iova->pfn_lo - 1; ++ limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; + move_left: + prev = curr; + curr = rb_prev(curr); +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index acb9d250a905..ac15e5d5d9b2 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -684,7 +684,7 @@ static struct irq_chip its_irq_chip = { + * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. + */ + #define IRQS_PER_CHUNK_SHIFT 5 +-#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) ++#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) + + static unsigned long *lpi_bitmap; + static u32 lpi_chunks; +@@ -1320,11 +1320,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* +- * At least one bit of EventID is being used, hence a minimum +- * of two entries. No, the architecture doesn't let you +- * express an ITT with a single entry. ++ * We allocate at least one chunk worth of LPIs bet device, ++ * and thus that many ITEs. The device may require less though. 
+ */ +- nr_ites = max(2UL, roundup_pow_of_two(nvecs)); ++ nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); + sz = nr_ites * its->ite_size; + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc(sz, GFP_KERNEL); +diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c +index a52674327857..8988ba3b2d65 100644 +--- a/drivers/leds/leds-pm8058.c ++++ b/drivers/leds/leds-pm8058.c +@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev) + if (!led) + return -ENOMEM; + +- led->ledtype = (u32)of_device_get_match_data(&pdev->dev); ++ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev); + + map = dev_get_regmap(pdev->dev.parent, NULL); + if (!map) { +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 27d8bb21e04f..a7bc70334f0e 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -4826,8 +4826,10 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len) + return err; + + /* cluster raid doesn't support change array_sectors */ +- if (mddev_is_clustered(mddev)) ++ if (mddev_is_clustered(mddev)) { ++ mddev_unlock(mddev); + return -EINVAL; ++ } + + if (strncmp(buf, "default", 7) == 0) { + if (mddev->pers) +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 475a7a1bcfe0..4493be50fc6a 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -3391,9 +3391,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, + BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); + BUG_ON(test_bit(R5_Wantread, &dev->flags)); + BUG_ON(sh->batch_head); ++ ++ /* ++ * In the raid6 case if the only non-uptodate disk is P ++ * then we already trusted P to compute the other failed ++ * drives. It is safe to compute rather than re-read P. ++ * In other cases we only compute blocks from failed ++ * devices, otherwise check/repair might fail to detect ++ * a real inconsistency. 
++ */ ++ + if ((s->uptodate == disks - 1) && ++ ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || + (s->failed && (disk_idx == s->failed_num[0] || +- disk_idx == s->failed_num[1]))) { ++ disk_idx == s->failed_num[1])))) { + /* have disk failed, and we're requested to fetch it; + * do compute it + */ +diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c +index 4bf2995e1cb8..8f85910eda5d 100644 +--- a/drivers/media/i2c/soc_camera/ov6650.c ++++ b/drivers/media/i2c/soc_camera/ov6650.c +@@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client, + priv->code = MEDIA_BUS_FMT_YUYV8_2X8; + priv->colorspace = V4L2_COLORSPACE_JPEG; + +- priv->clk = v4l2_clk_get(&client->dev, "mclk"); ++ priv->clk = v4l2_clk_get(&client->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto eclkget; +diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c +index b4be47969b6b..e17d6b945c07 100644 +--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c ++++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c +@@ -341,6 +341,17 @@ static void solo_stop_streaming(struct vb2_queue *q) + struct solo_dev *solo_dev = vb2_get_drv_priv(q); + + solo_stop_thread(solo_dev); ++ ++ spin_lock(&solo_dev->slock); ++ while (!list_empty(&solo_dev->vidq_active)) { ++ struct solo_vb2_buf *buf = list_entry( ++ solo_dev->vidq_active.next, ++ struct solo_vb2_buf, list); ++ ++ list_del(&buf->list); ++ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); ++ } ++ spin_unlock(&solo_dev->slock); + INIT_LIST_HEAD(&solo_dev->vidq_active); + } + +diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c +index cd209dccff1b..8e2aa3f8e52f 100644 +--- a/drivers/media/platform/vsp1/vsp1_drm.c ++++ b/drivers/media/platform/vsp1/vsp1_drm.c +@@ -596,6 +596,7 @@ int vsp1_drm_init(struct vsp1_device *vsp1) + pipe->bru = &vsp1->bru->entity; + pipe->lif = &vsp1->lif->entity; + pipe->output = vsp1->wpf[0]; ++ pipe->output->pipe = pipe; + + return 0; + } +diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c +index 57c713a4e1df..4ac1ff482a0b 100644 +--- a/drivers/media/platform/vsp1/vsp1_drv.c ++++ b/drivers/media/platform/vsp1/vsp1_drv.c +@@ -509,7 +509,13 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev) + { + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + +- vsp1_pipelines_suspend(vsp1); ++ /* ++ * When used as part of a display pipeline, the VSP is stopped and ++ * restarted explicitly by the DU. ++ */ ++ if (!vsp1->drm) ++ vsp1_pipelines_suspend(vsp1); ++ + pm_runtime_force_suspend(vsp1->dev); + + return 0; +@@ -520,7 +526,13 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev) + struct vsp1_device *vsp1 = dev_get_drvdata(dev); + + pm_runtime_force_resume(vsp1->dev); +- vsp1_pipelines_resume(vsp1); ++ ++ /* ++ * When used as part of a display pipeline, the VSP is stopped and ++ * restarted explicitly by the DU. 
++ */ ++ if (!vsp1->drm) ++ vsp1_pipelines_resume(vsp1); + + return 0; + } +diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c +index d351b9c768d2..743aa0febc09 100644 +--- a/drivers/media/platform/vsp1/vsp1_video.c ++++ b/drivers/media/platform/vsp1/vsp1_video.c +@@ -792,6 +792,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) + { + struct vsp1_video *video = vb2_get_drv_priv(vq); + struct vsp1_pipeline *pipe = video->rwpf->pipe; ++ bool start_pipeline = false; + unsigned long flags; + int ret; + +@@ -802,11 +803,23 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) + mutex_unlock(&pipe->lock); + return ret; + } ++ ++ start_pipeline = true; + } + + pipe->stream_count++; + mutex_unlock(&pipe->lock); + ++ /* ++ * vsp1_pipeline_ready() is not sufficient to establish that all streams ++ * are prepared and the pipeline is configured, as multiple streams ++ * can race through streamon with buffers already queued; Therefore we ++ * don't even attempt to start the pipeline until the last stream has ++ * called through here. ++ */ ++ if (!start_pipeline) ++ return 0; ++ + spin_lock_irqsave(&pipe->irqlock, flags); + if (vsp1_pipeline_ready(pipe)) + vsp1_video_pipeline_run(pipe); +diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c +index 9caea8344547..d793c630f1dd 100644 +--- a/drivers/media/usb/cpia2/cpia2_v4l.c ++++ b/drivers/media/usb/cpia2/cpia2_v4l.c +@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) + struct camera_data *cam = video_drvdata(file); + + if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || +- buf->index > cam->num_frames) ++ buf->index >= cam->num_frames) + return -EINVAL; + + buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; +@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) + + if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + buf->memory != V4L2_MEMORY_MMAP || +- buf->index > cam->num_frames) ++ buf->index >= cam->num_frames) + return -EINVAL; + + DBG("QBUF #%d\n", buf->index); +diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile +index 31983366090a..2bf79ba4a39e 100644 +--- a/drivers/misc/Makefile ++++ b/drivers/misc/Makefile +@@ -61,6 +61,8 @@ lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o + lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o + lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o + ++KCOV_INSTRUMENT_lkdtm_rodata.o := n ++ + OBJCOPYFLAGS := + OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ + --set-section-flags .text=alloc,readonly \ +diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c +index cc91f7b3d90c..eb29113e0bac 100644 +--- a/drivers/misc/enclosure.c ++++ b/drivers/misc/enclosure.c +@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components, + for (i = 0; i < components; i++) { + edev->component[i].number = -1; + edev->component[i].slot = -1; +- edev->component[i].power_status = 1; ++ edev->component[i].power_status = -1; + } + + mutex_lock(&container_list_lock); +@@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev, + + if (edev->cb->get_power_status) + edev->cb->get_power_status(edev, ecomp); ++ ++ /* If still uninitialized, the callback failed or does not exist. */ ++ if (ecomp->power_status == -1) ++ return (edev->cb->get_power_status) ? -EIO : -ENOTTY; ++ + return snprintf(buf, 40, "%s\n", ecomp->power_status ? 
"on" : "off"); + } + +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index d1570f512f0b..f8f12ccc6471 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -907,6 +907,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + if (ctrl->version == FSL_IFC_VERSION_1_1_0) + fsl_ifc_sram_init(priv); + ++ /* ++ * As IFC version 2.0.0 has 16KB of internal SRAM as compared to older ++ * versions which had 8KB. Hence bufnum mask needs to be updated. ++ */ ++ if (ctrl->version >= FSL_IFC_VERSION_2_0_0) ++ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1; ++ + return 0; + } + +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c +index 21c03086bb7f..a3e86e52640a 100644 +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -715,7 +715,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, + chip->cmd_ctrl(mtd, readcmd, ctrl); + ctrl &= ~NAND_CTRL_CHANGE; + } +- chip->cmd_ctrl(mtd, command, ctrl); ++ if (command != NAND_CMD_NONE) ++ chip->cmd_ctrl(mtd, command, ctrl); + + /* Address cycle, when necessary */ + ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; +@@ -744,6 +745,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, + */ + switch (command) { + ++ case NAND_CMD_NONE: + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: +@@ -806,7 +808,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, + } + + /* Command latch cycle */ +- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); ++ if (command != NAND_CMD_NONE) ++ chip->cmd_ctrl(mtd, command, ++ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + + if (column != -1 || page_addr != -1) { + int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; +@@ -842,6 +846,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, + */ + switch (command) { + ++ case NAND_CMD_NONE: + case NAND_CMD_CACHEDPROG: + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 63d61c084815..c3f3096b24ae 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -371,9 +371,10 @@ int bond_set_carrier(struct bonding *bond) + /* Get link speed and duplex from the slave's base driver + * using ethtool. If for some reason the call fails or the + * values are invalid, set speed and duplex to -1, +- * and return. ++ * and return. Return 1 if speed or duplex settings are ++ * UNKNOWN; 0 otherwise. 
+ */ +-static void bond_update_speed_duplex(struct slave *slave) ++static int bond_update_speed_duplex(struct slave *slave) + { + struct net_device *slave_dev = slave->dev; + struct ethtool_link_ksettings ecmd; +@@ -383,24 +384,27 @@ static void bond_update_speed_duplex(struct slave *slave) + slave->duplex = DUPLEX_UNKNOWN; + + res = __ethtool_get_link_ksettings(slave_dev, &ecmd); +- if (res < 0) +- return; +- +- if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) +- return; +- ++ if (res < 0) { ++ slave->link = BOND_LINK_DOWN; ++ return 1; ++ } ++ if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) { ++ slave->link = BOND_LINK_DOWN; ++ return 1; ++ } + switch (ecmd.base.duplex) { + case DUPLEX_FULL: + case DUPLEX_HALF: + break; + default: +- return; ++ slave->link = BOND_LINK_DOWN; ++ return 1; + } + + slave->speed = ecmd.base.speed; + slave->duplex = ecmd.base.duplex; + +- return; ++ return 0; + } + + const char *bond_slave_link_status(s8 link) +@@ -3327,12 +3331,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, + for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { + u64 nv = new[i]; + u64 ov = old[i]; ++ s64 delta = nv - ov; + + /* detects if this particular field is 32bit only */ + if (((nv | ov) >> 32) == 0) +- res[i] += (u32)nv - (u32)ov; +- else +- res[i] += nv - ov; ++ delta = (s64)(s32)((u32)nv - (u32)ov); ++ ++ /* filter anomalies, some drivers reset their stats ++ * at down/up events. ++ */ ++ if (delta > 0) ++ res[i] += delta; + } + } + +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +index 5390ae89136c..71611bd6384b 100644 +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +@@ -560,6 +560,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); ++ CFG_CLE_IP_HDR_LEN_SET(&cb, 0); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +index 06e598c8bc16..c82faf1a88b8 100644 +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +@@ -163,6 +163,7 @@ enum xgene_enet_rm { + #define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) + + #define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) ++#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5) + #define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) + #define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) + #define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +index 8158d4698734..651f308cdc60 100644 +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +@@ -505,14 +505,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, + return NETDEV_TX_OK; + } + +-static void xgene_enet_skip_csum(struct sk_buff *skb) ++static void xgene_enet_rx_csum(struct sk_buff *skb) + { ++ struct net_device *ndev = skb->dev; + struct iphdr *iph = ip_hdr(skb); + +- if (!ip_is_fragment(iph) || +- (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { +- skb->ip_summed = 
CHECKSUM_UNNECESSARY; +- } ++ if (!(ndev->features & NETIF_F_RXCSUM)) ++ return; ++ ++ if (skb->protocol != htons(ETH_P_IP)) ++ return; ++ ++ if (ip_is_fragment(iph)) ++ return; ++ ++ if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP) ++ return; ++ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, +@@ -537,9 +547,9 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, + buf_pool->rx_skb[skb_index] = NULL; + + /* checking for error */ +- status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || ++ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) | + GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); +- if (unlikely(status > 2)) { ++ if (unlikely(status)) { + dev_kfree_skb_any(skb); + xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), + status); +@@ -555,10 +565,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, ndev); +- if (likely((ndev->features & NETIF_F_IP_CSUM) && +- skb->protocol == htons(ETH_P_IP))) { +- xgene_enet_skip_csum(skb); +- } ++ xgene_enet_rx_csum(skb); + + rx_ring->rx_packets++; + rx_ring->rx_bytes += datalen; +@@ -1725,7 +1732,7 @@ static int xgene_enet_probe(struct platform_device *pdev) + xgene_enet_setup_ops(pdata); + + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { +- ndev->features |= NETIF_F_TSO; ++ ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM; + spin_lock_init(&pdata->mss_lock); + } + ndev->hw_features = ndev->features; +diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c +index c16ec3a51876..1ee11b600645 100644 +--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c ++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include "bgmac.h" + + static inline bool bgmac_is_bcm4707_family(struct bcma_device *core) +@@ -96,7 +97,7 @@ static int bgmac_probe(struct bcma_device *core) + struct ssb_sprom *sprom = &core->bus->sprom; + struct mii_bus *mii_bus; + struct bgmac *bgmac; +- u8 *mac; ++ const u8 *mac = NULL; + int err; + + bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); +@@ -110,21 +111,27 @@ static int bgmac_probe(struct bcma_device *core) + + bcma_set_drvdata(core, bgmac); + +- switch (core->core_unit) { +- case 0: +- mac = sprom->et0mac; +- break; +- case 1: +- mac = sprom->et1mac; +- break; +- case 2: +- mac = sprom->et2mac; +- break; +- default: +- dev_err(bgmac->dev, "Unsupported core_unit %d\n", +- core->core_unit); +- err = -ENOTSUPP; +- goto err; ++ if (bgmac->dev->of_node) ++ mac = of_get_mac_address(bgmac->dev->of_node); ++ ++ /* If no MAC address assigned via device tree, check SPROM */ ++ if (!mac) { ++ switch (core->core_unit) { ++ case 0: ++ mac = sprom->et0mac; ++ break; ++ case 1: ++ mac = sprom->et1mac; ++ break; ++ case 2: ++ mac = sprom->et2mac; ++ break; ++ default: ++ dev_err(bgmac->dev, "Unsupported core_unit %d\n", ++ core->core_unit); ++ err = -ENOTSUPP; ++ goto err; ++ } + } + + ether_addr_copy(bgmac->mac_addr, mac); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index bbb3641eddcb..3aa993bbafd9 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -1498,12 +1498,16 @@ static int bnxt_async_event_process(struct bnxt *bp, + + if (BNXT_VF(bp)) + goto async_event_process_exit; +- if (data1 & 0x20000) { ++ ++ /* 
print unsupported speed warning in forced speed mode only */ ++ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && ++ (data1 & 0x20000)) { + u16 fw_speed = link_info->force_link_speed; + u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); + +- netdev_warn(bp->dev, "Link speed %d no longer supported\n", +- speed); ++ if (speed != SPEED_UNKNOWN) ++ netdev_warn(bp->dev, "Link speed %d no longer supported\n", ++ speed); + } + set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); + /* fall thru */ +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +index 8a37012c9c89..c75d4ea9342b 100644 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c +@@ -1576,6 +1576,11 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + nic->pdev = pdev; + nic->pnicvf = nic; + nic->max_queues = qcount; ++ /* If no of CPUs are too low, there won't be any queues left ++ * for XDP_TX, hence double it. ++ */ ++ if (!nic->t88) ++ nic->max_queues *= 2; + + /* MAP VF's configuration registers */ + nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index 262587240c86..0437149f5939 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + #include + #include +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 917091871259..dd6e07c748f5 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -3209,7 +3209,7 @@ static int fec_enet_init(struct net_device *ndev) + } + + #ifdef CONFIG_OF +-static void fec_reset_phy(struct platform_device *pdev) ++static int fec_reset_phy(struct platform_device *pdev) + { + int err, phy_reset; + bool active_high = false; +@@ -3217,7 +3217,7 @@ static void fec_reset_phy(struct platform_device *pdev) + struct device_node *np = pdev->dev.of_node; + + if (!np) +- return; ++ return 0; + + of_property_read_u32(np, "phy-reset-duration", &msec); + /* A sane reset duration should not be longer than 1s */ +@@ -3225,8 +3225,10 @@ static void fec_reset_phy(struct platform_device *pdev) + msec = 1; + + phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); +- if (!gpio_is_valid(phy_reset)) +- return; ++ if (phy_reset == -EPROBE_DEFER) ++ return phy_reset; ++ else if (!gpio_is_valid(phy_reset)) ++ return 0; + + active_high = of_property_read_bool(np, "phy-reset-active-high"); + +@@ -3235,7 +3237,7 @@ static void fec_reset_phy(struct platform_device *pdev) + "phy-reset"); + if (err) { + dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); +- return; ++ return err; + } + + if (msec > 20) +@@ -3244,14 +3246,17 @@ static void fec_reset_phy(struct platform_device *pdev) + usleep_range(msec * 1000, msec * 1000 + 1000); + + gpio_set_value_cansleep(phy_reset, !active_high); ++ ++ return 0; + } + #else /* CONFIG_OF */ +-static void fec_reset_phy(struct platform_device *pdev) ++static int fec_reset_phy(struct platform_device *pdev) + { + /* + * In case of platform probe, the reset has been done + * by machine code. 
+ */ ++ return 0; + } + #endif /* CONFIG_OF */ + +@@ -3422,6 +3427,7 @@ fec_probe(struct platform_device *pdev) + if (ret) { + dev_err(&pdev->dev, + "Failed to enable phy regulator: %d\n", ret); ++ clk_disable_unprepare(fep->clk_ipg); + goto failed_regulator; + } + } else { +@@ -3434,7 +3440,9 @@ fec_probe(struct platform_device *pdev) + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + +- fec_reset_phy(pdev); ++ ret = fec_reset_phy(pdev); ++ if (ret) ++ goto failed_reset; + + if (fep->bufdesc_ex) + fec_ptp_init(pdev); +@@ -3495,8 +3503,10 @@ fec_probe(struct platform_device *pdev) + fec_ptp_stop(pdev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); ++failed_reset: ++ pm_runtime_put(&pdev->dev); ++ pm_runtime_disable(&pdev->dev); + failed_regulator: +- clk_disable_unprepare(fep->clk_ipg); + failed_clk_ipg: + fec_enet_clk_enable(ndev, false); + failed_clk: +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +index 2d0cb609adc3..b7c8433a7a37 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +@@ -773,8 +773,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key, + memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); + + /* update the current hash->queue mappings from the shadow RSS table */ +- memcpy(indir, ppe_cb->rss_indir_table, +- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); ++ if (indir) ++ memcpy(indir, ppe_cb->rss_indir_table, ++ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); + + return 0; + } +@@ -785,15 +786,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir, + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* set the RSS Hash Key if specififed by the user */ +- if (key) +- hns_ppe_set_rss_key(ppe_cb, (u32 *)key); ++ if (key) { ++ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE); ++ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); ++ } + +- /* update the shadow RSS table with user specified qids */ +- memcpy(ppe_cb->rss_indir_table, indir, +- HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); ++ if (indir) { ++ /* update the shadow RSS table with user specified qids */ ++ memcpy(ppe_cb->rss_indir_table, indir, ++ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); + +- /* now update the hardware */ +- hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); ++ /* now update the hardware */ ++ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); ++ } + + return 0; + } +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +index 1e1eb92998fb..02a03bccde7b 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +@@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) + dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0); + } + +-/** +-*hns_gmac_get_en - get port enable +-*@mac_drv:mac device +-*@rx:rx enable +-*@tx:tx enable +-*/ ++/* hns_gmac_get_en - get port enable ++ * @mac_drv:mac device ++ * @rx:rx enable ++ * @tx:tx enable ++ */ + static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx) + { + struct mac_driver *drv = (struct mac_driver *)mac_drv; +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +index c494fc52be74..62a12991ce9a 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h ++++ 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +@@ -70,7 +70,7 @@ enum dsaf_roce_qos_sl { + }; + + #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +-#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) ++#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP) + + enum hal_dsaf_mode { + HRD_DSAF_NO_DSAF_MODE = 0x0, +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +index f0ed80d6ef9c..f3be9ac47bfb 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +@@ -430,7 +430,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) + static int hns_rcb_get_port_in_comm( + struct rcb_common_cb *rcb_common, int ring_idx) + { +- + return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn); + } + +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +index c06845b7b666..a79e0a1100aa 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +@@ -511,7 +511,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, + int last_offset; + bool twobufs; + +- twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); ++ twobufs = ((PAGE_SIZE < 8192) && ++ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); +@@ -1700,7 +1701,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) + static void hns_nic_service_event_complete(struct hns_nic_priv *priv) + { + WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); +- ++ /* make sure to commit the things */ + smp_mb__before_atomic(); + clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); + } +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +index 87d5c94b2810..86a496d71995 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +@@ -1252,12 +1252,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, + + ops = priv->ae_handle->dev->ops; + +- /* currently hfunc can only be Toeplitz hash */ +- if (key || +- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) ++ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { ++ netdev_err(netdev, "Invalid hfunc!\n"); + return -EOPNOTSUPP; +- if (!indir) +- return 0; ++ } + + return ops->set_rss(priv->ae_handle, indir, key, hfunc); + } +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +index 5241e0873397..7041d83d48bf 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +@@ -942,7 +942,7 @@ static void fm10k_self_test(struct net_device *dev, + + memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); + +- if (FM10K_REMOVED(hw)) { ++ if (FM10K_REMOVED(hw->hw_addr)) { + netif_err(interface, drv, dev, + "Interface removed - test blocked\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +index 92bc8846f1ba..f4569461dcb8 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +@@ -1135,6 +1135,11 @@ static int i40e_get_eeprom_len(struct 
net_device *netdev) + struct i40e_hw *hw = &np->vsi->back->hw; + u32 val; + ++#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF ++ if (hw->mac.type == I40E_MAC_X722) { ++ val = X722_EEPROM_SCOPE_LIMIT + 1; ++ return val; ++ } + val = (rd32(hw, I40E_GLPCI_LBARCTRL) + & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) + >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index becffd15c092..57c7456a5751 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -11142,10 +11142,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + round_jiffies(jiffies + pf->service_timer_period)); + + /* add this PF to client device list and launch a client service task */ +- err = i40e_lan_add_device(pf); +- if (err) +- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", +- err); ++ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { ++ err = i40e_lan_add_device(pf); ++ if (err) ++ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", ++ err); ++ } + + #ifdef I40E_FCOE + /* create FCoE interface */ +@@ -11323,10 +11325,11 @@ static void i40e_remove(struct pci_dev *pdev) + i40e_vsi_release(pf->vsi[pf->lan_vsi]); + + /* remove attached clients */ +- ret_code = i40e_lan_del_device(pf); +- if (ret_code) { +- dev_warn(&pdev->dev, "Failed to delete client device: %d\n", +- ret_code); ++ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { ++ ret_code = i40e_lan_del_device(pf); ++ if (ret_code) ++ dev_warn(&pdev->dev, "Failed to delete client device: %d\n", ++ ret_code); + } + + /* shutdown and destroy the HMC */ +diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c +index 954efe3118db..abe290bfc638 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c +@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + { + enum i40e_status_code ret_code = 0; + +- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { +- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); +- if (!ret_code) { ++ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); ++ if (!ret_code) { ++ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); +- i40e_release_nvm(hw); ++ } else { ++ ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } +- } else { +- ret_code = i40e_read_nvm_word_srctl(hw, offset, data); ++ i40e_release_nvm(hw); + } + return ret_code; + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +index 28b640fa2e35..2e12ccf73dba 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +@@ -1820,6 +1820,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { + dev_kfree_skb_any(skb); ++ skb = NULL; + continue; + } + +diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +index 90ebc5ac16fd..7bfed441c466 100644 +--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c ++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +@@ -1262,6 +1262,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { + dev_kfree_skb_any(skb); ++ skb = NULL; 
+ continue; + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +index ed014bdbbabd..457e30427535 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +@@ -271,16 +271,34 @@ struct qed_tm_iids { + u32 per_vf_tids; + }; + +-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, ++static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn, ++ struct qed_cxt_mngr *p_mngr, + struct qed_tm_iids *iids) + { +- u32 i, j; +- +- for (i = 0; i < MAX_CONN_TYPES; i++) { ++ bool tm_vf_required = false; ++ bool tm_required = false; ++ int i, j; ++ ++ /* Timers is a special case -> we don't count how many cids require ++ * timers but what's the max cid that will be used by the timer block. ++ * therefore we traverse in reverse order, and once we hit a protocol ++ * that requires the timers memory, we'll sum all the protocols up ++ * to that one. ++ */ ++ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) { + struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; + +- if (tm_cid_proto(i)) { ++ if (tm_cid_proto(i) || tm_required) { ++ if (p_cfg->cid_count) ++ tm_required = true; ++ + iids->pf_cids += p_cfg->cid_count; ++ } ++ ++ if (tm_cid_proto(i) || tm_vf_required) { ++ if (p_cfg->cids_per_vf) ++ tm_vf_required = true; ++ + iids->per_vf_cids += p_cfg->cids_per_vf; + } + } +@@ -696,7 +714,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) + + /* TM PF */ + p_cli = &p_mngr->clients[ILT_CLI_TM]; +- qed_cxt_tm_iids(p_mngr, &tm_iids); ++ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = &p_cli->pf_blks[0]; +@@ -1591,7 +1609,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) + u8 i; + + memset(&tm_iids, 0, sizeof(tm_iids)); +- qed_cxt_tm_iids(p_mngr, &tm_iids); ++ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + + /* @@@TBD No pre-scan for now */ + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c +index 333c7442e48a..dba3fbe4800e 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c +@@ -711,7 +711,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - + cdev->num_hwfns; + +- if (!IS_ENABLED(CONFIG_QED_RDMA)) ++ if (!IS_ENABLED(CONFIG_QED_RDMA) || ++ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) + return 0; + + for_each_hwfn(cdev, i) +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +index d2d6621fe0e5..48bc5c151336 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +@@ -3573,6 +3573,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, + + void qed_inform_vf_link_state(struct qed_hwfn *hwfn) + { ++ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); + struct qed_mcp_link_capabilities caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; +@@ -3589,9 +3590,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn) + if (!vf_info) + continue; + +- memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); +- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); +- memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), ++ /* Only hwfn0 is actually interested in the link speed. 
++ * But since only it would receive an MFW indication of link, ++ * need to take configuration from it - otherwise things like ++ * rate limiting for hwfn1 VF would not work. ++ */ ++ memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn), ++ sizeof(params)); ++ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); ++ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), + sizeof(caps)); + + /* Modify link according to the VF's configured link state */ +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c +index f355df7cf84a..1b980f12663a 100644 +--- a/drivers/net/ieee802154/adf7242.c ++++ b/drivers/net/ieee802154/adf7242.c +@@ -888,7 +888,7 @@ static struct ieee802154_ops adf7242_ops = { + .set_cca_ed_level = adf7242_set_cca_ed_level, + }; + +-static void adf7242_debug(u8 irq1) ++static void adf7242_debug(struct adf7242_local *lp, u8 irq1) + { + #ifdef DEBUG + u8 stat; +@@ -932,7 +932,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) + dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", + __func__, irq1); + +- adf7242_debug(irq1); ++ adf7242_debug(lp, irq1); + + xmit = test_bit(FLAG_XMIT, &lp->flags); + +diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c +index 627eb825eb74..c747ab652665 100644 +--- a/drivers/net/ipvlan/ipvlan_core.c ++++ b/drivers/net/ipvlan/ipvlan_core.c +@@ -299,6 +299,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, + if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) + success = true; + } else { ++ if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest, ++ ipvlan->phy_dev->dev_addr)) ++ skb->pkt_type = PACKET_OTHERHOST; ++ + ret = RX_HANDLER_ANOTHER; + success = true; + } +diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c +index 8eb077b677f6..39be3b82608f 100644 +--- a/drivers/net/phy/mdio-xgene.c ++++ b/drivers/net/phy/mdio-xgene.c +@@ -232,7 +232,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id, + + val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | + SET_VAL(HSTMIIMWRDAT, data); +- xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data); ++ xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); + + val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); + xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); +diff --git a/drivers/net/veth.c b/drivers/net/veth.c +index fbc853e64531..ee7460ee3d05 100644 +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -425,6 +425,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, + if (ifmp && (dev->ifindex != 0)) + peer->ifindex = ifmp->ifi_index; + ++ peer->gso_max_size = dev->gso_max_size; ++ peer->gso_max_segs = dev->gso_max_segs; ++ + err = register_netdevice(peer); + put_net(net); + net = NULL; +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 983e941bdf29..50570d779b01 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2912,6 +2912,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, + return -EINVAL; + } + ++ if (lowerdev) { ++ dev->gso_max_size = lowerdev->gso_max_size; ++ dev->gso_max_segs = lowerdev->gso_max_segs; ++ } ++ + if (conf->mtu) { + err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); + if (err) +diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c +index 0b4d79659884..041ef3be87b9 100644 +--- a/drivers/net/wireless/ath/ath10k/ce.c ++++ b/drivers/net/wireless/ath/ath10k/ce.c +@@ -1059,7 +1059,7 @@ int ath10k_ce_alloc_pipe(struct 
ath10k *ar, int ce_id, + */ + BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); +- BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC > ++ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); +diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c +index 82a4c67f3672..6aa2b93497dd 100644 +--- a/drivers/net/wireless/ath/ath10k/debug.c ++++ b/drivers/net/wireless/ath/ath10k/debug.c +@@ -1942,6 +1942,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file, + size_t count, loff_t *ppos) + { + struct ath10k *ar = file->private_data; ++ struct ath10k_vif *arvif; ++ ++ /* Just check for for the first vif alone, as all the vifs will be ++ * sharing the same channel and if the channel is disabled, all the ++ * vifs will share the same 'is_started' state. ++ */ ++ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); ++ if (!arvif->is_started) ++ return -EINVAL; + + ieee80211_radar_detected(ar->hw); + +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 17ab8efdac35..1e6e63dbd61c 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -6054,6 +6054,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, + "mac vdev %d peer delete %pM sta %pK (sta gone)\n", + arvif->vdev_id, sta->addr, sta); + ++ if (sta->tdls) { ++ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, ++ sta, ++ WMI_TDLS_PEER_STATE_TEARDOWN); ++ if (ret) ++ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", ++ sta->addr, ++ WMI_TDLS_PEER_STATE_TEARDOWN, ret); ++ } ++ + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); + if (ret) + ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", +@@ -7070,7 +7080,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, + lockdep_assert_held(&ar->data_lock); + + WARN_ON(ctx && vifs); +- WARN_ON(vifs && n_vifs != 1); ++ WARN_ON(vifs && !n_vifs); + + /* FIXME: Sort of an optimization and a workaround. Peers and vifs are + * on a linked list now. 
Doing a lookup peer -> vif -> chanctx for each +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c +index 54df425bb0fc..e518b640aad0 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi.c ++++ b/drivers/net/wireless/ath/ath10k/wmi.c +@@ -3638,6 +3638,11 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, + + spin_lock_bh(&ar->data_lock); + ch = ar->rx_channel; ++ ++ /* fetch target operating channel during channel change */ ++ if (!ch) ++ ch = ar->tgt_oper_chan; ++ + spin_unlock_bh(&ar->data_lock); + + if (!ch) { +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h +index 1b243c899bef..9b8562ff6698 100644 +--- a/drivers/net/wireless/ath/ath10k/wmi.h ++++ b/drivers/net/wireless/ath/ath10k/wmi.h +@@ -5017,7 +5017,8 @@ enum wmi_10_4_vdev_param { + #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) + + #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 +-#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 ++#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 ++#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) + #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 + #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 + +diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c +index 24b07a0ce6f7..f8bce58d48cc 100644 +--- a/drivers/net/wireless/ath/wil6210/main.c ++++ b/drivers/net/wireless/ath/wil6210/main.c +@@ -129,9 +129,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, + u32 *d = dst; + const volatile u32 __iomem *s = src; + +- /* size_t is unsigned, if (count%4 != 0) it will wrap */ +- for (count += 4; count > 4; count -= 4) ++ for (; count >= 4; count -= 4) + *d++ = __raw_readl(s++); ++ ++ if (unlikely(count)) { ++ /* count can be 1..3 */ ++ u32 tmp = __raw_readl(s); ++ ++ memcpy(d, &tmp, count); ++ } + } + + void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst, +@@ -148,8 +154,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, + volatile u32 __iomem *d = dst; + const u32 *s = src; + +- for (count += 4; count > 4; count -= 4) ++ for (; count >= 4; count -= 4) + __raw_writel(*s++, d++); ++ ++ if (unlikely(count)) { ++ /* count can be 1..3 */ ++ u32 tmp = 0; ++ ++ memcpy(&tmp, s, count); ++ __raw_writel(tmp, d); ++ } + } + + void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil, +diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c +index fae4f1285d08..94a356bbb6b9 100644 +--- a/drivers/net/wireless/ath/wil6210/wmi.c ++++ b/drivers/net/wireless/ath/wil6210/wmi.c +@@ -501,16 +501,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) + assoc_resp_ielen = 0; + } + +- mutex_lock(&wil->mutex); + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel connect event, CID %d\n", + evt->cid); +- mutex_unlock(&wil->mutex); + /* no need for cleanup, wil_reset will do that */ + return; + } + ++ mutex_lock(&wil->mutex); ++ + if ((wdev->iftype == NL80211_IFTYPE_STATION) || + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { + if (!test_bit(wil_status_fwconnecting, wil->status)) { +@@ -608,6 +608,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, + + wil->sinfo_gen++; + ++ if (test_bit(wil_status_resetting, wil->status) || ++ !test_bit(wil_status_fwready, wil->status)) { ++ wil_err(wil, "status_resetting, cancel disconnect event\n"); ++ /* no need for cleanup, wil_reset will do that */ ++ return; ++ } ++ + 
mutex_lock(&wil->mutex); + wil6210_disconnect(wil, evt->bssid, reason_code, true); + mutex_unlock(&wil->mutex); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +index 227c5ed9cbe6..0aea476ebf50 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +@@ -1867,12 +1867,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, + struct rs_rate *rate = &search_tbl->rate; + const struct rs_tx_column *column = &rs_tx_columns[col_id]; + const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; +- u32 sz = (sizeof(struct iwl_scale_tbl_info) - +- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + unsigned long rate_mask = 0; + u32 rate_idx = 0; + +- memcpy(search_tbl, tbl, sz); ++ memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win)); + + rate->sgi = column->sgi; + rate->ant = column->ant; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +index 0e60e38b2acf..b78e60eb600f 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + u8 crypt_len, + struct iwl_rx_cmd_buffer *rxb) + { +- unsigned int hdrlen, fraglen; ++ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); ++ unsigned int fraglen; ++ ++ /* ++ * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len, ++ * but those are all multiples of 4 long) all goes away, but we ++ * want the *end* of it, which is going to be the start of the IP ++ * header, to be aligned when it gets pulled in. ++ * The beginning of the skb->data is aligned on at least a 4-byte ++ * boundary after allocation. Everything here is aligned at least ++ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by ++ * the result. ++ */ ++ skb_reserve(skb, hdrlen & 3); + + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr (including crypto if present, and +@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + * If the latter changes (there are efforts in the standards group + * to do so) we should revisit this and ieee80211_data_to_8023(). + */ +- hdrlen = (len <= skb_tailroom(skb)) ? len : +- sizeof(*hdr) + crypt_len + 8; ++ hdrlen = (len <= skb_tailroom(skb)) ? 
len : hdrlen + crypt_len + 8; + + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 61e85eac706a..4182c3775a72 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -552,8 +552,6 @@ struct mac80211_hwsim_data { + /* wmediumd portid responsible for netgroup of this radio */ + u32 wmediumd; + +- int power_level; +- + /* difference between this hw's clock and the real clock, in usecs */ + s64 tsf_offset; + s64 bcn_delta; +@@ -730,16 +728,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) + val != PS_MANUAL_POLL) + return -EINVAL; + +- old_ps = data->ps; +- data->ps = val; +- +- local_bh_disable(); + if (val == PS_MANUAL_POLL) { ++ if (data->ps != PS_ENABLED) ++ return -EINVAL; ++ local_bh_disable(); + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + hwsim_send_ps_poll, data); +- data->ps_poll_pending = true; +- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { ++ local_bh_enable(); ++ return 0; ++ } ++ old_ps = data->ps; ++ data->ps = val; ++ ++ local_bh_disable(); ++ if (old_ps == PS_DISABLED && val != PS_DISABLED) { + ieee80211_iterate_active_interfaces_atomic( + data->hw, IEEE80211_IFACE_ITER_NORMAL, + hwsim_send_nullfunc_ps, data); +@@ -1208,7 +1211,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, + if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + rx_status.flag |= RX_FLAG_SHORT_GI; + /* TODO: simulate real signal strength (and optional packet loss) */ +- rx_status.signal = data->power_level - 50; ++ rx_status.signal = -50; ++ if (info->control.vif) ++ rx_status.signal += info->control.vif->bss_conf.txpower; + + if (data->ps != PS_DISABLED) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); +@@ -1607,7 +1612,6 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) + + WARN_ON(data->channel && data->use_chanctx); + +- data->power_level = conf->power_level; + if (!data->started || !data->beacon_int) + tasklet_hrtimer_cancel(&data->beacon_timer); + else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { +@@ -2212,7 +2216,6 @@ static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { + "d_tx_failed", + "d_ps_mode", + "d_group", +- "d_tx_power", + }; + + #define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) +@@ -2249,7 +2252,6 @@ static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, + data[i++] = ar->tx_failed; + data[i++] = ar->ps; + data[i++] = ar->group; +- data[i++] = ar->power_level; + + WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); + } +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 8677a53ef725..48d51be11f9b 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1109,6 +1109,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype; + ++ if (priv->scan_request) { ++ mwifiex_dbg(priv->adapter, ERROR, ++ "change virtual interface: scan in process\n"); ++ return -EBUSY; ++ } ++ + switch (curr_iftype) { + case NL80211_IFTYPE_ADHOC: + switch (type) { +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c +index 8d601dcf2948..486b8c75cd1f 100644 +--- 
a/drivers/net/wireless/marvell/mwifiex/sdio.c ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c +@@ -1458,7 +1458,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, + } + + if (card->mpa_rx.pkt_cnt == 1) +- mport = adapter->ioport + port; ++ mport = adapter->ioport + card->mpa_rx.start_port; + + if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, + card->mpa_rx.buf_len, mport, 1)) +@@ -1891,7 +1891,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, + } + + if (card->mpa_tx.pkt_cnt == 1) +- mport = adapter->ioport + port; ++ mport = adapter->ioport + card->mpa_tx.start_port; + + ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, + card->mpa_tx.buf_len, mport); +diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +index c5effd6c6be9..01ca1d57b3d9 100644 +--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c ++++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +@@ -1278,6 +1278,9 @@ static int eject_installer(struct usb_interface *intf) + u8 bulk_out_ep; + int r; + ++ if (iface_desc->desc.bNumEndpoints < 2) ++ return -ENODEV; ++ + /* Find bulk out endpoint */ + for (r = 1; r >= 0; r--) { + endpoint = &iface_desc->endpoint[r].desc; +diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c +index af62c4c854f3..b4f31dad40d6 100644 +--- a/drivers/nfc/nfcmrvl/fw_dnld.c ++++ b/drivers/nfc/nfcmrvl/fw_dnld.c +@@ -17,7 +17,7 @@ + */ + + #include +-#include ++#include + #include + #include + #include +diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c +index a7faa0bcc01e..fc8e78a29d77 100644 +--- a/drivers/nfc/nfcmrvl/spi.c ++++ b/drivers/nfc/nfcmrvl/spi.c +@@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv, + /* Send the SPI packet */ + err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, + skb); +- if (err != 0) { ++ if (err) + nfc_err(priv->dev, "spi_send failed %d", err); +- kfree_skb(skb); +- } ++ + return err; + } + +diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c +index 1dc89248e58e..11d78b43cf76 100644 +--- a/drivers/nfc/pn533/i2c.c ++++ b/drivers/nfc/pn533/i2c.c +@@ -242,10 +242,10 @@ static int pn533_i2c_remove(struct i2c_client *client) + + dev_dbg(&client->dev, "%s\n", __func__); + +- pn533_unregister_device(phy->priv); +- + free_irq(client->irq, phy); + ++ pn533_unregister_device(phy->priv); ++ + return 0; + } + +diff --git a/drivers/of/device.c b/drivers/of/device.c +index f7a970120055..3cda60c036f9 100644 +--- a/drivers/of/device.c ++++ b/drivers/of/device.c +@@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) + str[i] = '_'; + } + +- return tsize; ++ return repend; + } + EXPORT_SYMBOL_GPL(of_device_get_modalias); + +diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c +index dafb4cdb2b7f..d392a55ec0a9 100644 +--- a/drivers/pci/host/pci-hyperv.c ++++ b/drivers/pci/host/pci-hyperv.c +@@ -351,6 +351,7 @@ enum hv_pcibus_state { + hv_pcibus_init = 0, + hv_pcibus_probed, + hv_pcibus_installed, ++ hv_pcibus_removed, + hv_pcibus_maximum + }; + +@@ -1205,9 +1206,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) + hbus->pci_bus->msi = &hbus->msi_chip; + hbus->pci_bus->msi->dev = &hbus->hdev->device; + ++ pci_lock_rescan_remove(); + pci_scan_child_bus(hbus->pci_bus); + pci_bus_assign_resources(hbus->pci_bus); + pci_bus_add_devices(hbus->pci_bus); ++ pci_unlock_rescan_remove(); + hbus->state = 
hv_pcibus_installed; + return 0; + } +@@ -1489,13 +1492,24 @@ static void pci_devices_present_work(struct work_struct *work) + put_pcichild(hpdev, hv_pcidev_ref_initial); + } + +- /* Tell the core to rescan bus because there may have been changes. */ +- if (hbus->state == hv_pcibus_installed) { ++ switch(hbus->state) { ++ case hv_pcibus_installed: ++ /* ++ * Tell the core to rescan bus ++ * because there may have been changes. ++ */ + pci_lock_rescan_remove(); + pci_scan_child_bus(hbus->pci_bus); + pci_unlock_rescan_remove(); +- } else { ++ break; ++ ++ case hv_pcibus_init: ++ case hv_pcibus_probed: + survey_child_resources(hbus); ++ break; ++ ++ default: ++ break; + } + + up(&hbus->enum_sem); +@@ -1585,8 +1599,10 @@ static void hv_eject_device_work(struct work_struct *work) + pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, + wslot); + if (pdev) { ++ pci_lock_rescan_remove(); + pci_stop_and_remove_bus_device(pdev); + pci_dev_put(pdev); ++ pci_unlock_rescan_remove(); + } + + memset(&ctxt, 0, sizeof(ctxt)); +@@ -2170,6 +2186,7 @@ static int hv_pci_probe(struct hv_device *hdev, + hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); + if (!hbus) + return -ENOMEM; ++ hbus->state = hv_pcibus_init; + + /* + * The PCI bus "domain" is what is called "segment" in ACPI and +@@ -2312,6 +2329,7 @@ static int hv_pci_remove(struct hv_device *hdev) + pci_stop_root_bus(hbus->pci_bus); + pci_remove_root_bus(hbus->pci_bus); + pci_unlock_rescan_remove(); ++ hbus->state = hv_pcibus_removed; + } + + ret = hv_send_resources_released(hdev); +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 802997e2ddcc..d81ad841dc0c 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -463,8 +463,6 @@ static void pci_device_shutdown(struct device *dev) + + if (drv && drv->shutdown) + drv->shutdown(pci_dev); +- pci_msi_shutdown(pci_dev); +- pci_msix_shutdown(pci_dev); + + /* + * If this is a kexec reboot, turn off Bus Master bit on the +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 0c9edc9d7c44..4c9fb8b323e8 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4104,6 +4104,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) + */ + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); + ++ if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) ++ return -ENOTTY; ++ + return acs_flags ? 0 : 1; + } + +diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c +index b37b57294566..af82edc7fa5c 100644 +--- a/drivers/perf/arm_pmu.c ++++ b/drivers/perf/arm_pmu.c +@@ -322,10 +322,16 @@ validate_group(struct perf_event *event) + return 0; + } + ++static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) ++{ ++ struct platform_device *pdev = armpmu->plat_device; ++ ++ return pdev ? dev_get_platdata(&pdev->dev) : NULL; ++} ++ + static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) + { + struct arm_pmu *armpmu; +- struct platform_device *plat_device; + struct arm_pmu_platdata *plat; + int ret; + u64 start_clock, finish_clock; +@@ -337,8 +343,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) + * dereference. 
+ */ + armpmu = *(void **)dev; +- plat_device = armpmu->plat_device; +- plat = dev_get_platdata(&plat_device->dev); ++ ++ plat = armpmu_get_platdata(armpmu); + + start_clock = sched_clock(); + if (plat && plat->handle_irq) +diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c +index 5cee9aa87aa3..48a11fd86a7f 100644 +--- a/drivers/power/supply/ab8500_charger.c ++++ b/drivers/power/supply/ab8500_charger.c +@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di) + } + + /* Enable backup battery charging */ +- abx500_mask_and_set_register_interruptible(di->dev, ++ ret = abx500_mask_and_set_register_interruptible(di->dev, + AB8500_RTC, AB8500_RTC_CTRL_REG, + RTC_BUP_CH_ENA, RTC_BUP_CH_ENA); +- if (ret < 0) ++ if (ret < 0) { + dev_err(di->dev, "%s mask and set failed\n", __func__); ++ goto out; ++ } + + if (is_ab8540(di->parent)) { + ret = abx500_mask_and_set_register_interruptible(di->dev, +diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c +index e464582a390a..3439f1e902cb 100644 +--- a/drivers/pwm/pwm-stmpe.c ++++ b/drivers/pwm/pwm-stmpe.c +@@ -145,7 +145,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + break; + + case 2: +- offset = STMPE24XX_PWMIC1; ++ offset = STMPE24XX_PWMIC2; + break; + + default: +diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c +index e4647840cd6e..7e8906d6ab7a 100644 +--- a/drivers/pwm/pwm-tegra.c ++++ b/drivers/pwm/pwm-tegra.c +@@ -76,6 +76,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); + unsigned long long c = duty_ns; + unsigned long rate, hz; ++ unsigned long long ns100 = NSEC_PER_SEC; + u32 val = 0; + int err; + +@@ -95,9 +96,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + * cycles at the PWM clock rate will take period_ns nanoseconds. 
+ */ + rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; +- hz = NSEC_PER_SEC / period_ns; + +- rate = (rate + (hz / 2)) / hz; ++ /* Consider precision in PWM_SCALE_WIDTH rate calculation */ ++ ns100 *= 100; ++ hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns); ++ rate = DIV_ROUND_CLOSEST(rate * 100, hz); + + /* + * Since the actual PWM divider is the register's frequency divider +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 9403245503de..178fcda12cec 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -2465,7 +2465,7 @@ static int _regulator_list_voltage(struct regulator *regulator, + ret = ops->list_voltage(rdev, selector); + if (lock) + mutex_unlock(&rdev->mutex); +- } else if (rdev->supply) { ++ } else if (rdev->is_switch && rdev->supply) { + ret = _regulator_list_voltage(rdev->supply, selector, lock); + } else { + return -EINVAL; +@@ -2523,7 +2523,7 @@ int regulator_count_voltages(struct regulator *regulator) + if (rdev->desc->n_voltages) + return rdev->desc->n_voltages; + +- if (!rdev->supply) ++ if (!rdev->is_switch || !rdev->supply) + return -EINVAL; + + return regulator_count_voltages(rdev->supply); +@@ -4049,6 +4049,11 @@ regulator_register(const struct regulator_desc *regulator_desc, + mutex_unlock(®ulator_list_mutex); + } + ++ if (!rdev->desc->ops->get_voltage && ++ !rdev->desc->ops->list_voltage && ++ !rdev->desc->fixed_uV) ++ rdev->is_switch = true; ++ + ret = device_register(&rdev->dev); + if (ret != 0) { + put_device(&rdev->dev); +diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c +index be65da2988fb..838edbba8aec 100644 +--- a/drivers/scsi/be2iscsi/be_cmds.c ++++ b/drivers/scsi/be2iscsi/be_cmds.c +@@ -246,6 +246,12 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, + { + int rc = 0; + ++ if (!tag || tag > MAX_MCC_CMD) { ++ __beiscsi_log(phba, KERN_ERR, ++ "BC_%d : invalid tag %u\n", tag); ++ return -EINVAL; ++ } ++ + if (beiscsi_hba_in_error(phba)) { + clear_bit(MCC_TAG_STATE_RUNNING, + &phba->ctrl.ptag_state[tag].tag_state); +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 44dd372aa7d3..c056b8111ad2 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -1127,12 +1127,6 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, + else + CMD_ABTS_STATUS(sc) = hdr_status; + +- atomic64_dec(&fnic_stats->io_stats.active_ios); +- if (atomic64_read(&fnic->io_cmpl_skip)) +- atomic64_dec(&fnic->io_cmpl_skip); +- else +- atomic64_inc(&fnic_stats->io_stats.io_completions); +- + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + +@@ -1173,6 +1167,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, + (((u64)CMD_FLAGS(sc) << 32) | + CMD_STATE(sc))); + sc->scsi_done(sc); ++ atomic64_dec(&fnic_stats->io_stats.active_ios); ++ if (atomic64_read(&fnic->io_cmpl_skip)) ++ atomic64_dec(&fnic->io_cmpl_skip); ++ else ++ atomic64_inc(&fnic_stats->io_stats.io_completions); + } + } + +@@ -1962,6 +1961,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) + /* Call SCSI completion function to complete the IO */ + sc->result = (DID_ABORT << 16); + sc->scsi_done(sc); ++ atomic64_dec(&fnic_stats->io_stats.active_ios); ++ if (atomic64_read(&fnic->io_cmpl_skip)) ++ atomic64_dec(&fnic->io_cmpl_skip); ++ else ++ atomic64_inc(&fnic_stats->io_stats.io_completions); + } + + fnic_abort_cmd_end: +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c +index 532474109624..c5bc41d97f84 100644 +--- 
a/drivers/scsi/ipr.c ++++ b/drivers/scsi/ipr.c +@@ -836,8 +836,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) + + qc->err_mask |= AC_ERR_OTHER; + sata_port->ioasa.status |= ATA_BUSY; +- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + ata_qc_complete(qc); ++ if (ipr_cmd->eh_comp) ++ complete(ipr_cmd->eh_comp); ++ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + } + + /** +@@ -5947,8 +5949,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) + res->in_erp = 0; + } + scsi_dma_unmap(ipr_cmd->scsi_cmd); +- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + scsi_cmd->scsi_done(scsi_cmd); ++ if (ipr_cmd->eh_comp) ++ complete(ipr_cmd->eh_comp); ++ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + } + + /** +@@ -6338,8 +6342,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, + } + + scsi_dma_unmap(ipr_cmd->scsi_cmd); +- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + scsi_cmd->scsi_done(scsi_cmd); ++ if (ipr_cmd->eh_comp) ++ complete(ipr_cmd->eh_comp); ++ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + } + + /** +@@ -6365,8 +6371,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) + scsi_dma_unmap(scsi_cmd); + + spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); +- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + scsi_cmd->scsi_done(scsi_cmd); ++ if (ipr_cmd->eh_comp) ++ complete(ipr_cmd->eh_comp); ++ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); + spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); + } else { + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 94630d4738e6..baccd116f864 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -1443,7 +1443,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) + void + qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) + { +- int que, cnt; ++ int que, cnt, status; + unsigned long flags; + srb_t *sp; + struct qla_hw_data *ha = vha->hw; +@@ -1473,8 +1473,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) + */ + sp_get(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +- qla2xxx_eh_abort(GET_CMD_SP(sp)); ++ status = qla2xxx_eh_abort(GET_CMD_SP(sp)); + spin_lock_irqsave(&ha->hardware_lock, flags); ++ /* Get rid of extra reference if immediate exit ++ * from ql2xxx_eh_abort */ ++ if (status == FAILED && (qla2x00_isp_reg_stat(ha))) ++ atomic_dec(&sp->ref_count); + } + req->outstanding_cmds[cnt] = NULL; + sp->done(vha, sp, res); +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 26e6b05d05fc..43d4b30cbf65 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -180,7 +180,7 @@ static struct { + {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ +- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ ++ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ + {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, + {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, + {"HP", "C1557A", NULL, BLIST_FORCELUN}, +@@ -596,17 +596,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev, + int key) + { + struct scsi_dev_info_list *devinfo; +- int err; + + devinfo = scsi_dev_info_list_find(vendor, model, key); + if (!IS_ERR(devinfo)) + return 
devinfo->flags; + +- err = PTR_ERR(devinfo); +- if (err != -ENOENT) +- return err; +- +- /* nothing found, return nothing */ ++ /* key or device not found: return nothing */ + if (key != SCSI_DEVINFO_GLOBAL) + return 0; + +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c +index 84addee05be6..a5e30e9449ef 100644 +--- a/drivers/scsi/scsi_dh.c ++++ b/drivers/scsi/scsi_dh.c +@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { + {"IBM", "1815", "rdac", }, + {"IBM", "1818", "rdac", }, + {"IBM", "3526", "rdac", }, ++ {"IBM", "3542", "rdac", }, ++ {"IBM", "3552", "rdac", }, + {"SGI", "TP9", "rdac", }, + {"SGI", "IS", "rdac", }, +- {"STK", "OPENstorage D280", "rdac", }, ++ {"STK", "OPENstorage", "rdac", }, + {"STK", "FLEXLINE 380", "rdac", }, ++ {"STK", "BladeCtlr", "rdac", }, + {"SUN", "CSM", "rdac", }, + {"SUN", "LCSM100", "rdac", }, + {"SUN", "STK6580_6780", "rdac", }, +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index 50adabbb5808..69046d342bc5 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -548,7 +548,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + ecomp = &edev->component[components++]; + + if (!IS_ERR(ecomp)) { +- ses_get_power_status(edev, ecomp); + if (addl_desc_ptr) + ses_process_descriptor( + ecomp, +@@ -579,13 +578,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + } + + static void ses_match_to_enclosure(struct enclosure_device *edev, +- struct scsi_device *sdev) ++ struct scsi_device *sdev, ++ int refresh) + { ++ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent); + struct efd efd = { + .addr = 0, + }; + +- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); ++ if (refresh) ++ ses_enclosure_data_process(edev, edev_sdev, 0); + + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) + efd.addr = sas_get_address(sdev); +@@ -616,7 +618,7 @@ static int ses_intf_add(struct device *cdev, + struct enclosure_device *prev = NULL; + + while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { +- ses_match_to_enclosure(edev, sdev); ++ ses_match_to_enclosure(edev, sdev, 1); + prev = edev; + } + return -ENODEV; +@@ -728,7 +730,7 @@ static int ses_intf_add(struct device *cdev, + shost_for_each_device(tmp_sdev, sdev->host) { + if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) + continue; +- ses_match_to_enclosure(edev, tmp_sdev); ++ ses_match_to_enclosure(edev, tmp_sdev, 0); + } + + return 0; +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index cd9537ddc19f..7592ac8514d2 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -524,6 +524,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) + } else + count = (old_hdr->result == 0) ? 0 : -EIO; + sg_finish_rem_req(srp); ++ sg_remove_request(sfp, srp); + retval = count; + free_old_hdr: + kfree(old_hdr); +@@ -564,6 +565,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) + } + err_out: + err2 = sg_finish_rem_req(srp); ++ sg_remove_request(sfp, srp); + return err ? : err2 ? : count; + } + +@@ -663,18 +665,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + * is a non-zero input_size, so emit a warning. 
+ */ + if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { +- static char cmd[TASK_COMM_LEN]; +- if (strcmp(current->comm, cmd)) { +- printk_ratelimited(KERN_WARNING +- "sg_write: data in/out %d/%d bytes " +- "for SCSI command 0x%x-- guessing " +- "data in;\n program %s not setting " +- "count and/or reply_len properly\n", +- old_hdr.reply_len - (int)SZ_SG_HEADER, +- input_size, (unsigned int) cmnd[0], +- current->comm); +- strcpy(cmd, current->comm); +- } ++ printk_ratelimited(KERN_WARNING ++ "sg_write: data in/out %d/%d bytes " ++ "for SCSI command 0x%x-- guessing " ++ "data in;\n program %s not setting " ++ "count and/or reply_len properly\n", ++ old_hdr.reply_len - (int)SZ_SG_HEADER, ++ input_size, (unsigned int) cmnd[0], ++ current->comm); + } + k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); + return (k < 0) ? k : count; +@@ -773,11 +771,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int) cmnd[0], (int) hp->cmd_len)); + ++ if (hp->dxfer_len >= SZ_256M) ++ return -EINVAL; ++ + k = sg_start_req(srp, cmnd); + if (k) { + SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, + "sg_common_write: start_req err=%d\n", k)); + sg_finish_rem_req(srp); ++ sg_remove_request(sfp, srp); + return k; /* probably out of space --> ENOMEM */ + } + if (atomic_read(&sdp->detaching)) { +@@ -790,6 +792,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + } + + sg_finish_rem_req(srp); ++ sg_remove_request(sfp, srp); + return -ENODEV; + } + +@@ -1280,6 +1283,7 @@ sg_rq_end_io_usercontext(struct work_struct *work) + struct sg_fd *sfp = srp->parentfp; + + sg_finish_rem_req(srp); ++ sg_remove_request(sfp, srp); + kref_put(&sfp->f_ref, sg_remove_sfp); + } + +@@ -1824,8 +1828,6 @@ sg_finish_rem_req(Sg_request *srp) + else + sg_remove_scat(sfp, req_schp); + +- sg_remove_request(sfp, srp); +- + return ret; + } + +@@ -2172,12 +2174,17 @@ sg_remove_sfp_usercontext(struct work_struct *work) + struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); + struct sg_device *sdp = sfp->parentdp; + Sg_request *srp; ++ unsigned long iflags; + + /* Cleanup any responses which were never read(). 
*/ ++ write_lock_irqsave(&sfp->rq_list_lock, iflags); + while (!list_empty(&sfp->rq_list)) { + srp = list_first_entry(&sfp->rq_list, Sg_request, entry); + sg_finish_rem_req(srp); ++ list_del(&srp->entry); ++ srp->parentfp = NULL; + } ++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + if (sfp->reserve.bufflen > 0) { + SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c +index d5157b2222ce..a47cf638460a 100644 +--- a/drivers/spi/spi-omap2-mcspi.c ++++ b/drivers/spi/spi-omap2-mcspi.c +@@ -454,6 +454,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, + int elements = 0; + int word_len, element_count; + struct omap2_mcspi_cs *cs = spi->controller_state; ++ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; ++ + mcspi = spi_master_get_devdata(spi->master); + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + count = xfer->len; +@@ -549,8 +551,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + elements--; + +- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) +- & OMAP2_MCSPI_CHSTAT_RXS)) { ++ if (!mcspi_wait_for_reg_bit(chstat_reg, ++ OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); +@@ -568,8 +570,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, + return count; + } + } +- if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) +- & OMAP2_MCSPI_CHSTAT_RXS)) { ++ if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) { + u32 w; + + w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c +index 9918a57a6a6e..7e7da97982aa 100644 +--- a/drivers/spi/spi-sun6i.c ++++ b/drivers/spi/spi-sun6i.c +@@ -464,7 +464,7 @@ static int sun6i_spi_probe(struct platform_device *pdev) + + static int sun6i_spi_remove(struct platform_device *pdev) + { +- pm_runtime_disable(&pdev->dev); ++ pm_runtime_force_suspend(&pdev->dev); + + return 0; + } +diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c +index e744aa9730ff..dea018cba094 100644 +--- a/drivers/staging/speakup/kobjects.c ++++ b/drivers/staging/speakup/kobjects.c +@@ -834,7 +834,9 @@ static ssize_t message_show(struct kobject *kobj, + struct msg_group_t *group = spk_find_msg_group(attr->attr.name); + unsigned long flags; + +- BUG_ON(!group); ++ if (WARN_ON(!group)) ++ return -EINVAL; ++ + spin_lock_irqsave(&speakup_info.spinlock, flags); + retval = message_show_helper(buf, group->start, group->end); + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +@@ -846,7 +848,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr, + { + struct msg_group_t *group = spk_find_msg_group(attr->attr.name); + +- BUG_ON(!group); ++ if (WARN_ON(!group)) ++ return -EINVAL; ++ + return message_store_helper(buf, count, group); + } + +diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c +index 6ab7443eabde..6326375b76ab 100644 +--- a/drivers/staging/wilc1000/host_interface.c ++++ b/drivers/staging/wilc1000/host_interface.c +@@ -1930,6 +1930,8 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, + wid.type = WID_STR; + wid.size = ETH_ALEN; + wid.val = kmalloc(wid.size, GFP_KERNEL); ++ if (!wid.val) ++ return -ENOMEM; + + stamac = wid.val; + memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); +diff --git a/drivers/tty/serial/amba-pl011.c 
b/drivers/tty/serial/amba-pl011.c +index e2c33b9528d8..b42d7f1c9089 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -1302,14 +1302,15 @@ static void pl011_stop_tx(struct uart_port *port) + pl011_dma_tx_stop(uap); + } + +-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); ++static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); + + /* Start TX with programmed I/O only (no DMA) */ + static void pl011_start_tx_pio(struct uart_amba_port *uap) + { +- uap->im |= UART011_TXIM; +- pl011_write(uap->im, uap, REG_IMSC); +- pl011_tx_chars(uap, false); ++ if (pl011_tx_chars(uap, false)) { ++ uap->im |= UART011_TXIM; ++ pl011_write(uap->im, uap, REG_IMSC); ++ } + } + + static void pl011_start_tx(struct uart_port *port) +@@ -1389,25 +1390,26 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, + return true; + } + +-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) ++/* Returns true if tx interrupts have to be (kept) enabled */ ++static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) + { + struct circ_buf *xmit = &uap->port.state->xmit; + int count = uap->fifosize >> 1; + + if (uap->port.x_char) { + if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) +- return; ++ return true; + uap->port.x_char = 0; + --count; + } + if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { + pl011_stop_tx(&uap->port); +- return; ++ return false; + } + + /* If we are using DMA mode, try to send some characters. */ + if (pl011_dma_tx_irq(uap)) +- return; ++ return true; + + do { + if (likely(from_irq) && count-- == 0) +@@ -1422,8 +1424,11 @@ static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); + +- if (uart_circ_empty(xmit)) ++ if (uart_circ_empty(xmit)) { + pl011_stop_tx(&uap->port); ++ return false; ++ } ++ return true; + } + + static void pl011_modem_status(struct uart_amba_port *uap) +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 521a6e450755..f575a33974fa 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -1316,19 +1316,10 @@ static int imx_startup(struct uart_port *port) + if (!is_imx1_uart(sport)) { + temp = readl(sport->port.membase + UCR3); + +- /* +- * The effect of RI and DCD differs depending on the UFCR_DCEDTE +- * bit. In DCE mode they control the outputs, in DTE mode they +- * enable the respective irqs. At least the DCD irq cannot be +- * cleared on i.MX25 at least, so it's not usable and must be +- * disabled. I don't have test hardware to check if RI has the +- * same problem but I consider this likely so it's disabled for +- * now, too. 
+- */ +- temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | +- UCR3_DTRDEN | UCR3_RI | UCR3_DCD; ++ temp |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD; + + if (sport->dte_mode) ++ /* disable broken interrupts */ + temp &= ~(UCR3_RI | UCR3_DCD); + + writel(temp, sport->port.membase + UCR3); +@@ -1583,8 +1574,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, + + ufcr = readl(sport->port.membase + UFCR); + ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div); +- if (sport->dte_mode) +- ufcr |= UFCR_DCEDTE; + writel(ufcr, sport->port.membase + UFCR); + + writel(num, sport->port.membase + UBIR); +@@ -2149,6 +2138,27 @@ static int serial_imx_probe(struct platform_device *pdev) + UCR1_TXMPTYEN | UCR1_RTSDEN); + writel_relaxed(reg, sport->port.membase + UCR1); + ++ if (!is_imx1_uart(sport) && sport->dte_mode) { ++ /* ++ * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI ++ * and influences if UCR3_RI and UCR3_DCD changes the level of RI ++ * and DCD (when they are outputs) or enables the respective ++ * irqs. So set this bit early, i.e. before requesting irqs. ++ */ ++ writel(UFCR_DCEDTE, sport->port.membase + UFCR); ++ ++ /* ++ * Disable UCR3_RI and UCR3_DCD irqs. They are also not ++ * enabled later because they cannot be cleared ++ * (confirmed on i.MX25) which makes them unusable. ++ */ ++ writel(IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR, ++ sport->port.membase + UCR3); ++ ++ } else { ++ writel(0, sport->port.membase + UFCR); ++ } ++ + clk_disable_unprepare(sport->clk_ipg); + + /* +diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c +index df5a06578005..dfc0566bb155 100644 +--- a/drivers/usb/dwc2/hcd.c ++++ b/drivers/usb/dwc2/hcd.c +@@ -3220,6 +3220,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work) + dwc2_core_init(hsotg, false); + dwc2_enable_global_interrupts(hsotg); + spin_lock_irqsave(&hsotg->lock, flags); ++ dwc2_hsotg_disconnect(hsotg); + dwc2_hsotg_core_init_disconnected(hsotg, false); + spin_unlock_irqrestore(&hsotg->lock, flags); + dwc2_hsotg_core_connect(hsotg); +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index fea446900cad..a0c2b8b6edd0 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -463,6 +463,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc) + + reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + ++ /* ++ * Make sure UX_EXIT_PX is cleared as that causes issues with some ++ * PHYs. Also, this bit is not supposed to be used in normal operation. ++ */ ++ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; ++ + /* + * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY + * to '0' during coreConsultant configuration. 
So default value +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 884c43714456..94d6a3e2ad97 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -159,13 +159,15 @@ + #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) + #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) + +-#define DWC3_TXFIFOQ 1 +-#define DWC3_RXFIFOQ 3 +-#define DWC3_TXREQQ 5 +-#define DWC3_RXREQQ 7 +-#define DWC3_RXINFOQ 9 +-#define DWC3_DESCFETCHQ 13 +-#define DWC3_EVENTQ 15 ++#define DWC3_TXFIFOQ 0 ++#define DWC3_RXFIFOQ 1 ++#define DWC3_TXREQQ 2 ++#define DWC3_RXREQQ 3 ++#define DWC3_RXINFOQ 4 ++#define DWC3_PSTATQ 5 ++#define DWC3_DESCFETCHQ 6 ++#define DWC3_EVENTQ 7 ++#define DWC3_AUXEVENTQ 8 + + /* Global RX Threshold Configuration Register */ + #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) +@@ -223,6 +225,7 @@ + #define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31) + #define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29) + #define DWC3_GUSB3PIPECTL_DISRXDETINP3 (1 << 28) ++#define DWC3_GUSB3PIPECTL_UX_EXIT_PX (1 << 27) + #define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24) + #define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19) + #define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7) +diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c +index ccb9c213cc9f..e9bd8d4abca0 100644 +--- a/drivers/usb/gadget/udc/bdc/bdc_core.c ++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c +@@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev) + bdc->dev = dev; + dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq); + +- temp = bdc_readl(bdc->regs, BDC_BDCSC); ++ temp = bdc_readl(bdc->regs, BDC_BDCCAP1); + if ((temp & BDC_P64) && + !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { + dev_dbg(bdc->dev, "Using 64-bit address\n"); +diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c +index 02968842b359..708e36f530d8 100644 +--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c ++++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c +@@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) + if (ret) { + dev_err(&pci->dev, + "couldn't add resources to bdc device\n"); ++ platform_device_put(bdc); + return ret; + } + +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index b62a3de65075..ff4d6cac7ac0 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -2103,16 +2103,13 @@ static int dummy_hub_control( + } + break; + case USB_PORT_FEAT_POWER: +- if (hcd->speed == HCD_USB3) { +- if (dum_hcd->port_status & USB_PORT_STAT_POWER) +- dev_dbg(dummy_dev(dum_hcd), +- "power-off\n"); +- } else +- if (dum_hcd->port_status & +- USB_SS_PORT_STAT_POWER) +- dev_dbg(dummy_dev(dum_hcd), +- "power-off\n"); +- /* FALLS THROUGH */ ++ dev_dbg(dummy_dev(dum_hcd), "power-off\n"); ++ if (hcd->speed == HCD_USB3) ++ dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER; ++ else ++ dum_hcd->port_status &= ~USB_PORT_STAT_POWER; ++ set_link_state(dum_hcd); ++ break; + default: + dum_hcd->port_status &= ~(1 << wValue); + set_link_state(dum_hcd); +@@ -2283,14 +2280,13 @@ static int dummy_hub_control( + if ((dum_hcd->port_status & + USB_SS_PORT_STAT_POWER) != 0) { + dum_hcd->port_status |= (1 << wValue); +- set_link_state(dum_hcd); + } + } else + if ((dum_hcd->port_status & + USB_PORT_STAT_POWER) != 0) { + dum_hcd->port_status |= (1 << wValue); +- set_link_state(dum_hcd); + } ++ set_link_state(dum_hcd); + } + break; 
+ case GetPortErrorCount: +diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c +index d3d124753266..bd6e06ef88ac 100644 +--- a/drivers/usb/misc/lvstest.c ++++ b/drivers/usb/misc/lvstest.c +@@ -433,6 +433,7 @@ static void lvs_rh_disconnect(struct usb_interface *intf) + struct lvs_rh *lvs = usb_get_intfdata(intf); + + sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group); ++ usb_poison_urb(lvs->urb); /* used in scheduled work */ + flush_work(&lvs->rh_work); + usb_free_urb(lvs->urb); + } +diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c +index 59b3f62a2d64..70c748a5fbcc 100644 +--- a/drivers/vfio/vfio_iommu_spapr_tce.c ++++ b/drivers/vfio/vfio_iommu_spapr_tce.c +@@ -195,6 +195,11 @@ static long tce_iommu_register_pages(struct tce_container *container, + return ret; + + tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); ++ if (!tcemem) { ++ mm_iommu_put(container->mm, mem); ++ return -ENOMEM; ++ } ++ + tcemem->mem = mem; + list_add(&tcemem->next, &container->prereg_list); + +@@ -1332,8 +1337,16 @@ static int tce_iommu_attach_group(void *iommu_data, + + if (!table_group->ops || !table_group->ops->take_ownership || + !table_group->ops->release_ownership) { ++ if (container->v2) { ++ ret = -EPERM; ++ goto unlock_exit; ++ } + ret = tce_iommu_take_ownership(container, table_group); + } else { ++ if (!container->v2) { ++ ret = -EPERM; ++ goto unlock_exit; ++ } + ret = tce_iommu_take_ownership_ddw(container, table_group); + if (!tce_groups_attached(container) && !container->tables[0]) + container->def_window_pending = true; +diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c +index ec2671d98abc..89880b70cc28 100644 +--- a/drivers/video/fbdev/amba-clcd.c ++++ b/drivers/video/fbdev/amba-clcd.c +@@ -892,8 +892,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) + if (err) + return err; + +- framesize = fb->panel->mode.xres * fb->panel->mode.yres * +- fb->panel->bpp / 8; ++ framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres * ++ fb->panel->bpp / 8); + fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, + &dma, GFP_KERNEL); + if (!fb->fb.screen_base) +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c +index 47d7f69ad9ad..48c6500c24e1 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c +@@ -941,11 +941,13 @@ static int dss_init_features(struct platform_device *pdev) + return 0; + } + ++static void dss_uninit_ports(struct platform_device *pdev); ++ + static int dss_init_ports(struct platform_device *pdev) + { + struct device_node *parent = pdev->dev.of_node; + struct device_node *port; +- int r; ++ int r, ret = 0; + + if (parent == NULL) + return 0; +@@ -972,17 +974,21 @@ static int dss_init_ports(struct platform_device *pdev) + + switch (port_type) { + case OMAP_DISPLAY_TYPE_DPI: +- dpi_init_port(pdev, port); ++ ret = dpi_init_port(pdev, port); + break; + case OMAP_DISPLAY_TYPE_SDI: +- sdi_init_port(pdev, port); ++ ret = sdi_init_port(pdev, port); + break; + default: + break; + } +- } while ((port = omapdss_of_get_next_port(parent, port)) != NULL); ++ } while (!ret && ++ (port = omapdss_of_get_next_port(parent, port)) != NULL); + +- return 0; ++ if (ret) ++ dss_uninit_ports(pdev); ++ ++ return ret; + } + + static void dss_uninit_ports(struct platform_device *pdev) +diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c +index 162689227a23..b73520aaf697 100644 +--- a/drivers/video/hdmi.c 
++++ b/drivers/video/hdmi.c +@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) + } + EXPORT_SYMBOL(hdmi_vendor_infoframe_init); + ++static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame) ++{ ++ /* for side by side (half) we also need to provide 3D_Ext_Data */ ++ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) ++ return 6; ++ else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) ++ return 5; ++ else ++ return 4; ++} ++ + /** + * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer + * @frame: HDMI infoframe +@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, + u8 *ptr = buffer; + size_t length; + +- /* empty info frame */ +- if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID) +- return -EINVAL; +- + /* only one of those can be supplied */ + if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return -EINVAL; + +- /* for side by side (half) we also need to provide 3D_Ext_Data */ +- if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) +- frame->length = 6; +- else +- frame->length = 5; ++ frame->length = hdmi_vendor_infoframe_length(frame); + + length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; + +@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, + ptr[5] = 0x0c; + ptr[6] = 0x00; + +- if (frame->vic) { +- ptr[7] = 0x1 << 5; /* video format */ +- ptr[8] = frame->vic; +- } else { ++ if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) { + ptr[7] = 0x2 << 5; /* video format */ + ptr[8] = (frame->s3d_struct & 0xf) << 4; + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + ptr[9] = (frame->s3d_ext_data & 0xf) << 4; ++ } else if (frame->vic) { ++ ptr[7] = 0x1 << 5; /* video format */ ++ ptr[8] = frame->vic; ++ } else { ++ ptr[7] = 0x0 << 5; /* video format */ + } + + hdmi_infoframe_set_checksum(buffer, length); +@@ -1161,7 +1166,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, + + if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR || + ptr[1] != 1 || +- (ptr[2] != 5 && ptr[2] != 6)) ++ (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6)) + return -EINVAL; + + length = ptr[2]; +@@ -1189,16 +1194,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, + + hvf->length = length; + +- if (hdmi_video_format == 0x1) { +- hvf->vic = ptr[4]; +- } else if (hdmi_video_format == 0x2) { ++ if (hdmi_video_format == 0x2) { ++ if (length != 5 && length != 6) ++ return -EINVAL; + hvf->s3d_struct = ptr[4] >> 4; + if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) { +- if (length == 6) +- hvf->s3d_ext_data = ptr[5] >> 4; +- else ++ if (length != 6) + return -EINVAL; ++ hvf->s3d_ext_data = ptr[5] >> 4; + } ++ } else if (hdmi_video_format == 0x1) { ++ if (length != 5) ++ return -EINVAL; ++ hvf->vic = ptr[4]; ++ } else { ++ if (length != 4) ++ return -EINVAL; + } + + return 0; +diff --git a/fs/aio.c b/fs/aio.c +index 0fcb49ad67d4..0606f033cd9b 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -68,9 +68,9 @@ struct aio_ring { + #define AIO_RING_PAGES 8 + + struct kioctx_table { +- struct rcu_head rcu; +- unsigned nr; +- struct kioctx *table[]; ++ struct rcu_head rcu; ++ unsigned nr; ++ struct kioctx __rcu *table[]; + }; + + struct kioctx_cpu { +@@ -115,7 +115,8 @@ struct kioctx { + struct page **ring_pages; + long nr_pages; + +- struct work_struct free_work; ++ struct rcu_head free_rcu; ++ struct work_struct 
free_work; /* see free_ioctx() */ + + /* + * signals when all in-flight requests are done +@@ -329,7 +330,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) + for (i = 0; i < table->nr; i++) { + struct kioctx *ctx; + +- ctx = table->table[i]; ++ ctx = rcu_dereference(table->table[i]); + if (ctx && ctx->aio_ring_file == file) { + if (!atomic_read(&ctx->dead)) { + ctx->user_id = ctx->mmap_base = vma->vm_start; +@@ -581,6 +582,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) + return cancel(&kiocb->common); + } + ++/* ++ * free_ioctx() should be RCU delayed to synchronize against the RCU ++ * protected lookup_ioctx() and also needs process context to call ++ * aio_free_ring(), so the double bouncing through kioctx->free_rcu and ++ * ->free_work. ++ */ + static void free_ioctx(struct work_struct *work) + { + struct kioctx *ctx = container_of(work, struct kioctx, free_work); +@@ -594,6 +601,14 @@ static void free_ioctx(struct work_struct *work) + kmem_cache_free(kioctx_cachep, ctx); + } + ++static void free_ioctx_rcufn(struct rcu_head *head) ++{ ++ struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); ++ ++ INIT_WORK(&ctx->free_work, free_ioctx); ++ schedule_work(&ctx->free_work); ++} ++ + static void free_ioctx_reqs(struct percpu_ref *ref) + { + struct kioctx *ctx = container_of(ref, struct kioctx, reqs); +@@ -602,8 +617,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) + if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) + complete(&ctx->rq_wait->comp); + +- INIT_WORK(&ctx->free_work, free_ioctx); +- schedule_work(&ctx->free_work); ++ /* Synchronize against RCU protected table->table[] dereferences */ ++ call_rcu(&ctx->free_rcu, free_ioctx_rcufn); + } + + /* +@@ -644,9 +659,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + while (1) { + if (table) + for (i = 0; i < table->nr; i++) +- if (!table->table[i]) { ++ if (!rcu_access_pointer(table->table[i])) { + ctx->id = i; +- table->table[i] = ctx; ++ rcu_assign_pointer(table->table[i], ctx); + spin_unlock(&mm->ioctx_lock); + + /* While kioctx setup is in progress, +@@ -821,11 +836,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, + } + + table = rcu_dereference_raw(mm->ioctx_table); +- WARN_ON(ctx != table->table[ctx->id]); +- table->table[ctx->id] = NULL; ++ WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); ++ RCU_INIT_POINTER(table->table[ctx->id], NULL); + spin_unlock(&mm->ioctx_lock); + +- /* percpu_ref_kill() will do the necessary call_rcu() */ ++ /* free_ioctx_reqs() will do the necessary RCU synchronization */ + wake_up_all(&ctx->wait); + + /* +@@ -867,7 +882,8 @@ void exit_aio(struct mm_struct *mm) + + skipped = 0; + for (i = 0; i < table->nr; ++i) { +- struct kioctx *ctx = table->table[i]; ++ struct kioctx *ctx = ++ rcu_dereference_protected(table->table[i], true); + + if (!ctx) { + skipped++; +@@ -1056,7 +1072,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) + if (!table || id >= table->nr) + goto out; + +- ctx = table->table[id]; ++ ctx = rcu_dereference(table->table[id]); + if (ctx && ctx->user_id == ctx_id) { + percpu_ref_get(&ctx->users); + ret = ctx; +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 06a77e47957d..5900508ca6ed 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -583,6 +583,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev) + btrfs_sysfs_remove_fsid(fs_devs); + list_del(&fs_devs->list); + free_fs_devices(fs_devs); ++ break; + } else { + fs_devs->num_devices--; + 
list_del(&dev->dev_list); +@@ -4748,10 +4749,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + if (devs_max && ndevs > devs_max) + ndevs = devs_max; + /* +- * the primary goal is to maximize the number of stripes, so use as many +- * devices as possible, even if the stripes are not maximum sized. ++ * The primary goal is to maximize the number of stripes, so use as ++ * many devices as possible, even if the stripes are not maximum sized. ++ * ++ * The DUP profile stores more than one stripe per device, the ++ * max_avail is the total size so we have to adjust. + */ +- stripe_size = devices_info[ndevs-1].max_avail; ++ stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); + num_stripes = ndevs * dev_stripes; + + /* +@@ -4791,8 +4795,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + stripe_size = devices_info[ndevs-1].max_avail; + } + +- stripe_size = div_u64(stripe_size, dev_stripes); +- + /* align to BTRFS_STRIPE_LEN */ + stripe_size = div_u64(stripe_size, raid_stripe_len); + stripe_size *= raid_stripe_len; +diff --git a/fs/dcache.c b/fs/dcache.c +index 67957f5b325c..c0c7fa8224ba 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -637,11 +637,16 @@ static inline struct dentry *lock_parent(struct dentry *dentry) + spin_unlock(&parent->d_lock); + goto again; + } +- rcu_read_unlock(); +- if (parent != dentry) ++ if (parent != dentry) { + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); +- else ++ if (unlikely(dentry->d_lockref.count < 0)) { ++ spin_unlock(&parent->d_lock); ++ parent = NULL; ++ } ++ } else { + parent = NULL; ++ } ++ rcu_read_unlock(); + return parent; + } + +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c +index 34a69e7ed90b..17ab23f64bba 100644 +--- a/fs/f2fs/gc.c ++++ b/fs/f2fs/gc.c +@@ -538,8 +538,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, + get_node_info(sbi, nid, dni); + + if (sum->version != dni->version) { +- f2fs_put_page(node_page, 1); +- return false; ++ f2fs_msg(sbi->sb, KERN_WARNING, ++ "%s: valid data with mismatched node version.", ++ __func__); ++ set_sbi_flag(sbi, SBI_NEED_FSCK); + } + + *nofs = ofs_of_node(node_page); +diff --git a/fs/namei.c b/fs/namei.c +index 6cfb45f262aa..891670e0956b 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -578,9 +578,10 @@ static int __nd_alloc_stack(struct nameidata *nd) + static bool path_connected(const struct path *path) + { + struct vfsmount *mnt = path->mnt; ++ struct super_block *sb = mnt->mnt_sb; + +- /* Only bind mounts can have disconnected paths */ +- if (mnt->mnt_root == mnt->mnt_sb->s_root) ++ /* Bind mounts and multi-root filesystems can have disconnected paths */ ++ if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root)) + return true; + + return is_subdir(path->dentry, mnt->mnt_root); +@@ -1121,9 +1122,6 @@ static int follow_automount(struct path *path, struct nameidata *nd, + path->dentry->d_inode) + return -EISDIR; + +- if (path->dentry->d_sb->s_user_ns != &init_user_ns) +- return -EACCES; +- + nd->total_link_count++; + if (nd->total_link_count >= 40) + return -ELOOP; +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 51bf1f9ab287..2fdb8f5a7b69 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -2613,6 +2613,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, + /* initial superblock/root creation */ + mount_info->fill_super(s, mount_info); + nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); ++ if (!(server->flags & NFS_MOUNT_UNSHARED)) ++ s->s_iflags |= SB_I_MULTIROOT; 
+ } + + mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c +index bc2dde2423c2..76108185854e 100644 +--- a/fs/reiserfs/journal.c ++++ b/fs/reiserfs/journal.c +@@ -1959,7 +1959,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, + * will be requeued because superblock is being shutdown and doesn't + * have MS_ACTIVE set. + */ +- cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); ++ reiserfs_cancel_old_flush(sb); + /* wait for all commits to finish */ + cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); + +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h +index 5dcf3ab83886..6ca00471afbf 100644 +--- a/fs/reiserfs/reiserfs.h ++++ b/fs/reiserfs/reiserfs.h +@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s, + struct reiserfs_list_bitmap *, unsigned int); + + void reiserfs_schedule_old_flush(struct super_block *s); ++void reiserfs_cancel_old_flush(struct super_block *s); + void add_save_link(struct reiserfs_transaction_handle *th, + struct inode *inode, int truncate); + int remove_save_link(struct inode *inode, int truncate); +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index e101d70d2327..dec6c93044fa 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work) + s = sbi->s_journal->j_work_sb; + + spin_lock(&sbi->old_work_lock); +- sbi->work_queued = 0; ++ /* Avoid clobbering the cancel state... */ ++ if (sbi->work_queued == 1) ++ sbi->work_queued = 0; + spin_unlock(&sbi->old_work_lock); + + reiserfs_sync_fs(s, 1); +@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s) + spin_unlock(&sbi->old_work_lock); + } + +-static void cancel_old_flush(struct super_block *s) ++void reiserfs_cancel_old_flush(struct super_block *s) + { + struct reiserfs_sb_info *sbi = REISERFS_SB(s); + +- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); + spin_lock(&sbi->old_work_lock); +- sbi->work_queued = 0; ++ /* Make sure no new flushes will be queued */ ++ sbi->work_queued = 2; + spin_unlock(&sbi->old_work_lock); ++ cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); + } + + static int reiserfs_freeze(struct super_block *s) + { + struct reiserfs_transaction_handle th; + +- cancel_old_flush(s); ++ reiserfs_cancel_old_flush(s); + + reiserfs_write_lock(s); + if (!(s->s_flags & MS_RDONLY)) { +@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s) + + static int reiserfs_unfreeze(struct super_block *s) + { ++ struct reiserfs_sb_info *sbi = REISERFS_SB(s); ++ + reiserfs_allow_writes(s); ++ spin_lock(&sbi->old_work_lock); ++ /* Allow old_work to run again */ ++ sbi->work_queued = 0; ++ spin_unlock(&sbi->old_work_lock); + return 0; + } + +@@ -2194,7 +2203,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) + if (sbi->commit_wq) + destroy_workqueue(sbi->commit_wq); + +- cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); ++ reiserfs_cancel_old_flush(s); + + reiserfs_free_bitmap_cache(s); + if (SB_BUFFER_WITH_SB(s)) +diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h +index 88e64846cf37..cdeafd9cab07 100644 +--- a/include/dt-bindings/clock/r8a7794-clock.h ++++ b/include/dt-bindings/clock/r8a7794-clock.h +@@ -81,6 +81,7 @@ + #define R8A7794_CLK_SCIF2 19 + #define R8A7794_CLK_SCIF1 20 + #define R8A7794_CLK_SCIF0 21 ++#define R8A7794_CLK_DU1 23 + #define R8A7794_CLK_DU0 24 + + /* MSTP8 */ +diff 
--git a/include/linux/fs.h b/include/linux/fs.h +index 18552189560b..e9867aff53d8 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1319,6 +1319,7 @@ struct mm_struct; + #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ + #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ + #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ ++#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ + + /* sb->s_iflags to limit user namespace mounts */ + #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 7dbe9148b2f8..35f4c4d9c405 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page) + + #ifdef CONFIG_TINY_RCU + # ifdef CONFIG_PREEMPT_COUNT +- VM_BUG_ON(!in_atomic()); ++ VM_BUG_ON(!in_atomic() && !irqs_disabled()); + # endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing +@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count) + + #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) + # ifdef CONFIG_PREEMPT_COUNT +- VM_BUG_ON(!in_atomic()); ++ VM_BUG_ON(!in_atomic() && !irqs_disabled()); + # endif + VM_BUG_ON_PAGE(page_count(page) == 0, page); + page_ref_add(page, count); +diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h +index 1419133fa69e..4ac1a070af0a 100644 +--- a/include/linux/platform_data/isl9305.h ++++ b/include/linux/platform_data/isl9305.h +@@ -24,7 +24,7 @@ + struct regulator_init_data; + + struct isl9305_pdata { +- struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR]; ++ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1]; + }; + + #endif +diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h +index 37b532410528..3c3786df044c 100644 +--- a/include/linux/regulator/driver.h ++++ b/include/linux/regulator/driver.h +@@ -425,6 +425,8 @@ struct regulator_dev { + struct regulator_enable_gpio *ena_pin; + unsigned int ena_gpio_state:1; + ++ unsigned int is_switch:1; ++ + /* time when this regulator was disabled last time */ + unsigned long last_off_jiffy; + }; +diff --git a/include/net/tcp.h b/include/net/tcp.h +index caf35e062639..18f029bcb8c7 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -1265,9 +1265,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, + + static inline int tcp_win_from_space(int space) + { +- return sysctl_tcp_adv_win_scale<=0 ? +- (space>>(-sysctl_tcp_adv_win_scale)) : +- space - (space>>sysctl_tcp_adv_win_scale); ++ int tcp_adv_win_scale = sysctl_tcp_adv_win_scale; ++ ++ return tcp_adv_win_scale <= 0 ? 
++ (space>>(-tcp_adv_win_scale)) : ++ space - (space>>tcp_adv_win_scale); + } + + /* Note: caller must be prepared to deal with negative returns */ +diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h +index bc96b14dfb2c..f4d5c998cc2b 100644 +--- a/include/uapi/linux/eventpoll.h ++++ b/include/uapi/linux/eventpoll.h +@@ -40,7 +40,7 @@ + #define EPOLLRDHUP 0x00002000 + + /* Set exclusive wakeup mode for the target file descriptor */ +-#define EPOLLEXCLUSIVE (1 << 28) ++#define EPOLLEXCLUSIVE (1U << 28) + + /* + * Request the handling of system wakeup events so as to prevent system suspends +@@ -52,13 +52,13 @@ + * + * Requires CAP_BLOCK_SUSPEND + */ +-#define EPOLLWAKEUP (1 << 29) ++#define EPOLLWAKEUP (1U << 29) + + /* Set the One Shot behaviour for the target file descriptor */ +-#define EPOLLONESHOT (1 << 30) ++#define EPOLLONESHOT (1U << 30) + + /* Set the Edge Triggered behaviour for the target file descriptor */ +-#define EPOLLET (1 << 31) ++#define EPOLLET (1U << 31) + + /* + * On x86-64 make the 64bit structure have the same alignment as the +diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c +index d3de04b12f8c..babc67cfed69 100644 +--- a/kernel/locking/locktorture.c ++++ b/kernel/locking/locktorture.c +@@ -641,8 +641,7 @@ static void __torture_print_stats(char *page, + { + bool fail = 0; + int i, n_stress; +- long max = 0; +- long min = statp[0].n_lock_acquired; ++ long max = 0, min = statp ? statp[0].n_lock_acquired : 0; + long long sum = 0; + + n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; +@@ -749,7 +748,7 @@ static void lock_torture_cleanup(void) + * such, only perform the underlying torture-specific cleanups, + * and avoid anything related to locktorture. + */ +- if (!cxt.lwsa) ++ if (!cxt.lwsa && !cxt.lrsa) + goto end; + + if (writer_tasks) { +@@ -823,6 +822,13 @@ static int __init lock_torture_init(void) + firsterr = -EINVAL; + goto unwind; + } ++ ++ if (nwriters_stress == 0 && nreaders_stress == 0) { ++ pr_alert("lock-torture: must run at least one locking thread\n"); ++ firsterr = -EINVAL; ++ goto unwind; ++ } ++ + if (cxt.cur_ops->init) + cxt.cur_ops->init(); + +@@ -846,17 +852,19 @@ static int __init lock_torture_init(void) + #endif + + /* Initialize the statistics so that each run gets its own numbers. 
*/ ++ if (nwriters_stress) { ++ lock_is_write_held = 0; ++ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); ++ if (cxt.lwsa == NULL) { ++ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); ++ firsterr = -ENOMEM; ++ goto unwind; ++ } + +- lock_is_write_held = 0; +- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); +- if (cxt.lwsa == NULL) { +- VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); +- firsterr = -ENOMEM; +- goto unwind; +- } +- for (i = 0; i < cxt.nrealwriters_stress; i++) { +- cxt.lwsa[i].n_lock_fail = 0; +- cxt.lwsa[i].n_lock_acquired = 0; ++ for (i = 0; i < cxt.nrealwriters_stress; i++) { ++ cxt.lwsa[i].n_lock_fail = 0; ++ cxt.lwsa[i].n_lock_acquired = 0; ++ } + } + + if (cxt.cur_ops->readlock) { +@@ -873,19 +881,21 @@ static int __init lock_torture_init(void) + cxt.nrealreaders_stress = cxt.nrealwriters_stress; + } + +- lock_is_read_held = 0; +- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); +- if (cxt.lrsa == NULL) { +- VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); +- firsterr = -ENOMEM; +- kfree(cxt.lwsa); +- cxt.lwsa = NULL; +- goto unwind; +- } +- +- for (i = 0; i < cxt.nrealreaders_stress; i++) { +- cxt.lrsa[i].n_lock_fail = 0; +- cxt.lrsa[i].n_lock_acquired = 0; ++ if (nreaders_stress) { ++ lock_is_read_held = 0; ++ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); ++ if (cxt.lrsa == NULL) { ++ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); ++ firsterr = -ENOMEM; ++ kfree(cxt.lwsa); ++ cxt.lwsa = NULL; ++ goto unwind; ++ } ++ ++ for (i = 0; i < cxt.nrealreaders_stress; i++) { ++ cxt.lrsa[i].n_lock_fail = 0; ++ cxt.lrsa[i].n_lock_acquired = 0; ++ } + } + } + +@@ -915,12 +925,14 @@ static int __init lock_torture_init(void) + goto unwind; + } + +- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), +- GFP_KERNEL); +- if (writer_tasks == NULL) { +- VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); +- firsterr = -ENOMEM; +- goto unwind; ++ if (nwriters_stress) { ++ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), ++ GFP_KERNEL); ++ if (writer_tasks == NULL) { ++ VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); ++ firsterr = -ENOMEM; ++ goto unwind; ++ } + } + + if (cxt.cur_ops->readlock) { +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 2c49d76f96c3..196cc460e38d 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -236,8 +236,7 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, + * then right waiter has a dl_prio() too. + */ + if (dl_prio(left->prio)) +- return dl_time_before(left->task->dl.deadline, +- right->task->dl.deadline); ++ return dl_time_before(left->deadline, right->deadline); + + return 0; + } +@@ -704,7 +703,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + + /* [7] Requeue the waiter in the lock waiter tree. */ + rt_mutex_dequeue(lock, waiter); ++ ++ /* ++ * Update the waiter prio fields now that we're dequeued. ++ * ++ * These values can have changed through either: ++ * ++ * sys_sched_set_scheduler() / sys_sched_setattr() ++ * ++ * or ++ * ++ * DL CBS enforcement advancing the effective deadline. ++ * ++ * Even though pi_waiters also uses these fields, and that tree is only ++ * updated in [11], we can do this here, since we hold [L], which ++ * serializes all pi_waiters access and rb_erase() does not care about ++ * the values of the node being removed. 
++ */ + waiter->prio = task->prio; ++ waiter->deadline = task->dl.deadline; ++ + rt_mutex_enqueue(lock, waiter); + + /* [8] Release the task */ +@@ -831,6 +849,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) + { ++ lockdep_assert_held(&lock->wait_lock); ++ + /* + * Before testing whether we can acquire @lock, we set the + * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all +@@ -958,6 +978,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex *next_lock; + int chain_walk = 0, res; + ++ lockdep_assert_held(&lock->wait_lock); ++ + /* + * Early deadlock detection. We really don't want the task to + * enqueue on itself just to untangle the mess later. It's not +@@ -975,6 +997,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + waiter->task = task; + waiter->lock = lock; + waiter->prio = task->prio; ++ waiter->deadline = task->dl.deadline; + + /* Get the top priority waiter on the lock */ + if (rt_mutex_has_waiters(lock)) +@@ -1080,6 +1103,8 @@ static void remove_waiter(struct rt_mutex *lock, + struct task_struct *owner = rt_mutex_owner(lock); + struct rt_mutex *next_lock; + ++ lockdep_assert_held(&lock->wait_lock); ++ + raw_spin_lock(¤t->pi_lock); + rt_mutex_dequeue(lock, waiter); + current->pi_blocked_on = NULL; +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index e317e1cbb3eb..50848b460851 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -33,6 +33,7 @@ struct rt_mutex_waiter { + struct rt_mutex *deadlock_lock; + #endif + int prio; ++ u64 deadline; + }; + + /* +diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c +index d5760c42f042..61d41ca41844 100644 +--- a/kernel/printk/braille.c ++++ b/kernel/printk/braille.c +@@ -2,12 +2,13 @@ + + #include + #include ++#include + #include + + #include "console_cmdline.h" + #include "braille.h" + +-char *_braille_console_setup(char **str, char **brl_options) ++int _braille_console_setup(char **str, char **brl_options) + { + if (!strncmp(*str, "brl,", 4)) { + *brl_options = ""; +@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options) + } else if (!strncmp(*str, "brl=", 4)) { + *brl_options = *str + 4; + *str = strchr(*brl_options, ','); +- if (!*str) ++ if (!*str) { + pr_err("need port name after brl=\n"); +- else +- *((*str)++) = 0; +- } else +- return NULL; ++ return -EINVAL; ++ } ++ *((*str)++) = 0; ++ } + +- return *str; ++ return 0; + } + + int +diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h +index 769d771145c8..749a6756843a 100644 +--- a/kernel/printk/braille.h ++++ b/kernel/printk/braille.h +@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options) + c->brl_options = brl_options; + } + +-char * ++/* ++ * Setup console according to braille options. ++ * Return -EINVAL on syntax error, 0 on success (or no braille option was ++ * actually given). ++ * Modifies str to point to the serial options ++ * Sets brl_options to the parsed braille options. 
++ */ ++int + _braille_console_setup(char **str, char **brl_options); + + int +@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options) + { + } + +-static inline char * ++static inline int + _braille_console_setup(char **str, char **brl_options) + { +- return NULL; ++ return 0; + } + + static inline int +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 9c5b231684d0..ab6855a4218b 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -2342,7 +2342,7 @@ void console_unlock(void) + } + + /* +- * Console drivers are called under logbuf_lock, so ++ * Console drivers are called with interrupts disabled, so + * @console_may_schedule should be cleared before; however, we may + * end up dumping a lot of lines, for example, if called from + * console registration path, and should invoke cond_resched() +@@ -2350,11 +2350,15 @@ void console_unlock(void) + * scheduling stall on a slow console leading to RCU stall and + * softlockup warnings which exacerbate the issue with more + * messages practically incapacitating the system. ++ * ++ * console_trylock() is not able to detect the preemptive ++ * context reliably. Therefore the value must be stored before ++ * and cleared after the the "again" goto label. + */ + do_cond_resched = console_may_schedule; ++again: + console_may_schedule = 0; + +-again: + /* + * We released the console_sem lock, so we need to recheck if + * cpu is online and (if not) is there at least one CON_ANYTIME +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index bce3a7ad4253..291ea6fa7ee6 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -508,7 +508,8 @@ void resched_cpu(int cpu) + unsigned long flags; + + raw_spin_lock_irqsave(&rq->lock, flags); +- resched_curr(rq); ++ if (cpu_online(cpu) || cpu == smp_processor_id()) ++ resched_curr(rq); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } + +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index f6d68ddfa2f3..c7b0d2e7a9aa 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -2206,7 +2206,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) + if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) + queue_push_tasks(rq); + #endif /* CONFIG_SMP */ +- if (p->prio < rq->curr->prio) ++ if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) + resched_curr(rq); + } + } +diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c +index a26036d37a38..382b159d8592 100644 +--- a/kernel/time/sched_clock.c ++++ b/kernel/time/sched_clock.c +@@ -205,6 +205,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) + + update_clock_read_data(&rd); + ++ if (sched_clock_timer.function != NULL) { ++ /* update timeout for clock wrap */ ++ hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); ++ } ++ + r = rate; + if (r >= 4000000) { + r /= 1000000; +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index ba7d8b288bb3..ef4f16e81283 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + + #include + +@@ -96,6 +97,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, + + next_one: + i = 0; ++ ++ touch_nmi_watchdog(); ++ + raw_spin_lock_irqsave(&base->cpu_base->lock, flags); + + curr = timerqueue_getnext(&base->active); +@@ -207,6 +211,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) + { + struct clock_event_device *dev = td->evtdev; + ++ touch_nmi_watchdog(); ++ 
+ SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); + if (cpu < 0) + SEQ_printf(m, "Broadcast device\n"); +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c +index fbfacd51aa34..767144128b95 100644 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -562,8 +562,7 @@ static int vlan_dev_init(struct net_device *dev) + NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC | + NETIF_F_ALL_FCOE; + +- dev->features |= real_dev->vlan_features | NETIF_F_LLTX | +- NETIF_F_GSO_SOFTWARE; ++ dev->features |= dev->hw_features | NETIF_F_LLTX; + dev->gso_max_size = real_dev->gso_max_size; + dev->gso_max_segs = real_dev->gso_max_segs; + if (dev->features & NETIF_F_VLAN_FEATURES) +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c +index e7f690b571ea..5419b1214abd 100644 +--- a/net/batman-adv/bridge_loop_avoidance.c ++++ b/net/batman-adv/bridge_loop_avoidance.c +@@ -1964,10 +1964,22 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + /* if yes, the client has roamed and we have + * to unclaim it. + */ +- batadv_handle_unclaim(bat_priv, primary_if, +- primary_if->net_dev->dev_addr, +- ethhdr->h_source, vid); +- goto allow; ++ if (batadv_has_timed_out(claim->lasttime, 100)) { ++ /* only unclaim if the last claim entry is ++ * older than 100 ms to make sure we really ++ * have a roaming client here. ++ */ ++ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n", ++ ethhdr->h_source); ++ batadv_handle_unclaim(bat_priv, primary_if, ++ primary_if->net_dev->dev_addr, ++ ethhdr->h_source, vid); ++ goto allow; ++ } else { ++ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n", ++ ethhdr->h_source); ++ goto handled; ++ } + } + + /* check if it is a multicast/broadcast frame */ +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c +index 1904a93f47d5..de7b82ece499 100644 +--- a/net/bluetooth/6lowpan.c ++++ b/net/bluetooth/6lowpan.c +@@ -755,7 +755,8 @@ static void set_ip_addr_bits(u8 addr_type, u8 *addr) + } + + static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, +- struct lowpan_btle_dev *dev) ++ struct lowpan_btle_dev *dev, ++ bool new_netdev) + { + struct lowpan_peer *peer; + +@@ -786,7 +787,8 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, + spin_unlock(&devices_lock); + + /* Notifying peers about us needs to be done without locks held */ +- INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); ++ if (new_netdev) ++ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); + schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); + + return peer->chan; +@@ -843,6 +845,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) + static inline void chan_ready_cb(struct l2cap_chan *chan) + { + struct lowpan_btle_dev *dev; ++ bool new_netdev = false; + + dev = lookup_dev(chan->conn); + +@@ -853,12 +856,13 @@ static inline void chan_ready_cb(struct l2cap_chan *chan) + l2cap_chan_del(chan, -ENOENT); + return; + } ++ new_netdev = true; + } + + if (!try_module_get(THIS_MODULE)) + return; + +- add_peer_chan(chan, dev); ++ add_peer_chan(chan, dev, new_netdev); + ifup(dev->netdev); + } + +diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c +index 1aff2da9bc74..5d3698170004 100644 +--- a/net/bluetooth/af_bluetooth.c ++++ b/net/bluetooth/af_bluetooth.c +@@ -163,6 +163,9 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk) + } + EXPORT_SYMBOL(bt_accept_enqueue); + ++/* Calling 
function must hold the sk lock. ++ * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. ++ */ + void bt_accept_unlink(struct sock *sk) + { + BT_DBG("sk %p state %d", sk, sk->sk_state); +@@ -181,11 +184,32 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) + + BT_DBG("parent %p", parent); + ++restart: + list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { + sk = (struct sock *)s; + ++ /* Prevent early freeing of sk due to unlink and sock_kill */ ++ sock_hold(sk); + lock_sock(sk); + ++ /* Check sk has not already been unlinked via ++ * bt_accept_unlink() due to serialisation caused by sk locking ++ */ ++ if (!bt_sk(sk)->parent) { ++ BT_DBG("sk %p, already unlinked", sk); ++ release_sock(sk); ++ sock_put(sk); ++ ++ /* Restart the loop as sk is no longer in the list ++ * and also avoid a potential infinite loop because ++ * list_for_each_entry_safe() is not thread safe. ++ */ ++ goto restart; ++ } ++ ++ /* sk is safely in the parent list so reduce reference count */ ++ sock_put(sk); ++ + /* FIXME: Is this check still needed */ + if (sk->sk_state == BT_CLOSED) { + bt_accept_unlink(sk); +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index a7aa54f45e19..fa7d757fef95 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -1520,7 +1520,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: +- BUG(); ++ WARN_ON(1); + break; + } + +diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c +index e0defcef376d..24a0c66394a0 100644 +--- a/net/sched/act_csum.c ++++ b/net/sched/act_csum.c +@@ -180,6 +180,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl, + struct tcphdr *tcph; + const struct iphdr *iph; + ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) ++ return 1; ++ + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); + if (tcph == NULL) + return 0; +@@ -201,6 +204,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl, + struct tcphdr *tcph; + const struct ipv6hdr *ip6h; + ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ++ return 1; ++ + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); + if (tcph == NULL) + return 0; +@@ -224,6 +230,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, + const struct iphdr *iph; + u16 ul; + ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) ++ return 1; ++ + /* + * Support both UDP and UDPLITE checksum algorithms, Don't use + * udph->len to get the real length without any protocol check, +@@ -277,6 +286,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, + const struct ipv6hdr *ip6h; + u16 ul; + ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) ++ return 1; ++ + /* + * Support both UDP and UDPLITE checksum algorithms, Don't use + * udph->len to get the real length without any protocol check, +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c +index 9f7b380cf0a3..c73d58872cf8 100644 +--- a/net/sched/sch_netem.c ++++ b/net/sched/sch_netem.c +@@ -462,7 +462,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + /* If a delay is expected, orphan the skb. 
(orphaning usually takes + * place at TX completion time, so _before_ the link transit delay) + */ +- if (q->latency || q->jitter) ++ if (q->latency || q->jitter || q->rate) + skb_orphan_partial(skb); + + /* +@@ -530,21 +530,31 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + now = psched_get_time(); + + if (q->rate) { +- struct sk_buff *last; ++ struct netem_skb_cb *last = NULL; ++ ++ if (sch->q.tail) ++ last = netem_skb_cb(sch->q.tail); ++ if (q->t_root.rb_node) { ++ struct sk_buff *t_skb; ++ struct netem_skb_cb *t_last; ++ ++ t_skb = netem_rb_to_skb(rb_last(&q->t_root)); ++ t_last = netem_skb_cb(t_skb); ++ if (!last || ++ t_last->time_to_send > last->time_to_send) { ++ last = t_last; ++ } ++ } + +- if (sch->q.qlen) +- last = sch->q.tail; +- else +- last = netem_rb_to_skb(rb_last(&q->t_root)); + if (last) { + /* + * Last packet in queue is reference point (now), + * calculate this time bonus and subtract + * from delay. + */ +- delay -= netem_skb_cb(last)->time_to_send - now; ++ delay -= last->time_to_send - now; + delay = max_t(psched_tdiff_t, 0, delay); +- now = netem_skb_cb(last)->time_to_send; ++ now = last->time_to_send; + } + + delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 5e89b7461f99..5b8fa6832687 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1346,7 +1346,7 @@ EXPORT_SYMBOL(xfrm_policy_delete); + + int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) + { +- struct net *net = xp_net(pol); ++ struct net *net = sock_net(sk); + struct xfrm_policy *old_pol; + + #ifdef CONFIG_XFRM_SUB_POLICY +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index 419bf5d463bd..13e0611a9085 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -1883,6 +1883,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen + struct xfrm_mgr *km; + struct xfrm_policy *pol = NULL; + ++ if (!optval && !optlen) { ++ xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); ++ xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); ++ __sk_dst_reset(sk); ++ return 0; ++ } ++ + if (optlen <= 0 || optlen > PAGE_SIZE) + return -EMSGSIZE; + +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index 7d3a98b2d55a..02cc952b86aa 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -707,7 +707,7 @@ module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); + + /* Maximum pathname length before accesses will start getting rejected */ + unsigned int aa_g_path_max = 2 * PATH_MAX; +-module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); ++module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR); + + /* Determines how paranoid loading of policy is and how much verification + * on the loaded policy is done. 
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c +index 6830d2427e47..7bf8b005a178 100644 +--- a/security/integrity/ima/ima_appraise.c ++++ b/security/integrity/ima/ima_appraise.c +@@ -207,7 +207,8 @@ int ima_appraise_measurement(enum ima_hooks func, + if (opened & FILE_CREATED) + iint->flags |= IMA_NEW_FILE; + if ((iint->flags & IMA_NEW_FILE) && +- !(iint->flags & IMA_DIGSIG_REQUIRED)) ++ (!(iint->flags & IMA_DIGSIG_REQUIRED) || ++ (inode->i_size == 0))) + status = INTEGRITY_PASS; + goto out; + } +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index c2da45ae5b2a..b8278f3af9da 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -4328,10 +4328,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in + u32 sid, node_perm; + + if (family == PF_INET) { ++ if (addrlen < sizeof(struct sockaddr_in)) { ++ err = -EINVAL; ++ goto out; ++ } + addr4 = (struct sockaddr_in *)address; + snum = ntohs(addr4->sin_port); + addrp = (char *)&addr4->sin_addr.s_addr; + } else { ++ if (addrlen < SIN6_LEN_RFC2133) { ++ err = -EINVAL; ++ goto out; ++ } + addr6 = (struct sockaddr_in6 *)address; + snum = ntohs(addr6->sin6_port); + addrp = (char *)&addr6->sin6_addr.s6_addr; +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 3321348fd86b..3e7c3573871d 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -1814,10 +1814,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) + return -ENOMEM; + _snd_pcm_hw_params_any(params); + err = snd_pcm_hw_refine(substream, params); +- format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); +- kfree(params); + if (err < 0) +- return err; ++ goto error; ++ format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + for (fmt = 0; fmt < 32; ++fmt) { + if (snd_mask_test(&format_mask, fmt)) { + int f = snd_pcm_oss_format_to(fmt); +@@ -1825,7 +1824,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) + formats |= f; + } + } +- return formats; ++ ++ error: ++ kfree(params); ++ return err < 0 ? 
err : formats; + } + + static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c +index 799ad3e1d24b..ecd1c5fc8db8 100644 +--- a/sound/core/seq/seq_clientmgr.c ++++ b/sound/core/seq/seq_clientmgr.c +@@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client) + + if (!client) + return 0; +- snd_seq_delete_all_ports(client); +- snd_seq_queue_client_leave(client->number); + spin_lock_irqsave(&clients_lock, flags); + clienttablock[client->number] = 1; + clienttab[client->number] = NULL; + spin_unlock_irqrestore(&clients_lock, flags); ++ snd_seq_delete_all_ports(client); ++ snd_seq_queue_client_leave(client->number); + snd_use_lock_sync(&client->use_lock); + snd_seq_queue_client_termination(client->number); + if (client->pool) +diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c +index bc1c8488fc2a..2bc6759e4adc 100644 +--- a/sound/core/seq/seq_prioq.c ++++ b/sound/core/seq/seq_prioq.c +@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo) + if (f->cells > 0) { + /* drain prioQ */ + while (f->cells > 0) +- snd_seq_cell_free(snd_seq_prioq_cell_out(f)); ++ snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL)); + } + + kfree(f); +@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f, + return 0; + } + ++/* return 1 if the current time >= event timestamp */ ++static int event_is_ready(struct snd_seq_event *ev, void *current_time) ++{ ++ if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK) ++ return snd_seq_compare_tick_time(current_time, &ev->time.tick); ++ else ++ return snd_seq_compare_real_time(current_time, &ev->time.time); ++} ++ + /* dequeue cell from prioq */ +-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) ++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, ++ void *current_time) + { + struct snd_seq_event_cell *cell; + unsigned long flags; +@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) + spin_lock_irqsave(&f->lock, flags); + + cell = f->head; ++ if (cell && current_time && !event_is_ready(&cell->event, current_time)) ++ cell = NULL; + if (cell) { + f->head = cell->next; + +@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f) + return f->cells; + } + +- +-/* peek at cell at the head of the prioq */ +-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f) +-{ +- if (f == NULL) { +- pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n"); +- return NULL; +- } +- return f->head; +-} +- +- + static inline int prioq_match(struct snd_seq_event_cell *cell, + int client, int timestamp) + { +diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h +index d38bb78d9345..2c315ca10fc4 100644 +--- a/sound/core/seq/seq_prioq.h ++++ b/sound/core/seq/seq_prioq.h +@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo); + int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); + + /* dequeue cell from prioq */ +-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f); ++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, ++ void *current_time); + + /* return number of events available in prioq */ + int snd_seq_prioq_avail(struct snd_seq_prioq *f); + +-/* peek at cell at the head of the prioq */ +-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct 
snd_seq_prioq *f); +- + /* client left queue */ + void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); + +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c +index 79e0c5604ef8..1a6dc4ff44a6 100644 +--- a/sound/core/seq/seq_queue.c ++++ b/sound/core/seq/seq_queue.c +@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) + + __again: + /* Process tick queue... */ +- while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) { +- if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, +- &cell->event.time.tick)) { +- cell = snd_seq_prioq_cell_out(q->tickq); +- if (cell) +- snd_seq_dispatch_event(cell, atomic, hop); +- } else { +- /* event remains in the queue */ ++ for (;;) { ++ cell = snd_seq_prioq_cell_out(q->tickq, ++ &q->timer->tick.cur_tick); ++ if (!cell) + break; +- } ++ snd_seq_dispatch_event(cell, atomic, hop); + } + +- + /* Process time queue... */ +- while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) { +- if (snd_seq_compare_real_time(&q->timer->cur_time, +- &cell->event.time.time)) { +- cell = snd_seq_prioq_cell_out(q->timeq); +- if (cell) +- snd_seq_dispatch_event(cell, atomic, hop); +- } else { +- /* event remains in the queue */ ++ for (;;) { ++ cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time); ++ if (!cell) + break; +- } ++ snd_seq_dispatch_event(cell, atomic, hop); + } + + /* free lock */ +diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c +index 9741757436be..d0ad61f563e2 100644 +--- a/sound/firewire/amdtp-stream.c ++++ b/sound/firewire/amdtp-stream.c +@@ -471,8 +471,9 @@ static int handle_in_packet(struct amdtp_stream *s, + * This module supports 'Two-quadlet CIP header with SYT field'. + * For convenience, also check FMT field is AM824 or not. + */ +- if (((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) || +- ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) { ++ if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) || ++ ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) && ++ (!(s->flags & CIP_HEADER_WITHOUT_EOH))) { + dev_info_ratelimited(&s->unit->device, + "Invalid CIP header for AMDTP: %08X:%08X\n", + cip_header[0], cip_header[1]); +diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h +index f7c054bc9d92..8136bd20c8b1 100644 +--- a/sound/firewire/amdtp-stream.h ++++ b/sound/firewire/amdtp-stream.h +@@ -29,6 +29,8 @@ + * @CIP_JUMBO_PAYLOAD: Only for in-stream. The number of data blocks in an + * packet is larger than IEC 61883-6 defines. Current implementation + * allows 5 times as large as IEC 61883-6 defines. ++ * @CIP_HEADER_WITHOUT_EOH: Only for in-stream. CIP Header doesn't include ++ * valid EOH. + */ + enum cip_flags { + CIP_NONBLOCKING = 0x00, +@@ -39,6 +41,7 @@ enum cip_flags { + CIP_SKIP_DBC_ZERO_CHECK = 0x10, + CIP_EMPTY_HAS_WRONG_DBC = 0x20, + CIP_JUMBO_PAYLOAD = 0x40, ++ CIP_HEADER_WITHOUT_EOH = 0x80, + }; + + /** +diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c +index b3cffd01a19f..a4688545339c 100644 +--- a/sound/firewire/digi00x/amdtp-dot.c ++++ b/sound/firewire/digi00x/amdtp-dot.c +@@ -28,6 +28,9 @@ + */ + #define MAX_MIDI_RX_BLOCKS 8 + ++/* 3 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) + 1. */ ++#define MAX_MIDI_PORTS 3 ++ + /* + * The double-oh-three algorithm was discovered by Robin Gareus and Damien + * Zammit in 2012, with reverse-engineering for Digi 003 Rack. 
+@@ -42,10 +45,8 @@ struct amdtp_dot { + unsigned int pcm_channels; + struct dot_state state; + +- unsigned int midi_ports; +- /* 2 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) */ +- struct snd_rawmidi_substream *midi[2]; +- int midi_fifo_used[2]; ++ struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS]; ++ int midi_fifo_used[MAX_MIDI_PORTS]; + int midi_fifo_limit; + + void (*transfer_samples)(struct amdtp_stream *s, +@@ -124,8 +125,8 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate, + return -EBUSY; + + /* +- * A first data channel is for MIDI conformant data channel, the rest is +- * Multi Bit Linear Audio data channel. ++ * A first data channel is for MIDI messages, the rest is Multi Bit ++ * Linear Audio data channel. + */ + err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1); + if (err < 0) +@@ -135,11 +136,6 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate, + + p->pcm_channels = pcm_channels; + +- if (s->direction == AMDTP_IN_STREAM) +- p->midi_ports = DOT_MIDI_IN_PORTS; +- else +- p->midi_ports = DOT_MIDI_OUT_PORTS; +- + /* + * We do not know the actual MIDI FIFO size of most devices. Just + * assume two bytes, i.e., one byte can be received over the bus while +@@ -281,13 +277,25 @@ static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer, + b = (u8 *)&buffer[0]; + + len = 0; +- if (port < p->midi_ports && ++ if (port < MAX_MIDI_PORTS && + midi_ratelimit_per_packet(s, port) && + p->midi[port] != NULL) + len = snd_rawmidi_transmit(p->midi[port], b + 1, 2); + + if (len > 0) { +- b[3] = (0x10 << port) | len; ++ /* ++ * Upper 4 bits of LSB represent port number. ++ * - 0000b: physical MIDI port 1. ++ * - 0010b: physical MIDI port 2. ++ * - 1110b: console MIDI port. ++ */ ++ if (port == 2) ++ b[3] = 0xe0; ++ else if (port == 1) ++ b[3] = 0x20; ++ else ++ b[3] = 0x00; ++ b[3] |= len; + midi_use_bytes(s, port, len); + } else { + b[1] = 0; +@@ -309,11 +317,22 @@ static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer, + + for (f = 0; f < data_blocks; f++) { + b = (u8 *)&buffer[0]; +- port = b[3] >> 4; +- len = b[3] & 0x0f; + +- if (port < p->midi_ports && p->midi[port] && len > 0) +- snd_rawmidi_receive(p->midi[port], b + 1, len); ++ len = b[3] & 0x0f; ++ if (len > 0) { ++ /* ++ * Upper 4 bits of LSB represent port number. ++ * - 0000b: physical MIDI port 1. Use port 0. ++ * - 1110b: console MIDI port. Use port 2. 
++ */ ++ if (b[3] >> 4 > 0) ++ port = 2; ++ else ++ port = 0; ++ ++ if (port < MAX_MIDI_PORTS && p->midi[port]) ++ snd_rawmidi_receive(p->midi[port], b + 1, len); ++ } + + buffer += s->data_block_quadlets; + } +@@ -364,7 +383,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port, + { + struct amdtp_dot *p = s->protocol; + +- if (port < p->midi_ports) ++ if (port < MAX_MIDI_PORTS) + ACCESS_ONCE(p->midi[port]) = midi; + } + +diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c +index cc4776c6ded3..1f5e1d23f31a 100644 +--- a/sound/firewire/digi00x/digi00x.c ++++ b/sound/firewire/digi00x/digi00x.c +@@ -13,7 +13,8 @@ MODULE_AUTHOR("Takashi Sakamoto "); + MODULE_LICENSE("GPL v2"); + + #define VENDOR_DIGIDESIGN 0x00a07e +-#define MODEL_DIGI00X 0x000002 ++#define MODEL_CONSOLE 0x000001 ++#define MODEL_RACK 0x000002 + + static int name_card(struct snd_dg00x *dg00x) + { +@@ -129,6 +130,8 @@ static int snd_dg00x_probe(struct fw_unit *unit, + spin_lock_init(&dg00x->lock); + init_waitqueue_head(&dg00x->hwdep_wait); + ++ dg00x->is_console = entry->model_id == MODEL_CONSOLE; ++ + /* Allocate and register this sound card later. */ + INIT_DEFERRABLE_WORK(&dg00x->dwork, do_registration); + snd_fw_schedule_registration(unit, &dg00x->dwork); +@@ -183,7 +186,13 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_MODEL_ID, + .vendor_id = VENDOR_DIGIDESIGN, +- .model_id = MODEL_DIGI00X, ++ .model_id = MODEL_CONSOLE, ++ }, ++ { ++ .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_MODEL_ID, ++ .vendor_id = VENDOR_DIGIDESIGN, ++ .model_id = MODEL_RACK, + }, + {} + }; +diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h +index 2cd465c0caae..43bcb0ce69c0 100644 +--- a/sound/firewire/digi00x/digi00x.h ++++ b/sound/firewire/digi00x/digi00x.h +@@ -60,6 +60,7 @@ struct snd_dg00x { + /* For asynchronous MIDI controls. */ + struct snd_rawmidi_substream *in_control; + struct snd_fw_async_midi_port out_control; ++ bool is_console; + }; + + #define DG00X_ADDR_BASE 0xffffe0000000ull +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index ceb162a9dcfd..733b3423baa2 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -180,11 +180,15 @@ static const struct kernel_param_ops param_ops_xint = { + }; + #define param_check_xint param_check_int + +-static int power_save = -1; ++static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; + module_param(power_save, xint, 0644); + MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " + "(in second, 0 = disable)."); + ++static bool pm_blacklist = true; ++module_param(pm_blacklist, bool, 0644); ++MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist"); ++ + /* reset the HD-audio controller in power save mode. + * this may give more power-saving, but will take longer time to + * wake up. 
+@@ -369,8 +373,10 @@ enum { + #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) + #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) ++#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) + #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ +- IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) ++ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ ++ IS_GLK(pci) + + static char *driver_short_names[] = { + [AZX_DRIVER_ICH] = "HDA Intel", +@@ -2151,10 +2157,9 @@ static int azx_probe_continue(struct azx *chip) + + val = power_save; + #ifdef CONFIG_PM +- if (val == -1) { ++ if (pm_blacklist) { + const struct snd_pci_quirk *q; + +- val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); + if (q && val) { + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", +diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c +index abc802a5a479..65ac4518ad06 100644 +--- a/sound/soc/codecs/rt5677.c ++++ b/sound/soc/codecs/rt5677.c +@@ -5035,6 +5035,12 @@ static const struct i2c_device_id rt5677_i2c_id[] = { + }; + MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id); + ++static const struct of_device_id rt5677_of_match[] = { ++ { .compatible = "realtek,rt5677", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, rt5677_of_match); ++ + static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false }; + static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false }; + static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false }; +@@ -5294,6 +5300,7 @@ static int rt5677_i2c_remove(struct i2c_client *i2c) + static struct i2c_driver rt5677_i2c_driver = { + .driver = { + .name = "rt5677", ++ .of_match_table = rt5677_of_match, + }, + .probe = rt5677_i2c_probe, + .remove = rt5677_i2c_remove, +diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c +index b6615affe571..fde974d52bb2 100644 +--- a/sound/soc/nuc900/nuc900-ac97.c ++++ b/sound/soc/nuc900/nuc900-ac97.c +@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97, + + /* polling the AC_R_FINISH */ + while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH) +- && timeout--) ++ && --timeout) + mdelay(1); + + if (!timeout) { +@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg, + + /* polling the AC_W_FINISH */ + while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH) +- && timeout--) ++ && --timeout) + mdelay(1); + + if (!timeout) +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c +index a9a43acce30e..fefa6ad5de8b 100644 +--- a/sound/soc/sh/rcar/ssi.c ++++ b/sound/soc/sh/rcar/ssi.c +@@ -232,6 +232,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, + */ + for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) { + ++ /* ++ * It will set SSIWSR.CONT here, but SSICR.CKDV = 000 ++ * with it is not allowed. (SSIWSR.WS_MODE with ++ * SSICR.CKDV = 000 is not allowed either). ++ * Skip it. 
See SSICR.CKDV ++ */ ++ if (j == 0) ++ continue; ++ + /* + * this driver is assuming that + * system word is 32bit x chan +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c +index f87996b0cb29..9a250c71840e 100644 +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -442,9 +442,9 @@ static int perf_del_probe_events(struct strfilter *filter) + } + + if (ret == -ENOENT && ret2 == -ENOENT) +- pr_debug("\"%s\" does not hit any event.\n", str); +- /* Note that this is silently ignored */ +- ret = 0; ++ pr_warning("\"%s\" does not hit any event.\n", str); ++ else ++ ret = 0; + + error: + if (kfd >= 0) +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c +index 688dea7cb08f..5b60ec669e73 100644 +--- a/tools/perf/builtin-stat.c ++++ b/tools/perf/builtin-stat.c +@@ -146,6 +146,7 @@ static aggr_get_id_t aggr_get_id; + static bool append_file; + static const char *output_name; + static int output_fd; ++static int print_free_counters_hint; + + struct perf_stat { + bool record; +@@ -310,8 +311,12 @@ static int read_counter(struct perf_evsel *counter) + struct perf_counts_values *count; + + count = perf_counts(counter->counts, cpu, thread); +- if (perf_evsel__read(counter, cpu, thread, count)) ++ if (perf_evsel__read(counter, cpu, thread, count)) { ++ counter->counts->scaled = -1; ++ perf_counts(counter->counts, cpu, thread)->ena = 0; ++ perf_counts(counter->counts, cpu, thread)->run = 0; + return -1; ++ } + + if (STAT_RECORD) { + if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { +@@ -336,12 +341,14 @@ static int read_counter(struct perf_evsel *counter) + static void read_counters(void) + { + struct perf_evsel *counter; ++ int ret; + + evlist__for_each_entry(evsel_list, counter) { +- if (read_counter(counter)) ++ ret = read_counter(counter); ++ if (ret) + pr_debug("failed to read counter %s\n", counter->name); + +- if (perf_stat_process_counter(&stat_config, counter)) ++ if (ret == 0 && perf_stat_process_counter(&stat_config, counter)) + pr_warning("failed to process counter %s\n", counter->name); + } + } +@@ -1109,6 +1116,9 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval, + counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, + csv_sep); + ++ if (counter->supported) ++ print_free_counters_hint = 1; ++ + fprintf(stat_config.output, "%-*s%s", + csv_output ? 0 : unit_width, + counter->unit, csv_sep); +@@ -1477,6 +1487,13 @@ static void print_footer(void) + avg_stats(&walltime_nsecs_stats)); + } + fprintf(output, "\n\n"); ++ ++ if (print_free_counters_hint) ++ fprintf(output, ++"Some events weren't counted. Try disabling the NMI watchdog:\n" ++" echo 0 > /proc/sys/kernel/nmi_watchdog\n" ++" perf stat ...\n" ++" echo 1 > /proc/sys/kernel/nmi_watchdog\n"); + } + + static void print_counters(struct timespec *ts, int argc, const char **argv) +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c +index 21f8a81797a0..4c596ba310cb 100644 +--- a/tools/perf/builtin-trace.c ++++ b/tools/perf/builtin-trace.c +@@ -822,12 +822,21 @@ struct syscall { + void **arg_parm; + }; + +-static size_t fprintf_duration(unsigned long t, FILE *fp) ++/* ++ * We need to have this 'calculated' boolean because in some cases we really ++ * don't know what is the duration of a syscall, for instance, when we start ++ * a session and some threads are waiting for a syscall to finish, say 'poll', ++ * in which case all we can do is to print "( ? ) for duration and for the ++ * start timestamp. 
++ */ ++static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp) + { + double duration = (double)t / NSEC_PER_MSEC; + size_t printed = fprintf(fp, "("); + +- if (duration >= 1.0) ++ if (!calculated) ++ printed += fprintf(fp, " ? "); ++ else if (duration >= 1.0) + printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration); + else if (duration >= 0.01) + printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration); +@@ -1030,13 +1039,27 @@ static bool trace__filter_duration(struct trace *trace, double t) + return t < (trace->duration_filter * NSEC_PER_MSEC); + } + +-static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) ++static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) + { + double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; + + return fprintf(fp, "%10.3f ", ts); + } + ++/* ++ * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are ++ * using ttrace->entry_time for a thread that receives a sys_exit without ++ * first having received a sys_enter ("poll" issued before tracing session ++ * starts, lost sys_enter exit due to ring buffer overflow). ++ */ ++static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) ++{ ++ if (tstamp > 0) ++ return __trace__fprintf_tstamp(trace, tstamp, fp); ++ ++ return fprintf(fp, " ? "); ++} ++ + static bool done = false; + static bool interrupted = false; + +@@ -1047,10 +1070,10 @@ static void sig_handler(int sig) + } + + static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, +- u64 duration, u64 tstamp, FILE *fp) ++ u64 duration, bool duration_calculated, u64 tstamp, FILE *fp) + { + size_t printed = trace__fprintf_tstamp(trace, tstamp, fp); +- printed += fprintf_duration(duration, fp); ++ printed += fprintf_duration(duration, duration_calculated, fp); + + if (trace->multiple_threads) { + if (trace->show_comm) +@@ -1452,7 +1475,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp + + duration = sample->time - ttrace->entry_time; + +- printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output); ++ printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output); + printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str); + ttrace->entry_pending = false; + +@@ -1499,7 +1522,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, + + if (sc->is_exit) { + if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) { +- trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output); ++ trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); + fprintf(trace->output, "%-70s)\n", ttrace->entry_str); + } + } else { +@@ -1547,6 +1570,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, + { + long ret; + u64 duration = 0; ++ bool duration_calculated = false; + struct thread *thread; + int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0; + struct syscall *sc = trace__syscall_info(trace, evsel, id); +@@ -1577,6 +1601,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, + duration = sample->time - ttrace->entry_time; + if (trace__filter_duration(trace, duration)) + goto out; ++ duration_calculated = true; + } else if (trace->duration_filter) + goto out; + +@@ -1592,7 +1617,7 @@ static int trace__sys_exit(struct 
trace *trace, struct perf_evsel *evsel, + if (trace->summary_only) + goto out; + +- trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output); ++ trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); + + if (ttrace->entry_pending) { + fprintf(trace->output, "%-70s", ttrace->entry_str); +@@ -1855,7 +1880,7 @@ static int trace__pgfault(struct trace *trace, + thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION, + sample->ip, &al); + +- trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output); ++ trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); + + fprintf(trace->output, "%sfault [", + evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c +index 430d039d0079..a38227eb5450 100644 +--- a/tools/perf/util/annotate.c ++++ b/tools/perf/util/annotate.c +@@ -1250,6 +1250,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil + { + char linkname[PATH_MAX]; + char *build_id_filename; ++ char *build_id_path = NULL; + + if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(dso)) +@@ -1265,8 +1266,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil + goto fallback; + } + ++ build_id_path = strdup(filename); ++ if (!build_id_path) ++ return -1; ++ ++ dirname(build_id_path); ++ + if (dso__is_kcore(dso) || +- readlink(filename, linkname, sizeof(linkname)) < 0 || ++ readlink(build_id_path, linkname, sizeof(linkname)) < 0 || + strstr(linkname, DSO__NAME_KALLSYMS) || + access(filename, R_OK)) { + fallback: +@@ -1278,6 +1285,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil + __symbol__join_symfs(filename, filename_size, dso->long_name); + } + ++ free(build_id_path); + return 0; + } + +diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c +index e528c40739cc..993ef2762508 100644 +--- a/tools/perf/util/build-id.c ++++ b/tools/perf/util/build-id.c +@@ -182,13 +182,17 @@ char *build_id_cache__origname(const char *sbuild_id) + char buf[PATH_MAX]; + char *ret = NULL, *p; + size_t offs = 5; /* == strlen("../..") */ ++ ssize_t len; + + linkname = build_id_cache__linkname(sbuild_id, NULL, 0); + if (!linkname) + return NULL; + +- if (readlink(linkname, buf, PATH_MAX) < 0) ++ len = readlink(linkname, buf, sizeof(buf) - 1); ++ if (len <= 0) + goto out; ++ buf[len] = '\0'; ++ + /* The link should be "../../" */ + p = strrchr(buf, '/'); /* Cut off the "/" */ + if (p && (p > buf + offs)) { +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c +index 8ab0d7da956b..663192395780 100644 +--- a/tools/perf/util/event.c ++++ b/tools/perf/util/event.c +@@ -255,8 +255,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, + if (machine__is_default_guest(machine)) + return 0; + +- snprintf(filename, sizeof(filename), "%s/proc/%d/maps", +- machine->root_dir, pid); ++ snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", ++ machine->root_dir, pid, pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c +index 8bc271141d9d..bce80f866dd0 100644 +--- a/tools/perf/util/evsel.c ++++ b/tools/perf/util/evsel.c +@@ -1221,7 +1221,7 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + if (FD(evsel, cpu, thread) < 0) + return -EINVAL; + +- if (readn(FD(evsel, cpu, thread), count, 
sizeof(*count)) < 0) ++ if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) <= 0) + return -errno; + + return 0; +@@ -1239,7 +1239,7 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, + if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) + return -ENOMEM; + +- if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) ++ if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0) + return -errno; + + perf_evsel__compute_deltas(evsel, cpu, thread, &count); +@@ -2400,11 +2400,17 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, + int err, char *msg, size_t size) + { + char sbuf[STRERR_BUFSIZE]; ++ int printed = 0; + + switch (err) { + case EPERM: + case EACCES: +- return scnprintf(msg, size, ++ if (err == EPERM) ++ printed = scnprintf(msg, size, ++ "No permission to enable %s event.\n\n", ++ perf_evsel__name(evsel)); ++ ++ return scnprintf(msg + printed, size - printed, + "You may not have permission to collect %sstats.\n\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n" + "which controls use of the performance events system by\n" +diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c +index fe84df1875aa..e70e935b1841 100644 +--- a/tools/perf/util/ordered-events.c ++++ b/tools/perf/util/ordered-events.c +@@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe, + + static void free_dup_event(struct ordered_events *oe, union perf_event *event) + { +- if (oe->copy_on_queue) { ++ if (event && oe->copy_on_queue) { + oe->cur_alloc_size -= event->header.size; + free(event); + } +@@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve + list_move(&event->list, &oe->cache); + oe->nr_events--; + free_dup_event(oe, event->event); ++ event->event = NULL; + } + + int ordered_events__queue(struct ordered_events *oe, union perf_event *event, +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index 6a6f44dd594b..4d2e22f8bd94 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -3060,7 +3060,7 @@ concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs, + struct probe_trace_event *new_tevs; + int ret = 0; + +- if (ntevs == 0) { ++ if (*ntevs == 0) { + *tevs = *tevs2; + *ntevs = ntevs2; + *tevs2 = NULL; +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c +index 5d61242a6e64..7e0573e55a35 100644 +--- a/tools/perf/util/session.c ++++ b/tools/perf/util/session.c +@@ -139,8 +139,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file, + if (perf_session__open(session) < 0) + goto out_close; + +- perf_session__set_id_hdr_size(session); +- perf_session__set_comm_exec(session); ++ /* ++ * set session attributes that are present in perf.data ++ * but not in pipe-mode. ++ */ ++ if (!file->is_pipe) { ++ perf_session__set_id_hdr_size(session); ++ perf_session__set_comm_exec(session); ++ } + } + } else { + session->machines.host.env = &perf_env; +@@ -155,7 +161,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file, + pr_warning("Cannot read kernel map\n"); + } + +- if (tool && tool->ordering_requires_timestamps && ++ /* ++ * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is ++ * processed, so perf_evlist__sample_id_all is not meaningful here. 
++ */ ++ if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps && + tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { + dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); + tool->ordered_events = false; +@@ -1628,6 +1638,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session) + buf = malloc(cur_size); + if (!buf) + return -errno; ++ ordered_events__set_copy_on_queue(oe, true); + more: + event = buf; + err = readn(fd, event, sizeof(struct perf_event_header)); +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c +index 452e15a10dd2..031e64ce7156 100644 +--- a/tools/perf/util/sort.c ++++ b/tools/perf/util/sort.c +@@ -846,6 +846,9 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, + static int64_t + sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) + { ++ if (!left->branch_info || !right->branch_info) ++ return cmp_null(left->branch_info, right->branch_info); ++ + return left->branch_info->flags.cycles - + right->branch_info->flags.cycles; + } +@@ -853,6 +856,8 @@ sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) + static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) + { ++ if (!he->branch_info) ++ return scnprintf(bf, size, "%-.*s", width, "N/A"); + if (he->branch_info->flags.cycles == 0) + return repsep_snprintf(bf, size, "%-*s", width, "-"); + return repsep_snprintf(bf, size, "%-*hd", width, +diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh +index d8ac9ba67688..17e16fcaa0cc 100755 +--- a/tools/testing/selftests/firmware/fw_filesystem.sh ++++ b/tools/testing/selftests/firmware/fw_filesystem.sh +@@ -28,7 +28,10 @@ test_finish() + if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then + echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout + fi +- echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path ++ if [ "$OLD_FWPATH" = "" ]; then ++ OLD_FWPATH=" " ++ fi ++ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path + rm -f "$FW" + rmdir "$FWPATH" + } +diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh +index 3f81a1095206..50a6371b2b2e 100755 +--- a/tools/testing/selftests/rcutorture/bin/configinit.sh ++++ b/tools/testing/selftests/rcutorture/bin/configinit.sh +@@ -51,7 +51,7 @@ then + mkdir $builddir + fi + else +- echo Bad build directory: \"$builddir\" ++ echo Bad build directory: \"$buildloc\" + exit 2 + fi + fi +diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c +index d075ea0e5ca1..ade443a88421 100644 +--- a/tools/testing/selftests/x86/entry_from_vm86.c ++++ b/tools/testing/selftests/x86/entry_from_vm86.c +@@ -95,6 +95,31 @@ asm ( + "int3\n\t" + "vmcode_int80:\n\t" + "int $0x80\n\t" ++ "vmcode_popf_hlt:\n\t" ++ "push %ax\n\t" ++ "popf\n\t" ++ "hlt\n\t" ++ "vmcode_umip:\n\t" ++ /* addressing via displacements */ ++ "smsw (2052)\n\t" ++ "sidt (2054)\n\t" ++ "sgdt (2060)\n\t" ++ /* addressing via registers */ ++ "mov $2066, %bx\n\t" ++ "smsw (%bx)\n\t" ++ "mov $2068, %bx\n\t" ++ "sidt (%bx)\n\t" ++ "mov $2074, %bx\n\t" ++ "sgdt (%bx)\n\t" ++ /* register operands, only for smsw */ ++ "smsw %ax\n\t" ++ "mov %ax, (2080)\n\t" ++ "int3\n\t" ++ "vmcode_umip_str:\n\t" ++ "str %eax\n\t" ++ "vmcode_umip_sldt:\n\t" ++ "sldt %eax\n\t" ++ "int3\n\t" + ".size vmcode, . 
- vmcode\n\t" + "end_vmcode:\n\t" + ".code32\n\t" +@@ -103,7 +128,8 @@ asm ( + + extern unsigned char vmcode[], end_vmcode[]; + extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], +- vmcode_sti[], vmcode_int3[], vmcode_int80[]; ++ vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[], ++ vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[]; + + /* Returns false if the test was skipped. */ + static bool do_test(struct vm86plus_struct *v86, unsigned long eip, +@@ -153,13 +179,75 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, + (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { + printf("[OK]\tReturned correctly\n"); + } else { +- printf("[FAIL]\tIncorrect return reason\n"); ++ printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip); + nerrs++; + } + + return true; + } + ++void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem) ++{ ++ struct table_desc { ++ unsigned short limit; ++ unsigned long base; ++ } __attribute__((packed)); ++ ++ /* Initialize variables with arbitrary values */ ++ struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 }; ++ struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae }; ++ struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 }; ++ struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 }; ++ unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737; ++ ++ /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */ ++ do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests"); ++ ++ /* Results from displacement-only addressing */ ++ msw1 = *(unsigned short *)(test_mem + 2052); ++ memcpy(&idt1, test_mem + 2054, sizeof(idt1)); ++ memcpy(&gdt1, test_mem + 2060, sizeof(gdt1)); ++ ++ /* Results from register-indirect addressing */ ++ msw2 = *(unsigned short *)(test_mem + 2066); ++ memcpy(&idt2, test_mem + 2068, sizeof(idt2)); ++ memcpy(&gdt2, test_mem + 2074, sizeof(gdt2)); ++ ++ /* Results when using register operands */ ++ msw3 = *(unsigned short *)(test_mem + 2080); ++ ++ printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1); ++ printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n", ++ idt1.limit, idt1.base); ++ printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n", ++ gdt1.limit, gdt1.base); ++ ++ if (msw1 != msw2 || msw1 != msw3) ++ printf("[FAIL]\tAll the results of SMSW should be the same.\n"); ++ else ++ printf("[PASS]\tAll the results from SMSW are identical.\n"); ++ ++ if (memcmp(&gdt1, &gdt2, sizeof(gdt1))) ++ printf("[FAIL]\tAll the results of SGDT should be the same.\n"); ++ else ++ printf("[PASS]\tAll the results from SGDT are identical.\n"); ++ ++ if (memcmp(&idt1, &idt2, sizeof(idt1))) ++ printf("[FAIL]\tAll the results of SIDT should be the same.\n"); ++ else ++ printf("[PASS]\tAll the results from SIDT are identical.\n"); ++ ++ sethandler(SIGILL, sighandler, 0); ++ do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0, ++ "STR instruction"); ++ clearhandler(SIGILL); ++ ++ sethandler(SIGILL, sighandler, 0); ++ do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0, ++ "SLDT instruction"); ++ clearhandler(SIGILL); ++} ++ + int main(void) + { + struct vm86plus_struct v86; +@@ -180,6 +268,9 @@ int main(void) + v86.regs.ds = load_addr / 16; + v86.regs.es = load_addr / 16; + ++ /* Use the end of the page as our stack. */ ++ v86.regs.esp = 4096; ++ + assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ + + /* #BR -- should deliver SIG??? 
*/ +@@ -211,6 +302,23 @@ int main(void) + v86.regs.eflags &= ~X86_EFLAGS_IF; + do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); + ++ /* POPF with VIP set but IF clear: should not trap */ ++ v86.regs.eflags = X86_EFLAGS_VIP; ++ v86.regs.eax = 0; ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear"); ++ ++ /* POPF with VIP set and IF set: should trap */ ++ v86.regs.eflags = X86_EFLAGS_VIP; ++ v86.regs.eax = X86_EFLAGS_IF; ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set"); ++ ++ /* POPF with VIP clear and IF set: should not trap */ ++ v86.regs.eflags = 0; ++ v86.regs.eax = X86_EFLAGS_IF; ++ do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set"); ++ ++ v86.regs.eflags = 0; ++ + /* INT3 -- should cause #BP */ + do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); + +@@ -218,6 +326,9 @@ int main(void) + v86.regs.eax = (unsigned int)-1; + do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80"); + ++ /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */ ++ do_umip_tests(&v86, addr); ++ + /* Execute a null pointer */ + v86.regs.cs = 0; + v86.regs.ss = 0; +@@ -231,7 +342,7 @@ int main(void) + clearhandler(SIGSEGV); + + /* Make sure nothing explodes if we fork. */ +- if (fork() > 0) ++ if (fork() == 0) + return 0; + + return (nerrs == 0 ? 0 : 1); +diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c +index a0972dea9e6c..193556507957 100644 +--- a/tools/usb/usbip/src/usbipd.c ++++ b/tools/usb/usbip/src/usbipd.c +@@ -463,7 +463,7 @@ static void set_signal(void) + sigaction(SIGTERM, &act, NULL); + sigaction(SIGINT, &act, NULL); + act.sa_handler = SIG_IGN; +- sigaction(SIGCLD, &act, NULL); ++ sigaction(SIGCHLD, &act, NULL); + } + + static const char *pid_file;