From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sun, 21 Jul 2019 14:38:10 +0000 (UTC)
Message-ID: <1563719872.b0626d2a9563fcf1aa3766ccee4a3f29ba21e120.mpagano@gentoo>
commit: b0626d2a9563fcf1aa3766ccee4a3f29ba21e120
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 21 14:37:52 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 21 14:37:52 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b0626d2a
Linux patch 4.9.186
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1185_linux-4.9.186.patch | 5559 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5563 insertions(+)
diff --git a/0000_README b/0000_README
index d75882a..e61465c 100644
--- a/0000_README
+++ b/0000_README
@@ -783,6 +783,10 @@ Patch: 1184_linux-4.9.185.patch
From: http://www.kernel.org
Desc: Linux 4.9.185
+Patch: 1185_linux-4.9.186.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.186
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1185_linux-4.9.186.patch b/1185_linux-4.9.186.patch
new file mode 100644
index 0000000..ec0c25d
--- /dev/null
+++ b/1185_linux-4.9.186.patch
@@ -0,0 +1,5559 @@
+diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+index ee3723beb701..33b38716b77f 100644
+--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
++++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+@@ -4,6 +4,7 @@ Required properties:
+ - compatible: Should be one of the following:
+ - "microchip,mcp2510" for MCP2510.
+ - "microchip,mcp2515" for MCP2515.
++ - "microchip,mcp25625" for MCP25625.
+ - reg: SPI chip select.
+ - clocks: The clock feeding the CAN controller.
+ - interrupt-parent: The parent interrupt controller.
+diff --git a/Makefile b/Makefile
+index c80dad45334e..1ab22a85118f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 185
++SUBLEVEL = 186
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
+index 61fd1ce63c56..6bb9f8ea9291 100644
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
+ MAX_DMA_ADDRESS);
+ }
+
+-static void *unw_hdr_alloc(unsigned long sz)
+-{
+- return kmalloc(sz, GFP_KERNEL);
+-}
+-
+ static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+@@ -370,6 +365,10 @@ ret_err:
+ }
+
+ #ifdef CONFIG_MODULES
++static void *unw_hdr_alloc(unsigned long sz)
++{
++ return kmalloc(sz, GFP_KERNEL);
++}
+
+ static struct unwind_table *last_table;
+
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 7839300fe46b..200d9082caa4 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -332,7 +332,7 @@
+ pwm1: pwm@02080000 {
+ compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+ reg = <0x02080000 0x4000>;
+- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6UL_CLK_PWM1>,
+ <&clks IMX6UL_CLK_PWM1>;
+ clock-names = "ipg", "per";
+@@ -343,7 +343,7 @@
+ pwm2: pwm@02084000 {
+ compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+ reg = <0x02084000 0x4000>;
+- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6UL_CLK_PWM2>,
+ <&clks IMX6UL_CLK_PWM2>;
+ clock-names = "ipg", "per";
+@@ -354,7 +354,7 @@
+ pwm3: pwm@02088000 {
+ compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+ reg = <0x02088000 0x4000>;
+- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6UL_CLK_PWM3>,
+ <&clks IMX6UL_CLK_PWM3>;
+ clock-names = "ipg", "per";
+@@ -365,7 +365,7 @@
+ pwm4: pwm@0208c000 {
+ compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+ reg = <0x0208c000 0x4000>;
+- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6UL_CLK_PWM4>,
+ <&clks IMX6UL_CLK_PWM4>;
+ clock-names = "ipg", "per";
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index 8e4539f69fdc..3bdf0d588238 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -1479,6 +1479,8 @@ static __init void da850_evm_init(void)
+ if (ret)
+ pr_warn("%s: dsp/rproc registration failed: %d\n",
+ __func__, ret);
++
++ regulator_has_full_constraints();
+ }
+
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
+index 9a22d40602aa..24779504f489 100644
+--- a/arch/arm/mach-davinci/devices-da8xx.c
++++ b/arch/arm/mach-davinci/devices-da8xx.c
+@@ -706,6 +706,9 @@ static struct platform_device da8xx_lcdc_device = {
+ .id = 0,
+ .num_resources = ARRAY_SIZE(da8xx_lcdc_resources),
+ .resource = da8xx_lcdc_resources,
++ .dev = {
++ .coherent_dma_mask = DMA_BIT_MASK(32),
++ }
+ };
+
+ int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
+diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
+index 718981bb80cd..0aec48c1736b 100644
+--- a/arch/arm/mach-omap2/prm3xxx.c
++++ b/arch/arm/mach-omap2/prm3xxx.c
+@@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void)
+ * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
+ * No return value.
+ */
+-static void __init omap3xxx_prm_enable_io_wakeup(void)
++static void omap3xxx_prm_enable_io_wakeup(void)
+ {
+ if (prm_features & PRM_HAS_IO_WAKEUP)
+ omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+diff --git a/arch/arm64/crypto/sha256-core.S b/arch/arm64/crypto/sha256-core.S
+deleted file mode 100644
+index 3ce82cc860bc..000000000000
+--- a/arch/arm64/crypto/sha256-core.S
++++ /dev/null
+@@ -1,2061 +0,0 @@
+-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+-//
+-// Licensed under the OpenSSL license (the "License"). You may not use
+-// this file except in compliance with the License. You can obtain a copy
+-// in the file LICENSE in the source distribution or at
+-// https://www.openssl.org/source/license.html
+-
+-// ====================================================================
+-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+-// project. The module is, however, dual licensed under OpenSSL and
+-// CRYPTOGAMS licenses depending on where you obtain it. For further
+-// details see http://www.openssl.org/~appro/cryptogams/.
+-//
+-// Permission to use under GPLv2 terms is granted.
+-// ====================================================================
+-//
+-// SHA256/512 for ARMv8.
+-//
+-// Performance in cycles per processed byte and improvement coefficient
+-// over code generated with "default" compiler:
+-//
+-// SHA256-hw SHA256(*) SHA512
+-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+-//
+-// (*) Software SHA256 results are of lesser relevance, presented
+-// mostly for informational purposes.
+-// (**) The result is a trade-off: it's possible to improve it by
+-// 10% (or by 1 cycle per round), but at the cost of 20% loss
+-// on Cortex-A53 (or by 4 cycles per round).
+-// (***) Super-impressive coefficients over gcc-generated code are
+-// indication of some compiler "pathology", most notably code
+-// generated with -mgeneral-regs-only is significanty faster
+-// and the gap is only 40-90%.
+-//
+-// October 2016.
+-//
+-// Originally it was reckoned that it makes no sense to implement NEON
+-// version of SHA256 for 64-bit processors. This is because performance
+-// improvement on most wide-spread Cortex-A5x processors was observed
+-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+-// observed that 32-bit NEON SHA256 performs significantly better than
+-// 64-bit scalar version on *some* of the more recent processors. As
+-// result 64-bit NEON version of SHA256 was added to provide best
+-// all-round performance. For example it executes ~30% faster on X-Gene
+-// and Mongoose. [For reference, NEON version of SHA512 is bound to
+-// deliver much less improvement, likely *negative* on Cortex-A5x.
+-// Which is why NEON support is limited to SHA256.]
+-
+-#ifndef __KERNEL__
+-# include "arm_arch.h"
+-#endif
+-
+-.text
+-
+-.extern OPENSSL_armcap_P
+-.globl sha256_block_data_order
+-.type sha256_block_data_order,%function
+-.align 6
+-sha256_block_data_order:
+-#ifndef __KERNEL__
+-# ifdef __ILP32__
+- ldrsw x16,.LOPENSSL_armcap_P
+-# else
+- ldr x16,.LOPENSSL_armcap_P
+-# endif
+- adr x17,.LOPENSSL_armcap_P
+- add x16,x16,x17
+- ldr w16,[x16]
+- tst w16,#ARMV8_SHA256
+- b.ne .Lv8_entry
+- tst w16,#ARMV7_NEON
+- b.ne .Lneon_entry
+-#endif
+- stp x29,x30,[sp,#-128]!
+- add x29,sp,#0
+-
+- stp x19,x20,[sp,#16]
+- stp x21,x22,[sp,#32]
+- stp x23,x24,[sp,#48]
+- stp x25,x26,[sp,#64]
+- stp x27,x28,[sp,#80]
+- sub sp,sp,#4*4
+-
+- ldp w20,w21,[x0] // load context
+- ldp w22,w23,[x0,#2*4]
+- ldp w24,w25,[x0,#4*4]
+- add x2,x1,x2,lsl#6 // end of input
+- ldp w26,w27,[x0,#6*4]
+- adr x30,.LK256
+- stp x0,x2,[x29,#96]
+-
+-.Loop:
+- ldp w3,w4,[x1],#2*4
+- ldr w19,[x30],#4 // *K++
+- eor w28,w21,w22 // magic seed
+- str x1,[x29,#112]
+-#ifndef __AARCH64EB__
+- rev w3,w3 // 0
+-#endif
+- ror w16,w24,#6
+- add w27,w27,w19 // h+=K[i]
+- eor w6,w24,w24,ror#14
+- and w17,w25,w24
+- bic w19,w26,w24
+- add w27,w27,w3 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w20,w21 // a^b, b^c in next round
+- eor w16,w16,w6,ror#11 // Sigma1(e)
+- ror w6,w20,#2
+- add w27,w27,w17 // h+=Ch(e,f,g)
+- eor w17,w20,w20,ror#9
+- add w27,w27,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w23,w23,w27 // d+=h
+- eor w28,w28,w21 // Maj(a,b,c)
+- eor w17,w6,w17,ror#13 // Sigma0(a)
+- add w27,w27,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w27,w27,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w4,w4 // 1
+-#endif
+- ldp w5,w6,[x1],#2*4
+- add w27,w27,w17 // h+=Sigma0(a)
+- ror w16,w23,#6
+- add w26,w26,w28 // h+=K[i]
+- eor w7,w23,w23,ror#14
+- and w17,w24,w23
+- bic w28,w25,w23
+- add w26,w26,w4 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w27,w20 // a^b, b^c in next round
+- eor w16,w16,w7,ror#11 // Sigma1(e)
+- ror w7,w27,#2
+- add w26,w26,w17 // h+=Ch(e,f,g)
+- eor w17,w27,w27,ror#9
+- add w26,w26,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w22,w22,w26 // d+=h
+- eor w19,w19,w20 // Maj(a,b,c)
+- eor w17,w7,w17,ror#13 // Sigma0(a)
+- add w26,w26,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w26,w26,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w5,w5 // 2
+-#endif
+- add w26,w26,w17 // h+=Sigma0(a)
+- ror w16,w22,#6
+- add w25,w25,w19 // h+=K[i]
+- eor w8,w22,w22,ror#14
+- and w17,w23,w22
+- bic w19,w24,w22
+- add w25,w25,w5 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w26,w27 // a^b, b^c in next round
+- eor w16,w16,w8,ror#11 // Sigma1(e)
+- ror w8,w26,#2
+- add w25,w25,w17 // h+=Ch(e,f,g)
+- eor w17,w26,w26,ror#9
+- add w25,w25,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w21,w21,w25 // d+=h
+- eor w28,w28,w27 // Maj(a,b,c)
+- eor w17,w8,w17,ror#13 // Sigma0(a)
+- add w25,w25,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w25,w25,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w6,w6 // 3
+-#endif
+- ldp w7,w8,[x1],#2*4
+- add w25,w25,w17 // h+=Sigma0(a)
+- ror w16,w21,#6
+- add w24,w24,w28 // h+=K[i]
+- eor w9,w21,w21,ror#14
+- and w17,w22,w21
+- bic w28,w23,w21
+- add w24,w24,w6 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w25,w26 // a^b, b^c in next round
+- eor w16,w16,w9,ror#11 // Sigma1(e)
+- ror w9,w25,#2
+- add w24,w24,w17 // h+=Ch(e,f,g)
+- eor w17,w25,w25,ror#9
+- add w24,w24,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w20,w20,w24 // d+=h
+- eor w19,w19,w26 // Maj(a,b,c)
+- eor w17,w9,w17,ror#13 // Sigma0(a)
+- add w24,w24,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w24,w24,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w7,w7 // 4
+-#endif
+- add w24,w24,w17 // h+=Sigma0(a)
+- ror w16,w20,#6
+- add w23,w23,w19 // h+=K[i]
+- eor w10,w20,w20,ror#14
+- and w17,w21,w20
+- bic w19,w22,w20
+- add w23,w23,w7 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w24,w25 // a^b, b^c in next round
+- eor w16,w16,w10,ror#11 // Sigma1(e)
+- ror w10,w24,#2
+- add w23,w23,w17 // h+=Ch(e,f,g)
+- eor w17,w24,w24,ror#9
+- add w23,w23,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w27,w27,w23 // d+=h
+- eor w28,w28,w25 // Maj(a,b,c)
+- eor w17,w10,w17,ror#13 // Sigma0(a)
+- add w23,w23,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w23,w23,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w8,w8 // 5
+-#endif
+- ldp w9,w10,[x1],#2*4
+- add w23,w23,w17 // h+=Sigma0(a)
+- ror w16,w27,#6
+- add w22,w22,w28 // h+=K[i]
+- eor w11,w27,w27,ror#14
+- and w17,w20,w27
+- bic w28,w21,w27
+- add w22,w22,w8 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w23,w24 // a^b, b^c in next round
+- eor w16,w16,w11,ror#11 // Sigma1(e)
+- ror w11,w23,#2
+- add w22,w22,w17 // h+=Ch(e,f,g)
+- eor w17,w23,w23,ror#9
+- add w22,w22,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w26,w26,w22 // d+=h
+- eor w19,w19,w24 // Maj(a,b,c)
+- eor w17,w11,w17,ror#13 // Sigma0(a)
+- add w22,w22,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w22,w22,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w9,w9 // 6
+-#endif
+- add w22,w22,w17 // h+=Sigma0(a)
+- ror w16,w26,#6
+- add w21,w21,w19 // h+=K[i]
+- eor w12,w26,w26,ror#14
+- and w17,w27,w26
+- bic w19,w20,w26
+- add w21,w21,w9 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w22,w23 // a^b, b^c in next round
+- eor w16,w16,w12,ror#11 // Sigma1(e)
+- ror w12,w22,#2
+- add w21,w21,w17 // h+=Ch(e,f,g)
+- eor w17,w22,w22,ror#9
+- add w21,w21,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w25,w25,w21 // d+=h
+- eor w28,w28,w23 // Maj(a,b,c)
+- eor w17,w12,w17,ror#13 // Sigma0(a)
+- add w21,w21,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w21,w21,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w10,w10 // 7
+-#endif
+- ldp w11,w12,[x1],#2*4
+- add w21,w21,w17 // h+=Sigma0(a)
+- ror w16,w25,#6
+- add w20,w20,w28 // h+=K[i]
+- eor w13,w25,w25,ror#14
+- and w17,w26,w25
+- bic w28,w27,w25
+- add w20,w20,w10 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w21,w22 // a^b, b^c in next round
+- eor w16,w16,w13,ror#11 // Sigma1(e)
+- ror w13,w21,#2
+- add w20,w20,w17 // h+=Ch(e,f,g)
+- eor w17,w21,w21,ror#9
+- add w20,w20,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w24,w24,w20 // d+=h
+- eor w19,w19,w22 // Maj(a,b,c)
+- eor w17,w13,w17,ror#13 // Sigma0(a)
+- add w20,w20,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w20,w20,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w11,w11 // 8
+-#endif
+- add w20,w20,w17 // h+=Sigma0(a)
+- ror w16,w24,#6
+- add w27,w27,w19 // h+=K[i]
+- eor w14,w24,w24,ror#14
+- and w17,w25,w24
+- bic w19,w26,w24
+- add w27,w27,w11 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w20,w21 // a^b, b^c in next round
+- eor w16,w16,w14,ror#11 // Sigma1(e)
+- ror w14,w20,#2
+- add w27,w27,w17 // h+=Ch(e,f,g)
+- eor w17,w20,w20,ror#9
+- add w27,w27,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w23,w23,w27 // d+=h
+- eor w28,w28,w21 // Maj(a,b,c)
+- eor w17,w14,w17,ror#13 // Sigma0(a)
+- add w27,w27,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w27,w27,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w12,w12 // 9
+-#endif
+- ldp w13,w14,[x1],#2*4
+- add w27,w27,w17 // h+=Sigma0(a)
+- ror w16,w23,#6
+- add w26,w26,w28 // h+=K[i]
+- eor w15,w23,w23,ror#14
+- and w17,w24,w23
+- bic w28,w25,w23
+- add w26,w26,w12 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w27,w20 // a^b, b^c in next round
+- eor w16,w16,w15,ror#11 // Sigma1(e)
+- ror w15,w27,#2
+- add w26,w26,w17 // h+=Ch(e,f,g)
+- eor w17,w27,w27,ror#9
+- add w26,w26,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w22,w22,w26 // d+=h
+- eor w19,w19,w20 // Maj(a,b,c)
+- eor w17,w15,w17,ror#13 // Sigma0(a)
+- add w26,w26,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w26,w26,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w13,w13 // 10
+-#endif
+- add w26,w26,w17 // h+=Sigma0(a)
+- ror w16,w22,#6
+- add w25,w25,w19 // h+=K[i]
+- eor w0,w22,w22,ror#14
+- and w17,w23,w22
+- bic w19,w24,w22
+- add w25,w25,w13 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w26,w27 // a^b, b^c in next round
+- eor w16,w16,w0,ror#11 // Sigma1(e)
+- ror w0,w26,#2
+- add w25,w25,w17 // h+=Ch(e,f,g)
+- eor w17,w26,w26,ror#9
+- add w25,w25,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w21,w21,w25 // d+=h
+- eor w28,w28,w27 // Maj(a,b,c)
+- eor w17,w0,w17,ror#13 // Sigma0(a)
+- add w25,w25,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w25,w25,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w14,w14 // 11
+-#endif
+- ldp w15,w0,[x1],#2*4
+- add w25,w25,w17 // h+=Sigma0(a)
+- str w6,[sp,#12]
+- ror w16,w21,#6
+- add w24,w24,w28 // h+=K[i]
+- eor w6,w21,w21,ror#14
+- and w17,w22,w21
+- bic w28,w23,w21
+- add w24,w24,w14 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w25,w26 // a^b, b^c in next round
+- eor w16,w16,w6,ror#11 // Sigma1(e)
+- ror w6,w25,#2
+- add w24,w24,w17 // h+=Ch(e,f,g)
+- eor w17,w25,w25,ror#9
+- add w24,w24,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w20,w20,w24 // d+=h
+- eor w19,w19,w26 // Maj(a,b,c)
+- eor w17,w6,w17,ror#13 // Sigma0(a)
+- add w24,w24,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w24,w24,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w15,w15 // 12
+-#endif
+- add w24,w24,w17 // h+=Sigma0(a)
+- str w7,[sp,#0]
+- ror w16,w20,#6
+- add w23,w23,w19 // h+=K[i]
+- eor w7,w20,w20,ror#14
+- and w17,w21,w20
+- bic w19,w22,w20
+- add w23,w23,w15 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w24,w25 // a^b, b^c in next round
+- eor w16,w16,w7,ror#11 // Sigma1(e)
+- ror w7,w24,#2
+- add w23,w23,w17 // h+=Ch(e,f,g)
+- eor w17,w24,w24,ror#9
+- add w23,w23,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w27,w27,w23 // d+=h
+- eor w28,w28,w25 // Maj(a,b,c)
+- eor w17,w7,w17,ror#13 // Sigma0(a)
+- add w23,w23,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w23,w23,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w0,w0 // 13
+-#endif
+- ldp w1,w2,[x1]
+- add w23,w23,w17 // h+=Sigma0(a)
+- str w8,[sp,#4]
+- ror w16,w27,#6
+- add w22,w22,w28 // h+=K[i]
+- eor w8,w27,w27,ror#14
+- and w17,w20,w27
+- bic w28,w21,w27
+- add w22,w22,w0 // h+=X[i]
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w23,w24 // a^b, b^c in next round
+- eor w16,w16,w8,ror#11 // Sigma1(e)
+- ror w8,w23,#2
+- add w22,w22,w17 // h+=Ch(e,f,g)
+- eor w17,w23,w23,ror#9
+- add w22,w22,w16 // h+=Sigma1(e)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- add w26,w26,w22 // d+=h
+- eor w19,w19,w24 // Maj(a,b,c)
+- eor w17,w8,w17,ror#13 // Sigma0(a)
+- add w22,w22,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- //add w22,w22,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w1,w1 // 14
+-#endif
+- ldr w6,[sp,#12]
+- add w22,w22,w17 // h+=Sigma0(a)
+- str w9,[sp,#8]
+- ror w16,w26,#6
+- add w21,w21,w19 // h+=K[i]
+- eor w9,w26,w26,ror#14
+- and w17,w27,w26
+- bic w19,w20,w26
+- add w21,w21,w1 // h+=X[i]
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w22,w23 // a^b, b^c in next round
+- eor w16,w16,w9,ror#11 // Sigma1(e)
+- ror w9,w22,#2
+- add w21,w21,w17 // h+=Ch(e,f,g)
+- eor w17,w22,w22,ror#9
+- add w21,w21,w16 // h+=Sigma1(e)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- add w25,w25,w21 // d+=h
+- eor w28,w28,w23 // Maj(a,b,c)
+- eor w17,w9,w17,ror#13 // Sigma0(a)
+- add w21,w21,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- //add w21,w21,w17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev w2,w2 // 15
+-#endif
+- ldr w7,[sp,#0]
+- add w21,w21,w17 // h+=Sigma0(a)
+- str w10,[sp,#12]
+- ror w16,w25,#6
+- add w20,w20,w28 // h+=K[i]
+- ror w9,w4,#7
+- and w17,w26,w25
+- ror w8,w1,#17
+- bic w28,w27,w25
+- ror w10,w21,#2
+- add w20,w20,w2 // h+=X[i]
+- eor w16,w16,w25,ror#11
+- eor w9,w9,w4,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w21,w22 // a^b, b^c in next round
+- eor w16,w16,w25,ror#25 // Sigma1(e)
+- eor w10,w10,w21,ror#13
+- add w20,w20,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w8,w8,w1,ror#19
+- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+- add w20,w20,w16 // h+=Sigma1(e)
+- eor w19,w19,w22 // Maj(a,b,c)
+- eor w17,w10,w21,ror#22 // Sigma0(a)
+- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+- add w3,w3,w12
+- add w24,w24,w20 // d+=h
+- add w20,w20,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w3,w3,w9
+- add w20,w20,w17 // h+=Sigma0(a)
+- add w3,w3,w8
+-.Loop_16_xx:
+- ldr w8,[sp,#4]
+- str w11,[sp,#0]
+- ror w16,w24,#6
+- add w27,w27,w19 // h+=K[i]
+- ror w10,w5,#7
+- and w17,w25,w24
+- ror w9,w2,#17
+- bic w19,w26,w24
+- ror w11,w20,#2
+- add w27,w27,w3 // h+=X[i]
+- eor w16,w16,w24,ror#11
+- eor w10,w10,w5,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w20,w21 // a^b, b^c in next round
+- eor w16,w16,w24,ror#25 // Sigma1(e)
+- eor w11,w11,w20,ror#13
+- add w27,w27,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w9,w9,w2,ror#19
+- eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
+- add w27,w27,w16 // h+=Sigma1(e)
+- eor w28,w28,w21 // Maj(a,b,c)
+- eor w17,w11,w20,ror#22 // Sigma0(a)
+- eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
+- add w4,w4,w13
+- add w23,w23,w27 // d+=h
+- add w27,w27,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w4,w4,w10
+- add w27,w27,w17 // h+=Sigma0(a)
+- add w4,w4,w9
+- ldr w9,[sp,#8]
+- str w12,[sp,#4]
+- ror w16,w23,#6
+- add w26,w26,w28 // h+=K[i]
+- ror w11,w6,#7
+- and w17,w24,w23
+- ror w10,w3,#17
+- bic w28,w25,w23
+- ror w12,w27,#2
+- add w26,w26,w4 // h+=X[i]
+- eor w16,w16,w23,ror#11
+- eor w11,w11,w6,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w27,w20 // a^b, b^c in next round
+- eor w16,w16,w23,ror#25 // Sigma1(e)
+- eor w12,w12,w27,ror#13
+- add w26,w26,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w10,w10,w3,ror#19
+- eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
+- add w26,w26,w16 // h+=Sigma1(e)
+- eor w19,w19,w20 // Maj(a,b,c)
+- eor w17,w12,w27,ror#22 // Sigma0(a)
+- eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
+- add w5,w5,w14
+- add w22,w22,w26 // d+=h
+- add w26,w26,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w5,w5,w11
+- add w26,w26,w17 // h+=Sigma0(a)
+- add w5,w5,w10
+- ldr w10,[sp,#12]
+- str w13,[sp,#8]
+- ror w16,w22,#6
+- add w25,w25,w19 // h+=K[i]
+- ror w12,w7,#7
+- and w17,w23,w22
+- ror w11,w4,#17
+- bic w19,w24,w22
+- ror w13,w26,#2
+- add w25,w25,w5 // h+=X[i]
+- eor w16,w16,w22,ror#11
+- eor w12,w12,w7,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w26,w27 // a^b, b^c in next round
+- eor w16,w16,w22,ror#25 // Sigma1(e)
+- eor w13,w13,w26,ror#13
+- add w25,w25,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w11,w11,w4,ror#19
+- eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
+- add w25,w25,w16 // h+=Sigma1(e)
+- eor w28,w28,w27 // Maj(a,b,c)
+- eor w17,w13,w26,ror#22 // Sigma0(a)
+- eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
+- add w6,w6,w15
+- add w21,w21,w25 // d+=h
+- add w25,w25,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w6,w6,w12
+- add w25,w25,w17 // h+=Sigma0(a)
+- add w6,w6,w11
+- ldr w11,[sp,#0]
+- str w14,[sp,#12]
+- ror w16,w21,#6
+- add w24,w24,w28 // h+=K[i]
+- ror w13,w8,#7
+- and w17,w22,w21
+- ror w12,w5,#17
+- bic w28,w23,w21
+- ror w14,w25,#2
+- add w24,w24,w6 // h+=X[i]
+- eor w16,w16,w21,ror#11
+- eor w13,w13,w8,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w25,w26 // a^b, b^c in next round
+- eor w16,w16,w21,ror#25 // Sigma1(e)
+- eor w14,w14,w25,ror#13
+- add w24,w24,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w12,w12,w5,ror#19
+- eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
+- add w24,w24,w16 // h+=Sigma1(e)
+- eor w19,w19,w26 // Maj(a,b,c)
+- eor w17,w14,w25,ror#22 // Sigma0(a)
+- eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
+- add w7,w7,w0
+- add w20,w20,w24 // d+=h
+- add w24,w24,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w7,w7,w13
+- add w24,w24,w17 // h+=Sigma0(a)
+- add w7,w7,w12
+- ldr w12,[sp,#4]
+- str w15,[sp,#0]
+- ror w16,w20,#6
+- add w23,w23,w19 // h+=K[i]
+- ror w14,w9,#7
+- and w17,w21,w20
+- ror w13,w6,#17
+- bic w19,w22,w20
+- ror w15,w24,#2
+- add w23,w23,w7 // h+=X[i]
+- eor w16,w16,w20,ror#11
+- eor w14,w14,w9,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w24,w25 // a^b, b^c in next round
+- eor w16,w16,w20,ror#25 // Sigma1(e)
+- eor w15,w15,w24,ror#13
+- add w23,w23,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w13,w13,w6,ror#19
+- eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
+- add w23,w23,w16 // h+=Sigma1(e)
+- eor w28,w28,w25 // Maj(a,b,c)
+- eor w17,w15,w24,ror#22 // Sigma0(a)
+- eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
+- add w8,w8,w1
+- add w27,w27,w23 // d+=h
+- add w23,w23,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w8,w8,w14
+- add w23,w23,w17 // h+=Sigma0(a)
+- add w8,w8,w13
+- ldr w13,[sp,#8]
+- str w0,[sp,#4]
+- ror w16,w27,#6
+- add w22,w22,w28 // h+=K[i]
+- ror w15,w10,#7
+- and w17,w20,w27
+- ror w14,w7,#17
+- bic w28,w21,w27
+- ror w0,w23,#2
+- add w22,w22,w8 // h+=X[i]
+- eor w16,w16,w27,ror#11
+- eor w15,w15,w10,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w23,w24 // a^b, b^c in next round
+- eor w16,w16,w27,ror#25 // Sigma1(e)
+- eor w0,w0,w23,ror#13
+- add w22,w22,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w14,w14,w7,ror#19
+- eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
+- add w22,w22,w16 // h+=Sigma1(e)
+- eor w19,w19,w24 // Maj(a,b,c)
+- eor w17,w0,w23,ror#22 // Sigma0(a)
+- eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
+- add w9,w9,w2
+- add w26,w26,w22 // d+=h
+- add w22,w22,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w9,w9,w15
+- add w22,w22,w17 // h+=Sigma0(a)
+- add w9,w9,w14
+- ldr w14,[sp,#12]
+- str w1,[sp,#8]
+- ror w16,w26,#6
+- add w21,w21,w19 // h+=K[i]
+- ror w0,w11,#7
+- and w17,w27,w26
+- ror w15,w8,#17
+- bic w19,w20,w26
+- ror w1,w22,#2
+- add w21,w21,w9 // h+=X[i]
+- eor w16,w16,w26,ror#11
+- eor w0,w0,w11,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w22,w23 // a^b, b^c in next round
+- eor w16,w16,w26,ror#25 // Sigma1(e)
+- eor w1,w1,w22,ror#13
+- add w21,w21,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w15,w15,w8,ror#19
+- eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
+- add w21,w21,w16 // h+=Sigma1(e)
+- eor w28,w28,w23 // Maj(a,b,c)
+- eor w17,w1,w22,ror#22 // Sigma0(a)
+- eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
+- add w10,w10,w3
+- add w25,w25,w21 // d+=h
+- add w21,w21,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w10,w10,w0
+- add w21,w21,w17 // h+=Sigma0(a)
+- add w10,w10,w15
+- ldr w15,[sp,#0]
+- str w2,[sp,#12]
+- ror w16,w25,#6
+- add w20,w20,w28 // h+=K[i]
+- ror w1,w12,#7
+- and w17,w26,w25
+- ror w0,w9,#17
+- bic w28,w27,w25
+- ror w2,w21,#2
+- add w20,w20,w10 // h+=X[i]
+- eor w16,w16,w25,ror#11
+- eor w1,w1,w12,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w21,w22 // a^b, b^c in next round
+- eor w16,w16,w25,ror#25 // Sigma1(e)
+- eor w2,w2,w21,ror#13
+- add w20,w20,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w0,w0,w9,ror#19
+- eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
+- add w20,w20,w16 // h+=Sigma1(e)
+- eor w19,w19,w22 // Maj(a,b,c)
+- eor w17,w2,w21,ror#22 // Sigma0(a)
+- eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
+- add w11,w11,w4
+- add w24,w24,w20 // d+=h
+- add w20,w20,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w11,w11,w1
+- add w20,w20,w17 // h+=Sigma0(a)
+- add w11,w11,w0
+- ldr w0,[sp,#4]
+- str w3,[sp,#0]
+- ror w16,w24,#6
+- add w27,w27,w19 // h+=K[i]
+- ror w2,w13,#7
+- and w17,w25,w24
+- ror w1,w10,#17
+- bic w19,w26,w24
+- ror w3,w20,#2
+- add w27,w27,w11 // h+=X[i]
+- eor w16,w16,w24,ror#11
+- eor w2,w2,w13,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w20,w21 // a^b, b^c in next round
+- eor w16,w16,w24,ror#25 // Sigma1(e)
+- eor w3,w3,w20,ror#13
+- add w27,w27,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w1,w1,w10,ror#19
+- eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
+- add w27,w27,w16 // h+=Sigma1(e)
+- eor w28,w28,w21 // Maj(a,b,c)
+- eor w17,w3,w20,ror#22 // Sigma0(a)
+- eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
+- add w12,w12,w5
+- add w23,w23,w27 // d+=h
+- add w27,w27,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w12,w12,w2
+- add w27,w27,w17 // h+=Sigma0(a)
+- add w12,w12,w1
+- ldr w1,[sp,#8]
+- str w4,[sp,#4]
+- ror w16,w23,#6
+- add w26,w26,w28 // h+=K[i]
+- ror w3,w14,#7
+- and w17,w24,w23
+- ror w2,w11,#17
+- bic w28,w25,w23
+- ror w4,w27,#2
+- add w26,w26,w12 // h+=X[i]
+- eor w16,w16,w23,ror#11
+- eor w3,w3,w14,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w27,w20 // a^b, b^c in next round
+- eor w16,w16,w23,ror#25 // Sigma1(e)
+- eor w4,w4,w27,ror#13
+- add w26,w26,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w2,w2,w11,ror#19
+- eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
+- add w26,w26,w16 // h+=Sigma1(e)
+- eor w19,w19,w20 // Maj(a,b,c)
+- eor w17,w4,w27,ror#22 // Sigma0(a)
+- eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
+- add w13,w13,w6
+- add w22,w22,w26 // d+=h
+- add w26,w26,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w13,w13,w3
+- add w26,w26,w17 // h+=Sigma0(a)
+- add w13,w13,w2
+- ldr w2,[sp,#12]
+- str w5,[sp,#8]
+- ror w16,w22,#6
+- add w25,w25,w19 // h+=K[i]
+- ror w4,w15,#7
+- and w17,w23,w22
+- ror w3,w12,#17
+- bic w19,w24,w22
+- ror w5,w26,#2
+- add w25,w25,w13 // h+=X[i]
+- eor w16,w16,w22,ror#11
+- eor w4,w4,w15,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w26,w27 // a^b, b^c in next round
+- eor w16,w16,w22,ror#25 // Sigma1(e)
+- eor w5,w5,w26,ror#13
+- add w25,w25,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w3,w3,w12,ror#19
+- eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
+- add w25,w25,w16 // h+=Sigma1(e)
+- eor w28,w28,w27 // Maj(a,b,c)
+- eor w17,w5,w26,ror#22 // Sigma0(a)
+- eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
+- add w14,w14,w7
+- add w21,w21,w25 // d+=h
+- add w25,w25,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w14,w14,w4
+- add w25,w25,w17 // h+=Sigma0(a)
+- add w14,w14,w3
+- ldr w3,[sp,#0]
+- str w6,[sp,#12]
+- ror w16,w21,#6
+- add w24,w24,w28 // h+=K[i]
+- ror w5,w0,#7
+- and w17,w22,w21
+- ror w4,w13,#17
+- bic w28,w23,w21
+- ror w6,w25,#2
+- add w24,w24,w14 // h+=X[i]
+- eor w16,w16,w21,ror#11
+- eor w5,w5,w0,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w25,w26 // a^b, b^c in next round
+- eor w16,w16,w21,ror#25 // Sigma1(e)
+- eor w6,w6,w25,ror#13
+- add w24,w24,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w4,w4,w13,ror#19
+- eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
+- add w24,w24,w16 // h+=Sigma1(e)
+- eor w19,w19,w26 // Maj(a,b,c)
+- eor w17,w6,w25,ror#22 // Sigma0(a)
+- eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
+- add w15,w15,w8
+- add w20,w20,w24 // d+=h
+- add w24,w24,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w15,w15,w5
+- add w24,w24,w17 // h+=Sigma0(a)
+- add w15,w15,w4
+- ldr w4,[sp,#4]
+- str w7,[sp,#0]
+- ror w16,w20,#6
+- add w23,w23,w19 // h+=K[i]
+- ror w6,w1,#7
+- and w17,w21,w20
+- ror w5,w14,#17
+- bic w19,w22,w20
+- ror w7,w24,#2
+- add w23,w23,w15 // h+=X[i]
+- eor w16,w16,w20,ror#11
+- eor w6,w6,w1,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w24,w25 // a^b, b^c in next round
+- eor w16,w16,w20,ror#25 // Sigma1(e)
+- eor w7,w7,w24,ror#13
+- add w23,w23,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w5,w5,w14,ror#19
+- eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
+- add w23,w23,w16 // h+=Sigma1(e)
+- eor w28,w28,w25 // Maj(a,b,c)
+- eor w17,w7,w24,ror#22 // Sigma0(a)
+- eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
+- add w0,w0,w9
+- add w27,w27,w23 // d+=h
+- add w23,w23,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w0,w0,w6
+- add w23,w23,w17 // h+=Sigma0(a)
+- add w0,w0,w5
+- ldr w5,[sp,#8]
+- str w8,[sp,#4]
+- ror w16,w27,#6
+- add w22,w22,w28 // h+=K[i]
+- ror w7,w2,#7
+- and w17,w20,w27
+- ror w6,w15,#17
+- bic w28,w21,w27
+- ror w8,w23,#2
+- add w22,w22,w0 // h+=X[i]
+- eor w16,w16,w27,ror#11
+- eor w7,w7,w2,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w23,w24 // a^b, b^c in next round
+- eor w16,w16,w27,ror#25 // Sigma1(e)
+- eor w8,w8,w23,ror#13
+- add w22,w22,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w6,w6,w15,ror#19
+- eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
+- add w22,w22,w16 // h+=Sigma1(e)
+- eor w19,w19,w24 // Maj(a,b,c)
+- eor w17,w8,w23,ror#22 // Sigma0(a)
+- eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
+- add w1,w1,w10
+- add w26,w26,w22 // d+=h
+- add w22,w22,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w1,w1,w7
+- add w22,w22,w17 // h+=Sigma0(a)
+- add w1,w1,w6
+- ldr w6,[sp,#12]
+- str w9,[sp,#8]
+- ror w16,w26,#6
+- add w21,w21,w19 // h+=K[i]
+- ror w8,w3,#7
+- and w17,w27,w26
+- ror w7,w0,#17
+- bic w19,w20,w26
+- ror w9,w22,#2
+- add w21,w21,w1 // h+=X[i]
+- eor w16,w16,w26,ror#11
+- eor w8,w8,w3,ror#18
+- orr w17,w17,w19 // Ch(e,f,g)
+- eor w19,w22,w23 // a^b, b^c in next round
+- eor w16,w16,w26,ror#25 // Sigma1(e)
+- eor w9,w9,w22,ror#13
+- add w21,w21,w17 // h+=Ch(e,f,g)
+- and w28,w28,w19 // (b^c)&=(a^b)
+- eor w7,w7,w0,ror#19
+- eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
+- add w21,w21,w16 // h+=Sigma1(e)
+- eor w28,w28,w23 // Maj(a,b,c)
+- eor w17,w9,w22,ror#22 // Sigma0(a)
+- eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
+- add w2,w2,w11
+- add w25,w25,w21 // d+=h
+- add w21,w21,w28 // h+=Maj(a,b,c)
+- ldr w28,[x30],#4 // *K++, w19 in next round
+- add w2,w2,w8
+- add w21,w21,w17 // h+=Sigma0(a)
+- add w2,w2,w7
+- ldr w7,[sp,#0]
+- str w10,[sp,#12]
+- ror w16,w25,#6
+- add w20,w20,w28 // h+=K[i]
+- ror w9,w4,#7
+- and w17,w26,w25
+- ror w8,w1,#17
+- bic w28,w27,w25
+- ror w10,w21,#2
+- add w20,w20,w2 // h+=X[i]
+- eor w16,w16,w25,ror#11
+- eor w9,w9,w4,ror#18
+- orr w17,w17,w28 // Ch(e,f,g)
+- eor w28,w21,w22 // a^b, b^c in next round
+- eor w16,w16,w25,ror#25 // Sigma1(e)
+- eor w10,w10,w21,ror#13
+- add w20,w20,w17 // h+=Ch(e,f,g)
+- and w19,w19,w28 // (b^c)&=(a^b)
+- eor w8,w8,w1,ror#19
+- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+- add w20,w20,w16 // h+=Sigma1(e)
+- eor w19,w19,w22 // Maj(a,b,c)
+- eor w17,w10,w21,ror#22 // Sigma0(a)
+- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+- add w3,w3,w12
+- add w24,w24,w20 // d+=h
+- add w20,w20,w19 // h+=Maj(a,b,c)
+- ldr w19,[x30],#4 // *K++, w28 in next round
+- add w3,w3,w9
+- add w20,w20,w17 // h+=Sigma0(a)
+- add w3,w3,w8
+- cbnz w19,.Loop_16_xx
+-
+- ldp x0,x2,[x29,#96]
+- ldr x1,[x29,#112]
+- sub x30,x30,#260 // rewind
+-
+- ldp w3,w4,[x0]
+- ldp w5,w6,[x0,#2*4]
+- add x1,x1,#14*4 // advance input pointer
+- ldp w7,w8,[x0,#4*4]
+- add w20,w20,w3
+- ldp w9,w10,[x0,#6*4]
+- add w21,w21,w4
+- add w22,w22,w5
+- add w23,w23,w6
+- stp w20,w21,[x0]
+- add w24,w24,w7
+- add w25,w25,w8
+- stp w22,w23,[x0,#2*4]
+- add w26,w26,w9
+- add w27,w27,w10
+- cmp x1,x2
+- stp w24,w25,[x0,#4*4]
+- stp w26,w27,[x0,#6*4]
+- b.ne .Loop
+-
+- ldp x19,x20,[x29,#16]
+- add sp,sp,#4*4
+- ldp x21,x22,[x29,#32]
+- ldp x23,x24,[x29,#48]
+- ldp x25,x26,[x29,#64]
+- ldp x27,x28,[x29,#80]
+- ldp x29,x30,[sp],#128
+- ret
+-.size sha256_block_data_order,.-sha256_block_data_order
+-
+-.align 6
+-.type .LK256,%object
+-.LK256:
+- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+- .long 0 //terminator
+-.size .LK256,.-.LK256
+-#ifndef __KERNEL__
+-.align 3
+-.LOPENSSL_armcap_P:
+-# ifdef __ILP32__
+- .long OPENSSL_armcap_P-.
+-# else
+- .quad OPENSSL_armcap_P-.
+-# endif
+-#endif
+-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+-.align 2
+-#ifndef __KERNEL__
+-.type sha256_block_armv8,%function
+-.align 6
+-sha256_block_armv8:
+-.Lv8_entry:
+- stp x29,x30,[sp,#-16]!
+- add x29,sp,#0
+-
+- ld1 {v0.4s,v1.4s},[x0]
+- adr x3,.LK256
+-
+-.Loop_hw:
+- ld1 {v4.16b-v7.16b},[x1],#64
+- sub x2,x2,#1
+- ld1 {v16.4s},[x3],#16
+- rev32 v4.16b,v4.16b
+- rev32 v5.16b,v5.16b
+- rev32 v6.16b,v6.16b
+- rev32 v7.16b,v7.16b
+- orr v18.16b,v0.16b,v0.16b // offload
+- orr v19.16b,v1.16b,v1.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v4.4s
+- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v5.4s
+- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v6.4s
+- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v7.4s
+- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v4.4s
+- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v5.4s
+- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v6.4s
+- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v7.4s
+- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v4.4s
+- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v5.4s
+- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v6.4s
+- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v7.4s
+- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+- ld1 {v17.4s},[x3],#16
+- add v16.4s,v16.4s,v4.4s
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+-
+- ld1 {v16.4s},[x3],#16
+- add v17.4s,v17.4s,v5.4s
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+-
+- ld1 {v17.4s},[x3]
+- add v16.4s,v16.4s,v6.4s
+- sub x3,x3,#64*4-16 // rewind
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+-
+- add v17.4s,v17.4s,v7.4s
+- orr v2.16b,v0.16b,v0.16b
+- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+-
+- add v0.4s,v0.4s,v18.4s
+- add v1.4s,v1.4s,v19.4s
+-
+- cbnz x2,.Loop_hw
+-
+- st1 {v0.4s,v1.4s},[x0]
+-
+- ldr x29,[sp],#16
+- ret
+-.size sha256_block_armv8,.-sha256_block_armv8
+-#endif
+-#ifdef __KERNEL__
+-.globl sha256_block_neon
+-#endif
+-.type sha256_block_neon,%function
+-.align 4
+-sha256_block_neon:
+-.Lneon_entry:
+- stp x29, x30, [sp, #-16]!
+- mov x29, sp
+- sub sp,sp,#16*4
+-
+- adr x16,.LK256
+- add x2,x1,x2,lsl#6 // len to point at the end of inp
+-
+- ld1 {v0.16b},[x1], #16
+- ld1 {v1.16b},[x1], #16
+- ld1 {v2.16b},[x1], #16
+- ld1 {v3.16b},[x1], #16
+- ld1 {v4.4s},[x16], #16
+- ld1 {v5.4s},[x16], #16
+- ld1 {v6.4s},[x16], #16
+- ld1 {v7.4s},[x16], #16
+- rev32 v0.16b,v0.16b // yes, even on
+- rev32 v1.16b,v1.16b // big-endian
+- rev32 v2.16b,v2.16b
+- rev32 v3.16b,v3.16b
+- mov x17,sp
+- add v4.4s,v4.4s,v0.4s
+- add v5.4s,v5.4s,v1.4s
+- add v6.4s,v6.4s,v2.4s
+- st1 {v4.4s-v5.4s},[x17], #32
+- add v7.4s,v7.4s,v3.4s
+- st1 {v6.4s-v7.4s},[x17]
+- sub x17,x17,#32
+-
+- ldp w3,w4,[x0]
+- ldp w5,w6,[x0,#8]
+- ldp w7,w8,[x0,#16]
+- ldp w9,w10,[x0,#24]
+- ldr w12,[sp,#0]
+- mov w13,wzr
+- eor w14,w4,w5
+- mov w15,wzr
+- b .L_00_48
+-
+-.align 4
+-.L_00_48:
+- ext v4.16b,v0.16b,v1.16b,#4
+- add w10,w10,w12
+- add w3,w3,w15
+- and w12,w8,w7
+- bic w15,w9,w7
+- ext v7.16b,v2.16b,v3.16b,#4
+- eor w11,w7,w7,ror#5
+- add w3,w3,w13
+- mov d19,v3.d[1]
+- orr w12,w12,w15
+- eor w11,w11,w7,ror#19
+- ushr v6.4s,v4.4s,#7
+- eor w15,w3,w3,ror#11
+- ushr v5.4s,v4.4s,#3
+- add w10,w10,w12
+- add v0.4s,v0.4s,v7.4s
+- ror w11,w11,#6
+- sli v6.4s,v4.4s,#25
+- eor w13,w3,w4
+- eor w15,w15,w3,ror#20
+- ushr v7.4s,v4.4s,#18
+- add w10,w10,w11
+- ldr w12,[sp,#4]
+- and w14,w14,w13
+- eor v5.16b,v5.16b,v6.16b
+- ror w15,w15,#2
+- add w6,w6,w10
+- sli v7.4s,v4.4s,#14
+- eor w14,w14,w4
+- ushr v16.4s,v19.4s,#17
+- add w9,w9,w12
+- add w10,w10,w15
+- and w12,w7,w6
+- eor v5.16b,v5.16b,v7.16b
+- bic w15,w8,w6
+- eor w11,w6,w6,ror#5
+- sli v16.4s,v19.4s,#15
+- add w10,w10,w14
+- orr w12,w12,w15
+- ushr v17.4s,v19.4s,#10
+- eor w11,w11,w6,ror#19
+- eor w15,w10,w10,ror#11
+- ushr v7.4s,v19.4s,#19
+- add w9,w9,w12
+- ror w11,w11,#6
+- add v0.4s,v0.4s,v5.4s
+- eor w14,w10,w3
+- eor w15,w15,w10,ror#20
+- sli v7.4s,v19.4s,#13
+- add w9,w9,w11
+- ldr w12,[sp,#8]
+- and w13,w13,w14
+- eor v17.16b,v17.16b,v16.16b
+- ror w15,w15,#2
+- add w5,w5,w9
+- eor w13,w13,w3
+- eor v17.16b,v17.16b,v7.16b
+- add w8,w8,w12
+- add w9,w9,w15
+- and w12,w6,w5
+- add v0.4s,v0.4s,v17.4s
+- bic w15,w7,w5
+- eor w11,w5,w5,ror#5
+- add w9,w9,w13
+- ushr v18.4s,v0.4s,#17
+- orr w12,w12,w15
+- ushr v19.4s,v0.4s,#10
+- eor w11,w11,w5,ror#19
+- eor w15,w9,w9,ror#11
+- sli v18.4s,v0.4s,#15
+- add w8,w8,w12
+- ushr v17.4s,v0.4s,#19
+- ror w11,w11,#6
+- eor w13,w9,w10
+- eor v19.16b,v19.16b,v18.16b
+- eor w15,w15,w9,ror#20
+- add w8,w8,w11
+- sli v17.4s,v0.4s,#13
+- ldr w12,[sp,#12]
+- and w14,w14,w13
+- ror w15,w15,#2
+- ld1 {v4.4s},[x16], #16
+- add w4,w4,w8
+- eor v19.16b,v19.16b,v17.16b
+- eor w14,w14,w10
+- eor v17.16b,v17.16b,v17.16b
+- add w7,w7,w12
+- add w8,w8,w15
+- and w12,w5,w4
+- mov v17.d[1],v19.d[0]
+- bic w15,w6,w4
+- eor w11,w4,w4,ror#5
+- add w8,w8,w14
+- add v0.4s,v0.4s,v17.4s
+- orr w12,w12,w15
+- eor w11,w11,w4,ror#19
+- eor w15,w8,w8,ror#11
+- add v4.4s,v4.4s,v0.4s
+- add w7,w7,w12
+- ror w11,w11,#6
+- eor w14,w8,w9
+- eor w15,w15,w8,ror#20
+- add w7,w7,w11
+- ldr w12,[sp,#16]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w3,w3,w7
+- eor w13,w13,w9
+- st1 {v4.4s},[x17], #16
+- ext v4.16b,v1.16b,v2.16b,#4
+- add w6,w6,w12
+- add w7,w7,w15
+- and w12,w4,w3
+- bic w15,w5,w3
+- ext v7.16b,v3.16b,v0.16b,#4
+- eor w11,w3,w3,ror#5
+- add w7,w7,w13
+- mov d19,v0.d[1]
+- orr w12,w12,w15
+- eor w11,w11,w3,ror#19
+- ushr v6.4s,v4.4s,#7
+- eor w15,w7,w7,ror#11
+- ushr v5.4s,v4.4s,#3
+- add w6,w6,w12
+- add v1.4s,v1.4s,v7.4s
+- ror w11,w11,#6
+- sli v6.4s,v4.4s,#25
+- eor w13,w7,w8
+- eor w15,w15,w7,ror#20
+- ushr v7.4s,v4.4s,#18
+- add w6,w6,w11
+- ldr w12,[sp,#20]
+- and w14,w14,w13
+- eor v5.16b,v5.16b,v6.16b
+- ror w15,w15,#2
+- add w10,w10,w6
+- sli v7.4s,v4.4s,#14
+- eor w14,w14,w8
+- ushr v16.4s,v19.4s,#17
+- add w5,w5,w12
+- add w6,w6,w15
+- and w12,w3,w10
+- eor v5.16b,v5.16b,v7.16b
+- bic w15,w4,w10
+- eor w11,w10,w10,ror#5
+- sli v16.4s,v19.4s,#15
+- add w6,w6,w14
+- orr w12,w12,w15
+- ushr v17.4s,v19.4s,#10
+- eor w11,w11,w10,ror#19
+- eor w15,w6,w6,ror#11
+- ushr v7.4s,v19.4s,#19
+- add w5,w5,w12
+- ror w11,w11,#6
+- add v1.4s,v1.4s,v5.4s
+- eor w14,w6,w7
+- eor w15,w15,w6,ror#20
+- sli v7.4s,v19.4s,#13
+- add w5,w5,w11
+- ldr w12,[sp,#24]
+- and w13,w13,w14
+- eor v17.16b,v17.16b,v16.16b
+- ror w15,w15,#2
+- add w9,w9,w5
+- eor w13,w13,w7
+- eor v17.16b,v17.16b,v7.16b
+- add w4,w4,w12
+- add w5,w5,w15
+- and w12,w10,w9
+- add v1.4s,v1.4s,v17.4s
+- bic w15,w3,w9
+- eor w11,w9,w9,ror#5
+- add w5,w5,w13
+- ushr v18.4s,v1.4s,#17
+- orr w12,w12,w15
+- ushr v19.4s,v1.4s,#10
+- eor w11,w11,w9,ror#19
+- eor w15,w5,w5,ror#11
+- sli v18.4s,v1.4s,#15
+- add w4,w4,w12
+- ushr v17.4s,v1.4s,#19
+- ror w11,w11,#6
+- eor w13,w5,w6
+- eor v19.16b,v19.16b,v18.16b
+- eor w15,w15,w5,ror#20
+- add w4,w4,w11
+- sli v17.4s,v1.4s,#13
+- ldr w12,[sp,#28]
+- and w14,w14,w13
+- ror w15,w15,#2
+- ld1 {v4.4s},[x16], #16
+- add w8,w8,w4
+- eor v19.16b,v19.16b,v17.16b
+- eor w14,w14,w6
+- eor v17.16b,v17.16b,v17.16b
+- add w3,w3,w12
+- add w4,w4,w15
+- and w12,w9,w8
+- mov v17.d[1],v19.d[0]
+- bic w15,w10,w8
+- eor w11,w8,w8,ror#5
+- add w4,w4,w14
+- add v1.4s,v1.4s,v17.4s
+- orr w12,w12,w15
+- eor w11,w11,w8,ror#19
+- eor w15,w4,w4,ror#11
+- add v4.4s,v4.4s,v1.4s
+- add w3,w3,w12
+- ror w11,w11,#6
+- eor w14,w4,w5
+- eor w15,w15,w4,ror#20
+- add w3,w3,w11
+- ldr w12,[sp,#32]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w7,w7,w3
+- eor w13,w13,w5
+- st1 {v4.4s},[x17], #16
+- ext v4.16b,v2.16b,v3.16b,#4
+- add w10,w10,w12
+- add w3,w3,w15
+- and w12,w8,w7
+- bic w15,w9,w7
+- ext v7.16b,v0.16b,v1.16b,#4
+- eor w11,w7,w7,ror#5
+- add w3,w3,w13
+- mov d19,v1.d[1]
+- orr w12,w12,w15
+- eor w11,w11,w7,ror#19
+- ushr v6.4s,v4.4s,#7
+- eor w15,w3,w3,ror#11
+- ushr v5.4s,v4.4s,#3
+- add w10,w10,w12
+- add v2.4s,v2.4s,v7.4s
+- ror w11,w11,#6
+- sli v6.4s,v4.4s,#25
+- eor w13,w3,w4
+- eor w15,w15,w3,ror#20
+- ushr v7.4s,v4.4s,#18
+- add w10,w10,w11
+- ldr w12,[sp,#36]
+- and w14,w14,w13
+- eor v5.16b,v5.16b,v6.16b
+- ror w15,w15,#2
+- add w6,w6,w10
+- sli v7.4s,v4.4s,#14
+- eor w14,w14,w4
+- ushr v16.4s,v19.4s,#17
+- add w9,w9,w12
+- add w10,w10,w15
+- and w12,w7,w6
+- eor v5.16b,v5.16b,v7.16b
+- bic w15,w8,w6
+- eor w11,w6,w6,ror#5
+- sli v16.4s,v19.4s,#15
+- add w10,w10,w14
+- orr w12,w12,w15
+- ushr v17.4s,v19.4s,#10
+- eor w11,w11,w6,ror#19
+- eor w15,w10,w10,ror#11
+- ushr v7.4s,v19.4s,#19
+- add w9,w9,w12
+- ror w11,w11,#6
+- add v2.4s,v2.4s,v5.4s
+- eor w14,w10,w3
+- eor w15,w15,w10,ror#20
+- sli v7.4s,v19.4s,#13
+- add w9,w9,w11
+- ldr w12,[sp,#40]
+- and w13,w13,w14
+- eor v17.16b,v17.16b,v16.16b
+- ror w15,w15,#2
+- add w5,w5,w9
+- eor w13,w13,w3
+- eor v17.16b,v17.16b,v7.16b
+- add w8,w8,w12
+- add w9,w9,w15
+- and w12,w6,w5
+- add v2.4s,v2.4s,v17.4s
+- bic w15,w7,w5
+- eor w11,w5,w5,ror#5
+- add w9,w9,w13
+- ushr v18.4s,v2.4s,#17
+- orr w12,w12,w15
+- ushr v19.4s,v2.4s,#10
+- eor w11,w11,w5,ror#19
+- eor w15,w9,w9,ror#11
+- sli v18.4s,v2.4s,#15
+- add w8,w8,w12
+- ushr v17.4s,v2.4s,#19
+- ror w11,w11,#6
+- eor w13,w9,w10
+- eor v19.16b,v19.16b,v18.16b
+- eor w15,w15,w9,ror#20
+- add w8,w8,w11
+- sli v17.4s,v2.4s,#13
+- ldr w12,[sp,#44]
+- and w14,w14,w13
+- ror w15,w15,#2
+- ld1 {v4.4s},[x16], #16
+- add w4,w4,w8
+- eor v19.16b,v19.16b,v17.16b
+- eor w14,w14,w10
+- eor v17.16b,v17.16b,v17.16b
+- add w7,w7,w12
+- add w8,w8,w15
+- and w12,w5,w4
+- mov v17.d[1],v19.d[0]
+- bic w15,w6,w4
+- eor w11,w4,w4,ror#5
+- add w8,w8,w14
+- add v2.4s,v2.4s,v17.4s
+- orr w12,w12,w15
+- eor w11,w11,w4,ror#19
+- eor w15,w8,w8,ror#11
+- add v4.4s,v4.4s,v2.4s
+- add w7,w7,w12
+- ror w11,w11,#6
+- eor w14,w8,w9
+- eor w15,w15,w8,ror#20
+- add w7,w7,w11
+- ldr w12,[sp,#48]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w3,w3,w7
+- eor w13,w13,w9
+- st1 {v4.4s},[x17], #16
+- ext v4.16b,v3.16b,v0.16b,#4
+- add w6,w6,w12
+- add w7,w7,w15
+- and w12,w4,w3
+- bic w15,w5,w3
+- ext v7.16b,v1.16b,v2.16b,#4
+- eor w11,w3,w3,ror#5
+- add w7,w7,w13
+- mov d19,v2.d[1]
+- orr w12,w12,w15
+- eor w11,w11,w3,ror#19
+- ushr v6.4s,v4.4s,#7
+- eor w15,w7,w7,ror#11
+- ushr v5.4s,v4.4s,#3
+- add w6,w6,w12
+- add v3.4s,v3.4s,v7.4s
+- ror w11,w11,#6
+- sli v6.4s,v4.4s,#25
+- eor w13,w7,w8
+- eor w15,w15,w7,ror#20
+- ushr v7.4s,v4.4s,#18
+- add w6,w6,w11
+- ldr w12,[sp,#52]
+- and w14,w14,w13
+- eor v5.16b,v5.16b,v6.16b
+- ror w15,w15,#2
+- add w10,w10,w6
+- sli v7.4s,v4.4s,#14
+- eor w14,w14,w8
+- ushr v16.4s,v19.4s,#17
+- add w5,w5,w12
+- add w6,w6,w15
+- and w12,w3,w10
+- eor v5.16b,v5.16b,v7.16b
+- bic w15,w4,w10
+- eor w11,w10,w10,ror#5
+- sli v16.4s,v19.4s,#15
+- add w6,w6,w14
+- orr w12,w12,w15
+- ushr v17.4s,v19.4s,#10
+- eor w11,w11,w10,ror#19
+- eor w15,w6,w6,ror#11
+- ushr v7.4s,v19.4s,#19
+- add w5,w5,w12
+- ror w11,w11,#6
+- add v3.4s,v3.4s,v5.4s
+- eor w14,w6,w7
+- eor w15,w15,w6,ror#20
+- sli v7.4s,v19.4s,#13
+- add w5,w5,w11
+- ldr w12,[sp,#56]
+- and w13,w13,w14
+- eor v17.16b,v17.16b,v16.16b
+- ror w15,w15,#2
+- add w9,w9,w5
+- eor w13,w13,w7
+- eor v17.16b,v17.16b,v7.16b
+- add w4,w4,w12
+- add w5,w5,w15
+- and w12,w10,w9
+- add v3.4s,v3.4s,v17.4s
+- bic w15,w3,w9
+- eor w11,w9,w9,ror#5
+- add w5,w5,w13
+- ushr v18.4s,v3.4s,#17
+- orr w12,w12,w15
+- ushr v19.4s,v3.4s,#10
+- eor w11,w11,w9,ror#19
+- eor w15,w5,w5,ror#11
+- sli v18.4s,v3.4s,#15
+- add w4,w4,w12
+- ushr v17.4s,v3.4s,#19
+- ror w11,w11,#6
+- eor w13,w5,w6
+- eor v19.16b,v19.16b,v18.16b
+- eor w15,w15,w5,ror#20
+- add w4,w4,w11
+- sli v17.4s,v3.4s,#13
+- ldr w12,[sp,#60]
+- and w14,w14,w13
+- ror w15,w15,#2
+- ld1 {v4.4s},[x16], #16
+- add w8,w8,w4
+- eor v19.16b,v19.16b,v17.16b
+- eor w14,w14,w6
+- eor v17.16b,v17.16b,v17.16b
+- add w3,w3,w12
+- add w4,w4,w15
+- and w12,w9,w8
+- mov v17.d[1],v19.d[0]
+- bic w15,w10,w8
+- eor w11,w8,w8,ror#5
+- add w4,w4,w14
+- add v3.4s,v3.4s,v17.4s
+- orr w12,w12,w15
+- eor w11,w11,w8,ror#19
+- eor w15,w4,w4,ror#11
+- add v4.4s,v4.4s,v3.4s
+- add w3,w3,w12
+- ror w11,w11,#6
+- eor w14,w4,w5
+- eor w15,w15,w4,ror#20
+- add w3,w3,w11
+- ldr w12,[x16]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w7,w7,w3
+- eor w13,w13,w5
+- st1 {v4.4s},[x17], #16
+- cmp w12,#0 // check for K256 terminator
+- ldr w12,[sp,#0]
+- sub x17,x17,#64
+- bne .L_00_48
+-
+- sub x16,x16,#256 // rewind x16
+- cmp x1,x2
+- mov x17, #64
+- csel x17, x17, xzr, eq
+- sub x1,x1,x17 // avoid SEGV
+- mov x17,sp
+- add w10,w10,w12
+- add w3,w3,w15
+- and w12,w8,w7
+- ld1 {v0.16b},[x1],#16
+- bic w15,w9,w7
+- eor w11,w7,w7,ror#5
+- ld1 {v4.4s},[x16],#16
+- add w3,w3,w13
+- orr w12,w12,w15
+- eor w11,w11,w7,ror#19
+- eor w15,w3,w3,ror#11
+- rev32 v0.16b,v0.16b
+- add w10,w10,w12
+- ror w11,w11,#6
+- eor w13,w3,w4
+- eor w15,w15,w3,ror#20
+- add v4.4s,v4.4s,v0.4s
+- add w10,w10,w11
+- ldr w12,[sp,#4]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w6,w6,w10
+- eor w14,w14,w4
+- add w9,w9,w12
+- add w10,w10,w15
+- and w12,w7,w6
+- bic w15,w8,w6
+- eor w11,w6,w6,ror#5
+- add w10,w10,w14
+- orr w12,w12,w15
+- eor w11,w11,w6,ror#19
+- eor w15,w10,w10,ror#11
+- add w9,w9,w12
+- ror w11,w11,#6
+- eor w14,w10,w3
+- eor w15,w15,w10,ror#20
+- add w9,w9,w11
+- ldr w12,[sp,#8]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w5,w5,w9
+- eor w13,w13,w3
+- add w8,w8,w12
+- add w9,w9,w15
+- and w12,w6,w5
+- bic w15,w7,w5
+- eor w11,w5,w5,ror#5
+- add w9,w9,w13
+- orr w12,w12,w15
+- eor w11,w11,w5,ror#19
+- eor w15,w9,w9,ror#11
+- add w8,w8,w12
+- ror w11,w11,#6
+- eor w13,w9,w10
+- eor w15,w15,w9,ror#20
+- add w8,w8,w11
+- ldr w12,[sp,#12]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w4,w4,w8
+- eor w14,w14,w10
+- add w7,w7,w12
+- add w8,w8,w15
+- and w12,w5,w4
+- bic w15,w6,w4
+- eor w11,w4,w4,ror#5
+- add w8,w8,w14
+- orr w12,w12,w15
+- eor w11,w11,w4,ror#19
+- eor w15,w8,w8,ror#11
+- add w7,w7,w12
+- ror w11,w11,#6
+- eor w14,w8,w9
+- eor w15,w15,w8,ror#20
+- add w7,w7,w11
+- ldr w12,[sp,#16]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w3,w3,w7
+- eor w13,w13,w9
+- st1 {v4.4s},[x17], #16
+- add w6,w6,w12
+- add w7,w7,w15
+- and w12,w4,w3
+- ld1 {v1.16b},[x1],#16
+- bic w15,w5,w3
+- eor w11,w3,w3,ror#5
+- ld1 {v4.4s},[x16],#16
+- add w7,w7,w13
+- orr w12,w12,w15
+- eor w11,w11,w3,ror#19
+- eor w15,w7,w7,ror#11
+- rev32 v1.16b,v1.16b
+- add w6,w6,w12
+- ror w11,w11,#6
+- eor w13,w7,w8
+- eor w15,w15,w7,ror#20
+- add v4.4s,v4.4s,v1.4s
+- add w6,w6,w11
+- ldr w12,[sp,#20]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w10,w10,w6
+- eor w14,w14,w8
+- add w5,w5,w12
+- add w6,w6,w15
+- and w12,w3,w10
+- bic w15,w4,w10
+- eor w11,w10,w10,ror#5
+- add w6,w6,w14
+- orr w12,w12,w15
+- eor w11,w11,w10,ror#19
+- eor w15,w6,w6,ror#11
+- add w5,w5,w12
+- ror w11,w11,#6
+- eor w14,w6,w7
+- eor w15,w15,w6,ror#20
+- add w5,w5,w11
+- ldr w12,[sp,#24]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w9,w9,w5
+- eor w13,w13,w7
+- add w4,w4,w12
+- add w5,w5,w15
+- and w12,w10,w9
+- bic w15,w3,w9
+- eor w11,w9,w9,ror#5
+- add w5,w5,w13
+- orr w12,w12,w15
+- eor w11,w11,w9,ror#19
+- eor w15,w5,w5,ror#11
+- add w4,w4,w12
+- ror w11,w11,#6
+- eor w13,w5,w6
+- eor w15,w15,w5,ror#20
+- add w4,w4,w11
+- ldr w12,[sp,#28]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w8,w8,w4
+- eor w14,w14,w6
+- add w3,w3,w12
+- add w4,w4,w15
+- and w12,w9,w8
+- bic w15,w10,w8
+- eor w11,w8,w8,ror#5
+- add w4,w4,w14
+- orr w12,w12,w15
+- eor w11,w11,w8,ror#19
+- eor w15,w4,w4,ror#11
+- add w3,w3,w12
+- ror w11,w11,#6
+- eor w14,w4,w5
+- eor w15,w15,w4,ror#20
+- add w3,w3,w11
+- ldr w12,[sp,#32]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w7,w7,w3
+- eor w13,w13,w5
+- st1 {v4.4s},[x17], #16
+- add w10,w10,w12
+- add w3,w3,w15
+- and w12,w8,w7
+- ld1 {v2.16b},[x1],#16
+- bic w15,w9,w7
+- eor w11,w7,w7,ror#5
+- ld1 {v4.4s},[x16],#16
+- add w3,w3,w13
+- orr w12,w12,w15
+- eor w11,w11,w7,ror#19
+- eor w15,w3,w3,ror#11
+- rev32 v2.16b,v2.16b
+- add w10,w10,w12
+- ror w11,w11,#6
+- eor w13,w3,w4
+- eor w15,w15,w3,ror#20
+- add v4.4s,v4.4s,v2.4s
+- add w10,w10,w11
+- ldr w12,[sp,#36]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w6,w6,w10
+- eor w14,w14,w4
+- add w9,w9,w12
+- add w10,w10,w15
+- and w12,w7,w6
+- bic w15,w8,w6
+- eor w11,w6,w6,ror#5
+- add w10,w10,w14
+- orr w12,w12,w15
+- eor w11,w11,w6,ror#19
+- eor w15,w10,w10,ror#11
+- add w9,w9,w12
+- ror w11,w11,#6
+- eor w14,w10,w3
+- eor w15,w15,w10,ror#20
+- add w9,w9,w11
+- ldr w12,[sp,#40]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w5,w5,w9
+- eor w13,w13,w3
+- add w8,w8,w12
+- add w9,w9,w15
+- and w12,w6,w5
+- bic w15,w7,w5
+- eor w11,w5,w5,ror#5
+- add w9,w9,w13
+- orr w12,w12,w15
+- eor w11,w11,w5,ror#19
+- eor w15,w9,w9,ror#11
+- add w8,w8,w12
+- ror w11,w11,#6
+- eor w13,w9,w10
+- eor w15,w15,w9,ror#20
+- add w8,w8,w11
+- ldr w12,[sp,#44]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w4,w4,w8
+- eor w14,w14,w10
+- add w7,w7,w12
+- add w8,w8,w15
+- and w12,w5,w4
+- bic w15,w6,w4
+- eor w11,w4,w4,ror#5
+- add w8,w8,w14
+- orr w12,w12,w15
+- eor w11,w11,w4,ror#19
+- eor w15,w8,w8,ror#11
+- add w7,w7,w12
+- ror w11,w11,#6
+- eor w14,w8,w9
+- eor w15,w15,w8,ror#20
+- add w7,w7,w11
+- ldr w12,[sp,#48]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w3,w3,w7
+- eor w13,w13,w9
+- st1 {v4.4s},[x17], #16
+- add w6,w6,w12
+- add w7,w7,w15
+- and w12,w4,w3
+- ld1 {v3.16b},[x1],#16
+- bic w15,w5,w3
+- eor w11,w3,w3,ror#5
+- ld1 {v4.4s},[x16],#16
+- add w7,w7,w13
+- orr w12,w12,w15
+- eor w11,w11,w3,ror#19
+- eor w15,w7,w7,ror#11
+- rev32 v3.16b,v3.16b
+- add w6,w6,w12
+- ror w11,w11,#6
+- eor w13,w7,w8
+- eor w15,w15,w7,ror#20
+- add v4.4s,v4.4s,v3.4s
+- add w6,w6,w11
+- ldr w12,[sp,#52]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w10,w10,w6
+- eor w14,w14,w8
+- add w5,w5,w12
+- add w6,w6,w15
+- and w12,w3,w10
+- bic w15,w4,w10
+- eor w11,w10,w10,ror#5
+- add w6,w6,w14
+- orr w12,w12,w15
+- eor w11,w11,w10,ror#19
+- eor w15,w6,w6,ror#11
+- add w5,w5,w12
+- ror w11,w11,#6
+- eor w14,w6,w7
+- eor w15,w15,w6,ror#20
+- add w5,w5,w11
+- ldr w12,[sp,#56]
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w9,w9,w5
+- eor w13,w13,w7
+- add w4,w4,w12
+- add w5,w5,w15
+- and w12,w10,w9
+- bic w15,w3,w9
+- eor w11,w9,w9,ror#5
+- add w5,w5,w13
+- orr w12,w12,w15
+- eor w11,w11,w9,ror#19
+- eor w15,w5,w5,ror#11
+- add w4,w4,w12
+- ror w11,w11,#6
+- eor w13,w5,w6
+- eor w15,w15,w5,ror#20
+- add w4,w4,w11
+- ldr w12,[sp,#60]
+- and w14,w14,w13
+- ror w15,w15,#2
+- add w8,w8,w4
+- eor w14,w14,w6
+- add w3,w3,w12
+- add w4,w4,w15
+- and w12,w9,w8
+- bic w15,w10,w8
+- eor w11,w8,w8,ror#5
+- add w4,w4,w14
+- orr w12,w12,w15
+- eor w11,w11,w8,ror#19
+- eor w15,w4,w4,ror#11
+- add w3,w3,w12
+- ror w11,w11,#6
+- eor w14,w4,w5
+- eor w15,w15,w4,ror#20
+- add w3,w3,w11
+- and w13,w13,w14
+- ror w15,w15,#2
+- add w7,w7,w3
+- eor w13,w13,w5
+- st1 {v4.4s},[x17], #16
+- add w3,w3,w15 // h+=Sigma0(a) from the past
+- ldp w11,w12,[x0,#0]
+- add w3,w3,w13 // h+=Maj(a,b,c) from the past
+- ldp w13,w14,[x0,#8]
+- add w3,w3,w11 // accumulate
+- add w4,w4,w12
+- ldp w11,w12,[x0,#16]
+- add w5,w5,w13
+- add w6,w6,w14
+- ldp w13,w14,[x0,#24]
+- add w7,w7,w11
+- add w8,w8,w12
+- ldr w12,[sp,#0]
+- stp w3,w4,[x0,#0]
+- add w9,w9,w13
+- mov w13,wzr
+- stp w5,w6,[x0,#8]
+- add w10,w10,w14
+- stp w7,w8,[x0,#16]
+- eor w14,w4,w5
+- stp w9,w10,[x0,#24]
+- mov w15,wzr
+- mov x17,sp
+- b.ne .L_00_48
+-
+- ldr x29,[x29]
+- add sp,sp,#16*4+16
+- ret
+-.size sha256_block_neon,.-sha256_block_neon
+-#ifndef __KERNEL__
+-.comm OPENSSL_armcap_P,4,4
+-#endif
+diff --git a/arch/arm64/crypto/sha512-core.S b/arch/arm64/crypto/sha512-core.S
+deleted file mode 100644
+index bd0f59f06c9d..000000000000
+--- a/arch/arm64/crypto/sha512-core.S
++++ /dev/null
+@@ -1,1085 +0,0 @@
+-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+-//
+-// Licensed under the OpenSSL license (the "License"). You may not use
+-// this file except in compliance with the License. You can obtain a copy
+-// in the file LICENSE in the source distribution or at
+-// https://www.openssl.org/source/license.html
+-
+-// ====================================================================
+-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+-// project. The module is, however, dual licensed under OpenSSL and
+-// CRYPTOGAMS licenses depending on where you obtain it. For further
+-// details see http://www.openssl.org/~appro/cryptogams/.
+-//
+-// Permission to use under GPLv2 terms is granted.
+-// ====================================================================
+-//
+-// SHA256/512 for ARMv8.
+-//
+-// Performance in cycles per processed byte and improvement coefficient
+-// over code generated with "default" compiler:
+-//
+-// SHA256-hw SHA256(*) SHA512
+-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
+-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
+-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
+-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
+-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
+-//
+-// (*) Software SHA256 results are of lesser relevance, presented
+-// mostly for informational purposes.
+-// (**) The result is a trade-off: it's possible to improve it by
+-// 10% (or by 1 cycle per round), but at the cost of 20% loss
+-// on Cortex-A53 (or by 4 cycles per round).
+-// (***) Super-impressive coefficients over gcc-generated code are
+-// indication of some compiler "pathology", most notably code
+-// generated with -mgeneral-regs-only is significanty faster
+-// and the gap is only 40-90%.
+-//
+-// October 2016.
+-//
+-// Originally it was reckoned that it makes no sense to implement NEON
+-// version of SHA256 for 64-bit processors. This is because performance
+-// improvement on most wide-spread Cortex-A5x processors was observed
+-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+-// observed that 32-bit NEON SHA256 performs significantly better than
+-// 64-bit scalar version on *some* of the more recent processors. As
+-// result 64-bit NEON version of SHA256 was added to provide best
+-// all-round performance. For example it executes ~30% faster on X-Gene
+-// and Mongoose. [For reference, NEON version of SHA512 is bound to
+-// deliver much less improvement, likely *negative* on Cortex-A5x.
+-// Which is why NEON support is limited to SHA256.]
+-
+-#ifndef __KERNEL__
+-# include "arm_arch.h"
+-#endif
+-
+-.text
+-
+-.extern OPENSSL_armcap_P
+-.globl sha512_block_data_order
+-.type sha512_block_data_order,%function
+-.align 6
+-sha512_block_data_order:
+- stp x29,x30,[sp,#-128]!
+- add x29,sp,#0
+-
+- stp x19,x20,[sp,#16]
+- stp x21,x22,[sp,#32]
+- stp x23,x24,[sp,#48]
+- stp x25,x26,[sp,#64]
+- stp x27,x28,[sp,#80]
+- sub sp,sp,#4*8
+-
+- ldp x20,x21,[x0] // load context
+- ldp x22,x23,[x0,#2*8]
+- ldp x24,x25,[x0,#4*8]
+- add x2,x1,x2,lsl#7 // end of input
+- ldp x26,x27,[x0,#6*8]
+- adr x30,.LK512
+- stp x0,x2,[x29,#96]
+-
+-.Loop:
+- ldp x3,x4,[x1],#2*8
+- ldr x19,[x30],#8 // *K++
+- eor x28,x21,x22 // magic seed
+- str x1,[x29,#112]
+-#ifndef __AARCH64EB__
+- rev x3,x3 // 0
+-#endif
+- ror x16,x24,#14
+- add x27,x27,x19 // h+=K[i]
+- eor x6,x24,x24,ror#23
+- and x17,x25,x24
+- bic x19,x26,x24
+- add x27,x27,x3 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x20,x21 // a^b, b^c in next round
+- eor x16,x16,x6,ror#18 // Sigma1(e)
+- ror x6,x20,#28
+- add x27,x27,x17 // h+=Ch(e,f,g)
+- eor x17,x20,x20,ror#5
+- add x27,x27,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x23,x23,x27 // d+=h
+- eor x28,x28,x21 // Maj(a,b,c)
+- eor x17,x6,x17,ror#34 // Sigma0(a)
+- add x27,x27,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x27,x27,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x4,x4 // 1
+-#endif
+- ldp x5,x6,[x1],#2*8
+- add x27,x27,x17 // h+=Sigma0(a)
+- ror x16,x23,#14
+- add x26,x26,x28 // h+=K[i]
+- eor x7,x23,x23,ror#23
+- and x17,x24,x23
+- bic x28,x25,x23
+- add x26,x26,x4 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x27,x20 // a^b, b^c in next round
+- eor x16,x16,x7,ror#18 // Sigma1(e)
+- ror x7,x27,#28
+- add x26,x26,x17 // h+=Ch(e,f,g)
+- eor x17,x27,x27,ror#5
+- add x26,x26,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x22,x22,x26 // d+=h
+- eor x19,x19,x20 // Maj(a,b,c)
+- eor x17,x7,x17,ror#34 // Sigma0(a)
+- add x26,x26,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x26,x26,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x5,x5 // 2
+-#endif
+- add x26,x26,x17 // h+=Sigma0(a)
+- ror x16,x22,#14
+- add x25,x25,x19 // h+=K[i]
+- eor x8,x22,x22,ror#23
+- and x17,x23,x22
+- bic x19,x24,x22
+- add x25,x25,x5 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x26,x27 // a^b, b^c in next round
+- eor x16,x16,x8,ror#18 // Sigma1(e)
+- ror x8,x26,#28
+- add x25,x25,x17 // h+=Ch(e,f,g)
+- eor x17,x26,x26,ror#5
+- add x25,x25,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x21,x21,x25 // d+=h
+- eor x28,x28,x27 // Maj(a,b,c)
+- eor x17,x8,x17,ror#34 // Sigma0(a)
+- add x25,x25,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x25,x25,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x6,x6 // 3
+-#endif
+- ldp x7,x8,[x1],#2*8
+- add x25,x25,x17 // h+=Sigma0(a)
+- ror x16,x21,#14
+- add x24,x24,x28 // h+=K[i]
+- eor x9,x21,x21,ror#23
+- and x17,x22,x21
+- bic x28,x23,x21
+- add x24,x24,x6 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x25,x26 // a^b, b^c in next round
+- eor x16,x16,x9,ror#18 // Sigma1(e)
+- ror x9,x25,#28
+- add x24,x24,x17 // h+=Ch(e,f,g)
+- eor x17,x25,x25,ror#5
+- add x24,x24,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x20,x20,x24 // d+=h
+- eor x19,x19,x26 // Maj(a,b,c)
+- eor x17,x9,x17,ror#34 // Sigma0(a)
+- add x24,x24,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x24,x24,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x7,x7 // 4
+-#endif
+- add x24,x24,x17 // h+=Sigma0(a)
+- ror x16,x20,#14
+- add x23,x23,x19 // h+=K[i]
+- eor x10,x20,x20,ror#23
+- and x17,x21,x20
+- bic x19,x22,x20
+- add x23,x23,x7 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x24,x25 // a^b, b^c in next round
+- eor x16,x16,x10,ror#18 // Sigma1(e)
+- ror x10,x24,#28
+- add x23,x23,x17 // h+=Ch(e,f,g)
+- eor x17,x24,x24,ror#5
+- add x23,x23,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x27,x27,x23 // d+=h
+- eor x28,x28,x25 // Maj(a,b,c)
+- eor x17,x10,x17,ror#34 // Sigma0(a)
+- add x23,x23,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x23,x23,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x8,x8 // 5
+-#endif
+- ldp x9,x10,[x1],#2*8
+- add x23,x23,x17 // h+=Sigma0(a)
+- ror x16,x27,#14
+- add x22,x22,x28 // h+=K[i]
+- eor x11,x27,x27,ror#23
+- and x17,x20,x27
+- bic x28,x21,x27
+- add x22,x22,x8 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x23,x24 // a^b, b^c in next round
+- eor x16,x16,x11,ror#18 // Sigma1(e)
+- ror x11,x23,#28
+- add x22,x22,x17 // h+=Ch(e,f,g)
+- eor x17,x23,x23,ror#5
+- add x22,x22,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x26,x26,x22 // d+=h
+- eor x19,x19,x24 // Maj(a,b,c)
+- eor x17,x11,x17,ror#34 // Sigma0(a)
+- add x22,x22,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x22,x22,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x9,x9 // 6
+-#endif
+- add x22,x22,x17 // h+=Sigma0(a)
+- ror x16,x26,#14
+- add x21,x21,x19 // h+=K[i]
+- eor x12,x26,x26,ror#23
+- and x17,x27,x26
+- bic x19,x20,x26
+- add x21,x21,x9 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x22,x23 // a^b, b^c in next round
+- eor x16,x16,x12,ror#18 // Sigma1(e)
+- ror x12,x22,#28
+- add x21,x21,x17 // h+=Ch(e,f,g)
+- eor x17,x22,x22,ror#5
+- add x21,x21,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x25,x25,x21 // d+=h
+- eor x28,x28,x23 // Maj(a,b,c)
+- eor x17,x12,x17,ror#34 // Sigma0(a)
+- add x21,x21,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x21,x21,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x10,x10 // 7
+-#endif
+- ldp x11,x12,[x1],#2*8
+- add x21,x21,x17 // h+=Sigma0(a)
+- ror x16,x25,#14
+- add x20,x20,x28 // h+=K[i]
+- eor x13,x25,x25,ror#23
+- and x17,x26,x25
+- bic x28,x27,x25
+- add x20,x20,x10 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x21,x22 // a^b, b^c in next round
+- eor x16,x16,x13,ror#18 // Sigma1(e)
+- ror x13,x21,#28
+- add x20,x20,x17 // h+=Ch(e,f,g)
+- eor x17,x21,x21,ror#5
+- add x20,x20,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x24,x24,x20 // d+=h
+- eor x19,x19,x22 // Maj(a,b,c)
+- eor x17,x13,x17,ror#34 // Sigma0(a)
+- add x20,x20,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x20,x20,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x11,x11 // 8
+-#endif
+- add x20,x20,x17 // h+=Sigma0(a)
+- ror x16,x24,#14
+- add x27,x27,x19 // h+=K[i]
+- eor x14,x24,x24,ror#23
+- and x17,x25,x24
+- bic x19,x26,x24
+- add x27,x27,x11 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x20,x21 // a^b, b^c in next round
+- eor x16,x16,x14,ror#18 // Sigma1(e)
+- ror x14,x20,#28
+- add x27,x27,x17 // h+=Ch(e,f,g)
+- eor x17,x20,x20,ror#5
+- add x27,x27,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x23,x23,x27 // d+=h
+- eor x28,x28,x21 // Maj(a,b,c)
+- eor x17,x14,x17,ror#34 // Sigma0(a)
+- add x27,x27,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x27,x27,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x12,x12 // 9
+-#endif
+- ldp x13,x14,[x1],#2*8
+- add x27,x27,x17 // h+=Sigma0(a)
+- ror x16,x23,#14
+- add x26,x26,x28 // h+=K[i]
+- eor x15,x23,x23,ror#23
+- and x17,x24,x23
+- bic x28,x25,x23
+- add x26,x26,x12 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x27,x20 // a^b, b^c in next round
+- eor x16,x16,x15,ror#18 // Sigma1(e)
+- ror x15,x27,#28
+- add x26,x26,x17 // h+=Ch(e,f,g)
+- eor x17,x27,x27,ror#5
+- add x26,x26,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x22,x22,x26 // d+=h
+- eor x19,x19,x20 // Maj(a,b,c)
+- eor x17,x15,x17,ror#34 // Sigma0(a)
+- add x26,x26,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x26,x26,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x13,x13 // 10
+-#endif
+- add x26,x26,x17 // h+=Sigma0(a)
+- ror x16,x22,#14
+- add x25,x25,x19 // h+=K[i]
+- eor x0,x22,x22,ror#23
+- and x17,x23,x22
+- bic x19,x24,x22
+- add x25,x25,x13 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x26,x27 // a^b, b^c in next round
+- eor x16,x16,x0,ror#18 // Sigma1(e)
+- ror x0,x26,#28
+- add x25,x25,x17 // h+=Ch(e,f,g)
+- eor x17,x26,x26,ror#5
+- add x25,x25,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x21,x21,x25 // d+=h
+- eor x28,x28,x27 // Maj(a,b,c)
+- eor x17,x0,x17,ror#34 // Sigma0(a)
+- add x25,x25,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x25,x25,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x14,x14 // 11
+-#endif
+- ldp x15,x0,[x1],#2*8
+- add x25,x25,x17 // h+=Sigma0(a)
+- str x6,[sp,#24]
+- ror x16,x21,#14
+- add x24,x24,x28 // h+=K[i]
+- eor x6,x21,x21,ror#23
+- and x17,x22,x21
+- bic x28,x23,x21
+- add x24,x24,x14 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x25,x26 // a^b, b^c in next round
+- eor x16,x16,x6,ror#18 // Sigma1(e)
+- ror x6,x25,#28
+- add x24,x24,x17 // h+=Ch(e,f,g)
+- eor x17,x25,x25,ror#5
+- add x24,x24,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x20,x20,x24 // d+=h
+- eor x19,x19,x26 // Maj(a,b,c)
+- eor x17,x6,x17,ror#34 // Sigma0(a)
+- add x24,x24,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x24,x24,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x15,x15 // 12
+-#endif
+- add x24,x24,x17 // h+=Sigma0(a)
+- str x7,[sp,#0]
+- ror x16,x20,#14
+- add x23,x23,x19 // h+=K[i]
+- eor x7,x20,x20,ror#23
+- and x17,x21,x20
+- bic x19,x22,x20
+- add x23,x23,x15 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x24,x25 // a^b, b^c in next round
+- eor x16,x16,x7,ror#18 // Sigma1(e)
+- ror x7,x24,#28
+- add x23,x23,x17 // h+=Ch(e,f,g)
+- eor x17,x24,x24,ror#5
+- add x23,x23,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x27,x27,x23 // d+=h
+- eor x28,x28,x25 // Maj(a,b,c)
+- eor x17,x7,x17,ror#34 // Sigma0(a)
+- add x23,x23,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x23,x23,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x0,x0 // 13
+-#endif
+- ldp x1,x2,[x1]
+- add x23,x23,x17 // h+=Sigma0(a)
+- str x8,[sp,#8]
+- ror x16,x27,#14
+- add x22,x22,x28 // h+=K[i]
+- eor x8,x27,x27,ror#23
+- and x17,x20,x27
+- bic x28,x21,x27
+- add x22,x22,x0 // h+=X[i]
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x23,x24 // a^b, b^c in next round
+- eor x16,x16,x8,ror#18 // Sigma1(e)
+- ror x8,x23,#28
+- add x22,x22,x17 // h+=Ch(e,f,g)
+- eor x17,x23,x23,ror#5
+- add x22,x22,x16 // h+=Sigma1(e)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- add x26,x26,x22 // d+=h
+- eor x19,x19,x24 // Maj(a,b,c)
+- eor x17,x8,x17,ror#34 // Sigma0(a)
+- add x22,x22,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- //add x22,x22,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x1,x1 // 14
+-#endif
+- ldr x6,[sp,#24]
+- add x22,x22,x17 // h+=Sigma0(a)
+- str x9,[sp,#16]
+- ror x16,x26,#14
+- add x21,x21,x19 // h+=K[i]
+- eor x9,x26,x26,ror#23
+- and x17,x27,x26
+- bic x19,x20,x26
+- add x21,x21,x1 // h+=X[i]
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x22,x23 // a^b, b^c in next round
+- eor x16,x16,x9,ror#18 // Sigma1(e)
+- ror x9,x22,#28
+- add x21,x21,x17 // h+=Ch(e,f,g)
+- eor x17,x22,x22,ror#5
+- add x21,x21,x16 // h+=Sigma1(e)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- add x25,x25,x21 // d+=h
+- eor x28,x28,x23 // Maj(a,b,c)
+- eor x17,x9,x17,ror#34 // Sigma0(a)
+- add x21,x21,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- //add x21,x21,x17 // h+=Sigma0(a)
+-#ifndef __AARCH64EB__
+- rev x2,x2 // 15
+-#endif
+- ldr x7,[sp,#0]
+- add x21,x21,x17 // h+=Sigma0(a)
+- str x10,[sp,#24]
+- ror x16,x25,#14
+- add x20,x20,x28 // h+=K[i]
+- ror x9,x4,#1
+- and x17,x26,x25
+- ror x8,x1,#19
+- bic x28,x27,x25
+- ror x10,x21,#28
+- add x20,x20,x2 // h+=X[i]
+- eor x16,x16,x25,ror#18
+- eor x9,x9,x4,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x21,x22 // a^b, b^c in next round
+- eor x16,x16,x25,ror#41 // Sigma1(e)
+- eor x10,x10,x21,ror#34
+- add x20,x20,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x8,x8,x1,ror#61
+- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+- add x20,x20,x16 // h+=Sigma1(e)
+- eor x19,x19,x22 // Maj(a,b,c)
+- eor x17,x10,x21,ror#39 // Sigma0(a)
+- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+- add x3,x3,x12
+- add x24,x24,x20 // d+=h
+- add x20,x20,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x3,x3,x9
+- add x20,x20,x17 // h+=Sigma0(a)
+- add x3,x3,x8
+-.Loop_16_xx:
+- ldr x8,[sp,#8]
+- str x11,[sp,#0]
+- ror x16,x24,#14
+- add x27,x27,x19 // h+=K[i]
+- ror x10,x5,#1
+- and x17,x25,x24
+- ror x9,x2,#19
+- bic x19,x26,x24
+- ror x11,x20,#28
+- add x27,x27,x3 // h+=X[i]
+- eor x16,x16,x24,ror#18
+- eor x10,x10,x5,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x20,x21 // a^b, b^c in next round
+- eor x16,x16,x24,ror#41 // Sigma1(e)
+- eor x11,x11,x20,ror#34
+- add x27,x27,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x9,x9,x2,ror#61
+- eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
+- add x27,x27,x16 // h+=Sigma1(e)
+- eor x28,x28,x21 // Maj(a,b,c)
+- eor x17,x11,x20,ror#39 // Sigma0(a)
+- eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
+- add x4,x4,x13
+- add x23,x23,x27 // d+=h
+- add x27,x27,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x4,x4,x10
+- add x27,x27,x17 // h+=Sigma0(a)
+- add x4,x4,x9
+- ldr x9,[sp,#16]
+- str x12,[sp,#8]
+- ror x16,x23,#14
+- add x26,x26,x28 // h+=K[i]
+- ror x11,x6,#1
+- and x17,x24,x23
+- ror x10,x3,#19
+- bic x28,x25,x23
+- ror x12,x27,#28
+- add x26,x26,x4 // h+=X[i]
+- eor x16,x16,x23,ror#18
+- eor x11,x11,x6,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x27,x20 // a^b, b^c in next round
+- eor x16,x16,x23,ror#41 // Sigma1(e)
+- eor x12,x12,x27,ror#34
+- add x26,x26,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x10,x10,x3,ror#61
+- eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
+- add x26,x26,x16 // h+=Sigma1(e)
+- eor x19,x19,x20 // Maj(a,b,c)
+- eor x17,x12,x27,ror#39 // Sigma0(a)
+- eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
+- add x5,x5,x14
+- add x22,x22,x26 // d+=h
+- add x26,x26,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x5,x5,x11
+- add x26,x26,x17 // h+=Sigma0(a)
+- add x5,x5,x10
+- ldr x10,[sp,#24]
+- str x13,[sp,#16]
+- ror x16,x22,#14
+- add x25,x25,x19 // h+=K[i]
+- ror x12,x7,#1
+- and x17,x23,x22
+- ror x11,x4,#19
+- bic x19,x24,x22
+- ror x13,x26,#28
+- add x25,x25,x5 // h+=X[i]
+- eor x16,x16,x22,ror#18
+- eor x12,x12,x7,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x26,x27 // a^b, b^c in next round
+- eor x16,x16,x22,ror#41 // Sigma1(e)
+- eor x13,x13,x26,ror#34
+- add x25,x25,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x11,x11,x4,ror#61
+- eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
+- add x25,x25,x16 // h+=Sigma1(e)
+- eor x28,x28,x27 // Maj(a,b,c)
+- eor x17,x13,x26,ror#39 // Sigma0(a)
+- eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
+- add x6,x6,x15
+- add x21,x21,x25 // d+=h
+- add x25,x25,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x6,x6,x12
+- add x25,x25,x17 // h+=Sigma0(a)
+- add x6,x6,x11
+- ldr x11,[sp,#0]
+- str x14,[sp,#24]
+- ror x16,x21,#14
+- add x24,x24,x28 // h+=K[i]
+- ror x13,x8,#1
+- and x17,x22,x21
+- ror x12,x5,#19
+- bic x28,x23,x21
+- ror x14,x25,#28
+- add x24,x24,x6 // h+=X[i]
+- eor x16,x16,x21,ror#18
+- eor x13,x13,x8,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x25,x26 // a^b, b^c in next round
+- eor x16,x16,x21,ror#41 // Sigma1(e)
+- eor x14,x14,x25,ror#34
+- add x24,x24,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x12,x12,x5,ror#61
+- eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
+- add x24,x24,x16 // h+=Sigma1(e)
+- eor x19,x19,x26 // Maj(a,b,c)
+- eor x17,x14,x25,ror#39 // Sigma0(a)
+- eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
+- add x7,x7,x0
+- add x20,x20,x24 // d+=h
+- add x24,x24,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x7,x7,x13
+- add x24,x24,x17 // h+=Sigma0(a)
+- add x7,x7,x12
+- ldr x12,[sp,#8]
+- str x15,[sp,#0]
+- ror x16,x20,#14
+- add x23,x23,x19 // h+=K[i]
+- ror x14,x9,#1
+- and x17,x21,x20
+- ror x13,x6,#19
+- bic x19,x22,x20
+- ror x15,x24,#28
+- add x23,x23,x7 // h+=X[i]
+- eor x16,x16,x20,ror#18
+- eor x14,x14,x9,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x24,x25 // a^b, b^c in next round
+- eor x16,x16,x20,ror#41 // Sigma1(e)
+- eor x15,x15,x24,ror#34
+- add x23,x23,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x13,x13,x6,ror#61
+- eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
+- add x23,x23,x16 // h+=Sigma1(e)
+- eor x28,x28,x25 // Maj(a,b,c)
+- eor x17,x15,x24,ror#39 // Sigma0(a)
+- eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
+- add x8,x8,x1
+- add x27,x27,x23 // d+=h
+- add x23,x23,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x8,x8,x14
+- add x23,x23,x17 // h+=Sigma0(a)
+- add x8,x8,x13
+- ldr x13,[sp,#16]
+- str x0,[sp,#8]
+- ror x16,x27,#14
+- add x22,x22,x28 // h+=K[i]
+- ror x15,x10,#1
+- and x17,x20,x27
+- ror x14,x7,#19
+- bic x28,x21,x27
+- ror x0,x23,#28
+- add x22,x22,x8 // h+=X[i]
+- eor x16,x16,x27,ror#18
+- eor x15,x15,x10,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x23,x24 // a^b, b^c in next round
+- eor x16,x16,x27,ror#41 // Sigma1(e)
+- eor x0,x0,x23,ror#34
+- add x22,x22,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x14,x14,x7,ror#61
+- eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
+- add x22,x22,x16 // h+=Sigma1(e)
+- eor x19,x19,x24 // Maj(a,b,c)
+- eor x17,x0,x23,ror#39 // Sigma0(a)
+- eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
+- add x9,x9,x2
+- add x26,x26,x22 // d+=h
+- add x22,x22,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x9,x9,x15
+- add x22,x22,x17 // h+=Sigma0(a)
+- add x9,x9,x14
+- ldr x14,[sp,#24]
+- str x1,[sp,#16]
+- ror x16,x26,#14
+- add x21,x21,x19 // h+=K[i]
+- ror x0,x11,#1
+- and x17,x27,x26
+- ror x15,x8,#19
+- bic x19,x20,x26
+- ror x1,x22,#28
+- add x21,x21,x9 // h+=X[i]
+- eor x16,x16,x26,ror#18
+- eor x0,x0,x11,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x22,x23 // a^b, b^c in next round
+- eor x16,x16,x26,ror#41 // Sigma1(e)
+- eor x1,x1,x22,ror#34
+- add x21,x21,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x15,x15,x8,ror#61
+- eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
+- add x21,x21,x16 // h+=Sigma1(e)
+- eor x28,x28,x23 // Maj(a,b,c)
+- eor x17,x1,x22,ror#39 // Sigma0(a)
+- eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
+- add x10,x10,x3
+- add x25,x25,x21 // d+=h
+- add x21,x21,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x10,x10,x0
+- add x21,x21,x17 // h+=Sigma0(a)
+- add x10,x10,x15
+- ldr x15,[sp,#0]
+- str x2,[sp,#24]
+- ror x16,x25,#14
+- add x20,x20,x28 // h+=K[i]
+- ror x1,x12,#1
+- and x17,x26,x25
+- ror x0,x9,#19
+- bic x28,x27,x25
+- ror x2,x21,#28
+- add x20,x20,x10 // h+=X[i]
+- eor x16,x16,x25,ror#18
+- eor x1,x1,x12,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x21,x22 // a^b, b^c in next round
+- eor x16,x16,x25,ror#41 // Sigma1(e)
+- eor x2,x2,x21,ror#34
+- add x20,x20,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x0,x0,x9,ror#61
+- eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
+- add x20,x20,x16 // h+=Sigma1(e)
+- eor x19,x19,x22 // Maj(a,b,c)
+- eor x17,x2,x21,ror#39 // Sigma0(a)
+- eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
+- add x11,x11,x4
+- add x24,x24,x20 // d+=h
+- add x20,x20,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x11,x11,x1
+- add x20,x20,x17 // h+=Sigma0(a)
+- add x11,x11,x0
+- ldr x0,[sp,#8]
+- str x3,[sp,#0]
+- ror x16,x24,#14
+- add x27,x27,x19 // h+=K[i]
+- ror x2,x13,#1
+- and x17,x25,x24
+- ror x1,x10,#19
+- bic x19,x26,x24
+- ror x3,x20,#28
+- add x27,x27,x11 // h+=X[i]
+- eor x16,x16,x24,ror#18
+- eor x2,x2,x13,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x20,x21 // a^b, b^c in next round
+- eor x16,x16,x24,ror#41 // Sigma1(e)
+- eor x3,x3,x20,ror#34
+- add x27,x27,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x1,x1,x10,ror#61
+- eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
+- add x27,x27,x16 // h+=Sigma1(e)
+- eor x28,x28,x21 // Maj(a,b,c)
+- eor x17,x3,x20,ror#39 // Sigma0(a)
+- eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
+- add x12,x12,x5
+- add x23,x23,x27 // d+=h
+- add x27,x27,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x12,x12,x2
+- add x27,x27,x17 // h+=Sigma0(a)
+- add x12,x12,x1
+- ldr x1,[sp,#16]
+- str x4,[sp,#8]
+- ror x16,x23,#14
+- add x26,x26,x28 // h+=K[i]
+- ror x3,x14,#1
+- and x17,x24,x23
+- ror x2,x11,#19
+- bic x28,x25,x23
+- ror x4,x27,#28
+- add x26,x26,x12 // h+=X[i]
+- eor x16,x16,x23,ror#18
+- eor x3,x3,x14,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x27,x20 // a^b, b^c in next round
+- eor x16,x16,x23,ror#41 // Sigma1(e)
+- eor x4,x4,x27,ror#34
+- add x26,x26,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x2,x2,x11,ror#61
+- eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
+- add x26,x26,x16 // h+=Sigma1(e)
+- eor x19,x19,x20 // Maj(a,b,c)
+- eor x17,x4,x27,ror#39 // Sigma0(a)
+- eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
+- add x13,x13,x6
+- add x22,x22,x26 // d+=h
+- add x26,x26,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x13,x13,x3
+- add x26,x26,x17 // h+=Sigma0(a)
+- add x13,x13,x2
+- ldr x2,[sp,#24]
+- str x5,[sp,#16]
+- ror x16,x22,#14
+- add x25,x25,x19 // h+=K[i]
+- ror x4,x15,#1
+- and x17,x23,x22
+- ror x3,x12,#19
+- bic x19,x24,x22
+- ror x5,x26,#28
+- add x25,x25,x13 // h+=X[i]
+- eor x16,x16,x22,ror#18
+- eor x4,x4,x15,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x26,x27 // a^b, b^c in next round
+- eor x16,x16,x22,ror#41 // Sigma1(e)
+- eor x5,x5,x26,ror#34
+- add x25,x25,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x3,x3,x12,ror#61
+- eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
+- add x25,x25,x16 // h+=Sigma1(e)
+- eor x28,x28,x27 // Maj(a,b,c)
+- eor x17,x5,x26,ror#39 // Sigma0(a)
+- eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
+- add x14,x14,x7
+- add x21,x21,x25 // d+=h
+- add x25,x25,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x14,x14,x4
+- add x25,x25,x17 // h+=Sigma0(a)
+- add x14,x14,x3
+- ldr x3,[sp,#0]
+- str x6,[sp,#24]
+- ror x16,x21,#14
+- add x24,x24,x28 // h+=K[i]
+- ror x5,x0,#1
+- and x17,x22,x21
+- ror x4,x13,#19
+- bic x28,x23,x21
+- ror x6,x25,#28
+- add x24,x24,x14 // h+=X[i]
+- eor x16,x16,x21,ror#18
+- eor x5,x5,x0,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x25,x26 // a^b, b^c in next round
+- eor x16,x16,x21,ror#41 // Sigma1(e)
+- eor x6,x6,x25,ror#34
+- add x24,x24,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x4,x4,x13,ror#61
+- eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
+- add x24,x24,x16 // h+=Sigma1(e)
+- eor x19,x19,x26 // Maj(a,b,c)
+- eor x17,x6,x25,ror#39 // Sigma0(a)
+- eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
+- add x15,x15,x8
+- add x20,x20,x24 // d+=h
+- add x24,x24,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x15,x15,x5
+- add x24,x24,x17 // h+=Sigma0(a)
+- add x15,x15,x4
+- ldr x4,[sp,#8]
+- str x7,[sp,#0]
+- ror x16,x20,#14
+- add x23,x23,x19 // h+=K[i]
+- ror x6,x1,#1
+- and x17,x21,x20
+- ror x5,x14,#19
+- bic x19,x22,x20
+- ror x7,x24,#28
+- add x23,x23,x15 // h+=X[i]
+- eor x16,x16,x20,ror#18
+- eor x6,x6,x1,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x24,x25 // a^b, b^c in next round
+- eor x16,x16,x20,ror#41 // Sigma1(e)
+- eor x7,x7,x24,ror#34
+- add x23,x23,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x5,x5,x14,ror#61
+- eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
+- add x23,x23,x16 // h+=Sigma1(e)
+- eor x28,x28,x25 // Maj(a,b,c)
+- eor x17,x7,x24,ror#39 // Sigma0(a)
+- eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
+- add x0,x0,x9
+- add x27,x27,x23 // d+=h
+- add x23,x23,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x0,x0,x6
+- add x23,x23,x17 // h+=Sigma0(a)
+- add x0,x0,x5
+- ldr x5,[sp,#16]
+- str x8,[sp,#8]
+- ror x16,x27,#14
+- add x22,x22,x28 // h+=K[i]
+- ror x7,x2,#1
+- and x17,x20,x27
+- ror x6,x15,#19
+- bic x28,x21,x27
+- ror x8,x23,#28
+- add x22,x22,x0 // h+=X[i]
+- eor x16,x16,x27,ror#18
+- eor x7,x7,x2,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x23,x24 // a^b, b^c in next round
+- eor x16,x16,x27,ror#41 // Sigma1(e)
+- eor x8,x8,x23,ror#34
+- add x22,x22,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x6,x6,x15,ror#61
+- eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
+- add x22,x22,x16 // h+=Sigma1(e)
+- eor x19,x19,x24 // Maj(a,b,c)
+- eor x17,x8,x23,ror#39 // Sigma0(a)
+- eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
+- add x1,x1,x10
+- add x26,x26,x22 // d+=h
+- add x22,x22,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x1,x1,x7
+- add x22,x22,x17 // h+=Sigma0(a)
+- add x1,x1,x6
+- ldr x6,[sp,#24]
+- str x9,[sp,#16]
+- ror x16,x26,#14
+- add x21,x21,x19 // h+=K[i]
+- ror x8,x3,#1
+- and x17,x27,x26
+- ror x7,x0,#19
+- bic x19,x20,x26
+- ror x9,x22,#28
+- add x21,x21,x1 // h+=X[i]
+- eor x16,x16,x26,ror#18
+- eor x8,x8,x3,ror#8
+- orr x17,x17,x19 // Ch(e,f,g)
+- eor x19,x22,x23 // a^b, b^c in next round
+- eor x16,x16,x26,ror#41 // Sigma1(e)
+- eor x9,x9,x22,ror#34
+- add x21,x21,x17 // h+=Ch(e,f,g)
+- and x28,x28,x19 // (b^c)&=(a^b)
+- eor x7,x7,x0,ror#61
+- eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
+- add x21,x21,x16 // h+=Sigma1(e)
+- eor x28,x28,x23 // Maj(a,b,c)
+- eor x17,x9,x22,ror#39 // Sigma0(a)
+- eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
+- add x2,x2,x11
+- add x25,x25,x21 // d+=h
+- add x21,x21,x28 // h+=Maj(a,b,c)
+- ldr x28,[x30],#8 // *K++, x19 in next round
+- add x2,x2,x8
+- add x21,x21,x17 // h+=Sigma0(a)
+- add x2,x2,x7
+- ldr x7,[sp,#0]
+- str x10,[sp,#24]
+- ror x16,x25,#14
+- add x20,x20,x28 // h+=K[i]
+- ror x9,x4,#1
+- and x17,x26,x25
+- ror x8,x1,#19
+- bic x28,x27,x25
+- ror x10,x21,#28
+- add x20,x20,x2 // h+=X[i]
+- eor x16,x16,x25,ror#18
+- eor x9,x9,x4,ror#8
+- orr x17,x17,x28 // Ch(e,f,g)
+- eor x28,x21,x22 // a^b, b^c in next round
+- eor x16,x16,x25,ror#41 // Sigma1(e)
+- eor x10,x10,x21,ror#34
+- add x20,x20,x17 // h+=Ch(e,f,g)
+- and x19,x19,x28 // (b^c)&=(a^b)
+- eor x8,x8,x1,ror#61
+- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+- add x20,x20,x16 // h+=Sigma1(e)
+- eor x19,x19,x22 // Maj(a,b,c)
+- eor x17,x10,x21,ror#39 // Sigma0(a)
+- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+- add x3,x3,x12
+- add x24,x24,x20 // d+=h
+- add x20,x20,x19 // h+=Maj(a,b,c)
+- ldr x19,[x30],#8 // *K++, x28 in next round
+- add x3,x3,x9
+- add x20,x20,x17 // h+=Sigma0(a)
+- add x3,x3,x8
+- cbnz x19,.Loop_16_xx
+-
+- ldp x0,x2,[x29,#96]
+- ldr x1,[x29,#112]
+- sub x30,x30,#648 // rewind
+-
+- ldp x3,x4,[x0]
+- ldp x5,x6,[x0,#2*8]
+- add x1,x1,#14*8 // advance input pointer
+- ldp x7,x8,[x0,#4*8]
+- add x20,x20,x3
+- ldp x9,x10,[x0,#6*8]
+- add x21,x21,x4
+- add x22,x22,x5
+- add x23,x23,x6
+- stp x20,x21,[x0]
+- add x24,x24,x7
+- add x25,x25,x8
+- stp x22,x23,[x0,#2*8]
+- add x26,x26,x9
+- add x27,x27,x10
+- cmp x1,x2
+- stp x24,x25,[x0,#4*8]
+- stp x26,x27,[x0,#6*8]
+- b.ne .Loop
+-
+- ldp x19,x20,[x29,#16]
+- add sp,sp,#4*8
+- ldp x21,x22,[x29,#32]
+- ldp x23,x24,[x29,#48]
+- ldp x25,x26,[x29,#64]
+- ldp x27,x28,[x29,#80]
+- ldp x29,x30,[sp],#128
+- ret
+-.size sha512_block_data_order,.-sha512_block_data_order
+-
+-.align 6
+-.type .LK512,%object
+-.LK512:
+- .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+- .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+- .quad 0x3956c25bf348b538,0x59f111f1b605d019
+- .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+- .quad 0xd807aa98a3030242,0x12835b0145706fbe
+- .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+- .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+- .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+- .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+- .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+- .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+- .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+- .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+- .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+- .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+- .quad 0x06ca6351e003826f,0x142929670a0e6e70
+- .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+- .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+- .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+- .quad 0x81c2c92e47edaee6,0x92722c851482353b
+- .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+- .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+- .quad 0xd192e819d6ef5218,0xd69906245565a910
+- .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+- .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+- .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+- .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+- .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+- .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+- .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+- .quad 0x90befffa23631e28,0xa4506cebde82bde9
+- .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+- .quad 0xca273eceea26619c,0xd186b8c721c0c207
+- .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+- .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+- .quad 0x113f9804bef90dae,0x1b710b35131c471b
+- .quad 0x28db77f523047d84,0x32caab7b40c72493
+- .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+- .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+- .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+- .quad 0 // terminator
+-.size .LK512,.-.LK512
+-#ifndef __KERNEL__
+-.align 3
+-.LOPENSSL_armcap_P:
+-# ifdef __ILP32__
+- .long OPENSSL_armcap_P-.
+-# else
+- .quad OPENSSL_armcap_P-.
+-# endif
+-#endif
+-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+-.align 2
+-#ifndef __KERNEL__
+-.comm OPENSSL_armcap_P,4,4
+-#endif
+diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h
+index 876442fcfb32..5be81f8fd479 100644
+--- a/arch/mips/include/uapi/asm/sgidefs.h
++++ b/arch/mips/include/uapi/asm/sgidefs.h
+@@ -10,14 +10,6 @@
+ #ifndef __ASM_SGIDEFS_H
+ #define __ASM_SGIDEFS_H
+
+-/*
+- * Using a Linux compiler for building Linux seems logic but not to
+- * everybody.
+- */
+-#ifndef __linux__
+-#error Use a Linux compiler or give up.
+-#endif
+-
+ /*
+ * Definitions for the ISA levels
+ *
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 5811e7849a2e..1df70a73dc5c 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -61,6 +61,18 @@ static inline int test_facility(unsigned long nr)
+ return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ }
+
++static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
++{
++ register unsigned long reg0 asm("0") = size - 1;
++
++ asm volatile(
++ ".insn s,0xb2b00000,0(%1)" /* stfle */
++ : "+d" (reg0)
++ : "a" (stfle_fac_list)
++ : "memory", "cc");
++ return reg0;
++}
++
+ /**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+@@ -78,13 +90,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
+ memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+ if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ /* More facility bits available with stfle */
+- register unsigned long reg0 asm("0") = size - 1;
+-
+- asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+- : "+d" (reg0)
+- : "a" (stfle_fac_list)
+- : "memory", "cc");
+- nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
++ nr = __stfle_asm(stfle_fac_list, size);
++ nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
+ }
+ memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ preempt_enable();
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index e497d374412a..8d20fb09722c 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -23,6 +23,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/export.h>
+ #include <linux/context_tracking.h>
++#include <linux/nospec.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -650,9 +651,11 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
+ {
+ struct thread_struct *thread = &tsk->thread;
+ unsigned long val = 0;
++ int index = n;
+
+ if (n < HBP_NUM) {
+- struct perf_event *bp = thread->ptrace_bps[n];
++ struct perf_event *bp = thread->ptrace_bps[index];
++ index = array_index_nospec(index, HBP_NUM);
+
+ if (bp)
+ val = bp->hw.info.address;
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 9692a5e9fdab..b95693a73f12 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -4,6 +4,7 @@
+ #include <linux/user.h>
+ #include <linux/regset.h>
+ #include <linux/syscalls.h>
++#include <linux/nospec.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/desc.h>
+@@ -219,6 +220,7 @@ int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info)
+ {
+ struct user_desc info;
++ int index;
+
+ if (idx == -1 && get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+@@ -226,8 +228,11 @@ int do_get_thread_area(struct task_struct *p, int idx,
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+- fill_user_desc(&info, idx,
+- &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
++ index = idx - GDT_ENTRY_TLS_MIN;
++ index = array_index_nospec(index,
++ GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);
++
++ fill_user_desc(&info, idx, &p->thread.tls_array[index]);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 463033b4db1d..5a24a484ecc7 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -2185,7 +2185,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2229,7 +2229,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2271,7 +2271,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2315,7 +2315,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2357,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2401,7 +2401,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2527,7 +2527,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2569,7 +2569,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
+index 2165f3dd328b..842c0235471d 100644
+--- a/drivers/input/keyboard/imx_keypad.c
++++ b/drivers/input/keyboard/imx_keypad.c
+@@ -530,11 +530,12 @@ static int imx_keypad_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int __maybe_unused imx_kbd_suspend(struct device *dev)
++static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_keypad *kbd = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = kbd->input_dev;
++ unsigned short reg_val = readw(kbd->mmio_base + KPSR);
+
+ /* imx kbd can wake up system even clock is disabled */
+ mutex_lock(&input_dev->mutex);
+@@ -544,13 +545,20 @@ static int __maybe_unused imx_kbd_suspend(struct device *dev)
+
+ mutex_unlock(&input_dev->mutex);
+
+- if (device_may_wakeup(&pdev->dev))
++ if (device_may_wakeup(&pdev->dev)) {
++ if (reg_val & KBD_STAT_KPKD)
++ reg_val |= KBD_STAT_KRIE;
++ if (reg_val & KBD_STAT_KPKR)
++ reg_val |= KBD_STAT_KDIE;
++ writew(reg_val, kbd->mmio_base + KPSR);
++
+ enable_irq_wake(kbd->irq);
++ }
+
+ return 0;
+ }
+
+-static int __maybe_unused imx_kbd_resume(struct device *dev)
++static int __maybe_unused imx_kbd_noirq_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_keypad *kbd = platform_get_drvdata(pdev);
+@@ -574,7 +582,9 @@ err_clk:
+ return ret;
+ }
+
+-static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
++static const struct dev_pm_ops imx_kbd_pm_ops = {
++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_kbd_noirq_suspend, imx_kbd_noirq_resume)
++};
+
+ static struct platform_driver imx_keypad_driver = {
+ .driver = {
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 38edf8f5bf8a..15be3ee6cc50 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1187,6 +1187,8 @@ static const char * const middle_button_pnp_ids[] = {
+ "LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
++ "LEN0407",
++ "LEN0408",
+ NULL
+ };
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 0aba34a7b3b3..727f9e571955 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -218,8 +218,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+ BUG();
+ }
+
+- DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+- block);
++ DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
++ type_str, block);
+
+ if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+ DMERR("%s: reached maximum errors", v->data_dev->name);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 21698eb671d7..765a16dab2e5 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7296,9 +7296,9 @@ static void status_unused(struct seq_file *seq)
+ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ {
+ sector_t max_sectors, resync, res;
+- unsigned long dt, db;
+- sector_t rt;
+- int scale;
++ unsigned long dt, db = 0;
++ sector_t rt, curr_mark_cnt, resync_mark_cnt;
++ int scale, recovery_active;
+ unsigned int per_milli;
+
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+@@ -7368,22 +7368,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ * db: blocks written from mark until now
+ * rt: remaining time
+ *
+- * rt is a sector_t, so could be 32bit or 64bit.
+- * So we divide before multiply in case it is 32bit and close
+- * to the limit.
+- * We scale the divisor (db) by 32 to avoid losing precision
+- * near the end of resync when the number of remaining sectors
+- * is close to 'db'.
+- * We then divide rt by 32 after multiplying by db to compensate.
+- * The '+1' avoids division by zero if db is very small.
++ * rt is a sector_t, which is always 64bit now. We are keeping
++ * the original algorithm, but it is not really necessary.
++ *
++ * Original algorithm:
++ * So we divide before multiply in case it is 32bit and close
++ * to the limit.
++ * We scale the divisor (db) by 32 to avoid losing precision
++ * near the end of resync when the number of remaining sectors
++ * is close to 'db'.
++ * We then divide rt by 32 after multiplying by db to compensate.
++ * The '+1' avoids division by zero if db is very small.
+ */
+ dt = ((jiffies - mddev->resync_mark) / HZ);
+ if (!dt) dt++;
+- db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
+- - mddev->resync_mark_cnt;
++
++ curr_mark_cnt = mddev->curr_mark_cnt;
++ recovery_active = atomic_read(&mddev->recovery_active);
++ resync_mark_cnt = mddev->resync_mark_cnt;
++
++ if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
++ db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
+
+ rt = max_sectors - resync; /* number of remaining sectors */
+- sector_div(rt, db/32+1);
++ rt = div64_u64(rt, db/32+1);
+ rt *= dt;
+ rt >>= 5;
+
+diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
+index f866a4baecb5..b9da2c6cc981 100644
+--- a/drivers/misc/vmw_vmci/vmci_context.c
++++ b/drivers/misc/vmw_vmci/vmci_context.c
+@@ -28,6 +28,9 @@
+ #include "vmci_driver.h"
+ #include "vmci_event.h"
+
++/* Use a wide upper bound for the maximum contexts. */
++#define VMCI_MAX_CONTEXTS 2000
++
+ /*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+@@ -124,19 +127,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+ /* Initialize host-specific VMCI context. */
+ init_waitqueue_head(&context->host_context.wait_queue);
+
+- context->queue_pair_array = vmci_handle_arr_create(0);
++ context->queue_pair_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
+ if (!context->queue_pair_array) {
+ error = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+- context->doorbell_array = vmci_handle_arr_create(0);
++ context->doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_qp_array;
+ }
+
+- context->pending_doorbell_array = vmci_handle_arr_create(0);
++ context->pending_doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->pending_doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_db_array;
+@@ -211,7 +217,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+ * We create an array to hold the subscribers we find when
+ * scanning through all contexts.
+ */
+- subscriber_array = vmci_handle_arr_create(0);
++ subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
+ if (subscriber_array == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+@@ -630,20 +636,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+
+ spin_lock(&context->lock);
+
+- list_for_each_entry(n, &context->notifier_list, node) {
+- if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+- exists = true;
+- break;
++ if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
++ list_for_each_entry(n, &context->notifier_list, node) {
++ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
++ exists = true;
++ break;
++ }
+ }
+- }
+
+- if (exists) {
+- kfree(notifier);
+- result = VMCI_ERROR_ALREADY_EXISTS;
++ if (exists) {
++ kfree(notifier);
++ result = VMCI_ERROR_ALREADY_EXISTS;
++ } else {
++ list_add_tail_rcu(¬ifier->node,
++ &context->notifier_list);
++ context->n_notifiers++;
++ result = VMCI_SUCCESS;
++ }
+ } else {
+- list_add_tail_rcu(¬ifier->node, &context->notifier_list);
+- context->n_notifiers++;
+- result = VMCI_SUCCESS;
++ kfree(notifier);
++ result = VMCI_ERROR_NO_MEM;
+ }
+
+ spin_unlock(&context->lock);
+@@ -728,8 +740,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+ {
+ struct dbell_cpt_state *dbells;
+- size_t n_doorbells;
+- int i;
++ u32 i, n_doorbells;
+
+ n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+ if (n_doorbells > 0) {
+@@ -867,7 +878,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id,
+ spin_lock(&context->lock);
+
+ *db_handle_array = context->pending_doorbell_array;
+- context->pending_doorbell_array = vmci_handle_arr_create(0);
++ context->pending_doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->pending_doorbell_array) {
+ context->pending_doorbell_array = *db_handle_array;
+ *db_handle_array = NULL;
+@@ -949,12 +961,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+- if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+- vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+- result = VMCI_SUCCESS;
+- } else {
++ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
++ result = vmci_handle_arr_append_entry(&context->doorbell_array,
++ handle);
++ else
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+- }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+@@ -1090,15 +1101,16 @@ int vmci_ctx_notify_dbell(u32 src_cid,
+ if (!vmci_handle_arr_has_entry(
+ dst_context->pending_doorbell_array,
+ handle)) {
+- vmci_handle_arr_append_entry(
++ result = vmci_handle_arr_append_entry(
+ &dst_context->pending_doorbell_array,
+ handle);
+-
+- ctx_signal_notify(dst_context);
+- wake_up(&dst_context->host_context.wait_queue);
+-
++ if (result == VMCI_SUCCESS) {
++ ctx_signal_notify(dst_context);
++ wake_up(&dst_context->host_context.wait_queue);
++ }
++ } else {
++ result = VMCI_SUCCESS;
+ }
+- result = VMCI_SUCCESS;
+ }
+ spin_unlock(&dst_context->lock);
+ }
+@@ -1125,13 +1137,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+- if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+- vmci_handle_arr_append_entry(&context->queue_pair_array,
+- handle);
+- result = VMCI_SUCCESS;
+- } else {
++ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
++ result = vmci_handle_arr_append_entry(
++ &context->queue_pair_array, handle);
++ else
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+- }
+
+ return result;
+ }
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
+index 344973a0fb0a..917e18a8af95 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
+@@ -16,24 +16,29 @@
+ #include <linux/slab.h>
+ #include "vmci_handle_array.h"
+
+-static size_t handle_arr_calc_size(size_t capacity)
++static size_t handle_arr_calc_size(u32 capacity)
+ {
+- return sizeof(struct vmci_handle_arr) +
++ return VMCI_HANDLE_ARRAY_HEADER_SIZE +
+ capacity * sizeof(struct vmci_handle);
+ }
+
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
+ {
+ struct vmci_handle_arr *array;
+
++ if (max_capacity == 0 || capacity > max_capacity)
++ return NULL;
++
+ if (capacity == 0)
+- capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
++ capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
++ max_capacity);
+
+ array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ array->capacity = capacity;
++ array->max_capacity = max_capacity;
+ array->size = 0;
+
+ return array;
+@@ -44,27 +49,34 @@ void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+ kfree(array);
+ }
+
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+- struct vmci_handle handle)
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++ struct vmci_handle handle)
+ {
+ struct vmci_handle_arr *array = *array_ptr;
+
+ if (unlikely(array->size >= array->capacity)) {
+ /* reallocate. */
+ struct vmci_handle_arr *new_array;
+- size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+- size_t new_size = handle_arr_calc_size(new_capacity);
++ u32 capacity_bump = min(array->max_capacity - array->capacity,
++ array->capacity);
++ size_t new_size = handle_arr_calc_size(array->capacity +
++ capacity_bump);
++
++ if (array->size >= array->max_capacity)
++ return VMCI_ERROR_NO_MEM;
+
+ new_array = krealloc(array, new_size, GFP_ATOMIC);
+ if (!new_array)
+- return;
++ return VMCI_ERROR_NO_MEM;
+
+- new_array->capacity = new_capacity;
++ new_array->capacity += capacity_bump;
+ *array_ptr = array = new_array;
+ }
+
+ array->entries[array->size] = handle;
+ array->size++;
++
++ return VMCI_SUCCESS;
+ }
+
+ /*
+@@ -74,7 +86,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+ {
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+- size_t i;
++ u32 i;
+
+ for (i = 0; i < array->size; i++) {
+ if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+@@ -109,7 +121,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
+ {
+ if (unlikely(index >= array->size))
+ return VMCI_INVALID_HANDLE;
+@@ -120,7 +132,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+ {
+- size_t i;
++ u32 i;
+
+ for (i = 0; i < array->size; i++)
+ if (vmci_handle_is_equal(array->entries[i], entry_handle))
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
+index b5f3a7f98cf1..0fc58597820e 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
+@@ -17,32 +17,41 @@
+ #define _VMCI_HANDLE_ARRAY_H_
+
+ #include <linux/vmw_vmci_defs.h>
++#include <linux/limits.h>
+ #include <linux/types.h>
+
+-#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+-#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
+-
+ struct vmci_handle_arr {
+- size_t capacity;
+- size_t size;
++ u32 capacity;
++ u32 max_capacity;
++ u32 size;
++ u32 pad;
+ struct vmci_handle entries[];
+ };
+
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
++#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
++ offsetof(struct vmci_handle_arr, entries)
++/* Select a default capacity that results in a 64 byte sized array */
++#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
++/* Make sure that the max array size can be expressed by a u32 */
++#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
++ ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
++ sizeof(struct vmci_handle))
++
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
+ void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+- struct vmci_handle handle);
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++ struct vmci_handle handle);
+ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle
+ entry_handle);
+ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index);
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+ struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+-static inline size_t vmci_handle_arr_get_size(
++static inline u32 vmci_handle_arr_get_size(
+ const struct vmci_handle_arr *array)
+ {
+ return array->size;
+diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
+index 148cae5871a6..249d2db7d600 100644
+--- a/drivers/net/can/spi/Kconfig
++++ b/drivers/net/can/spi/Kconfig
+@@ -2,9 +2,10 @@ menu "CAN SPI interfaces"
+ depends on SPI
+
+ config CAN_MCP251X
+- tristate "Microchip MCP251x SPI CAN controllers"
++ tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
+ depends on HAS_DMA
+ ---help---
+- Driver for the Microchip MCP251x SPI CAN controllers.
++ Driver for the Microchip MCP251x and MCP25625 SPI CAN
++ controllers.
+
+ endmenu
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index f3f05fea8e1f..d8c448beab24 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1,5 +1,5 @@
+ /*
+- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
++ * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
+ *
+ * MCP2510 support and bug fixes by Christian Pellegrin
+ * <chripell@evolware.org>
+@@ -41,7 +41,7 @@
+ * static struct spi_board_info spi_board_info[] = {
+ * {
+ * .modalias = "mcp2510",
+- * // or "mcp2515" depending on your controller
++ * // "mcp2515" or "mcp25625" depending on your controller
+ * .platform_data = &mcp251x_info,
+ * .irq = IRQ_EINT13,
+ * .max_speed_hz = 2*1000*1000,
+@@ -238,6 +238,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = {
+ enum mcp251x_model {
+ CAN_MCP251X_MCP2510 = 0x2510,
+ CAN_MCP251X_MCP2515 = 0x2515,
++ CAN_MCP251X_MCP25625 = 0x25625,
+ };
+
+ struct mcp251x_priv {
+@@ -280,7 +281,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \
+ }
+
+ MCP251X_IS(2510);
+-MCP251X_IS(2515);
+
+ static void mcp251x_clean(struct net_device *net)
+ {
+@@ -640,7 +640,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+-
++
+ reg = mcp251x_read_reg(spi, CANSTAT);
+ if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+ return -ENODEV;
+@@ -821,9 +821,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+ /* receive buffer 0 */
+ if (intf & CANINTF_RX0IF) {
+ mcp251x_hw_rx(spi, 0);
+- /*
+- * Free one buffer ASAP
+- * (The MCP2515 does this automatically.)
++ /* Free one buffer ASAP
++ * (The MCP2515/25625 does this automatically.)
+ */
+ if (mcp251x_is_2510(spi))
+ mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+@@ -832,7 +831,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+ /* receive buffer 1 */
+ if (intf & CANINTF_RX1IF) {
+ mcp251x_hw_rx(spi, 1);
+- /* the MCP2515 does this automatically */
++ /* The MCP2515/25625 does this automatically. */
+ if (mcp251x_is_2510(spi))
+ clear_intf |= CANINTF_RX1IF;
+ }
+@@ -1007,6 +1006,10 @@ static const struct of_device_id mcp251x_of_match[] = {
+ .compatible = "microchip,mcp2515",
+ .data = (void *)CAN_MCP251X_MCP2515,
+ },
++ {
++ .compatible = "microchip,mcp25625",
++ .data = (void *)CAN_MCP251X_MCP25625,
++ },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+@@ -1020,6 +1023,10 @@ static const struct spi_device_id mcp251x_id_table[] = {
+ .name = "mcp2515",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+ },
++ {
++ .name = "mcp25625",
++ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625,
++ },
+ { }
+ };
+ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+@@ -1260,5 +1267,5 @@ module_spi_driver(mcp251x_can_driver);
+
+ MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
+ "Christian Pellegrin <chripell@evolware.org>");
+-MODULE_DESCRIPTION("Microchip 251x CAN driver");
++MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index 8aecd8ef6542..15a0850e6bde 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -1562,7 +1562,8 @@ static int bnx2x_get_module_info(struct net_device *dev,
+ }
+
+ if (!sff8472_comp ||
+- (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
++ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) ||
++ !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+index b7d251108c19..7115f5025664 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+@@ -62,6 +62,7 @@
+ #define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+ #define SFP_EEPROM_DIAG_TYPE_SIZE 1
+ #define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
++#define SFP_EEPROM_DDM_IMPLEMENTED (1<<6)
+ #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+ #define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 345818193de9..56db37d92937 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -898,7 +898,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
+ {
+ struct be_adapter *adapter = netdev_priv(netdev);
+- int status;
++ int status, cnt;
+ u8 link_status = 0;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+@@ -909,6 +909,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+
+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+
++ /* check link status before offline tests */
++ link_status = netif_carrier_ok(netdev);
++
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+@@ -929,13 +932,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+- status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+- if (status) {
+- test->flags |= ETH_TEST_FL_FAILED;
+- data[4] = -1;
+- } else if (!link_status) {
++ /* link status was down prior to test */
++ if (!link_status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = 1;
++ return;
++ }
++
++ for (cnt = 10; cnt; cnt--) {
++ status = be_cmd_link_status_query(adapter, NULL, &link_status,
++ 0);
++ if (status) {
++ test->flags |= ETH_TEST_FL_FAILED;
++ data[4] = -1;
++ break;
++ }
++
++ if (link_status)
++ break;
++
++ msleep_interruptible(500);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 8bbedfc9c48f..a0f97c5ab6ef 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4212,7 +4212,7 @@ void e1000e_up(struct e1000_adapter *adapter)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+- netif_start_queue(adapter->netdev);
++ /* Tx queue started by watchdog timer when link is up */
+
+ e1000e_trigger_lsc(adapter);
+ }
+@@ -4588,6 +4588,7 @@ int e1000e_open(struct net_device *netdev)
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter->tx_ring);
+@@ -4648,7 +4649,6 @@ int e1000e_open(struct net_device *netdev)
+ e1000_irq_enable(adapter);
+
+ adapter->tx_hang_recheck = false;
+- netif_start_queue(netdev);
+
+ hw->mac.get_link_status = true;
+ pm_runtime_put(&pdev->dev);
+@@ -5271,6 +5271,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
++ netif_wake_queue(netdev);
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+@@ -5284,6 +5285,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+@@ -5291,13 +5293,8 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
+- *
+- * If the link is lost the controller stops DMA, but
+- * if there is queued Tx work it cannot be done. So
+- * reset the controller to flush the Tx packet buffers.
+ */
+- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+- e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
++ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ adapter->flags |= FLAG_RESTART_NOW;
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+@@ -5320,6 +5317,14 @@ link_up:
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
++ /* If the link is lost the controller stops DMA, but
++ * if there is queued Tx work it cannot be done. So
++ * reset the controller to flush the Tx packet buffers.
++ */
++ if (!netif_carrier_ok(netdev) &&
++ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
++ adapter->flags |= FLAG_RESTART_NOW;
++
+ /* If reset is necessary, do it outside of interrupt context. */
+ if (adapter->flags & FLAG_RESTART_NOW) {
+ schedule_work(&adapter->reset_task);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index a01e6c0d0cd1..b2a745b579fd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -935,7 +935,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+ MLXSW_REG_ZERO(spaft, payload);
+ mlxsw_reg_spaft_local_port_set(payload, local_port);
+ mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
+- mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
++ mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged);
+ mlxsw_reg_spaft_allow_tagged_set(payload, true);
+ }
+
+diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
+index 6f85276376e8..ae9b983e8e5c 100644
+--- a/drivers/net/ethernet/sis/sis900.c
++++ b/drivers/net/ethernet/sis/sis900.c
+@@ -1058,7 +1058,7 @@ sis900_open(struct net_device *net_dev)
+ sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ sw32(cr, RxENA | sr32(cr));
+ sw32(ier, IE);
+
+@@ -1581,7 +1581,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
+ sw32(txdp, sis_priv->tx_ring_dma);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ }
+
+ /**
+@@ -1621,7 +1621,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+ return NETDEV_TX_OK;
+ }
+- sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
++ sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
+ sw32(cr, TxENA | sr32(cr));
+
+ sis_priv->cur_tx ++;
+@@ -1677,7 +1677,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
+ do {
+ status = sr32(isr);
+
+- if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
++ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
+ /* nothing intresting happened */
+ break;
+ handled = 1;
+@@ -1687,7 +1687,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
+ /* Rx interrupt */
+ sis900_rx(net_dev);
+
+- if (status & (TxURN | TxERR | TxIDLE))
++ if (status & (TxURN | TxERR | TxIDLE | TxDESC))
+ /* Tx interrupt */
+ sis900_finish_xmit(net_dev);
+
+@@ -1899,8 +1899,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
+
+ if (tx_status & OWN) {
+ /* The packet is not transmitted yet (owned by hardware) !
+- * Note: the interrupt is generated only when Tx Machine
+- * is idle, so this is an almost impossible case */
++ * Note: this is an almost impossible condition
++ * in case of TxDESC ('descriptor interrupt') */
+ break;
+ }
+
+@@ -2476,7 +2476,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
+ sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
++ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
+ sw32(cr, RxENA | sr32(cr));
+ sw32(ier, IE);
+
+diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
+index f60f7660b451..92f52a73ec0e 100644
+--- a/drivers/net/ppp/ppp_mppe.c
++++ b/drivers/net/ppp/ppp_mppe.c
+@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
+ MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
++MODULE_SOFTDEP("pre: arc4");
+ MODULE_VERSION("1.0.2");
+
+ static unsigned int
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index 99ab20334d21..37c3cbe0ff2b 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -128,6 +128,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+
++static struct usb_driver carl9170_driver;
++
+ static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
+ {
+ struct urb *urb;
+@@ -966,32 +968,28 @@ err_out:
+
+ static void carl9170_usb_firmware_failed(struct ar9170 *ar)
+ {
+- struct device *parent = ar->udev->dev.parent;
+- struct usb_device *udev;
+-
+- /*
+- * Store a copy of the usb_device pointer locally.
+- * This is because device_release_driver initiates
+- * carl9170_usb_disconnect, which in turn frees our
+- * driver context (ar).
++ /* Store a copies of the usb_interface and usb_device pointer locally.
++ * This is because release_driver initiates carl9170_usb_disconnect,
++ * which in turn frees our driver context (ar).
+ */
+- udev = ar->udev;
++ struct usb_interface *intf = ar->intf;
++ struct usb_device *udev = ar->udev;
+
+ complete(&ar->fw_load_wait);
++ /* at this point 'ar' could be already freed. Don't use it anymore */
++ ar = NULL;
+
+ /* unbind anything failed */
+- if (parent)
+- device_lock(parent);
+-
+- device_release_driver(&udev->dev);
+- if (parent)
+- device_unlock(parent);
++ usb_lock_device(udev);
++ usb_driver_release_interface(&carl9170_driver, intf);
++ usb_unlock_device(udev);
+
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+ {
++ struct usb_interface *intf = ar->intf;
+ int err;
+
+ err = carl9170_parse_firmware(ar);
+@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+ goto err_unrx;
+
+ complete(&ar->fw_load_wait);
+- usb_put_dev(ar->udev);
++ usb_put_intf(intf);
+ return;
+
+ err_unrx:
+@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ return PTR_ERR(ar);
+
+ udev = interface_to_usbdev(intf);
+- usb_get_dev(udev);
+ ar->udev = udev;
+ ar->intf = intf;
+ ar->features = id->driver_info;
+@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ atomic_set(&ar->rx_anch_urbs, 0);
+ atomic_set(&ar->rx_pool_urbs, 0);
+
+- usb_get_dev(ar->udev);
++ usb_get_intf(intf);
+
+ carl9170_set_state(ar, CARL9170_STOPPED);
+
+ err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+ &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+ if (err) {
+- usb_put_dev(udev);
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ carl9170_free(ar);
+ }
+ return err;
+@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
+
+ carl9170_release_firmware(ar);
+ carl9170_free(ar);
+- usb_put_dev(udev);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
+index 043bd1c23c19..4a197a32d78c 100644
+--- a/drivers/net/wireless/intersil/p54/p54usb.c
++++ b/drivers/net/wireless/intersil/p54/p54usb.c
+@@ -33,6 +33,8 @@ MODULE_ALIAS("prism54usb");
+ MODULE_FIRMWARE("isl3886usb");
+ MODULE_FIRMWARE("isl3887usb");
+
++static struct usb_driver p54u_driver;
++
+ /*
+ * Note:
+ *
+@@ -921,9 +923,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+ {
+ struct p54u_priv *priv = context;
+ struct usb_device *udev = priv->udev;
++ struct usb_interface *intf = priv->intf;
+ int err;
+
+- complete(&priv->fw_wait_load);
+ if (firmware) {
+ priv->fw = firmware;
+ err = p54u_start_ops(priv);
+@@ -932,26 +934,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+ dev_err(&udev->dev, "Firmware not found.\n");
+ }
+
+- if (err) {
+- struct device *parent = priv->udev->dev.parent;
+-
+- dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
+-
+- if (parent)
+- device_lock(parent);
++ complete(&priv->fw_wait_load);
++ /*
++ * At this point p54u_disconnect may have already freed
++ * the "priv" context. Do not use it anymore!
++ */
++ priv = NULL;
+
+- device_release_driver(&udev->dev);
+- /*
+- * At this point p54u_disconnect has already freed
+- * the "priv" context. Do not use it anymore!
+- */
+- priv = NULL;
++ if (err) {
++ dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
+
+- if (parent)
+- device_unlock(parent);
++ usb_lock_device(udev);
++ usb_driver_release_interface(&p54u_driver, intf);
++ usb_unlock_device(udev);
+ }
+
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ static int p54u_load_firmware(struct ieee80211_hw *dev,
+@@ -972,14 +970,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
+ dev_info(&priv->udev->dev, "Loading firmware file %s\n",
+ p54u_fwlist[i].fw);
+
+- usb_get_dev(udev);
++ usb_get_intf(intf);
+ err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
+ device, GFP_KERNEL, priv,
+ p54u_load_firmware_cb);
+ if (err) {
+ dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
+ "(%d)!\n", p54u_fwlist[i].fw, err);
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ return err;
+@@ -1011,8 +1009,6 @@ static int p54u_probe(struct usb_interface *intf,
+ skb_queue_head_init(&priv->rx_queue);
+ init_usb_anchor(&priv->submitted);
+
+- usb_get_dev(udev);
+-
+ /* really lazy and simple way of figuring out if we're a 3887 */
+ /* TODO: should just stick the identification in the device table */
+ i = intf->altsetting->desc.bNumEndpoints;
+@@ -1053,10 +1049,8 @@ static int p54u_probe(struct usb_interface *intf,
+ priv->upload_fw = p54u_upload_firmware_net2280;
+ }
+ err = p54u_load_firmware(dev, intf);
+- if (err) {
+- usb_put_dev(udev);
++ if (err)
+ p54_free_common(dev);
+- }
+ return err;
+ }
+
+@@ -1072,7 +1066,6 @@ static void p54u_disconnect(struct usb_interface *intf)
+ wait_for_completion(&priv->fw_wait_load);
+ p54_unregister_common(dev);
+
+- usb_put_dev(interface_to_usbdev(intf));
+ release_firmware(priv->fw);
+ p54_free_common(dev);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 4b1894b4757f..395d6ece2cac 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -1719,9 +1719,10 @@ struct mwifiex_ie_types_wmm_queue_status {
+ struct ieee_types_vendor_header {
+ u8 element_id;
+ u8 len;
+- u8 oui[4]; /* 0~2: oui, 3: oui_type */
+- u8 oui_subtype;
+- u8 version;
++ struct {
++ u8 oui[3];
++ u8 oui_type;
++ } __packed oui;
+ } __packed;
+
+ struct ieee_types_wmm_parameter {
+@@ -1735,6 +1736,9 @@ struct ieee_types_wmm_parameter {
+ * Version [1]
+ */
+ struct ieee_types_vendor_header vend_hdr;
++ u8 oui_subtype;
++ u8 version;
++
+ u8 qos_info_bitmap;
+ u8 reserved;
+ struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
+@@ -1752,6 +1756,8 @@ struct ieee_types_wmm_info {
+ * Version [1]
+ */
+ struct ieee_types_vendor_header vend_hdr;
++ u8 oui_subtype;
++ u8 version;
+
+ u8 qos_info_bitmap;
+ } __packed;
+diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
+index c488c3068abc..0f977dc556ca 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ie.c
++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
+@@ -328,6 +328,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+ struct ieee80211_vendor_ie *vendorhdr;
+ u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
+ int left_len, parsed_len = 0;
++ unsigned int token_len;
++ int err = 0;
+
+ if (!info->tail || !info->tail_len)
+ return 0;
+@@ -343,6 +345,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+ */
+ while (left_len > sizeof(struct ieee_types_header)) {
+ hdr = (void *)(info->tail + parsed_len);
++ token_len = hdr->len + sizeof(struct ieee_types_header);
++ if (token_len > left_len) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ switch (hdr->element_id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+@@ -356,13 +364,16 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+ case WLAN_EID_VENDOR_SPECIFIC:
+ break;
+ default:
+- memcpy(gen_ie->ie_buffer + ie_len, hdr,
+- hdr->len + sizeof(struct ieee_types_header));
+- ie_len += hdr->len + sizeof(struct ieee_types_header);
++ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
++ err = -EINVAL;
++ goto out;
++ }
++ memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
++ ie_len += token_len;
+ break;
+ }
+- left_len -= hdr->len + sizeof(struct ieee_types_header);
+- parsed_len += hdr->len + sizeof(struct ieee_types_header);
++ left_len -= token_len;
++ parsed_len += token_len;
+ }
+
+ /* parse only WPA vendor IE from tail, WMM IE is configured by
+@@ -372,15 +383,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ info->tail, info->tail_len);
+ if (vendorhdr) {
+- memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
+- vendorhdr->len + sizeof(struct ieee_types_header));
+- ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
++ token_len = vendorhdr->len + sizeof(struct ieee_types_header);
++ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
++ err = -EINVAL;
++ goto out;
++ }
++ memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
++ ie_len += token_len;
+ }
+
+- if (!ie_len) {
+- kfree(gen_ie);
+- return 0;
+- }
++ if (!ie_len)
++ goto out;
+
+ gen_ie->ie_index = cpu_to_le16(gen_idx);
+ gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
+@@ -390,13 +403,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
+
+ if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
+ NULL, NULL)) {
+- kfree(gen_ie);
+- return -1;
++ err = -EINVAL;
++ goto out;
+ }
+
+ priv->gen_idx = gen_idx;
++
++ out:
+ kfree(gen_ie);
+- return 0;
++ return err;
+ }
+
+ /* This function parses different IEs-head & tail IEs, beacon IEs,
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 78d59a67f7e1..97847eee2dfb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1236,6 +1236,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ }
+ switch (element_id) {
+ case WLAN_EID_SSID:
++ if (element_len > IEEE80211_MAX_SSID_LEN)
++ return -EINVAL;
+ bss_entry->ssid.ssid_len = element_len;
+ memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
+ element_len);
+@@ -1245,6 +1247,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_SUPP_RATES:
++ if (element_len > MWIFIEX_SUPPORTED_RATES)
++ return -EINVAL;
+ memcpy(bss_entry->data_rates, current_ptr + 2,
+ element_len);
+ memcpy(bss_entry->supported_rates, current_ptr + 2,
+@@ -1254,6 +1258,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_FH_PARAMS:
++ if (element_len + 2 < sizeof(*fh_param_set))
++ return -EINVAL;
+ fh_param_set =
+ (struct ieee_types_fh_param_set *) current_ptr;
+ memcpy(&bss_entry->phy_param_set.fh_param_set,
+@@ -1262,6 +1268,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_DS_PARAMS:
++ if (element_len + 2 < sizeof(*ds_param_set))
++ return -EINVAL;
+ ds_param_set =
+ (struct ieee_types_ds_param_set *) current_ptr;
+
+@@ -1273,6 +1281,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_CF_PARAMS:
++ if (element_len + 2 < sizeof(*cf_param_set))
++ return -EINVAL;
+ cf_param_set =
+ (struct ieee_types_cf_param_set *) current_ptr;
+ memcpy(&bss_entry->ss_param_set.cf_param_set,
+@@ -1281,6 +1291,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_IBSS_PARAMS:
++ if (element_len + 2 < sizeof(*ibss_param_set))
++ return -EINVAL;
+ ibss_param_set =
+ (struct ieee_types_ibss_param_set *)
+ current_ptr;
+@@ -1290,10 +1302,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_ERP_INFO:
++ if (!element_len)
++ return -EINVAL;
+ bss_entry->erp_flags = *(current_ptr + 2);
+ break;
+
+ case WLAN_EID_PWR_CONSTRAINT:
++ if (!element_len)
++ return -EINVAL;
+ bss_entry->local_constraint = *(current_ptr + 2);
+ bss_entry->sensed_11h = true;
+ break;
+@@ -1336,15 +1352,22 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ vendor_ie = (struct ieee_types_vendor_specific *)
+ current_ptr;
+
+- if (!memcmp
+- (vendor_ie->vend_hdr.oui, wpa_oui,
+- sizeof(wpa_oui))) {
++ /* 802.11 requires at least 3-byte OUI. */
++ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
++ return -EINVAL;
++
++ /* Not long enough for a match? Skip it. */
++ if (element_len < sizeof(wpa_oui))
++ break;
++
++ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
++ sizeof(wpa_oui))) {
+ bss_entry->bcn_wpa_ie =
+ (struct ieee_types_vendor_specific *)
+ current_ptr;
+ bss_entry->wpa_offset = (u16)
+ (current_ptr - bss_entry->beacon_buf);
+- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
++ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
+ sizeof(wmm_oui))) {
+ if (total_ie_len ==
+ sizeof(struct ieee_types_wmm_parameter) ||
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index 1532ac9cee0b..7f9645703d96 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -1374,7 +1374,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+ /* Test to see if it is a WPA IE, if not, then it is a
+ * gen IE
+ */
+- if (!memcmp(pvendor_ie->oui, wpa_oui,
++ if (!memcmp(&pvendor_ie->oui, wpa_oui,
+ sizeof(wpa_oui))) {
+ find_wpa_ie = 1;
+ break;
+@@ -1383,7 +1383,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+ /* Test to see if it is a WPS IE, if so, enable
+ * wps session flag
+ */
+- if (!memcmp(pvendor_ie->oui, wps_oui,
++ if (!memcmp(&pvendor_ie->oui, wps_oui,
+ sizeof(wps_oui))) {
+ priv->wps.session_enable = true;
+ mwifiex_dbg(priv->adapter, MSG,
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index dea2fe671dfe..9843560e784f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM Parameter IE: version=%d,\t"
+ "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
++ wmm_ie->version, wmm_ie->qos_info_bitmap &
+ IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+ wmm_ie->reserved);
+
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 35286907c636..d0090c5c88e7 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+ return -ENOMEM;
+ }
+ irq_ptr_qs[i] = q;
++ INIT_LIST_HEAD(&q->entry);
+ }
+ return 0;
+ }
+@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index 30e9fbbff051..debe69adfc70 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -80,7 +80,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+- xchg(irq_ptr->dsci, 1 << 7);
+ }
+
+ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+@@ -88,14 +87,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+ struct qdio_q *q;
+
+ q = irq_ptr->input_qs[0];
+- /* if establish triggered an error */
+- if (!q || !q->entry.prev || !q->entry.next)
++ if (!q)
+ return;
+
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
+index 42945de31fe2..d23aa5d8e62a 100644
+--- a/drivers/staging/comedi/drivers/amplc_pci230.c
++++ b/drivers/staging/comedi/drivers/amplc_pci230.c
+@@ -2337,7 +2337,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
+ devpriv->intr_running = false;
+ spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
+
+- comedi_handle_events(dev, s_ao);
++ if (s_ao)
++ comedi_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s_ai);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
+index d5295bbdd28c..37133d54dda1 100644
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -566,7 +566,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
+ }
+ #endif
+ comedi_handle_events(dev, s);
+- comedi_handle_events(dev, s_ao);
++ if (s_ao)
++ comedi_handle_events(dev, s_ao);
+
+ return IRQ_RETVAL(handled);
+ }
+diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
+index 50a5b0c2cc7b..7ab95efcf1dc 100644
+--- a/drivers/staging/iio/cdc/ad7150.c
++++ b/drivers/staging/iio/cdc/ad7150.c
+@@ -6,6 +6,7 @@
+ * Licensed under the GPL-2 or later.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/interrupt.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+@@ -129,7 +130,7 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
+ {
+ int ret;
+ u8 threshtype;
+- bool adaptive;
++ bool thrfixed;
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
+
+ ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
+@@ -137,21 +138,23 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
+ return ret;
+
+ threshtype = (ret >> 5) & 0x03;
+- adaptive = !!(ret & 0x80);
++
++ /*check if threshold mode is fixed or adaptive*/
++ thrfixed = FIELD_GET(AD7150_CFG_FIX, ret);
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ if (dir == IIO_EV_DIR_RISING)
+- return adaptive && (threshtype == 0x1);
+- return adaptive && (threshtype == 0x0);
++ return !thrfixed && (threshtype == 0x1);
++ return !thrfixed && (threshtype == 0x0);
+ case IIO_EV_TYPE_THRESH_ADAPTIVE:
+ if (dir == IIO_EV_DIR_RISING)
+- return adaptive && (threshtype == 0x3);
+- return adaptive && (threshtype == 0x2);
++ return !thrfixed && (threshtype == 0x3);
++ return !thrfixed && (threshtype == 0x2);
+ case IIO_EV_TYPE_THRESH:
+ if (dir == IIO_EV_DIR_RISING)
+- return !adaptive && (threshtype == 0x1);
+- return !adaptive && (threshtype == 0x0);
++ return thrfixed && (threshtype == 0x1);
++ return thrfixed && (threshtype == 0x0);
+ default:
+ break;
+ }
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 5b54439a8a9b..84474f06dbcf 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1814,8 +1814,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (status & (UART_LSR_DR | UART_LSR_BI) &&
+- iir & UART_IIR_RDI) {
++ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ if (!up->dma || handle_rx_dma(up, iir))
+ status = serial8250_rx_chars(up, status);
+ }
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 5d1bd13a56c1..d5fbc2352029 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -198,11 +198,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+ out = dev->port_usb->out_ep;
+ else
+ out = NULL;
+- spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!out)
++ {
++ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOTCONN;
+-
++ }
+
+ /* Padding up to RX_EXTRA handles minor disagreements with host.
+ * Normally we use the USB "terminate on short read" convention;
+@@ -223,6 +224,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+
+ if (dev->port_usb->is_fixed)
+ size = max_t(size_t, size, dev->port_usb->fixed_out_len);
++ spin_unlock_irqrestore(&dev->lock, flags);
+
+ skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+ if (skb == NULL) {
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 968ade5a35f5..696560529e6a 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -821,9 +821,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+ }
+
+ static void usbhsf_dma_complete(void *arg);
+-static void xfer_work(struct work_struct *work)
++static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ {
+- struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_fifo *fifo;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+@@ -831,12 +830,10 @@ static void xfer_work(struct work_struct *work)
+ struct dma_chan *chan;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ enum dma_transfer_direction dir;
+- unsigned long flags;
+
+- usbhs_lock(priv, flags);
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+- goto xfer_work_end;
++ return;
+
+ chan = usbhsf_dma_chan_get(fifo, pkt);
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+@@ -845,7 +842,7 @@ static void xfer_work(struct work_struct *work)
+ pkt->trans, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+- goto xfer_work_end;
++ return;
+
+ desc->callback = usbhsf_dma_complete;
+ desc->callback_param = pipe;
+@@ -853,7 +850,7 @@ static void xfer_work(struct work_struct *work)
+ pkt->cookie = dmaengine_submit(desc);
+ if (pkt->cookie < 0) {
+ dev_err(dev, "Failed to submit dma descriptor\n");
+- goto xfer_work_end;
++ return;
+ }
+
+ dev_dbg(dev, " %s %d (%d/ %d)\n",
+@@ -864,8 +861,17 @@ static void xfer_work(struct work_struct *work)
+ dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
+ usbhs_pipe_enable(pipe);
++}
++
++static void xfer_work(struct work_struct *work)
++{
++ struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
++ struct usbhs_pipe *pipe = pkt->pipe;
++ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++ unsigned long flags;
+
+-xfer_work_end:
++ usbhs_lock(priv, flags);
++ usbhsf_dma_xfer_preparing(pkt);
+ usbhs_unlock(priv, flags);
+ }
+
+@@ -918,8 +924,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ pkt->trans = len;
+
+ usbhsf_tx_irq_ctrl(pipe, 0);
+- INIT_WORK(&pkt->work, xfer_work);
+- schedule_work(&pkt->work);
++ /* FIXME: Workaound for usb dmac that driver can be used in atomic */
++ if (usbhs_get_dparam(priv, has_usb_dmac)) {
++ usbhsf_dma_xfer_preparing(pkt);
++ } else {
++ INIT_WORK(&pkt->work, xfer_work);
++ schedule_work(&pkt->work);
++ }
+
+ return 0;
+
+@@ -1025,8 +1036,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
+
+ pkt->trans = pkt->length;
+
+- INIT_WORK(&pkt->work, xfer_work);
+- schedule_work(&pkt->work);
++ usbhsf_dma_xfer_preparing(pkt);
+
+ return 0;
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index f54931aa7528..63ff1a4f2e41 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1024,6 +1024,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+ /* EZPrototypes devices */
+ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
++ { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 15d220eaf6e6..ed6b36674c15 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1542,3 +1542,9 @@
+ #define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
+ #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
+ #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
++
++/*
++ * Unjo AB
++ */
++#define UNJO_VID 0x22B7
++#define UNJO_ISODEBUG_V1_PID 0x150D
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1effe74ec638..d7b31fdce94d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1338,6 +1338,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index c160d2d0e18d..57a97b38a2fa 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -114,6 +114,8 @@ int fscrypt_process_policy(struct file *filp,
+ if (!inode_has_encryption_context(inode)) {
+ if (!S_ISDIR(inode->i_mode))
+ ret = -ENOTDIR;
++ else if (IS_DEADDIR(inode))
++ ret = -ENOENT;
+ else if (!inode->i_sb->s_cop->empty_dir)
+ ret = -EOPNOTSUPP;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index fd817022cb9b..9e66d85021fc 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -478,13 +478,15 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
+ return NULL;
+ }
+
+-/* Extend the file by 'blocks' blocks, return the number of extents added */
++/* Extend the file with new blocks totaling 'new_block_bytes',
++ * return the number of extents added
++ */
+ static int udf_do_extend_file(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+- sector_t blocks)
++ loff_t new_block_bytes)
+ {
+- sector_t add;
++ uint32_t add;
+ int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ struct super_block *sb = inode->i_sb;
+ struct kernel_lb_addr prealloc_loc = {};
+@@ -494,7 +496,7 @@ static int udf_do_extend_file(struct inode *inode,
+
+ /* The previous extent is fake and we should not extend by anything
+ * - there's nothing to do... */
+- if (!blocks && fake)
++ if (!new_block_bytes && fake)
+ return 0;
+
+ iinfo = UDF_I(inode);
+@@ -525,13 +527,12 @@ static int udf_do_extend_file(struct inode *inode,
+ /* Can we merge with the previous extent? */
+ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+ EXT_NOT_RECORDED_NOT_ALLOCATED) {
+- add = ((1 << 30) - sb->s_blocksize -
+- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
+- sb->s_blocksize_bits;
+- if (add > blocks)
+- add = blocks;
+- blocks -= add;
+- last_ext->extLength += add << sb->s_blocksize_bits;
++ add = (1 << 30) - sb->s_blocksize -
++ (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
++ if (add > new_block_bytes)
++ add = new_block_bytes;
++ new_block_bytes -= add;
++ last_ext->extLength += add;
+ }
+
+ if (fake) {
+@@ -552,28 +553,27 @@ static int udf_do_extend_file(struct inode *inode,
+ }
+
+ /* Managed to do everything necessary? */
+- if (!blocks)
++ if (!new_block_bytes)
+ goto out;
+
+ /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
+ last_ext->extLocation.logicalBlockNum = 0;
+ last_ext->extLocation.partitionReferenceNum = 0;
+- add = (1 << (30-sb->s_blocksize_bits)) - 1;
+- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (add << sb->s_blocksize_bits);
++ add = (1 << 30) - sb->s_blocksize;
++ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
+
+ /* Create enough extents to cover the whole hole */
+- while (blocks > add) {
+- blocks -= add;
++ while (new_block_bytes > add) {
++ new_block_bytes -= add;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
+ count++;
+ }
+- if (blocks) {
++ if (new_block_bytes) {
+ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (blocks << sb->s_blocksize_bits);
++ new_block_bytes;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+@@ -604,6 +604,24 @@ out:
+ return count;
+ }
+
++/* Extend the final block of the file to final_block_len bytes */
++static void udf_do_extend_final_block(struct inode *inode,
++ struct extent_position *last_pos,
++ struct kernel_long_ad *last_ext,
++ uint32_t final_block_len)
++{
++ struct super_block *sb = inode->i_sb;
++ uint32_t added_bytes;
++
++ added_bytes = final_block_len -
++ (last_ext->extLength & (sb->s_blocksize - 1));
++ last_ext->extLength += added_bytes;
++ UDF_I(inode)->i_lenExtents += added_bytes;
++
++ udf_write_aext(inode, last_pos, &last_ext->extLocation,
++ last_ext->extLength, 1);
++}
++
+ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ {
+
+@@ -613,10 +631,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
++ unsigned long partial_final_block;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+- int err;
++ int err = 0;
++ int within_final_block;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -626,18 +646,8 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ BUG();
+
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
++ within_final_block = (etype != -1);
+
+- /* File has extent covering the new size (could happen when extending
+- * inside a block)? */
+- if (etype != -1)
+- return 0;
+- if (newsize & (sb->s_blocksize - 1))
+- offset++;
+- /* Extended file just to the boundary of the last file block? */
+- if (offset == 0)
+- return 0;
+-
+- /* Truncate is extending the file by 'offset' blocks */
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+ /* File has no extents at all or has empty last
+@@ -651,7 +661,22 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ &extent.extLength, 0);
+ extent.extLength |= etype << 30;
+ }
+- err = udf_do_extend_file(inode, &epos, &extent, offset);
++
++ partial_final_block = newsize & (sb->s_blocksize - 1);
++
++ /* File has extent covering the new size (could happen when extending
++ * inside a block)?
++ */
++ if (within_final_block) {
++ /* Extending file within the last file block */
++ udf_do_extend_final_block(inode, &epos, &extent,
++ partial_final_block);
++ } else {
++ loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
++ partial_final_block;
++ err = udf_do_extend_file(inode, &epos, &extent, add);
++ }
++
+ if (err < 0)
+ goto out;
+ err = 0;
+@@ -756,6 +781,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ /* Are we beyond EOF? */
+ if (etype == -1) {
+ int ret;
++ loff_t hole_len;
+ isBeyondEOF = true;
+ if (count) {
+ if (c)
+@@ -771,7 +797,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ startnum = (offset > 0);
+ }
+ /* Create extents for the hole between EOF and offset */
+- ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
++ hole_len = (loff_t)offset << inode->i_blkbits;
++ ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
+ if (ret < 0) {
+ brelse(prev_epos.bh);
+ brelse(cur_epos.bh);
+diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
+index 1bd31a38c51e..ce13d4677f1d 100644
+--- a/include/linux/vmw_vmci_defs.h
++++ b/include/linux/vmw_vmci_defs.h
+@@ -75,9 +75,18 @@ enum {
+
+ /*
+ * A single VMCI device has an upper limit of 128MB on the amount of
+- * memory that can be used for queue pairs.
++ * memory that can be used for queue pairs. Since each queue pair
++ * consists of at least two pages, the memory limit also dictates the
++ * number of queue pairs a guest can create.
+ */
+ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
++#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
++
++/*
++ * There can be at most PAGE_SIZE doorbells since there is one doorbell
++ * per byte in the doorbell bitmap page.
++ */
++#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
+
+ /*
+ * Queues with pre-mapped data pages must be small, so that we don't pin
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index 1b1cf33cbfb0..2b6abd046087 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -149,9 +149,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ pkt_len = skb->len - skb_inner_network_offset(skb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
+- if (unlikely(net_xmit_eval(err)))
+- pkt_len = -1;
+- iptunnel_xmit_stats(dev, pkt_len);
++
++ if (dev) {
++ if (unlikely(net_xmit_eval(err)))
++ pkt_len = -1;
++ iptunnel_xmit_stats(dev, pkt_len);
++ }
+ }
+ #endif
+ #endif
+diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
+index 2a8a3addb675..f9b6b5be7ddf 100644
+--- a/include/uapi/linux/nilfs2_ondisk.h
++++ b/include/uapi/linux/nilfs2_ondisk.h
+@@ -28,7 +28,7 @@
+
+ #include <linux/types.h>
+ #include <linux/magic.h>
+-
++#include <asm/byteorder.h>
+
+ #define NILFS_INODE_BMAP_SIZE 7
+
+@@ -532,19 +532,19 @@ enum {
+ static inline void \
+ nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
+- (1UL << NILFS_CHECKPOINT_##flag)); \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
++ (1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline void \
+ nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
+ ~(1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline int \
+ nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
+ { \
+- return !!(le32_to_cpu(cp->cp_flags) & \
++ return !!(__le32_to_cpu(cp->cp_flags) & \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+ }
+
+@@ -594,20 +594,20 @@ enum {
+ static inline void \
+ nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
+ { \
+- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
++ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
+ (1UL << NILFS_SEGMENT_USAGE_##flag));\
+ } \
+ static inline void \
+ nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
+ { \
+ su->su_flags = \
+- cpu_to_le32(le32_to_cpu(su->su_flags) & \
++ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
+ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ } \
+ static inline int \
+ nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
+ { \
+- return !!(le32_to_cpu(su->su_flags) & \
++ return !!(__le32_to_cpu(su->su_flags) & \
+ (1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ }
+
+@@ -618,15 +618,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+ static inline void
+ nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+ {
+- su->su_lastmod = cpu_to_le64(0);
+- su->su_nblocks = cpu_to_le32(0);
+- su->su_flags = cpu_to_le32(0);
++ su->su_lastmod = __cpu_to_le64(0);
++ su->su_nblocks = __cpu_to_le32(0);
++ su->su_flags = __cpu_to_le32(0);
+ }
+
+ static inline int
+ nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+ {
+- return !le32_to_cpu(su->su_flags);
++ return !__le32_to_cpu(su->su_flags);
+ }
+
+ /**
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7929526e96e2..93d7333c64d8 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5492,7 +5492,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
+ if (user_mode(regs)) {
+ regs_user->abi = perf_reg_abi(current);
+ regs_user->regs = regs;
+- } else if (current->mm) {
++ } else if (!(current->flags & PF_KTHREAD)) {
+ perf_get_regs_user(regs_user, regs, regs_user_copy);
+ } else {
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 1e1fa99b3243..0b53d1907e4a 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -264,8 +264,14 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+
+ prev = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+- if (err)
++ if (err) {
++ if (err == IPFRAG_DUP) {
++ /* No error for duplicates, pretend they got queued. */
++ kfree_skb(skb);
++ return -EINPROGRESS;
++ }
+ goto insert_error;
++ }
+
+ if (dev)
+ fq->iif = dev->ifindex;
+@@ -292,15 +298,17 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+ skb->_skb_refdst = 0UL;
+ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+ skb->_skb_refdst = orefdst;
+- return err;
++
++ /* After queue has assumed skb ownership, only 0 or
++ * -EINPROGRESS must be returned.
++ */
++ return err ? -EINPROGRESS : 0;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
+ insert_error:
+- if (err == IPFRAG_DUP)
+- goto err;
+ inet_frag_kill(&fq->q);
+ err:
+ skb_dst_drop(skb);
+@@ -480,12 +488,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ ret = 0;
+ }
+
+- /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
+- * must be returned.
+- */
+- if (ret)
+- ret = -EINPROGRESS;
+-
+ spin_unlock_bh(&fq->q.lock);
+ inet_frag_put(&fq->q);
+ return ret;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 8a690ebd7374..6708de10a3e5 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1403,7 +1403,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+- if (WARN_ON(!chanctx_conf)) {
++ if (WARN_ON_ONCE(!chanctx_conf)) {
+ rcu_read_unlock();
+ return NULL;
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index b2a27263d6ff..5c347d3a92c9 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -885,6 +885,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+
+ /* flush STAs and mpaths on this iface */
+ sta_info_flush(sdata);
++ ieee80211_free_keys(sdata, true);
+ mesh_path_flush_by_iface(sdata);
+
+ /* stop the beacon */
+@@ -1135,7 +1136,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
+ ifmsh->chsw_ttl = 0;
+
+ /* Remove the CSA and MCSP elements from the beacon */
+- tmp_csa_settings = rcu_dereference(ifmsh->csa);
++ tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
++ lockdep_is_held(&sdata->wdev.mtx));
+ RCU_INIT_POINTER(ifmsh->csa, NULL);
+ if (tmp_csa_settings)
+ kfree_rcu(tmp_csa_settings, rcu_head);
+@@ -1157,6 +1159,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
+ struct mesh_csa_settings *tmp_csa_settings;
+ int ret = 0;
+
++ lockdep_assert_held(&sdata->wdev.mtx);
++
+ tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
+ GFP_ATOMIC);
+ if (!tmp_csa_settings)
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 244eac1bd648..de18a463ac96 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2718,6 +2718,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
+ xprt = xprt_iter_xprt(&clnt->cl_xpi);
+ if (xps == NULL || xprt == NULL) {
+ rcu_read_unlock();
++ xprt_switch_put(xps);
+ return -EAGAIN;
+ }
+ resvport = xprt->resvport;
+diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
+index 97913e109b14..99e5a2f63e76 100644
+--- a/samples/bpf/bpf_load.c
++++ b/samples/bpf/bpf_load.c
+@@ -369,7 +369,7 @@ void read_trace_pipe(void)
+ static char buf[4096];
+ ssize_t sz;
+
+- sz = read(trace_fd, buf, sizeof(buf));
++ sz = read(trace_fd, buf, sizeof(buf) - 1);
+ if (sz > 0) {
+ buf[sz] = 0;
+ puts(buf);
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index 1ebbf233de9a..6d64b2cb02ab 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -1466,6 +1466,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
+ mutex_unlock(&its->its_lock);
+
+ kfree(its);
++ kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
+ }
+
+ static int vgic_its_has_attr(struct kvm_device *dev,
2019-03-23 14:57 Mike Pagano
2019-03-23 14:18 Mike Pagano
2019-03-19 16:56 Mike Pagano
2019-03-13 22:05 Mike Pagano
2019-03-06 19:12 Mike Pagano
2019-03-05 17:59 Mike Pagano
2019-02-27 11:20 Mike Pagano
2019-02-23 14:42 Mike Pagano
2019-02-20 11:16 Mike Pagano
2019-02-15 12:46 Mike Pagano
2019-02-12 20:51 Mike Pagano
2019-02-06 20:14 Mike Pagano
2019-01-31 11:22 Mike Pagano
2019-01-26 15:03 Mike Pagano
2019-01-23 11:29 Mike Pagano
2019-01-16 23:29 Mike Pagano
2019-01-13 19:26 Mike Pagano
2019-01-09 18:09 Mike Pagano
2019-01-09 17:52 Mike Pagano
2018-12-29 22:53 Mike Pagano
2018-12-29 18:51 Mike Pagano
2018-12-21 14:44 Mike Pagano
2018-12-17 11:39 Mike Pagano
2018-12-13 11:36 Mike Pagano
2018-12-08 13:25 Mike Pagano
2018-12-05 19:44 Mike Pagano
2018-12-01 18:00 Mike Pagano
2018-12-01 15:04 Mike Pagano
2018-11-27 16:22 Mike Pagano
2018-11-23 12:48 Mike Pagano
2018-11-23 12:45 Mike Pagano
2018-11-21 12:20 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-13 21:20 Mike Pagano
2018-11-11 1:44 Mike Pagano
2018-11-11 1:31 Mike Pagano
2018-11-10 21:30 Mike Pagano
2018-10-20 12:43 Mike Pagano
2018-10-18 10:25 Mike Pagano
2018-10-13 16:34 Mike Pagano
2018-10-10 11:19 Mike Pagano
2018-10-04 10:40 Mike Pagano
2018-09-29 13:33 Mike Pagano
2018-09-26 10:42 Mike Pagano
2018-09-19 22:38 Mike Pagano
2018-09-15 10:10 Mike Pagano
2018-09-09 23:27 Mike Pagano
2018-09-05 15:27 Mike Pagano
2018-08-24 11:43 Mike Pagano
2018-08-22 10:06 Alice Ferrazzi
2018-08-18 18:07 Mike Pagano
2018-08-17 19:32 Mike Pagano
2018-08-17 19:25 Mike Pagano
2018-08-16 11:51 Mike Pagano
2018-08-15 16:46 Mike Pagano
2018-08-09 10:52 Mike Pagano
2018-08-07 18:12 Mike Pagano
2018-08-03 12:25 Mike Pagano
2018-07-28 10:38 Mike Pagano
2018-07-25 10:26 Mike Pagano
2018-07-22 15:14 Mike Pagano
2018-07-17 10:25 Mike Pagano
2018-07-12 15:42 Alice Ferrazzi
2018-07-03 13:16 Mike Pagano
2018-06-26 16:34 Alice Ferrazzi
2018-06-16 15:42 Mike Pagano
2018-06-13 15:03 Mike Pagano
2018-06-06 18:04 Mike Pagano
2018-06-05 11:21 Mike Pagano
2018-05-30 22:34 Mike Pagano
2018-05-30 11:39 Mike Pagano
2018-05-25 14:54 Mike Pagano
2018-05-22 17:28 Mike Pagano
2018-05-20 22:20 Mike Pagano
2018-05-16 10:23 Mike Pagano
2018-05-09 10:54 Mike Pagano
2018-05-02 16:13 Mike Pagano
2018-04-30 10:29 Mike Pagano
2018-04-24 11:30 Mike Pagano
2018-04-20 11:12 Mike Pagano
2018-04-13 22:21 Mike Pagano
2018-04-08 14:26 Mike Pagano
2018-03-31 22:17 Mike Pagano
2018-03-28 17:42 Mike Pagano
2018-03-25 14:31 Mike Pagano
2018-03-25 13:39 Mike Pagano
2018-03-22 12:58 Mike Pagano
2018-03-18 22:15 Mike Pagano
2018-03-11 18:26 Mike Pagano
2018-03-05 2:38 Alice Ferrazzi
2018-02-28 18:46 Alice Ferrazzi
2018-02-28 15:02 Alice Ferrazzi
2018-02-25 15:47 Mike Pagano
2018-02-22 23:22 Mike Pagano
2018-02-17 15:02 Alice Ferrazzi
2018-02-13 13:25 Alice Ferrazzi
2018-02-03 21:22 Mike Pagano
2018-01-31 13:31 Alice Ferrazzi
2018-01-23 21:17 Mike Pagano
2018-01-17 10:18 Alice Ferrazzi
2018-01-17 10:18 Alice Ferrazzi
2018-01-17 9:16 Alice Ferrazzi
2018-01-15 14:57 Alice Ferrazzi
2018-01-10 12:21 Alice Ferrazzi
2018-01-10 11:47 Mike Pagano
2018-01-05 15:54 Alice Ferrazzi
2018-01-05 15:04 Alice Ferrazzi
2018-01-02 20:13 Mike Pagano
2017-12-29 17:20 Alice Ferrazzi
2017-12-25 14:36 Alice Ferrazzi
2017-12-20 12:44 Mike Pagano
2017-12-16 17:42 Alice Ferrazzi
2017-12-14 8:58 Alice Ferrazzi
2017-12-09 23:29 Mike Pagano
2017-12-05 11:38 Mike Pagano
2017-11-30 12:19 Alice Ferrazzi
2017-11-24 9:44 Alice Ferrazzi
2017-11-21 9:18 Alice Ferrazzi
2017-11-18 18:24 Mike Pagano
2017-11-15 15:44 Mike Pagano
2017-11-08 13:49 Mike Pagano
2017-11-02 10:03 Mike Pagano
2017-10-27 10:29 Mike Pagano
2017-10-21 20:15 Mike Pagano
2017-10-18 13:46 Mike Pagano
2017-10-12 22:26 Mike Pagano
2017-10-12 12:37 Mike Pagano
2017-10-08 14:23 Mike Pagano
2017-10-08 14:21 Mike Pagano
2017-10-08 14:13 Mike Pagano
2017-10-05 11:38 Mike Pagano
2017-09-27 16:38 Mike Pagano
2017-09-20 10:11 Mike Pagano
2017-09-14 11:39 Mike Pagano
2017-09-13 22:28 Mike Pagano
2017-09-13 16:25 Mike Pagano
2017-09-10 14:38 Mike Pagano
2017-09-07 22:43 Mike Pagano
2017-09-02 17:45 Mike Pagano
2017-08-30 10:06 Mike Pagano
2017-08-25 10:59 Mike Pagano
2017-08-16 22:29 Mike Pagano
2017-08-13 16:51 Mike Pagano
2017-08-11 17:41 Mike Pagano
2017-08-07 10:26 Mike Pagano
2017-05-14 13:31 Mike Pagano
2017-05-08 10:43 Mike Pagano
2017-05-03 17:45 Mike Pagano
2017-04-27 9:05 Alice Ferrazzi
2017-04-22 17:01 Mike Pagano
2017-04-18 10:23 Mike Pagano
2017-04-12 18:01 Mike Pagano
2017-04-08 13:53 Mike Pagano
2017-03-31 10:44 Mike Pagano
2017-03-30 18:15 Mike Pagano
2017-03-26 11:54 Mike Pagano
2017-03-23 18:38 Mike Pagano
2017-03-22 12:42 Mike Pagano
2017-03-18 14:34 Mike Pagano
2017-03-15 19:21 Mike Pagano
2017-03-12 12:22 Mike Pagano
2017-03-02 16:23 Mike Pagano
2017-02-26 20:38 Mike Pagano
2017-02-26 20:36 Mike Pagano
2017-02-23 20:34 Mike Pagano
2017-02-23 20:11 Mike Pagano
2017-02-18 20:37 Mike Pagano
2017-02-18 16:13 Alice Ferrazzi
2017-02-15 16:02 Alice Ferrazzi
2017-02-14 23:08 Mike Pagano
2017-02-09 11:11 Alice Ferrazzi
2017-02-04 11:34 Alice Ferrazzi
2017-02-01 13:07 Alice Ferrazzi
2017-01-29 23:08 Alice Ferrazzi
2017-01-26 8:51 Alice Ferrazzi
2017-01-20 11:33 Alice Ferrazzi
2017-01-15 22:59 Mike Pagano
2017-01-12 22:53 Mike Pagano
2017-01-09 12:41 Mike Pagano
2017-01-07 0:55 Mike Pagano
2017-01-06 23:09 Mike Pagano
2016-12-31 19:39 Mike Pagano
2016-12-11 23:20 Mike Pagano