From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by finch.gentoo.org (Postfix) with ESMTPS id E033A1395E2
	for ; Tue, 15 Nov 2016 10:05:14 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1])
	by pigeon.gentoo.org (Postfix) with SMTP id 79C82E0AA9;
	Tue, 15 Nov 2016 10:05:12 +0000 (UTC)
Received: from smtp.gentoo.org (smtp.gentoo.org [140.211.166.183])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by pigeon.gentoo.org (Postfix) with ESMTPS id 4E0FAE0AA9
	for ; Tue, 15 Nov 2016 10:05:12 +0000 (UTC)
Received: from oystercatcher.gentoo.org (unknown [IPv6:2a01:4f8:202:4333:225:90ff:fed9:fc84])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by smtp.gentoo.org (Postfix) with ESMTPS id A0A4F3411D7
	for ; Tue, 15 Nov 2016 10:05:09 +0000 (UTC)
Received: from localhost.localdomain (localhost [127.0.0.1])
	by oystercatcher.gentoo.org (Postfix) with ESMTP id C04482EF
	for ; Tue, 15 Nov 2016 10:05:07 +0000 (UTC)
From: "Alice Ferrazzi"
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Alice Ferrazzi"
Message-ID: <1479204248.1f96ed77596bc5e963e6cb2d1362a38efb071e51.alicef@gentoo>
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1031_linux-4.4.32.patch
X-VCS-Directories: /
X-VCS-Committer: alicef
X-VCS-Committer-Name: Alice Ferrazzi
X-VCS-Revision: 1f96ed77596bc5e963e6cb2d1362a38efb071e51
X-VCS-Branch: 4.4
Date: Tue, 15 Nov 2016 10:05:07 +0000 (UTC)
Precedence: bulk
List-Post: 
List-Help: 
List-Unsubscribe: 
List-Subscribe: 
List-Id: Gentoo Linux mail
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Archives-Salt: 18e35bf7-f4f3-4020-b751-5710bc3ac98b
X-Archives-Hash: e60ee93803cbf82ed30d8f79a1615ec0

commit:     1f96ed77596bc5e963e6cb2d1362a38efb071e51
Author:     Alice Ferrazzi gentoo org>
AuthorDate: Tue Nov 15 10:04:08 2016 +0000
Commit:     Alice Ferrazzi gentoo org>
CommitDate: Tue Nov 15 10:04:08 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f96ed77

Linux patch 4.4.32

 0000_README             |    4 +
 1031_linux-4.4.32.patch | 1382 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1386 insertions(+)

diff --git a/0000_README b/0000_README
index 0028596..fd829db 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,10 @@ Patch:  1030_linux-4.4.31.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.31
 
+Patch:  1031_linux-4.4.32.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.32
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1031_linux-4.4.32.patch b/1031_linux-4.4.32.patch
new file mode 100644
index 0000000..7b16c50
--- /dev/null
+++ b/1031_linux-4.4.32.patch
@@ -0,0 +1,1382 @@
+diff --git a/Makefile b/Makefile
+index 7c6f28e7a2f6..fba9b09a1330 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index bbe56871245c..4298aeb1e20f 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -822,7 +822,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ 	bool user;
+ 
+ 	/* No need to flush for entries which are already invalid */
+-	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
++	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+ 		return;
+ 	/* User address space doesn't need flushing for KSeg2/3 changes */
+ 	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+index 21aacc1f45c1..7f85c2c1d681 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
+ 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ 	unsigned lane_num, i, max_pix_clock;
+ 
+-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
++	    ENCODER_OBJECT_ID_NUTMEG) {
++		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++			max_pix_clock = (lane_num * 270000 * 8) / bpp;
+ 			if (max_pix_clock >= pix_clock) {
+ 				*dp_lanes = lane_num;
+-				*dp_rate = link_rates[i];
++				*dp_rate = 270000;
+ 				return 0;
+ 			}
+ 		}
++	} else {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++				if (max_pix_clock >= pix_clock) {
++					*dp_lanes = lane_num;
++					*dp_rate = link_rates[i];
++					return 0;
++				}
++			}
++		}
+ 	}
+ 
+ 	return -EINVAL;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 44ee72e04df9..b5760851195c 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
+ 	unsigned lane_num, i, max_pix_clock;
+ 
+-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
++	    ENCODER_OBJECT_ID_NUTMEG) {
++		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++			max_pix_clock = (lane_num * 270000 * 8) / bpp;
+ 			if (max_pix_clock >= pix_clock) {
+ 				*dp_lanes = lane_num;
+-				*dp_rate = link_rates[i];
++				*dp_rate = 270000;
+ 				return 0;
+ 			}
+ 		}
++	} else {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++				if (max_pix_clock >= pix_clock) {
++					*dp_lanes = lane_num;
++					*dp_rate = link_rates[i];
++					return 0;
++				}
++			}
++		}
+ 	}
+ 
+ 	return -EINVAL;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index ca5ac5d6f4e6..49056c33be74 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ 
+ 	rtnl_lock();
+ 
+-	/* We needn't recover from permanent error */
+-	if (state == pci_channel_io_frozen)
+-		tp->pcierr_recovery = true;
+-
+ 	/* We probably don't have netdev yet */
+ 	if (!netdev || !netif_running(netdev))
+ 		goto done;
+ 
++	/* We needn't recover from permanent error */
++	if (state == pci_channel_io_frozen)
++		tp->pcierr_recovery = true;
++
+ 	tg3_phy_stop(tp);
+ 
+ 	tg3_netif_stop(tp);
+@@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
+ 
+ 	rtnl_lock();
+ 
+-	if (!netif_running(netdev))
++	if (!netdev || !netif_running(netdev))
+ 		goto done;
+ 
+ 	tg3_full_lock(tp, 0);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index f6147ffc7fbc..ab716042bdd2 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev)
+ 	 * enet-mac reset will reset mac address registers too,
+ 	 * so need to reconfigure it.
+ 	 */
+-	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+-		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+-	}
++	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
++	writel((__force u32)cpu_to_be32(temp_mac[0]),
++	       fep->hwp + FEC_ADDR_LOW);
++	writel((__force u32)cpu_to_be32(temp_mac[1]),
++	       fep->hwp + FEC_ADDR_HIGH);
+ 
+ 	/* Clear any outstanding interrupt. */
+ 	writel(0xffffffff, fep->hwp + FEC_IEVENT);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 69e31e2a68fc..4827c6987ac3 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_pull(skb, gh_len);
+ 	skb_gro_postpull_rcsum(skb, gh, gh_len);
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 003780901628..6fa8e165878e 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+ 		}
+ 	}
+ 
+-	pp = eth_gro_receive(head, skb);
++	pp = call_gro_receive(eth_gro_receive, head, skb);
+ 
+ out:
+ 	skb_gro_remcsum_cleanup(skb, &grc);
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index ed01c0172e4a..07dd81586c52 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -127,8 +127,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
+ 	}
+ 
+ 	/* Need adjust the alignment to satisfy the CMA requirement */
+-	if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
+-		align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
++	if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) {
++		unsigned long order =
++			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
++
++		align = max(align, (phys_addr_t)PAGE_SIZE << order);
++	}
+ 
+ 	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
+ 	if (prop) {
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index ef4ff03242ea..aaf7da07a358 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1923,7 +1923,7 @@ struct megasas_instance_template {
+ };
+ 
+ #define MEGASAS_IS_LOGICAL(scp)						\
+-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+ 
+ #define MEGASAS_DEV_INDEX(scp)						\
+ 	(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +	\
+diff --git a/include/linux/mroute.h b/include/linux/mroute.h
+index 79aaa9fc1a15..d5277fc3ce2e 100644
+--- a/include/linux/mroute.h
++++ b/include/linux/mroute.h
+@@ -103,5 +103,5 @@ struct mfc_cache {
+ struct rtmsg;
+ extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 			  __be32 saddr, __be32 daddr,
+-			  struct rtmsg *rtm, int nowait);
++			  struct rtmsg *rtm, int nowait, u32 portid);
+ #endif
+diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
+index 66982e764051..f831155dc7d1 100644
+--- a/include/linux/mroute6.h
++++ b/include/linux/mroute6.h
+@@ -115,7 +115,7 @@ struct mfc6_cache {
+ 
+ struct rtmsg;
+ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
+-			   struct rtmsg *rtm, int nowait);
++			   struct rtmsg *rtm, int nowait, u32 portid);
+ 
+ #ifdef CONFIG_IPV6_MROUTE
+ extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 12b4d54a8ffa..9d6025703f73 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
+ 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
+ 	u8	is_ipv6:1;
+ 
+-	/* 7 bit hole */
++	/* Number of gro_receive callbacks this packet already went through */
++	u8 recursion_counter:4;
++
++	/* 3 bit hole */
+ 
+ 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ 	__wsum	csum;
+@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
+ 
+ #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+ 
++#define GRO_RECURSION_LIMIT 15
++static inline int gro_recursion_inc_test(struct sk_buff *skb)
++{
++	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
++}
++
++typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
++static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
++						struct sk_buff **head,
++						struct sk_buff *skb)
++{
++	if (unlikely(gro_recursion_inc_test(skb))) {
++		NAPI_GRO_CB(skb)->flush |= 1;
++		return NULL;
++	}
++
++	return cb(head, skb);
++}
++
+ struct packet_type {
+ 	__be16			type;	/* This is really htons(ether_type). */
+ 	struct net_device	*dev;	/* NULL is wildcarded here */
+@@ -2059,6 +2081,22 @@ struct udp_offload {
+ 	struct udp_offload_callbacks callbacks;
+ };
+ 
++typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
++					      struct sk_buff *,
++					      struct udp_offload *);
++static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
++						    struct sk_buff **head,
++						    struct sk_buff *skb,
++						    struct udp_offload *uoff)
++{
++	if (unlikely(gro_recursion_inc_test(skb))) {
++		NAPI_GRO_CB(skb)->flush |= 1;
++		return NULL;
++	}
++
++	return cb(head, skb, uoff);
++}
++
+ /* often modified stats are per cpu, other are shared (netdev->stats) */
+ struct pcpu_sw_netstats {
+ 	u64     rx_packets;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 1a98f1ca1638..b450d8653b30 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -553,7 +553,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
+  */
+ 
+ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
++void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
+ int ip_cmsg_send(struct net *net, struct msghdr *msg,
+ 		 struct ipcm_cookie *ipc, bool allow_ipv6);
+ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+@@ -575,7 +575,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+ 
+ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+ {
+-	ip_cmsg_recv_offset(msg, skb, 0);
++	ip_cmsg_recv_offset(msg, skb, 0, 0);
+ }
+ 
+ bool icmp_global_allow(void);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 86df0835f6b5..e5bba897d206 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force);
+ void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+ int skb_do_redirect(struct sk_buff *);
+ 
++static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
++{
++#ifdef CONFIG_NET_CLS_ACT
++	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
++#else
++	return false;
++#endif
++}
++
+ /* Reset all TX qdiscs greater then index of a device.  */
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 14d3c0734007..3d5ff7436f41 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1425,6 +1425,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+ 	if (!sk_has_account(sk))
+ 		return;
+ 	sk->sk_forward_alloc += size;
++
++	/* Avoid a possible overflow.
++	 * TCP send queues can make this happen, if sk_mem_reclaim()
++	 * is not called and more than 2 GBytes are released at once.
++	 *
++	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
++	 * no need to hold that much forward allocation anyway.
++	 */
++	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
++		__sk_mem_reclaim(sk, 1 << 20);
+ }
+ 
+ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
+index 123a5af4e8bb..fa3b34365560 100644
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -343,7 +343,7 @@ struct rtnexthop {
+ #define RTNH_F_OFFLOAD		8	/* offloaded route */
+ #define RTNH_F_LINKDOWN		16	/* carrier-down on nexthop */
+ 
+-#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN)
++#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
+ 
+ /* Macros to handle hexthops */
+ 
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index d2cd9de4b724..ad8d6e6b87ca 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_pull(skb, sizeof(*vhdr));
+ 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 9542e84a9455..d80c15d028fe 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -951,13 +951,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
+ 	mod_timer(&query->timer, jiffies);
+ }
+ 
+-void br_multicast_enable_port(struct net_bridge_port *port)
++static void __br_multicast_enable_port(struct net_bridge_port *port)
+ {
+ 	struct net_bridge *br = port->br;
+ 
+-	spin_lock(&br->multicast_lock);
+ 	if (br->multicast_disabled || !netif_running(br->dev))
+-		goto out;
++		return;
+ 
+ 	br_multicast_enable(&port->ip4_own_query);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -965,8 +964,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
+ #endif
+ 	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
+ 		br_multicast_add_router(br, port);
++}
+ 
+-out:
++void br_multicast_enable_port(struct net_bridge_port *port)
++{
++	struct net_bridge *br = port->br;
++
++	spin_lock(&br->multicast_lock);
++	__br_multicast_enable_port(port);
+ 	spin_unlock(&br->multicast_lock);
+ }
+ 
+@@ -1905,8 +1910,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
+ 
+ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ {
+-	int err = 0;
+ 	struct net_bridge_mdb_htable *mdb;
++	struct net_bridge_port *port;
++	int err = 0;
+ 
+ 	spin_lock_bh(&br->multicast_lock);
+ 	if (br->multicast_disabled == !val)
+@@ -1934,10 +1940,9 @@ rollback:
+ 			goto rollback;
+ 	}
+ 
+-	br_multicast_start_querier(br, &br->ip4_own_query);
+-#if IS_ENABLED(CONFIG_IPV6)
+-	br_multicast_start_querier(br, &br->ip6_own_query);
+-#endif
++	br_multicast_open(br);
++	list_for_each_entry(port, &br->port_list, list)
++		__br_multicast_enable_port(port);
+ 
+ unlock:
+ 	spin_unlock_bh(&br->multicast_lock);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0989fea88c44..b3fa4b86ab4c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2836,6 +2836,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
+ 	}
+ 	return head;
+ }
++EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
+ 
+ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ {
+@@ -4240,6 +4241,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ 		NAPI_GRO_CB(skb)->flush = 0;
+ 		NAPI_GRO_CB(skb)->free = 0;
+ 		NAPI_GRO_CB(skb)->encap_mark = 0;
++		NAPI_GRO_CB(skb)->recursion_counter = 0;
+ 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+ 
+ 		/* Setup for GRO checksum validation */
+@@ -5204,6 +5206,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+ 
+ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 					struct net_device *adj_dev,
++					u16 ref_nr,
+ 					struct list_head *dev_list,
+ 					void *private, bool master)
+ {
+@@ -5213,7 +5216,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 	adj = __netdev_find_adj(adj_dev, dev_list);
+ 
+ 	if (adj) {
+-		adj->ref_nr++;
++		adj->ref_nr += ref_nr;
+ 		return 0;
+ 	}
+ 
+@@ -5223,7 +5226,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ 
+ 	adj->dev = adj_dev;
+ 	adj->master = master;
+-	adj->ref_nr = 1;
++	adj->ref_nr = ref_nr;
+ 	adj->private = private;
+ 	dev_hold(adj_dev);
+ 
+@@ -5262,6 +5265,7 @@ free_adj:
+ 
+ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ 					 struct net_device *adj_dev,
++					 u16 ref_nr,
+ 					 struct list_head *dev_list)
+ {
+ 	struct netdev_adjacent *adj;
+@@ -5274,10 +5278,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ 		BUG();
+ 	}
+ 
+-	if (adj->ref_nr > 1) {
+-		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+-			 adj->ref_nr-1);
+-		adj->ref_nr--;
++	if (adj->ref_nr > ref_nr) {
++		pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
++			 ref_nr, adj->ref_nr-ref_nr);
++		adj->ref_nr -= ref_nr;
+ 		return;
+ 	}
+ 
+@@ -5296,21 +5300,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ 
+ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ 					    struct net_device *upper_dev,
++					    u16 ref_nr,
+ 					    struct list_head *up_list,
+ 					    struct list_head *down_list,
+ 					    void *private, bool master)
+ {
+ 	int ret;
+ 
+-	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+-					   master);
++	ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
++					   private, master);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+-					   false);
++	ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
++					   private, false);
+ 	if (ret) {
+-		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
++		__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+ 		return ret;
+ 	}
+ 
+@@ -5318,9 +5323,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ }
+ 
+ static int __netdev_adjacent_dev_link(struct net_device *dev,
+-				      struct net_device *upper_dev)
++				      struct net_device *upper_dev,
++				      u16 ref_nr)
+ {
+-	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
++	return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
+ 						&dev->all_adj_list.upper,
+ 						&upper_dev->all_adj_list.lower,
+ 						NULL, false);
+@@ -5328,17 +5334,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
+ 
+ static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ 					       struct net_device *upper_dev,
++					       u16 ref_nr,
+ 					       struct list_head *up_list,
+ 					       struct list_head *down_list)
+ {
+-	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+-	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
++	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
++	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
+ }
+ 
+ static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+-					 struct net_device *upper_dev)
++					 struct net_device *upper_dev,
++					 u16 ref_nr)
+ {
+-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
+ 					   &dev->all_adj_list.upper,
+ 					   &upper_dev->all_adj_list.lower);
+ }
+@@ -5347,17 +5355,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ 						struct net_device *upper_dev,
+ 						void *private, bool master)
+ {
+-	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
++	int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
++	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
+ 					       &dev->adj_list.upper,
+ 					       &upper_dev->adj_list.lower,
+ 					       private, master);
+ 	if (ret) {
+-		__netdev_adjacent_dev_unlink(dev, upper_dev);
++		__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+ 		return ret;
+ 	}
+ 
+@@ -5367,8 +5375,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ 						   struct net_device *upper_dev)
+ {
+-	__netdev_adjacent_dev_unlink(dev, upper_dev);
+-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
++	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
+ 					   &dev->adj_list.upper,
+ 					   &upper_dev->adj_list.lower);
+ }
+@@ -5420,7 +5428,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+ 			pr_debug("Interlinking %s with %s, non-neighbour\n",
+ 				 i->dev->name, j->dev->name);
+-			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
++			ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
+ 			if (ret)
+ 				goto rollback_mesh;
+ 		}
+@@ -5430,7 +5438,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+ 		pr_debug("linking %s's upper device %s with %s\n",
+ 			 upper_dev->name, i->dev->name, dev->name);
+-		ret = __netdev_adjacent_dev_link(dev, i->dev);
++		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
+ 		if (ret)
+ 			goto rollback_upper_mesh;
+ 	}
+@@ -5439,7 +5447,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ 		pr_debug("linking %s's lower device %s with %s\n", dev->name,
+ 			 i->dev->name, upper_dev->name);
+-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
++		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
+ 		if (ret)
+ 			goto rollback_lower_mesh;
+ 	}
+@@ -5453,7 +5461,7 @@ rollback_lower_mesh:
+ 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ 		if (i == to_i)
+ 			break;
+-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
++		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+ 	}
+ 
+ 	i = NULL;
+@@ -5463,7 +5471,7 @@ rollback_upper_mesh:
+ 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+ 		if (i == to_i)
+ 			break;
+-		__netdev_adjacent_dev_unlink(dev, i->dev);
++		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+ 	}
+ 
+ 	i = j = NULL;
+@@ -5475,7 +5483,7 @@ rollback_mesh:
+ 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+ 			if (i == to_i && j == to_j)
+ 				break;
+-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
++			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+ 		}
+ 		if (i == to_i)
+ 			break;
+@@ -5559,16 +5567,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
+ 	 */
+ 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
+ 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
+-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
++			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+ 
+ 	/* remove also the devices itself from lower/upper device
+ 	 * list
+ 	 */
+ 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
+-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
++		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+ 
+ 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
+-		__netdev_adjacent_dev_unlink(dev, i->dev);
++		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+ 
+ 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ 				      &changeupper_info.info);
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 4da4d51a2ccf..b6327601f979 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -215,8 +215,8 @@
+ #define M_NETIF_RECEIVE 	1	/* Inject packets into stack */
+ 
+ /* If lock -- protects updating of if_list */
+-#define   if_lock(t)           spin_lock(&(t->if_lock));
+-#define   if_unlock(t)           spin_unlock(&(t->if_lock));
++#define   if_lock(t)           mutex_lock(&(t->if_lock));
++#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
+ 
+ /* Used to help with determining the pkts on receive */
+ #define PKTGEN_MAGIC 0xbe9be955
+@@ -422,7 +422,7 @@ struct pktgen_net {
+ };
+ 
+ struct pktgen_thread {
+-	spinlock_t if_lock;		/* for list of devices */
++	struct mutex if_lock;		/* for list of devices */
+ 	struct list_head if_list;	/* All device here */
+ 	struct list_head th_list;
+ 	struct task_struct *tsk;
+@@ -2002,11 +2002,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
+ {
+ 	struct pktgen_thread *t;
+ 
++	mutex_lock(&pktgen_thread_lock);
++
+ 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
+ 		struct pktgen_dev *pkt_dev;
+ 
+-		rcu_read_lock();
+-		list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
++		if_lock(t);
++		list_for_each_entry(pkt_dev, &t->if_list, list) {
+ 			if (pkt_dev->odev != dev)
+ 				continue;
+ 
+@@ -2021,8 +2023,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
+ 				     dev->name);
+ 			break;
+ 		}
+-		rcu_read_unlock();
++		if_unlock(t);
+ 	}
++	mutex_unlock(&pktgen_thread_lock);
+ }
+ 
+ static int pktgen_device_event(struct notifier_block *unused,
+@@ -2278,7 +2281,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
+ 
+ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
+ {
+-	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
++	pkt_dev->pkt_overhead = 0;
+ 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
+ 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
+ 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
+@@ -2769,13 +2772,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+ }
+ 
+ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+-					struct pktgen_dev *pkt_dev,
+-					unsigned int extralen)
++					struct pktgen_dev *pkt_dev)
+ {
++	unsigned int extralen = LL_RESERVED_SPACE(dev);
+ 	struct sk_buff *skb = NULL;
+-	unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
+-			    pkt_dev->pkt_overhead;
++	unsigned int size;
+ 
++	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
+ 	if (pkt_dev->flags & F_NODE) {
+ 		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
+ 
+@@ -2788,8 +2791,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+ 		skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
+ 	}
+ 
++	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
+ 	if (likely(skb))
+-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
++		skb_reserve(skb, extralen - 16);
+ 
+ 	return skb;
+ }
+@@ -2822,16 +2826,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
+ 	mod_cur_headers(pkt_dev);
+ 	queue_map = pkt_dev->cur_queue_map;
+ 
+-	datalen = (odev->hard_header_len + 16) & ~0xf;
+-
+-	skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
++	skb = pktgen_alloc_skb(odev, pkt_dev);
+ 	if (!skb) {
+ 		sprintf(pkt_dev->result, "No memory");
+ 		return NULL;
+ 	}
+ 
+ 	prefetchw(skb->data);
+-	skb_reserve(skb, datalen);
++	skb_reserve(skb, 16);
+ 
+ 	/* Reserve for ethernet and IP header */
+ 	eth = (__u8 *) skb_push(skb, 14);
+@@ -2951,7 +2953,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 	mod_cur_headers(pkt_dev);
+ 	queue_map = pkt_dev->cur_queue_map;
+ 
+-	skb = pktgen_alloc_skb(odev, pkt_dev, 16);
++	skb = pktgen_alloc_skb(odev, pkt_dev);
+ 	if (!skb) {
+ 		sprintf(pkt_dev->result, "No memory");
+ 		return NULL;
+@@ -3727,7 +3729,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
+ 		return -ENOMEM;
+ 	}
+ 
+-	spin_lock_init(&t->if_lock);
++	mutex_init(&t->if_lock);
+ 	t->cpu = cpu;
+ 
+ 	INIT_LIST_HEAD(&t->if_list);
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 9e63f252a89e..de85d4e1cf43 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_pull(skb, sizeof(*eh));
+ 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 1a5c1ca3ad3c..afc18e9ca94a 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
+ 	skb_gro_pull(skb, sizeof(*iph));
+ 	skb_set_transport_header(skb, skb_gro_offset(skb));
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 08d7de55e57e..08d8ee124538 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
+ 	if (!ops || !ops->callbacks.gro_receive)
+ 		goto out_unlock;
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
+ 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
+ 		goto out_unlock;
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index e603004c1af8..79ae0d7becbf 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
+ 	skb_gro_postpull_rcsum(skb, greh, grehlen);
+ 
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index a50124260f5a..9ce202549e7a 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
+ }
+ 
+ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+-				  int offset)
++				  int tlen, int offset)
+ {
+ 	__wsum csum = skb->csum;
+ 
+@@ -106,7 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+ 		return;
+ 
+ 	if (offset != 0)
+-		csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
++		csum = csum_sub(csum,
++				csum_partial(skb->data + tlen,
++					     offset, 0));
+ 
+ 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+ }
+@@ -152,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ }
+ 
+ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+-			 int offset)
++			 int tlen, int offset)
+ {
+ 	struct inet_sock *inet = inet_sk(skb->sk);
+ 	unsigned int flags = inet->cmsg_flags;
+@@ -215,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+ 	}
+ 
+ 	if (flags & IP_CMSG_CHECKSUM)
+-		ip_cmsg_recv_checksum(msg, skb, offset);
++		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
+ }
+ EXPORT_SYMBOL(ip_cmsg_recv_offset);
+ 
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 9d1e555496e3..8e77786549c6 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2192,7 +2192,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 
+ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 		   __be32 saddr, __be32 daddr,
+-		   struct rtmsg *rtm, int nowait)
++		   struct rtmsg *rtm, int nowait, u32 portid)
+ {
+ 	struct mfc_cache *cache;
+ 	struct mr_table *mrt;
+@@ -2237,6 +2237,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 		}
+ 
++		NETLINK_CB(skb2).portid = portid;
+ 		skb_push(skb2, sizeof(struct iphdr));
+ 		skb_reset_network_header(skb2);
+ 		iph = ip_hdr(skb2);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index b050cf980a57..8533a75a9328 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2492,7 +2492,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
+ 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ 			int err = ipmr_get_route(net, skb,
+ 						 fl4->saddr, fl4->daddr,
+-						 r, nowait);
++						 r, nowait, portid);
++
+ 			if (err <= 0) {
+ 				if (!nowait) {
+ 					if (err == 0)
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index a0bd7a55193e..70fb352e317f 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -97,11 +97,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
+ 		container_of(table->data, struct net, ipv4.ping_group_range.range);
+ 	unsigned int seq;
+ 	do {
+-		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
++		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
+ 
+ 		*low = data[0];
+ 		*high = data[1];
+-	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
++	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
+ }
+ 
+ /* Update system visible IP port range */
+@@ -110,10 +110,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
+ 	kgid_t *data = table->data;
+ 	struct net *net =
+ 		container_of(table->data, struct net, ipv4.ping_group_range.range);
+-	write_seqlock(&net->ipv4.ip_local_ports.lock);
++	write_seqlock(&net->ipv4.ping_group_range.lock);
+ 	data[0] = low;
+ 	data[1] = high;
+-	write_sequnlock(&net->ipv4.ip_local_ports.lock);
++	write_sequnlock(&net->ipv4.ping_group_range.lock);
+ }
+ 
+ /* Validate changes from /proc interface. */
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 12b98e257c5f..7cc0f8aac28f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2324,10 +2324,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
+ 	}
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	else if (sk->sk_family == AF_INET6) {
+-		struct ipv6_pinfo *np = inet6_sk(sk);
+ 		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
+ 			 msg,
+-			 &np->daddr, ntohs(inet->inet_dport),
++			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
+ 			 tp->snd_cwnd, tcp_left_out(tp),
+ 			 tp->snd_ssthresh, tp->prior_ssthresh,
+ 			 tp->packets_out);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 660c967ba84a..0795647e94c6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1950,12 +1950,14 @@ static int tcp_mtu_probe(struct sock *sk)
+ 	len = 0;
+ 	tcp_for_write_queue_from_safe(skb, next, sk) {
+ 		copy = min_t(int, skb->len, probe_size - len);
+-		if (nskb->ip_summed)
++		if (nskb->ip_summed) {
+ 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
+-		else
+-			nskb->csum = skb_copy_and_csum_bits(skb, 0,
+-							    skb_put(nskb, copy),
+-							    copy, nskb->csum);
++		} else {
++			__wsum csum = skb_copy_and_csum_bits(skb, 0,
++							     skb_put(nskb, copy),
++							     copy, 0);
++			nskb->csum = csum_block_add(nskb->csum, csum, len);
++		}
+ 
+ 		if (skb->len <= copy) {
+ 			/* We've eaten all the data from this skb.
+@@ -2569,7 +2571,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	 * copying overhead: fragmentation, tunneling, mangling etc.
+ 	 */
+ 	if (atomic_read(&sk->sk_wmem_alloc) >
+-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
++	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
++		  sk->sk_sndbuf))
+ 		return -EAGAIN;
+ 
+ 	if (skb_still_in_host_queue(sk, skb))
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0b1ea5abcc04..e9513e397c4f 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1342,7 +1342,7 @@ try_again:
+ 		*addr_len = sizeof(*sin);
+ 	}
+ 	if (inet->cmsg_flags)
+-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
++		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
+ 
+ 	err = copied;
+ 	if (flags & MSG_TRUNC)
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0e36e56dfd22..6396f1c80ae9 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -339,8 +339,8 @@ unflush:
+ 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+ 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+ 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
+-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+-						     uo_priv->offload);
++	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
++				  head, skb, uo_priv->offload);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 036b39eb1220..cb8bb5988c03 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2916,7 +2916,7 @@ static void init_loopback(struct net_device *dev)
+ 			 * lo device down, release this obsolete dst and
+ 			 * reallocate a new router for ifa.
+ 			 */
+-			if (sp_ifa->rt->dst.obsolete > 0) {
++			if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
+ 				ip6_rt_put(sp_ifa->rt);
+ 				sp_ifa->rt = NULL;
+ 			} else {
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 4650c6824783..17430f341073 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -886,7 +886,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+ 		encap_limit = t->parms.encap_limit;
+ 
+ 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-	fl6.flowi6_proto = skb->protocol;
+ 
+ 	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+ 
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 82e9f3076028..efe6268b8bc3 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_postpull_rcsum(skb, iph, nlen);
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 3991b21e24ad..e8878886eba4 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -246,6 +246,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
+ 	hash = HASH(&any, local);
+ 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
++		    ipv6_addr_any(&t->parms.raddr) &&
+ 		    (t->dev->flags & IFF_UP))
+ 			return t;
+ 	}
+@@ -253,6 +254,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
+ 	hash = HASH(remote, &any);
+ 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
++		    ipv6_addr_any(&t->parms.laddr) &&
+ 		    (t->dev->flags & IFF_UP))
+ 			return t;
+ 	}
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index e207cb2468da..d9843e5a667f 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2276,8 +2276,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+ 	return 1;
+ }
+ 
+-int ip6mr_get_route(struct net *net,
+-		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
++int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
++		    int nowait, u32 portid)
+ {
+ 	int err;
+ 	struct mr6_table *mrt;
+@@ -2322,6 +2322,7 @@ int ip6mr_get_route(struct net *net,
+ 			return -ENOMEM;
+ 		}
+ 
++		NETLINK_CB(skb2).portid = portid;
+ 		skb_reset_transport_header(skb2);
+ 
+ 		skb_put(skb2, sizeof(struct ipv6hdr));
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 5af2cca0a46d..dbffc9de184b 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3140,7 +3140,9 @@ static int rt6_fill_node(struct net *net,
+ 	if (iif) {
+ #ifdef CONFIG_IPV6_MROUTE
+ 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+-			int err = ip6mr_get_route(net, skb, rtm, nowait);
++			int err = ip6mr_get_route(net, skb, rtm, nowait,
++						  portid);
++
+ 			if (err <= 0) {
+ 				if (!nowait) {
+ 					if (err == 0)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 2d81e2f33ef2..fbd521fdae53 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1179,6 +1179,16 @@ out:
+ 	return NULL;
+ }
+ 
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++	/* We need to move header back to the beginning if xfrm6_policy_check()
++	 * and tcp_v6_fill_cb() are going to be called again.
++	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
++	 */
++	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++		sizeof(struct inet6_skb_parm));
++}
++
+ /* The socket must have it's spinlock held when we get
+  * here, unless it is a TCP_LISTEN socket.
+  *
+@@ -1308,6 +1318,7 @@ ipv6_pktoptions:
+ 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+ 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
+ 			skb_set_owner_r(opt_skb, sk);
++			tcp_v6_restore_cb(opt_skb);
+ 			opt_skb = xchg(&np->pktoptions, opt_skb);
+ 		} else {
+ 			__kfree_skb(opt_skb);
+@@ -1341,15 +1352,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ 	TCP_SKB_CB(skb)->sacked = 0;
+ }
+ 
+-static void tcp_v6_restore_cb(struct sk_buff *skb)
+-{
+-	/* We need to move header back to the beginning if xfrm6_policy_check()
+-	 * and tcp_v6_fill_cb() are going to be called again.
+-	 */
+-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+-		sizeof(struct inet6_skb_parm));
+-}
+-
+ static int tcp_v6_rcv(struct sk_buff *skb)
+ {
+ 	const struct tcphdr *th;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e6092bd72ee2..dfa85e7264df 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -498,7 +498,8 @@ try_again:
+ 
+ 	if (is_udp4) {
+ 		if (inet->cmsg_flags)
+-			ip_cmsg_recv(msg, skb);
++			ip_cmsg_recv_offset(msg, skb,
++					    sizeof(struct udphdr), off);
+ 	} else {
+ 		if (np->rxopt.all)
+ 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 7a5fa0c98377..28fc283c1ec1 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2557,7 +2557,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		/* Record the max length of recvmsg() calls for future allocations */
+ 		nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+ 		nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+-					     16384);
++					     SKB_WITH_OVERHEAD(32768));
+ 
+ 	copied = data_skb->len;
+ 	if (len < copied) {
+@@ -2810,14 +2810,13 @@ static int netlink_dump(struct sock *sk)
+ 	if (alloc_min_size < nlk->max_recvmsg_len) {
+ 		alloc_size = nlk->max_recvmsg_len;
+ 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+-					GFP_KERNEL |
+-					__GFP_NOWARN |
+-					__GFP_NORETRY);
++					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
++					__GFP_NOWARN | __GFP_NORETRY);
+ 	}
+ 	if (!skb) {
+ 		alloc_size = alloc_min_size;
+ 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+-					GFP_KERNEL);
++					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
+ 	}
+ 	if (!skb)
+ 		goto errout_skb;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a86f26d05bc2..34e4fcfd240b 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -249,7 +249,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+ static int packet_direct_xmit(struct sk_buff *skb)
+ {
+ 	struct net_device *dev = skb->dev;
+-	netdev_features_t features;
++	struct sk_buff *orig_skb = skb;
+ 	struct netdev_queue *txq;
+ 	int ret = NETDEV_TX_BUSY;
+ 
+@@ -257,9 +257,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
+ 		     !netif_carrier_ok(dev)))
+ 		goto drop;
+ 
+-	features = netif_skb_features(skb);
+-	if (skb_needs_linearize(skb, features) &&
+-	    __skb_linearize(skb))
++	skb = validate_xmit_skb_list(skb, dev);
++	if (skb != orig_skb)
+ 		goto drop;
+ 
+ 	txq = skb_get_tx_queue(dev, skb);
+@@ -279,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
+ 	return ret;
+ drop:
+ 	atomic_long_inc(&dev->tx_dropped);
+-	kfree_skb(skb);
++	kfree_skb_list(skb);
+ 	return NET_XMIT_DROP;
+ }
+ 
+@@ -3855,6 +3854,7 @@ static int packet_notifier(struct notifier_block *this,
+ 		}
+ 		if (msg == NETDEV_UNREGISTER) {
+ 			packet_cached_dev_reset(po);
++			fanout_release(sk);
+ 			po->ifindex = -1;
+ 			if (po->prot_hook.dev)
+ 				dev_put(po->prot_hook.dev);
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 796785e0bf96..d7edba4536bd 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -33,6 +33,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
+ 	bstats_update(&v->tcf_bstats, skb);
+ 	action = v->tcf_action;
+ 
++	/* Ensure 'data' points at mac_header prior calling vlan manipulating
++	 * functions.
++	 */
++	if (skb_at_tc_ingress(skb))
++		skb_push_rcsum(skb, skb->mac_len);
++
+ 	switch (v->tcfv_action) {
+ 	case TCA_VLAN_ACT_POP:
+ 		err = skb_vlan_pop(skb);
+@@ -54,6 +60,9 @@ drop:
+ 	action = TC_ACT_SHOT;
+ 	v->tcf_qstats.drops++;
+ unlock:
++	if (skb_at_tc_ingress(skb))
++		skb_pull_rcsum(skb, skb->mac_len);
++
+ 	spin_unlock(&v->tcf_lock);
+ 	return action;
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index a75864d93142..ecc1904e454f 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -315,7 +315,8 @@ replay:
+ 			if (err == 0) {
+ 				struct tcf_proto *next = rtnl_dereference(tp->next);
+ 
+-				tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
++				tfilter_notify(net, skb, n, tp,
++					       t->tcm_handle, RTM_DELTFILTER);
+ 				if (tcf_destroy(tp, false))
+ 					RCU_INIT_POINTER(*back, next);
+ 			}
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 22c2bf367d7e..29c7c43de108 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3426,6 +3426,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ 			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ 						  commands);
+ 
++		/* Report violation if chunk len overflows */
++		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
++		if (ch_end > skb_tail_pointer(skb))
++			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++						  commands);
++
+ 		/* Now that we know we at least have a chunk header,
+ 		 * do things that are type appropriate.
+ 		 */
+@@ -3457,12 +3463,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ 			}
+ 		}
+ 
+-		/* Report violation if chunk len overflows */
+-		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+-		if (ch_end > skb_tail_pointer(skb))
+-			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+-						  commands);
+-
+ 		ch = (sctp_chunkhdr_t *) ch_end;
+ 	} while (ch_end < skb_tail_pointer(skb));
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index be1489fc3234..402817be3873 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4371,7 +4371,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ 				  int __user *optlen)
+ {
+-	if (len <= 0)
++	if (len == 0)
+ 		return -EINVAL;
+ 	if (len > sizeof(struct sctp_event_subscribe))
+ 		len = sizeof(struct sctp_event_subscribe);
+@@ -5972,6 +5972,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ 	if (get_user(len, optlen))
+ 		return -EFAULT;
+ 
++	if (len < 0)
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	switch (optname) {