From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 20 Mar 2020 11:49:39 +0000 (UTC)
Message-ID: <1584704964.1cdf83ecb0f5a15ea2e2eebd7f0ee13d711491dd.mpagano@gentoo>
commit: 1cdf83ecb0f5a15ea2e2eebd7f0ee13d711491dd
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 20 11:49:24 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 20 11:49:24 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1cdf83ec
Linux patch 4.4.217
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
1216_linux-4.4.217.patch | 3442 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 3442 insertions(+)
diff --git a/1216_linux-4.4.217.patch b/1216_linux-4.4.217.patch
new file mode 100644
index 0000000..b8f6e98
--- /dev/null
+++ b/1216_linux-4.4.217.patch
@@ -0,0 +1,3442 @@
+diff --git a/Makefile b/Makefile
+index e0bcd5a0ae9b..d983151a864b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 216
++SUBLEVEL = 217
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
+index 5faad17118b4..3ed7ea726fb5 100644
+--- a/arch/arc/include/asm/linkage.h
++++ b/arch/arc/include/asm/linkage.h
+@@ -12,6 +12,8 @@
+ #ifdef __ASSEMBLY__
+
+ #define ASM_NL ` /* use '`' to mark new line in macro */
++#define __ALIGN .align 4
++#define __ALIGN_STR __stringify(__ALIGN)
+
+ /* annotation for data we want in DCCM - if enabled in .config */
+ .macro ARCFP_DATA nm
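
The hunk above supplies ARC-specific __ALIGN/__ALIGN_STR overrides. The
generic fallback in include/linux/linkage.h pads with 0x90 (an x86 NOP
byte) that is not meaningful on ARC, so defining these before the
generic header makes the linkage macros emit a plain ".align 4".
Roughly how linkage.h consumes the overrides (paraphrased from memory,
simplified):

    #ifndef __ALIGN                         /* generic fallback */
    #define __ALIGN         .align 4,0x90
    #define __ALIGN_STR     ".align 4,0x90"
    #endif

    #define ALIGN   __ALIGN

    #define ENTRY(name) \
            .globl name ASM_NL \
            ALIGN ASM_NL \
            name:
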
+diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
+index 2dee87273e51..5ab9af255c78 100644
+--- a/arch/arm/kernel/vdso.c
++++ b/arch/arm/kernel/vdso.c
+@@ -84,6 +84,8 @@ static bool __init cntvct_functional(void)
+ * this.
+ */
+ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
++ if (!np)
++ np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+ if (!np)
+ goto out_put;
+
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index e32b51838439..05955ed85c2c 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -100,7 +100,7 @@ ENTRY(arm_copy_from_user)
+
+ ENDPROC(arm_copy_from_user)
+
+- .pushsection .fixup,"ax"
++ .pushsection .text.fixup,"ax"
+ .align 0
+ copy_abort_preamble
+ ldmfd sp!, {r1, r2}
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+index 49742746a6c9..98e786a779fd 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+@@ -181,21 +181,19 @@ static int amd_uncore_event_init(struct perf_event *event)
+ return -ENOENT;
+
+ /*
+- * NB and L2 counters (MSRs) are shared across all cores that share the
+- * same NB / L2 cache. Interrupts can be directed to a single target
+- * core, however, event counts generated by processes running on other
+- * cores cannot be masked out. So we do not support sampling and
+- * per-thread events.
++ * NB and Last level cache counters (MSRs) are shared across all cores
++ * that share the same NB / Last level cache. On family 16h and below,
++ * Interrupts can be directed to a single target core, however, event
++ * counts generated by processes running on other cores cannot be masked
++ * out. So we do not support sampling and per-thread events via
++ * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
+ */
+- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+- return -EINVAL;
+
+ /* NB and L2 counters do not have usr/os/guest/host bits */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
+
+- /* and we do not enable counter overflow interrupts */
+ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+ hwc->idx = -1;
+
+@@ -271,6 +269,7 @@ static struct pmu amd_nb_pmu = {
+ .start = amd_uncore_start,
+ .stop = amd_uncore_stop,
+ .read = amd_uncore_read,
++ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ };
+
+ static struct pmu amd_l2_pmu = {
+@@ -282,6 +281,7 @@ static struct pmu amd_l2_pmu = {
+ .start = amd_uncore_start,
+ .stop = amd_uncore_stop,
+ .read = amd_uncore_read,
++ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ };
+
+ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
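
The uncore change above replaces a driver-local is_sampling_event()
rejection with the PERF_PMU_CAP_NO_INTERRUPT capability: since these
counters cannot raise overflow interrupts, the perf core itself refuses
sampling events for the PMU. A sketch of the core-side behaviour this
relies on (paraphrased; the check lives in kernel/events/core.c and its
exact placement varies by kernel version):

    /* is_sampling_event() is the real helper from linux/perf_event.h */
    if (is_sampling_event(event) &&
        (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
            return -EOPNOTSUPP;     /* no overflow IRQ, so no sampling */
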
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 767be5e61913..466028623e1a 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5010,6 +5010,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ ctxt->fetch.ptr = ctxt->fetch.data;
+ ctxt->fetch.end = ctxt->fetch.data + insn_len;
+ ctxt->opcode_len = 1;
++ ctxt->intercept = x86_intercept_none;
+ if (insn_len > 0)
+ memcpy(ctxt->fetch.data, insn, insn_len);
+ else {
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 10e6774ab2a2..fc4d0228b7d8 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -139,13 +139,16 @@ static ssize_t
+ efivar_attr_read(struct efivar_entry *entry, char *buf)
+ {
+ struct efi_variable *var = &entry->var;
++ unsigned long size = sizeof(var->Data);
+ char *str = buf;
++ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+- var->DataSize = 1024;
+- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
++ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
++ var->DataSize = size;
++ if (ret)
+ return -EIO;
+
+ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+@@ -172,13 +175,16 @@ static ssize_t
+ efivar_size_read(struct efivar_entry *entry, char *buf)
+ {
+ struct efi_variable *var = &entry->var;
++ unsigned long size = sizeof(var->Data);
+ char *str = buf;
++ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+- var->DataSize = 1024;
+- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
++ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
++ var->DataSize = size;
++ if (ret)
+ return -EIO;
+
+ str += sprintf(str, "0x%lx\n", var->DataSize);
+@@ -189,12 +195,15 @@ static ssize_t
+ efivar_data_read(struct efivar_entry *entry, char *buf)
+ {
+ struct efi_variable *var = &entry->var;
++ unsigned long size = sizeof(var->Data);
++ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+- var->DataSize = 1024;
+- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
++ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
++ var->DataSize = size;
++ if (ret)
+ return -EIO;
+
+ memcpy(buf, var->Data, var->DataSize);
+@@ -263,6 +272,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+ u8 *data;
+ int err;
+
++ if (!entry || !buf)
++ return -EINVAL;
++
+ if (is_compat()) {
+ struct compat_efi_variable *compat;
+
+@@ -314,14 +326,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
+ {
+ struct efi_variable *var = &entry->var;
+ struct compat_efi_variable *compat;
++ unsigned long datasize = sizeof(var->Data);
+ size_t size;
++ int ret;
+
+ if (!entry || !buf)
+ return 0;
+
+- var->DataSize = 1024;
+- if (efivar_entry_get(entry, &entry->var.Attributes,
+- &entry->var.DataSize, entry->var.Data))
++ ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
++ var->DataSize = datasize;
++ if (ret)
+ return -EIO;
+
+ if (is_compat()) {
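
All four efivars hunks above apply the same hardening: the read size is
kept in a local variable bounded by sizeof(var->Data) instead of
round-tripping through the shared var->DataSize field, so a concurrent
sysfs reader can neither observe a half-updated size nor trick the copy
into overflowing the 1024-byte buffer. The shape of the fix as a
minimal standalone sketch (hypothetical names, userspace analogue):

    #include <string.h>

    struct var {
            unsigned long data_size;        /* shared with other readers */
            unsigned char data[1024];
    };

    /* stand-in for efivar_entry_get(): clamps and updates *size */
    static int get_variable(unsigned long *size, unsigned char *buf)
    {
            const unsigned char payload[] = "example";

            if (*size > sizeof(payload))
                    *size = sizeof(payload);
            memcpy(buf, payload, *size);
            return 0;
    }

    static int read_var(struct var *v, unsigned char *out)
    {
            unsigned long size = sizeof(v->data);   /* local, never shared */
            int ret = get_variable(&size, v->data);

            v->data_size = size;                    /* publish once, after */
            if (ret)
                    return -1;
            memcpy(out, v->data, v->data_size);
            return 0;
    }
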
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index d799927d3a5d..96d557b97f5c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -346,8 +346,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
+ router.ddc_valid = false;
+ router.cd_valid = false;
+ for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+- uint8_t grph_obj_type=
+- grph_obj_type =
++ uint8_t grph_obj_type =
+ (le16_to_cpu(path->usGraphicObjIds[j]) &
+ OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index cbad1926cec1..00169c9eb3ee 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -39,6 +39,7 @@
+ #include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/iommu.h>
++#include <linux/limits.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/iommu_table.h>
+
+@@ -138,6 +139,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
+
+ BUG_ON(dev->is_virtfn);
+
++ /*
++ * Ignore devices that have a domain number higher than what can
++ * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
++ */
++ if (pci_domain_nr(dev->bus) > U16_MAX)
++ return NULL;
++
+ /* Only generate path[] for device addition event */
+ if (event == BUS_NOTIFY_ADD_DEVICE)
+ for (tmp = dev; tmp; tmp = tmp->bus->self)
+@@ -438,12 +446,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+
+ /* Check for NUL termination within the designated length */
+ if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
+- WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
++ pr_warn(FW_BUG
+ "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return -EINVAL;
+ }
+ pr_info("ANDD device: %x name: %s\n", andd->device_number,
+@@ -469,14 +478,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
+ return 0;
+ }
+ }
+- WARN_TAINT(
+- 1, TAINT_FIRMWARE_WORKAROUND,
++ pr_warn(FW_BUG
+ "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+- drhd->reg_base_addr,
++ rhsa->base_address,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+
+ return 0;
+ }
+@@ -822,14 +831,14 @@ int __init dmar_table_init(void)
+
+ static void warn_invalid_dmar(u64 addr, const char *message)
+ {
+- WARN_TAINT_ONCE(
+- 1, TAINT_FIRMWARE_WORKAROUND,
++ pr_warn_once(FW_BUG
+ "Your BIOS is broken; DMAR reported at address %llx%s!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ addr, message,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ }
+
+ static int __ref
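
The three dmar.c hunks above share one transformation: WARN_TAINT() and
WARN_TAINT_ONCE() print a full WARN() backtrace, which escalates these
firmware-quality complaints into a crash when panic_on_warn is set. The
replacement logs the same text at FW_BUG severity and taints explicitly
(both APIs are real; the point is that this pair does not trip
panic_on_warn):

    pr_warn(FW_BUG "Your BIOS is broken; ...\n");           /* no WARN() */
    add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); /* still taint */

The RHSA hunk additionally fixes the reported address: it now prints
rhsa->base_address, the address the RHSA actually referred to, rather
than drhd->reg_base_addr from the iterator that has already run past
the list.
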
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index a2005b82ec8f..ed6cb3abf645 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3949,10 +3949,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+
+ /* we know that the this iommu should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+- if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
+- TAINT_FIRMWARE_WORKAROUND,
+- "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
++ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
++ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
++ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
++ }
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
+
+@@ -5016,8 +5017,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+ u64 phys = 0;
+
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+- if (pte)
+- phys = dma_pte_addr(pte);
++ if (pte && dma_pte_present(pte))
++ phys = dma_pte_addr(pte) +
++ (iova & (BIT_MASK(level_to_offset_bits(level) +
++ VTD_PAGE_SHIFT) - 1));
+
+ return phys;
+ }
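
Two fixes in one hunk here: intel_iommu_iova_to_phys() now ignores
non-present PTEs (dma_pte_present()), and it adds back the offset of
the iova inside the mapped page instead of returning only the page
base. Worked example of the added math, assuming 4 KiB base pages
(VTD_PAGE_SHIFT == 12) and a 2 MiB superpage at level 2, where
level_to_offset_bits(2) == 9:

    /* keep the low 9 + 12 = 21 bits of the iova */
    phys = dma_pte_addr(pte) + (iova & (BIT_MASK(21) - 1));

    /* e.g. iova 0x1234567 -> page base + 0x034567, not page base + 0 */
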
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 0615522933dc..41bd9186d383 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -74,11 +74,6 @@ struct arp_pkt {
+ };
+ #pragma pack()
+
+-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
+-{
+- return (struct arp_pkt *)skb_network_header(skb);
+-}
+-
+ /* Forward declaration */
+ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match);
+@@ -577,10 +572,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
+ spin_unlock(&bond->mode_lock);
+ }
+
+-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
++static struct slave *rlb_choose_channel(struct sk_buff *skb,
++ struct bonding *bond,
++ const struct arp_pkt *arp)
+ {
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+- struct arp_pkt *arp = arp_pkt(skb);
+ struct slave *assigned_slave, *curr_active_slave;
+ struct rlb_client_info *client_info;
+ u32 hash_index = 0;
+@@ -677,8 +673,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
+ */
+ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ {
+- struct arp_pkt *arp = arp_pkt(skb);
+ struct slave *tx_slave = NULL;
++ struct arp_pkt *arp;
++
++ if (!pskb_network_may_pull(skb, sizeof(*arp)))
++ return NULL;
++ arp = (struct arp_pkt *)skb_network_header(skb);
+
+ /* Don't modify or load balance ARPs that do not originate locally
+ * (e.g.,arrive via a bridge).
+@@ -688,7 +688,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+ /* the arp must be sent on the selected rx channel */
+- tx_slave = rlb_choose_channel(skb, bond);
++ tx_slave = rlb_choose_channel(skb, bond, arp);
+ if (tx_slave)
+ ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
+ netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
+@@ -698,7 +698,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ * When the arp reply is received the entry will be updated
+ * with the correct unicast address of the client.
+ */
+- rlb_choose_channel(skb, bond);
++ rlb_choose_channel(skb, bond, arp);
+
+ /* The ARP reply packets must be delayed so that
+ * they can cancel out the influence of the ARP request.
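
The bonding fix above is the classic validate-before-cast rule for skb
parsing: rlb_arp_xmit() used to cast skb_network_header() straight to
struct arp_pkt, so a runt packet could be read past its end. Now the
header is proven present first, and the checked pointer is passed down
to rlb_choose_channel() instead of being re-derived there:

    struct arp_pkt *arp;

    if (!pskb_network_may_pull(skb, sizeof(*arp)))
            return NULL;    /* too short: don't parse it at all */
    arp = (struct arp_pkt *)skb_network_header(skb);

pskb_network_may_pull(skb, len) is the real helper; it is essentially
pskb_may_pull(skb, skb_network_offset(skb) + len), i.e. it linearizes
enough of the skb that len bytes past the network header are directly
addressable.
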
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 81282b811a6c..d91953eabfeb 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5310,13 +5310,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+ return -EINVAL;
+
+ if (netif_running(dev))
+- bnxt_close_nic(bp, false, false);
++ bnxt_close_nic(bp, true, false);
+
+ dev->mtu = new_mtu;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+- return bnxt_open_nic(bp, false, false);
++ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 0ee164d09f39..9c608211fcfd 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2510,15 +2510,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+ return -EINVAL;
+ }
+
+- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
++ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Rx coalesed usec exceeed hardware limiation");
+ return -EINVAL;
+ }
+
+- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
++ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+- pr_err("Rx coalesed usec exceeed hardware limiation");
++ pr_err("Tx coalesed usec exceeed hardware limiation");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index d94e151cff12..d4747caf1e7c 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -831,14 +831,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
+ {
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
++ unsigned long flags;
+ u16 status;
+
++ spin_lock_irqsave(&ks->statelock, flags);
+ /*this should be the first in IRQ handler */
+ ks_save_cmd_reg(ks);
+
+ status = ks_rdreg16(ks, KS_ISR);
+ if (unlikely(!status)) {
+ ks_restore_cmd_reg(ks);
++ spin_unlock_irqrestore(&ks->statelock, flags);
+ return IRQ_NONE;
+ }
+
+@@ -864,6 +867,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
+ ks->netdev->stats.rx_over_errors++;
+ /* this should be the last in IRQ handler*/
+ ks_restore_cmd_reg(ks);
++ spin_unlock_irqrestore(&ks->statelock, flags);
+ return IRQ_HANDLED;
+ }
+
+@@ -933,6 +937,7 @@ static int ks_net_stop(struct net_device *netdev)
+
+ /* shutdown RX/TX QMU */
+ ks_disable_qmu(ks);
++ ks_disable_int(ks);
+
+ /* set powermode to soft power down to save power */
+ ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+@@ -989,10 +994,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+ netdev_tx_t retv = NETDEV_TX_OK;
+ struct ks_net *ks = netdev_priv(netdev);
++ unsigned long flags;
+
+- disable_irq(netdev->irq);
+- ks_disable_int(ks);
+- spin_lock(&ks->statelock);
++ spin_lock_irqsave(&ks->statelock, flags);
+
+ /* Extra space are required:
+ * 4 byte for alignment, 4 for status/length, 4 for CRC
+@@ -1006,9 +1010,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ dev_kfree_skb(skb);
+ } else
+ retv = NETDEV_TX_BUSY;
+- spin_unlock(&ks->statelock);
+- ks_enable_int(ks);
+- enable_irq(netdev->irq);
++ spin_unlock_irqrestore(&ks->statelock, flags);
+ return retv;
+ }
+
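
The ks8851_mll change converts an ad-hoc exclusion scheme (disable_irq()
plus chip-level interrupt masking around a plain spin_lock()) into the
canonical pattern for state shared between process context and a hard
IRQ handler: one spinlock, taken with spin_lock_irqsave() on both
sides. A sketch of the pattern (kernel-style, names hypothetical):

    static DEFINE_SPINLOCK(statelock);

    static irqreturn_t my_irq(int irq, void *dev)       /* hardirq side */
    {
            unsigned long flags;

            spin_lock_irqsave(&statelock, flags);
            /* ... touch shared registers/state ... */
            spin_unlock_irqrestore(&statelock, flags);
            return IRQ_HANDLED;
    }

    static netdev_tx_t my_xmit(struct sk_buff *skb,     /* process side */
                               struct net_device *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&statelock, flags);
            /* same state; local IRQs are off, so the handler cannot
             * interrupt us on this CPU and deadlock on the lock */
            spin_unlock_irqrestore(&statelock, flags);
            return NETDEV_TX_OK;
    }

The xmit path no longer toggles the chip interrupt mask at all; instead
ks_net_stop() masks interrupts once via ks_disable_int().
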
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 142015af43db..835087597b82 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -251,6 +251,7 @@ acct:
+ } else {
+ kfree_skb(skb);
+ }
++ cond_resched();
+ }
+ }
+
+@@ -429,19 +430,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb,
+ struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+- /* In this mode we dont care about multicast and broadcast traffic */
+- if (is_multicast_ether_addr(ethh->h_dest)) {
+- pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
+- ntohs(skb->protocol));
+- kfree_skb(skb);
+- goto out;
+- }
+-
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+ * will have L2; which need to discarded and processed further
+ * in the net-ns of the main-device.
+ */
+ if (skb_mac_header_was_set(skb)) {
++ /* In this mode we dont care about
++ * multicast and broadcast traffic */
++ if (is_multicast_ether_addr(ethh->h_dest)) {
++ pr_debug_ratelimited(
++ "Dropped {multi|broad}cast of type=[%x]\n",
++ ntohs(skb->protocol));
++ kfree_skb(skb);
++ goto out;
++ }
++
+ skb_pull(skb, sizeof(*ethh));
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb_reset_network_header(skb);
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index ae02ce17c505..b4d5f53c97d3 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -145,7 +145,6 @@ static void ipvlan_uninit(struct net_device *dev)
+ static int ipvlan_open(struct net_device *dev)
+ {
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+- struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_addr *addr;
+
+ if (ipvlan->port->mode == IPVLAN_MODE_L3)
+@@ -156,7 +155,7 @@ static int ipvlan_open(struct net_device *dev)
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_add(ipvlan, addr);
+
+- return dev_uc_add(phy_dev, phy_dev->dev_addr);
++ return 0;
+ }
+
+ static int ipvlan_stop(struct net_device *dev)
+@@ -168,8 +167,6 @@ static int ipvlan_stop(struct net_device *dev)
+ dev_uc_unsync(phy_dev, dev);
+ dev_mc_unsync(phy_dev, dev);
+
+- dev_uc_del(phy_dev, phy_dev->dev_addr);
+-
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
+ ipvlan_ht_addr_del(addr);
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index bd49303f7db2..84767722065a 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -306,6 +306,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
+ rcu_read_unlock();
+
+ kfree_skb(skb);
++
++ cond_resched();
+ }
+ }
+
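
The ipvlan and macvlan hunks add the same safety valve: both drivers
drain a multicast/broadcast backlog inside a workqueue item, and a
large enough backlog could monopolize the CPU. A cond_resched() per
processed skb inserts a voluntary preemption point (sketch; the
backlog/deliver names are hypothetical):

    while ((skb = skb_dequeue(&backlog)) != NULL) {
            deliver_or_free(skb);   /* per-skb work, ends in kfree_skb() */
            cond_resched();         /* yield if the scheduler wants us to */
    }

The ipvlan_core.c hunk also moves the multicast drop inside the
skb_mac_header_was_set() branch and demotes the message from
pr_warn_ratelimited() to pr_debug_ratelimited(), so unprivileged
senders can no longer spam the log.
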
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index ddceed3c5a4a..a516470da015 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ register struct cstate *cs = lcs->next;
+ register unsigned long deltaS, deltaA;
+ register short changes = 0;
+- int hlen;
++ int nlen, hlen;
+ unsigned char new_seq[16];
+ register unsigned char *cp = new_seq;
+ struct iphdr *ip;
+@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ return isize;
+
+ ip = (struct iphdr *) icp;
++ if (ip->version != 4 || ip->ihl < 5)
++ return isize;
+
+ /* Bail if this packet isn't TCP, or is an IP fragment */
+ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
+@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
+ comp->sls_o_tcp++;
+ return isize;
+ }
+- /* Extract TCP header */
++ nlen = ip->ihl * 4;
++ if (isize < nlen + sizeof(*th))
++ return isize;
+
+- th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
+- hlen = ip->ihl*4 + th->doff*4;
++ th = (struct tcphdr *)(icp + nlen);
++ if (th->doff < sizeof(struct tcphdr) / 4)
++ return isize;
++ hlen = nlen + th->doff * 4;
+
+ /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
+ * some other control bit is set). Also uncompressible if
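
The slhc hunk is a straight header-validation fix: the compressor used
to trust ip->ihl and th->doff from an unchecked buffer, so a malformed
frame could push the TCP header pointer past the packet. The added
checks, as a standalone paraphrase over raw wire bytes (hypothetical
helper, illustration only):

    #include <stdint.h>

    /* Return the combined IP+TCP header length, or -1 meaning
     * "don't compress, pass the packet through unmodified". */
    static int tcpip_hlen(const uint8_t *p, int isize)
    {
            int nlen, doff;

            if (isize < 20 || (p[0] >> 4) != 4 || (p[0] & 0x0f) < 5)
                    return -1;              /* not a sane IPv4 header */
            nlen = (p[0] & 0x0f) * 4;       /* ihl trusted only now */
            if (isize < nlen + 20)
                    return -1;              /* TCP header must fit too */
            doff = p[nlen + 12] >> 4;       /* TCP data offset nibble */
            if (doff < 5)
                    return -1;              /* below minimum TCP header */
            return nlen + doff * 4;
    }

Every length field is checked against the buffer before it is used to
index into it, mirroring the order of the checks added above.
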
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 285d376f53ef..e51fb7cb7728 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2169,6 +2169,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
+ [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
++ [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
++ [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
+ };
+
+ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 23e299c86b81..27e9c089b2fc 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3328,14 +3328,20 @@ static void r8153_init(struct r8152 *tp)
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+ break;
++
+ msleep(20);
++ if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ break;
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN)
+ break;
++
+ msleep(20);
++ if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ break;
+ }
+
+ usb_disable_lpm(tp->udev);
+diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
+index 9275f9c3f869..14ce8ead5941 100644
+--- a/drivers/net/wireless/mwifiex/tdls.c
++++ b/drivers/net/wireless/mwifiex/tdls.c
+@@ -910,59 +910,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+
+ switch (*pos) {
+ case WLAN_EID_SUPP_RATES:
++ if (pos[1] > 32)
++ return;
+ sta_ptr->tdls_cap.rates_len = pos[1];
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ break;
+
+ case WLAN_EID_EXT_SUPP_RATES:
++ if (pos[1] > 32)
++ return;
+ basic = sta_ptr->tdls_cap.rates_len;
++ if (pos[1] > 32 - basic)
++ return;
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+ sta_ptr->tdls_cap.rates_len += pos[1];
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+- memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
++ if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
++ return;
++ if (pos[1] != sizeof(struct ieee80211_ht_cap))
++ return;
++ /* copy the ie's value into ht_capb*/
++ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
+ sizeof(struct ieee80211_ht_cap));
+ sta_ptr->is_11n_enabled = 1;
+ break;
+ case WLAN_EID_HT_OPERATION:
+- memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
++ if (pos > end -
++ sizeof(struct ieee80211_ht_operation) - 2)
++ return;
++ if (pos[1] != sizeof(struct ieee80211_ht_operation))
++ return;
++ /* copy the ie's value into ht_oper*/
++ memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
+ sizeof(struct ieee80211_ht_operation));
+ break;
+ case WLAN_EID_BSS_COEX_2040:
++ if (pos > end - 3)
++ return;
++ if (pos[1] != 1)
++ return;
+ sta_ptr->tdls_cap.coex_2040 = pos[2];
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
++ if (pos > end - sizeof(struct ieee_types_header))
++ return;
++ if (pos[1] < sizeof(struct ieee_types_header))
++ return;
++ if (pos[1] > 8)
++ return;
+ memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], 8));
+ break;
+ case WLAN_EID_RSN:
++ if (pos > end - sizeof(struct ieee_types_header))
++ return;
++ if (pos[1] < sizeof(struct ieee_types_header))
++ return;
++ if (pos[1] > IEEE_MAX_IE_SIZE -
++ sizeof(struct ieee_types_header))
++ return;
+ memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
+ sizeof(struct ieee_types_header)));
+ break;
+ case WLAN_EID_QOS_CAPA:
++ if (pos > end - 3)
++ return;
++ if (pos[1] != 1)
++ return;
+ sta_ptr->tdls_cap.qos_info = pos[2];
+ break;
+ case WLAN_EID_VHT_OPERATION:
+- if (priv->adapter->is_hw_11ac_capable)
+- memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
++ if (priv->adapter->is_hw_11ac_capable) {
++ if (pos > end -
++ sizeof(struct ieee80211_vht_operation) - 2)
++ return;
++ if (pos[1] !=
++ sizeof(struct ieee80211_vht_operation))
++ return;
++ /* copy the ie's value into vhtoper*/
++ memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
+ sizeof(struct ieee80211_vht_operation));
++ }
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (priv->adapter->is_hw_11ac_capable) {
+- memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
++ if (pos > end -
++ sizeof(struct ieee80211_vht_cap) - 2)
++ return;
++ if (pos[1] != sizeof(struct ieee80211_vht_cap))
++ return;
++ /* copy the ie's value into vhtcap*/
++ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
+ sizeof(struct ieee80211_vht_cap));
+ sta_ptr->is_11ac_enabled = 1;
+ }
+ break;
+ case WLAN_EID_AID:
+- if (priv->adapter->is_hw_11ac_capable)
++ if (priv->adapter->is_hw_11ac_capable) {
++ if (pos > end - 4)
++ return;
++ if (pos[1] != 2)
++ return;
+ sta_ptr->tdls_cap.aid =
+ le16_to_cpu(*(__le16 *)(pos + 2));
++ }
++ break;
+ default:
+ break;
+ }
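
All of the mwifiex checks above instantiate one rule for IEEE 802.11
information elements: an IE is a (id, len, value) TLV, so before
copying, verify that the element including its 2-byte header lies
within the frame, that len matches or is clamped to the destination
size, and that the copy starts at pos + 2 (the value) rather than at
pos, which had shifted the copied contents by the 2-byte header. The
rates arrays are additionally capped at their 32-byte storage, with the
extended rates bounded by 32 minus what the basic rates already used.
The generic bounded walk, as a sketch (MY_ELEMENT_ID and struct
my_element are hypothetical):

    struct my_element elem;                 /* hypothetical destination */
    const u8 *pos = ies, *end = ies + ies_len;

    while (end - pos >= 2) {                /* room for id + len */
            u8 id  = pos[0];
            u8 len = pos[1];

            if (len > end - pos - 2)
                    break;                  /* truncated element: stop */

            if (id == MY_ELEMENT_ID && len == sizeof(elem))
                    memcpy(&elem, pos + 2, len);    /* value, not header */

            pos += 2 + len;
    }
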
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 063fdfcf8275..32226dd19932 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1245,7 +1245,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
+ if (!(*opened & FILE_OPENED))
+ return finish_no_open(file, d);
+ dput(d);
+- return 0;
++ return excl && (flags & O_CREAT) ? -EEXIST : 0;
+ }
+
+ BUG_ON(d != NULL);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6457023d8fac..3233e5ac9774 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1041,8 +1041,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+ /* For undo access buffer must have data copied */
+ if (undo && !jh->b_committed_data)
+ goto out;
+- if (jh->b_transaction != handle->h_transaction &&
+- jh->b_next_transaction != handle->h_transaction)
++ if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
++ READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
+ goto out;
+ /*
+ * There are two reasons for the barrier here:
+@@ -2458,8 +2458,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
+ * our jh reference and thus __jbd2_journal_file_buffer() must not
+ * take a new one.
+ */
+- jh->b_transaction = jh->b_next_transaction;
+- jh->b_next_transaction = NULL;
++ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
++ WRITE_ONCE(jh->b_next_transaction, NULL);
+ if (buffer_freed(bh))
+ jlist = BJ_Forget;
+ else if (jh->b_modified)
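
The jbd2 hunk annotates a deliberate lockless read:
jbd2_write_access_granted() inspects jh->b_transaction and
jh->b_next_transaction without taking the journal head lock, while
__jbd2_journal_refile_buffer() updates them under it. Pairing
READ_ONCE() on the lockless side with WRITE_ONCE() on the writer side
prevents the compiler from tearing or refetching the accesses and marks
the race as intentional; neither macro adds any memory ordering. The
general shape (hypothetical names, pattern only):

    /* writer, under the lock */
    WRITE_ONCE(obj->owner, obj->next_owner);
    WRITE_ONCE(obj->next_owner, NULL);

    /* lockless reader */
    if (READ_ONCE(obj->owner) != me && READ_ONCE(obj->next_owner) != me)
            goto out;
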
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 2ac3d2527ad2..21e5fcbcb227 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -657,8 +657,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
+ goto out_label_free;
+ }
+
+- array = kmap(page);
+-
+ status = nfs_readdir_alloc_pages(pages, array_size);
+ if (status < 0)
+ goto out_release_array;
+diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
+index 59160de702b6..99ae8ac65eca 100644
+--- a/include/net/fib_rules.h
++++ b/include/net/fib_rules.h
+@@ -85,6 +85,7 @@ struct fib_rules_ops {
+ [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
+ [FRA_PRIORITY] = { .type = NLA_U32 }, \
+ [FRA_FWMARK] = { .type = NLA_U32 }, \
++ [FRA_TUN_ID] = { .type = NLA_U64 }, \
+ [FRA_FWMASK] = { .type = NLA_U32 }, \
+ [FRA_TABLE] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 7e4a4b199a11..90a94e54db09 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -373,27 +373,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
++ int sigpending;
+
+ /*
+ * Protect access to @t credentials. This can go away when all
+ * callers hold rcu read lock.
++ *
++ * NOTE! A pending signal will hold on to the user refcount,
++ * and we get/put the refcount only when the sigpending count
++ * changes from/to zero.
+ */
+ rcu_read_lock();
+- user = get_uid(__task_cred(t)->user);
+- atomic_inc(&user->sigpending);
++ user = __task_cred(t)->user;
++ sigpending = atomic_inc_return(&user->sigpending);
++ if (sigpending == 1)
++ get_uid(user);
+ rcu_read_unlock();
+
+- if (override_rlimit ||
+- atomic_read(&user->sigpending) <=
+- task_rlimit(t, RLIMIT_SIGPENDING)) {
++ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+
+ if (unlikely(q == NULL)) {
+- atomic_dec(&user->sigpending);
+- free_uid(user);
++ if (atomic_dec_and_test(&user->sigpending))
++ free_uid(user);
+ } else {
+ INIT_LIST_HEAD(&q->list);
+ q->flags = 0;
+@@ -407,8 +412,8 @@ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+ return;
+- atomic_dec(&q->user->sigpending);
+- free_uid(q->user);
++ if (atomic_dec_and_test(&q->user->sigpending))
++ free_uid(q->user);
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
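
The signal.c change turns a per-signal uid reference into a reference
that piggybacks on the per-user sigpending counter: the uid is pinned
only on the counter's 0 -> 1 transition and released on the 1 -> 0
transition, so a flood of queued signals no longer bounces the uid
refcount on every signal (the rcu_read_lock() still only protects the
cred lookup itself). The pattern in isolation:

    if (atomic_inc_return(&user->sigpending) == 1)
            get_uid(user);          /* first pending signal pins the uid */

    /* ... later, per freed signal ... */
    if (atomic_dec_and_test(&user->sigpending))
            free_uid(user);         /* last pending signal unpins it */
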
+diff --git a/mm/slub.c b/mm/slub.c
+index 8f5dcb0ac24f..cb9069ccf67c 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2931,6 +2931,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ void *object = c->freelist;
+
+ if (unlikely(!object)) {
++ /*
++ * We may have removed an object from c->freelist using
++ * the fastpath in the previous iteration; in that case,
++ * c->tid has not been bumped yet.
++ * Since ___slab_alloc() may reenable interrupts while
++ * allocating memory, we should bump c->tid now.
++ */
++ c->tid = next_tid(c->tid);
++
+ /*
+ * Invoking slow path likely have side-effect
+ * of re-populating per CPU c->freelist
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 912d9c36fb1c..caea5bb38d4b 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -135,7 +135,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
+ * Returns 0 on success, a negative error code otherwise.
+ */
+ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+- int max_if_num)
++ unsigned int max_if_num)
+ {
+ void *data_ptr;
+ size_t old_size;
+@@ -155,10 +155,8 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+ orig_node->bat_iv.bcast_own = data_ptr;
+
+ data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
+- if (!data_ptr) {
+- kfree(orig_node->bat_iv.bcast_own);
++ if (!data_ptr)
+ goto unlock;
+- }
+
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+ (max_if_num - 1) * sizeof(u8));
+@@ -183,9 +181,11 @@ unlock:
+ * Returns 0 on success, a negative error code otherwise.
+ */
+ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+- int max_if_num, int del_if_num)
++ unsigned int max_if_num,
++ unsigned int del_if_num)
+ {
+- int chunk_size, ret = -ENOMEM, if_offset;
++ int ret = -ENOMEM;
++ size_t chunk_size, if_offset;
+ void *data_ptr = NULL;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+@@ -203,8 +203,9 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+ memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+ /* copy second part */
++ if_offset = (del_if_num + 1) * chunk_size;
+ memcpy((char *)data_ptr + del_if_num * chunk_size,
+- orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
++ (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
+ (max_if_num - del_if_num) * chunk_size);
+
+ free_bcast_own:
+@@ -252,7 +253,8 @@ static struct batadv_orig_node *
+ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
+ {
+ struct batadv_orig_node *orig_node;
+- int size, hash_added;
++ int hash_added;
++ size_t size;
+
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
+ if (orig_node)
+@@ -314,14 +316,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+ unsigned char *ogm_buff;
+ u32 random_seqno;
+
++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+
+ hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+ ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+- if (!ogm_buff)
++ if (!ogm_buff) {
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ return -ENOMEM;
++ }
+
+ hard_iface->bat_iv.ogm_buff = ogm_buff;
+
+@@ -333,36 +339,60 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+ batadv_ogm_packet->reserved = 0;
+ batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
++
+ return 0;
+ }
+
+ static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
+ {
++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++
+ kfree(hard_iface->bat_iv.ogm_buff);
+ hard_iface->bat_iv.ogm_buff = NULL;
++
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+
+ static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
+ {
+ struct batadv_ogm_packet *batadv_ogm_packet;
+- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
++ void *ogm_buff;
+
+- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++
++ ogm_buff = hard_iface->bat_iv.ogm_buff;
++ if (!ogm_buff)
++ goto unlock;
++
++ batadv_ogm_packet = ogm_buff;
+ ether_addr_copy(batadv_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr);
+ ether_addr_copy(batadv_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr);
++
++unlock:
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+
+ static void
+ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
+ {
+ struct batadv_ogm_packet *batadv_ogm_packet;
+- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
++ void *ogm_buff;
+
+- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++
++ ogm_buff = hard_iface->bat_iv.ogm_buff;
++ if (!ogm_buff)
++ goto unlock;
++
++ batadv_ogm_packet = ogm_buff;
+ batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
+ batadv_ogm_packet->ttl = BATADV_TTL;
++
++unlock:
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+
+ /* when do we schedule our own ogm to be sent */
+@@ -395,14 +425,19 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
+ return new_tq;
+ }
+
+-/* is there another aggregated packet here? */
+-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+- __be16 tvlv_len)
++static bool
++batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
++ const struct batadv_ogm_packet *ogm_packet)
+ {
+ int next_buff_pos = 0;
+
+- next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+- next_buff_pos += ntohs(tvlv_len);
++ /* check if there is enough space for the header */
++ next_buff_pos += buff_pos + sizeof(*ogm_packet);
++ if (next_buff_pos > packet_len)
++ return false;
++
++ /* check if there is enough space for the optional TVLV */
++ next_buff_pos += ntohs(ogm_packet->tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+@@ -430,7 +465,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+
+ /* adjust all flags and log packets */
+ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+- batadv_ogm_packet->tvlv_len)) {
++ batadv_ogm_packet)) {
+ /* we might have aggregated direct link packets with an
+ * ordinary base packet
+ */
+@@ -871,7 +906,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
+ u32 i;
+ size_t word_index;
+ u8 *w;
+- int if_num;
++ unsigned int if_num;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+@@ -892,7 +927,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
+ }
+ }
+
+-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
++/**
++ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
++ * @hard_iface: interface whose ogm buffer should be transmitted
++ */
++static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
+ {
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
+@@ -903,6 +942,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+ u16 tvlv_len = 0;
+ unsigned long send_time;
+
++ lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
++
++ /* interface already disabled by batadv_iv_ogm_iface_disable */
++ if (!*ogm_buff)
++ return;
++
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (hard_iface == primary_if) {
+@@ -954,6 +999,17 @@ out:
+ batadv_hardif_free_ref(primary_if);
+ }
+
++static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
++{
++ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
++ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
++ return;
++
++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++ batadv_iv_ogm_schedule_buff(hard_iface);
++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
++}
++
+ /**
+ * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * originator
+@@ -982,7 +1038,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
+ struct batadv_neigh_node *tmp_neigh_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_orig_node *orig_node_tmp;
+- int if_num;
++ unsigned int if_num;
+ u8 sum_orig, sum_neigh;
+ u8 *neigh_addr;
+ u8 tq_avg;
+@@ -1140,9 +1196,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ u8 total_count;
+ u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+ unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
+- int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
++ int if_num, ret = 0;
++ unsigned int tq_asym_penalty, inv_asym_penalty;
+ unsigned int combined_tq;
+- int tq_iface_penalty;
++ unsigned int tq_iface_penalty;
+
+ /* find corresponding one hop neighbor */
+ rcu_read_lock();
+@@ -1179,7 +1236,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ orig_node->last_seen = jiffies;
+
+ /* find packet count of corresponding one hop neighbor */
+- spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
++ spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+ if_num = if_incoming->if_num;
+ orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
+@@ -1189,7 +1246,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ } else {
+ neigh_rq_count = 0;
+ }
+- spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
++ spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+
+ /* pay attention to not get a value bigger than 100 % */
+ if (orig_eq_count > neigh_rq_count)
+@@ -1646,9 +1703,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
+
+ if (is_my_orig) {
+ unsigned long *word;
+- int offset;
++ size_t offset;
+ s32 bit_pos;
+- s16 if_num;
++ unsigned int if_num;
+ u8 *weight;
+
+ orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+@@ -1748,7 +1805,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
+
+ /* unpack the aggregated packets and process them one by one */
+ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+- ogm_packet->tvlv_len)) {
++ ogm_packet)) {
+ batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM_HLEN;
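
The recurring batman-adv fix above is a two-step bounds check for
aggregated OGMs: batadv_iv_ogm_aggr_packet() used to read tvlv_len out
of a header that might itself extend past the received buffer. The
reworked helper first proves the fixed-size header fits, and only then
trusts the length field inside it:

    next = buff_pos + sizeof(*ogm_packet); /* fixed header first */
    if (next > packet_len)
            return false;                  /* can't even read tvlv_len */
    next += ntohs(ogm_packet->tvlv_len);   /* field is now safe to read */
    return next <= packet_len && next <= BATADV_MAX_AGGREGATION_BYTES;

The same file also serializes every access to the per-interface OGM
buffer behind bat_iv.ogm_buff_mutex, widens the interface counters
(if_num and friends) to unsigned types, stops freeing a bcast_own
buffer that orig_node still references on the orig_add_if error path,
and makes batadv_iv_ogm_calc_tq() take the counter lock of the node
whose counters it actually reads (orig_neigh_node, not orig_node).
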
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index c5208136e3fc..cea7fdeac5aa 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -129,7 +129,19 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
+ /* finally deinitialize the claim */
+ static void batadv_claim_release(struct batadv_bla_claim *claim)
+ {
+- batadv_backbone_gw_free_ref(claim->backbone_gw);
++ struct batadv_bla_backbone_gw *old_backbone_gw;
++
++ spin_lock_bh(&claim->backbone_lock);
++ old_backbone_gw = claim->backbone_gw;
++ claim->backbone_gw = NULL;
++ spin_unlock_bh(&claim->backbone_lock);
++
++ spin_lock_bh(&old_backbone_gw->crc_lock);
++ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
++ spin_unlock_bh(&old_backbone_gw->crc_lock);
++
++ batadv_backbone_gw_free_ref(old_backbone_gw);
++
+ kfree_rcu(claim, rcu);
+ }
+
+@@ -256,7 +268,9 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
+ }
+
+ /* all claims gone, initialize CRC */
++ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_gw->crc = BATADV_BLA_CRC_INIT;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+ }
+
+ /**
+@@ -352,9 +366,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+ break;
+ }
+
+- if (vid & BATADV_VLAN_HAS_TAG)
++ if (vid & BATADV_VLAN_HAS_TAG) {
+ skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
++ if (!skb)
++ goto out;
++ }
+
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, soft_iface);
+@@ -404,6 +421,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+ entry->lasttime = jiffies;
+ entry->crc = BATADV_BLA_CRC_INIT;
+ entry->bat_priv = bat_priv;
++ spin_lock_init(&entry->crc_lock);
+ atomic_set(&entry->request_sent, 0);
+ atomic_set(&entry->wait_periods, 0);
+ ether_addr_copy(entry->orig, orig);
+@@ -553,7 +571,9 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ __be16 crc;
+
+ memcpy(mac, batadv_announce_mac, 4);
++ spin_lock_bh(&backbone_gw->crc_lock);
+ crc = htons(backbone_gw->crc);
++ spin_unlock_bh(&backbone_gw->crc_lock);
+ memcpy(&mac[4], &crc, 2);
+
+ batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
+@@ -571,8 +591,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ const u8 *mac, const unsigned short vid,
+ struct batadv_bla_backbone_gw *backbone_gw)
+ {
++ struct batadv_bla_backbone_gw *old_backbone_gw;
+ struct batadv_bla_claim *claim;
+ struct batadv_bla_claim search_claim;
++ bool remove_crc = false;
+ int hash_added;
+
+ ether_addr_copy(search_claim.addr, mac);
+@@ -586,8 +608,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ return;
+
+ ether_addr_copy(claim->addr, mac);
++ spin_lock_init(&claim->backbone_lock);
+ claim->vid = vid;
+ claim->lasttime = jiffies;
++ atomic_inc(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
+
+ atomic_set(&claim->refcount, 2);
+@@ -614,20 +638,55 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ "bla_add_claim(): changing ownership for %pM, vid %d\n",
+ mac, BATADV_PRINT_VID(vid));
+
+- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+- batadv_backbone_gw_free_ref(claim->backbone_gw);
++ remove_crc = true;
+ }
+- /* set (new) backbone gw */
++
++ /* replace backbone_gw atomically and adjust reference counters */
++ spin_lock_bh(&claim->backbone_lock);
++ old_backbone_gw = claim->backbone_gw;
+ atomic_inc(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
++ spin_unlock_bh(&claim->backbone_lock);
++
++ if (remove_crc) {
++ /* remove claim address from old backbone_gw */
++ spin_lock_bh(&old_backbone_gw->crc_lock);
++ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
++ spin_unlock_bh(&old_backbone_gw->crc_lock);
++ }
+
++ batadv_backbone_gw_free_ref(old_backbone_gw);
++
++ /* add claim address to new backbone_gw */
++ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
++ spin_unlock_bh(&backbone_gw->crc_lock);
+ backbone_gw->lasttime = jiffies;
+
+ claim_free_ref:
+ batadv_claim_free_ref(claim);
+ }
+
++/**
++ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
++ * claim
++ * @claim: claim whose backbone_gw should be returned
++ *
++ * Return: valid reference to claim::backbone_gw
++ */
++static struct batadv_bla_backbone_gw *
++batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
++{
++ struct batadv_bla_backbone_gw *backbone_gw;
++
++ spin_lock_bh(&claim->backbone_lock);
++ backbone_gw = claim->backbone_gw;
++ atomic_inc(&backbone_gw->refcount);
++ spin_unlock_bh(&claim->backbone_lock);
++
++ return backbone_gw;
++}
++
+ /* Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+@@ -635,6 +694,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+ const u8 *mac, const unsigned short vid)
+ {
+ struct batadv_bla_claim search_claim, *claim;
++ struct batadv_bla_claim *claim_removed_entry;
++ struct hlist_node *claim_removed_node;
+
+ ether_addr_copy(search_claim.addr, mac);
+ search_claim.vid = vid;
+@@ -645,12 +706,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
+ mac, BATADV_PRINT_VID(vid));
+
+- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
+- batadv_choose_claim, claim);
+- batadv_claim_free_ref(claim); /* reference from the hash is gone */
++ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
++ batadv_compare_claim,
++ batadv_choose_claim, claim);
++ if (!claim_removed_node)
++ goto free_claim;
+
+- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
++ /* reference from the hash is gone */
++ claim_removed_entry = hlist_entry(claim_removed_node,
++ struct batadv_bla_claim, hash_entry);
++ batadv_claim_free_ref(claim_removed_entry);
+
++free_claim:
+ /* don't need the reference from hash_find() anymore */
+ batadv_claim_free_ref(claim);
+ }
+@@ -660,7 +727,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+ u8 *backbone_addr, unsigned short vid)
+ {
+ struct batadv_bla_backbone_gw *backbone_gw;
+- u16 crc;
++ u16 backbone_crc, crc;
+
+ if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
+ return 0;
+@@ -679,12 +746,16 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+ "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
+ BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
+
+- if (backbone_gw->crc != crc) {
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
++
++ if (backbone_crc != crc) {
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
+ backbone_gw->orig,
+ BATADV_PRINT_VID(backbone_gw->vid),
+- backbone_gw->crc, crc);
++ backbone_crc, crc);
+
+ batadv_bla_send_request(backbone_gw);
+ } else {
+@@ -1056,6 +1127,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ int now)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim *claim;
+ struct hlist_head *head;
+ struct batadv_hashtable *hash;
+@@ -1070,14 +1142,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ if (now)
+ goto purge_now;
+- if (!batadv_compare_eth(claim->backbone_gw->orig,
++
++ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+- continue;
++ goto skip;
++
+ if (!batadv_has_timed_out(claim->lasttime,
+ BATADV_BLA_CLAIM_TIMEOUT))
+- continue;
++ goto skip;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_purge_claims(): %pM, vid %d, time out\n",
+@@ -1085,8 +1160,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+
+ purge_now:
+ batadv_handle_unclaim(bat_priv, primary_if,
+- claim->backbone_gw->orig,
++ backbone_gw->orig,
+ claim->addr, claim->vid);
++skip:
++ batadv_backbone_gw_free_ref(backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+@@ -1470,9 +1547,11 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
+ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, bool is_bcast)
+ {
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct ethhdr *ethhdr;
+ struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
++ bool own_claim;
+ int ret;
+
+ ethhdr = eth_hdr(skb);
+@@ -1504,8 +1583,12 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ }
+
+ /* if it is our own claim ... */
+- if (batadv_compare_eth(claim->backbone_gw->orig,
+- primary_if->net_dev->dev_addr)) {
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++ own_claim = batadv_compare_eth(backbone_gw->orig,
++ primary_if->net_dev->dev_addr);
++ batadv_backbone_gw_free_ref(backbone_gw);
++
++ if (own_claim) {
+ /* ... allow it in any case */
+ claim->lasttime = jiffies;
+ goto allow;
+@@ -1568,7 +1651,9 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ {
+ struct ethhdr *ethhdr;
+ struct batadv_bla_claim search_claim, *claim = NULL;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_hard_iface *primary_if;
++ bool client_roamed;
+ int ret = 0;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+@@ -1598,8 +1683,12 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ goto allow;
+
+ /* check if we are responsible. */
+- if (batadv_compare_eth(claim->backbone_gw->orig,
+- primary_if->net_dev->dev_addr)) {
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++ client_roamed = batadv_compare_eth(backbone_gw->orig,
++ primary_if->net_dev->dev_addr);
++ batadv_backbone_gw_free_ref(backbone_gw);
++
++ if (client_roamed) {
+ /* if yes, the client has roamed and we have
+ * to unclaim it.
+ */
+@@ -1652,9 +1741,11 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+ struct net_device *net_dev = (struct net_device *)seq->private;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
++ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim *claim;
+ struct batadv_hard_iface *primary_if;
+ struct hlist_head *head;
++ u16 backbone_crc;
+ u32 i;
+ bool is_own;
+ u8 *primary_addr;
+@@ -1675,13 +1766,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
+- is_own = batadv_compare_eth(claim->backbone_gw->orig,
++ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
++
++ is_own = batadv_compare_eth(backbone_gw->orig,
+ primary_addr);
++
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
+ seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
+ claim->addr, BATADV_PRINT_VID(claim->vid),
+- claim->backbone_gw->orig,
++ backbone_gw->orig,
+ (is_own ? 'x' : ' '),
+- claim->backbone_gw->crc);
++ backbone_crc);
++
++ batadv_backbone_gw_free_ref(backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+@@ -1700,6 +1799,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+ struct batadv_hard_iface *primary_if;
+ struct hlist_head *head;
+ int secs, msecs;
++ u16 backbone_crc;
+ u32 i;
+ bool is_own;
+ u8 *primary_addr;
+@@ -1730,10 +1830,14 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+ if (is_own)
+ continue;
+
++ spin_lock_bh(&backbone_gw->crc_lock);
++ backbone_crc = backbone_gw->crc;
++ spin_unlock_bh(&backbone_gw->crc_lock);
++
+ seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
+ backbone_gw->orig,
+ BATADV_PRINT_VID(backbone_gw->vid), secs,
+- msecs, backbone_gw->crc);
++ msecs, backbone_crc);
+ }
+ rcu_read_unlock();
+ }
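
The bridge loop avoidance hunks close two races around
claim->backbone_gw: the pointer can be swapped while readers hold only
RCU, so the new batadv_bla_claim_get_backbone_gw() takes
claim->backbone_lock, grabs a refcount, and hands the caller a pointer
it owns; and the backbone CRC is now only read or xor-updated under the
new crc_lock. The getter pattern in isolation (hypothetical names):

    static struct obj *get_ref(struct holder *h)
    {
            struct obj *o;

            spin_lock_bh(&h->lock);
            o = h->ptr;                 /* can't change while locked */
            atomic_inc(&o->refcount);   /* pin it for the caller */
            spin_unlock_bh(&h->lock);
            return o;                   /* caller must drop the ref */
    }
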
+diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
+index c4c1e8030ba0..b2ef03a3a2d4 100644
+--- a/net/batman-adv/debugfs.c
++++ b/net/batman-adv/debugfs.c
+@@ -19,6 +19,7 @@
+ #include "main.h"
+
+ #include <linux/compiler.h>
++#include <linux/dcache.h>
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+@@ -506,6 +507,25 @@ out:
+ return -ENOMEM;
+ }
+
++/**
++ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
++ * @hard_iface: hard interface which was renamed
++ */
++void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
++{
++ const char *name = hard_iface->net_dev->name;
++ struct dentry *dir;
++ struct dentry *d;
++
++ dir = hard_iface->debug_dir;
++ if (!dir)
++ return;
++
++ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
++ if (!d)
++ pr_err("Can't rename debugfs dir to %s\n", name);
++}
++
+ /**
+ * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * in debugfs.
+@@ -561,6 +581,26 @@ out:
+ return -ENOMEM;
+ }
+
++/**
++ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
++ * @dev: net_device which was renamed
++ */
++void batadv_debugfs_rename_meshif(struct net_device *dev)
++{
++ struct batadv_priv *bat_priv = netdev_priv(dev);
++ const char *name = dev->name;
++ struct dentry *dir;
++ struct dentry *d;
++
++ dir = bat_priv->debug_dir;
++ if (!dir)
++ return;
++
++ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
++ if (!d)
++ pr_err("Can't rename debugfs dir to %s\n", name);
++}
++
+ void batadv_debugfs_del_meshif(struct net_device *dev)
+ {
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
+index 80ab8d6f0ab3..347f793a18b2 100644
+--- a/net/batman-adv/debugfs.h
++++ b/net/batman-adv/debugfs.h
+@@ -31,8 +31,10 @@ struct net_device;
+ void batadv_debugfs_init(void);
+ void batadv_debugfs_destroy(void);
+ int batadv_debugfs_add_meshif(struct net_device *dev);
++void batadv_debugfs_rename_meshif(struct net_device *dev);
+ void batadv_debugfs_del_meshif(struct net_device *dev);
+ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
++void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
+ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
+
+ #else
+@@ -50,6 +52,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
+ return 0;
+ }
+
++static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
++{
++}
++
+ static inline void batadv_debugfs_del_meshif(struct net_device *dev)
+ {
+ }
+@@ -60,6 +66,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
+ return 0;
+ }
+
++static inline
++void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
++{
++}
++
+ static inline
+ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
+ {
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index 76808c5e8183..769683da8d9d 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -993,15 +993,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ if (!skb_new)
+ goto out;
+
+- if (vid & BATADV_VLAN_HAS_TAG)
++ if (vid & BATADV_VLAN_HAS_TAG) {
+ skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
++ if (!skb_new)
++ goto out;
++ }
+
+ skb_reset_mac_header(skb_new);
+ skb_new->protocol = eth_type_trans(skb_new,
+ bat_priv->soft_iface);
+- bat_priv->stats.rx_packets++;
+- bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
++ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
++ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
++ skb->len + ETH_HLEN + hdr_size);
+ bat_priv->soft_iface->last_rx = jiffies;
+
+ netif_rx(skb_new);
+@@ -1073,9 +1077,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+ */
+ skb_reset_mac_header(skb_new);
+
+- if (vid & BATADV_VLAN_HAS_TAG)
++ if (vid & BATADV_VLAN_HAS_TAG) {
+ skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
++ if (!skb_new)
++ goto out;
++ }
+
+ /* To preserve backwards compatibility, the node has to choose the outgoing
+ * format based on the incoming request packet type. The assumption is
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index d50c3b003dc9..9751b207b01f 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -233,8 +233,10 @@ err_unlock:
+ spin_unlock_bh(&chain->lock);
+
+ err:
+- if (!ret)
++ if (!ret) {
+ kfree(frag_entry_new);
++ kfree_skb(skb);
++ }
+
+ return ret;
+ }
+@@ -329,9 +331,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
+ goto out_err;
+
+ out:
+- *skb = skb_out;
+ ret = true;
+ out_err:
++ *skb = skb_out;
+ return ret;
+ }
+
+@@ -478,6 +480,10 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
+
+ /* Eat and send fragments from the tail of skb */
+ while (skb->len > max_fragment_size) {
++ /* The initial check in this function should cover this case */
++ if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
++ goto out_err;
++
+ skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+ if (!skb_fragment)
+ goto out_err;
+@@ -488,10 +494,6 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
+ batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
+ neigh_node->addr);
+ frag_header.no++;
+-
+- /* The initial check in this function should cover this case */
+- if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
+- goto out_err;
+ }
+
+ /* Make room for the fragment header. */
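The fragmentation hunks above tighten the skb ownership contract: batadv_frag_insert_packet now frees the skb itself when buffering fails, and batadv_frag_skb_buffer writes the (possibly NULL) merged skb back through *skb on the error exit as well, so callers can treat the skb as consumed on every path. A minimal userspace sketch of that consume-on-all-paths contract, using malloc/free in place of skb handling (all names here are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Callee owns *buf on every path: on failure it frees the buffer
     * and NULLs the caller's pointer, mirroring the "*skb = skb_out"
     * write-back moved under the out_err label. The caller must never
     * free its original pointer after this call. */
    static int buffer_consume(char **buf)
    {
        if ((*buf)[0] != 'x') {     /* arbitrary failure condition */
            free(*buf);
            *buf = NULL;
            return 0;
        }
        return 1;                   /* success: caller keeps *buf */
    }

    int main(void)
    {
        char *data = strdup("hello");

        if (!buffer_consume(&data))
            printf("consumed on failure, data=%p\n", (void *)data);
        free(data);                 /* safe: NULL after failure */
        return 0;
    }

The same rule is restated for batadv_recv_frag_packet further down: once batadv_frag_skb_buffer has run, freeing the skb again would be a double free.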
+diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
+index 6abfba1e227f..a88b529b7ca0 100644
+--- a/net/batman-adv/gateway_client.c
++++ b/net/batman-adv/gateway_client.c
+@@ -29,6 +29,7 @@
+ #include <linux/ipv6.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
++#include <linux/lockdep.h>
+ #include <linux/netdevice.h>
+ #include <linux/rculist.h>
+ #include <linux/rcupdate.h>
+@@ -413,6 +414,9 @@ out:
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
++ *
++ * Has to be called with the appropriate locks being acquired
++ * (gw.list_lock).
+ */
+ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+@@ -420,6 +424,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ {
+ struct batadv_gw_node *gw_node;
+
++ lockdep_assert_held(&bat_priv->gw.list_lock);
++
+ if (gateway->bandwidth_down == 0)
+ return;
+
+@@ -438,9 +444,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+ atomic_set(&gw_node->refcount, 1);
+
+- spin_lock_bh(&bat_priv->gw.list_lock);
+ hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+- spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+@@ -493,11 +497,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ {
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
++ spin_lock_bh(&bat_priv->gw.list_lock);
+ gw_node = batadv_gw_node_get(bat_priv, orig_node);
+ if (!gw_node) {
+ batadv_gw_node_add(bat_priv, orig_node, gateway);
++ spin_unlock_bh(&bat_priv->gw.list_lock);
+ goto out;
+ }
++ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
+ (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
+@@ -527,11 +534,12 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ * gets dereferenced.
+ */
+ spin_lock_bh(&bat_priv->gw.list_lock);
+- hlist_del_init_rcu(&gw_node->list);
++ if (!hlist_unhashed(&gw_node->list)) {
++ hlist_del_init_rcu(&gw_node->list);
++ batadv_gw_node_free_ref(gw_node);
++ }
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+- batadv_gw_node_free_ref(gw_node);
+-
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (gw_node == curr_gw)
+ batadv_gw_reselect(bat_priv);
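The gateway change above is a classic widening of a critical section: previously the lookup (batadv_gw_node_get) and the insert (batadv_gw_node_add) each took gw.list_lock on their own, so two concurrent TVLV handlers could both miss the lookup and add duplicate gw_node entries. Holding the lock across both steps, with lockdep_assert_held() documenting the requirement, makes the find-or-insert atomic. A pthreads sketch of the pattern (illustrative types, not batman-adv code):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; int key; };

    static struct node *head;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lookup and insert under one critical section; splitting them
     * lets two threads both miss the lookup and insert duplicates. */
    static struct node *find_or_add(int key)
    {
        struct node *n;

        pthread_mutex_lock(&list_lock);
        for (n = head; n; n = n->next)
            if (n->key == key)
                goto out;

        n = calloc(1, sizeof(*n));
        if (n) {
            n->key = key;
            n->next = head;
            head = n;
        }
    out:
        pthread_mutex_unlock(&list_lock);
        return n;
    }

The hlist_unhashed() guard added to the removal path serves the complementary purpose: it makes unlink-and-put idempotent, so a node cannot be unhashed and have its reference dropped twice.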
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 3c8d8142e8c6..c59bbc327763 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -26,6 +26,7 @@
+ #include <linux/if.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/printk.h>
+ #include <linux/rculist.h>
+@@ -45,13 +46,16 @@
+ #include "sysfs.h"
+ #include "translation-table.h"
+
+-void batadv_hardif_free_rcu(struct rcu_head *rcu)
++/**
++ * batadv_hardif_release - release hard interface from lists and queue for
++ * free after rcu grace period
++ * @hard_iface: the hard interface to free
++ */
++void batadv_hardif_release(struct batadv_hard_iface *hard_iface)
+ {
+- struct batadv_hard_iface *hard_iface;
+-
+- hard_iface = container_of(rcu, struct batadv_hard_iface, rcu);
+ dev_put(hard_iface->net_dev);
+- kfree(hard_iface);
++
++ kfree_rcu(hard_iface, rcu);
+ }
+
+ struct batadv_hard_iface *
+@@ -73,6 +77,28 @@ out:
+ return hard_iface;
+ }
+
++/**
++ * batadv_mutual_parents - check if two devices are each others parent
++ * @dev1: 1st net_device
++ * @dev2: 2nd net_device
++ *
++ * veth devices come in pairs and each is the parent of the other!
++ *
++ * Return: true if the devices are each other's parent, otherwise false
++ */
++static bool batadv_mutual_parents(const struct net_device *dev1,
++ const struct net_device *dev2)
++{
++ int dev1_parent_iflink = dev_get_iflink(dev1);
++ int dev2_parent_iflink = dev_get_iflink(dev2);
++
++ if (!dev1_parent_iflink || !dev2_parent_iflink)
++ return false;
++
++ return (dev1_parent_iflink == dev2->ifindex) &&
++ (dev2_parent_iflink == dev1->ifindex);
++}
++
+ /**
+ * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * @net_dev: the device to check
+@@ -108,6 +134,9 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
+ return false;
+ }
+
++ if (batadv_mutual_parents(net_dev, parent_dev))
++ return false;
++
+ ret = batadv_is_on_batman_iface(parent_dev);
+
+ return ret;
+@@ -465,6 +494,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ hard_iface->soft_iface = soft_iface;
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
++ if (bat_priv->num_ifaces >= UINT_MAX) {
++ ret = -ENOSPC;
++ goto err_dev;
++ }
++
+ ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface);
+ if (ret)
+ goto err_dev;
+@@ -537,8 +571,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
+
+- if (hard_iface->if_status == BATADV_IF_ACTIVE)
+- batadv_hardif_deactivate_interface(hard_iface);
++ batadv_hardif_deactivate_interface(hard_iface);
+
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
+ goto out;
+@@ -573,7 +606,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
+ batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
+
+ /* nobody uses this interface anymore */
+- if (!bat_priv->num_ifaces) {
++ if (bat_priv->num_ifaces == 0) {
+ batadv_gw_check_client_stop(bat_priv);
+
+ if (autodel == BATADV_IF_CLEANUP_AUTO)
+@@ -629,7 +662,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
+ if (ret)
+ goto free_if;
+
+- hard_iface->if_num = -1;
++ hard_iface->if_num = 0;
+ hard_iface->net_dev = net_dev;
+ hard_iface->soft_iface = NULL;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+@@ -639,6 +672,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
+ goto free_sysfs;
+
+ INIT_LIST_HEAD(&hard_iface->list);
++ mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
+ INIT_WORK(&hard_iface->cleanup_work,
+ batadv_hardif_remove_interface_finish);
+
+@@ -693,6 +727,32 @@ void batadv_hardif_remove_interfaces(void)
+ rtnl_unlock();
+ }
+
++/**
++ * batadv_hard_if_event_softif() - Handle events for soft interfaces
++ * @event: NETDEV_* event to handle
++ * @net_dev: net_device which generated an event
++ *
++ * Return: NOTIFY_* result
++ */
++static int batadv_hard_if_event_softif(unsigned long event,
++ struct net_device *net_dev)
++{
++ struct batadv_priv *bat_priv;
++
++ switch (event) {
++ case NETDEV_REGISTER:
++ batadv_sysfs_add_meshif(net_dev);
++ bat_priv = netdev_priv(net_dev);
++ batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
++ break;
++ case NETDEV_CHANGENAME:
++ batadv_debugfs_rename_meshif(net_dev);
++ break;
++ }
++
++ return NOTIFY_DONE;
++}
++
+ static int batadv_hard_if_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+ {
+@@ -701,12 +761,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+
+- if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
+- batadv_sysfs_add_meshif(net_dev);
+- bat_priv = netdev_priv(net_dev);
+- batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+- return NOTIFY_DONE;
+- }
++ if (batadv_softif_is_valid(net_dev))
++ return batadv_hard_if_event_softif(event, net_dev);
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface && event == NETDEV_REGISTER)
+@@ -748,6 +804,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
+ if (hard_iface == primary_if)
+ batadv_primary_if_update_addr(bat_priv, NULL);
+ break;
++ case NETDEV_CHANGENAME:
++ batadv_debugfs_rename_hardif(hard_iface);
++ break;
+ default:
+ break;
+ }
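batadv_mutual_parents() exists because batadv_is_on_batman_iface() walks up the parent chain via dev_get_iflink(), and a veth pair would make that walk ping-pong between the two peers forever: each device reports the other as its link. The check below restates the patch logic with stand-in types (not the kernel's struct net_device):

    #include <stdbool.h>

    struct netdev { int ifindex; int iflink; };    /* illustrative stand-in */

    /* veth peers report each other as their link partner; without this
     * check a recursive parent-chain walk would never terminate. */
    static bool mutual_parents(const struct netdev *a, const struct netdev *b)
    {
        if (!a->iflink || !b->iflink)
            return false;
        return a->iflink == b->ifindex && b->iflink == a->ifindex;
    }

An iflink of 0 means the device reports no distinct parent, so the pair cannot be mutual parents and the walk proceeds normally.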
+diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
+index 7b12ea8ea29d..4d74c0415911 100644
+--- a/net/batman-adv/hard-interface.h
++++ b/net/batman-adv/hard-interface.h
+@@ -61,18 +61,18 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
+ void batadv_hardif_remove_interfaces(void);
+ int batadv_hardif_min_mtu(struct net_device *soft_iface);
+ void batadv_update_min_mtu(struct net_device *soft_iface);
+-void batadv_hardif_free_rcu(struct rcu_head *rcu);
++void batadv_hardif_release(struct batadv_hard_iface *hard_iface);
+
+ /**
+ * batadv_hardif_free_ref - decrement the hard interface refcounter and
+- * possibly free it
++ * possibly release it
+ * @hard_iface: the hard interface to free
+ */
+ static inline void
+ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
+ {
+ if (atomic_dec_and_test(&hard_iface->refcount))
+- call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
++ batadv_hardif_release(hard_iface);
+ }
+
+ static inline struct batadv_hard_iface *
+diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
+index d7f17c1aa4a4..2bdbaff3279b 100644
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -1079,15 +1079,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ {
+ struct batadv_tvlv_handler *tvlv_handler;
+
++ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
++
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+ if (tvlv_handler) {
++ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ batadv_tvlv_handler_free_ref(tvlv_handler);
+ return;
+ }
+
+ tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+- if (!tvlv_handler)
++ if (!tvlv_handler) {
++ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ return;
++ }
+
+ tvlv_handler->ogm_handler = optr;
+ tvlv_handler->unicast_handler = uptr;
+@@ -1097,7 +1102,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ atomic_set(&tvlv_handler->refcount, 1);
+ INIT_HLIST_NODE(&tvlv_handler->list);
+
+- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+ hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ }
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index d0956f726547..86c69208da2b 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -828,19 +828,29 @@ static struct batadv_nc_node
+ spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
+ struct list_head *list;
+
++ /* Select ingoing or outgoing coding node */
++ if (in_coding) {
++ lock = &orig_neigh_node->in_coding_list_lock;
++ list = &orig_neigh_node->in_coding_list;
++ } else {
++ lock = &orig_neigh_node->out_coding_list_lock;
++ list = &orig_neigh_node->out_coding_list;
++ }
++
++ spin_lock_bh(lock);
++
+ /* Check if nc_node is already added */
+ nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
+
+ /* Node found */
+ if (nc_node)
+- return nc_node;
++ goto unlock;
+
+ nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
+ if (!nc_node)
+- return NULL;
++ goto unlock;
+
+- if (!atomic_inc_not_zero(&orig_neigh_node->refcount))
+- goto free;
++ atomic_inc(&orig_neigh_node->refcount);
+
+ /* Initialize nc_node */
+ INIT_LIST_HEAD(&nc_node->list);
+@@ -848,28 +858,15 @@ static struct batadv_nc_node
+ nc_node->orig_node = orig_neigh_node;
+ atomic_set(&nc_node->refcount, 2);
+
+- /* Select ingoing or outgoing coding node */
+- if (in_coding) {
+- lock = &orig_neigh_node->in_coding_list_lock;
+- list = &orig_neigh_node->in_coding_list;
+- } else {
+- lock = &orig_neigh_node->out_coding_list_lock;
+- list = &orig_neigh_node->out_coding_list;
+- }
+-
+ batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
+ nc_node->addr, nc_node->orig_node->orig);
+
+ /* Add nc_node to orig_node */
+- spin_lock_bh(lock);
+ list_add_tail_rcu(&nc_node->list, list);
++unlock:
+ spin_unlock_bh(lock);
+
+ return nc_node;
+-
+-free:
+- kfree(nc_node);
+- return NULL;
+ }
+
+ /**
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 6282f021ddfb..b3013fbc417e 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -462,6 +462,8 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+ {
+ struct batadv_neigh_node *neigh_node;
+
++ spin_lock_bh(&orig_node->neigh_list_lock);
++
+ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+ if (neigh_node)
+ goto out;
+@@ -483,19 +485,20 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+ ether_addr_copy(neigh_node->addr, neigh_addr);
+ neigh_node->if_incoming = hard_iface;
+ neigh_node->orig_node = orig_node;
++ neigh_node->last_seen = jiffies;
+
+ /* extra reference for return */
+ atomic_set(&neigh_node->refcount, 2);
+
+- spin_lock_bh(&orig_node->neigh_list_lock);
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+- spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+
+ out:
++ spin_unlock_bh(&orig_node->neigh_list_lock);
++
+ return neigh_node;
+ }
+
+@@ -561,6 +564,8 @@ static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
++ struct batadv_orig_node_vlan *vlan;
++ struct batadv_orig_ifinfo *last_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+@@ -576,8 +581,21 @@ static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+ hlist_del_rcu(&orig_ifinfo->list);
+ batadv_orig_ifinfo_free_ref(orig_ifinfo);
+ }
++
++ last_candidate = orig_node->last_bonding_candidate;
++ orig_node->last_bonding_candidate = NULL;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
++ if (last_candidate)
++ batadv_orig_ifinfo_free_ref(last_candidate);
++
++ spin_lock_bh(&orig_node->vlan_list_lock);
++ hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
++ hlist_del_rcu(&vlan->list);
++ batadv_orig_node_vlan_free_ref(vlan);
++ }
++ spin_unlock_bh(&orig_node->vlan_list_lock);
++
+ /* Free nc_nodes */
+ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+
+@@ -1085,7 +1103,7 @@ out:
+ }
+
+ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+- int max_if_num)
++ unsigned int max_if_num)
+ {
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+@@ -1121,7 +1139,7 @@ err:
+ }
+
+ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+- int max_if_num)
++ unsigned int max_if_num)
+ {
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
+index a5c37882b409..65824d892a6a 100644
+--- a/net/batman-adv/originator.h
++++ b/net/batman-adv/originator.h
+@@ -67,9 +67,9 @@ void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo);
+ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
+ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
+ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+- int max_if_num);
++ unsigned int max_if_num);
+ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+- int max_if_num);
++ unsigned int max_if_num);
+ struct batadv_orig_node_vlan *
+ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ unsigned short vid);
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index d8a2f33e60e5..b3e8b0e3073c 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -359,6 +359,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto out;
+
++ ethhdr = eth_hdr(skb);
+ icmph = (struct batadv_icmp_header *)skb->data;
+ icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+ if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+@@ -438,6 +439,52 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
+ return 0;
+ }
+
++/**
++ * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
++ * @orig_node: originator node whose last bonding candidate should be retrieved
++ *
++ * Return: last bonding candidate of router or NULL if not found
++ *
++ * The object is returned with refcounter increased by 1.
++ */
++static struct batadv_orig_ifinfo *
++batadv_last_bonding_get(struct batadv_orig_node *orig_node)
++{
++ struct batadv_orig_ifinfo *last_bonding_candidate;
++
++ spin_lock_bh(&orig_node->neigh_list_lock);
++ last_bonding_candidate = orig_node->last_bonding_candidate;
++
++ if (last_bonding_candidate)
++ atomic_inc(&last_bonding_candidate->refcount);
++ spin_unlock_bh(&orig_node->neigh_list_lock);
++
++ return last_bonding_candidate;
++}
++
++/**
++ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
++ * @orig_node: originator node whose bonding candidates should be replaced
++ * @new_candidate: new bonding candidate or NULL
++ */
++static void
++batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
++ struct batadv_orig_ifinfo *new_candidate)
++{
++ struct batadv_orig_ifinfo *old_candidate;
++
++ spin_lock_bh(&orig_node->neigh_list_lock);
++ old_candidate = orig_node->last_bonding_candidate;
++
++ if (new_candidate)
++ atomic_inc(&new_candidate->refcount);
++ orig_node->last_bonding_candidate = new_candidate;
++ spin_unlock_bh(&orig_node->neigh_list_lock);
++
++ if (old_candidate)
++ batadv_orig_ifinfo_free_ref(old_candidate);
++}
++
+ /**
+ * batadv_find_router - find a suitable router for this originator
+ * @bat_priv: the bat priv with all the soft interface information
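batadv_last_bonding_get() and batadv_last_bonding_replace() above wrap a common kernel pattern: swapping a refcounted pointer under a lock. The new object's reference is taken before it is published, the pointer swap happens inside the critical section, and the old object's reference is dropped only after the lock is released, since the drop may free the object. A userspace sketch with pthreads and C11 atomics (names are illustrative; the kernel uses spinlocks and its own refcount primitives):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj { atomic_int refcount; };    /* creators start it at 1 */

    static struct obj *current_obj;
    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

    static void obj_put(struct obj *o)
    {
        if (o && atomic_fetch_sub(&o->refcount, 1) == 1)
            free(o);
    }

    /* Reference the new object, publish it under the lock, and drop
     * the old reference outside the critical section. */
    static void obj_replace(struct obj *new_obj)
    {
        struct obj *old;

        pthread_mutex_lock(&obj_lock);
        old = current_obj;
        if (new_obj)
            atomic_fetch_add(&new_obj->refcount, 1);
        current_obj = new_obj;
        pthread_mutex_unlock(&obj_lock);

        obj_put(old);    /* may free; deliberately after unlock */
    }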
+@@ -485,7 +532,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
+ * router - obviously there are no other candidates.
+ */
+ rcu_read_lock();
+- last_candidate = orig_node->last_bonding_candidate;
++ last_candidate = batadv_last_bonding_get(orig_node);
+ if (last_candidate)
+ last_cand_router = rcu_dereference(last_candidate->router);
+
+@@ -545,10 +592,6 @@ next:
+ }
+ rcu_read_unlock();
+
+- /* last_bonding_candidate is reset below, remove the old reference. */
+- if (orig_node->last_bonding_candidate)
+- batadv_orig_ifinfo_free_ref(orig_node->last_bonding_candidate);
+-
+ /* After finding candidates, handle the three cases:
+ * 1) there is a next candidate, use that
+ * 2) there is no next candidate, use the first of the list
+@@ -557,23 +600,33 @@ next:
+ if (next_candidate) {
+ batadv_neigh_node_free_ref(router);
+
+- /* remove references to first candidate, we don't need it. */
+- if (first_candidate) {
+- batadv_neigh_node_free_ref(first_candidate_router);
+- batadv_orig_ifinfo_free_ref(first_candidate);
+- }
++ atomic_inc(&next_candidate_router->refcount);
+ router = next_candidate_router;
+- orig_node->last_bonding_candidate = next_candidate;
++ batadv_last_bonding_replace(orig_node, next_candidate);
+ } else if (first_candidate) {
+ batadv_neigh_node_free_ref(router);
+
+- /* refcounting has already been done in the loop above. */
++ atomic_inc(&first_candidate_router->refcount);
+ router = first_candidate_router;
+- orig_node->last_bonding_candidate = first_candidate;
++ batadv_last_bonding_replace(orig_node, first_candidate);
+ } else {
+- orig_node->last_bonding_candidate = NULL;
++ batadv_last_bonding_replace(orig_node, NULL);
++ }
++
++ /* cleanup of candidates */
++ if (first_candidate) {
++ batadv_neigh_node_free_ref(first_candidate_router);
++ batadv_orig_ifinfo_free_ref(first_candidate);
+ }
+
++ if (next_candidate) {
++ batadv_neigh_node_free_ref(next_candidate_router);
++ batadv_orig_ifinfo_free_ref(next_candidate);
++ }
++
++ if (last_candidate)
++ batadv_orig_ifinfo_free_ref(last_candidate);
++
+ return router;
+ }
+
+@@ -585,6 +638,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr = eth_hdr(skb);
+ int res, hdr_len, ret = NET_RX_DROP;
++ unsigned int len;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+@@ -625,6 +679,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
+ if (hdr_len > 0)
+ batadv_skb_set_priority(skb, hdr_len);
+
++ len = skb->len;
+ res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
+
+ /* translate transmit result into receive result */
+@@ -632,7 +687,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
+ /* skb was transmitted and consumed */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+- skb->len + ETH_HLEN);
++ len + ETH_HLEN);
+
+ ret = NET_RX_SUCCESS;
+ } else if (res == NET_XMIT_POLICED) {
+@@ -649,6 +704,7 @@ out:
+ /**
+ * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: unicast packet to process
+ * @unicast_packet: the unicast header to be updated
+ * @dst_addr: the payload destination
+ * @vid: VLAN identifier
+@@ -660,7 +716,7 @@ out:
+ * Returns true if the packet header has been updated, false otherwise
+ */
+ static bool
+-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
++batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_unicast_packet *unicast_packet,
+ u8 *dst_addr, unsigned short vid)
+ {
+@@ -689,8 +745,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+ }
+
+ /* update the packet header */
++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+ ether_addr_copy(unicast_packet->dest, orig_addr);
+ unicast_packet->ttvn = orig_ttvn;
++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+
+ ret = true;
+ out:
+@@ -730,7 +788,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ * the packet to
+ */
+ if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
+- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
+ ethhdr->h_dest, vid))
+ batadv_dbg_ratelimited(BATADV_DBG_TT,
+ bat_priv,
+@@ -776,7 +834,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ * destination can possibly be updated and forwarded towards the new
+ * target host
+ */
+- if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
++ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
+ ethhdr->h_dest, vid)) {
+ batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
+ "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
+@@ -799,12 +857,14 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ if (!primary_if)
+ return 0;
+
++ /* update the packet header */
++ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+ ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
++ unicast_packet->ttvn = curr_ttvn;
++ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+
+ batadv_hardif_free_ref(primary_if);
+
+- unicast_packet->ttvn = curr_ttvn;
+-
+ return 1;
+ }
+
+@@ -849,7 +909,6 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
+ bool is4addr;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+- unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+
+ is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
+ /* the caller function should have already pulled 2 bytes */
+@@ -870,9 +929,13 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
+ if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+ return NET_RX_DROP;
+
++ unicast_packet = (struct batadv_unicast_packet *)skb->data;
++
+ /* packet for me */
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+ if (is4addr) {
++ unicast_4addr_packet =
++ (struct batadv_unicast_4addr_packet *)skb->data;
+ subtype = unicast_4addr_packet->subtype;
+ batadv_dat_inc_counter(bat_priv, subtype);
+
+@@ -998,6 +1061,12 @@ int batadv_recv_frag_packet(struct sk_buff *skb,
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
++ /* batadv_frag_skb_buffer will always consume the skb and
++ * the caller should therefore never try to free the
++ * skb after this point
++ */
++ ret = NET_RX_SUCCESS;
++
+ /* Add fragment to buffer and merge if possible. */
+ if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+ goto out;
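The skb_postpull_rcsum()/skb_postpush_rcsum() pairs inserted around the header rewrites keep skb->csum coherent on CHECKSUM_COMPLETE packets: when bytes already folded into the running checksum are modified, the old bytes must be subtracted out and the new bytes added back, otherwise later checksum verification fails. The same incremental update on a plain 16-bit ones'-complement sum, per RFC 1624 equation 3 (illustrative, not the kernel's csum helpers):

    #include <stdint.h>

    /* Update a ones'-complement checksum when one 16-bit word changes
     * from old_w (m) to new_w (m'): HC' = ~(~HC + ~m + m'). */
    static uint16_t csum_update(uint16_t csum, uint16_t old_w, uint16_t new_w)
    {
        uint32_t sum = (uint16_t)~csum;

        sum += (uint16_t)~old_w;    /* remove the old word */
        sum += new_w;               /* account for the new word */
        while (sum >> 16)           /* fold the carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

The related one-liner in batadv_route_unicast_packet, saving len = skb->len before batadv_send_skb_to_orig(), follows the same ownership rule seen elsewhere in this series: a transmitted skb is consumed, so its fields must not be read afterwards, not even for statistics.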
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 0e0c3b8ed927..11fbfb222c49 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -381,8 +381,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_orig_node *orig_node;
+
+ orig_node = batadv_gw_get_selected_orig(bat_priv);
+- return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+- orig_node, vid);
++ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
++ BATADV_P_DATA, orig_node, vid);
+ }
+
+ void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 4812123e0a2c..ff693887ea82 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -1000,7 +1000,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
+ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
+ struct list_head *head)
+ {
++ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_hard_iface *hard_iface;
++ struct batadv_softif_vlan *vlan;
+
+ list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface == soft_iface)
+@@ -1008,6 +1010,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
+ BATADV_IF_CLEANUP_KEEP);
+ }
+
++ /* destroy the "untagged" VLAN */
++ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
++ if (vlan) {
++ batadv_softif_destroy_vlan(bat_priv, vlan);
++ batadv_softif_vlan_free_ref(vlan);
++ }
++
+ batadv_sysfs_del_meshif(soft_iface);
+ unregister_netdevice_queue(soft_iface, head);
+ }
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index ffd49b40e76a..67ee7c83a28d 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -197,8 +197,11 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+ static void
+ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
+ {
+- if (atomic_dec_and_test(&tt_local_entry->common.refcount))
++ if (atomic_dec_and_test(&tt_local_entry->common.refcount)) {
++ batadv_softif_vlan_free_ref(tt_local_entry->vlan);
++
+ kfree_rcu(tt_local_entry, common.rcu);
++ }
+ }
+
+ /**
+@@ -303,9 +306,11 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+
+ if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+ spin_lock_bh(&orig_node->vlan_list_lock);
+- hlist_del_init_rcu(&vlan->list);
++ if (!hlist_unhashed(&vlan->list)) {
++ hlist_del_init_rcu(&vlan->list);
++ batadv_orig_node_vlan_free_ref(vlan);
++ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+- batadv_orig_node_vlan_free_ref(vlan);
+ }
+
+ batadv_orig_node_vlan_free_ref(vlan);
+@@ -503,14 +508,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global,
+ const char *message)
+ {
++ struct batadv_tt_global_entry *tt_removed_entry;
++ struct hlist_node *tt_removed_node;
++
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ BATADV_PRINT_VID(tt_global->common.vid), message);
+
+- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
+- batadv_choose_tt, &tt_global->common);
+- batadv_tt_global_entry_free_ref(tt_global);
++ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
++ batadv_compare_tt,
++ batadv_choose_tt,
++ &tt_global->common);
++ if (!tt_removed_node)
++ return;
++
++ /* drop reference of removed hash entry */
++ tt_removed_entry = hlist_entry(tt_removed_node,
++ struct batadv_tt_global_entry,
++ common.hash_entry);
++ batadv_tt_global_entry_free_ref(tt_removed_entry);
+ }
+
+ /**
+@@ -636,7 +653,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ batadv_tt_local_entry_free_ref(tt_local);
+- batadv_softif_vlan_free_ref(vlan);
+ goto out;
+ }
+
+@@ -740,7 +756,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+ struct batadv_orig_node_vlan *vlan;
+ u8 *tt_change_ptr;
+
+- rcu_read_lock();
++ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ num_vlan++;
+ num_entries += atomic_read(&vlan->tt.num_entries);
+@@ -778,7 +794,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+ out:
+- rcu_read_unlock();
++ spin_unlock_bh(&orig_node->vlan_list_lock);
+ return tvlv_len;
+ }
+
+@@ -809,15 +825,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_softif_vlan *vlan;
+ u16 num_vlan = 0;
+- u16 num_entries = 0;
++ u16 vlan_entries = 0;
++ u16 total_entries = 0;
+ u16 tvlv_len;
+ u8 *tt_change_ptr;
+ int change_offset;
+
+- rcu_read_lock();
++ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
++ vlan_entries = atomic_read(&vlan->tt.num_entries);
++ if (vlan_entries < 1)
++ continue;
++
+ num_vlan++;
+- num_entries += atomic_read(&vlan->tt.num_entries);
++ total_entries += vlan_entries;
+ }
+
+ change_offset = sizeof(**tt_data);
+@@ -825,7 +846,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+
+ /* if tt_len is negative, allocate the space needed by the full table */
+ if (*tt_len < 0)
+- *tt_len = batadv_tt_len(num_entries);
++ *tt_len = batadv_tt_len(total_entries);
+
+ tvlv_len = *tt_len;
+ tvlv_len += change_offset;
+@@ -842,6 +863,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
++ vlan_entries = atomic_read(&vlan->tt.num_entries);
++ if (vlan_entries < 1)
++ continue;
++
+ tt_vlan->vid = htons(vlan->vid);
+ tt_vlan->crc = htonl(vlan->tt.crc);
+
+@@ -852,7 +877,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+ out:
+- rcu_read_unlock();
++ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ return tvlv_len;
+ }
+
+@@ -940,7 +965,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
+ struct batadv_hard_iface *primary_if;
+- struct batadv_softif_vlan *vlan;
+ struct hlist_head *head;
+ unsigned short vid;
+ u32 i;
+@@ -977,13 +1001,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
+
+ no_purge = tt_common_entry->flags & np_flag;
+
+- vlan = batadv_softif_vlan_get(bat_priv, vid);
+- if (!vlan) {
+- seq_printf(seq, "Cannot retrieve VLAN %d\n",
+- BATADV_PRINT_VID(vid));
+- continue;
+- }
+-
+ seq_printf(seq,
+ " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
+ tt_common_entry->addr,
+@@ -1001,9 +1018,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
+ BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
+ no_purge ? 0 : last_seen_secs,
+ no_purge ? 0 : last_seen_msecs,
+- vlan->tt.crc);
+-
+- batadv_softif_vlan_free_ref(vlan);
++ tt_local->vlan->tt.crc);
+ }
+ rcu_read_unlock();
+ }
+@@ -1046,10 +1061,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid, const char *message,
+ bool roaming)
+ {
++ struct batadv_tt_local_entry *tt_removed_entry;
+ struct batadv_tt_local_entry *tt_local_entry;
+ u16 flags, curr_flags = BATADV_NO_FLAGS;
+- struct batadv_softif_vlan *vlan;
+- void *tt_entry_exists;
++ struct hlist_node *tt_removed_node;
+
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ if (!tt_local_entry)
+@@ -1078,23 +1093,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+ */
+ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
+
+- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
++ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_local_entry->common);
+- if (!tt_entry_exists)
+- goto out;
+-
+- /* extra call to free the local tt entry */
+- batadv_tt_local_entry_free_ref(tt_local_entry);
+-
+- /* decrease the reference held for this vlan */
+- vlan = batadv_softif_vlan_get(bat_priv, vid);
+- if (!vlan)
++ if (!tt_removed_node)
+ goto out;
+
+- batadv_softif_vlan_free_ref(vlan);
+- batadv_softif_vlan_free_ref(vlan);
++ /* drop reference of removed hash entry */
++ tt_removed_entry = hlist_entry(tt_removed_node,
++ struct batadv_tt_local_entry,
++ common.hash_entry);
++ batadv_tt_local_entry_free_ref(tt_removed_entry);
+
+ out:
+ if (tt_local_entry)
+@@ -1168,7 +1178,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
+- struct batadv_softif_vlan *vlan;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ u32 i;
+@@ -1190,14 +1199,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
+ struct batadv_tt_local_entry,
+ common);
+
+- /* decrease the reference held for this vlan */
+- vlan = batadv_softif_vlan_get(bat_priv,
+- tt_common_entry->vid);
+- if (vlan) {
+- batadv_softif_vlan_free_ref(vlan);
+- batadv_softif_vlan_free_ref(vlan);
+- }
+-
+ batadv_tt_local_entry_free_ref(tt_local);
+ }
+ spin_unlock_bh(list_lock);
+@@ -1273,7 +1274,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+ */
+ static bool
+ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+- const struct batadv_orig_node *orig_node)
++ const struct batadv_orig_node *orig_node,
++ u8 *flags)
+ {
+ struct batadv_tt_orig_list_entry *orig_entry;
+ bool found = false;
+@@ -1281,25 +1283,64 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+ orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+ if (orig_entry) {
+ found = true;
++
++ if (flags)
++ *flags = orig_entry->flags;
++
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
+ }
+
+ return found;
+ }
+
++/**
++ * batadv_tt_global_sync_flags - update TT sync flags
++ * @tt_global: the TT global entry to update sync flags in
++ *
++ * Updates the sync flag bits in the tt_global flag attribute with a logical
++ * OR of all sync flags from any of its TT orig entries.
++ */
++static void
++batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
++{
++ struct batadv_tt_orig_list_entry *orig_entry;
++ const struct hlist_head *head;
++ u16 flags = BATADV_NO_FLAGS;
++
++ rcu_read_lock();
++ head = &tt_global->orig_list;
++ hlist_for_each_entry_rcu(orig_entry, head, list)
++ flags |= orig_entry->flags;
++ rcu_read_unlock();
++
++ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
++ tt_global->common.flags = flags;
++}
++
++/**
++ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
++ * @tt_global: the TT global entry to add an orig entry in
++ * @orig_node: the originator to add an orig entry for
++ * @ttvn: translation table version number of this changeset
++ * @flags: TT sync flags
++ */
+ static void
+ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
+- struct batadv_orig_node *orig_node, int ttvn)
++ struct batadv_orig_node *orig_node, int ttvn,
++ u8 flags)
+ {
+ struct batadv_tt_orig_list_entry *orig_entry;
+
++ spin_lock_bh(&tt_global->list_lock);
++
+ orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+ if (orig_entry) {
+ /* refresh the ttvn: the current value could be a bogus one that
+ * was added during a "temporary client detection"
+ */
+ orig_entry->ttvn = ttvn;
+- goto out;
++ orig_entry->flags = flags;
++ goto sync_flags;
+ }
+
+ orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+@@ -1311,17 +1352,20 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
+ batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
+ orig_entry->orig_node = orig_node;
+ orig_entry->ttvn = ttvn;
++ orig_entry->flags = flags;
+ atomic_set(&orig_entry->refcount, 2);
+
+- spin_lock_bh(&tt_global->list_lock);
+ hlist_add_head_rcu(&orig_entry->list,
+ &tt_global->orig_list);
+- spin_unlock_bh(&tt_global->list_lock);
+ atomic_inc(&tt_global->orig_list_count);
+
++sync_flags:
++ batadv_tt_global_sync_flags(tt_global);
+ out:
+ if (orig_entry)
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
++
++ spin_unlock_bh(&tt_global->list_lock);
+ }
+
+ /**
+@@ -1379,7 +1423,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ ether_addr_copy(common->addr, tt_addr);
+ common->vid = vid;
+
+- common->flags = flags;
++ common->flags = flags & (~BATADV_TT_SYNC_MASK);
++
+ tt_global_entry->roam_at = 0;
+ /* node must store current time in case of roaming. This is
+ * needed to purge this entry out on timeout (if nobody claims
+@@ -1420,7 +1465,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ if (!(common->flags & BATADV_TT_CLIENT_TEMP))
+ goto out;
+ if (batadv_tt_global_entry_has_orig(tt_global_entry,
+- orig_node))
++ orig_node, NULL))
+ goto out_remove;
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ goto add_orig_entry;
+@@ -1441,7 +1486,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ * TT_CLIENT_WIFI, therefore they have to be copied in the
+ * client entry
+ */
+- tt_global_entry->common.flags |= flags;
++ tt_global_entry->common.flags |= flags & (~BATADV_TT_SYNC_MASK);
+
+ /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
+ * one originator left in the list and we previously received a
+@@ -1458,7 +1503,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ }
+ add_orig_entry:
+ /* add the new orig_entry (if needed) or update it */
+- batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
++ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
++ flags & BATADV_TT_SYNC_MASK);
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+@@ -2111,6 +2157,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ unsigned short vid)
+ {
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
++ struct batadv_tt_orig_list_entry *tt_orig;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
+ struct hlist_head *head;
+@@ -2149,8 +2196,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ /* find out if this global entry is announced by this
+ * originator
+ */
+- if (!batadv_tt_global_entry_has_orig(tt_global,
+- orig_node))
++ tt_orig = batadv_tt_global_orig_entry_find(tt_global,
++ orig_node);
++ if (!tt_orig)
+ continue;
+
+ /* use network order to read the VID: this ensures that
+@@ -2162,10 +2210,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ /* compute the CRC on flags that have to be kept in sync
+ * among nodes
+ */
+- flags = tt_common->flags & BATADV_TT_SYNC_MASK;
++ flags = tt_orig->flags;
+ crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+ crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
++
++ batadv_tt_orig_list_entry_free_ref(tt_orig);
+ }
+ rcu_read_unlock();
+ }
+@@ -2230,6 +2280,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
+ return crc;
+ }
+
++/**
++ * batadv_tt_req_node_release - free tt_req node entry
++ * @ref: kref pointer of the tt req_node entry
++ */
++static void batadv_tt_req_node_release(struct kref *ref)
++{
++ struct batadv_tt_req_node *tt_req_node;
++
++ tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
++
++ kfree(tt_req_node);
++}
++
++/**
++ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
++ * possibly release it
++ * @tt_req_node: tt_req_node to be free'd
++ */
++static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
++{
++ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
++}
++
+ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
+ {
+ struct batadv_tt_req_node *node;
+@@ -2239,7 +2312,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
+
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ hlist_del_init(&node->list);
+- kfree(node);
++ batadv_tt_req_node_put(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+@@ -2276,7 +2349,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
+ if (batadv_has_timed_out(node->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT)) {
+ hlist_del_init(&node->list);
+- kfree(node);
++ batadv_tt_req_node_put(node);
+ }
+ }
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+@@ -2308,9 +2381,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
+ if (!tt_req_node)
+ goto unlock;
+
++ kref_init(&tt_req_node->refcount);
+ ether_addr_copy(tt_req_node->addr, orig_node->orig);
+ tt_req_node->issued_at = jiffies;
+
++ kref_get(&tt_req_node->refcount);
+ hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
+ unlock:
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+@@ -2324,17 +2399,24 @@ unlock:
+ *
+ * Returns 1 if the entry is valid, 0 otherwise.
+ */
+-static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
++static int batadv_tt_local_valid(const void *entry_ptr,
++ const void *data_ptr,
++ u8 *flags)
+ {
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
+ return 0;
++
++ if (flags)
++ *flags = tt_common_entry->flags;
++
+ return 1;
+ }
+
+ static int batadv_tt_global_valid(const void *entry_ptr,
+- const void *data_ptr)
++ const void *data_ptr,
++ u8 *flags)
+ {
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_global_entry *tt_global_entry;
+@@ -2348,7 +2430,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
+ struct batadv_tt_global_entry,
+ common);
+
+- return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
++ return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
++ flags);
+ }
+
+ /**
+@@ -2364,18 +2447,25 @@ static int batadv_tt_global_valid(const void *entry_ptr,
+ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ void *tvlv_buff, u16 tt_len,
+- int (*valid_cb)(const void *, const void *),
++ int (*valid_cb)(const void *,
++ const void *,
++ u8 *flags),
+ void *cb_data)
+ {
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct hlist_head *head;
+ u16 tt_tot, tt_num_entries = 0;
++ u8 flags;
++ bool ret;
+ u32 i;
+
+ tt_tot = batadv_tt_entries(tt_len);
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
+
++ if (!valid_cb)
++ return;
++
+ rcu_read_lock();
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+@@ -2385,11 +2475,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ if (tt_tot == tt_num_entries)
+ break;
+
+- if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
++ ret = valid_cb(tt_common_entry, cb_data, &flags);
++ if (!ret)
+ continue;
+
+ ether_addr_copy(tt_change->addr, tt_common_entry->addr);
+- tt_change->flags = tt_common_entry->flags;
++ tt_change->flags = flags;
+ tt_change->vid = htons(tt_common_entry->vid);
+ memset(tt_change->reserved, 0,
+ sizeof(tt_change->reserved));
+@@ -2560,13 +2651,19 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
+ out:
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
++
+ if (ret && tt_req_node) {
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+- /* hlist_del_init() verifies tt_req_node still is in the list */
+- hlist_del_init(&tt_req_node->list);
++ if (!hlist_unhashed(&tt_req_node->list)) {
++ hlist_del_init(&tt_req_node->list);
++ batadv_tt_req_node_put(tt_req_node);
++ }
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+- kfree(tt_req_node);
+ }
++
++ if (tt_req_node)
++ batadv_tt_req_node_put(tt_req_node);
++
+ kfree(tvlv_tt_data);
+ return ret;
+ }
+@@ -3002,7 +3099,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ if (!batadv_compare_eth(node->addr, resp_src))
+ continue;
+ hlist_del_init(&node->list);
+- kfree(node);
++ batadv_tt_req_node_put(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+@@ -3227,7 +3324,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_local_entry *tt_local;
+- struct batadv_softif_vlan *vlan;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+@@ -3257,13 +3353,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
+ struct batadv_tt_local_entry,
+ common);
+
+- /* decrease the reference held for this vlan */
+- vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+- if (vlan) {
+- batadv_softif_vlan_free_ref(vlan);
+- batadv_softif_vlan_free_ref(vlan);
+- }
+-
+ batadv_tt_local_entry_free_ref(tt_local);
+ }
+ spin_unlock_bh(list_lock);
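The translation-table rework converts batadv_tt_req_node from bare kfree() management to a kref, so the request list and every in-flight user each hold their own reference, and whichever context drops the last one frees the node. Combined with the hlist_unhashed() guard in batadv_send_tt_request(), this closes the window in which a timed-out request could be freed while another path still held a pointer to it. A compact userspace analogue of the kref idiom using C11 atomics (the kernel's kref has stricter overflow and memory-ordering semantics):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct req {
        atomic_int refcount;    /* analogue of struct kref */
        /* ... payload ... */
    };

    static struct req *req_new(void)
    {
        struct req *r = calloc(1, sizeof(*r));

        if (r)
            atomic_init(&r->refcount, 1);    /* kref_init() */
        return r;
    }

    static void req_get(struct req *r)
    {
        atomic_fetch_add(&r->refcount, 1);   /* kref_get() */
    }

    static void req_put(struct req *r)
    {
        /* kref_put(): the last dropper runs the release function */
        if (atomic_fetch_sub(&r->refcount, 1) == 1)
            free(r);
    }

Note how batadv_tt_req_node_new() takes an extra kref_get() for the list itself; every hlist_del_init() is then paired with exactly one batadv_tt_req_node_put().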
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index cbd347c2e4a5..8fce1241ad6d 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -77,11 +77,13 @@ enum batadv_dhcp_recipient {
+ * @ogm_buff: buffer holding the OGM packet
+ * @ogm_buff_len: length of the OGM packet buffer
+ * @ogm_seqno: OGM sequence number - used to identify each OGM
++ * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
+ */
+ struct batadv_hard_iface_bat_iv {
+ unsigned char *ogm_buff;
+ int ogm_buff_len;
+ atomic_t ogm_seqno;
++ struct mutex ogm_buff_mutex;
+ };
+
+ /**
+@@ -103,7 +105,7 @@ struct batadv_hard_iface_bat_iv {
+ */
+ struct batadv_hard_iface {
+ struct list_head list;
+- s16 if_num;
++ unsigned int if_num;
+ char if_status;
+ struct net_device *net_dev;
+ u8 num_bcasts;
+@@ -287,7 +289,9 @@ struct batadv_orig_node {
+ DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+ u32 last_bcast_seqno;
+ struct hlist_head neigh_list;
+- /* neigh_list_lock protects: neigh_list and router */
++ /* neigh_list_lock protects: neigh_list, ifinfo_list,
++ * last_bonding_candidate and router
++ */
+ spinlock_t neigh_list_lock;
+ struct hlist_node hash_entry;
+ struct batadv_priv *bat_priv;
+@@ -806,7 +810,7 @@ struct batadv_priv {
+ atomic_t bcast_seqno;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
+- char num_ifaces;
++ unsigned int num_ifaces;
+ struct kobject *mesh_obj;
+ struct dentry *debug_dir;
+ struct hlist_head forw_bat_list;
+@@ -884,6 +888,7 @@ struct batadv_socket_packet {
+ * backbone gateway - no bcast traffic is forwarded until the situation was
+ * resolved
+ * @crc: crc16 checksum over all claims
++ * @crc_lock: lock protecting crc
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+@@ -897,6 +902,7 @@ struct batadv_bla_backbone_gw {
+ atomic_t wait_periods;
+ atomic_t request_sent;
+ u16 crc;
++ spinlock_t crc_lock; /* protects crc */
+ atomic_t refcount;
+ struct rcu_head rcu;
+ };
+@@ -915,6 +921,7 @@ struct batadv_bla_claim {
+ u8 addr[ETH_ALEN];
+ unsigned short vid;
+ struct batadv_bla_backbone_gw *backbone_gw;
++ spinlock_t backbone_lock; /* protects backbone_gw */
+ unsigned long lasttime;
+ struct hlist_node hash_entry;
+ struct rcu_head rcu;
+@@ -947,10 +954,12 @@ struct batadv_tt_common_entry {
+ * struct batadv_tt_local_entry - translation table local entry data
+ * @common: general translation table data
+ * @last_seen: timestamp used for purging stale tt local entries
++ * @vlan: soft-interface vlan of the entry
+ */
+ struct batadv_tt_local_entry {
+ struct batadv_tt_common_entry common;
+ unsigned long last_seen;
++ struct batadv_softif_vlan *vlan;
+ };
+
+ /**
+@@ -973,6 +982,7 @@ struct batadv_tt_global_entry {
+ * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
+ * @orig_node: pointer to orig node announcing this non-mesh client
+ * @ttvn: translation table version number which added the non-mesh client
++ * @flags: per orig entry TT sync flags
+ * @list: list node for batadv_tt_global_entry::orig_list
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+@@ -980,6 +990,7 @@ struct batadv_tt_global_entry {
+ struct batadv_tt_orig_list_entry {
+ struct batadv_orig_node *orig_node;
+ u8 ttvn;
++ u8 flags;
+ struct hlist_node list;
+ atomic_t refcount;
+ struct rcu_head rcu;
+@@ -999,11 +1010,13 @@ struct batadv_tt_change_node {
+ * struct batadv_tt_req_node - data to keep track of the tt requests in flight
+ * @addr: mac address of the originator this request was sent to
+ * @issued_at: timestamp used for purging stale tt requests
++ * @refcount: number of contexts the object is used by
+ * @list: list node for batadv_priv_tt::req_list
+ */
+ struct batadv_tt_req_node {
+ u8 addr[ETH_ALEN];
+ unsigned long issued_at;
++ struct kref refcount;
+ struct hlist_node list;
+ };
+
+@@ -1168,9 +1181,9 @@ struct batadv_algo_ops {
+ struct batadv_hard_iface *hard_iface);
+ void (*bat_orig_free)(struct batadv_orig_node *orig_node);
+ int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
+- int max_if_num);
++ unsigned int max_if_num);
+ int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
+- int max_if_num, int del_if_num);
++ unsigned int max_if_num, unsigned int del_if_num);
+ };
+
+ /**
+diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
+index 35c432668454..040983fc15da 100644
+--- a/net/ieee802154/nl_policy.c
++++ b/net/ieee802154/nl_policy.c
+@@ -30,7 +30,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
+ [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
+ [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
+ [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
++ [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
+ [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
+ [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
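This and the later netlink policy hunks (nfnetlink_cthelper, nfc, sch_fq, nl80211) all fix the same class of bug: an attribute with no entry in the nla_policy table gets no type or length validation, so userspace can send an undersized attribute that handlers then read past. Declaring NLA_U8/NLA_U16/NLA_U32 for every attribute the code touches lets the netlink core reject malformed messages up front. A toy validator showing why the unlisted case is dangerous (simplified; the real nla_policy machinery is richer):

    #include <stdbool.h>
    #include <stddef.h>

    enum attr_type { ATTR_UNSPEC, ATTR_U8, ATTR_U16 };

    struct attr { unsigned int id; size_t len; };

    static const enum attr_type policy[] = {
        [1] = ATTR_U8,     /* e.g. IEEE802154_ATTR_BCN_ORD */
        [2] = ATTR_U16,
    };

    static bool attr_ok(const struct attr *a)
    {
        if (a->id >= sizeof(policy) / sizeof(policy[0]))
            return false;

        switch (policy[a->id]) {
        case ATTR_U8:  return a->len == 1;
        case ATTR_U16: return a->len == 2;
        default:       return true;   /* ATTR_UNSPEC: no check at all */
        }
    }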
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 7fe643062013..98ed5e43ab7b 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1809,6 +1809,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
+ unsigned char optbuf[sizeof(struct ip_options) + 40];
+ struct ip_options *opt = (struct ip_options *)optbuf;
++ int res;
+
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+ return;
+@@ -1820,7 +1821,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+
+ memset(opt, 0, sizeof(struct ip_options));
+ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+- if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++ rcu_read_lock();
++ res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
++ rcu_read_unlock();
++
++ if (res)
+ return;
+
+ if (gateway)
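The cipso_v4_error() change brackets __ip_options_compile() with rcu_read_lock()/rcu_read_unlock() because option compilation can dereference RCU-protected state, and this error path is not guaranteed to run in a context that already holds the RCU read lock. The general rule: any dereference of an RCU-protected pointer must sit inside a read-side critical section. A hedged sketch using liburcu, which mirrors the kernel API in userspace (reader threads must also call rcu_register_thread(), omitted here for brevity):

    #include <urcu.h>

    struct cfg { int value; };
    static struct cfg *active_cfg;    /* RCU-protected pointer */

    static int read_cfg_value(void)
    {
        struct cfg *c;
        int v = -1;

        rcu_read_lock();
        c = rcu_dereference(active_cfg);
        if (c)
            v = c->value;
        rcu_read_unlock();
        return v;
    }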
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 71263754b19b..bd2a6ec7572a 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -185,9 +185,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ retv = -EBUSY;
+ break;
+ }
+- } else if (sk->sk_protocol != IPPROTO_TCP)
++ } else if (sk->sk_protocol == IPPROTO_TCP) {
++ if (sk->sk_prot != &tcpv6_prot) {
++ retv = -EBUSY;
++ break;
++ }
+ break;
+-
++ } else {
++ break;
++ }
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ retv = -ENOTCONN;
+ break;
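The IPV6_ADDRFORM fix is a type-confusion guard: a socket can claim IPPROTO_TCP while its sk_prot has been replaced, and converting such a socket as if it were plain TCP corrupts state. Checking sk->sk_prot == &tcpv6_prot before proceeding ensures the downcast is safe. The shape of the check in miniature (illustrative structs, not the kernel's):

    #include <stdbool.h>

    struct sock_ops { const char *name; };
    static const struct sock_ops tcp_ops = { "tcp" };

    struct sock {
        int protocol;                   /* what the socket claims to be */
        const struct sock_ops *ops;     /* may have been swapped */
    };

    /* Claiming TCP is not enough; only proceed when the socket still
     * uses the genuine TCP ops table. */
    static bool is_plain_tcp(const struct sock *sk)
    {
        return sk->protocol == 6 /* IPPROTO_TCP */ && sk->ops == &tcp_ops;
    }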
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index ac143ae4f7b6..8c1733869343 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -711,6 +711,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
+ [NFCTH_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN-1 },
+ [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
++ [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
++ [NFCTH_STATUS] = { .type = NLA_U32, },
+ };
+
+ static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
+diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
+index 5a58f9f38095..291f24fef19a 100644
+--- a/net/nfc/hci/core.c
++++ b/net/nfc/hci/core.c
+@@ -193,13 +193,20 @@ exit:
+ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb)
+ {
+- u8 gate = hdev->pipes[pipe].gate;
+ u8 status = NFC_HCI_ANY_OK;
+ struct hci_create_pipe_resp *create_info;
+ struct hci_delete_pipe_noti *delete_info;
+ struct hci_all_pipe_cleared_noti *cleared_info;
++ u8 gate;
+
+- pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
++ pr_debug("from pipe %x cmd %x\n", pipe, cmd);
++
++ if (pipe >= NFC_HCI_MAX_PIPES) {
++ status = NFC_HCI_ANY_E_NOK;
++ goto exit;
++ }
++
++ gate = hdev->pipes[pipe].gate;
+
+ switch (cmd) {
+ case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
+@@ -387,8 +394,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb)
+ {
+ int r = 0;
+- u8 gate = hdev->pipes[pipe].gate;
++ u8 gate;
++
++ if (pipe >= NFC_HCI_MAX_PIPES) {
++ pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
++ goto exit;
++ }
+
++ gate = hdev->pipes[pipe].gate;
+ if (gate == NFC_HCI_INVALID_GATE) {
+ pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
+ goto exit;
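+
Both net/nfc/hci/core.c hunks apply the same hardening: the pipe number arrives from the remote HCI side and indexes the fixed-size hdev->pipes[] array, so it is now range-checked against NFC_HCI_MAX_PIPES before hdev->pipes[pipe].gate is read. A standalone sketch of that bounds check; MAX_PIPES, struct pipe_info and lookup_gate are invented stand-ins, and the value 127 is illustrative rather than necessarily the kernel constant:

/* An index taken from untrusted input must be range-checked
 * before it is used to subscript a fixed-size table. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 127             /* stand-in for NFC_HCI_MAX_PIPES */

struct pipe_info { uint8_t gate; };
static struct pipe_info pipes[MAX_PIPES];

static int lookup_gate(uint8_t pipe)
{
	if (pipe >= MAX_PIPES)        /* untrusted index: reject first */
		return -1;
	return pipes[pipe].gate;
}

int main(void)
{
	pipes[3].gate = 0x41;
	printf("pipe 3 -> gate %d\n", lookup_gate(3));
	printf("pipe 200 -> %d (rejected)\n", lookup_gate(200));
	return 0;
}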
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 04d4c388a7a8..c9d5e9c62178 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -62,7 +62,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
+ [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
+ [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
+ .len = NFC_FIRMWARE_NAME_MAXSIZE },
++ [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
+ [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
++ [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
++ [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
+ [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+
+ };
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index fb99872ef426..eb814ffc0902 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -668,6 +668,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
++ [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
+ };
+
+ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4b30e91106d0..c6c168f20b0f 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -404,6 +404,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [NL80211_ATTR_MDID] = { .type = NLA_U16 },
+ [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
++ [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
++ [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
+ [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
+@@ -429,6 +431,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
+ [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
+ [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
++ [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
+ [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
+ [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
+ [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },