author | Jakub Kicinski <kuba@kernel.org> | 2023-06-27 09:45:22 -0700 |
---|---|---|
committer | Jakub Kicinski <kuba@kernel.org> | 2023-06-27 09:45:22 -0700 |
commit | 3674fbf0451df0395f9fa18df3122927006a3829 (patch) | |
tree | 632690d6c74a7bedb166299bbc92059cd3a537f9 | |
parent | 9d797ee2dce1e3e243bcc18dad7728df72fd11a4 (diff) | |
parent | 25a9c8a4431c364f97f75558cb346d2ad3f53fbb (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge in late fixes to prepare for the 6.5 net-next PR.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
37 files changed, 417 insertions, 195 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index dd3becee7eae..095c4278a0f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3606,6 +3606,7 @@ S: Supported W: http://www.bluez.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git +F: Documentation/devicetree/bindings/net/bluetooth/ F: drivers/bluetooth/ BLUETOOTH SUBSYSTEM @@ -12549,7 +12550,7 @@ F: drivers/mtd/nand/raw/marvell_nand.c MARVELL OCTEON ENDPOINT DRIVER M: Veerasenareddy Burru <vburru@marvell.com> -M: Abhijit Ayarekar <aayarekar@marvell.com> +M: Sathesh Edara <sedara@marvell.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/marvell/octeon_ep diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 16405b84dc2f..7a0f25301f7e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4200,7 +4200,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) return skb->hash; return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, - skb_mac_offset(skb), skb_network_offset(skb), + 0, skb_network_offset(skb), skb_headlen(skb)); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index c15ed0acdb77..0092e46c46f8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -673,5 +673,7 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); + clk_prepare_enable(priv->clk); platform_device_unregister(priv->mii_pdev); + clk_disable_unprepare(priv->clk); } diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 18d4af934d8c..00a5ee487812 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -229,7 +229,10 @@ struct igc_adapter { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; - struct work_struct ptp_tx_work; + /* Access to ptp_tx_skb and ptp_tx_start are protected by the + * ptp_tx_lock. 
+ */ + spinlock_t ptp_tx_lock; struct sk_buff *ptp_tx_skb; struct hwtstamp_config tstamp_config; unsigned long ptp_tx_start; @@ -429,7 +432,6 @@ enum igc_state_t { __IGC_TESTING, __IGC_RESETTING, __IGC_DOWN, - __IGC_PTP_TX_IN_PROGRESS, }; enum igc_tx_flags { @@ -613,6 +615,7 @@ enum igc_ring_flags_t { IGC_RING_FLAG_TX_CTX_IDX, IGC_RING_FLAG_TX_DETECT_HANG, IGC_RING_FLAG_AF_XDP_ZC, + IGC_RING_FLAG_TX_HWTSTAMP, }; #define ring_uses_large_buffer(ring) \ @@ -669,6 +672,7 @@ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); void igc_ptp_tx_hang(struct igc_adapter *adapter); void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 5ed7901fd2fb..019ce91c45aa 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1585,14 +1585,16 @@ done: } } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { /* FIXME: add support for retrieving timestamps from * the other timer registers before skipping the * timestamping request. */ - if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && - !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS, - &adapter->state)) { + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (!adapter->ptp_tx_skb) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGC_TX_FLAGS_TSTAMP; @@ -1601,6 +1603,8 @@ done: } else { adapter->tx_hwtstamp_skipped++; } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } if (skb_vlan_tag_present(skb)) { @@ -5264,7 +5268,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter) if (tsicr & IGC_TSICR_TXTS) { /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); + igc_ptp_tx_tstamp_event(adapter); ack |= IGC_TSICR_TXTS; } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 4e10ced736db..32ef112f8291 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -536,9 +536,34 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) wr32(IGC_TSYNCRXCTL, val); } +static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + int i; + + /* Clear the flags first to avoid new packets to be enqueued + * for TX timestamping. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + + /* Now we can clean the pending TX timestamp requests. 
*/ + igc_ptp_clear_tx_tstamp(adapter); wr32(IGC_TSYNCTXCTL, 0); } @@ -546,12 +571,23 @@ static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + int i; wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); /* Read TXSTMP registers to discard any timestamp previously stored. */ rd32(IGC_TXSTMPL); rd32(IGC_TXSTMPH); + + /* The hardware is ready to accept TX timestamp requests, + * notify the transmit path. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + } /** @@ -603,6 +639,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, return 0; } +/* Requires adapter->ptp_tx_lock held by caller. */ static void igc_ptp_tx_timeout(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; @@ -610,7 +647,6 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter) dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; adapter->tx_hwtstamp_timeouts++; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ rd32(IGC_TXSTMPH); netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); @@ -618,20 +654,20 @@ static void igc_ptp_tx_timeout(struct igc_adapter *adapter) void igc_ptp_tx_hang(struct igc_adapter *adapter) { - bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + - IGC_PTP_TX_TIMEOUT); + unsigned long flags; - if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) - return; + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - /* If we haven't received a timestamp within the timeout, it is - * reasonable to assume that it will never occur, so we can unlock the - * timestamp bit when this occurs. - */ - if (timeout) { - cancel_work_sync(&adapter->ptp_tx_work); - igc_ptp_tx_timeout(adapter); - } + if (!adapter->ptp_tx_skb) + goto unlock; + + if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) + goto unlock; + + igc_ptp_tx_timeout(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } /** @@ -641,20 +677,57 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter) * If we were asked to do hardware stamping and such a time stamp is * available, then it must have been for this skb here because we only * allow only one such packet into the queue. + * + * Context: Expects adapter->ptp_tx_lock to be held by caller. */ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) { struct sk_buff *skb = adapter->ptp_tx_skb; struct skb_shared_hwtstamps shhwtstamps; struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; int adjust = 0; u64 regval; if (WARN_ON_ONCE(!skb)) return; - regval = rd32(IGC_TXSTMPL); - regval |= (u64)rd32(IGC_TXSTMPH) << 32; + tsynctxctl = rd32(IGC_TSYNCTXCTL); + tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0; + if (tsynctxctl) { + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } else { + /* There's a bug in the hardware that could cause + * missing interrupts for TX timestamping. The issue + * is that for new interrupts to be triggered, the + * IGC_TXSTMPH_0 register must be read. + * + * To avoid discarding a valid timestamp that just + * happened at the "wrong" time, we need to confirm + * that there was no timestamp captured, we do that by + * assuming that no two timestamps in sequence have + * the same nanosecond value. 
+ * + * So, we read the "low" register, read the "high" + * register (to latch a new timestamp) and read the + * "low" register again, if "old" and "new" versions + * of the "low" register are different, a valid + * timestamp was captured, we can read the "high" + * register again. + */ + u32 txstmpl_old, txstmpl_new; + + txstmpl_old = rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + txstmpl_new = rd32(IGC_TXSTMPL); + + if (txstmpl_old == txstmpl_new) + return; + + regval = txstmpl_new; + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) return; @@ -676,13 +749,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) shhwtstamps.hwtstamp = ktime_add_ns(shhwtstamps.hwtstamp, adjust); - /* Clear the lock early before calling skb_tstamp_tx so that - * applications are not woken up before the lock bit is clear. We use - * a copy of the skb pointer to ensure other threads can't change it - * while we're notifying the stack. - */ adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); /* Notify the stack and free the skb after we've unlocked */ skb_tstamp_tx(skb, &shhwtstamps); @@ -690,27 +757,25 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) } /** - * igc_ptp_tx_work - * @work: pointer to work struct + * igc_ptp_tx_tstamp_event + * @adapter: board private structure * - * This work function polls the TSYNCTXCTL valid bit to determine when a - * timestamp has been taken for the current stored skb. + * Called when a TX timestamp interrupt happens to retrieve the + * timestamp and send it up to the socket. */ -static void igc_ptp_tx_work(struct work_struct *work) +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) { - struct igc_adapter *adapter = container_of(work, struct igc_adapter, - ptp_tx_work); - struct igc_hw *hw = &adapter->hw; - u32 tsynctxctl; + unsigned long flags; - if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) - return; + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - tsynctxctl = rd32(IGC_TSYNCTXCTL); - if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0))) - return; + if (!adapter->ptp_tx_skb) + goto unlock; igc_ptp_tx_hwtstamp(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } /** @@ -959,8 +1024,8 @@ void igc_ptp_init(struct igc_adapter *adapter) return; } + spin_lock_init(&adapter->ptp_tx_lock); spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igc_ptp_tx_work); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -1020,10 +1085,7 @@ void igc_ptp_suspend(struct igc_adapter *adapter) if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) return; - cancel_work_sync(&adapter->ptp_tx_work); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + igc_ptp_clear_tx_tstamp(adapter); if (pci_device_is_present(adapter->pdev)) { igc_ptp_time_save(adapter); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b63e47af6365..8c019f382a7f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1297,8 +1297,10 @@ static void efx_ef10_fini_nic(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + spin_lock_bh(&efx->stats_lock); kfree(nic_data->mc_stats); nic_data->mc_stats = NULL; + spin_unlock_bh(&efx->stats_lock); } static int efx_ef10_init_nic(struct efx_nic *efx) @@ -1852,9 +1854,14 @@ static size_t 
efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, efx_ef10_get_stat_mask(efx, mask); - efx_nic_copy_stats(efx, nic_data->mc_stats); - efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, - mask, stats, nic_data->mc_stats, false); + /* If NIC was fini'd (probably resetting), then we can't read + * updated stats right now. + */ + if (nic_data->mc_stats) { + efx_nic_copy_stats(efx, nic_data->mc_stats); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + mask, stats, nic_data->mc_stats, false); + } /* Update derived statistics */ efx_nic_fix_nodesc_drop_stat(efx, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 10e8a5606ba6..4727f7be4f86 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7461,12 +7461,6 @@ void stmmac_dvr_remove(struct device *dev) netif_carrier_off(ndev); unregister_netdev(ndev); - /* Serdes power down needs to happen after VLAN filter - * is deleted that is triggered by unregister_netdev(). - */ - if (priv->plat->serdes_powerdown) - priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); - #ifdef CONFIG_DEBUG_FS stmmac_exit_fs(ndev); #endif diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index ae7b9af7b7d7..8e32dc50a408 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2043,6 +2043,11 @@ static int axienet_probe(struct platform_device *pdev) goto cleanup_clk; } + /* Reset core now that clocks are enabled, prior to accessing MDIO */ + ret = __axienet_device_reset(lp); + if (ret) + goto cleanup_clk; + /* Autodetect the need for 64-bit DMA pointers. * When the IP is configured for a bus width bigger than 32 bits, * writing the MSB registers is mandatory, even if they are all 0. @@ -2097,11 +2102,6 @@ static int axienet_probe(struct platform_device *pdev) lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; - /* Reset core now that clocks are enabled, prior to accessing MDIO */ - ret = __axienet_device_reset(lp); - if (ret) - goto cleanup_clk; - ret = axienet_mdio_setup(lp); if (ret) dev_warn(&pdev->dev, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 15c7dc82107f..acb20ad4e37e 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -631,7 +631,9 @@ static void __gtp_encap_destroy(struct sock *sk) gtp->sk1u = NULL; udp_sk(sk)->encap_type = 0; rcu_assign_sk_user_data(sk, NULL); + release_sock(sk); sock_put(sk); + return; } release_sock(sk); } diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a8977965a7f2..c0c49f181367 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -584,7 +584,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } out: @@ -610,7 +611,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); return NET_XMIT_DROP; } - return ipvlan_rcv_frame(addr, &skb, true); + ipvlan_rcv_frame(addr, &skb, true); + return NET_XMIT_SUCCESS; } } skb = skb_share_check(skb, GFP_ATOMIC); @@ -622,7 +624,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) * the skb for the main-dev. 
At the RX side we just return * RX_PASS for it to be processed further on the stack. */ - return dev_forward_skb(ipvlan->phy_dev, skb); + dev_forward_skb(ipvlan->phy_dev, skb); + return NET_XMIT_SUCCESS; } else if (is_multicast_ether_addr(eth->h_dest)) { skb_reset_mac_header(skb); diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index 3cd9a77f9532..d7616b13c594 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -12,6 +12,11 @@ /* MDIO_MMD_VEND2 registers */ #define DP83TD510E_PHY_STS 0x10 +/* Bit 7 - mii_interrupt, active high. Clears on read. + * Note: Clearing does not necessarily deactivate IRQ pin if interrupts pending. + * This differs from the DP83TD510E datasheet (2020) which states this bit + * clears on write 0. + */ #define DP83TD510E_STS_MII_INT BIT(7) #define DP83TD510E_LINK_STATUS BIT(0) @@ -53,12 +58,6 @@ static int dp83td510_config_intr(struct phy_device *phydev) int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - /* Clear any pending interrupts */ - ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, - 0x0); - if (ret) - return ret; - ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1, DP83TD510E_INT1_LINK_EN); @@ -81,10 +80,6 @@ static int dp83td510_config_intr(struct phy_device *phydev) DP83TD510E_GENCFG_INT_EN); if (ret) return ret; - - /* Clear any pending interrupts */ - ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, - 0x0); } return ret; @@ -94,14 +89,6 @@ static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev) { int ret; - ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS); - if (ret < 0) { - phy_error(phydev); - return IRQ_NONE; - } else if (!(ret & DP83TD510E_STS_MII_INT)) { - return IRQ_NONE; - } - /* Read the current enabled interrupts */ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1); if (ret < 0) { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2e7c7b0cdc54..417f7ea1fffa 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1427,6 +1427,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ + {QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */ {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ /* 4. Gobi 1000 devices */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0762444e3767..d4fed4c508ca 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -481,7 +481,7 @@ struct nfnl_ct_hook { }; extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; -/** +/* * nf_skb_duplicated - TEE target has sent a packet * * When a xtables target sends a packet, the OUTPUT and POSTROUTING @@ -492,7 +492,7 @@ extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; */ DECLARE_PER_CPU(bool, nf_skb_duplicated); -/** +/* * Contains bitmask of ctnetlink event subscribers, if any. * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag. */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 90bba1ce5899..d309ee7ed04b 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -314,9 +314,17 @@ struct dsa_port { struct list_head fdbs; struct list_head mdbs; - /* List of VLANs that CPU and DSA ports are members of. 
*/ struct mutex vlans_lock; - struct list_head vlans; + union { + /* List of VLANs that CPU and DSA ports are members of. + * Access to this is serialized by the sleepable @vlans_lock. + */ + struct list_head vlans; + /* List of VLANs that user ports are members of. + * Access to this is serialized by netif_addr_lock_bh(). + */ + struct list_head user_vlans; + }; }; /* TODO: ideally DSA ports would have a single dp->link_dp member, diff --git a/include/net/sock.h b/include/net/sock.h index 121284f455a8..2eb916d1ff64 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2095,6 +2095,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) } kuid_t sock_i_uid(struct sock *sk); +unsigned long __sock_i_ino(struct sock *sk); unsigned long sock_i_ino(struct sock *sk); static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 1f2234221dd1..c8ecbf74ef29 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -60,10 +60,12 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) struct ts_bm *bm = ts_config_priv(conf); unsigned int i, text_len, consumed = state->offset; const u8 *text; - int shift = bm->patlen - 1, bs; + int bs; const u8 icase = conf->flags & TS_IGNORECASE; for (;;) { + int shift = bm->patlen - 1; + text_len = conf->get_next_block(consumed, &text, conf, state); if (unlikely(text_len == 0)) diff --git a/net/can/isotp.c b/net/can/isotp.c index 1f25b45868cf..99770ed28531 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1112,8 +1112,9 @@ wait_free_buffer: if (err) goto err_event_drop; - if (sk->sk_err) - return -sk->sk_err; + err = sock_error(sk); + if (err) + return err; } return size; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2c61fb912772..3ad4e030846d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4097,7 +4097,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb, ndm->ndm_ifindex = dev->ifindex; ndm->ndm_state = ndm_state; - if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) goto nla_put_failure; if (vid) if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) @@ -4111,10 +4111,10 @@ nla_put_failure: return -EMSGSIZE; } -static inline size_t rtnl_fdb_nlmsg_size(void) +static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + - nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ + nla_total_size(dev->addr_len) + /* NDA_LLADDR */ nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 0; } @@ -4126,7 +4126,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, struct sk_buff *skb; int err = -ENOBUFS; - skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); + skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); if (!skb) goto errout; diff --git a/net/core/sock.c b/net/core/sock.c index de719094b804..9370fd50aa2c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2598,13 +2598,24 @@ kuid_t sock_i_uid(struct sock *sk) } EXPORT_SYMBOL(sock_i_uid); -unsigned long sock_i_ino(struct sock *sk) +unsigned long __sock_i_ino(struct sock *sk) { unsigned long ino; - read_lock_bh(&sk->sk_callback_lock); + read_lock(&sk->sk_callback_lock); ino = sk->sk_socket ? 
SOCK_INODE(sk->sk_socket)->i_ino : 0; - read_unlock_bh(&sk->sk_callback_lock); + read_unlock(&sk->sk_callback_lock); + return ino; +} +EXPORT_SYMBOL(__sock_i_ino); + +unsigned long sock_i_ino(struct sock *sk) +{ + unsigned long ino; + + local_bh_disable(); + ino = __sock_i_ino(sk); + local_bh_enable(); return ino; } EXPORT_SYMBOL(sock_i_ino); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1afed89e03c0..ccbdb98109f8 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1106,7 +1106,7 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) mutex_init(&dp->vlans_lock); INIT_LIST_HEAD(&dp->fdbs); INIT_LIST_HEAD(&dp->mdbs); - INIT_LIST_HEAD(&dp->vlans); + INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */ INIT_LIST_HEAD(&dp->list); list_add_tail(&dp->list, &dst->ports); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 165bb2cb8431..527b1d576460 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -27,6 +27,7 @@ #include "master.h" #include "netlink.h" #include "slave.h" +#include "switch.h" #include "tag.h" struct dsa_switchdev_event_work { @@ -161,8 +162,7 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev, return 0; } -static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid, - void *arg) +static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid) { struct dsa_host_vlan_rx_filtering_ctx *ctx = arg; @@ -170,6 +170,28 @@ static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid, ctx->addr, vid); } +static int dsa_slave_vlan_for_each(struct net_device *dev, + int (*cb)(void *arg, int vid), void *arg) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_vlan *v; + int err; + + lockdep_assert_held(&dev->addr_list_lock); + + err = cb(arg, 0); + if (err) + return err; + + list_for_each_entry(v, &dp->user_vlans, list) { + err = cb(arg, v->vid); + if (err) + return err; + } + + return 0; +} + static int dsa_slave_sync_uc(struct net_device *dev, const unsigned char *addr) { @@ -180,18 +202,14 @@ static int dsa_slave_sync_uc(struct net_device *dev, .addr = addr, .event = DSA_UC_ADD, }; - int err; dev_uc_add(master, addr); if (!dsa_switch_supports_uc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_unsync_uc(struct net_device *dev, @@ -204,18 +222,14 @@ static int dsa_slave_unsync_uc(struct net_device *dev, .addr = addr, .event = DSA_UC_DEL, }; - int err; dev_uc_del(master, addr); if (!dsa_switch_supports_uc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_sync_mc(struct net_device *dev, @@ -228,18 +242,14 @@ static int dsa_slave_sync_mc(struct net_device *dev, .addr = addr, .event = DSA_MC_ADD, }; - int err; dev_mc_add(master, addr); if (!dsa_switch_supports_mc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_unsync_mc(struct net_device *dev, @@ -252,18 
+262,14 @@ static int dsa_slave_unsync_mc(struct net_device *dev, .addr = addr, .event = DSA_MC_DEL, }; - int err; dev_mc_del(master, addr); if (!dsa_switch_supports_mc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } void dsa_slave_sync_ha(struct net_device *dev) @@ -1759,6 +1765,7 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, struct netlink_ext_ack extack = {0}; struct dsa_switch *ds = dp->ds; struct netdev_hw_addr *ha; + struct dsa_vlan *v; int ret; /* User port... */ @@ -1782,8 +1789,17 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, !dsa_switch_supports_mc_filtering(ds)) return 0; + v = kzalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto rollback; + } + netif_addr_lock_bh(dev); + v->vid = vid; + list_add_tail(&v->list, &dp->user_vlans); + if (dsa_switch_supports_mc_filtering(ds)) { netdev_for_each_synced_mc_addr(ha, dev) { dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, @@ -1803,6 +1819,12 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, dsa_flush_workqueue(); return 0; + +rollback: + dsa_port_host_vlan_del(dp, &vlan); + dsa_port_vlan_del(dp, &vlan); + + return ret; } static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, @@ -1816,6 +1838,7 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, }; struct dsa_switch *ds = dp->ds; struct netdev_hw_addr *ha; + struct dsa_vlan *v; int err; err = dsa_port_vlan_del(dp, &vlan); @@ -1832,6 +1855,15 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, netif_addr_lock_bh(dev); + v = dsa_vlan_find(&dp->user_vlans, &vlan); + if (!v) { + netif_addr_unlock_bh(dev); + return -ENOENT; + } + + list_del(&v->list); + kfree(v); + if (dsa_switch_supports_mc_filtering(ds)) { netdev_for_each_synced_mc_addr(ha, dev) { dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 8c9a9f94b756..1a42f9317334 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -673,8 +673,8 @@ static bool dsa_port_host_vlan_match(struct dsa_port *dp, return false; } -static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, - const struct switchdev_obj_port_vlan *vlan) +struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, + const struct switchdev_obj_port_vlan *vlan) { struct dsa_vlan *v; diff --git a/net/dsa/switch.h b/net/dsa/switch.h index 15e67b95eb6e..ea034677da15 100644 --- a/net/dsa/switch.h +++ b/net/dsa/switch.h @@ -111,6 +111,9 @@ struct dsa_notifier_master_state_info { bool operational; }; +struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, + const struct switchdev_obj_port_vlan *vlan); + int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v); int dsa_broadcast(unsigned long e, void *v); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index c1557d47ccd1..d4fd626d2b8c 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -432,9 +432,19 @@ static bool dccp_error(const struct dccp_hdr *dh, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state) { + static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST | + 1 << DCCP_PKT_RESPONSE | + 1 << 
DCCP_PKT_CLOSEREQ | + 1 << DCCP_PKT_CLOSE | + 1 << DCCP_PKT_RESET | + 1 << DCCP_PKT_SYNC | + 1 << DCCP_PKT_SYNCACK; unsigned int dccp_len = skb->len - dataoff; unsigned int cscov; const char *msg; + u8 type; + + BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG); if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || dh->dccph_doff * 4 > dccp_len) { @@ -459,34 +469,70 @@ static bool dccp_error(const struct dccp_hdr *dh, goto out_invalid; } - if (dh->dccph_type >= DCCP_PKT_INVALID) { + type = dh->dccph_type; + if (type >= DCCP_PKT_INVALID) { msg = "nf_ct_dccp: reserved packet type "; goto out_invalid; } + + if (test_bit(type, &require_seq48) && !dh->dccph_x) { + msg = "nf_ct_dccp: type lacks 48bit sequence numbers"; + goto out_invalid; + } + return false; out_invalid: nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg); return true; } +struct nf_conntrack_dccp_buf { + struct dccp_hdr dh; /* generic header part */ + struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */ + union { /* depends on header type */ + struct dccp_hdr_ack_bits ack; + struct dccp_hdr_request req; + struct dccp_hdr_response response; + struct dccp_hdr_reset rst; + } u; +}; + +static struct dccp_hdr * +dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh, + struct nf_conntrack_dccp_buf *buf) +{ + unsigned int hdrlen = __dccp_hdr_len(dh); + + if (hdrlen > sizeof(*buf)) + return NULL; + + return skb_header_pointer(skb, offset, hdrlen, buf); +} + int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - struct dccp_hdr _dh, *dh; + struct nf_conntrack_dccp_buf _dh; u_int8_t type, old_state, new_state; enum ct_dccp_roles role; unsigned int *timeouts; + struct dccp_hdr *dh; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); + dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh); if (!dh) return NF_DROP; if (dccp_error(dh, skb, dataoff, state)) return -NF_ACCEPT; + /* pull again, including possible 48 bit sequences and subtype header */ + dh = dccp_header_pointer(skb, dataoff, dh, &_dh); + if (!dh) + return NF_DROP; + type = dh->dccph_type; if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state)) return -NF_ACCEPT; diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 77f5e82d8e3f..d0eac27f6ba0 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -611,7 +611,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, start += strlen(name); *val = simple_strtoul(start, &end, 0); if (start == end) - return 0; + return -1; if (matchoff && matchlen) { *matchoff = start - dptr; *matchlen = end - start; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0911e6d5f5d2..9573a8fcad79 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5344,6 +5344,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, nft_set_trans_unbind(ctx, set); if (nft_set_is_anonymous(set)) nft_deactivate_next(ctx->net, set); + else + list_del_rcu(&binding->list); set->use--; break; @@ -6795,7 +6797,9 @@ err_set_full: err_element_clash: kfree(trans); err_elem_free: - nft_set_elem_destroy(set, elem.priv, true); + nf_tables_set_elem_destroy(ctx, set, elem.priv); + if (obj) + obj->use--; err_parse_data: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_release(&elem.data.val, 
desc.type); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 39cfb778ebc5..383631873748 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1600,6 +1600,7 @@ out: int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) { struct netlink_set_err_data info; + unsigned long flags; struct sock *sk; int ret = 0; @@ -1609,12 +1610,12 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) /* sk->sk_err wants a positive error value */ info.code = -code; - read_lock(&nl_table_lock); + read_lock_irqsave(&nl_table_lock, flags); sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) ret += do_one_set_err(sk, &info); - read_unlock(&nl_table_lock); + read_unlock_irqrestore(&nl_table_lock, flags); return ret; } EXPORT_SYMBOL(netlink_set_err); diff --git a/net/netlink/diag.c b/net/netlink/diag.c index c6255eac305c..e4f21b1067bc 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -94,6 +94,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net *net = sock_net(skb->sk); struct netlink_diag_req *req; struct netlink_sock *nlsk; + unsigned long flags; struct sock *sk; int num = 2; int ret = 0; @@ -152,7 +153,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, num++; mc_list: - read_lock(&nl_table_lock); + read_lock_irqsave(&nl_table_lock, flags); sk_for_each_bound(sk, &tbl->mc_list) { if (sk_hashed(sk)) continue; @@ -167,13 +168,13 @@ mc_list: NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - sock_i_ino(sk)) < 0) { + __sock_i_ino(sk)) < 0) { ret = 1; break; } num++; } - read_unlock(&nl_table_lock); + read_unlock_irqrestore(&nl_table_lock, flags); done: cb->args[0] = num; diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h index c1d9be636933..d8345ed57c95 100644 --- a/net/nfc/llcp.h +++ b/net/nfc/llcp.h @@ -201,7 +201,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s); void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s); void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock); struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); -struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local); int nfc_llcp_local_put(struct nfc_llcp_local *local); u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, struct nfc_llcp_sock *sock); diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index cdb001de0692..e2680a3bef79 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -359,6 +359,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) struct sk_buff *skb; struct nfc_llcp_local *local; u16 size = 0; + int err; local = nfc_llcp_find_local(dev); if (local == NULL) @@ -368,8 +369,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); - if (skb == NULL) - return -ENOMEM; + if (skb == NULL) { + err = -ENOMEM; + goto out; + } skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); @@ -379,8 +382,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); - return nfc_data_exchange(dev, local->target_idx, skb, + err = nfc_data_exchange(dev, local->target_idx, skb, nfc_llcp_recv, local); +out: + nfc_llcp_local_put(local); + return err; } int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index a27e1842b2a0..f60e424e0607 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c 
@@ -17,6 +17,8 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; static LIST_HEAD(llcp_devices); +/* Protects llcp_devices list */ +static DEFINE_SPINLOCK(llcp_devices_lock); static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb); @@ -141,7 +143,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device, write_unlock(&local->raw_sockets.lock); } -struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) +static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) { kref_get(&local->ref); @@ -169,7 +171,6 @@ static void local_release(struct kref *ref) local = container_of(ref, struct nfc_llcp_local, ref); - list_del(&local->list); local_cleanup(local); kfree(local); } @@ -282,12 +283,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t) struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) { struct nfc_llcp_local *local; + struct nfc_llcp_local *res = NULL; + spin_lock(&llcp_devices_lock); list_for_each_entry(local, &llcp_devices, list) - if (local->dev == dev) + if (local->dev == dev) { + res = nfc_llcp_local_get(local); + break; + } + spin_unlock(&llcp_devices_lock); + + return res; +} + +static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev) +{ + struct nfc_llcp_local *local, *tmp; + + spin_lock(&llcp_devices_lock); + list_for_each_entry_safe(local, tmp, &llcp_devices, list) + if (local->dev == dev) { + list_del(&local->list); + spin_unlock(&llcp_devices_lock); return local; + } + spin_unlock(&llcp_devices_lock); - pr_debug("No device found\n"); + pr_warn("Shutting down device not found\n"); return NULL; } @@ -608,12 +630,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) *general_bytes_len = local->gb_len; + nfc_llcp_local_put(local); + return local->gb; } int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len) { struct nfc_llcp_local *local; + int err; if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN) return -EINVAL; @@ -630,12 +655,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len) if (memcmp(local->remote_gb, llcp_magic, 3)) { pr_err("MAC does not support LLCP\n"); - return -EINVAL; + err = -EINVAL; + goto out; } - return nfc_llcp_parse_gb_tlv(local, + err = nfc_llcp_parse_gb_tlv(local, &local->remote_gb[3], local->remote_gb_len - 3); +out: + nfc_llcp_local_put(local); + return err; } static u8 nfc_llcp_dsap(const struct sk_buff *pdu) @@ -1517,6 +1546,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) __nfc_llcp_recv(local, skb); + nfc_llcp_local_put(local); + return 0; } @@ -1533,6 +1564,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) /* Close and purge all existing sockets */ nfc_llcp_socket_release(local, true, 0); + + nfc_llcp_local_put(local); } void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, @@ -1558,6 +1591,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, mod_timer(&local->link_timer, jiffies + msecs_to_jiffies(local->remote_lto)); } + + nfc_llcp_local_put(local); } int nfc_llcp_register_device(struct nfc_dev *ndev) @@ -1608,7 +1643,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) void nfc_llcp_unregister_device(struct nfc_dev *dev) { - struct nfc_llcp_local *local = nfc_llcp_find_local(dev); + struct nfc_llcp_local *local = nfc_llcp_remove_local(dev); if (local == NULL) { pr_debug("No such device\n"); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 77642d18a3b4..645677f84dba 100644 --- a/net/nfc/llcp_sock.c +++ 
b/net/nfc/llcp_sock.c @@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, llcp_addr.service_name_len, @@ -186,7 +186,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; nfc_llcp_sock_link(&local->raw_sockets, sk); @@ -696,22 +696,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, if (dev->dep_link_up == false) { ret = -ENOLINK; device_unlock(&dev->dev); - goto put_dev; + goto sock_llcp_put_local; } device_unlock(&dev->dev); if (local->rf_mode == NFC_RF_INITIATOR && addr->target_idx != local->target_idx) { ret = -ENOLINK; - goto put_dev; + goto sock_llcp_put_local; } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -ENOMEM; - goto sock_llcp_put_local; + goto sock_llcp_nullify; } llcp_sock->reserved_ssap = llcp_sock->ssap; @@ -757,11 +757,13 @@ sock_unlink: sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); -sock_llcp_put_local: - nfc_llcp_local_put(llcp_sock->local); +sock_llcp_nullify: llcp_sock->local = NULL; llcp_sock->dev = NULL; +sock_llcp_put_local: + nfc_llcp_local_put(local); + put_dev: nfc_put_device(dev); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index b9264e730fd9..e9ac6a6f934e 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; - goto exit; + goto put_local; } rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); @@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) { if (dev->dep_link_up) { rc = -EINPROGRESS; - goto exit; + goto put_local; } local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]); @@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) local->miux = cpu_to_be16(miux); +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); @@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) if (rc != 0) { rc = -EINVAL; - goto exit; + goto put_local; } if (!sdp_attrs[NFC_SDP_ATTR_URI]) @@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len); if (sdreq == NULL) { rc = -ENOMEM; - goto exit; + goto put_local; } tlvs_len += sdreq->tlv_len; @@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) if (hlist_empty(&sdreq_list)) { rc = -EINVAL; - goto exit; + goto put_local; } rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len); + +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index de2ec66d7e83..0b1e6466f4fb 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ 
-52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len); u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); +int nfc_llcp_local_put(struct nfc_llcp_local *local); int __init nfc_llcp_init(void); void nfc_llcp_exit(void); void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 33c0dbe35956..38d9aa0cd30e 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -774,12 +774,10 @@ static void dist_free(struct disttable *d) * signed 16 bit values. */ -static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, - const struct nlattr *attr) +static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) { size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); - spinlock_t *root_lock; struct disttable *d; int i; @@ -794,13 +792,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_sleeping_lock(sch); - - spin_lock_bh(root_lock); - swap(*tbl, d); - spin_unlock_bh(root_lock); - - dist_free(d); + *tbl = d; return 0; } @@ -957,6 +949,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; + struct disttable *delay_dist = NULL; + struct disttable *slot_dist = NULL; struct tc_netem_qopt *qopt; struct clgstate old_clg; int old_loss_model = CLG_RANDOM; @@ -967,6 +961,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (ret < 0) return ret; + if (tb[TCA_NETEM_DELAY_DIST]) { + ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -976,26 +982,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); if (ret) { q->loss_model = old_loss_model; + q->clg = old_clg; goto unlock; } } else { q->loss_model = CLG_RANDOM; } - if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, &q->delay_dist, - tb[TCA_NETEM_DELAY_DIST]); - if (ret) - goto get_table_failure; - } - - if (tb[TCA_NETEM_SLOT_DIST]) { - ret = get_dist_table(sch, &q->slot_dist, - tb[TCA_NETEM_SLOT_DIST]); - if (ret) - goto get_table_failure; - } - + if (delay_dist) + swap(q->delay_dist, delay_dist); + if (slot_dist) + swap(q->slot_dist, slot_dist); sch->limit = qopt->limit; q->latency = PSCHED_TICKS2NS(qopt->latency); @@ -1045,17 +1042,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, unlock: sch_tree_unlock(sch); - return ret; -get_table_failure: - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - - goto unlock; +table_free: + dist_free(delay_dist); + dist_free(slot_dist); + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 383ac6fc037d..ba286d680fd9 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -860,6 +860,7 @@ EOF fi 
 # clean up any leftovers
+ echo 0 > /sys/bus/netdevsim/del_device
 $probed && rmmod netdevsim
 if [ $ret -ne 0 ]; then