author    Linus Torvalds <torvalds@linux-foundation.org>  2024-01-04 16:34:50 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-01-04 16:34:50 -0800
commit    1f874787ed9a2d78ed59cb21d0d90ac0178eceb0 (patch)
tree      8e3acc874abaa1ec4a7b6c2be2a8b2c3a0802cd8 /drivers/net
parent    a476aae3f1dc78a162a0d2e7945feea7d2b29401 (diff)
parent    4c8530dc7d7da4abe97d65e8e038ce9852491369 (diff)
Merge tag 'net-6.7-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from wireless and netfilter.

  We haven't accumulated much over the break. If it wasn't for the
  uninterrupted stream of fixes for Intel drivers this PR would be very
  slim. There was a handful of user reports, however, either they stood
  out because of the lower traffic or users have had more time to test
  over the break. The ones which are v6.7-relevant should be wrapped up.

  Current release - regressions:

   - Revert "net: ipv6/addrconf: clamp preferred_lft to the minimum
     required", it caused issues on networks where routers send prefixes
     with preferred_lft=0

   - wifi:
      - iwlwifi: pcie: don't synchronize IRQs from IRQ, prevent deadlock
      - mac80211: fix re-adding debugfs entries during reconfiguration

  Current release - new code bugs:

   - tcp: print AO/MD5 messages only if there are any keys

  Previous releases - regressions:

   - virtio_net: fix missing dma unmap for resize, prevent OOM

  Previous releases - always broken:

   - mptcp: prevent tcp diag from closing listener subflows

   - nf_tables:
      - set transport header offset for egress hook, fix IPv4 mangling
      - skip set commit for deleted/destroyed sets, avoid double deactivation

   - nat: make sure action is set for all ct states, fix openvswitch
     matching on ICMP packets in related state

   - eth: mlxbf_gige: fix receive hang under heavy traffic

   - eth: r8169: fix PCI error on system resume for RTL8168FP

   - net: add missing getsockopt(SO_TIMESTAMPING_NEW) and cmsg handling"

* tag 'net-6.7-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (52 commits)
  net/tcp: Only produce AO/MD5 logs if there are any keys
  net: Implement missing SO_TIMESTAMPING_NEW cmsg support
  bnxt_en: Remove mis-applied code from bnxt_cfg_ntp_filters()
  net: ravb: Wait for operating mode to be applied
  asix: Add check for usbnet_get_endpoints
  octeontx2-af: Re-enable MAC TX in otx2_stop processing
  octeontx2-af: Always configure NIX TX link credits based on max frame size
  net/smc: fix invalid link access in dumping SMC-R connections
  net/qla3xxx: fix potential memleak in ql_alloc_buffer_queues
  virtio_net: fix missing dma unmap for resize
  igc: Fix hicredit calculation
  ice: fix Get link status data length
  i40e: Restore VF MSI-X state during PCI reset
  i40e: fix use-after-free in i40e_aqc_add_filters()
  net: Save and restore msg_namelen in sock_sendmsg
  netfilter: nft_immediate: drop chain reference counter on error
  netfilter: nf_nat: fix action not being set for all ct states
  net: bcmgenet: Fix FCS generation for fragmented skbuffs
  mptcp: prevent tcp diag from closing listener subflows
  MAINTAINERS: add Geliang as reviewer for MPTCP
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c                          4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c                      4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c                        11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c                 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h                  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h                     3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c                         4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c                          12
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c                 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c                         2
-rw-r--r--  drivers/net/ethernet/intel/idpf/virtchnl2.h                         6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h                                1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c                       42
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c                            2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h                     4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h                     1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c                17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c               118
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c            9
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c                               2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c                           2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c                           65
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c                                4
-rw-r--r--  drivers/net/usb/ax88172a.c                                          4
-rw-r--r--  drivers/net/virtio_net.c                                           60
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h                  4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c                        8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c                    17
28 files changed, 238 insertions(+), 206 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 579eebb6fc56..e1f1e646cf48 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12093,6 +12093,8 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp, 0);
bnxt_hwrm_port_qstats_ext(bp, 0);
@@ -13093,8 +13095,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 9282403d1bf6..2d7ae71287b1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
+ len_stat |= DMA_TX_APPEND_CRC;
+
if (!i) {
- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ len_stat |= DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
len_stat |= DMA_TX_DO_CSUM;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1ab8dbe2d880..d5519af34657 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -107,12 +107,18 @@ static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
+ struct netdev_hw_addr_list *ha_list;
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
- netdev_for_each_mc_addr(ha, netdev) {
+ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+ ha_list = &netdev->uc;
+ else
+ ha_list = &netdev->mc;
+
+ netdev_hw_addr_list_for_each(ha, ha_list) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
@@ -16512,6 +16518,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
return;
i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+ i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3f99eb198245..de5ec4e6bedf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -154,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ u16 pos;
+
+ /* Continue only if this is a PF */
+ if (!pdev->is_physfn)
+ return;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vf_dev = NULL;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+ pci_restore_msi_state(vf_dev);
+ }
+ }
+}
+#endif /* CONFIG_PCI_IOV */
+
/**
* i40e_vc_notify_vf_reset
* @vf: pointer to the VF structure
@@ -3521,16 +3547,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
bool found = false;
int bkt;
- if (!tc_filter->action) {
+ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
dev_info(&pf->pdev->dev,
- "VF %d: Currently ADq doesn't support Drop Action\n",
- vf->vf_id);
+ "VF %d: ADQ doesn't support this action (%d)\n",
+ vf->vf_id, tc_filter->action);
goto err;
}
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ tc_filter->action_meta > vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 2ee0f8a23248..5fd607c0de0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -137,6 +137,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index d7fdb7ba7268..fbd5d92182d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1359,8 +1359,9 @@ struct ice_aqc_get_link_status_data {
u8 lp_flowcontrol;
#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
#define ICE_AQC_LS_DATA_SIZE_V2 \
- offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol)
+ offsetofend(struct ice_aqc_get_link_status_data, reserved5)
} __packed;
/* Set event mask command (direct 0x0613) */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9a6c25f98632..edac34c796ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5332,7 +5332,6 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- const s64 nsec_per_psec = 1000LL;
struct ice_aq_desc desc;
int status;
@@ -5348,8 +5347,7 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
*phase_offset = le32_to_cpu(cmd->phase_offset_h);
*phase_offset <<= 32;
*phase_offset += le32_to_cpu(cmd->phase_offset_l);
- *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
- nsec_per_psec);
+ *phase_offset = sign_extend64(*phase_offset, 47);
*eec_mode = cmd->eec_mode;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index fb9c93f37e84..adfdea1e2805 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2146,7 +2146,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
- return -EPERM;
+ return -ENOMEDIUM;
ice_print_topo_conflict(vsi);
@@ -9187,8 +9187,14 @@ int ice_stop(struct net_device *netdev)
int link_err = ice_force_phys_link_state(vsi, false);
if (link_err) {
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
+ if (link_err == -ENOMEDIUM)
+ netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
+ vsi->vsi_num);
+ else
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+
+ ice_vsi_close(vsi);
return -EIO;
}
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 81288a17da2a..20c4b3a64710 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1044,7 +1044,6 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
}
idpf_rx_sync_for_cpu(rx_buf, fields.size);
- skb = rx_q->skb;
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1f728a9004d9..9e942e5baf39 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -396,7 +396,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq)
return;
- if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
+ if (rxq->skb) {
dev_kfree_skb_any(rxq->skb);
rxq->skb = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 07e72c72d156..8dc837889723 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -1104,9 +1104,9 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- __DECLARE_FLEX_ARRAY(u8, key_flex);
-};
-VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+ u8 key_flex[];
+} __packed;
+VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
/**
* struct virtchnl2_queue_chunk - chunk of contiguous queues
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index f48f82d5e274..85cc16396506 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -568,6 +568,7 @@ struct igc_nfc_filter {
u16 etype;
__be16 vlan_etype;
u16 vlan_tci;
+ u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
u8 user_data[8];
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 785eaa8e0ba8..859b2636f3d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -958,6 +958,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -980,10 +981,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
fsp->flow_type |= FLOW_EXT;
fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
@@ -1218,6 +1225,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
+ rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
@@ -1255,11 +1263,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
}
- /* When multiple filter options or user data or vlan etype is set, use a
- * flex filter.
+ /* The i225/i226 has various different filters. Flex filters provide a
+ * way to match up to the first 128 bytes of a packet. Use them for:
+ * a) For specific user data
+ * b) For VLAN EtherType
+ * c) For full TCI match
+ * d) Or in case multiple filter criteria are set
+ *
+ * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
*/
if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
(rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
+ rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
(rule->filter.match_flags & (rule->filter.match_flags - 1)))
rule->flex = true;
else
@@ -1329,6 +1345,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
+ /* There are two ways to match the VLAN TCI:
+ * 1. Match on PCP field and use vlan prio filter for it
+ * 2. Match on complete TCI field and use flex filter for it
+ */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_tci &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
+ fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* VLAN EtherType can only be matched by full mask. */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_etype &&
+ fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (fsp->location >= IGC_MAX_RXNFC_RULES) {
netdev_dbg(netdev, "Invalid location\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index a9c08321aca9..22cefb1eeedf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCC(i), tqavcc);
wr32(IGC_TQAVHC(i),
- 0x80000000 + ring->hicredit * 0x7735);
+ 0x80000000 + ring->hicredit * 0x7736);
} else {
/* Disable any CBS for the queue */
txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index ab3e39eef2eb..8c0732c9a7ee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -528,7 +528,7 @@ struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
-};
+} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@@ -536,7 +536,7 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
-};
+} __packed;
struct npc_lt_def_apad {
u8 ltype_mask;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index cce2806aaa50..8802961b8889 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -905,6 +905,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 15a319684ed3..38acdc7a73bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -465,6 +465,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}
+int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
struct mac_ops *mac_ops;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4227ebb4a758..58744313f0eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4143,90 +4143,18 @@ static void nix_find_link_frs(struct rvu *rvu,
req->minlen = minlen;
}
-static int
-nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
- u16 pcifunc, u64 tx_credits)
-{
- struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
- u8 cgx_id = 0, lmac_id = 0;
- unsigned long poll_tmo;
- bool restore_tx_en = 0;
- struct nix_hw *nix_hw;
- u64 cfg, sw_xoff = 0;
- u32 schq = 0;
- u32 credits;
- int rc;
-
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return NIX_AF_ERR_INVALID_NIXBLK;
-
- if (tx_credits == nix_hw->tx_credits[link])
- return 0;
-
- /* Enable cgx tx if disabled for credits to be back */
- if (is_pf_cgxmapped(rvu, pf)) {
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
- }
-
- mutex_lock(&rvu->rsrc_lock);
- /* Disable new traffic to link */
- if (hw->cap.nix_shaping) {
- schq = nix_get_tx_link(rvu, pcifunc);
- sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
- rvu_write64(rvu, blkaddr,
- NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
- }
-
- rc = NIX_AF_ERR_LINK_CREDITS;
- poll_tmo = jiffies + usecs_to_jiffies(200000);
- /* Wait for credits to return */
- do {
- if (time_after(jiffies, poll_tmo))
- goto exit;
- usleep_range(100, 200);
-
- cfg = rvu_read64(rvu, blkaddr,
- NIX_AF_TX_LINKX_NORM_CREDIT(link));
- credits = (cfg >> 12) & 0xFFFFFULL;
- } while (credits != nix_hw->tx_credits[link]);
-
- cfg &= ~(0xFFFFFULL << 12);
- cfg |= (tx_credits << 12);
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- rc = 0;
-
- nix_hw->tx_credits[link] = tx_credits;
-
-exit:
- /* Enable traffic back */
- if (hw->cap.nix_shaping && !sw_xoff)
- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
-
- /* Restore state of cgx tx */
- if (restore_tx_en)
- rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
-
- mutex_unlock(&rvu->rsrc_lock);
- return rc;
-}
-
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
- int blkaddr, schq, link = -1;
- struct nix_txsch *txsch;
- u64 cfg, lmac_fifo_len;
+ int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
u16 max_mtu;
+ u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -4247,25 +4175,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
return NIX_AF_ERR_FRS_INVALID;
- /* Check if requester wants to update SMQ's */
- if (!req->update_smq)
- goto rx_frscfg;
-
- /* Update min/maxlen in each of the SMQ attached to this PF/VF */
- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
- mutex_lock(&rvu->rsrc_lock);
- for (schq = 0; schq < txsch->schq.max; schq++) {
- if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
- continue;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
- cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
- if (req->update_minlen)
- cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
- }
- mutex_unlock(&rvu->rsrc_lock);
-
-rx_frscfg:
/* Check if config is for SDP link */
if (req->sdp_link) {
if (!hw->sdp_links)
@@ -4288,7 +4197,6 @@ rx_frscfg:
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
-
linkcfg:
nix_find_link_frs(rvu, req, pcifunc);
@@ -4298,19 +4206,7 @@ linkcfg:
cfg = (cfg & ~0xFFFFULL) | req->minlen;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
- if (req->sdp_link || pf == 0)
- return 0;
-
- /* Update transmit credits for CGX links */
- lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
- if (!lmac_fifo_len) {
- dev_err(rvu->dev,
- "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
- __func__, cgx, lmac);
- return 0;
- }
- return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
- (lmac_fifo_len - req->maxlen) / 16);
+ return 0;
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@@ -4841,7 +4737,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
- return rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
+ if (err)
+ return err;
+
+ rvu_cgx_tx_enable(rvu, pcifunc, true);
+
+ return 0;
}
#define RX_SA_BASE GENMASK_ULL(52, 7)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 0d5a41a2ae01..227d01cace3f 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
priv->stats.rx_truncate_errors++;
}
+ /* Read receive consumer index before replenish so that this routine
+ * returns accurate return value even if packet is received into
+ * just-replenished buffer prior to exiting this routine.
+ */
+ rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+ rx_ci_rem = rx_ci % priv->rx_q_entries;
+
/* Let hardware know we've replenished one buffer */
rx_pi++;
@@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
rx_pi_rem = rx_pi % priv->rx_q_entries;
if (rx_pi_rem == 0)
priv->valid_polarity ^= 1;
- rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
- rx_ci_rem = rx_ci % priv->rx_q_entries;
if (skb)
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0d57ffcedf0c..fc78bc959ded 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->lrg_buf_q_alloc_size,
qdev->lrg_buf_q_alloc_virt_addr,
qdev->lrg_buf_q_alloc_phy_addr);
+ kfree(qdev->lrg_buf);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bb787a52bc75..81fd31f6fac4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1211,7 +1211,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 664eda4b5a11..8649b3e90edb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
return -ETIMEDOUT;
}
-static int ravb_config(struct net_device *ndev)
+static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
+ u32 csr_ops = 1U << (opmode & CCC_OPC);
+ u32 ccc_mask = CCC_OPC;
int error;
- /* Set config mode */
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
- /* Check if the operating mode is changed to the config mode */
- error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
- if (error)
- netdev_err(ndev, "failed to switch device to config mode\n");
+ /* If gPTP active in config mode is supported it needs to be configured
+ * along with CSEL and operating mode in the same access. This is a
+ * hardware limitation.
+ */
+ if (opmode & CCC_GAC)
+ ccc_mask |= CCC_GAC | CCC_CSEL;
+
+ /* Set operating mode */
+ ravb_modify(ndev, CCC, ccc_mask, opmode);
+ /* Check if the operating mode is changed to the requested one */
+ error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
+ if (error) {
+ netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
+ opmode & CCC_OPC);
+ }
return error;
}
@@ -673,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev)
int error;
/* Set CONFIG mode */
- error = ravb_config(ndev);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
@@ -682,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev)
return error;
/* Setting the control will start the AVB-DMAC process. */
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
-
- return 0;
+ return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}
static void ravb_get_tx_tstamp(struct net_device *ndev)
@@ -1046,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev)
return error;
/* Stop AVB-DMAC process */
- return ravb_config(ndev);
+ return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
/* E-MAC interrupt handler */
@@ -2560,21 +2569,25 @@ static int ravb_set_gti(struct net_device *ndev)
return 0;
}
-static void ravb_set_config_mode(struct net_device *ndev)
+static int ravb_set_config_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
if (info->gptp) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
} else if (info->ccc_gac) {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
- CCC_GAC | CCC_CSEL_HPB);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
} else {
- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
+
+ return error;
}
/* Set tx and rx clock internal delay modes */
@@ -2794,7 +2807,9 @@ static int ravb_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ravb_ethtool_ops;
/* Set AVB config mode */
- ravb_set_config_mode(ndev);
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_disable_gptp_clk;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */
@@ -2917,8 +2932,7 @@ static void ravb_remove(struct platform_device *pdev)
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- /* Set reset mode */
- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
@@ -3000,8 +3014,11 @@ static int __maybe_unused ravb_resume(struct device *dev)
int ret = 0;
/* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled)
- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ if (priv->wol_enabled) {
+ ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (ret)
+ return ret;
+ }
/* All register have been reset to default values.
* Restore all registers which where setup at probe time and
@@ -3009,7 +3026,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
*/
/* Set AVB config mode */
- ravb_set_config_mode(ndev);
+ ret = ravb_set_config_mode(ndev);
+ if (ret)
+ return ret;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index d2f35ee15eff..fac227d372db 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx)
}
if (!success) {
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
kfree(channel->rps_flow_id);
+ channel->rps_flow_id = NULL;
+ }
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 3777c7e2e6fc..e47bb125048d 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
struct ax88172a_private *priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d16f592c2061..51b1868d2f22 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
};
};
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static bool is_xdp_frame(void *ptr)
@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void virtnet_rq_free_buf(struct virtnet_info *vi,
+ struct receive_queue *rq, void *buf)
+{
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(rq, buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void enable_delayed_refill(struct virtnet_info *vi)
{
spin_lock_bh(&vi->refill_lock);
@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
return buf;
}
-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
-{
- void *buf;
-
- buf = virtqueue_detach_unused_buf(rq->vq);
- if (buf && rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
-
- return buf;
-}
-
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
struct virtnet_rq_dma *dma;
@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
}
}
+static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ struct receive_queue *rq;
+ int i = vq2rxq(vq);
+
+ rq = &vi->rq[i];
+
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
+
+ virtnet_rq_free_buf(vi, rq, buf);
+}
+
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
@@ -1764,7 +1777,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
DEV_STATS_INC(dev, rx_length_errors);
- virtnet_rq_free_unused_buf(rq->vq, buf);
+ virtnet_rq_free_buf(vi, rq, buf);
return;
}
@@ -2392,7 +2405,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (running)
napi_disable(&rq->napi);
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -4031,19 +4044,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
xdp_return_frame(ptr_to_xdp(buf));
}
-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
-{
- struct virtnet_info *vi = vq->vdev->priv;
- int i = vq2rxq(vq);
-
- if (vi->mergeable_rx_bufs)
- put_page(virt_to_head_page(buf));
- else if (vi->big_packets)
- give_pages(&vi->rq[i], buf);
- else
- put_page(virt_to_head_page(buf));
-}
-
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -4057,10 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
+ struct virtqueue *vq = vi->rq[i].vq;
- while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
- virtnet_rq_free_unused_buf(rq->vq, buf);
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_unmap_free_buf(vq, buf);
cond_resched();
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 56def20374f3..7805a42948af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -770,7 +770,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
@@ -817,7 +817,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index bc6a9f861711..07931c2db494 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1783,7 +1783,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
return inta;
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -1807,7 +1807,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
isr_stats->rfkill++;
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, from_irq);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
@@ -1947,7 +1947,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -2370,7 +2370,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 92253260f568..d10208075ae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, false);
return hw_rfkill;
}
@@ -1237,7 +1237,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1264,7 +1264,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_pcie_synchronize_irqs(trans);
+ if (!from_irq)
+ iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -1454,7 +1455,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
}
if (hw_rfkill != was_in_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1469,12 +1470,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_stop_device(trans);
+ _iwl_trans_pcie_stop_device(trans, false);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1487,7 +1488,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
if (trans->trans_cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans);
else
- _iwl_trans_pcie_stop_device(trans);
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
}
@@ -2887,7 +2888,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, false);
return count;
}