Diffstat (limited to 'drivers/net/ethernet/intel')
90 files changed, 5920 insertions, 2123 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..71d3d8854d8f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
 					 txdr->buffer_info[i].dma,
 					 txdr->buffer_info[i].length,
 					 DMA_TO_DEVICE);
-			if (txdr->buffer_info[i].skb)
-				dev_kfree_skb(txdr->buffer_info[i].skb);
+			dev_kfree_skb(txdr->buffer_info[i].skb);
 		}
 	}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f703fa58458e..86493fea56e4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 	}

 	for (f = 0; f < nr_frags; f++) {
-		const struct skb_frag_struct *frag;
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

-		frag = &skb_shinfo(skb)->frags[f];
 		len = skb_frag_size(frag);
 		offset = 0;

@@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 				/* an error means any chain goes out the window
 				 * too
 				 */
-				if (rx_ring->rx_skb_top)
-					dev_kfree_skb(rx_ring->rx_skb_top);
+				dev_kfree_skb(rx_ring->rx_skb_top);
 				rx_ring->rx_skb_top = NULL;
 				goto next_desc;
 			}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 08342698386d..de8c5818a305 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
 					 buffer_info->dma,
 					 buffer_info->length,
 					 DMA_TO_DEVICE);
-			if (buffer_info->skb)
-				dev_kfree_skb(buffer_info->skb);
+			dev_kfree_skb(buffer_info->skb);
 		}
 	}

@@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
 			dma_unmap_single(&pdev->dev,
 					 buffer_info->dma,
 					 2048, DMA_FROM_DEVICE);
-			if (buffer_info->skb)
-				dev_kfree_skb(buffer_info->skb);
+			dev_kfree_skb(buffer_info->skb);
 		}
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 395b05701480..a1fab77b2096 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		else
 			phy_reg |= 0xFA;
 		e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+		if (speed == SPEED_1000) {
+			hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+						    &phy_reg);
+
+			phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+			hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+						     phy_reg);
+		}
 	}
 	hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755fa17..1502895eb45d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
 /* PHY Power Management Control */
 #define HV_PM_CTRL			PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
+#define HV_PM_CTRL_K1_CLK_REQ		0x200
 #define HV_PM_CTRL_K1_ENABLE		0x4000

 #define I217_PLL_CLOCK_GATE_REG		PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e4baa13b3cda..d7d56e42a6aa 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1780,8 +1780,8 @@ static irqreturn_t e1000_intr_msi(int
__always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1861,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1907,8 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, 1); + mod_delayed_work(adapter->e1000_workqueue, + &adapter->watchdog_task, HZ); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; @@ -6297,7 +6296,7 @@ fl_out: static int e1000e_pm_freeze(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); @@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int e1000e_pm_thaw(struct device *dev) { - struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); e1000e_set_interrupt_capability(adapter); @@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev) static int e1000e_pm_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); u16 eee_lp; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 7d42582ed48d..b14441944b4b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_H_ #define _FM10K_H_ @@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) #define MIN_Q_VECTORS 1 enum fm10k_non_q_vectors { FM10K_MBX_VECTOR, -#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF - NON_Q_VECTORS_PF + NON_Q_VECTORS }; -#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? 
\ - NON_Q_VECTORS_PF : \ - NON_Q_VECTORS_VF) -#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw)) +#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS) struct fm10k_q_vector { struct fm10k_intfc *interface; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 20768ac7f17e..c45315472245 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" @@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { u8 num_tc = 0; - int i, err; + int i; /* verify type and determine num_tcs needed */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { @@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) /* update TC hardware mapping if necessary */ if (num_tc != netdev_get_num_tc(dev)) { - err = fm10k_setup_tc(dev, num_tc); + int err = fm10k_setup_tc(dev, num_tc); if (err) return err; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index dca104121c05..1d27b2fb23af 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx); q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); - if (!q_vector->dbg_q_vector) - return; /* Generate a file for each rx ring in the q_vector */ for (i = 0; i < q_vector->tx.count; i++) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 4895dd83dd08..c681d2d28107 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include <linux/vmalloc.h> @@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, const unsigned int size) { unsigned int i; - char *p; if (!pointer) { /* memory is not zero allocated so we have to clear it */ @@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, } for (i = 0; i < size; i++) { - p = (char *)pointer + stats[i].stat_offset; + char *p = (char *)pointer + stats[i].stat_offset; switch (stats[i].sizeof_stat) { case sizeof(u64): @@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_q_vector *qv; u16 tx_itr, rx_itr; int i; @@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev, /* update q_vectors */ for (i = 0; i < interface->num_q_vectors; i++) { - qv = interface->q_vector[i]; + struct fm10k_q_vector *qv = interface->q_vector[i]; + qv->tx.itr = tx_itr; qv->rx.itr = rx_itr; } @@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev, struct ethtool_channels *ch) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_hw *hw = &interface->hw; /* report maximum channels */ ch->max_combined = fm10k_max_channels(dev); /* report info for other vector */ - ch->max_other = NON_Q_VECTORS(hw); + ch->max_other = NON_Q_VECTORS; ch->other_count = ch->max_other; /* record RSS queues */ @@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int count = ch->combined_count; - struct fm10k_hw *hw = &interface->hw; /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; /* verify other_count has not changed */ - if (ch->other_count != NON_Q_VECTORS(hw)) + if (ch->other_count != NON_Q_VECTORS) return -EINVAL; /* verify the number of channels does not exceed hardware limits */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 8de77155f2e7..afe1fafd2447 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" #include "fm10k_vf.h" @@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; size_t size; - int i, err; + int i; /* return error if iov_data is already populated */ if (iov_data) @@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) /* loop through vf_info structures initializing each entry */ for (i = 0; i < num_vfs; i++) { struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + int err; /* Record VF VSI value */ vf_info->vsi = i + 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 90270b4a1682..2be9222510e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include <linux/types.h> #include <linux/module.h> @@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright(c) 2013 - 2018 Intel Corporation."; + "Copyright(c) 2013 - 2019 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); @@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, /* prefetch first cache line of first page */ prefetch(page_addr); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); #endif /* allocate a skb to store the frags */ @@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, struct sk_buff *skb = first->skb; struct fm10k_tx_buffer *tx_buffer; struct fm10k_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; dma_addr_t dma; unsigned int data_len, size; @@ -1073,8 +1073,11 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, * + 2 desc gap to keep tail from touching head * otherwise try next time */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@ -1823,7 +1826,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) v_budget = min_t(u16, v_budget, num_online_cpus()); /* account for vectors not related to queues */ - v_budget += NON_Q_VECTORS(hw); + v_budget += NON_Q_VECTORS; /* At the same time, hardware can only support a maximum of * hw.mac->max_msix_vectors vectors. With features @@ -1855,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) } /* record the number of queues available for q_vectors */ - interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); + interface->num_q_vectors = v_budget - NON_Q_VECTORS; return 0; } @@ -1869,7 +1872,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) { struct net_device *dev = interface->netdev; - int pc, offset, rss_i, i, q_idx; + int pc, offset, rss_i, i; u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; u8 num_pcs = netdev_get_num_tc(dev); @@ -1879,7 +1882,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) rss_i = interface->ring_feature[RING_F_RSS].indices; for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { - q_idx = pc; + int q_idx = pc; + for (i = 0; i < rss_i; i++) { interface->tx_ring[offset + i]->reg_idx = q_idx; interface->tx_ring[offset + i]->qos_pc = pc; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 21021fe4f1c3..75e51f91036c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. 
*/ #include "fm10k_common.h" @@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) { struct fm10k_mbx_fifo *fifo = &mbx->rx; u16 total_len = 0, msg_len; - u32 *msg; /* length should include previous amounts pushed */ len += mbx->pushed; /* offset in message is based off of current message size */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); msg_len = FM10K_TLV_DWORD_LEN(*msg); total_len += msg_len; @@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* reduce length by 1 to convert to a mask */ u16 mbmem_len = mbx->mbmem_len - 1; u16 tail_len, len = 0; - u32 *msg; /* push head behind tail */ if (mbx->tail < head) @@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, /* determine msg aligned offset for end of buffer */ do { + u32 *msg; + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); tail_len = len; len += FM10K_TLV_DWORD_LEN(*msg); @@ -2132,7 +2134,8 @@ fifo_err: * DWORDs, not bytes. Any invalid values will cause the mailbox to return * error. **/ -s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, +s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw, + struct fm10k_mbx_info *mbx, const struct fm10k_msg_data *msg_data) { mbx->mbx_reg = FM10K_GMBX; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 538a8467f434..09f7a246e134 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k.h" #include <linux/vmalloc.h> @@ -54,7 +54,7 @@ err: **/ static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_tx_queues; i++) { err = fm10k_setup_tx_resources(interface->tx_ring[i]); @@ -121,7 +121,7 @@ err: **/ static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) { - int i, err = 0; + int i, err; for (i = 0; i < interface->num_rx_queues; i++) { err = fm10k_setup_rx_resources(interface->rx_ring[i]); @@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, **/ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) { - struct fm10k_tx_buffer *tx_buffer; unsigned long size; u16 i; @@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { - tx_buffer = &tx_ring->tx_buffer[i]; + struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); } @@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) if (!rx_ring->rx_buffer) return; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; @@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err = -EHOSTDOWN; + int err; /* drop any leading bits on the VLAN ID */ 
vid &= VLAN_N_VID - 1; @@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, struct fm10k_l2_accel *l2_accel) { - struct fm10k_ring *ring; int i; for (i = 0; i < interface->num_rx_queues; i++) { - ring = interface->rx_ring[i]; + struct fm10k_ring *ring = interface->rx_ring[i]; + rcu_assign_pointer(ring->l2_accel, l2_accel); } @@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, struct fm10k_l2_accel *old_l2_accel = NULL; struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; - int size = 0, i; + int size, i; u16 vid, glort; /* The hardware supported by fm10k only filters on the destination MAC diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e49fb51d3613..bb236fa44048 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include <linux/module.h> #include <linux/interrupt.h> @@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) struct net_device *netdev = interface->netdev; u32 __iomem *hw_addr; u32 value; - int err; /* do nothing if netdev is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) @@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) hw_addr = READ_ONCE(interface->uc_addr); value = readl(hw_addr); if (~value) { + int err; + /* Make sure the reset was initiated because we detached, * otherwise we might race with a different reset flow. 
*/ @@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) */ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) { - int i; - /* If we're down or resetting, just bail */ if (test_bit(__FM10K_DOWN, interface->state) || test_bit(__FM10K_RESETTING, interface->state)) @@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) interface->next_tx_hang_check = jiffies + (2 * HZ); if (netif_carrier_ok(interface->netdev)) { + int i; + /* Force detection of hung controller */ for (i = 0; i < interface->num_tx_queues; i++) set_check_for_tx_hang(interface->tx_ring[i]); @@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + txint = ring->q_vector->v_idx + NON_Q_VECTORS; txint |= FM10K_INT_MAP_TIMER0; } @@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Map interrupt */ if (ring->q_vector) { - rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + rxint = ring->q_vector->v_idx + NON_Q_VECTORS; rxint |= FM10K_INT_MAP_TIMER1; } @@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface) void fm10k_qv_free_irq(struct fm10k_intfc *interface) { int vector = interface->num_q_vectors; - struct fm10k_hw *hw = &interface->hw; struct msix_entry *entry; - entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; + entry = &interface->msix_entries[NON_Q_VECTORS + vector]; while (vector) { struct fm10k_q_vector *q_vector; @@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) unsigned int ri = 0, ti = 0; int vector, err; - entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; + entry = &interface->msix_entries[NON_Q_VECTORS]; for (vector = 0; vector < interface->num_q_vectors; vector++) { struct fm10k_q_vector *q_vector = interface->q_vector[vector]; @@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) /* Restart the MAC/VLAN request queue in-case of outstanding events */ fm10k_macvlan_schedule(interface); - return err; + return 0; } /** @@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) **/ static int __maybe_unused fm10k_resume(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; int err; @@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev) **/ static int __maybe_unused fm10k_suspend(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); + struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; netif_device_detach(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index cb4d02629b86..be07bfdb0bb4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_pf.h" #include "fm10k_vf.h" @@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. 
**/ -s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; @@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; - u32 *result; s32 err = 0; u32 msg[2]; u8 mode = 0; @@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, return FM10K_ERR_PARAM; if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { - result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; + u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; /* XCAST mode update requested */ err = fm10k_tlv_attr_get_u8(result, &mode); @@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, /* read remaining fields */ fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); fault->address <<= 32; - fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); + fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); /* clear valid bit to allow for next error */ @@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { * switch API. **/ s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, mask; u32 dglort_map; @@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { * This handler configures the default VLAN for the PF **/ static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u16 glort, pvid; u32 pvid_update; @@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { * messages that the PF has sent. **/ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_error err_msg; s32 err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 2a7a40bf2b1c..21eff0895a7a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_tlv.h" @@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) { u32 i, attr_id, offset = 0; - s32 err = 0; + s32 err; u16 len; /* verify pointers are not NULL */ @@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, * a minimum it just indicates that the message requested was * unimplemented. 
**/ -s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) +s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw, + u32 __always_unused **results, + struct fm10k_mbx_info __always_unused *mbx) { return FM10K_NOT_IMPLEMENTED; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 9fb9fca375e3..15ac1c7885bc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_TYPE_H_ #define _FM10K_TYPE_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index a8519c1f0406..dc8ccd378ec9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #include "fm10k_vf.h" @@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) * This function should determine the MAC address for the VF **/ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { u8 perm_addr[ETH_ALEN]; u16 vid; @@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) * This function is used to add or remove unicast MAC addresses for * the VF. **/ -static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, - const u8 *mac, u16 vid, bool add, u8 flags) +static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + const u8 *mac, u16 vid, bool add, + u8 __always_unused flags) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[7]; @@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, * This function is used to add or remove multicast MAC addresses for * the VF. **/ -static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, +static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, + u16 __always_unused glort, const u8 *mac, u16 vid, bool add) { struct fm10k_mbx_info *mbx = &hw->mbx; @@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { * are ready to bring up the interface. **/ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; @@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, * enabled we can add filters, if it is disabled all filters for this * logical port are flushed. **/ -static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, - u16 count, bool enable) +static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, + u16 __always_unused glort, + u16 __always_unused count, bool enable) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[2]; @@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, * so that it can enable either multicast, multicast promiscuous, or * promiscuous mode of operation. 
**/ -static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, + u16 __always_unused glort, u8 mode) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[3]; @@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, * that information to then populate a DGLORTMAP/DEC entry and the queues * to which it has been assigned. **/ -static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, +static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw, struct fm10k_dglort_cfg *dglort) { /* verify the dglort pointer */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 84bd06901014..2af9f6308f84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -131,7 +131,6 @@ enum i40e_state_t { __I40E_PF_RESET_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, - __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -244,11 +243,11 @@ struct i40e_fdir_filter { u32 fd_id; }; -#define I40E_CLOUD_FIELD_OMAC 0x01 -#define I40E_CLOUD_FIELD_IMAC 0x02 -#define I40E_CLOUD_FIELD_IVLAN 0x04 -#define I40E_CLOUD_FIELD_TEN_ID 0x08 -#define I40E_CLOUD_FIELD_IIP 0x10 +#define I40E_CLOUD_FIELD_OMAC BIT(0) +#define I40E_CLOUD_FIELD_IMAC BIT(1) +#define I40E_CLOUD_FIELD_IVLAN BIT(2) +#define I40E_CLOUD_FIELD_TEN_ID BIT(3) +#define I40E_CLOUD_FIELD_IIP BIT(4) #define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC #define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC @@ -1021,6 +1020,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) return NULL; } void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_veb_stats(struct i40e_veb *veb); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); int i40e_fetch_switch_configuration(struct i40e_pf *pf, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 814acbe79ffd..72c04881d290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) if (hw->aq.api_maj_ver > 1 || (hw->aq.api_maj_ver == 1 && - hw->aq.api_min_ver >= 8)) + hw->aq.api_min_ver >= 8)) { hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; + hw->flags |= I40E_HW_FLAG_DROP_MODE; + } if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6536023fa074..530613f31527 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,8 +11,8 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0008 -#define I40E_FW_API_VERSION_MINOR_X710 0x0008 +#define I40E_FW_API_VERSION_MINOR_X722 0x0009 +#define I40E_FW_API_VERSION_MINOR_X710 0x0009 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ @@ -1382,7 +1382,7 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0001 reserved */ /* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 @@ -1394,6 +1394,9 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x000D reserved */ +/* 0x000E reserved */ +/* 0x000F reserved */ /* 0x0010 to 0x0017 is for custom filters */ #define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ @@ -2051,20 +2054,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 +#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 906cf68d3453..d37c6e0e5f08 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" @@ -13,7 +14,7 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
**/ -static i40e_status i40e_set_mac_type(struct i40e_hw *hw) +i40e_status i40e_set_mac_type(struct i40e_hw *hw) { i40e_status status = 0; @@ -1577,19 +1578,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, cmd_details); - if (status) - break; - - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { + switch (hw->aq.asq_last_status) { + case I40E_AQ_RC_EIO: status = I40E_ERR_UNKNOWN_PHY; break; - } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { + case I40E_AQ_RC_EAGAIN: usleep_range(1000, 2000); total_delay++; status = I40E_ERR_TIMEOUT; + break; + /* also covers I40E_AQ_RC_OK */ + default: + break; } - } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && - (total_delay < max_delay)); + + } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) && + (total_delay < max_delay)); if (status) return status; @@ -1643,25 +1647,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, return status; } -/** - * i40e_set_fc - * @hw: pointer to the hw struct - * @aq_failures: buffer to return AdminQ failure information - * @atomic_restart: whether to enable atomic link restart - * - * Set the requested flow control mode using set_phy_config. - **/ -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, - bool atomic_restart) +static noinline_for_stack enum i40e_status_code +i40e_set_fc_status(struct i40e_hw *hw, + struct i40e_aq_get_phy_abilities_resp *abilities, + bool atomic_restart) { - enum i40e_fc_mode fc_mode = hw->fc.requested_mode; - struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; - enum i40e_status_code status; + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; u8 pause_mask = 0x0; - *aq_failures = 0x0; - switch (fc_mode) { case I40E_FC_FULL: pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; @@ -1677,6 +1671,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, break; } + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities == abilities->abilities) + return 0; + + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities->phy_type; + config.phy_type_ext = abilities->phy_type_ext; + config.link_speed = abilities->link_speed; + config.eee_capability = abilities->eee_capability; + config.eeer = abilities->eeer_val; + config.low_power_ctrl = abilities->d3_lpan; + config.fec_config = abilities->fec_cfg_curr_mod_ext_info & + I40E_AQ_PHY_FEC_CONFIG_MASK; + + return i40e_aq_set_phy_config(hw, &config, NULL); +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart + * + * Set the requested flow control mode using set_phy_config. 
+ **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + enum i40e_status_code status; + + *aq_failures = 0x0; + /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); @@ -1685,31 +1721,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, return status; } - memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - /* clear the old pause settings */ - config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & - ~(I40E_AQ_PHY_FLAG_PAUSE_RX); - /* set the new abilities */ - config.abilities |= pause_mask; - /* If the abilities have changed, then set the new config */ - if (config.abilities != abilities.abilities) { - /* Auto restart link so settings take effect */ - if (atomic_restart) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; - /* Copy over all the old settings */ - config.phy_type = abilities.phy_type; - config.phy_type_ext = abilities.phy_type_ext; - config.link_speed = abilities.link_speed; - config.eee_capability = abilities.eee_capability; - config.eeer = abilities.eeer_val; - config.low_power_ctrl = abilities.d3_lpan; - config.fec_config = abilities.fec_cfg_curr_mod_ext_info & - I40E_AQ_PHY_FEC_CONFIG_MASK; - status = i40e_aq_set_phy_config(hw, &config, NULL); + status = i40e_set_fc_status(hw, &abilities, atomic_restart); + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - if (status) - *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; - } /* Update the link info */ status = i40e_update_link_info(hw); if (status) { @@ -2537,7 +2552,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) * i40e_updatelink_status - update status of the HW network link * @hw: pointer to the hw struct **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) { struct i40e_aq_get_phy_abilities_resp abilities; i40e_status status = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 292eeb3def10..200a1cb3b536 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) return I40E_NOT_SUPPORTED; /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { + u8 offset = 0; + + if (hw->mac.type == I40E_MAC_XL710) + offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET; + else if (hw->mac.type == I40E_MAC_X722) + offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; + else + return I40E_NOT_SUPPORTED; + + ret = i40e_read_nvm_module_data(hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + offset, 1, + &lldp_cfg.adminstatus); + } else { + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + } if (ret) return I40E_ERR_NOT_READY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ddb48ae7cce4..2a80c5daa376 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -30,6 +30,8 @@ #define I40E_CEE_SUBTYPE_APP_PRI 4 #define I40E_CEE_MAX_FEAT_TYPE 3 +#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B +#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31 /* Defines for LLDP TLV header */ #define I40E_LLDP_TLV_LEN_SHIFT 0 #define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 55d20acfcf70..99ea543dd245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1125,10 +1125,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "empr", 4) == 0) { - dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); - } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; @@ -1732,29 +1728,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { - struct dentry *pfile; const char *name = pci_name(pf->pdev); - const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); - if (!pf->i40e_dbg_pf) - return; - - pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_command_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_netdev_ops_fops); - if (!pfile) - goto create_failed; + debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); - return; - -create_failed: - dev_info(dev, "debugfs dir/file for %s failed\n", name); - debugfs_remove_recursive(pf->i40e_dbg_pf); + debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 527eb52c5401..41e1240acaea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, } /** + * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask + * @req_fec_info: mask request FEC info + * @ks: ethtool ksettings to fill in + **/ +static void i40e_get_settings_link_up_fec(u8 req_fec_info, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + if (I40E_AQ_SET_FEC_AUTO & req_fec_info) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + } +} + +/** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ks: ethtool ksettings to fill in @@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - 
ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: @@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); - ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - ethtool_link_ksettings_add_link_mode(ks, advertising, - FEC_BASER); + i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = pf->veb[pf->lan_veb]; + struct i40e_veb *veb = NULL; unsigned int i; bool veb_stats; u64 *p = data; @@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, goto check_data_pointer; veb_stats = ((pf->lan_veb != I40E_NO_VEB) && + (pf->lan_veb < I40E_MAX_VEB) && (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)); + if (veb_stats) { + veb = pf->veb[pf->lan_veb]; + i40e_update_veb_stats(veb); + } + /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer * 
intelligently @@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; + goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); +check_data_pointer: WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } @@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev, /* Module is not SFF-8472 compliant */ modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) { + /* Module is SFF-8472 compliant but doesn't implement + * Digital Diagnostic Monitoring (DDM). + */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; } else { modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 19ce93d7fd0a..163ee8c6311c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_status.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 994011c38fb4..be24d42280d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include "i40e.h" #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_type.h" @@ -963,7 +964,7 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, /** * i40e_hmc_get_object_va - retrieves an object's virtual address - * @hmc_info: pointer to i40e_hmc_info struct + * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer * @object_base: pointer to u64 to get the va * @rsrc_type: the hmc resource type * @obj_idx: hmc object index @@ -972,16 +973,16 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes, * base pointer. This function is used for LAN Queue contexts. 
**/ static -i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info, - u8 **object_base, - enum i40e_hmc_lan_rsrc_type rsrc_type, - u32 obj_idx) +i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) { + struct i40e_hmc_info *hmc_info = &hw->hmc; u32 obj_offset_in_sd, obj_offset_in_pd; - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; struct i40e_hmc_pd_entry *pd_entry; u32 pd_idx, pd_lmt, rel_pd_idx; + i40e_status ret_code = 0; u64 obj_offset_in_fpm; u32 sd_idx, sd_lmt; @@ -1047,7 +1048,7 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1068,7 +1069,7 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); if (err < 0) return err; @@ -1088,7 +1089,7 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; @@ -1109,7 +1110,7 @@ i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, i40e_status err; u8 *context_bytes; - err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); if (err < 0) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9ebbe3da61bb..6031223eafab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -73,6 +73,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, @@ -534,6 +535,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); + memset(&pf->veb[i]->tc_stats, 0, + sizeof(pf->veb[i]->tc_stats)); + memset(&pf->veb[i]->tc_stats_offsets, 0, + sizeof(pf->veb[i]->tc_stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } @@ -677,7 +682,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ -static void i40e_update_veb_stats(struct i40e_veb *veb) +void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; @@ -2530,6 +2535,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi_name, i40e_stat_str(hw, aq_ret), i40e_aq_str(hw, hw->aq.asq_last_status)); + } else { + dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n", + vsi->netdev->name, + cur_multipromisc ? 
"entering" : "leaving"); } } @@ -2583,6 +2592,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); + return; + } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && @@ -2597,6 +2610,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -3360,7 +3374,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); - if (!i40e_enabled_xdp_vsi(vsi)) + if (err || !i40e_enabled_xdp_vsi(vsi)) return err; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) @@ -6412,50 +6426,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) } /** - * i40e_update_dcb_config - * @hw: pointer to the HW struct - * @enable_mib_change: enable MIB change event - * - * Update DCB configuration from the firmware - **/ -static enum i40e_status_code -i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change) -{ - struct i40e_lldp_variables lldp_cfg; - i40e_status ret; - - if (!hw->func_caps.dcb) - return I40E_NOT_SUPPORTED; - - /* Read LLDP NVM area */ - ret = i40e_read_lldp_cfg(hw, &lldp_cfg); - if (ret) - return I40E_ERR_NOT_READY; - - /* Get DCBX status */ - ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); - if (ret) - return ret; - - /* Check the DCBX Status */ - if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || - hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { - /* Get current DCBX configuration */ - ret = i40e_get_dcb_config(hw); - if (ret) - return ret; - } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - return I40E_ERR_NOT_READY; - } - - /* Configure the LLDP MIB change event */ - if (enable_mib_change) - ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); - - return ret; -} - -/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -6477,7 +6447,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) goto out; } - err = i40e_update_dcb_config(hw, true); + err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || @@ -6599,19 +6569,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) } if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { - req_fec = ", Requested FEC: None"; - fec = ", FEC: None"; - an = ", Autoneg: False"; + req_fec = "None"; + fec = "None"; + an = "False"; if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) - an = ", Autoneg: True"; + an = "True"; if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) - fec = ", FEC: CL74 FC-FEC/BASE-R"; + fec = "CL74 FC-FEC/BASE-R"; else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) - fec = ", FEC: CL108 RS-FEC"; + fec = "CL108 RS-FEC"; /* 'CL108 RS-FEC' should be displayed when RS is requested, or * both RS and FC are requested @@ -6620,14 +6590,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { if (vsi->back->hw.phy.link_info.req_fec_info & I40E_AQ_REQUEST_FEC_RS) - req_fec = ", Requested FEC: CL108 RS-FEC"; + req_fec = "CL108 RS-FEC"; else - req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; + req_fec = "CL74 FC-FEC/BASE-R"; } + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", + 
speed, req_fec, fec, an, fc); + } else { + netdev_info(vsi->netdev, + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", + speed, fc); } - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", - speed, req_fec, fec, an, fc); } /** @@ -8486,6 +8461,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf, lock_acquired); + dev_info(&pf->pdev->dev, + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? + "FW LLDP is disabled\n" : + "FW LLDP is enabled\n"); + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -12561,7 +12541,8 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (need_reset && prog) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->xdp_rings[i]->xsk_umem) - (void)i40e_xsk_async_xmit(vsi->netdev, i); + (void)i40e_xsk_wakeup(vsi->netdev, i, + XDP_WAKEUP_RX); return 0; } @@ -12883,7 +12864,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xsk_async_xmit = i40e_xsk_async_xmit, + .ndo_xsk_wakeup = i40e_xsk_wakeup, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; @@ -14569,9 +14550,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) **/ static bool i40e_check_recovery_mode(struct i40e_pf *pf) { - u32 val = rd32(&pf->hw, I40E_GL_FWSTS); - - if (val & I40E_GL_FWSTS_FWS1B_MASK) { + u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; + bool is_recovery_mode = false; + + if (pf->hw.mac.type == I40E_MAC_XL710) + is_recovery_mode = + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || + val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; + if (pf->hw.mac.type == I40E_MAC_X722) + is_recovery_mode = + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || + val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; + if (is_recovery_mode) { dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); set_bit(__I40E_RECOVERY_MODE, pf->state); @@ -14585,6 +14577,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) } /** + * i40e_pf_loop_reset - perform reset in a loop. + * @pf: board private structure + * + * This function is useful when a NIC is about to enter recovery mode. + * When a NIC's internal data structures are corrupted, its firmware + * enters recovery mode. Right after a POR it takes about 7 minutes for + * the firmware to enter recovery mode; until then the NIC is in an + * intermediate state, and after that period it almost surely enters + * recovery mode. The only way for the driver to detect the intermediate + * state is to issue a series of PF resets and check the return value. + * If a PF reset returns success, the firmware could already be in + * recovery mode, so the caller still needs to check for recovery mode + * after this function succeeds. There is a small chance that the + * firmware will hang in the intermediate state forever. + * Since waiting 7 minutes is impractical, this function waits + * 10 seconds and then gives up by returning an error. + * + * Return 0 on success, negative on failure.
+ **/ +static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +{ + const unsigned short MAX_CNT = 1000; + const unsigned short MSECS = 10; + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + int cnt; + + for (cnt = 0; cnt < MAX_CNT; ++cnt) { + ret = i40e_pf_reset(hw); + if (!ret) + break; + msleep(MSECS); + } + + if (cnt == MAX_CNT) { + dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); + return ret; + } + + pf->pfr_count++; + return ret; +} + +/** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure * @hw: ptr to the hardware info @@ -14812,14 +14849,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); - if (!i40e_check_recovery_mode(pf)) { - err = i40e_pf_reset(hw); - if (err) { - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); - goto err_pf_reset; - } - pf->pfr_count++; + + err = i40e_set_mac_type(hw); + if (err) { + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); + goto err_pf_reset; } + + err = i40e_pf_loop_reset(pf); + if (err) { + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + goto err_pf_reset; + } + + i40e_check_recovery_mode(pf); + hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; @@ -15605,8 +15650,7 @@ static void i40e_shutdown(struct pci_dev *pdev) **/ static int __maybe_unused i40e_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ @@ -15656,8 +15700,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) **/ static int __maybe_unused i40e_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_pf *pf = dev_get_drvdata(dev); int err; /* If we're not suspended, then there is nothing to do */ @@ -15674,7 +15717,7 @@ static int __maybe_unused i40e_resume(struct device *dev) */ err = i40e_restore_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", + dev_err(dev, "Cannot restore interrupt scheme: %d\n", err); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index c508b75c3c09..e4d8d20baf3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location + * @hw: pointer to the HW structure + * @module_ptr: Pointer to module in words with respect to NVM beginning + * @offset: offset in words from module start + * @words_data_size: Words to read from NVM + * @data_ptr: Pointer to memory location where resulting buffer will be stored + **/ +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr) +{ + i40e_status status; + u16 ptr_value = 0; + u32 flat_offset; + + if (module_ptr != 0) { + status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm word failed. Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } +#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF + + /* Pointer not initialized */ + if (ptr_value == I40E_NVM_INVALID_PTR_VAL || + ptr_value == I40E_NVM_INVALID_VAL) + return I40E_ERR_BAD_PTR; + + /* Check whether the module is in SR mapped area or outside */ + if (ptr_value & I40E_PTR_TYPE) { + /* Pointer points outside of the Shared RAM mapped area */ + ptr_value &= ~I40E_PTR_TYPE; + + /* PtrValue in 4kB units, need to convert to words */ + ptr_value /= 2; + flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset; + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!status) { + status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset, + 2 * words_data_size, + data_ptr, true, NULL); + i40e_release_nvm(hw); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm aq failed. Error code: %d.\n", + status); + return I40E_ERR_NVM; + } + } else { + return I40E_ERR_NVM; + } + } else { + /* Read from the Shadow RAM */ + status = i40e_read_nvm_buffer(hw, ptr_value + offset, + &words_data_size, data_ptr); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "Reading nvm buffer failed. Error code: %d.\n", + status); + } + } + + return status; +} + +/** * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). @@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } + + return ret_code; +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index a07574bff550..c302ef2524f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -18,7 +18,10 @@ * actual OS primitives */ -#define hw_dbg(hw, S, A...) do {} while (0) +#define hw_dbg(hw, S, A...) 
\ +do { \ + dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ +} while (0) #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index eac88bcc6c06..5250441bf75b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, void i40e_release_nvm(struct i40e_hw *hw); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); +i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw, + u8 module_ptr, u16 offset, + u16 words_data_size, + u16 *data_ptr); +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); @@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 11394a52e21c..9bf1ad4319f5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -725,7 +725,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; /* Set the previous "reset" time to the current Kernel clock time */ - pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real()); + ktime_get_real_ts64(&pf->ptp_prev_hw_time); pf->ptp_reset_start = ktime_get(); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 52e3680c57f8..d35d690ca10f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -58,7 +58,7 @@ #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -81,7 +81,7 @@ #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -108,7 +108,7 @@ #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define 
I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -136,7 +136,7 @@ #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -259,7 +259,7 @@ #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -363,6 +363,12 @@ #define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 #define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) @@ -503,7 +509,7 @@ #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1242,14 +1248,14 @@ #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, 
I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1658,7 +1664,7 @@ #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3025,7 +3031,7 @@ #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3161,7 +3167,7 @@ #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3184,7 +3190,7 @@ #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2a2fe3ec7926..e3f29dc8b290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ bool __i40e_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. 
*/ if (stale_size > I40E_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (I40E_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 100e92d2982f..36d37f31a287 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) **/ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 8f43aa47c263..b43ec94a0f29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -443,6 +443,7 @@ struct i40e_nvm_access { #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C #define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40 #define I40E_MODULE_TYPE_QSFP_PLUS 0x0D #define I40E_MODULE_TYPE_QSFP28 0x11 #define I40E_MODULE_QSFP_MAX_LEN 640 @@ -623,6 +624,7 @@ struct i40e_hw { #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) #define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) #define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5) +#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7) u64 flags; /* Used in set switch config AQ command */ @@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats { #define I40E_SR_VPD_PTR 0x2F #define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define I40E_SR_SW_CHECKSUM_WORD 0x3F +#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ #define I40E_SR_VPD_MODULE_MAX_SIZE 1024 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 02b09a8ad54c..3d2440838822 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) { + + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->queues_enabled) { + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + } else if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = (vf->link_up ? 
VIRTCHNL_LINK_SPEED_40GB : 0); @@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) alluni = true; aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti, alluni); - if (!aq_ret) { - if (allmulti) { + if (aq_ret) + goto err_out; + + if (allmulti) { + if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset multicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); - } - if (alluni) { + } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset multicast promiscuous mode\n", + vf->vf_id); + + if (alluni) { + if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) dev_info(&pf->pdev->dev, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); - set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } else { - dev_info(&pf->pdev->dev, - "VF %d successfully unset unicast promiscuous mode\n", - vf->vf_id); - clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); - } - } + } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC, + &vf->vf_states)) + dev_info(&pf->pdev->dev, + "VF %d successfully unset unicast promiscuous mode\n", + vf->vf_id); + err_out: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, @@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * VF does not know about these additional VSIs and all * it cares is about its own queues. PF configures these queues * to its appropriate VSIs based on TC mapping - **/ + */ if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { aq_ret = I40E_ERR_NO_AVAILABLE_VSI; @@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) } } + vf->queues_enabled = true; + error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, @@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; + /* Immediately mark queues as disabled */ + vf->queues_enabled = false; + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* When the VF is resetting wait until it is done. * It can take up to 200 milliseconds, * but wait for up to 300 milliseconds to be safe. + * If the VF is indeed in reset, the vsi pointer has + * to show on the newly loaded vsi under pf->vsi[id]. 
*/ for (i = 0; i < 15; i++) { - if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (i > 0) + vsi = pf->vsi[vf->lan_vsi_idx]; break; + } msleep(20); } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4244,7 +4263,8 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, if (min_tx_rate) { dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); - return -EINVAL; + ret = -EINVAL; + goto error; } vf = &pf->vf[vf_id]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index f65cc0c16550..7164b9bb294f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -99,6 +99,7 @@ struct i40e_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; bool link_up; /* only valid if VF link is forced */ + bool queues_enabled; /* true if the VF queues are enabled */ bool spoofchk; u16 num_mac; u16 num_vlan; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 32bad014d76c..b1c3227ae4ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -116,7 +116,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, return err; /* Kick start the NAPI context so that receiving will start */ - err = i40e_xsk_async_xmit(vsi->netdev, qid); + err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } @@ -157,6 +157,11 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) err = i40e_queue_pair_enable(vsi, qid); if (err) return err; + + /* Kick start the NAPI context so that receiving will start */ + err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); + if (err) + return err; } return 0; @@ -190,9 +195,11 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, **/ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { + struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; + u64 offset; u32 act; rcu_read_lock(); @@ -201,7 +208,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) */ xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; + offset = xdp->data - xdp->data_hard_start; + + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); + switch (act) { case XDP_PASS: break; @@ -262,7 +272,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr(umem); return true; @@ -299,7 +309,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr_rq(umem); return true; @@ -420,8 +430,6 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, struct i40e_rx_buffer *old_bi) { struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = 
rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; /* update, and store next to alloc */ @@ -429,14 +437,9 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* transfer page from old buffer to new buffer */ - new_bi->dma = old_bi->dma & mask; - new_bi->dma += hr; - - new_bi->addr = (void *)((unsigned long)old_bi->addr & mask); - new_bi->addr += hr; - - new_bi->handle = old_bi->handle & mask; - new_bi->handle += rx_ring->xsk_umem->headroom; + new_bi->dma = old_bi->dma; + new_bi->addr = old_bi->addr; + new_bi->handle = old_bi->handle; old_bi->addr = NULL; } @@ -471,7 +474,8 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr; - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, + rx_ring->xsk_umem->headroom); } /** @@ -626,6 +630,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } return failure ? budget : (int)total_rx_packets; } @@ -681,6 +694,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -759,19 +774,27 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); out_xmit: + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { + if (tx_ring->next_to_clean == tx_ring->next_to_use) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); + } + xmit_done = i40e_xmit_zc(tx_ring, budget); return work_done && xmit_done; } /** - * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit + * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup * @dev: the netdevice * @queue_id: queue id to wake up + * @flags: ignored in our case since we have Rx and Tx in the same NAPI. * * Returns <0 for errors, 0 otherwise. 
**/ -int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id) +int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 8cc0a2e7d9a2..9ed59c14eb55 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -18,6 +18,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring, int napi_budget); -int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id); +int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 9fc635d816d2..29de3ae96ef2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -253,7 +253,6 @@ struct iavf_adapter { #define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define IAVF_FLAG_ADDR_SET_BY_PF BIT(8) #define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 9d2b50964a08..8f310e520b06 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -143,28 +143,6 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, } /** - * iavf_debug_d - OS dependent version of debug printing - * @hw: pointer to the HW structure - * @mask: debug level mask - * @fmt_str: printf-type format description - **/ -void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...) -{ - char buf[512]; - va_list argptr; - - if (!(mask & ((struct iavf_hw *)hw)->debug_mask)) - return; - - va_start(argptr, fmt_str); - vsnprintf(buf, sizeof(buf), fmt_str, argptr); - va_end(argptr); - - /* the debug string is already formatted with a newline */ - pr_info("%s", buf); -} - -/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ @@ -812,9 +790,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; - if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF) - return -EPERM; - spin_lock_bh(&adapter->mac_vlan_list_lock); f = iavf_find_filter(adapter, hw->mac.addr); @@ -829,7 +804,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p) if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); } return (f == NULL) ? 
-ENOMEM : 0; @@ -1833,7 +1807,6 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) eth_hw_addr_random(netdev); ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); } else { - adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF; ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 0cca1b589b56..7a30d5d5ef53 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring, **/ bool __iavf_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb) * descriptor associated with the fragment. */ if (stale_size > IAVF_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & + int align_pad = -(skb_frag_off(stale)) & (IAVF_MAX_READ_REQ_SIZE - 1); sum -= align_pad; @@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; + skb_frag_t *frag; struct iavf_tx_buffer *tx_bi; struct iavf_tx_desc *tx_desc; u16 i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 71e7d090f8db..dd3348f9da9d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb); **/ static inline int iavf_xmit_descriptor_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; int count = 0, size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index d49d58a6de80..c46770eba320 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1252,6 +1252,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, case VIRTCHNL_OP_ADD_ETH_ADDR: dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", iavf_stat_str(&adapter->hw, v_retval)); + /* restore administratively set MAC address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); break; case VIRTCHNL_OP_DEL_VLAN: dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", @@ -1319,6 +1321,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } } switch (v_opcode) { + case VIRTCHNL_OP_ADD_ETH_ADDR: { + if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + } + break; case VIRTCHNL_OP_GET_STATS: { struct iavf_eth_stats *stats = (struct iavf_eth_stats *)msg; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 2d140ba83781..9edde960b4f2 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -15,6 +15,7 @@ ice-y := ice_main.o \ ice_sched.o \ ice_lib.o \ ice_txrx.o \ + ice_flex_pipe.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o 
ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9ee6b55553c0..45e100666049 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -8,6 +8,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/firmware.h> #include <linux/netdevice.h> #include <linux/compiler.h> #include <linux/etherdevice.h> @@ -29,6 +30,7 @@ #include <linux/sctp.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> +#include <linux/ctype.h> #include <linux/avf/virtchnl.h> #include <net/ipv6.h> #include "ice_devids.h" @@ -47,33 +49,16 @@ extern const char ice_drv_ver[]; #define ICE_MIN_NUM_DESC 64 #define ICE_MAX_NUM_DESC 8160 #define ICE_DFLT_MIN_RX_DESC 512 -/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the - * number of descriptors to fill up an entire page is greater than or equal to - * ICE_DFLT_MIN_RX_DESC set it based on page size, otherwise set it to - * ICE_DFLT_MIN_RX_DESC - */ -#define ICE_DFLT_NUM_RX_DESC \ - min_t(u16, ICE_MAX_NUM_DESC, \ - max_t(u16, ALIGN(PAGE_SIZE / sizeof(union ice_32byte_rx_desc), \ - ICE_REQ_DESC_MULTIPLE), \ - ICE_DFLT_MIN_RX_DESC)) -/* set default number of Tx descriptors to the minimum between ICE_MAX_NUM_DESC - * and the number of descriptors to fill up an entire page - */ -#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ - ALIGN(PAGE_SIZE / \ - sizeof(struct ice_tx_desc), \ - ICE_REQ_DESC_MULTIPLE)) +#define ICE_DFLT_NUM_TX_DESC 256 +#define ICE_DFLT_NUM_RX_DESC 2048 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) -#define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 -#define ICE_MBXQ_LEN 64 +#define ICE_MBXSQ_LEN 64 +#define ICE_MBXRQ_LEN 512 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff -#define ICE_MAX_TXQS 2048 -#define ICE_MAX_RXQS 2048 #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 #define ICE_MAX_SCATTER_TXQS 16 @@ -86,16 +71,6 @@ extern const char ice_drv_ver[]; #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 -#define ICE_MAX_VF_COUNT 256 -#define ICE_MAX_QS_PER_VF 256 -#define ICE_MIN_QS_PER_VF 1 -#define ICE_DFLT_QS_PER_VF 4 -#define ICE_NONQ_VECS_VF 1 -#define ICE_MAX_SCATTER_QS_PER_VF 16 -#define ICE_MAX_BASE_QS_PER_VF 16 -#define ICE_MAX_INTR_PER_VF 65 -#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) -#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_MAX_RESET_WAIT 20 @@ -220,6 +195,7 @@ enum ice_state { __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, + __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_STATE_NBITS /* must be last */ }; @@ -257,9 +233,6 @@ struct ice_vsi { u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - /* Interrupt thresholds */ - u16 work_lmt; - s16 vf_id; /* VF ID for SR-IOV VSIs */ u16 ethtype; /* Ethernet protocol for pause frame */ @@ -292,8 +265,8 @@ struct ice_vsi { /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ - u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */ - u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */ + u16 *txq_map; /* index in pf->avail_txqs */ + u16 *rxq_map; /* index in pf->avail_rxqs */ u16 alloc_txq; /* Allocated Tx queues */ u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ 
@@ -329,15 +302,16 @@ struct ice_q_vector { } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { - ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, ICE_FLAG_DCB_CAPABLE, ICE_FLAG_DCB_ENA, + ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, - ICE_FLAG_ENABLE_FW_LLDP, + ICE_FLAG_NO_MEDIA, + ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -363,9 +337,9 @@ struct ice_pf { u16 num_vf_qps; /* num queue pairs per VF */ u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); - DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); - DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); + unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ + unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ unsigned long serv_tmr_period; unsigned long serv_tmr_prev; struct timer_list serv_tmr; @@ -376,11 +350,11 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_idx; /* Other interrupt cause MSIX vector index */ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + u16 max_pf_txqs; /* Total Tx queues PF wide */ + u16 max_pf_rxqs; /* Total Rx queues PF wide */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ - u16 q_left_tx; /* remaining num Tx queues left unclaimed */ - u16 q_left_rx; /* remaining num Rx queues left unclaimed */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ u16 num_alloc_vsi; u16 corer_count; /* Core reset count */ @@ -433,21 +407,26 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, } /** - * ice_find_vsi_by_type - Find and return VSI of a given type - * @pf: PF to search for VSI - * @type: Value indicating type of VSI we are looking for + * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev + * @netdev: pointer to the netdev struct */ -static inline struct ice_vsi * -ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) +static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) { - int i; + struct ice_netdev_priv *np = netdev_priv(netdev); - for (i = 0; i < pf->num_alloc_vsi; i++) { - struct ice_vsi *vsi = pf->vsi[i]; + return np->vsi->back; +} - if (vsi && vsi->type == type) - return vsi; - } +/** + * ice_get_main_vsi - Get the PF VSI + * @pf: PF instance + * + * returns pf->vsi[0], which by definition is the PF VSI + */ +static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) +{ + if (pf->vsi) + return pf->vsi[0]; return NULL; } @@ -455,6 +434,11 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type) int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); +u16 ice_get_avail_txq_count(struct ice_pf *pf); +u16 ice_get_avail_rxq_count(struct ice_pf *pf); +void ice_update_vsi_stats(struct ice_vsi *vsi); +void ice_update_pf_stats(struct ice_pf *pf); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 765e3c2ed045..023e3d2fee5f 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -33,11 
+33,22 @@ struct ice_aqc_get_ver { u8 api_patch; }; +/* Send driver version (indirect 0x0002) */ +struct ice_aqc_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { - __le32 driver_unloading; + u8 driver_unloading; #define ICE_AQC_DRIVER_UNLOADING BIT(0) - u8 reserved[12]; + u8 reserved[15]; }; /* Request resource ownership (direct 0x0008) @@ -91,6 +102,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 +#define ICE_AQC_CAPS_DCB 0x0018 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 @@ -1518,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log { __le32 addr_low; }; +/* Download Package (indirect 0x0C40) */ +/* Also used for Update Package (indirect 0x0C42) */ +struct ice_aqc_download_pkg { + u8 flags; +#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 + u8 reserved[3]; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_download_pkg_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get Package Info List (indirect 0x0C43) */ +struct ice_aqc_get_pkg_info_list { + __le32 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + +/* Version format for packages */ +struct ice_pkg_ver { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define ICE_PKG_NAME_SIZE 32 + +struct ice_aqc_get_pkg_info { + struct ice_pkg_ver ver; + char name[ICE_PKG_NAME_SIZE]; + u8 is_in_nvm; + u8 is_active; + u8 is_active_at_boot; + u8 is_modified; +}; + +/* Get Package Info List response buffer format (0x0C43) */ +struct ice_aqc_get_pkg_info_resp { + __le32 count; + struct ice_aqc_get_pkg_info pkg_info[1]; +}; /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1546,6 +1608,7 @@ struct ice_aq_desc { u8 raw[16]; struct ice_aqc_generic generic; struct ice_aqc_get_ver get_ver; + struct ice_aqc_driver_ver driver_ver; struct ice_aqc_q_shutdown q_shutdown; struct ice_aqc_req_res res_owner; struct ice_aqc_manage_mac_read mac_read; @@ -1579,6 +1642,7 @@ struct ice_aq_desc { struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; + struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; @@ -1610,12 +1674,19 @@ enum ice_aq_err { ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ + ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ + ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ + ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ + ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ + ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ + ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ }; /* Admin Queue command opcodes */ enum ice_adminq_opc { /* AQ commands */ ice_aqc_opc_get_ver = 0x0001, + ice_aqc_opc_driver_ver = 0x0002, ice_aqc_opc_q_shutdown = 0x0003, /* resource ownership */ @@ -1697,6 +1768,10 @@ enum ice_adminq_opc { ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + /* package commands */ + ice_aqc_opc_download_pkg = 0x0C40, + 
ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* debug commands */ ice_aqc_opc_fw_logging = 0xFF09, ice_aqc_opc_fw_logging_info = 0xFF10, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2e0731c1e1a3..3a6b3950eb0e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -263,21 +263,23 @@ enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { - struct ice_link_status *hw_link_info_old, *hw_link_info; struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; + struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; enum ice_status status; + struct ice_hw *hw; u16 cmd_flags; if (!pi) return ICE_ERR_PARAM; - hw_link_info_old = &pi->phy.link_info_old; + hw = pi->hw; + li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; - hw_link_info = &pi->phy.link_info; + li = &pi->phy.link_info; hw_fc_info = &pi->fc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); @@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data), - cd); + status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); if (status) return status; /* save off old link status information */ - *hw_link_info_old = *hw_link_info; + *li_old = *li; /* update current link status information */ - hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); - hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); - hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high); + li->link_speed = le16_to_cpu(link_data.link_speed); + li->phy_type_low = le64_to_cpu(link_data.phy_type_low); + li->phy_type_high = le64_to_cpu(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); - hw_link_info->link_info = link_data.link_info; - hw_link_info->an_info = link_data.an_info; - hw_link_info->ext_info = link_data.ext_info; - hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); - hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; - hw_link_info->topo_media_conflict = link_data.topo_media_conflict; - hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; + li->link_info = link_data.link_info; + li->an_info = link_data.an_info; + li->ext_info = link_data.ext_info; + li->max_frame_size = le16_to_cpu(link_data.max_frame_size); + li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; + li->topo_media_conflict = link_data.topo_media_conflict; + li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | + ICE_AQ_CFG_PACING_TYPE_M); /* update fc info */ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); @@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, else hw_fc_info->current_mode = ICE_FC_NONE; - hw_link_info->lse_ena = - !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); + + ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)li->phy_type_low); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)li->phy_type_high); + ice_debug(hw, ICE_DBG_LINK, "media_type 
= 0x%x\n", *hw_media_type); + ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); + ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); + ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info); + ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); + ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); + ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) - *link = *hw_link_info; + *link = *li; /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; @@ -715,6 +729,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } /** + * ice_get_nvm_version - get cached NVM version data + * @hw: pointer to the hardware structure + * @oem_ver: 8 bit NVM version + * @oem_build: 16 bit NVM build number + * @oem_patch: 8 NVM patch number + * @ver_hi: high 16 bits of the NVM version + * @ver_lo: low 16 bits of the NVM version + */ +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo) +{ + struct ice_nvm_info *nvm = &hw->nvm; + + *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); + *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK); + *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >> + ICE_OEM_VER_BUILD_SHIFT); + *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; + *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; +} + +/** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ @@ -740,7 +777,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_get_itr_intrl_gran(hw); - status = ice_init_all_ctrlq(hw); + status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; @@ -845,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); - + status = ice_init_hw_tbls(hw); + if (status) + goto err_unroll_fltr_mgmt_struct; return 0; err_unroll_fltr_mgmt_struct: @@ -855,7 +894,7 @@ err_unroll_sched: err_unroll_alloc: devm_kfree(ice_hw_to_dev(hw), hw->port_info); err_unroll_cqinit: - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); return status; } @@ -873,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw) ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); + ice_free_seg(hw); + ice_free_hw_tbls(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); @@ -881,7 +922,7 @@ void ice_deinit_hw(struct ice_hw *hw) /* Attempt to disable FW logging before shutting down control queues */ ice_cfg_fw_log(hw, false); - ice_shutdown_all_ctrlq(hw); + ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); @@ -1078,6 +1119,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), + ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; @@ -1088,7 +1130,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes - * it to HW register space + * it to HW register space and enables the hardware to prefetch descriptors + * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, @@ -1096,6 +1139,11 @@ 
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + if (!rlan_ctx) + return ICE_ERR_BAD_PTR; + + rlan_ctx->prefena = 1; + ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } @@ -1111,6 +1159,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), + ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), @@ -1129,7 +1178,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171), + ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), { 0 } }; @@ -1185,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, /* FW Admin Queue command wrappers */ +/* Software lock/mutex that is meant to be held while the Global Config Lock + * in firmware is acquired by the software to prevent most (but not all) types + * of AQ commands from being sent to FW + */ +DEFINE_MUTEX(ice_global_cfg_lock_sw); + /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct @@ -1199,7 +1254,38 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + struct ice_aqc_req_res *cmd = &desc->params.res_owner; + bool lock_acquired = false; + enum ice_status status; + + /* When a package download is in process (i.e. when the firmware's + * Global Configuration Lock resource is held), only the Download + * Package, Get Version, Get Package Info List and Release Resource + * (with resource ID set to Global Config Lock) AdminQ commands are + * allowed; all others must block until the package download completes + * and the Global Config Lock is released. See also + * ice_acquire_global_cfg_lock(). 
+ */ + switch (le16_to_cpu(desc->opcode)) { + case ice_aqc_opc_download_pkg: + case ice_aqc_opc_get_pkg_info_list: + case ice_aqc_opc_get_ver: + break; + case ice_aqc_opc_release_res: + if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) + break; + /* fall-through */ + default: + mutex_lock(&ice_global_cfg_lock_sw); + lock_acquired = true; + break; + } + + status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + if (lock_acquired) + mutex_unlock(&ice_global_cfg_lock_sw); + + return status; } /** @@ -1237,6 +1323,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) } /** + * ice_aq_send_driver_ver + * @hw: pointer to the HW struct + * @dv: driver's major, minor version + * @cd: pointer to command details structure or NULL + * + * Send the driver version (0x0002) to the firmware + */ +enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd) +{ + struct ice_aqc_driver_ver *cmd; + struct ice_aq_desc desc; + u16 len; + + cmd = &desc.params.driver_ver; + + if (!dv) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->major_ver = dv->major_ver; + cmd->minor_ver = dv->minor_ver; + cmd->build_ver = dv->build_ver; + cmd->subbuild_ver = dv->subbuild_ver; + + len = 0; + while (len < sizeof(dv->driver_string) && + isascii(dv->driver_string[len]) && dv->driver_string[len]) + len++; + + return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); +} + +/** * ice_aq_q_shutdown * @hw: pointer to the HW struct * @unloading: is the driver unloading itself @@ -1254,7 +1377,7 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); if (unloading) - cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING); + cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } @@ -1529,29 +1652,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, - "%s: valid functions = %d\n", prefix, + "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, - "%s: SR-IOV = %d\n", prefix, + "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_VF: if (dev_p) { dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs exposed = %d\n", prefix, + "%s: num_vfs_exposed = %d\n", prefix, dev_p->num_vfs_exposed); } else if (func_p) { func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: VFs allocated = %d\n", prefix, + "%s: num_allocd_vfs = %d\n", prefix, func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, - "%s: VF base_id = %d\n", prefix, + "%s: vf_base_id = %d\n", prefix, func_p->vf_base_id); } break; @@ -1559,63 +1682,75 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (dev_p) { dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, - "%s: num VSI alloc to host = %d\n", + "%s: num_vsi_allocd_to_host = %d\n", prefix, dev_p->num_vsi_allocd_to_host); } else if (func_p) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI (fw) = %d\n", + "%s: guar_num_vsi (fw) = %d\n", prefix, number); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI = 
%d\n", + "%s: guar_num_vsi = %d\n", prefix, func_p->guar_num_vsi); } break; + case ICE_AQC_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, + "%s: active_tc_bitmap = %d\n", prefix, + caps->active_tc_bitmap); + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d\n", prefix, caps->maxtc); + break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table size = %d\n", prefix, + "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table width = %d\n", prefix, + "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Rx queues = %d\n", prefix, + "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, - "%s: Rx first queue ID = %d\n", prefix, + "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Tx queues = %d\n", prefix, + "%s: num_txq = %d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, - "%s: Tx first queue ID = %d\n", prefix, + "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX vector count = %d\n", prefix, + "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX first vector index = %d\n", prefix, + "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; - ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n", + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; default: @@ -1712,6 +1847,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) } /** + * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode + * @hw: pointer to the hardware structure + */ +void ice_set_safe_mode_caps(struct ice_hw *hw) +{ + struct ice_hw_func_caps *func_caps = &hw->func_caps; + struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; + u32 valid_func, rxq_first_id, txq_first_id; + u32 msix_vector_first_id, max_mtu; + u32 num_func = 0; + u8 i; + + /* cache some func_caps values that should be restored after memset */ + valid_func = func_caps->common_cap.valid_functions; + txq_first_id = func_caps->common_cap.txq_first_id; + rxq_first_id = func_caps->common_cap.rxq_first_id; + msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; + max_mtu = func_caps->common_cap.max_mtu; + + /* unset func capabilities */ + memset(func_caps, 0, sizeof(*func_caps)); + + /* restore cached values */ + func_caps->common_cap.valid_functions = valid_func; + func_caps->common_cap.txq_first_id = txq_first_id; + func_caps->common_cap.rxq_first_id = rxq_first_id; + func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + func_caps->common_cap.max_mtu = max_mtu; + + /* one Tx and one Rx queue in safe mode */ + func_caps->common_cap.num_rxq = 1; + func_caps->common_cap.num_txq = 1; + + /* two MSIX vectors, one for traffic and one for misc causes */ + func_caps->common_cap.num_msix_vectors = 2; + func_caps->guar_num_vsi = 1; + + /* cache 
some dev_caps values that should be restored after memset */ + valid_func = dev_caps->common_cap.valid_functions; + txq_first_id = dev_caps->common_cap.txq_first_id; + rxq_first_id = dev_caps->common_cap.rxq_first_id; + msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; + max_mtu = dev_caps->common_cap.max_mtu; + + /* unset dev capabilities */ + memset(dev_caps, 0, sizeof(*dev_caps)); + + /* restore cached values */ + dev_caps->common_cap.valid_functions = valid_func; + dev_caps->common_cap.txq_first_id = txq_first_id; + dev_caps->common_cap.rxq_first_id = rxq_first_id; + dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + dev_caps->common_cap.max_mtu = max_mtu; + + /* valid_func is a bitmap. get number of functions */ +#define ICE_MAX_FUNCS 8 + for (i = 0; i < ICE_MAX_FUNCS; i++) + if (valid_func & BIT(i)) + num_func++; + + /* one Tx and one Rx queue per function in safe mode */ + dev_caps->common_cap.num_rxq = num_func; + dev_caps->common_cap.num_txq = num_func; + + /* two MSIX vectors per function */ + dev_caps->common_cap.num_msix_vectors = 2 * num_func; +} + +/** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ @@ -1993,6 +2197,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, desc.params.set_phy.lport_num = lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_low)); + ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + (unsigned long long)le64_to_cpu(cfg->phy_type_high)); + ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); + ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", + cfg->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); + ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); + ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); + return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } @@ -2024,7 +2239,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); if (!status) memcpy(li->module_type, &pcaps->module_type, @@ -2174,27 +2389,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { switch (fec) { case ICE_FEC_BASER: - /* Clear auto FEC and RS bits, and AND BASE-R ability + /* Clear RS bits, and AND BASE-R ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ; break; case ICE_FEC_RS: - /* Clear auto FEC and BASE-R bits, and AND RS ability + /* Clear BASE-R bits, and AND RS ability * bits and OR request bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ; break; case ICE_FEC_NONE: - /* Clear auto FEC and all FEC option bits. */ - cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + /* Clear all FEC option bits. 
*/ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; case ICE_FEC_AUTO: @@ -3240,40 +3452,44 @@ void ice_replay_post(struct ice_hw *hw) /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @hireg: high 32 bit HW register to read from - * @loreg: low 32 bit HW register to read from + * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { - u64 new_data; - - new_data = rd32(hw, loreg); - new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. + */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat; - *cur_stat &= 0xFFFFFFFFFFULL; + *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info - * @reg: HW register to read from + * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value @@ -3287,17 +3503,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, new_data = rd32(hw, reg); /* device stats are not reset at PFR, they likely will not be zeroed - * when the driver starts. So save the first values read and use them as - * offsets to be subtracted from the raw values in order to report stats - * that count from zero. + * when the driver starts. Thus, save the value from the first read + * without adding to the statistic value so that we report stats which + * count up from zero. */ - if (!prev_stat_loaded) + if (!prev_stat_loaded) { *prev_stat = new_data; + return; + } + + /* Calculate the difference between the new and old values, and then + * add it to the software stat value. 
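The accumulation scheme used above in ice_stat_update40 (and mirrored below for the 32-bit counters) is easiest to see with a concrete roll-over case. A minimal, self-contained sketch, illustrative only and not part of the patch, assuming nothing beyond standard C:

#include <stdint.h>
#include <stdio.h>

#define BIT40 (1ULL << 40)

/* Roll-over-safe accumulation for a 40-bit hardware counter: keep the raw
 * previous reading and add only the delta to the running software stat.
 */
static void stat_update40(uint64_t new_data, uint64_t *prev, uint64_t *cur)
{
	new_data &= BIT40 - 1;			/* counter is 40 bits wide */
	if (new_data >= *prev)
		*cur += new_data - *prev;
	else					/* counter wrapped past 2^40 */
		*cur += (new_data + BIT40) - *prev;
	*prev = new_data;			/* remember raw value for next read */
}

int main(void)
{
	uint64_t prev = BIT40 - 10, cur = 0;

	stat_update40(5, &prev, &cur);		/* wrapped read: delta is 15 */
	printf("%llu\n", (unsigned long long)cur);	/* prints 15 */
	return 0;
}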
+ */ if (new_data >= *prev_stat) - *cur_stat = new_data - *prev_stat; + *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ - *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + /* Update the previously stored value to prepare for next read */ + *prev_stat = new_data; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d1f8353fe6bb..c3df92f57777 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include "ice.h" #include "ice_type.h" +#include "ice_flex_pipe.h" #include "ice_switch.h" #include <linux/avf/virtchnl.h> @@ -17,8 +18,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); @@ -39,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, void ice_clear_pxe_mode(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw); +void ice_set_safe_mode_caps(struct ice_hw *hw); + void ice_dev_onetime_setup(struct ice_hw *hw); enum ice_status @@ -64,12 +69,18 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); + +extern struct mutex ice_global_cfg_lock_sw; + enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); enum ice_status +ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, + struct ice_sq_cd *cd); +enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); @@ -123,11 +134,14 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); void -ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void +ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, + u8 *oem_patch, u8 *ver_hi, u8 *ver_lo); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_get_elem *buf); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index e91ac4df0242..2353166c654e 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send 
Queue - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size @@ -369,7 +369,7 @@ init_ctrlq_exit: * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue. - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size @@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } + ice_shutdown_rq(hw, cq); + ice_shutdown_sq(hw, cq); return status; } @@ -585,12 +579,14 @@ init_ctrlq_free_rq: * @hw: pointer to the hardware structure * @q_type: specific Control queue type * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } - mutex_init(&cq->sq_lock); - mutex_init(&cq->rq_lock); /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; @@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) - goto init_ctrlq_destroy_locks; + return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); @@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); -init_ctrlq_destroy_locks: - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); return ret_code; } @@ -647,12 +638,14 @@ init_ctrlq_destroy_locks: * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * - * Prior to calling this function, drivers *MUST* set the following fields + * Prior to calling this function, the driver MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size + * + * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { @@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) } /** + * ice_init_ctrlq_locks - Initialize locks for a control queue + * @cq: pointer to the control queue + * + * Initializes the send and receive queue locks for a given control queue. 
+ */ +static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_init(&cq->sq_lock); + mutex_init(&cq->rq_lock); +} + +/** + * ice_create_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, the driver *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + * + * This function creates all the control queue locks and then calls + * ice_init_all_ctrlq. It should be called once during driver load. If the + * driver needs to re-initialize control queues at run time it should call + * ice_init_all_ctrlq instead. + */ +enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +{ + ice_init_ctrlq_locks(&hw->adminq); + ice_init_ctrlq_locks(&hw->mailboxq); + + return ice_init_all_ctrlq(hw); +} + +/** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * + * NOTE: this function does not destroy the control queue locks. */ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return; } - if (cq->sq.count) { - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - } - if (cq->rq.count) { - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->rq_lock); - } + ice_shutdown_sq(hw, cq); + ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * + * NOTE: this function does not destroy the control queue locks. The driver + * may call this at runtime to shutdown and later restart control queues, such + * as in response to a reset event. */ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { @@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) } /** + * ice_destroy_ctrlq_locks - Destroy locks for a control queue + * @cq: pointer to the control queue + * + * Destroys the send and receive queue locks for a given control queue. + */ +static void +ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) +{ + mutex_destroy(&cq->sq_lock); + mutex_destroy(&cq->rq_lock); +} + +/** + * ice_destroy_all_ctrlq - exit routine for all control queues + * @hw: pointer to the hardware structure + * + * This function shuts down all the control queues and then destroys the + * control queue locks. It should be called once during driver unload. The + * driver should call ice_shutdown_all_ctrlq if it needs to shut down and + * reinitialize control queues, such as in response to a reset event. 
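The split between lock setup and queue setup is the point of the new create/destroy wrappers. A hedged sketch of the intended pairing over the driver's lifetime, illustrative only; the probe/reset/remove call sites are assumed and not shown in this hunk:

/* Illustrative pairing only; error handling trimmed for brevity. */
static enum ice_status example_load(struct ice_hw *hw)
{
	/* once at load: init sq/rq locks, then bring the queues up */
	return ice_create_all_ctrlq(hw);
}

static void example_reset(struct ice_hw *hw)
{
	ice_shutdown_all_ctrlq(hw);	/* queues go down, locks survive */
	ice_init_all_ctrlq(hw);		/* queues come back up on the same locks */
}

static void example_unload(struct ice_hw *hw)
{
	/* once at unload: shut the queues down and destroy the locks */
	ice_destroy_all_ctrlq(hw);
}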
+ */ +void ice_destroy_all_ctrlq(struct ice_hw *hw) +{ + /* shut down all the control queues first */ + ice_shutdown_all_ctrlq(hw); + + ice_destroy_ctrlq_locks(&hw->adminq); + ice_destroy_ctrlq_locks(&hw->mailboxq); +} + +/** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index c2002ded65f6..dd7efff121bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -60,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -enum ice_status +static enum ice_status ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -444,9 +444,15 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ - ice_for_each_traffic_class(i) + ice_for_each_traffic_class(i) { etscfg->tcbwtable[i] = buf[offset++]; + if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT) + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + else + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } @@ -937,10 +943,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) /** * ice_init_dcb * @hw: pointer to the HW struct + * @enable_mib_change: enable MIB change event * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw) +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_port_info *pi = hw->port_info; enum ice_status ret = 0; @@ -954,7 +961,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) pi->dcbx_status = ice_get_dcbx_status(hw); if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || - pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) { + pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || + pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ ret = ice_get_dcb_cfg(pi); pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM); @@ -965,9 +973,39 @@ enum ice_status ice_init_dcb(struct ice_hw *hw) } /* Configure the LLDP MIB change event */ - ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (enable_mib_change) { + ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (!ret) + pi->is_sw_lldp = false; + } + + return ret; +} + +/** + * ice_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * @ena_mib: enable/disable MIB change event + * + * Configure (disable/enable) MIB + */ +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) + return ICE_ERR_NOT_READY; + + ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) - pi->is_sw_lldp = false; + pi->is_sw_lldp = !ena_mib; return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 522e1452abe2..ee138f9bdc7c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -125,7 +125,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 
bridgetype, struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw); +enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); enum ice_status ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, @@ -139,9 +139,7 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, - struct ice_sq_cd *cd); +enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ static inline enum ice_status ice_aq_stop_lldp(struct ice_hw __always_unused *hw, @@ -172,9 +170,8 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, } static inline enum ice_status -ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, - bool __always_unused ena_update, - struct ice_sq_cd __always_unused *cd) +ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, + bool __always_unused ena_mib) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index fe88b127ca42..dd47869c4ad4 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -4,6 +4,48 @@ #include "ice_dcb_lib.h" /** + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @ena_tc: TC map to be enabled + */ +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) +{ + struct net_device *netdev = vsi->netdev; + struct ice_pf *pf = vsi->back; + struct ice_dcbx_cfg *dcbcfg; + u8 netdev_tc; + int i; + + if (!netdev) + return; + + if (!ena_tc) { + netdev_reset_tc(netdev); + return; + } + + if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) + return; + + dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + + ice_for_each_traffic_class(i) + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; + + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); + } +} + +/** * ice_dcb_get_ena_tc - return bitmap of enabled TCs * @dcbcfg: DCB config to evaluate for enabled TCs */ @@ -204,15 +246,86 @@ out: } /** + * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config + * @pi: port information structure + */ +static void ice_cfg_etsrec_defaults(struct ice_port_info *pi) +{ + struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg; + u8 i; + + /* Ensure ETS recommended DCB configuration is not already set */ + if (dcbcfg->etsrec.maxtcs) + return; + + /* In CEE mode, set the default to 1 TC */ + dcbcfg->etsrec.maxtcs = 1; + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100; + dcbcfg->etsrec.tsatable[i] = i ? 
ICE_IEEE_TSA_STRICT : + ICE_IEEE_TSA_ETS; + } +} + +/** + * ice_dcb_need_recfg - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + */ +static bool +ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prio_table, + &old_cfg->etscfg.prio_table, + sizeof(new_cfg->etscfg.prio_table))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); + return need_reconfig; +} + +/** * ice_dcb_rebuild - rebuild DCB post reset * @pf: physical function instance */ void ice_dcb_rebuild(struct ice_pf *pf) { + struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; - struct ice_dcbx_cfg *prev_cfg; enum ice_status ret; - u8 willing; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { @@ -224,9 +337,15 @@ void ice_dcb_rebuild(struct ice_pf *pf) if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) return; + local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg; + desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg; + /* Save current willing state and force FW to unwilling */ - willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing; - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0; + local_dcbx_cfg->etscfg.willing = 0x0; + local_dcbx_cfg->pfc.willing = 0x0; + local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; + + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); @@ -234,31 +353,37 @@ void ice_dcb_rebuild(struct ice_pf *pf) } /* Retrieve DCB config and ensure same as current in SW */ - prev_cfg = devm_kmemdup(&pf->pdev->dev, - &pf->hw.port_info->local_dcbx_cfg, + prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL); if (!prev_cfg) { dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n"); goto dcb_error; } - ice_init_dcb(&pf->hw); - if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg, - sizeof(*prev_cfg))) { + ice_init_dcb(&pf->hw, true); + if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS) + pf->hw.port_info->is_sw_lldp = true; + else + pf->hw.port_info->is_sw_lldp = false; + + if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) { /* difference in cfg detected - disable DCB till next MIB */ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n"); - devm_kfree(&pf->pdev->dev, prev_cfg); goto 
dcb_error; } /* fetched config congruent to previous configuration */ devm_kfree(&pf->pdev->dev, prev_cfg); - /* Configuration replayed - reset willing state to previous */ - pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing; + /* Set the local desired config */ + if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE) + memcpy(local_dcbx_cfg, desired_dcbx_cfg, + sizeof(*local_dcbx_cfg)); + + ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n"); + dev_err(&pf->pdev->dev, "Failed to set desired config\n"); goto dcb_error; } dev_info(&pf->pdev->dev, "DCB restored after reset\n"); @@ -330,7 +455,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); dcbcfg->etscfg.willing = 1; - dcbcfg->etscfg.maxtcs = 8; + dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc; dcbcfg->etscfg.tcbwtable[0] = 100; dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; @@ -339,7 +464,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) dcbcfg->etsrec.willing = 0; dcbcfg->pfc.willing = 1; - dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS; + dcbcfg->pfc.pfccap = hw->func_caps.common_cap.maxtc; dcbcfg->numapps = 1; dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE; @@ -364,35 +489,24 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) struct device *dev = &pf->pdev->dev; struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; - int sw_default = 0; int err; port_info = hw->port_info; - err = ice_init_dcb(hw); - if (err) { - /* FW LLDP is not active, default to SW DCBX/LLDP */ - dev_info(&pf->pdev->dev, "FW LLDP is not active\n"); - hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; - hw->port_info->is_sw_lldp = true; - } - - if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS) - dev_info(&pf->pdev->dev, "DCBX disabled\n"); - - /* LLDP disabled in FW */ - if (port_info->is_sw_lldp) { - sw_default = 1; - dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n"); - clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); - } else { - set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); + err = ice_init_dcb(hw, false); + if (err && !port_info->is_sw_lldp) { + dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err); + goto dcb_init_err; } - if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) - dev_info(&pf->pdev->dev, "DCBX not started\n"); - - if (sw_default) { + dev_info(&pf->pdev->dev, + "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", + pf->hw.func_caps.common_cap.maxtc); + if (err) { + /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ + dev_info(&pf->pdev->dev, + "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); err = ice_dcb_sw_dflt_cfg(pf, locked); if (err) { dev_err(&pf->pdev->dev, @@ -402,21 +516,18 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) } pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - set_bit(ICE_FLAG_DCB_ENA, pf->flags); return 0; } + set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + /* DCBX in FW and LLDP enabled in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; - set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - err = ice_dcb_init_cfg(pf, locked); if (err) goto dcb_init_err; - dev_info(&pf->pdev->dev, "DCBX offload supported\n"); return err; dcb_init_err: @@ -432,30 +543,31 @@ void ice_update_dcb_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; 
struct ice_hw *hw = &pf->hw; - u8 pf_id = hw->pf_id; + u8 port; int i; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; for (i = 0; i < 8; i++) { - ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_rx[i], &cur_ps->priority_xoff_rx[i]); - ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONRXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_rx[i], &cur_ps->priority_xon_rx[i]); - ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXONTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_tx[i], &cur_ps->priority_xon_tx[i]); - ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i), + ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i), pf->stat_prev_loaded, &prev_ps->priority_xoff_tx[i], &cur_ps->priority_xoff_tx[i]); - ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i), + ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i), pf->stat_prev_loaded, &prev_ps->priority_xon_2_xoff[i], &cur_ps->priority_xon_2_xoff[i]); @@ -502,55 +614,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, } /** - * ice_dcb_need_recfg - Check if DCB needs reconfig - * @pf: board private structure - * @old_cfg: current DCB config - * @new_cfg: new DCB config - */ -static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, - struct ice_dcbx_cfg *new_cfg) -{ - bool need_reconfig = false; - - /* Check if ETS configuration has changed */ - if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, - sizeof(new_cfg->etscfg))) { - /* If Priority Table has changed reconfig is needed */ - if (memcmp(&new_cfg->etscfg.prio_table, - &old_cfg->etscfg.prio_table, - sizeof(new_cfg->etscfg.prio_table))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); - } - - if (memcmp(&new_cfg->etscfg.tcbwtable, - &old_cfg->etscfg.tcbwtable, - sizeof(new_cfg->etscfg.tcbwtable))) - dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); - - if (memcmp(&new_cfg->etscfg.tsatable, - &old_cfg->etscfg.tsatable, - sizeof(new_cfg->etscfg.tsatable))) - dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); - } - - /* Check if PFC configuration has changed */ - if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); - } - - /* Check if APP Table has changed */ - if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { - need_reconfig = true; - dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); - } - - dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); - return need_reconfig; -} - -/** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf * @event: pointer to the admin queue receive event diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 819081053ff5..661a6f7bca64 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { @@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) #define 
ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) #define ice_set_cgd_num(tlan_ctx, ring) do {} while (0) +#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0) #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 52083a63dee6..7e23034df955 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -155,36 +155,11 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), - ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP), + ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) -/** - * ice_nvm_version_str - format the NVM version strings - * @hw: ptr to the hardware info - */ -static char *ice_nvm_version_str(struct ice_hw *hw) -{ - static char buf[ICE_ETHTOOL_FWVER_LEN]; - u8 ver, patch; - u32 full_ver; - u16 build; - - full_ver = hw->nvm.oem_ver; - ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); - build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >> - ICE_OEM_VER_BUILD_SHIFT); - patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK); - - snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", - (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT, - (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT, - hw->nvm.eetrack, ver, build, patch); - - return buf; -} - static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -1201,13 +1176,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); - if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) { - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) { + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { enum ice_status status; /* Disable FW LLDP engine */ - status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, - NULL); + status = ice_cfg_lldp_mib_change(&pf->hw, false); + /* If unregistering for LLDP events fails, this is * not an error state, as there shouldn't be any * events to respond to. @@ -1273,6 +1248,12 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * The FW LLDP engine will now be consuming them. */ ice_cfg_sw_lldp(vsi, false, false); + + /* Register for MIB change events */ + status = ice_cfg_lldp_mib_change(&pf->hw, true); + if (status) + dev_dbg(&pf->pdev->dev, + "Fail to enable MIB change events\n"); } } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); @@ -1319,14 +1300,17 @@ ice_get_ethtool_stats(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_ring *ring; - unsigned int j = 0; + unsigned int j; int i = 0; char *p; + ice_update_pf_stats(pf); + ice_update_vsi_stats(vsi); + for (j = 0; j < ICE_VSI_STATS_LEN; j++) { p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } /* populate per queue stats */ @@ -1716,6 +1700,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_info *pi = np->vsi->port_info; struct ethtool_link_ksettings cap_ksettings; struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; @@ -2040,6 +2025,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, break; } ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & ICE_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Autoneg); + + /* Set flow control negotiated Rx/Tx pause */ + switch (pi->fc.current_mode) { + case ICE_FC_FULL: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + break; + case ICE_FC_TX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_RX_PAUSE: + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + break; + case ICE_FC_PFC: + /* fall through */ + default: + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + break; + } } /** @@ -2078,9 +2090,12 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; + enum ice_status status; + int err = 0; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); hw_link_info = &vsi->port_info->phy.link_info; /* set speed and duplex */ @@ -2125,48 +2140,36 @@ ice_get_link_ksettings(struct net_device *netdev, /* flow control is symmetric and always supported */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); - switch (vsi->port_info->fc.req_mode) { - case ICE_FC_FULL: + caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_SW_CFG, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set the advertised flow control based on the PHY capability */ + if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && + (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); - break; - case ICE_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_RX_PAUSE: + } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) { ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); - break; - case ICE_FC_PFC: - default: + } else { ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); ethtool_link_ksettings_del_link_mode(ks, advertising, Asym_Pause); - break; } - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); - if (!caps) - goto done; - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - - /* Set supported FEC modes based on PHY capability */ - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); - - if (caps->link_fec_options & 
ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || - caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); - - if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG, - caps, NULL)) - netdev_info(netdev, "Get phy capability failed.\n"); - /* Set advertised FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); @@ -2178,9 +2181,25 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + status = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + if (status) { + err = -EIO; + goto done; + } + + /* Set supported FEC modes based on PHY capability */ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + + if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || + caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + done: devm_kfree(&vsi->back->pdev->dev, caps); - return 0; + return err; } /** @@ -2763,6 +2782,11 @@ static int ice_nway_reset(struct net_device *netdev) * ice_get_pauseparam - Get Flow Control status * @netdev: network interface device structure * @pause: ethernet pause (flow control) parameters + * + * Get requested flow control status from PHY capability. + * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which + * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report + * the negotiated Rx/Tx pause via lp_advertising. */ static void ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) @@ -2816,6 +2840,7 @@ static int ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_aqc_get_phy_caps_data *pcaps; struct ice_link_status *hw_link_info; struct ice_pf *pf = np->vsi->back; struct ice_dcbx_cfg *dcbx_cfg; @@ -2826,6 +2851,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) u8 aq_failures; bool link_up; int err = 0; + u32 is_an; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -2840,7 +2866,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EOPNOTSUPP; } - if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { + /* Get pause param reports configured and negotiated flow control pause + * when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS is + * defined, get pause param pause->autoneg reports SW configured setting, + * so compare pause->autoneg with SW configured to prevent the user from + * using set pause param to change autoneg. + */ + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + /* Get current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + kfree(pcaps); + return -EIO; + } + + is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE); + + kfree(pcaps); + + if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); return -EOPNOTSUPP; } @@ -3146,12 +3195,6 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_get_q_coalesce(vsi, ec, q_num)) return -EINVAL; - if (q_num < vsi->num_txq) - ec->tx_max_coalesced_frames_irq = vsi->work_lmt; - - if (q_num < vsi->num_rxq) - ec->rx_max_coalesced_frames_irq = vsi->work_lmt; - return 0; } @@ -3185,25 +3228,25 @@ static int ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, struct ice_ring_container *rc, struct ice_vsi *vsi) { + const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx"; + u32 use_adaptive_coalesce, coalesce_usecs; struct ice_pf *pf = vsi->back; u16 itr_setting; if (!rc->ring) return -EINVAL; - itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; - switch (c_type) { case ICE_RX_CONTAINER: if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || (ec->rx_coalesce_usecs_high && ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { netdev_info(vsi->netdev, - "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n", - pf->hw.intrl_gran, ICE_MAX_INTRL); + "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", + c_type_str, pf->hw.intrl_gran, + ICE_MAX_INTRL); return -EINVAL; } - if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) { rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high; wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx), @@ -3211,60 +3254,60 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, pf->hw.intrl_gran)); } - if (ec->rx_coalesce_usecs != itr_setting && - ec->use_adaptive_rx_coalesce) { - netdev_info(vsi->netdev, - "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; + coalesce_usecs = ec->rx_coalesce_usecs; - if (ec->rx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, rx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } - - if (ec->use_adaptive_rx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - } break; case ICE_TX_CONTAINER: if (ec->tx_coalesce_usecs_high) { netdev_info(vsi->netdev, - "setting tx-usecs-high is not supported\n"); + "setting %s-usecs-high is not supported\n", + c_type_str); return -EINVAL; } - if (ec->tx_coalesce_usecs != itr_setting && - ec->use_adaptive_tx_coalesce) { - netdev_info(vsi->netdev, - "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); - return -EINVAL; - } + use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; + coalesce_usecs = ec->tx_coalesce_usecs; - if (ec->tx_coalesce_usecs > ICE_ITR_MAX) { - netdev_info(vsi->netdev, - "Invalid value, tx-usecs range is 0-%d\n", - ICE_ITR_MAX); - return -EINVAL; - } - - if (ec->use_adaptive_tx_coalesce) { - rc->itr_setting |= ICE_ITR_DYNAMIC; - } else { - rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - rc->target_itr = ITR_TO_REG(rc->itr_setting); - } break; default: dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); return -EINVAL; } + itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; + if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { + netdev_info(vsi->netdev, + "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", + 
c_type_str, c_type_str); + return -EINVAL; + } + + if (coalesce_usecs > ICE_ITR_MAX) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs range is 0-%d\n", + c_type_str, ICE_ITR_MAX); + return -EINVAL; + } + + /* hardware only supports an ITR granularity of 2us */ + if (coalesce_usecs % 2 != 0) { + netdev_info(vsi->netdev, + "Invalid value, %s-usecs must be even\n", + c_type_str); + return -EINVAL; + } + + if (use_adaptive_coalesce) { + rc->itr_setting |= ICE_ITR_DYNAMIC; + } else { + /* store user facing value how it was set */ + rc->itr_setting = coalesce_usecs; + /* set to static and convert to value HW understands */ + rc->target_itr = + ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting)); + } + return 0; } @@ -3331,17 +3374,13 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, if (ice_set_q_coalesce(vsi, ec, i)) return -EINVAL; } - goto set_work_lmt; + goto set_complete; } if (ice_set_q_coalesce(vsi, ec, q_num)) return -EINVAL; -set_work_lmt: - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq, - ec->rx_max_coalesced_frames_irq); +set_complete: return 0; } @@ -3396,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_fecparam = ice_set_fecparam, }; +static const struct ethtool_ops ice_ethtool_safe_mode_ops = { + .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, + .get_drvinfo = ice_get_drvinfo, + .get_regs_len = ice_get_regs_len, + .get_regs = ice_get_regs, + .get_msglevel = ice_get_msglevel, + .set_msglevel = ice_set_msglevel, + .get_eeprom_len = ice_get_eeprom_len, + .get_eeprom = ice_get_eeprom, + .get_strings = ice_get_strings, + .get_ethtool_stats = ice_get_ethtool_stats, + .get_sset_count = ice_get_sset_count, + .get_ringparam = ice_get_ringparam, + .set_ringparam = ice_set_ringparam, + .nway_reset = ice_nway_reset, +}; + +/** + * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops + * @netdev: network interface device structure + */ +void ice_set_ethtool_safe_mode_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ice_ethtool_safe_mode_ops; +} + /** * ice_set_ethtool_ops - setup netdev ethtool ops * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c new file mode 100644 index 000000000000..cbd53b586c36 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_flex_pipe.h" + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. 
+ */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *) + (nvms->vers + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +static bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
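+ *
+ * A typical caller therefore drives the enumeration with a loop along the
+ * lines of (illustrative sketch only):
+ *
+ *	struct ice_pkg_enum state = { 0 };
+ *	void *sect;
+ *
+ *	sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
+ *	while (sect) {
+ *		... process the section pointed to by sect ...
+ *		sect = ice_pkg_enum_section(NULL, &state, sect_type);
+ *	}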
+ */ +static void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + cpu_to_le32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + le32_to_cpu(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (!status) + mutex_lock(&ice_global_cfg_lock_sw); + else if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. 
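+ * It must only be called after a successful ice_acquire_global_cfg_lock(),
+ * since it drops both the driver-wide mutex and the firmware-owned resource.
+ * A write path, for instance, looks roughly like:
+ *
+ *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ *	if (!status) {
+ *		... download the package buffers ...
+ *		ice_release_global_cfg_lock(hw);
+ *	}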
+ */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + mutex_unlock(&ice_global_cfg_lock_sw); + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +static struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, + pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_status +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + struct ice_buf_hdr *bh; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_ERR_PARAM; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
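+	 * (a metadata buffer is identified by the ICE_METADATA_BUF bit being
+	 * set in the type of its first section entry)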
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return 0; + + /* reset pkg_dwnld_status in case this function is called in the + * reset/rebuild flow + */ + hw->pkg_dwnld_status = ICE_AQ_RC_OK; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; + else + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + return status; + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (le16_to_cpu(bh->section_count)) + if (le32_to_cpu(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + hw->pkg_dwnld_status = hw->adminq.sq_last_status; + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + + break; + } + + if (last) + break; + } + + ice_release_global_cfg_lock(hw); + + return status; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_status +ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + + ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, + ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
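+ * The global metadata segment supplies hw->pkg_ver and hw->pkg_name, and the
+ * ice segment header supplies hw->ice_pkg_ver and hw->ice_pkg_name; if either
+ * segment is missing, ICE_ERR_CFG is returned.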
+ */ +static enum ice_status +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_global_metadata_seg *meta_seg; + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_ERR_PARAM; + + meta_seg = (struct ice_global_metadata_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); + if (meta_seg) { + hw->pkg_ver = meta_seg->pkg_ver; + memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, + meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, + meta_seg->pkg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find metadata segment in driver package\n"); + return ICE_ERR_CFG; + } + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + hw->ice_pkg_ver = seg_hdr->seg_ver; + memcpy(hw->ice_pkg_name, seg_hdr->seg_name, + sizeof(hw->ice_pkg_name)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, + seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, + seg_hdr->seg_name); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_ERR_CFG; + } + + return 0; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +{ + struct ice_aqc_get_pkg_info_resp *pkg_info; + enum ice_status status; + u16 size; + u32 i; + + size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * + (ICE_PKG_CNT - 1)); + pkg_info = kzalloc(size, GFP_KERNEL); + if (!pkg_info) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); + if (status) + goto init_pkg_free_alloc; + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(hw->active_pkg_name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + kfree(pkg_info); + + return status; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. 
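+ * In order, the checks are: the package header fits within @len, the format
+ * version matches ICE_PKG_FMT_VER_MAJ/MNR/UPD/DFT, there is at least one
+ * segment, the segment offset array fits within @len, and every segment
+ * header and segment body lies entirely within @len.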
+ */ +static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < sizeof(*pkg)) + return ICE_ERR_BUF_TOO_SHORT; + + if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_ERR_CFG; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_ERR_CFG; + + /* make sure segment array fits in package length */ + if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) + return ICE_ERR_BUF_TOO_SHORT; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_ERR_BUF_TOO_SHORT; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_ERR_BUF_TOO_SHORT; + } + + return 0; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || + pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) + return ICE_ERR_NOT_SUPPORTED; + + return 0; +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. 
+ * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. + */ +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + struct ice_pkg_hdr *pkg; + enum ice_status status; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_ERR_PARAM; + + pkg = (struct ice_pkg_hdr *)buf; + status = ice_verify_pkg(pkg, len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + status); + return status; + } + + /* initialize package info */ + status = ice_init_pkg_info(hw, pkg); + if (status) + return status; + + /* before downloading the package, check package version for + * compatibility with driver + */ + status = ice_chk_pkg_version(&hw->pkg_ver); + if (status) + return status; + + /* find segment in given package */ + seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_ERR_CFG; + } + + /* download package */ + status = ice_download_pkg(hw, seg); + if (status == ICE_ERR_AQ_NO_WORK) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + status = 0; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!status) { + status = ice_get_pkg_info(hw); + if (!status) + status = ice_chk_pkg_version(&hw->active_pkg_ver); + } + + if (!status) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + status); + } + + return status; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. 
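+ *
+ * For example, a caller that obtains the package from the firmware loader
+ * (the file name and the dev/hw variables below are purely illustrative)
+ * might do:
+ *
+ *	const struct firmware *fw;
+ *
+ *	if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) {
+ *		ice_copy_and_init_pkg(hw, fw->data, fw->size);
+ *		release_firmware(fw);
+ *	}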
+ */ +enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_status status; + u8 *buf_copy; + + if (!buf || !len) + return ICE_ERR_PARAM; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + status = ice_init_pkg(hw, buf_copy, len); + if (status) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return status; +} + +/* PTG Management */ + +/** + * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to search for + * @ptg: pointer to variable that receives the PTG + * + * This function will search the PTGs for a particular ptype, returning the + * PTG ID that contains it through the PTG parameter, with the value of + * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. + */ +static enum ice_status +ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) +{ + if (ptype >= ICE_XLT1_CNT || !ptg) + return ICE_ERR_PARAM; + + *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; + return 0; +} + +/** + * ice_ptg_alloc_val - Allocates a new packet type group ID by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptg: the PTG to allocate + * + * This function allocates a given packet type group ID specified by the PTG + * parameter. + */ +static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) +{ + hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; +} + +/** + * ice_ptg_remove_ptype - Removes ptype from a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to remove + * @ptg: the PTG to remove the ptype from + * + * This function will remove the ptype from the specific PTG, and move it to + * the default PTG (ICE_DEFAULT_PTG). + */ +static enum ice_status +ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + struct ice_ptg_ptype **ch; + struct ice_ptg_ptype *p; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* Should not happen if .in_use is set, bad config */ + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) + return ICE_ERR_CFG; + + /* find the ptype within this PTG, and bypass the link over it */ + p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + while (p) { + if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { + *ch = p->next_ptype; + break; + } + + ch = &p->next_ptype; + p = p->next_ptype; + } + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; + + return 0; +} + +/** + * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group + * @hw: pointer to the hardware structure + * @blk: HW block + * @ptype: the ptype to add or move + * @ptg: the PTG to add or move the ptype to + * + * This function will either add or move a ptype to a particular PTG depending + * on if the ptype is already part of another group. Note that using a + * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the + * default PTG. 
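+ * A move is implemented as a removal from the original PTG (which returns the
+ * ptype to the default PTG) followed by an insertion at the head of the new
+ * PTG's ptype list; both the software ptg field and the XLT1 table entry for
+ * the ptype are then updated.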
+ */ +static enum ice_status +ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) +{ + enum ice_status status; + u8 original_ptg; + + if (ptype > ICE_XLT1_CNT - 1) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); + if (status) + return status; + + /* Is ptype already in the correct PTG? */ + if (original_ptg == ptg) + return 0; + + /* Remove from original PTG and move back to the default PTG */ + if (original_ptg != ICE_DEFAULT_PTG) + ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); + + /* Moving to default PTG? Then we're done with this request */ + if (ptg == ICE_DEFAULT_PTG) + return 0; + + /* Add ptype to PTG at beginning of list */ + hw->blk[blk].xlt1.ptypes[ptype].next_ptype = + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; + hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = + &hw->blk[blk].xlt1.ptypes[ptype]; + + hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; + hw->blk[blk].xlt1.t[ptype] = ptg; + + return 0; +} + +/* Block / table size info */ +struct ice_blk_size_details { + u16 xlt1; /* # XLT1 entries */ + u16 xlt2; /* # XLT2 entries */ + u16 prof_tcam; /* # profile ID TCAM entries */ + u16 prof_id; /* # profile IDs */ + u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ + u16 prof_redir; /* # profile redirection entries */ + u16 es; /* # extraction sequence entries */ + u16 fvw; /* # field vector words */ + u8 overwrite; /* overwrite existing entries allowed */ + u8 reverse; /* reverse FV order */ +}; + +static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { + /** + * Table Definitions + * XLT1 - Number of entries in XLT1 table + * XLT2 - Number of entries in XLT2 table + * TCAM - Number of entries Profile ID TCAM table + * CDID - Control Domain ID of the hardware block + * PRED - Number of entries in the Profile Redirection Table + * FV - Number of entries in the Field Vector + * FVW - Width (in WORDs) of the Field Vector + * OVR - Overwrite existing table entries + * REV - Reverse FV + */ + /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ + /* Overwrite , Reverse FV */ + /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, + false, false }, + /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, + false, false }, + /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + false, true }, + /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, + true, true }, + /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, + false, false }, +}; + +enum ice_sid_all { + ICE_SID_XLT1_OFF = 0, + ICE_SID_XLT2_OFF, + ICE_SID_PR_OFF, + ICE_SID_PR_REDIR_OFF, + ICE_SID_ES_OFF, + ICE_SID_OFF_COUNT, +}; + +/* VSIG Management */ + +/** + * ice_vsig_find_vsi - find a VSIG that contains a specified VSI + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI of interest + * @vsig: pointer to receive the VSI group + * + * This function will lookup the VSI entry in the XLT2 list and return + * the VSI group its associated with. + */ +static enum ice_status +ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) +{ + if (!vsig || vsi >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + + /* As long as there's a default or valid VSIG associated with the input + * VSI, the functions returns a success. Any handling of VSIG will be + * done by the following add, update or remove functions. 
+ */ + *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; + + return 0; +} + +/** + * ice_vsig_alloc_val - allocate a new VSIG by value + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsig: the VSIG to allocate + * + * This function will allocate a given VSIG specified by the VSIG parameter. + */ +static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) +{ + u16 idx = vsig & ICE_VSIG_IDX_M; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { + INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); + hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; + } + + return ICE_VSIG_VALUE(idx, hw->pf_id); +} + +/** + * ice_vsig_remove_vsi - remove VSI from VSIG + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to remove + * @vsig: VSI group to remove from + * + * The function will remove the input VSI from its VSI group and move it + * to the DEFAULT_VSIG. + */ +static enum ice_status +ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; + u16 idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) + return ICE_ERR_DOES_NOT_EXIST; + + /* entry already in default VSIG, don't have to remove */ + if (idx == ICE_DEFAULT_VSIG) + return 0; + + vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + if (!(*vsi_head)) + return ICE_ERR_CFG; + + vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; + vsi_cur = (*vsi_head); + + /* iterate the VSI list, skip over the entry to be removed */ + while (vsi_cur) { + if (vsi_tgt == vsi_cur) { + (*vsi_head) = vsi_cur->next_vsi; + break; + } + vsi_head = &vsi_cur->next_vsi; + vsi_cur = vsi_cur->next_vsi; + } + + /* verify if VSI was removed from group list */ + if (!vsi_cur) + return ICE_ERR_DOES_NOT_EXIST; + + vsi_cur->vsig = ICE_DEFAULT_VSIG; + vsi_cur->changed = 1; + vsi_cur->next_vsi = NULL; + + return 0; +} + +/** + * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group + * @hw: pointer to the hardware structure + * @blk: HW block + * @vsi: VSI to move + * @vsig: destination VSI group + * + * This function will move or add the input VSI to the target VSIG. + * The function will find the original VSIG the VSI belongs to and + * move the entry to the DEFAULT_VSIG, update the original VSIG and + * then move entry to the new VSIG. + */ +static enum ice_status +ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) +{ + struct ice_vsig_vsi *tmp; + enum ice_status status; + u16 orig_vsig, idx; + + idx = vsig & ICE_VSIG_IDX_M; + + if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) + return ICE_ERR_PARAM; + + /* if VSIG not in use and VSIG is not default type this VSIG + * doesn't exist. 
+ */ + if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && + vsig != ICE_DEFAULT_VSIG) + return ICE_ERR_DOES_NOT_EXIST; + + status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); + if (status) + return status; + + /* no update required if vsigs match */ + if (orig_vsig == vsig) + return 0; + + if (orig_vsig != ICE_DEFAULT_VSIG) { + /* remove entry from orig_vsig and add to default VSIG */ + status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); + if (status) + return status; + } + + if (idx == ICE_DEFAULT_VSIG) + return 0; + + /* Create VSI entry and add VSIG and prop_mask values */ + hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; + hw->blk[blk].xlt2.vsis[vsi].changed = 1; + + /* Add new entry to the head of the VSIG list */ + tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; + hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = + &hw->blk[blk].xlt2.vsis[vsi]; + hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; + hw->blk[blk].xlt2.t[vsi] = vsig; + + return 0; +} + +/* Block / table section IDs */ +static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { + /* SWITCH */ + { ICE_SID_XLT1_SW, + ICE_SID_XLT2_SW, + ICE_SID_PROFID_TCAM_SW, + ICE_SID_PROFID_REDIR_SW, + ICE_SID_FLD_VEC_SW + }, + + /* ACL */ + { ICE_SID_XLT1_ACL, + ICE_SID_XLT2_ACL, + ICE_SID_PROFID_TCAM_ACL, + ICE_SID_PROFID_REDIR_ACL, + ICE_SID_FLD_VEC_ACL + }, + + /* FD */ + { ICE_SID_XLT1_FD, + ICE_SID_XLT2_FD, + ICE_SID_PROFID_TCAM_FD, + ICE_SID_PROFID_REDIR_FD, + ICE_SID_FLD_VEC_FD + }, + + /* RSS */ + { ICE_SID_XLT1_RSS, + ICE_SID_XLT2_RSS, + ICE_SID_PROFID_TCAM_RSS, + ICE_SID_PROFID_REDIR_RSS, + ICE_SID_FLD_VEC_RSS + }, + + /* PE */ + { ICE_SID_XLT1_PE, + ICE_SID_XLT2_PE, + ICE_SID_PROFID_TCAM_PE, + ICE_SID_PROFID_REDIR_PE, + ICE_SID_FLD_VEC_PE + } +}; + +/** + * ice_init_sw_xlt1_db - init software XLT1 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 pt; + + for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { + u8 ptg; + + ptg = hw->blk[blk].xlt1.t[pt]; + if (ptg != ICE_DEFAULT_PTG) { + ice_ptg_alloc_val(hw, blk, ptg); + ice_ptg_add_mv_ptype(hw, blk, pt, ptg); + } + } +} + +/** + * ice_init_sw_xlt2_db - init software XLT2 database from HW tables + * @hw: pointer to the hardware structure + * @blk: the HW block to initialize + */ +static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) +{ + u16 vsi; + + for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { + u16 vsig; + + vsig = hw->blk[blk].xlt2.t[vsi]; + if (vsig) { + ice_vsig_alloc_val(hw, blk, vsig); + ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); + /* no changes at this time, since this has been + * initialized from the original package + */ + hw->blk[blk].xlt2.vsis[vsi].changed = 0; + } + } +} + +/** + * ice_init_sw_db - init software database from HW tables + * @hw: pointer to the hardware structure + */ +static void ice_init_sw_db(struct ice_hw *hw) +{ + u16 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + ice_init_sw_xlt1_db(hw, (enum ice_block)i); + ice_init_sw_xlt2_db(hw, (enum ice_block)i); + } +} + +/** + * ice_fill_tbl - Reads content of a single table type into database + * @hw: pointer to the hardware structure + * @block_id: Block ID of the table to copy + * @sid: Section ID of the table to copy + * + * Will attempt to read the entire content of a given table of a single block + * into the driver database. We assume that the buffer will always + * be as large or larger than the data contained in the package. 
If + * this condition is not met, there is most likely an error in the package + * contents. + */ +static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) +{ + u32 dst_len, sect_len, offset = 0; + struct ice_prof_redir_section *pr; + struct ice_prof_id_section *pid; + struct ice_xlt1_section *xlt1; + struct ice_xlt2_section *xlt2; + struct ice_sw_fv_section *es; + struct ice_pkg_enum state; + u8 *src, *dst; + void *sect; + + /* if the HW segment pointer is null then the first iteration of + * ice_pkg_enum_section() will fail. In this case the HW tables will + * not be filled and return success. + */ + if (!hw->seg) { + ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); + return; + } + + memset(&state, 0, sizeof(state)); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = le16_to_cpu(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct ice_xlt2_section *)sect; + src = (__force u8 *)xlt2->value; + sect_len = le16_to_cpu(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = le16_to_cpu(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = le16_to_cpu(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(le16_to_cpu(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. 
+ */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + memcpy(dst + offset, src, sect_len); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_fill_blk_tbls - Read package context for tables + * @hw: pointer to the hardware structure + * + * Reads the current package contents and populates the driver + * database with the data iteratively for all advanced feature + * blocks. Assume that the HW tables have been allocated. + */ +void ice_fill_blk_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + enum ice_block blk_id = (enum ice_block)i; + + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); + ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); + } + + ice_init_sw_db(hw); +} + +/** + * ice_free_hw_tbls - free hardware table memory + * @hw: pointer to the hardware structure + */ +void ice_free_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + hw->blk[i].is_list_init = false; + + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); + } + + memset(hw->blk, 0, sizeof(hw->blk)); +} + +/** + * ice_clear_hw_tbls - clear HW tables and flow profiles + * @hw: pointer to the hardware structure + */ +void ice_clear_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + + memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); + memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); + memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); + + memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); + memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl)); + memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); + + memset(prof->t, 0, prof->count * sizeof(*prof->t)); + memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t)); + + memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->written, 0, es->count * sizeof(*es->written)); + } +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = 
blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->ptypes), GFP_KERNEL); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, + sizeof(*xlt1->ptg_tbl), + GFP_KERNEL); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, + sizeof(*xlt1->t), GFP_KERNEL); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsis), GFP_KERNEL); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->vsig_tbl), + GFP_KERNEL); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, + sizeof(*xlt2->t), GFP_KERNEL); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, + sizeof(*prof->t), GFP_KERNEL); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), + prof_redir->count, + sizeof(*prof_redir->t), + GFP_KERNEL); + + if (!prof_redir->t) + goto err; + + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = devm_kcalloc(ice_hw_to_dev(hw), + (u32)(es->count * es->fvw), + sizeof(*es->t), GFP_KERNEL); + if (!es->t) + goto err; + + es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->ref_count), + GFP_KERNEL); + + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->written), GFP_KERNEL); + if (!es->ref_count) + goto err; + } + return 0; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; +} diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h new file mode 100644 index 000000000000..37eb282742d1 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_FLEX_PIPE_H_ +#define _ICE_FLEX_PIPE_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_status +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +void ice_free_seg(struct ice_hw *hw); +void ice_fill_blk_tbls(struct ice_hw *hw); +void ice_clear_hw_tbls(struct ice_hw *hw); +void ice_free_hw_tbls(struct ice_hw *hw); +#endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h new file mode 100644 index 000000000000..5d5a7eaffa30 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -0,0 +1,374 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_FLEX_TYPE_H_ +#define _ICE_FLEX_TYPE_H_ +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +} __packed; + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver format_ver; + __le32 seg_count; + __le32 seg_offset[1]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_ver; + __le32 seg_size; + char seg_name[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[1]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[1]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[1]; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 track_id; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[1]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 + +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define 
ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 + +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 + +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 + +#define ICE_SID_RXPARSER_BOOST_TCAM 56 + +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[1]; +}; + +#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_label_section) - sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 count; + __le16 base_offset; + struct ice_fv fv[1]; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +} __packed; + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. 
+ */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[1]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ + sizeof(struct ice_boost_tcam_section) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[1]; +} __packed; + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[1]; +}; + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[1]; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +struct ice_es { + u32 sid; + u16 count; + u16 fvw; + u16 *ref_count; + struct list_head prof_map; + struct ice_fv_word *t; + struct mutex prof_map_lock; /* protect access to profiles list */ + u8 *written; + u8 reverse; /* set to true to reverse FV order */ +}; + +/* PTYPE Group management */ + +/* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type + * group (PTG) ID as output. + * + * Note: PTG 0 is the default packet type group and it is assumed that all PTYPE + * are a part of this group until moved to a new PTG. + */ +#define ICE_DEFAULT_PTG 0 + +struct ice_ptg_entry { + struct ice_ptg_ptype *first_ptype; + u8 in_use; +}; + +struct ice_ptg_ptype { + struct ice_ptg_ptype *next_ptype; + u8 ptg; +}; + +struct ice_vsig_entry { + struct list_head prop_lst; + struct ice_vsig_vsi *first_vsi; + u8 in_use; +}; + +struct ice_vsig_vsi { + struct ice_vsig_vsi *next_vsi; + u32 prop_mask; + u16 changed; + u16 vsig; +}; + +#define ICE_XLT1_CNT 1024 +#define ICE_MAX_PTGS 256 + +/* XLT1 Table */ +struct ice_xlt1 { + struct ice_ptg_entry *ptg_tbl; + struct ice_ptg_ptype *ptypes; + u8 *t; + u32 sid; + u16 count; +}; + +#define ICE_XLT2_CNT 768 +#define ICE_MAX_VSIGS 768 + +/* VSIG bit layout: + * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS + * [13:15]: PF number of device + */ +#define ICE_VSIG_IDX_M (0x1FFF) +#define ICE_PF_NUM_S 13 +#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) +#define ICE_VSIG_VALUE(vsig, pf_id) \ + (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) +#define ICE_DEFAULT_VSIG 0 + +/* XLT2 Table */ +struct ice_xlt2 { + struct ice_vsig_entry *vsig_tbl; + struct ice_vsig_vsi *vsis; + u16 *t; + u32 sid; + u16 count; +}; + +/* Keys are made up of two values, each one-half the size of the key. 
+ * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) + */ +#define ICE_TCAM_KEY_VAL_SZ 5 +#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) + +struct ice_prof_tcam_entry { + __le16 addr; + u8 key[ICE_TCAM_KEY_SZ]; + u8 prof_id; +} __packed; + +struct ice_prof_id_section { + __le16 count; + struct ice_prof_tcam_entry entry[1]; +} __packed; + +struct ice_prof_tcam { + u32 sid; + u16 count; + u16 max_prof_id; + struct ice_prof_tcam_entry *t; + u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ +}; + +struct ice_prof_redir { + u8 *t; + u32 sid; + u16 count; +}; + +/* Tables per block */ +struct ice_blk_info { + struct ice_xlt1 xlt1; + struct ice_xlt2 xlt2; + struct ice_prof_tcam prof; + struct ice_prof_redir prof_redir; + struct ice_es es; + u8 overwrite; /* set to true to allow overwrite of table entries */ + u8 is_list_init; +}; + +#endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6c5ce05742b1..152fbd556e9b 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -55,6 +55,8 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) +#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) +#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) @@ -127,8 +129,11 @@ #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3) #define GLINT_DYN_CTL_INTERVAL_S 5 +#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) #define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) @@ -281,14 +286,10 @@ #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) #define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) #define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) #define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) #define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) #define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) #define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) #define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) @@ -296,38 +297,22 @@ #define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) #define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) #define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) #define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) #define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) #define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) #define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) #define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) #define GLPRT_PRC1522L(_i) 
(0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) #define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) #define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) #define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) #define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) #define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) #define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) #define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) #define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) #define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) #define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) #define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) @@ -340,32 +325,23 @@ #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) #define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) #define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) #define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) #define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) #define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) #define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) #define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) #define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) #define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define PF_VT_PFALLOC_HIF 0x0009DD80 #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 510a8c900e61..2aac8f13daeb 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -290,6 +290,7 @@ struct ice_rlan_ctx { u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ }; struct ice_ctx_ele { @@ -427,6 +428,7 @@ struct ice_tlan_ctx { #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; u8 tsyn_ena; + u8 internal_usage_flag; u8 alt_vlan; u16 cpuid; /* bigger than needed, see above for reason */ u8 wb_mode; diff --git 
a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a19f5920733b..cc755382df25 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -191,41 +191,58 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) } /** - * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring * @vsi: the VSI being configured * @ena: start or stop the Rx rings + * @rxq_idx: Rx queue index */ -static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) { + int pf_q = vsi->rxq_map[rxq_idx]; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int i, ret = 0; + int ret = 0; + u32 rx_reg; - for (i = 0; i < vsi->num_rxq; i++) { - int pf_q = vsi->rxq_map[i]; - u32 rx_reg; + rx_reg = rd32(hw, QRX_CTRL(pf_q)); - rx_reg = rd32(hw, QRX_CTRL(pf_q)); + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + return 0; - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - continue; + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? "en" : "dis")); + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + + return ret; +} + +/** + * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + */ +static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) +{ + int i, ret = 0; + + for (i = 0; i < vsi->num_rxq; i++) { + ret = ice_vsi_ctrl_rx_ring(vsi, ena, i); + if (ret) break; - } } return ret; @@ -246,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) - goto err_txrings; + return -ENOMEM; vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) - goto err_rxrings; + goto err_rings; + + vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + sizeof(*vsi->txq_map), GFP_KERNEL); + + if (!vsi->txq_map) + goto err_txq_map; + + vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + sizeof(*vsi->rxq_map), GFP_KERNEL); + if (!vsi->rxq_map) + goto err_rxq_map; + /* There is no need to allocate q_vectors for a loopback VSI. 
*/ if (vsi->type == ICE_VSI_LB) @@ -266,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) return 0; err_vectors: + devm_kfree(&pf->pdev->dev, vsi->rxq_map); +err_rxq_map: + devm_kfree(&pf->pdev->dev, vsi->txq_map); +err_txq_map: devm_kfree(&pf->pdev->dev, vsi->rx_rings); -err_rxrings: +err_rings: devm_kfree(&pf->pdev->dev, vsi->tx_rings); -err_txrings: return -ENOMEM; } @@ -311,9 +343,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) switch (vsi->type) { case ICE_VSI_PF: - vsi->alloc_txq = pf->num_lan_tx; - vsi->alloc_rxq = pf->num_lan_rx; - vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); + vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), + num_online_cpus()); + + pf->num_lan_tx = vsi->alloc_txq; + + /* only 1 Rx queue unless RSS is enabled */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + vsi->alloc_rxq = 1; + else + vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), + num_online_cpus()); + + pf->num_lan_rx = vsi->alloc_rxq; + + vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; @@ -416,6 +460,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) devm_kfree(&pf->pdev->dev, vsi->rx_rings); vsi->rx_rings = NULL; } + if (vsi->txq_map) { + devm_kfree(&pf->pdev->dev, vsi->txq_map); + vsi->txq_map = NULL; + } + if (vsi->rxq_map) { + devm_kfree(&pf->pdev->dev, vsi->rxq_map); + vsi->rxq_map = NULL; + } } /** @@ -508,8 +560,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) vsi->type = type; vsi->back = pf; set_bit(__ICE_DOWN, vsi->state); + vsi->idx = pf->next_vsi; - vsi->work_lmt = ICE_DFLT_IRQ_WORK; if (type == ICE_VSI_VF) ice_vsi_set_num_qs(vsi, vf_id); @@ -647,7 +699,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg tx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_txqs, - .pf_map_size = ICE_MAX_TXQS, + .pf_map_size = pf->max_pf_txqs, .q_count = vsi->alloc_txq, .scatter_count = ICE_MAX_SCATTER_TXQS, .vsi_map = vsi->txq_map, @@ -657,7 +709,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi) struct ice_qs_cfg rx_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, .pf_map = pf->avail_rxqs, - .pf_map_size = ICE_MAX_RXQS, + .pf_map_size = pf->max_pf_rxqs, .q_count = vsi->alloc_rxq, .scatter_count = ICE_MAX_SCATTER_RXQS, .vsi_map = vsi->rxq_map, @@ -701,6 +753,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi) } /** + * ice_is_safe_mode + * @pf: pointer to the PF struct + * + * returns true if driver is in safe mode, false otherwise + */ +bool ice_is_safe_mode(struct ice_pf *pf) +{ + return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + +/** * ice_rss_clean - Delete RSS related VSI structures that hold user inputs * @vsi: the VSI being removed */ @@ -1010,6 +1073,13 @@ static int ice_vsi_init(struct ice_vsi *vsi) ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; } + /* Allow control frames out of main VSI */ + if (vsi->type == ICE_VSI_PF) { + ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + } + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, @@ -1129,12 +1199,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return -EEXIST; } - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - num_q_vectors = vsi->num_q_vectors; - } else { - err = -EINVAL; - goto err_out; - } + num_q_vectors = vsi->num_q_vectors; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = ice_vsi_alloc_q_vector(vsi, v_idx); @@ 
-1180,9 +1245,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) return -EEXIST; } - if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return -ENOENT; - num_q_vectors = vsi->num_q_vectors; /* reserve slots from OS requested IRQs */ vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, @@ -1477,40 +1539,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi) prev_es = &vsi->eth_stats_prev; cur_es = &vsi->eth_stats; - ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_bytes, - &cur_es->rx_bytes); + ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_bytes, &cur_es->rx_bytes); - ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_unicast, - &cur_es->rx_unicast); + ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_unicast, &cur_es->rx_unicast); - ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_multicast, - &cur_es->rx_multicast); + ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_multicast, &cur_es->rx_multicast); - ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->rx_broadcast, - &cur_es->rx_broadcast); + ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->rx_broadcast, &cur_es->rx_broadcast); ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->rx_discards, &cur_es->rx_discards); - ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_bytes, - &cur_es->tx_bytes); + ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_bytes, &cur_es->tx_bytes); - ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_unicast, - &cur_es->tx_unicast); + ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_unicast, &cur_es->tx_unicast); - ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_multicast, - &cur_es->tx_multicast); + ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_multicast, &cur_es->tx_multicast); - ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num), - vsi->stat_offsets_loaded, &prev_es->tx_broadcast, - &cur_es->tx_broadcast); + ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded, + &prev_es->tx_broadcast, &cur_es->tx_broadcast); ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded, &prev_es->tx_errors, &cur_es->tx_errors); @@ -1658,6 +1712,62 @@ setup_rings: } /** + * ice_vsi_cfg_txq - Configure single Tx queue + * @vsi: the VSI that queue belongs to + * @ring: Tx ring to be configured + * @tc_q_idx: queue index within given TC + * @qg_buf: queue group buffer + * @tc: TC that Tx ring belongs to + */ +static int +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx, + struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc) +{ + struct ice_tlan_ctx tlan_ctx = { 0 }; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + u8 buf_len = sizeof(*qg_buf); + enum ice_status status; + u16 pf_q; + + pf_q = ring->reg_idx; + ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init 
queue specific tail reg. It is referred as + * transmit comm scheduler queue doorbell. + */ + ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + + /* Add unique software queue handle of the Tx queue per + * TC into the VSI Tx ring + */ + ring->q_handle = tc_q_idx; + + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, + 1, qg_buf, buf_len, NULL); + if (status) { + dev_err(&pf->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + return -ENODEV; + } + + /* Add Tx Queue TEID into the VSI Tx ring from the + * response. This will complete configuring and + * enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + ring->txq_teid = le32_to_cpu(txq->q_teid); + + return 0; +} + +/** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured * @rings: Tx ring array to be configured @@ -1670,20 +1780,16 @@ static int ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) { struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; - u8 num_q_grps, q_idx = 0; - enum ice_status status; - u16 buf_len, i, pf_q; - int err = 0, tc; + u16 q_idx = 0, i; + int err = 0; + u8 tc; - buf_len = sizeof(*qg_buf); - qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); + qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL); if (!qg_buf) return -ENOMEM; qg_buf->num_txqs = 1; - num_q_grps = 1; /* set up and configure the Tx queues for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -1691,39 +1797,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_tlan_ctx tlan_ctx = { 0 }; - - pf_q = vsi->txq_map[q_idx + offset]; - ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as - * transmit comm scheduler queue doorbell. - */ - rings[q_idx]->tail = - pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - i, num_q_grps, qg_buf, - buf_len, NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; + err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset, + qg_buf, tc); + if (err) goto err_cfg_txqs; - } - - /* Add Tx Queue TEID into the VSI Tx ring from the - * response. This will complete configuring and - * enabling the queue. 
- */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - rings[q_idx]->txq_teid = - le32_to_cpu(txq->q_teid); q_idx++; } @@ -2070,45 +2147,112 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) } /** - * ice_vsi_stop_tx_rings - Disable Tx rings + * ice_vsi_stop_tx_ring - Disable single Tx ring * @vsi: the VSI being configured * @rst_src: reset source * @rel_vmvf_num: Relative ID of VF/VM - * @rings: Tx ring array to be stopped - * @offset: offset within vsi->txq_map + * @ring: Tx ring to be stopped + * @txq_meta: Meta data of Tx ring to be stopped */ -static int -ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings, int offset) +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) { struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - int tc, q_idx = 0, err = 0; - u16 *q_ids, *q_handles, i; enum ice_status status; - u32 *q_teids, val; + u32 val; - if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) - return -EINVAL; + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(ring->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(ring->reg_idx), val); - q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), - GFP_KERNEL); - if (!q_teids) - return -ENOMEM; + /* software is expected to wait for 100 ns */ + ndelay(100); - q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), - GFP_KERNEL); - if (!q_ids) { - err = -ENOMEM; - goto err_alloc_q_ids; + /* trigger a software interrupt for the vector + * associated to the queue to schedule NAPI handler + */ + q_vector = ring->q_vector; + if (q_vector) + ice_trigger_sw_intr(hw, q_vector); + + status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, + txq_meta->tc, 1, &txq_meta->q_handle, + &txq_meta->q_id, &txq_meta->q_teid, rst_src, + rel_vmvf_num, NULL); + + /* if the disable queue command was exercised during an + * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * This is not an error as the reset operation disables + * queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&vsi->back->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&vsi->back->pdev->dev, + "LAN Tx queues do not exist, nothing to disable\n"); + } else if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", status); + return -ENODEV; } - q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, - sizeof(*q_handles), GFP_KERNEL); - if (!q_handles) { - err = -ENOMEM; - goto err_alloc_q_handles; - } + return 0; +} + +/** + * ice_fill_txq_meta - Prepare the Tx queue's meta data + * @vsi: VSI that ring belongs to + * @ring: ring that txq_meta will be based on + * @txq_meta: a helper struct that wraps Tx queue's information + * + * Set up a helper struct that will contain all the necessary fields that + * are needed for stopping Tx queue + */ +#ifndef CONFIG_PCI_IOV +static +#endif /* !CONFIG_PCI_IOV */ +void +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) +{ + u8 tc = 0; + +#ifdef CONFIG_DCB + tc = ring->dcb_tc; +#endif /* CONFIG_DCB */ + txq_meta->q_id = ring->reg_idx; + txq_meta->q_teid = ring->txq_teid; + txq_meta->q_handle = ring->q_handle; + txq_meta->vsi_idx = vsi->idx; + txq_meta->tc = tc; +} + +/** + * ice_vsi_stop_tx_rings - Disable Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM + * @rings: Tx ring array to be stopped + */ +static int +ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring **rings) +{ + u16 i, q_idx = 0; + int status; + u8 tc; + + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) + return -EINVAL; /* set up the Tx queue list to be disabled for each enabled TC */ ice_for_each_traffic_class(tc) { @@ -2116,64 +2260,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, break; for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_q_vector *q_vector; + struct ice_txq_meta txq_meta = { }; - if (!rings || !rings[q_idx]) { - err = -EINVAL; - goto err_out; - } - - q_ids[i] = vsi->txq_map[q_idx + offset]; - q_teids[i] = rings[q_idx]->txq_teid; - q_handles[i] = i; + if (!rings || !rings[q_idx]) + return -EINVAL; - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); + ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); + status = ice_vsi_stop_tx_ring(vsi, rst_src, + rel_vmvf_num, + rings[q_idx], &txq_meta); - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector - * associated to the queue to schedule NAPI handler - */ - q_vector = rings[i]->q_vector; - if (q_vector) - ice_trigger_sw_intr(hw, q_vector); + if (status) + return status; q_idx++; } - status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc, - vsi->num_txq, q_handles, q_ids, - q_teids, rst_src, rel_vmvf_num, NULL); - - /* if the disable queue command was exercised during an active - * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not - * an error as the reset operation disables queues at the - * hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_dbg(&pf->pdev->dev, - "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status) { - dev_err(&pf->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", - status); - err = -ENODEV; - } } -err_out: - devm_kfree(&pf->pdev->dev, q_handles); - -err_alloc_q_handles: - devm_kfree(&pf->pdev->dev, q_ids); - -err_alloc_q_ids: - devm_kfree(&pf->pdev->dev, q_teids); - - return err; + return 0; } /** @@ -2186,8 +2290,7 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, - 0); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); } /** @@ -2497,9 +2600,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (ret) goto unroll_vector_base; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; - /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. Hence no need to capture * return value @@ -2519,7 +2619,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -2540,15 +2640,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * DCB settings in the HW. Also, if the FW DCBX engine is not running * then Rx LLDP packets need to be redirected up the stack. */ - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, true); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, true); - /* Tx LLDP packets */ - ice_cfg_sw_lldp(vsi, true, true); + /* Tx LLDP packets */ + ice_cfg_sw_lldp(vsi, true, true); - /* Rx LLDP packets */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) - ice_cfg_sw_lldp(vsi, false, true); + /* Rx LLDP packets */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, true); + } } return vsi; @@ -2563,8 +2665,6 @@ unroll_vsi_init: ice_vsi_delete(vsi); unroll_get_qs: ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); return NULL; @@ -2610,39 +2710,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - if (!vsi->q_vectors || !vsi->irqs_ready) - return; + if (!vsi->q_vectors || !vsi->irqs_ready) + return; - ice_vsi_release_msix(vsi); - if (vsi->type == ICE_VSI_VF) - return; + ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; - vsi->irqs_ready = false; - ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; - int irq_num; + vsi->irqs_ready = false; + ice_for_each_q_vector(vsi, i) { + u16 vector = i + base; + int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = pf->msix_entries[vector].vector; - /* free only the irqs that were actually requested */ - if (!vsi->q_vectors[i] || - !(vsi->q_vectors[i]->num_ring_tx || - vsi->q_vectors[i]->num_ring_rx)) - continue; + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !(vsi->q_vectors[i]->num_ring_tx || + vsi->q_vectors[i]->num_ring_rx)) + continue; - /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); - /* clear the 
affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); - synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); - } + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(&pf->pdev->dev, irq_num, + vsi->q_vectors[i]); } } @@ -2821,15 +2918,20 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /* disable each interrupt */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - ice_for_each_q_vector(vsi, i) - wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + ice_for_each_q_vector(vsi, i) { + if (!vsi->q_vectors[i]) + continue; + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + } - ice_flush(hw); + ice_flush(hw); - ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); - } + /* don't call synchronize_irq() for VF's from the host */ + if (vsi->type == ICE_VSI_VF) + return; + + ice_for_each_q_vector(vsi, i) + synchronize_irq(pf->msix_entries[i + base].vector); } /** @@ -2889,14 +2991,16 @@ int ice_vsi_release(struct ice_vsi *vsi) pf->num_avail_sw_msix += vsi->num_q_vectors; } - if (vsi->type == ICE_VSI_PF) { - ice_vsi_add_rem_eth_mac(vsi, false); - ice_cfg_sw_lldp(vsi, true, false); - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); + if (!ice_is_safe_mode(pf)) { + if (vsi->type == ICE_VSI_PF) { + ice_vsi_add_rem_eth_mac(vsi, false); + ice_cfg_sw_lldp(vsi, true, false); + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) + ice_cfg_sw_lldp(vsi, false, false); + } } ice_remove_vsi_fltr(&pf->hw, vsi->idx); @@ -2913,8 +3017,6 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_clear_rings(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; /* retain SW VSI data structure since it is needed to unregister and * free VSI netdev when PF is not in reset recovery pending state,\ @@ -2962,6 +3064,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) vsi->base_vector = 0; } + ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); ice_dev_onetime_setup(&pf->hw); @@ -2969,6 +3072,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_set_num_qs(vsi, vf->vf_id); else ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID); + + ret = ice_vsi_alloc_arrays(vsi); + if (ret < 0) + goto err_vsi; + + ice_vsi_get_qs(vsi); ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ @@ -2976,9 +3085,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret < 0) goto err_vsi; - ret = ice_vsi_alloc_arrays(vsi); - if (ret < 0) - goto err_vsi; switch (vsi->type) { case ICE_VSI_PF: @@ -2986,6 +3092,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3007,10 +3117,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_rings; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto err_vectors; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); if (ret) goto err_vectors; @@ -3019,8 +3125,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret) goto err_vectors; - pf->q_left_tx -= vsi->alloc_txq; - pf->q_left_rx -= vsi->alloc_rxq; break; default: break; @@ -3028,7 +3132,7 @@ int ice_vsi_rebuild(struct 
ice_vsi *vsi) /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -3083,48 +3187,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) } /** - * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration - * @vsi: the VSI being configured - * @ena_tc: TC map to be enabled - */ -static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) -{ - struct net_device *netdev = vsi->netdev; - struct ice_pf *pf = vsi->back; - struct ice_dcbx_cfg *dcbcfg; - u8 netdev_tc; - int i; - - if (!netdev) - return; - - if (!ena_tc) { - netdev_reset_tc(netdev); - return; - } - - if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) - return; - - dcbcfg = &pf->hw.port_info->local_dcbx_cfg; - - ice_for_each_traffic_class(i) - if (vsi->tc_cfg.ena_tc & BIT(i)) - netdev_set_tc_queue(netdev, - vsi->tc_cfg.tc_info[i].netdev_tc, - vsi->tc_cfg.tc_info[i].qcount_tx, - vsi->tc_cfg.tc_info[i].qoffset); - - for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { - u8 ets_tc = dcbcfg->etscfg.prio_table[i]; - - /* Get the mapped netdev TC# for the UP */ - netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; - netdev_set_prio_tc_map(netdev, i, netdev_tc); - } -} - -/** * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map * @vsi: VSI to be configured * @ena_tc: TC bitmap @@ -3145,7 +3207,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (ena_tc & BIT(i)) num_tc++; /* populate max_txqs per TC */ - max_txqs[i] = pf->num_lan_tx; + max_txqs[i] = vsi->alloc_txq; } vsi->tc_cfg.ena_tc = ena_tc; @@ -3188,3 +3250,52 @@ out: return ret; } #endif /* CONFIG_DCB */ + +/** + * ice_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + */ +char *ice_nvm_version_str(struct ice_hw *hw) +{ + u8 oem_ver, oem_patch, ver_hi, ver_lo; + static char buf[ICE_NVM_VER_LEN]; + u16 oem_build; + + ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi, + &ver_lo); + + snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo, + hw->nvm.eetrack, oem_ver, oem_build, oem_patch); + + return buf; +} + +/** + * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI + * @vsi: the VSI being configured MAC filter + * @macaddr: the MAC address to be added. 
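/* Editor's illustrative sketch, not part of the patch: ice_vsi_cfg_mac_fltr()
 * wraps the add-to-list / program / free-list sequence in a single call, with
 * @set selecting between ice_add_mac() and ice_remove_mac(). A hypothetical
 * caller toggling a made-up unicast address on a VSI could look like this
 * (the address and function name are for illustration only):
 */
static int example_toggle_mac(struct ice_vsi *vsi, bool enable)
{
        /* example address only; real callers pass a port or netdev MAC */
        static const u8 addr[ETH_ALEN] = { 0x00, 0xa0, 0xc9, 0x12, 0x34, 0x56 };

        /* set = true adds the filter, set = false removes it */
        if (ice_vsi_cfg_mac_fltr(vsi, addr, enable))
                return -EIO;

        return 0;
}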
+ * @set: Add or delete a MAC filter + * + * Adds or removes MAC address filter entry for VF VSI + */ +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set) +{ + LIST_HEAD(tmp_add_list); + enum ice_status status; + + /* Update MAC filter list to be added or removed for a VSI */ + if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) { + status = ICE_ERR_NO_MEMORY; + goto cfg_mac_fltr_exit; + } + + if (set) + status = ice_add_mac(&vsi->back->hw, &tmp_add_list); + else + status = ice_remove_mac(&vsi->back->hw, &tmp_add_list); + +cfg_mac_fltr_exit: + ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list); + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6e43ef03bfc3..47bc033fff20 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -6,8 +6,22 @@ #include "ice.h" -int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, - const u8 *macaddr); +struct ice_txq_meta { + /* Tx-scheduler element identifier */ + u32 q_teid; + /* Entry in VSI's txq_map bitmap */ + u16 q_id; + /* Relative index of Tx queue within TC */ + u16 q_handle; + /* VSI index that Tx queue belongs to */ + u16 vsi_idx; + /* TC number that Tx queue belongs to */ + u8 tc; +}; + +int +ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, + const u8 *macaddr); void ice_free_fltr_list(struct device *dev, struct list_head *h); @@ -25,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); void ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); + +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); + +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); #endif /* CONFIG_PCI_IOV */ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); @@ -95,4 +119,11 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); + +char *ice_nvm_version_str(struct ice_hw *hw); + +enum ice_status +ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); + +bool ice_is_safe_mode(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 63db08d9bafa..214cd6eca405 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -9,16 +9,27 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" -#define DRV_VERSION "0.7.4-k" +#define DRV_VERSION_MAJOR 0 +#define DRV_VERSION_MINOR 8 +#define DRV_VERSION_BUILD 1 + +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) "-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; +/* DDP Package file located in firmware search paths (e.g. 
/lib/firmware/) */ +#define ICE_DDP_PKG_PATH "intel/ice/ddp/" +#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" + MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); +MODULE_FIRMWARE(ICE_DDP_PKG_FILE); static int debug = -1; module_param(debug, int, 0644); @@ -29,24 +40,23 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); #endif /* !CONFIG_DYNAMIC_DEBUG */ static struct workqueue_struct *ice_wq; +static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; -static void ice_rebuild(struct ice_pf *pf); +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); static void ice_vsi_release_all(struct ice_pf *pf); -static void ice_update_vsi_stats(struct ice_vsi *vsi); -static void ice_update_pf_stats(struct ice_pf *pf); /** * ice_get_tx_pending - returns number of Tx descriptors not processed * @ring: the ring of descriptors */ -static u32 ice_get_tx_pending(struct ice_ring *ring) +static u16 ice_get_tx_pending(struct ice_ring *ring) { - u32 head, tail; + u16 head, tail; head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? @@ -118,12 +128,11 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - LIST_HEAD(tmp_add_list); + enum ice_status status; u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; - int status; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi) return -EINVAL; @@ -132,8 +141,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) */ /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_add_mac_to_list(vsi, &tmp_add_list, - vsi->port_info->mac.perm_addr); + status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); if (status) goto unregister; @@ -141,18 +149,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf) * MAC address to the list as well. 
*/ eth_broadcast_addr(broadcast); - status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); - if (status) - goto free_mac_list; - - /* Program MAC filters for entries in tmp_add_list */ - status = ice_add_mac(&pf->hw, &tmp_add_list); + status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); if (status) - status = -ENOMEM; - -free_mac_list: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + goto unregister; + return 0; unregister: /* We aren't useful with no MAC filters, so unregister if we * had an error @@ -166,7 +167,7 @@ unregister: vsi->netdev = NULL; } - return status; + return -EIO; } /** @@ -447,13 +448,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { - if (!locked) { + if (!locked) rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + + ice_stop(vsi->netdev); + + if (!locked) rtnl_unlock(); - } else { - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - } } else { ice_vsi_close(vsi); } @@ -488,6 +489,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + int i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -497,6 +499,12 @@ ice_prepare_for_reset(struct ice_pf *pf) if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); + /* Disable VFs until reset is completed */ + for (i = 0; i < pf->num_alloc_vfs; i++) + ice_set_vf_state_qs_dis(&pf->vf[i]); + + /* clear SW filtering DB */ + ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false); @@ -542,7 +550,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); ice_reset_all_vfs(pf, true); @@ -573,6 +581,8 @@ static void ice_reset_subtask(struct ice_pf *pf) reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) reset_type = ICE_RESET_GLOBR; + if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state)) + reset_type = ICE_RESET_EMPR; /* return if no valid reset type requested */ if (reset_type == ICE_RESET_INVAL) return; @@ -584,7 +594,7 @@ static void ice_reset_subtask(struct ice_pf *pf) } else { /* done with reset. 
start rebuild */ pf->hw.reset_ongoing = false; - ice_rebuild(pf); + ice_rebuild(pf, reset_type); /* clear bit to resume normal operations, but * ICE_NEEDS_RESTART bit is set in case rebuild failed */ @@ -618,6 +628,22 @@ static void ice_reset_subtask(struct ice_pf *pf) } /** + * ice_print_topo_conflict - print topology conflict message + * @vsi: the VSI whose topology status is being checked + */ +static void ice_print_topo_conflict(struct ice_vsi *vsi) +{ + switch (vsi->port_info->phy.link_info.topo_media_conflict) { + case ICE_AQ_LINK_TOPO_CONFLICT: + case ICE_AQ_LINK_MEDIA_CONFLICT: + netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); + break; + default: + break; + } +} + +/** * ice_print_link_msg - print link up or down message * @vsi: the VSI whose link status is being queried * @isup: boolean for if the link is now up or down @@ -630,6 +656,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) const char *speed; const char *fec; const char *fc; + const char *an; if (!vsi) return; @@ -713,6 +740,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) break; } + /* check if autoneg completed, might be false due to not supported */ + if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) + an = "True"; + else + an = "False"; + /* Get FEC mode requested based on PHY caps last SW configuration */ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); if (!caps) { @@ -737,8 +770,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) devm_kfree(&vsi->back->pdev->dev, caps); done: - netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n", - speed, fec_req, fec, fc); + netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", + speed, fec_req, fec, an, fc); + ice_print_topo_conflict(vsi); } /** @@ -806,10 +840,24 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return result; - vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF); + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; + /* turn off PHY if media was removed */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && + !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + result = ice_aq_set_link_restart_an(pi, false, NULL); + if (result) { + dev_dbg(&pf->pdev->dev, + "Failed to set link down, VSI %d error %d\n", + vsi->vsi_num, result); + return result; + } + } + ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -1307,14 +1355,134 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (vf_mdd_detected) { vf->num_mdd_events++; - if (vf->num_mdd_events > 1) - dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n", + if (vf->num_mdd_events && + vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) + dev_info(&pf->pdev->dev, + "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", i, vf->num_mdd_events); } } } /** + * ice_force_phys_link_state - Force the physical link state + * @vsi: VSI to force the physical link state to up/down + * @link_up: true/false indicates to set the physical link to up/down + * + * Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the 
determined capabilities. If + * link changes a link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. + * + * Returns 0 on success, negative on failure + */ +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_port_info *pi; + struct device *dev; + int retcode; + + if (!vsi || !vsi->port_info || !vsi->back) + return -EINVAL; + if (vsi->type != ICE_VSI_PF) + return 0; + + dev = &vsi->back->pdev->dev; + + pi = vsi->port_info; + + pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, + "Failed to get phy capabilities, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + goto out; + } + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + retcode = -ENOMEM; + goto out; + } + + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg->low_power_ctrl = pcaps->low_power_ctrl; + cfg->eee_cap = pcaps->eee_cap; + cfg->eeer_value = pcaps->eeer_value; + cfg->link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg->caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; + + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + if (retcode) { + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + } + + devm_kfree(dev, cfg); +out: + devm_kfree(dev, pcaps); + return retcode; +} + +/** + * ice_check_media_subtask - Check for media; bring link up if detected. 
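/* Editor's minimal sketch (the helper name is assumed, not in the patch):
 * both ice_link_event() and ice_check_media_subtask() key their media
 * handling off the ICE_AQ_MEDIA_AVAILABLE bit in the port's AQ link info,
 * paired with the ICE_FLAG_NO_MEDIA PF flag. When media disappears the flag
 * is set and the PHY is forced down; the service task then polls until media
 * returns, forces the PHY back up and clears the flag. The media test itself
 * reduces to:
 */
static bool example_media_present(struct ice_port_info *pi)
{
        return !!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE);
}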
+ * @pf: pointer to PF struct + */ +static void ice_check_media_subtask(struct ice_pf *pf) +{ + struct ice_port_info *pi; + struct ice_vsi *vsi; + int err; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return; + + /* No need to check for media if it's already present or the interface + * is down + */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || + test_bit(__ICE_DOWN, vsi->state)) + return; + + /* Refresh link info and check if media is present */ + pi = vsi->port_info; + err = ice_update_link_info(pi); + if (err) + return; + + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) + return; + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + /* A Link Status Event will be generated; the event handler + * will complete bringing the interface up + */ + } +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1336,12 +1504,19 @@ static void ice_service_task(struct work_struct *work) return; } + ice_clean_adminq_subtask(pf); + ice_check_media_subtask(pf); ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); - ice_process_vflr_event(pf); ice_watchdog_subtask(pf); - ice_clean_adminq_subtask(pf); + + if (ice_is_safe_mode(pf)) { + ice_service_task_complete(pf); + return; + } + + ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ @@ -1369,8 +1544,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; - hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; - hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } @@ -1409,15 +1584,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {} */ static int ice_vsi_ena_irq(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; + struct ice_hw *hw = &vsi->back->hw; + int i; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - int i; - - ice_for_each_q_vector(vsi, i) - ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); - } + ice_for_each_q_vector(vsi, i) + ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); ice_flush(hw); return 0; @@ -1665,7 +1836,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { + if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); devm_free_irq(&pf->pdev->dev, pf->msix_entries[pf->oicr_idx].vector, pf); @@ -1780,30 +1951,41 @@ static void ice_napi_add(struct ice_vsi *vsi) } /** - * ice_cfg_netdev - Allocate, configure and register a netdev - * @vsi: the VSI associated with the new netdev - * - * Returns 0 on success, negative value on failure + * ice_set_ops - set netdev and ethtools ops for the given netdev + * @netdev: netdev instance */ -static int ice_cfg_netdev(struct ice_vsi *vsi) +static void ice_set_ops(struct net_device *netdev) { + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (ice_is_safe_mode(pf)) { + netdev->netdev_ops = &ice_netdev_safe_mode_ops; + ice_set_ethtool_safe_mode_ops(netdev); + return; + } + + netdev->netdev_ops = &ice_netdev_ops; + ice_set_ethtool_ops(netdev); +} + +/** + * 
ice_set_netdev_features - set features for the given netdev + * @netdev: netdev instance + */ +static void ice_set_netdev_features(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); netdev_features_t csumo_features; netdev_features_t vlano_features; netdev_features_t dflt_features; netdev_features_t tso_features; - struct ice_netdev_priv *np; - struct net_device *netdev; - u8 mac_addr[ETH_ALEN]; - int err; - - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; - vsi->netdev = netdev; - np = netdev_priv(netdev); - np->vsi = vsi; + if (ice_is_safe_mode(pf)) { + /* safe mode */ + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; + netdev->hw_features = netdev->features; + return; + } dflt_features = NETIF_F_SG | NETIF_F_HIGHDMA | @@ -1831,25 +2013,50 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) tso_features; netdev->vlan_features |= dflt_features | csumo_features | tso_features; +} + +/** + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev + * + * Returns 0 on success, negative value on failure + */ +static int ice_cfg_netdev(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + ice_set_ops(netdev); if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); - ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); } netdev->priv_flags |= IFF_UNICAST_FLT; - /* assign netdev_ops */ - netdev->netdev_ops = &ice_netdev_ops; + /* Setup netdev TC information */ + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); /* setup watchdog timeout value to be 5 second */ netdev->watchdog_timeo = 5 * HZ; - ice_set_ethtool_ops(netdev); - netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = ICE_MAX_MTU; @@ -2041,36 +2248,48 @@ unroll_vsi_setup: ice_vsi_free_q_vectors(vsi); ice_vsi_delete(vsi); ice_vsi_put_qs(vsi); - pf->q_left_tx += vsi->alloc_txq; - pf->q_left_rx += vsi->alloc_rxq; ice_vsi_clear(vsi); } return status; } /** - * ice_determine_q_usage - Calculate queue distribution - * @pf: board private structure - * - * Return -ENOMEM if we don't get enough queues for all ports + * ice_get_avail_q_count - Get count of queues in use + * @pf_qmap: bitmap to get queue use count from + * @lock: pointer to a mutex that protects access to pf_qmap + * @size: size of the bitmap */ -static void ice_determine_q_usage(struct ice_pf *pf) +static u16 +ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) { - u16 q_left_tx, q_left_rx; + u16 count = 0, bit; - q_left_tx = pf->hw.func_caps.common_cap.num_txq; - q_left_rx = pf->hw.func_caps.common_cap.num_rxq; + mutex_lock(lock); + for_each_clear_bit(bit, pf_qmap, size) + count++; + mutex_unlock(lock); - pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); + return count; +} - /* only 1 Rx queue unless RSS is enabled */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - pf->num_lan_rx = 1; - else - pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus()); +/** + * ice_get_avail_txq_count - Get count of Tx queues in use + * @pf: pointer to an 
ice_pf instance + */ +u16 ice_get_avail_txq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, + pf->max_pf_txqs); +} - pf->q_left_tx = q_left_tx - pf->num_lan_tx; - pf->q_left_rx = q_left_rx - pf->num_lan_rx; +/** + * ice_get_avail_rxq_count - Get count of Rx queues in use + * @pf: pointer to an ice_pf instance + */ +u16 ice_get_avail_rxq_count(struct ice_pf *pf) +{ + return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, + pf->max_pf_rxqs); } /** @@ -2082,43 +2301,74 @@ static void ice_deinit_pf(struct ice_pf *pf) ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); + + if (pf->avail_txqs) { + bitmap_free(pf->avail_txqs); + pf->avail_txqs = NULL; + } + + if (pf->avail_rxqs) { + bitmap_free(pf->avail_rxqs); + pf->avail_rxqs = NULL; + } } /** - * ice_init_pf - Initialize general software structures (struct ice_pf) - * @pf: board private structure to initialize + * ice_set_pf_caps - set PFs capability flags + * @pf: pointer to the PF instance */ -static void ice_init_pf(struct ice_pf *pf) +static void ice_set_pf_caps(struct ice_pf *pf) { - bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); - set_bit(ICE_FLAG_MSIX_ENA, pf->flags); -#ifdef CONFIG_PCI_IOV - if (pf->hw.func_caps.common_cap.sr_iov_1_1) { - struct ice_hw *hw = &pf->hw; + struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + if (func_caps->common_cap.dcb) + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); +#ifdef CONFIG_PCI_IOV + clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + if (func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); - pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, ICE_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ + clear_bit(ICE_FLAG_RSS_ENA, pf->flags); + if (func_caps->common_cap.rss_table_size) + set_bit(ICE_FLAG_RSS_ENA, pf->flags); - mutex_init(&pf->sw_mutex); - mutex_init(&pf->avail_q_mutex); + pf->max_pf_txqs = func_caps->common_cap.num_txq; + pf->max_pf_rxqs = func_caps->common_cap.num_rxq; +} - /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */ - mutex_lock(&pf->avail_q_mutex); - bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS); - bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS); - mutex_unlock(&pf->avail_q_mutex); +/** + * ice_init_pf - Initialize general software structures (struct ice_pf) + * @pf: board private structure to initialize + */ +static int ice_init_pf(struct ice_pf *pf) +{ + ice_set_pf_caps(pf); - if (pf->hw.func_caps.common_cap.rss_table_size) - set_bit(ICE_FLAG_RSS_ENA, pf->flags); + mutex_init(&pf->sw_mutex); /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; INIT_WORK(&pf->serv_task, ice_service_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); + + mutex_init(&pf->avail_q_mutex); + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + if (!pf->avail_txqs) + return -ENOMEM; + + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); + if (!pf->avail_rxqs) { + devm_kfree(&pf->pdev->dev, pf->avail_txqs); + pf->avail_txqs = NULL; + return -ENOMEM; + } + + return 0; } /** @@ -2137,13 +2387,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) /* reserve one vector for miscellaneous handler */ needed = 1; + if (v_left < needed) + goto no_hw_vecs_left_err; v_budget += needed; v_left -= needed; /* reserve vectors for LAN traffic */ - pf->num_lan_msix = min_t(int, num_online_cpus(), 
v_left); - v_budget += pf->num_lan_msix; - v_left -= pf->num_lan_msix; + needed = min_t(int, num_online_cpus(), v_left); + if (v_left < needed) + goto no_hw_vecs_left_err; + pf->num_lan_msix = needed; + v_budget += needed; + v_left -= needed; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); @@ -2168,18 +2423,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < v_budget) { dev_warn(&pf->pdev->dev, - "not enough vectors. requested = %d, obtained = %d\n", + "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); - if (v_actual >= (pf->num_lan_msix + 1)) { - pf->num_avail_sw_msix = v_actual - - (pf->num_lan_msix + 1); - } else if (v_actual >= 2) { - pf->num_lan_msix = 1; - pf->num_avail_sw_msix = v_actual - 2; - } else { +/* 2 vectors for LAN (traffic + OICR) */ +#define ICE_MIN_LAN_VECS 2 + + if (v_actual < ICE_MIN_LAN_VECS) { + /* error if we can't get minimum vectors */ pci_disable_msix(pf->pdev); err = -ERANGE; goto msix_err; + } else { + pf->num_lan_msix = ICE_MIN_LAN_VECS; } } @@ -2189,9 +2444,13 @@ msix_err: devm_kfree(&pf->pdev->dev, pf->msix_entries); goto exit_err; +no_hw_vecs_left_err: + dev_err(&pf->pdev->dev, + "not enough device MSI-X vectors. requested = %d, available = %d\n", + needed, v_left); + err = -ERANGE; exit_err: pf->num_lan_msix = 0; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); return err; } @@ -2204,7 +2463,6 @@ static void ice_dis_msix(struct ice_pf *pf) pci_disable_msix(pf->pdev); devm_kfree(&pf->pdev->dev, pf->msix_entries); pf->msix_entries = NULL; - clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); } /** @@ -2213,8 +2471,7 @@ static void ice_dis_msix(struct ice_pf *pf) */ static void ice_clear_interrupt_scheme(struct ice_pf *pf) { - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_dis_msix(pf); + ice_dis_msix(pf); if (pf->irq_tracker) { devm_kfree(&pf->pdev->dev, pf->irq_tracker); @@ -2230,10 +2487,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) { int vectors; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - vectors = ice_ena_msix_range(pf); - else - return -ENODEV; + vectors = ice_ena_msix_range(pf); if (vectors < 0) return vectors; @@ -2256,6 +2510,163 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** + * ice_log_pkg_init - log result of DDP package load + * @hw: pointer to hardware info + * @status: status of package load + */ +static void +ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +{ + struct ice_pf *pf = (struct ice_pf *)hw->back; + struct device *dev = &pf->pdev->dev; + + switch (*status) { + case ICE_SUCCESS: + /* The package download AdminQ command returned success because + * this download succeeded or ICE_ERR_AQ_NO_WORK since there is + * already a package loaded on the device. 
+ */ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, + sizeof(hw->pkg_name))) { + if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) + dev_info(dev, + "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + else + dev_info(dev, + "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + dev_err(dev, + "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + *status = ICE_ERR_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + dev_info(dev, + "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); + } else { + dev_err(dev, + "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); + *status = ICE_ERR_NOT_SUPPORTED; + } + break; + case ICE_ERR_BUF_TOO_SHORT: + /* fall-through */ + case ICE_ERR_CFG: + dev_err(dev, + "The DDP package file is invalid. Entering Safe Mode.\n"); + break; + case ICE_ERR_NOT_SUPPORTED: + /* Package File version not supported */ + if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); + else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || + (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) + dev_err(dev, + "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_ERR_AQ_ERROR: + switch (hw->adminq.sq_last_status) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + dev_err(dev, + "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_ESVN: + dev_err(dev, + "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. 
Entering Safe Mode.\n"); + return; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + dev_err(dev, + "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + return; + default: + break; + } + /* fall-through */ + default: + dev_err(dev, + "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", + *status); + break; + } +} + +/** + * ice_load_pkg - load/reload the DDP Package file + * @firmware: firmware structure when firmware requested or NULL for reload + * @pf: pointer to the PF instance + * + * Called on probe and post CORER/GLOBR rebuild to load DDP Package and + * initialize HW tables. + */ +static void +ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) +{ + enum ice_status status = ICE_ERR_PARAM; + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw = &pf->hw; + + /* Load DDP Package */ + if (firmware && !hw->pkg_copy) { + status = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, &status); + } else if (!firmware && hw->pkg_copy) { + /* Reload package during rebuild after CORER/GLOBR reset */ + status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, &status); + } else { + dev_err(dev, + "The DDP package file failed to load. Entering Safe Mode.\n"); + } + + if (status) { + /* Safe Mode */ + clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); + return; + } + + /* Successful download package is the precondition for advanced + * features, hence setting the ICE_FLAG_ADV_FEATURES flag + */ + set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); +} + +/** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure * @@ -2272,6 +2683,105 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) } /** + * ice_send_version - update firmware with driver version + * @pf: PF struct + * + * Returns ICE_SUCCESS on success, else error code + */ +static enum ice_status ice_send_version(struct ice_pf *pf) +{ + struct ice_driver_ver dv; + + dv.major_ver = DRV_VERSION_MAJOR; + dv.minor_ver = DRV_VERSION_MINOR; + dv.build_ver = DRV_VERSION_BUILD; + dv.subbuild_ver = 0; + strscpy((char *)dv.driver_string, DRV_VERSION, + sizeof(dv.driver_string)); + return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); +} + +/** + * ice_get_opt_fw_name - return optional firmware file name or NULL + * @pf: pointer to the PF instance + */ +static char *ice_get_opt_fw_name(struct ice_pf *pf) +{ + /* Optional firmware name same as default with additional dash + * followed by a EUI-64 identifier (PCIe Device Serial Number) + */ + struct pci_dev *pdev = pf->pdev; + char *opt_fw_filename = NULL; + u32 dword; + u8 dsn[8]; + int pos; + + /* Determine the name of the optional file using the DSN (two + * dwords following the start of the DSN Capability). 
+ */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); + if (!opt_fw_filename) + return NULL; + + pci_read_config_dword(pdev, pos + 4, &dword); + put_unaligned_le32(dword, &dsn[0]); + pci_read_config_dword(pdev, pos + 8, &dword); + put_unaligned_le32(dword, &dsn[4]); + snprintf(opt_fw_filename, NAME_MAX, + "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg", + ICE_DDP_PKG_PATH, + dsn[7], dsn[6], dsn[5], dsn[4], + dsn[3], dsn[2], dsn[1], dsn[0]); + } + + return opt_fw_filename; +} + +/** + * ice_request_fw - Device initialization routine + * @pf: pointer to the PF instance + */ +static void ice_request_fw(struct ice_pf *pf) +{ + char *opt_fw_filename = ice_get_opt_fw_name(pf); + const struct firmware *firmware = NULL; + struct device *dev = &pf->pdev->dev; + int err = 0; + + /* optional device-specific DDP (if present) overrides the default DDP + * package file. kernel logs a debug message if the file doesn't exist, + * and warning messages for other errors. + */ + if (opt_fw_filename) { + err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); + if (err) { + kfree(opt_fw_filename); + goto dflt_pkg_load; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + kfree(opt_fw_filename); + release_firmware(firmware); + return; + } + +dflt_pkg_load: + err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + if (err) { + dev_err(dev, + "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + return; + } + + /* request for firmware was successful. Download to device */ + ice_load_pkg(firmware, pf); + release_firmware(firmware); +} + +/** * ice_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in ice_pci_tbl @@ -2345,22 +2855,33 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } - dev_info(dev, "firmware %d.%d.%05d api %d.%d\n", - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, - hw->api_maj_ver, hw->api_min_ver); + dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, + hw->api_maj_ver, hw->api_min_ver, hw->api_patch, + ice_nvm_version_str(hw), hw->fw_build); - ice_init_pf(pf); + ice_request_fw(pf); - err = ice_init_pf_dcb(pf, false); - if (err) { - clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - clear_bit(ICE_FLAG_DCB_ENA, pf->flags); - - /* do not fail overall init if DCB init fails */ - err = 0; + /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + * set in pf->state, which will cause ice_is_safe_mode to return + * true + */ + if (ice_is_safe_mode(pf)) { + dev_err(dev, + "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); + /* we already got function/device capabilities but these don't + * reflect what the driver needs to do in safe mode. Instead of + * adding conditional logic everywhere to ignore these + * device/function capabilities, override them. + */ + ice_set_safe_mode_caps(hw); } - ice_determine_q_usage(pf); + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); + goto err_init_pf_unroll; + } pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { @@ -2390,12 +2911,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
*/ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_init_interrupt_unroll; - } + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto err_init_interrupt_unroll; } /* create switch struct for the switch element created by FW on boot */ @@ -2423,6 +2942,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) clear_bit(__ICE_SERVICE_DIS, pf->state); + /* tell the firmware we are up */ + err = ice_send_version(pf); + if (err) { + dev_err(dev, + "probe failed sending driver version %s. error: %d\n", + ice_drv_ver, err); + goto err_alloc_sw_unroll; + } + /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -2434,6 +2962,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_verify_cacheline_size(pf); + /* If no DDP driven features have to be setup, return here */ + if (ice_is_safe_mode(pf)) + return 0; + + /* initialize DDP driven features */ + + /* Note: DCB init failure is non-fatal to load */ + if (ice_init_pf_dcb(pf, false)) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + ice_cfg_lldp_mib_change(&pf->hw, true); + } + return 0; err_alloc_sw_unroll: @@ -2483,9 +3025,14 @@ static void ice_remove(struct pci_dev *pdev) continue; ice_vsi_free_q_vectors(pf->vsi[i]); } - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + ice_clear_interrupt_scheme(pf); + /* Issue a PFR as part of the prescribed driver unload flow. Do not + * do it via ice_schedule_reset() since there is no need to rebuild + * and the service task is already stopped. + */ + ice_reset(&pf->hw, ICE_RESET_PFR); pci_disable_pcie_error_reporting(pdev); } @@ -2711,10 +3258,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; enum ice_status status; - LIST_HEAD(a_mac_list); - LIST_HEAD(r_mac_list); u8 flags = 0; - int err; + int err = 0; u8 *mac; mac = (u8 *)addr->sa_data; @@ -2737,42 +3282,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* When we change the MAC address we also have to change the MAC address * based filter rules that were created previously for the old MAC * address. So first, we remove the old filter rule using ice_remove_mac - * and then create a new filter rule using ice_add_mac. Note that for - * both these operations, we first need to form a "list" of MAC - * addresses (even though in this case, we have only 1 MAC address to be - * added/removed) and this done using ice_add_mac_to_list. Depending on - * the ensuing operation this "list" of MAC addresses is either to be - * added or removed from the filter. + * and then create a new filter rule using ice_add_mac via + * ice_vsi_cfg_mac_fltr function call for both add and/or remove + * filters. 
*/ - err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; - } - - status = ice_remove_mac(hw, &r_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } - err = ice_add_mac_to_list(vsi, &a_mac_list, mac); - if (err) { - err = -EADDRNOTAVAIL; - goto free_lists; - } - - status = ice_add_mac(hw, &a_mac_list); + status = ice_vsi_cfg_mac_fltr(vsi, mac, true); if (status) { err = -EADDRNOTAVAIL; - goto free_lists; + goto err_update_filters; } -free_lists: - /* free list entries */ - ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); - ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); - +err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); @@ -2788,8 +3314,8 @@ free_lists: flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n", - mac); + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, status); } return 0; } @@ -2902,6 +3428,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) struct ice_vsi *vsi = np->vsi; int ret = 0; + /* Don't set any netdev advanced features with device in Safe Mode */ + if (ice_is_safe_mode(vsi->back)) { + dev_err(&vsi->back->pdev->dev, + "Device is in Safe Mode - not enabling advanced netdev features\n"); + return ret; + } + /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ @@ -3008,10 +3541,7 @@ static int ice_up_complete(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int err; - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_vsi_cfg_msix(vsi); - else - return -ENOTSUPP; + ice_vsi_cfg_msix(vsi); /* Enable only Rx rings, Tx rings were enabled by the FW when the * Tx queue group list was configured and the context bits were @@ -3132,7 +3662,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) * ice_update_vsi_stats - Update VSI stats counters * @vsi: the VSI to be updated */ -static void ice_update_vsi_stats(struct ice_vsi *vsi) +void ice_update_vsi_stats(struct ice_vsi *vsi) { struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; struct ice_eth_stats *cur_es = &vsi->eth_stats; @@ -3159,6 +3689,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; cur_ns->rx_length_errors = pf->stats.rx_len_errors; + /* record drops from the port level */ + cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } } @@ -3166,149 +3698,139 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi) * ice_update_pf_stats - Update PF port stats counters * @pf: PF whose stats needs to be updated */ -static void ice_update_pf_stats(struct ice_pf *pf) +void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; - u8 pf_id; + u8 port; + port = hw->port_info->lport; prev_ps = &pf->stats_prev; cur_ps = &pf->stats; - pf_id = hw->pf_id; - ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, + ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes); - ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, + 
ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_unicast, &cur_ps->eth.rx_unicast); - ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, + ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_multicast, &cur_ps->eth.rx_multicast); - ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, + ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, + &prev_ps->eth.rx_broadcast, &cur_ps->eth.rx_broadcast); - ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, + ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, + &prev_ps->eth.rx_discards, + &cur_ps->eth.rx_discards); + + ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_bytes, &cur_ps->eth.tx_bytes); - ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, + ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_unicast, &cur_ps->eth.tx_unicast); - ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, + ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_multicast, &cur_ps->eth.tx_multicast); - ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), - pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, + ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, + &prev_ps->eth.tx_broadcast, &cur_ps->eth.tx_broadcast); - ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, &prev_ps->tx_dropped_link_down, &cur_ps->tx_dropped_link_down); - ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_64, - &cur_ps->rx_size_64); + ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_64, &cur_ps->rx_size_64); - ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_127, - &cur_ps->rx_size_127); + ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_127, &cur_ps->rx_size_127); - ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_255, - &cur_ps->rx_size_255); + ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_255, &cur_ps->rx_size_255); - ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->rx_size_511, - &cur_ps->rx_size_511); + ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, + &prev_ps->rx_size_511, &cur_ps->rx_size_511); - ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), - GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); - ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), - GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); - ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), - GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, &prev_ps->rx_size_big, &cur_ps->rx_size_big); - ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 
- pf->stat_prev_loaded, &prev_ps->tx_size_64, - &cur_ps->tx_size_64); + ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_64, &cur_ps->tx_size_64); - ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_127, - &cur_ps->tx_size_127); + ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_127, &cur_ps->tx_size_127); - ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_255, - &cur_ps->tx_size_255); + ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_255, &cur_ps->tx_size_255); - ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), - pf->stat_prev_loaded, &prev_ps->tx_size_511, - &cur_ps->tx_size_511); + ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, + &prev_ps->tx_size_511, &cur_ps->tx_size_511); - ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), - GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); - ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), - GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); - ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), - GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, + ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); - ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); - ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); - ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); - ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); ice_update_dcb_stats(pf); - ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, &prev_ps->crc_errors, &cur_ps->crc_errors); - ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); - ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, &prev_ps->mac_local_faults, &cur_ps->mac_local_faults); - ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); - ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); - ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); - ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 
&prev_ps->rx_fragments, &cur_ps->rx_fragments); - ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, &prev_ps->rx_oversize, &cur_ps->rx_oversize); - ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, + ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); pf->stat_prev_loaded = true; @@ -3328,12 +3850,16 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) vsi_stats = &vsi->net_stats; - if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) + if (!vsi->num_txq || !vsi->num_rxq) return; + /* netdev packet/byte stats come from ring counter. These are obtained * by summing up ring counters (done by ice_update_vsi_ring_stats). + * But, only call the update routine and read the registers if VSI is + * not down. */ - ice_update_vsi_ring_stats(vsi); + if (!test_bit(__ICE_DOWN, vsi->state)) + ice_update_vsi_ring_stats(vsi); stats->tx_packets = vsi_stats->tx_packets; stats->tx_bytes = vsi_stats->tx_bytes; stats->rx_packets = vsi_stats->rx_packets; @@ -3372,85 +3898,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } /** - * ice_force_phys_link_state - Force the physical link state - * @vsi: VSI to force the physical link state to up/down - * @link_up: true/false indicates to set the physical link to up/down - * - * Force the physical link state by getting the current PHY capabilities from - * hardware and setting the PHY config based on the determined capabilities. If - * link changes a link event will be triggered because both the Enable Automatic - * Link Update and LESM Enable bits are set when setting the PHY capabilities. - * - * Returns 0 on success, negative on failure - */ -static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) -{ - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_aqc_set_phy_cfg_data *cfg; - struct ice_port_info *pi; - struct device *dev; - int retcode; - - if (!vsi || !vsi->port_info || !vsi->back) - return -EINVAL; - if (vsi->type != ICE_VSI_PF) - return 0; - - dev = &vsi->back->pdev->dev; - - pi = vsi->port_info; - - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); - if (!pcaps) - return -ENOMEM; - - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); - if (retcode) { - dev_err(dev, - "Failed to get phy capabilities, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - goto out; - } - - /* No change in link */ - if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && - link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) - goto out; - - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); - if (!cfg) { - retcode = -ENOMEM; - goto out; - } - - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; - if (link_up) - cfg->caps |= ICE_AQ_PHY_ENA_LINK; - else - cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); - if (retcode) { - dev_err(dev, "Failed to set phy config, VSI %d error %d\n", - vsi->vsi_num, retcode); - retcode = -EIO; - } - - devm_kfree(dev, cfg); -out: - devm_kfree(dev, pcaps); - return retcode; -} - -/** * ice_down - Shutdown the connection * @vsi: The VSI being stopped */ @@ -3559,24 +4006,6 
@@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } /** - * ice_vsi_req_irq - Request IRQ from the OS - * @vsi: The VSI IRQ is being requested for - * @basename: name for the vector - * - * Return 0 on success and a negative value on error - */ -static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) -{ - struct ice_pf *pf = vsi->back; - int err = -EINVAL; - - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - err = ice_vsi_req_irq_msix(vsi, basename); - - return err; -} - -/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -3605,7 +4034,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); - err = ice_vsi_req_irq(vsi, int_name); + err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -3669,23 +4098,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) int err = 0; if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) - return err; + return 0; clear_bit(__ICE_NEEDS_RESTART, vsi->state); if (vsi->netdev && vsi->type == ICE_VSI_PF) { - struct net_device *netd = vsi->netdev; - if (netif_running(vsi->netdev)) { - if (locked) { - err = netd->netdev_ops->ndo_open(netd); - } else { + if (!locked) rtnl_lock(); - err = netd->netdev_ops->ndo_open(netd); + + err = ice_open(vsi->netdev); + + if (!locked) rtnl_unlock(); - } - } else { - err = ice_vsi_open(vsi); } } @@ -3699,9 +4124,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ #ifdef CONFIG_DCB int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#else -static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -#endif /* CONFIG_DCB */ { int v; @@ -3712,91 +4134,107 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) return 0; } +#endif /* CONFIG_DCB */ /** - * ice_vsi_rebuild_all - rebuild all VSIs in PF - * @pf: the PF + * ice_vsi_rebuild_by_type - Rebuild VSI of a given type + * @pf: pointer to the PF instance + * @type: VSI type to rebuild + * + * Iterates through the pf->vsi array and rebuilds VSIs of the requested type */ -static int ice_vsi_rebuild_all(struct ice_pf *pf) +static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { - int i; + enum ice_status status; + int i, err; - /* loop through pf->vsi array and reinit the VSI if found */ ice_for_each_vsi(pf, i) { - int err; + struct ice_vsi *vsi = pf->vsi[i]; - if (!pf->vsi[i]) + if (!vsi || vsi->type != type) continue; - err = ice_vsi_rebuild(pf->vsi[i]); + /* rebuild the VSI */ + err = ice_vsi_rebuild(vsi); if (err) { dev_err(&pf->pdev->dev, - "VSI at index %d rebuild failed\n", - pf->vsi[i]->idx); + "rebuild VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); return err; } - dev_info(&pf->pdev->dev, - "VSI at index %d rebuilt. vsi_num = 0x%x\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + /* replay filters for the VSI */ + status = ice_replay_vsi(&pf->hw, vsi->idx); + if (status) { + dev_err(&pf->pdev->dev, + "replay VSI failed, status %d, VSI index %d, type %d\n", + status, vsi->idx, type); + return -EIO; + } + + /* Re-map HW VSI number, using VSI handle that has been + * previously validated in ice_replay_vsi() call above + */ + vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); + + /* enable the VSI */ + err = ice_ena_vsi(vsi, false); + if (err) { + dev_err(&pf->pdev->dev, + "enable VSI failed, err %d, VSI index %d, type %d\n", + err, vsi->idx, type); + return err; + } + + dev_info(&pf->pdev->dev, "VSI rebuilt. 
VSI index %d, type %d\n", + vsi->idx, type); } return 0; } /** - * ice_vsi_replay_all - replay all VSIs configuration in the PF - * @pf: the PF + * ice_update_pf_netdev_link - Update PF netdev link status + * @pf: pointer to the PF instance */ -static int ice_vsi_replay_all(struct ice_pf *pf) +static void ice_update_pf_netdev_link(struct ice_pf *pf) { - struct ice_hw *hw = &pf->hw; - enum ice_status ret; + bool link_up; int i; - /* loop through pf->vsi array and replay the VSI if found */ ice_for_each_vsi(pf, i) { - if (!pf->vsi[i]) - continue; + struct ice_vsi *vsi = pf->vsi[i]; - ret = ice_replay_vsi(hw, pf->vsi[i]->idx); - if (ret) { - dev_err(&pf->pdev->dev, - "VSI at index %d replay failed %d\n", - pf->vsi[i]->idx, ret); - return -EIO; - } - - /* Re-map HW VSI number, using VSI handle that has been - * previously validated in ice_replay_vsi() call above - */ - pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); + if (!vsi || vsi->type != ICE_VSI_PF) + return; - dev_info(&pf->pdev->dev, - "VSI at index %d filter replayed successfully - vsi_num %i\n", - pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + ice_get_link_status(pf->vsi[i]->port_info, &link_up); + if (link_up) { + netif_carrier_on(pf->vsi[i]->netdev); + netif_tx_wake_all_queues(pf->vsi[i]->netdev); + } else { + netif_carrier_off(pf->vsi[i]->netdev); + netif_tx_stop_all_queues(pf->vsi[i]->netdev); + } } - - /* Clean up replay filter after successful re-configuration */ - ice_replay_post(hw); - return 0; } /** * ice_rebuild - rebuild after reset * @pf: PF to rebuild + * @reset_type: type of reset */ -static void ice_rebuild(struct ice_pf *pf) +static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = &pf->pdev->dev; struct ice_hw *hw = &pf->hw; enum ice_status ret; - int err, i; + int err; if (test_bit(__ICE_DOWN, pf->state)) goto clear_recovery; - dev_dbg(dev, "rebuilding PF\n"); + dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); ret = ice_init_all_ctrlq(hw); if (ret) { @@ -3804,6 +4242,16 @@ static void ice_rebuild(struct ice_pf *pf) goto err_init_ctrlq; } + /* if DDP was previously loaded successfully */ + if (!ice_is_safe_mode(pf)) { + /* reload the SW DB of filter tables */ + if (reset_type == ICE_RESET_PFR) + ice_fill_blk_tbls(hw); + else + /* Reload DDP Package after CORER/GLOBR reset */ + ice_load_pkg(NULL, pf); + } + ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); @@ -3822,65 +4270,53 @@ static void ice_rebuild(struct ice_pf *pf) if (err) goto err_sched_init_port; - ice_dcb_rebuild(pf); + err = ice_update_link_info(hw->port_info); + if (err) + dev_err(&pf->pdev->dev, "Get link status error %d\n", err); - err = ice_vsi_rebuild_all(pf); + /* start misc vector */ + err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(dev, "ice_vsi_rebuild_all failed\n"); - goto err_vsi_rebuild; + dev_err(dev, "misc vector setup failed: %d\n", err); + goto err_sched_init_port; } - err = ice_update_link_info(hw->port_info); - if (err) - dev_err(&pf->pdev->dev, "Get link status error %d\n", err); + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + ice_dcb_rebuild(pf); - /* Replay all VSIs Configuration, including filters after reset */ - if (ice_vsi_replay_all(pf)) { - dev_err(&pf->pdev->dev, - "error replaying VSI configurations with switch filter rules\n"); + /* rebuild PF VSI */ + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); + if (err) { + dev_err(dev, "PF VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } - /* start misc vector */ 
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { - err = ice_req_irq_msix_misc(pf); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF); if (err) { - dev_err(dev, "misc vector setup failed: %d\n", err); + dev_err(dev, "VF VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } } - /* restart the VSIs that were rebuilt and running before the reset */ - err = ice_pf_ena_all_vsi(pf, false); - if (err) { - dev_err(&pf->pdev->dev, "error enabling VSIs\n"); - /* no need to disable VSIs in tear down path in ice_rebuild() - * since its already taken care in ice_vsi_open() - */ + ice_update_pf_netdev_link(pf); + + /* tell the firmware we are up */ + ret = ice_send_version(pf); + if (ret) { + dev_err(dev, + "Rebuild failed due to error sending driver version: %d\n", + ret); goto err_vsi_rebuild; } - ice_for_each_vsi(pf, i) { - bool link_up; - - if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) - continue; - ice_get_link_status(pf->vsi[i]->port_info, &link_up); - if (link_up) { - netif_carrier_on(pf->vsi[i]->netdev); - netif_tx_wake_all_queues(pf->vsi[i]->netdev); - } else { - netif_carrier_off(pf->vsi[i]->netdev); - netif_tx_stop_all_queues(pf->vsi[i]->netdev); - } - } + ice_replay_post(hw); /* if we get here, reset flow is successful */ clear_bit(__ICE_RESET_FAILED, pf->state); return; err_vsi_rebuild: - ice_vsi_release_all(pf); err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: @@ -4244,9 +4680,7 @@ static void ice_tx_timeout(struct net_device *netdev) head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - val = rd32(hw, - GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, @@ -4295,6 +4729,7 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_port_info *pi; int err; if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { @@ -4304,13 +4739,33 @@ int ice_open(struct net_device *netdev) netif_carrier_off(netdev); - err = ice_force_phys_link_state(vsi, true); + pi = vsi->port_info; + err = ice_update_link_info(pi); if (err) { - netdev_err(netdev, - "Failed to set physical link up, error %d\n", err); + netdev_err(netdev, "Failed to get link info, error %d\n", + err); return err; } + /* Set PHY if there is media, otherwise, turn off PHY */ + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { + err = ice_force_phys_link_state(vsi, true); + if (err) { + netdev_err(netdev, + "Failed to set physical link up, error %d\n", + err); + return err; + } + } else { + err = ice_aq_set_link_restart_an(pi, false, NULL); + if (err) { + netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", + vsi->vsi_num, err); + return err; + } + set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); + } + err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", @@ -4388,6 +4843,17 @@ out_rm_features: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } +static const struct net_device_ops ice_netdev_safe_mode_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_set_mac_address = ice_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + 
.ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, +}; + static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2a232504379d..fc624b73d05d 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, /** * ice_sched_get_first_node - get the first node of the given layer - * @hw: pointer to the HW struct + * @pi: port information structure * @parent: pointer the base node of the subtree * @layer: layer number * * This function retrieves the first node of the given layer from the subtree */ static struct ice_sched_node * -ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, - u8 layer) +ice_sched_get_first_node(struct ice_port_info *pi, + struct ice_sched_node *parent, u8 layer) { - u8 i; - - if (layer < hw->sw_entry_point_layer) - return NULL; - for (i = 0; i < parent->num_children; i++) { - struct ice_sched_node *node = parent->children[i]; - - if (node) { - if (node->tx_sched_layer == layer) - return node; - /* this recursion is intentional, and wouldn't - * go more than 9 calls - */ - return ice_sched_get_first_node(hw, node, layer); - } - } - return NULL; + return pi->sib_head[parent->tc_num][layer]; } /** @@ -300,7 +284,7 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) { u8 i; - if (!pi) + if (!pi || !pi->root) return NULL; for (i = 0; i < pi->root->num_children; i++) if (pi->root->children[i]->tc_num == tc) @@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) parent = node->parent; /* root has no parent */ if (parent) { - struct ice_sched_node *p, *tc_node; + struct ice_sched_node *p; /* update the parent */ for (i = 0; i < parent->num_children; i++) @@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) break; } - /* search for previous sibling that points to this node and - * remove the reference - */ - tc_node = ice_sched_get_tc_node(pi, node->tc_num); - if (!tc_node) { - ice_debug(hw, ICE_DBG_SCHED, - "Invalid TC number %d\n", node->tc_num); - goto err_exit; - } - p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer); + p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); while (p) { if (p->sibling == node) { p->sibling = node->sibling; @@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) } p = p->sibling; } + + /* update the sibling head if head is getting removed */ + if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) + pi->sib_head[node->tc_num][node->tx_sched_layer] = + node->sibling; } -err_exit: + /* leaf nodes have no children */ if (node->children) devm_kfree(ice_hw_to_dev(hw), node->children); @@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ - prev = ice_sched_get_first_node(hw, tc_node, layer); + prev = ice_sched_get_first_node(pi, tc_node, layer); if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; prev->sibling = new_node; } + /* initialize the sibling head */ + if (!pi->sib_head[tc_node->tc_num][layer]) + 
pi->sib_head[tc_node->tc_num][layer] = new_node; + if (i == 0) *first_node_teid = teid; } @@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, goto lan_q_exit; /* get the first queue group node from VSI sub-tree */ - qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) @@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, u8 vsi_layer; vsi_layer = ice_sched_get_vsi_layer(hw); - node = ice_sched_get_first_node(hw, tc_node, vsi_layer); + node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer); /* Check whether it already exists */ while (node) { @@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. */ - node = ice_sched_get_first_node(hw, tc_node, (u8)i); + node = ice_sched_get_first_node(hw->port_info, tc_node, + (u8)i); /* scan all the siblings */ while (node) { if (node->num_children < hw->max_children[i]) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 8271fd651725..1acdd43a2edd 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1623,12 +1623,13 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (status) - goto exit; /* Remove a book keeping from the list */ devm_kfree(ice_hw_to_dev(hw), s_rule); + if (status) + goto exit; + list_del(&list_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), list_elem); } @@ -2137,6 +2138,38 @@ out: } /** + * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry + * @hw: pointer to the hardware structure + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information + * + * Helper function to search for a unicast rule entry - this is to be used + * to remove unicast MAC filter that is not shared with other VSIs on the + * PF switch. 
+ * + * Returns pointer to entry storing the rule if found + */ +static struct ice_fltr_mgmt_list_entry * +ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_info *f_info) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->fwd_id.hw_vsi_id == + list_itr->fltr_info.fwd_id.hw_vsi_id && + f_info->flag == list_itr->fltr_info.flag) + return list_itr; + } + return NULL; +} + +/** * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information @@ -2153,15 +2186,39 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) return ICE_ERR_PARAM; + rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; + u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; + + vsi_handle = list_itr->fltr_info.vsi_handle; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + list_itr->fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, vsi_handle); + if (is_unicast_ether_addr(add) && !hw->ucast_shared) { + /* Don't remove the unicast address that belongs to + * another VSI on the switch, since it is not being + * shared... + */ + mutex_lock(rule_lock); + if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, + &list_itr->fltr_info)) { + mutex_unlock(rule_lock); + return ICE_ERR_DOES_NOT_EXIST; + } + mutex_unlock(rule_lock); + } list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC, list_itr); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 3c83230434b6..33dd103035dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -95,17 +95,16 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) /** * ice_clean_tx_irq - Reclaim resources after transmit completes - * @vsi: the VSI we care about * @tx_ring: Tx ring to clean * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static bool -ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) +static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; - unsigned int budget = vsi->work_lmt; + unsigned int budget = ICE_DFLT_IRQ_WORK; + struct ice_vsi *vsi = tx_ring->vsi; s16 i = tx_ring->next_to_clean; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -114,6 +113,8 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; + prefetch(&vsi->state); + do { struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -206,7 +207,7 @@ ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->q_index) && - !test_bit(__ICE_DOWN, vsi->state)) { + !test_bit(__ICE_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->q_index); ++tx_ring->tx_stats.restart_q; @@ -377,18 +378,28 @@ err: */ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) { + u16 prev_ntu = rx_ring->next_to_use; + rx_ring->next_to_use = val; /* update next to alloc since we have filled the ring */ rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). + /* QRX_TAIL will be updated with any tail value, but hardware ignores + * the lower 3 bits. This makes it so we only bump tail on meaningful + * boundaries. Also, this allows us to bump tail on intervals of 8 up to + * the budget depending on the current traffic load. */ - wmb(); - writel(val, rx_ring->tail); + val &= ~0x7; + if (prev_ntu != val) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); + } } /** @@ -445,7 +456,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns false if all allocations were successful, true if any fail + * Returns false if all allocations were successful, true if any fail. Returning + * true signals to the caller that we didn't replace cleaned_count buffers and + * there is more work to do. + * + * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx + * buffers. Then bump tail at most one time. Grouping like this lets us avoid + * multiple tail writes per call. 
*/ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) { @@ -462,8 +479,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buf[ntu]; do { + /* if we fail here, we have work remaining */ if (!ice_alloc_mapped_page(rx_ring, bi)) - goto no_bufs; + break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, @@ -494,16 +512,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != ntu) ice_release_rx_desc(rx_ring, ntu); - return false; - -no_bufs: - if (rx_ring->next_to_use != ntu) - ice_release_rx_desc(rx_ring, ntu); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + return !!cleaned_count; } /** @@ -599,6 +608,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, unsigned int truesize = ICE_RXBUF_2048; #endif + if (!size) + return; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, rx_buf->page_offset, size, truesize); @@ -654,6 +665,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, prefetchw(rx_buf->page); *skb = rx_buf->skb; + if (!size) + return rx_buf; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, rx_buf->page_offset, size, @@ -737,8 +750,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, */ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - /* hand second half of page back to the ring */ + if (!rx_buf) + return; + if (ice_can_reuse_rx_page(rx_buf)) { + /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); rx_ring->rx_stats.page_reuse_count++; } else { @@ -864,7 +880,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, /** * ice_rx_csum - Indicate in skb if checksum is good - * @vsi: the VSI we care about + * @ring: the ring we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor * @ptype: the packet type decoded by hardware @@ -872,7 +888,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * skb->protocol must be set before this function is called */ static void -ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, +ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; @@ -889,7 +905,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, skb_checksum_none_assert(skb); /* check if Rx checksum is enabled */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* check if HW has decoded the packet and checksum */ @@ -929,7 +945,7 @@ ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, return; checksum_fail: - vsi->back->hw_csum_rx_error++; + ring->vsi->back->hw_csum_rx_error++; } /** @@ -953,7 +969,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring, /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); - ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype); + ice_rx_csum(rx_ring, skb, rx_desc, ptype); } /** @@ -990,7 +1006,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); - bool failure = false; + bool failure; /* start the loop to process Rx packets bounded by 'budget' */ while 
(likely(total_rx_pkts < (unsigned int)budget)) { @@ -1002,13 +1018,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) u16 vlan_tag = 0; u8 rx_ptype; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure = failure || - ice_alloc_rx_bufs(rx_ring, cleaned_count); - cleaned_count = 0; - } - /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -1030,8 +1039,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + /* retrieve a buffer from the ring */ rx_buf = ice_get_rx_buf(rx_ring, &skb, size); - /* allocate (if needed) and populate skb */ + if (skb) ice_add_rx_frag(rx_buf, skb, size); else @@ -1040,7 +1050,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buf_failed++; - rx_buf->pagecnt_bias++; + if (rx_buf) + rx_buf->pagecnt_bias++; break; } @@ -1057,9 +1068,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) continue; } - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); if (ice_test_staterr(rx_desc, stat_err_bits)) vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); @@ -1076,6 +1084,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; + ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); /* send completed skb up the stack */ @@ -1085,6 +1096,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; } + /* return up to cleaned_count buffers to hardware */ + failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); + /* update queue and vector specific stats */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.pkts += total_rx_pkts; @@ -1212,6 +1226,8 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) if (time_after(next_update, rc->next_update)) goto clear_counts; + prefetch(q_vector->vsi->port_info); + packets = rc->total_pkts; bytes = rc->total_bytes; @@ -1341,16 +1357,32 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt - * @vsi: the VSI associated with the q_vector * @q_vector: q_vector for which ITR is being updated and interrupt enabled */ -static void -ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +static void ice_update_ena_itr(struct ice_q_vector *q_vector) { struct ice_ring_container *tx = &q_vector->tx; struct ice_ring_container *rx = &q_vector->rx; + struct ice_vsi *vsi = q_vector->vsi; u32 itr_val; + /* when exiting WB_ON_ITR lets set a low ITR value and trigger + * interrupts to expire right away in case we have more work ready to go + * already + */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { + itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); + /* set target back to last user set value */ + rx->target_itr = rx->itr_setting; + /* set current to what we just wrote and dynamic if needed */ + rx->current_itr = ICE_WB_ON_ITR_USECS | + (rx->itr_setting & ICE_ITR_DYNAMIC); + /* allow normal interrupt flow to start */ + 
q_vector->itr_countdown = 0; + return; + } + /* This will do nothing if dynamic updates are not enabled */ ice_update_itr(q_vector, tx); ice_update_itr(q_vector, rx); @@ -1389,13 +1421,48 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) q_vector->itr_countdown--; } - if (!test_bit(__ICE_DOWN, vsi->state)) - wr32(&vsi->back->hw, + if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) + wr32(&q_vector->vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); } /** + * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector + * @q_vector: q_vector to set WB_ON_ITR on + * + * We need to tell hardware to write-back completed descriptors even when + * interrupts are disabled. Descriptors will be written back on cache line + * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR + * descriptors may not be written back if they don't fill a cache line until the + * next interrupt. + * + * This sets the write-back frequency to 2 microseconds as that is the minimum + * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to + * make sure hardware knows we aren't meddling with the INTENA_M bit. + */ +static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) +{ + struct ice_vsi *vsi = q_vector->vsi; + + /* already in WB_ON_ITR mode no need to change it */ + if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) + return; + + if (q_vector->num_ring_rx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_RX_ITR)); + + if (q_vector->num_ring_tx) + wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), + ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, + ICE_TX_ITR)); + + q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; +} + +/** * ice_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets @@ -1408,29 +1475,32 @@ int ice_napi_poll(struct napi_struct *napi, int budget) { struct ice_q_vector *q_vector = container_of(napi, struct ice_q_vector, napi); - struct ice_vsi *vsi = q_vector->vsi; - struct ice_pf *pf = vsi->back; bool clean_complete = true; - int budget_per_ring = 0; struct ice_ring *ring; + int budget_per_ring; int work_done = 0; /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ ice_for_each_ring(ring, q_vector->tx) - if (!ice_clean_tx_irq(vsi, ring, budget)) + if (!ice_clean_tx_irq(ring, budget)) clean_complete = false; /* Handle case where we are called by netpoll with a budget of 0 */ - if (budget <= 0) + if (unlikely(budget <= 0)) return budget; - /* We attempt to distribute budget to each Rx queue fairly, but don't - * allow the budget to go below 1 because that would exit polling early. - */ - if (q_vector->num_ring_rx) + /* normally we have 1 Rx ring per q_vector */ + if (unlikely(q_vector->num_ring_rx > 1)) + /* We attempt to distribute budget to each Rx queue fairly, but + * don't allow the budget to go below 1 because that would exit + * polling early. 
+ */ budget_per_ring = max(budget / q_vector->num_ring_rx, 1); + else + /* Max of 1 Rx ring in this q_vector so give it the budget */ + budget_per_ring = budget; ice_for_each_ring(ring, q_vector->rx) { int cleaned; @@ -1450,8 +1520,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * poll us due to busy-polling */ if (likely(napi_complete_done(napi, work_done))) - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_update_ena_itr(vsi, q_vector); + ice_update_ena_itr(q_vector); + else + ice_set_wb_on_itr(q_vector); return min_t(int, work_done, budget - 1); } @@ -1521,7 +1592,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, { u64 td_offset, td_tag, td_cmd; u16 i = tx_ring->next_to_use; - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned int data_len, size; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; @@ -1923,7 +1994,7 @@ static unsigned int ice_txd_use_count(unsigned int size) */ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int count = 0, size = skb_headlen(skb); @@ -1954,7 +2025,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb) */ static bool __ice_chk_linearize(struct sk_buff *skb) { - const struct skb_frag_struct *frag, *stale; + const skb_frag_t *frag, *stale; int nr_frags, sum; /* no need to check if number of frags is less than 7 */ @@ -2036,6 +2107,7 @@ static netdev_tx_t ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) { struct ice_tx_offload_params offload = { 0 }; + struct ice_vsi *vsi = tx_ring->vsi; struct ice_tx_buf *first; unsigned int count; int tso, csum; @@ -2083,7 +2155,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) if (csum < 0) goto out_drop; - if (tso || offload.cd_tunnel_params) { + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ + if (unlikely(skb->priority == TC_PRIO_CONTROL && + vsi->type == ICE_VSI_PF && + vsi->port_info->is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + ICE_TX_CTX_DESC_SWTCH_UPLINK << + ICE_TXD_CTX_QW1_CMD_S); + + if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; int i = tx_ring->next_to_use; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index ec76aba347b9..94a9280193e2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -144,6 +144,19 @@ enum ice_rx_dtype { #define ICE_DFLT_INTRL 0 #define ICE_MAX_INTRL 236 +#define ICE_WB_ON_ITR_USECS 2 +#define ICE_IN_WB_ON_ITR_MODE 255 +/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows + * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also, + * set the write-back latency to the usecs passed in. 
+ */ +#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \ + ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \ + GLINT_DYN_CTL_INTERVAL_M) | \ + (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \ + GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \ + GLINT_DYN_CTL_WB_ON_ITR_M) + /* Legacy or Advanced Mode Queue */ #define ICE_TX_ADVANCED 0 #define ICE_TX_LEGACY 1 diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 24bbef8bbe69..6667d17a4206 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -4,18 +4,19 @@ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 + #include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" #include "ice_lan_tx_rx.h" +#include "ice_flex_type.h" -#define ICE_BYTES_PER_WORD 2 -#define ICE_BYTES_PER_DWORD 4 - -static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) +static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { - return test_bit(tc, (unsigned long *)&bitmap); + return test_bit(tc, &bitmap); } /* Driver always calls main vsi_handle first */ @@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) #define ICE_DBG_LAN BIT_ULL(8) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) +#define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) #define ICE_DBG_AQ_CMD BIT_ULL(27) @@ -53,6 +55,14 @@ enum ice_aq_res_access_type { ICE_RES_WRITE }; +struct ice_driver_ver { + u8 major_ver; + u8 minor_ver; + u8 build_ver; + u8 subbuild_ver; + u8 driver_string[32]; +}; + enum ice_fc_mode { ICE_FC_NONE = 0, ICE_FC_RX_PAUSE, @@ -139,6 +149,9 @@ struct ice_phy_info { /* Common HW capabilities for SW use */ struct ice_hw_common_caps { u32 valid_functions; + /* DCB capabilities */ + u32 active_tc_bitmap; + u32 maxtc; /* Tx/Rx queues */ u16 num_rxq; /* Number/Total Rx queues */ @@ -219,6 +232,8 @@ struct ice_nvm_info { u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; +#define ICE_NVM_VER_LEN 32 + /* Max number of port to queue branches w.r.t topology */ #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS @@ -347,6 +362,8 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ + struct ice_sched_node * + sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ /* DCBX info */ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ @@ -454,6 +471,30 @@ struct ice_hw { u8 ucast_shared; /* true if VSIs can share unicast addr */ + /* Active package version (currently active) */ + struct ice_pkg_ver active_pkg_ver; + u8 active_pkg_name[ICE_PKG_NAME_SIZE]; + u8 active_pkg_in_nvm; + + enum ice_aq_err pkg_dwnld_status; + + /* Driver's package ver - (from the Metadata seg) */ + struct ice_pkg_ver pkg_ver; + u8 pkg_name[ICE_PKG_NAME_SIZE]; + + /* Driver's Ice package version (from the Ice seg) */ + struct ice_pkg_ver ice_pkg_ver; + u8 ice_pkg_name[ICE_PKG_NAME_SIZE]; + + /* Pointer to the ice segment */ + struct ice_seg *seg; + + /* Pointer to allocated copy of pkg memory */ + u8 *pkg_copy; + u32 pkg_size; + + /* HW block tables */ + struct ice_blk_info blk[ICE_BLK_COUNT]; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 
b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5d24b539648f..b45797f39b2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -129,7 +129,10 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->num_qs_ena) + ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); + else if (vf->link_forced) ice_set_pfe_link_forced(vf, &pfe, vf->link_up); else ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & @@ -252,6 +255,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) } /** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); + vf->num_qs_ena = 0; + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/** + * ice_dis_vf_qs - Disable the VF queues + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_qs(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + ice_set_vf_state_qs_dis(vf); +} + +/** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure */ @@ -267,19 +299,9 @@ void ice_free_vfs(struct ice_pf *pf) usleep_range(1000, 2000); /* Avoid wait time by stopping all VFs at the same time */ - for (i = 0; i < pf->num_alloc_vfs; i++) { - struct ice_vsi *vsi; - - if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) - continue; - - vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; - /* stop rings without wait time */ - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i); - ice_vsi_stop_rx_rings(vsi); - - clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); - } + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ice_dis_vf_qs(&pf->vf[i]); /* Disable IOV before freeing resources. This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -297,13 +319,6 @@ void ice_free_vfs(struct ice_pf *pf) if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { /* disable VF qp mappings */ ice_dis_vf_mappings(&pf->vf[i]); - - /* Set this state so that assigned VF vectors can be - * reclaimed by PF for reuse in ice_vsi_release(). No - * need to clear this bit since pf->vf array is being - * freed anyways after this for loop - */ - set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } @@ -341,12 +356,13 @@ void ice_free_vfs(struct ice_pf *pf) * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not + * @is_pfr: true if the reset was triggered due to a previous PFR * * Trigger hardware to start a reset for a particular VF. Expects the caller * to wait the proper amount of time to allow hardware to reset the VF before * it cleans up and restores VF functionality. 
*/ -static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; @@ -367,10 +383,13 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - /* Clear the VF's ARQLEN register. This is how the VF detects reset, - * since the VFGEN_RSTAT register doesn't stick at 0 after reset. + /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it + * in the case of VFR. If this is done for PFR, it can mess up VF + * resets because the VF driver may already have started cleanup + * by the time we get here. */ - wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + if (!is_pfr) + wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -389,12 +408,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) wr32(hw, PF_PCI_CIAA, VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); - for (i = 0; i < 100; i++) { + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { reg = rd32(hw, PF_PCI_CIAD); - if ((reg & VF_TRANS_PENDING_M) != 0) - dev_err(&pf->pdev->dev, - "VF %d PCI transactions stuck\n", vf->vf_id); - udelay(1); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } } @@ -481,19 +503,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) } /** - * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW + * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for * - * This returns the first MSIX vector index in HW that is used by this VF and - * this will always be the OICR index in the AVF driver so any functionality + * This returns the first MSIX vector index in PF space that is used by this VF. + * This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality * using vf->first_vector_idx for queue configuration will have to increment by * 1 to avoid meddling with the OICR index. */ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->hw.func_caps.common_cap.msix_vector_first_id + - pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; + return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; } /** @@ -543,7 +566,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, "could not add mac filters\n"); + dev_err(&pf->pdev->dev, + "could not add mac filters error %d\n", status); + else + vf->num_mac = 1; /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. @@ -551,7 +577,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * expect vector assignment to be changed unless there is a request for * more vectors. 
*/ - clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); ice_alloc_vsi_res_exit: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; @@ -567,20 +592,21 @@ static int ice_alloc_vf_res(struct ice_vf *vf) int tx_rx_queue_left; int status; - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - /* Update number of VF queues, in case VF had requested for queue * changes */ - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); tx_rx_queue_left += ICE_DFLT_QS_PER_VF; if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -605,27 +631,30 @@ ice_alloc_vf_res_exit: */ static void ice_ena_vf_mappings(struct ice_vf *vf) { + int abs_vf_id, abs_first, abs_last; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int first, last, v; struct ice_hw *hw; - int abs_vf_id; u32 reg; hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; last = (first + pf->num_vf_msix) - 1; + abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; + abs_last = (abs_first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* VF Vector allocation */ - reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | - ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | - ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S) + & VPINT_ALLOC_PCI_FIRST_M) | + ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ @@ -870,11 +899,11 @@ static int ice_check_avail_res(struct ice_pf *pf) * at runtime through Virtchnl, that is the reason we start by reserving * few queues. */ - num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); - num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); if (!num_txq || !num_rxq) return -EIO; @@ -983,6 +1012,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, } /** + * ice_config_res_vfs - Finalize allocation of VFs resources in one go + * @pf: pointer to the PF structure + * + * This function is being called as last part of resetting all VFs, or when + * configuring VFs for the first time, where there is no resource to be freed + * Returns true if resources were properly allocated for all VFs, and false + * otherwise. 
+ */ +static bool ice_config_res_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int v; + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* rearm global interrupts */ + if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + + /* Finish resetting each VF and allocate resources */ + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vf *vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(&pf->pdev->dev, + "VF-id %d has %d queues configured\n", + vf->vf_id, vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @is_vflr: true if VFLR was issued, false if not @@ -1010,18 +1080,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) /* Begin reset on all VFs at once */ for (v = 0; v < pf->num_alloc_vfs; v++) - ice_trigger_vf_reset(&pf->vf[v], is_vflr); + ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); for (v = 0; v < pf->num_alloc_vfs; v++) { struct ice_vsi *vsi; vf = &pf->vf[v]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); } /* HW requires some time to make sure it can flush the FIFO for a VF @@ -1031,7 +1100,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * finished resetting. */ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - usleep_range(10000, 20000); /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { @@ -1039,8 +1107,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { + /* only delay if the check failed */ + usleep_range(10, 20); break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1054,7 +1125,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) */ if (v < pf->num_alloc_vfs) dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); - usleep_range(10000, 20000); /* free VF resources to begin resetting the VSI state */ for (v = 0; v < pf->num_alloc_vfs; v++) { @@ -1074,25 +1144,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) dev_err(&pf->pdev->dev, "Failed to free MSIX resources used by SR-IOV\n"); - if (ice_check_avail_res(pf)) { - dev_err(&pf->pdev->dev, - "Cannot allocate VF resources, try with fewer number of VFs\n"); + if (!ice_config_res_vfs(pf)) return false; - } - - /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; - - vf->num_vf_qs = pf->num_vf_qps; - dev_dbg(&pf->pdev->dev, - "VF-id %d has %d queues configured\n", - vf->vf_id, vf->num_vf_qs); - ice_cleanup_and_realloc_vf(vf); - } - - ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1114,27 +1167,31 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. 
+ /* If the PF has been disabled, there is no need resetting VF until + * PF is active again. */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + if (test_bit(__ICE_VF_DIS, pf->state)) return false; - ice_trigger_vf_reset(vf, is_vflr); + /* If the VF has been disabled, this means something else is + * resetting the VF, so we shouldn't continue. Otherwise, set + * disable VF state bit for actual reset, and continue. + */ + if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states)) + return false; + + ice_trigger_vf_reset(vf, is_vflr, false); vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } else { - /* Call Disable LAN Tx queue AQ call even when queues are not - * enabled. This is needed for successful completiom of VFR - */ - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + + /* Call Disable LAN Tx queue AQ whether or not queues are + * enabled. This is needed for successful completion of VFR. + */ + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure @@ -1145,12 +1202,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) * poll the status register to make sure that the reset * completed successfully. */ - usleep_range(10000, 20000); reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (reg & VPGEN_VFRSTAT_VFRD_M) { rsd = true; break; } + + /* only sleep if the reset is not done */ + usleep_range(10, 20); } /* Display a warning if VF didn't manage to reset in time, but need to @@ -1160,8 +1219,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); - usleep_range(10000, 20000); - /* disable promiscuous modes in case they were enabled * ignore any error if disabling process failed */ @@ -1183,7 +1240,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_cleanup_and_realloc_vf(vf); ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1257,7 +1313,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - + set_bit(__ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); @@ -1283,19 +1339,16 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - - /* Set this state so that PF driver does VF vector assignment */ - set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; - /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, true)) { + /* VF resources get allocated with initialization */ + if (!ice_config_res_vfs(pf)) { ret = -EIO; goto err_unroll_sriov; } - goto err_unroll_intr; + return ret; err_unroll_sriov: pf->vf = NULL; @@ -1307,6 +1360,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); + clear_bit(__ICE_OICR_INTR_DIS, pf->state); return ret; } @@ -1389,6 +1443,12 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); + if (ice_is_safe_mode(pf)) { + dev_err(&pf->pdev->dev, + "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + if (num_vfs) return ice_pci_sriov_ena(pf, num_vfs); @@ -1490,10 +1550,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); - if (aq_ret) { + if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { dev_info(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.mailboxq.sq_last_status); + "Unable to send the message to VF %d ret %d aq_err %d\n", + vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); return -EIO; } @@ -1688,6 +1748,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) } /** + * ice_vc_isvalid_ring_len + * @ring_len: length of ring + * + * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE + * or zero + */ +static bool ice_vc_isvalid_ring_len(u16 ring_len) +{ + return ring_len == 0 || + (ring_len >= ICE_MIN_NUM_DESC && + ring_len <= ICE_MAX_NUM_DESC && + !(ring_len % ICE_REQ_DESC_MULTIPLE)); +} + +/** * ice_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1712,18 +1787,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1759,18 +1834,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ 
-1839,6 +1914,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1855,6 +1932,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1865,12 +1948,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - if (ice_vsi_start_rx_rings(vsi)) - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + q_map = vqs->rx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to enable Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + set_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena++; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = vqs->tx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->txq_ena)) + continue; + + set_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena++; + } /* Set flag to indicate that queues are enabled */ if (v_ret == VIRTCHNL_STATUS_SUCCESS) - set_bit(ICE_VF_STATE_ENA, vf->vf_states); + set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1893,9 +2012,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && - !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1910,29 +2031,81 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop tx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->tx_queues) { + q_map = vqs->tx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + struct ice_ring *ring = vsi->tx_rings[vf_q_id]; + struct ice_txq_meta txq_meta = { 0 }; + + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->txq_ena)) + continue; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, + ring, &txq_meta)) { + 
dev_err(&vsi->back->pdev->dev, + "Failed to stop Tx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena--; + } } - if (ice_vsi_stop_rx_rings(vsi)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop rx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->rx_queues) { + q_map = vqs->rx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena--; + } } /* Clear enabled queues flag */ - if (v_ret == VIRTCHNL_STATUS_SUCCESS) - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena) + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1962,12 +2135,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) irqmap_info = (struct virtchnl_irq_map_info *)msg; num_q_vectors_mapped = irqmap_info->num_vectors; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - /* Check to make sure number of VF vectors mapped is not greater than * number of VF vectors originally allocated, and check that * there is actually at least a single VF queue vector mapped @@ -1979,6 +2146,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + for (i = 0; i < num_q_vectors_mapped; i++) { struct ice_q_vector *q_vector; @@ -2056,6 +2229,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; + u16 num_rxq = 0, num_txq = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -2071,13 +2245,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; + } - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || + qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(&pf->pdev->dev, "VF-%d requesting more than supported number of queues: %d\n", - vf->vf_id, qci->num_queue_pairs); + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2087,37 +2264,52 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || + qpi->txq.headwb_enabled || + !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || + !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* copy Tx queue info from VF into VSI */ - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - 
vsi->tx_rings[i]->count = qpi->txq.ring_len; - /* copy Rx queue info from VF into VSI */ - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; - if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + if (qpi->txq.ring_len > 0) { + num_txq++; + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; } - vsi->rx_buf_len = qpi->rxq.databuffer_size; - if (qpi->rxq.max_pkt_size >= (16 * 1024) || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + + /* copy Rx queue info from VF into VSI */ + if (qpi->rxq.ring_len > 0) { + num_rxq++; + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + + if (qpi->rxq.databuffer_size != 0 && + (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || + qpi->rxq.databuffer_size < 1024)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } } + vsi->max_frame = qpi->rxq.max_pkt_size; } /* VF can request to configure less than allocated queues * or default allocated queues. So update the VSI with new number */ - vsi->num_txq = qci->num_queue_pairs; - vsi->num_rxq = qci->num_queue_pairs; + vsi->num_txq = num_txq; + vsi->num_rxq = num_rxq; /* All queues of VF VSI are in TC 0 */ - vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; - vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_tx = num_txq; + vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq; if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; @@ -2171,7 +2363,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - LIST_HEAD(mac_list); + enum ice_status status; struct ice_vsi *vsi; int mac_count = 0; int i; @@ -2245,33 +2437,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) goto handle_mac_exit; } - /* get here if maddr is multicast or if VF can change MAC */ - if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + /* program the updated filter list */ + status = ice_vsi_cfg_mac_fltr(vsi, maddr, set); + if (status == ICE_ERR_DOES_NOT_EXIST || + status == ICE_ERR_ALREADY_EXISTS) { + dev_info(&pf->pdev->dev, + "can't %s MAC filters %pM for VF %d, error %d\n", + set ? "add" : "remove", maddr, vf->vf_id, + status); + } else if (status) { + dev_err(&pf->pdev->dev, + "can't %s MAC filters for VF %d, error %d\n", + set ? 
"add" : "remove", vf->vf_id, status); + v_ret = ice_err_to_virt_err(status); goto handle_mac_exit; } + mac_count++; } - /* program the updated filter list */ + /* Track number of MAC filters programmed for the VF VSI */ if (set) - v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); + vf->num_mac += mac_count; else - v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - - if (v_ret) { - dev_err(&pf->pdev->dev, - "can't update MAC filters for VF %d, error %d\n", - vf->vf_id, v_ret); - } else { - if (set) - vf->num_mac += mac_count; - else - vf->num_mac -= mac_count; - } + vf->num_mac -= mac_count; handle_mac_exit: - ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } @@ -2315,11 +2506,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - int req_queues = vfres->num_queue_pairs; + u16 req_queues = vfres->num_queue_pairs; struct ice_pf *pf = vf->pf; - int max_allowed_vf_queues; - int tx_rx_queue_left; - int cur_queues; + u16 max_allowed_vf_queues; + u16 tx_rx_queue_left; + u16 cur_queues; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2327,29 +2518,31 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) } cur_queues = vf->num_vf_qs; - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; - if (req_queues <= 0) { + if (!req_queues) { dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. Ignoring.\n", - vf->vf_id, req_queues); + "VF %d tried to request 0 queues. 
Ignoring.\n", + vf->vf_id); } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, ICE_MAX_BASE_QS_PER_VF); vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; - } else if (req_queues - cur_queues > tx_rx_queue_left) { + } else if (req_queues > cur_queues && + req_queues - cur_queues > tx_rx_queue_left) { dev_warn(&pf->pdev->dev, - "VF %d requested %d more queues, but only %d left.\n", + "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; ice_vc_dis_vf(vf); dev_info(&pf->pdev->dev, - "VF %d granted request of %d queues.\n", + "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; } @@ -2589,8 +2782,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) } vf->num_vlan--; - /* Disable VLAN pruning when removing VLAN */ - ice_cfg_vlan_pruning(vsi, false, false); + /* Disable VLAN pruning when the last VLAN is removed */ + if (!vf->num_vlan) + ice_cfg_vlan_pruning(vsi, false, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ if (vlan_promisc) { @@ -2731,20 +2925,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) err = -EPERM; else err = -EINVAL; - goto error_handler; - } - - /* Perform additional checks specific to RSS and Virtchnl */ - if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { - struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; - - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) - err = -EINVAL; - } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { - struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; - - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) - err = -EINVAL; } error_handler: @@ -2762,6 +2942,7 @@ error_handler: break; case VIRTCHNL_OP_GET_VF_RESOURCES: err = ice_vc_get_vf_res_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: ice_vc_reset_vf_msg(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index c3ca522c245a..0d9880c8bba3 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -15,26 +15,38 @@ #define ICE_MAX_MACADDR_PER_VF 12 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 #define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 +#define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ #define VF_DEVICE_STATUS 0xAA #define VF_TRANS_PENDING_M 0x20 +/* wait defines for polling PF_PCI_CIAD register status */ +#define ICE_PCI_CIAD_WAIT_COUNT 100 +#define ICE_PCI_CIAD_WAIT_DELAY_US 1 + +/* VF resources default values and limitation */ +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_NONQ_VECS_VF 1 +#define ICE_MAX_SCATTER_QS_PER_VF 16 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define ICE_MAX_INTR_PER_VF 65 +#define ICE_MAX_POLICY_INTR_PER_VF 33 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) + /* Specific VF states */ enum ice_vf_states { - ICE_VF_STATE_INIT = 0, - ICE_VF_STATE_ACTIVE, - ICE_VF_STATE_ENA, + ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ + 
ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */ ICE_VF_STATE_DIS, ICE_VF_STATE_MC_PROMISC, ICE_VF_STATE_UC_PROMISC, - /* state to indicate if PF needs to do vector assignment for VF. - * This needs to be set during first time VF initialization or later - * when VF asks for more Vectors through virtchnl OP. - */ - ICE_VF_STATE_CFG_INTR, ICE_VF_STATES_NBITS }; @@ -50,11 +62,14 @@ struct ice_vf { s16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ - int first_vector_idx; /* first vector index of this VF */ + /* first vector index of this VF in the PF space */ + int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; u32 driver_caps; /* reported by VF driver */ struct virtchnl_ether_addr dflt_lan_addr; + DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF); + DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF); u16 port_vlan_id; u8 pf_set_mac:1; /* VF MAC address set by VMM admin */ u8 trusted:1; @@ -77,6 +92,7 @@ struct ice_vf { u16 num_mac; u16 num_vlan; u16 num_vf_qs; /* num of queue configured per VF */ + u16 num_qs_ena; /* total num of Tx/Rx queue enabled */ }; #ifdef CONFIG_PCI_IOV @@ -103,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); + +void ice_set_vf_state_qs_dis(struct ice_vf *vf); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) #define ice_vc_process_vf_msg(pf, event) do {} while (0) #define ice_vc_notify_link_state(pf) do {} while (0) #define ice_vc_notify_reset(pf) do {} while (0) +#define ice_set_vf_state_qs_dis(vf) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b4df3e319467..105b0624081a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4731,8 +4731,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) { u16 i = rx_ring->next_to_clean; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -5918,7 +5917,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb = first->skb; struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -6074,7 +6073,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -8879,8 +8879,7 @@ static int __maybe_unused igb_resume(struct device *dev) static int __maybe_unused igb_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); if (!igb_has_link(adapter)) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c 
b/drivers/net/ethernet/intel/igbvf/netdev.c index 34cd30d7162f..0f2b68f4bb0f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, goto dma_error; for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { - const struct skb_frag_struct *frag; + const skb_frag_t *frag; count++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 0f5534ce27b0..7e16345d836e 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -135,6 +135,9 @@ extern char igc_driver_version[]; /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 + /* igc_test_staterr - tests bits within Rx descriptor status and error fields */ static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, const u32 stat_err_bits) @@ -254,6 +257,7 @@ struct igc_ring { u16 count; /* number of desc. in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ /* everything past this point are written often */ u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 59258d791106..db289bcce21d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) ctrl = rd32(IGC_CTRL); hw_dbg("Issuing a global reset to MAC\n"); - wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST); ret_val = igc_get_auto_rd_done(hw); if (ret_val) { @@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) switch (hw->device_id) { case IGC_DEV_ID_I225_LM: case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: mac->type = igc_i225; break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 58d1109d7f3f..ea627ce52525 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -22,6 +22,14 @@ union igc_adv_tx_desc { } wb; }; +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + /* Adv Transmit Descriptor Config Masks */ #define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ #define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index fc0ccfe38a20..f3f2325fe567 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -10,10 +10,6 @@ #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ -/* PCI Bus Info */ -#define PCIE_DEVICE_CONTROL2 0x28 -#define PCIE_DEVICE_CONTROL2_16ms 0x0005 - /* Physical Func Reset Done Indication */ #define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 @@ -54,7 +50,7 @@ #define IGC_ERR_SWFW_SYNC 13 /* Device Control */ -#define IGC_CTRL_RST 0x04000000 /* Global reset */ +#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ #define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define IGC_CTRL_SLU 0x00000040 /* Set link 
up (Force Link) */ @@ -401,4 +397,9 @@ #define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define IGC_VLAPQF_QUEUE_MASK 0x03 +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 1039a224ac80..abb2d72911ff 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -18,6 +18,9 @@ #define IGC_DEV_ID_I225_LM 0x15F2 #define IGC_DEV_ID_I225_V 0x15F3 +#define IGC_DEV_ID_I225_I 0x15F8 +#define IGC_DEV_ID_I220_V 0x15F7 +#define IGC_DEV_ID_I225_K 0x3100 #define IGC_FUNC_0 0 @@ -151,16 +154,10 @@ struct igc_phy_info { u16 autoneg_advertised; u16 autoneg_mask; - u16 cable_length; - u16 max_cable_length; - u16 min_cable_length; - u16 pair_length[4]; u8 mdix; - bool disable_polarity_correction; bool is_mdix; - bool polarity_correction; bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; @@ -190,12 +187,7 @@ struct igc_fc_info { }; struct igc_dev_spec_base { - bool global_device_reset; - bool eee_disable; bool clear_semaphore_once; - bool module_plugged; - u8 media_port; - bool mas_capable; }; struct igc_hw { diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index ba4646737288..5eeb4c8caf4a 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -7,9 +7,6 @@ #include "igc_mac.h" #include "igc_hw.h" -/* forward declaration */ -static s32 igc_set_fc_watermarks(struct igc_hw *hw); - /** * igc_disable_pcie_master - Disables PCI-express master access * @hw: pointer to the HW structure @@ -75,6 +72,41 @@ void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) } /** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** * igc_setup_link - Setup flow control and link settings * @hw: pointer to the HW structure * @@ -195,41 +227,6 @@ out: } /** - * igc_set_fc_watermarks - Set flow control high/low watermarks - * @hw: pointer to the HW structure - * - * Sets the flow control high/low threshold (watermark) registers. If - * flow control XON frame transmission is enabled, then set XON frame - * transmission as well. 
- */ -static s32 igc_set_fc_watermarks(struct igc_hw *hw) -{ - u32 fcrtl = 0, fcrth = 0; - - /* Set the flow control receive threshold registers. Normally, - * these registers will be set to a default threshold that may be - * adjusted later by the driver's runtime code. However, if the - * ability to transmit pause frames is not enabled, then these - * registers will be set to 0. - */ - if (hw->fc.current_mode & igc_fc_tx_pause) { - /* We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of - * XON frames. - */ - fcrtl = hw->fc.low_water; - if (hw->fc.send_xon) - fcrtl |= IGC_FCRTL_XONE; - - fcrth = hw->fc.high_water; - } - wr32(IGC_FCRTL, fcrtl); - wr32(IGC_FCRTH, fcrth); - - return 0; -} - -/** * igc_clear_hw_cntrs_base - Clear base hardware counters * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index aa9323e55406..63b62d74f961 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -5,6 +5,11 @@ #include <linux/types.h> #include <linux/if_vlan.h> #include <linux/aer.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/ip.h> + +#include <net/ipv6.h> #include "igc.h" #include "igc_hw.h" @@ -36,6 +41,9 @@ static const struct igc_info *igc_info_tbl[] = { static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, /* required last entry */ {0, } }; @@ -349,8 +357,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring) { u16 i = rx_ring->next_to_clean; - if (rx_ring->skb) - dev_kfree_skb(rx_ring->skb); + dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; /* Free all the Rx ring sk_buffs */ @@ -788,8 +795,96 @@ static int igc_set_mac(struct net_device *netdev, void *p) return 0; } +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid Tx time available. Invalid times + * should have been handled by the upper layers. 
+ */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + first->skb->tstamp = 0; + context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->launch_time = 0; + } +} + +static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) { + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if ((first->protocol == htons(ETH_P_IP) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + (first->protocol == htons(ETH_P_IPV6) && + igc_ipv6_csum_is_sctp(skb))) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -861,7 +956,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, struct igc_tx_buffer *tx_buffer; union igc_adv_tx_desc *tx_desc; u32 tx_flags = first->tx_flags; - struct skb_frag_struct *frag; + skb_frag_t *frag; u16 i = tx_ring->next_to_use; unsigned int data_len, size; dma_addr_t dma; @@ -1015,7 +1110,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (igc_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -4113,6 +4209,9 @@ static int igc_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_HW_CSUM; + /* setup the private structure */ err = igc_sw_init(adapter); if (err) @@ -4120,6 +4219,7 @@ static int igc_probe(struct pci_dev *pdev, /* copy netdev features into list of user selectable features */ netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= netdev->features; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; @@ -4130,6 +4230,15 @@ static int igc_probe(struct pci_dev *pdev, */ hw->mac.ops.reset_hw(hw); + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { /* copy the MAC address out of the NVM */ if (hw->mac.ops.read_mac_addr(hw)) diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 4c8f96a9a148..f4b05af0dd2f 100644 --- 
a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -3,10 +3,6 @@ #include "igc_phy.h" -/* forward declaration */ -static s32 igc_phy_setup_autoneg(struct igc_hw *hw); -static s32 igc_wait_autoneg(struct igc_hw *hw); - /** * igc_check_reset_block - Check if PHY reset is blocked * @hw: pointer to the HW structure @@ -208,100 +204,6 @@ out: } /** - * igc_copper_link_autoneg - Setup/Enable autoneg for copper link - * @hw: pointer to the HW structure - * - * Performs initial bounds checking on autoneg advertisement parameter, then - * configure to advertise the full capability. Setup the PHY to autoneg - * and restart the negotiation process between the link partner. If - * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. - */ -static s32 igc_copper_link_autoneg(struct igc_hw *hw) -{ - struct igc_phy_info *phy = &hw->phy; - u16 phy_ctrl; - s32 ret_val; - - /* Perform some bounds checking on the autoneg advertisement - * parameter. - */ - phy->autoneg_advertised &= phy->autoneg_mask; - - /* If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if (phy->autoneg_advertised == 0) - phy->autoneg_advertised = phy->autoneg_mask; - - hw_dbg("Reconfiguring auto-neg advertisement params\n"); - ret_val = igc_phy_setup_autoneg(hw); - if (ret_val) { - hw_dbg("Error Setting up Auto-Negotiation\n"); - goto out; - } - hw_dbg("Restarting Auto-Neg\n"); - - /* Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); - if (ret_val) - goto out; - - phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); - if (ret_val) - goto out; - - /* Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if (phy->autoneg_wait_to_complete) { - ret_val = igc_wait_autoneg(hw); - if (ret_val) { - hw_dbg("Error while waiting for autoneg to complete\n"); - goto out; - } - } - - hw->mac.get_link_status = true; - -out: - return ret_val; -} - -/** - * igc_wait_autoneg - Wait for auto-neg completion - * @hw: pointer to the HW structure - * - * Waits for auto-negotiation to complete or for the auto-negotiation time - * limit to expire, which ever happens first. - */ -static s32 igc_wait_autoneg(struct igc_hw *hw) -{ - u16 i, phy_status; - s32 ret_val = 0; - - /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ - for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); - if (ret_val) - break; - if (phy_status & MII_SR_AUTONEG_COMPLETE) - break; - msleep(100); - } - - /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation - * has completed. - */ - return ret_val; -} - -/** * igc_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * @@ -486,6 +388,100 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) } /** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. 
+ */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). 
+ */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** * igc_setup_copper_link - Configure copper link settings * @hw: pointer to the HW structure * diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index e5ac2d3fd816..0940a0da16f2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, } for (f = 0; f < nr_frags; f++) { - const struct skb_frag_struct *frag; - - frag = &skb_shinfo(skb)->frags[f]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; len = skb_frag_size(frag); offset = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 50dfb02fa34c..171cdc552961 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -190,22 +190,12 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = { void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) { const char *name = pci_name(adapter->pdev); - struct dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); - if (adapter->ixgbe_dbg_adapter) { - pfile = debugfs_create_file("reg_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_reg_ops_fops); - if (!pfile) - e_dev_err("debugfs reg_ops for %s failed\n", name); - pfile = debugfs_create_file("netdev_ops", 0600, - adapter->ixgbe_dbg_adapter, adapter, - &ixgbe_dbg_netdev_ops_fops); - if (!pfile) - e_dev_err("debugfs netdev_ops for %s failed\n", name); - } else { - e_dev_err("debugfs entry for %s failed\n", name); - } + debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter, + adapter, &ixgbe_dbg_reg_ops_fops); + debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter, + adapter, &ixgbe_dbg_netdev_ops_fops); } /** @@ -224,8 +214,6 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) void ixgbe_dbg_init(void) { ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); - if (ixgbe_dbg_root == NULL) - pr_err("init of debugfs failed\n"); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 31629fc7e820..113f6087c7c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -960,11 +960,9 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; err_aead: - memset(xs->aead, 0, sizeof(*xs->aead)); - kfree(xs->aead); + kzfree(xs->aead); err_xs: - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); err_out: msgbuf[1] = err; return err; @@ -1049,8 +1047,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ixgbe_ipsec_del_sa(xs); /* remove the xs that was made-up in the add request */ - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 51c696b6bf64..1ce2397306b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1786,7 +1786,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, static void ixgbe_pull_tail(struct ixgbe_ring 
*rx_ring, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len; @@ -1808,7 +1808,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, /* update all of the pointers */ skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; + skb_frag_off_add(frag, pull_len); skb->data_len -= pull_len; skb->tail += pull_len; } @@ -1826,13 +1826,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IXGBE_RX_DMA_ATTR); - } else if (ring_uses_build_skb(rx_ring)) { + if (ring_uses_build_skb(rx_ring)) { unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1841,14 +1835,22 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, skb_headlen(skb), DMA_FROM_DEVICE); } else { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); } + + /* If the page was released, just unmap it. */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + } } /** @@ -8186,7 +8188,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -8605,7 +8607,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; @@ -8748,7 +8751,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, if (skb_put_padto(skb, 17)) return NETDEV_TX_OK; - tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + tx_ring = ring ? 
ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) return NETDEV_TX_BUSY; @@ -9490,6 +9493,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, jump->mat = nexthdr[i].jump; adapter->jump_tables[link_uhtid] = jump; break; + } else { + kfree(mask); + kfree(input); + kfree(jump); } } return 0; @@ -10262,7 +10269,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) if (need_reset && prog) for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->xdp_ring[i]->xsk_umem) - (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + (void)ixgbe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX); return 0; } @@ -10381,7 +10389,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, }; static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index d93a690aff74..6d01700b46bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -42,7 +42,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget); -int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); #endif /* #define _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index a3b6d8c89127..100ac89b345d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -100,7 +100,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, ixgbe_txrx_ring_enable(adapter, qid); /* Kick start the NAPI context so that receiving will start */ - err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); if (err) return err; } @@ -143,15 +143,20 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { + struct xdp_umem *umem = rx_ring->xsk_umem; int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; + u64 offset; u32 act; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); - xdp->handle += xdp->data - xdp->data_hard_start; + offset = xdp->data - xdp->data_hard_start; + + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); + switch (act) { case XDP_PASS: break; @@ -201,8 +206,6 @@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *obi) { - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; u16 nta = rx_ring->next_to_alloc; struct ixgbe_rx_buffer *nbi; @@ -212,14 +215,9 @@ static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - nbi->dma = obi->dma & mask; - nbi->dma += hr; - - nbi->addr = (void *)((unsigned long)obi->addr & mask); - nbi->addr += hr; - - nbi->handle = obi->handle & mask; - nbi->handle += rx_ring->xsk_umem->headroom; + nbi->dma = obi->dma; + nbi->addr = obi->addr; + nbi->handle = obi->handle; obi->addr = NULL; obi->skb = NULL; @@ -250,7 +248,8 @@ void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); bi->addr += hr; - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, + rx_ring->xsk_umem->headroom); } static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, @@ -276,7 +275,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr(umem); return true; @@ -303,7 +302,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, bi->addr = xdp_umem_get_data(umem, handle); bi->addr += hr; - bi->handle = handle + umem->headroom; + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); xsk_umem_discard_addr_rq(umem); return true; @@ -547,6 +546,14 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); + + return (int)total_rx_packets; + } return failure ? 
budget : (int)total_rx_packets; } @@ -615,6 +622,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) if (tx_desc) { ixgbe_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -682,10 +691,17 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { + if (tx_ring->next_to_clean == tx_ring->next_to_use) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); + else + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); + } + return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } -int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 72872d6ca80c..076f2da36f27 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2262,12 +2262,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_14, - ixgbe_mbox_api_13, - ixgbe_mbox_api_12, - ixgbe_mbox_api_11, - ixgbe_mbox_api_10, - ixgbe_mbox_api_unknown }; + static const int api[] = { + ixgbe_mbox_api_14, + ixgbe_mbox_api_13, + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown + }; int err, idx = 0; spin_lock_bh(&adapter->mbx_lock); @@ -2518,6 +2520,7 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) msleep(1); ixgbevf_down(adapter); + pci_set_master(adapter->pdev); ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); @@ -3950,7 +3953,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb = first->skb; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - struct skb_frag_struct *frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -4135,8 +4138,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag)); + } #else count += skb_shinfo(skb)->nr_frags; #endif |
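
The igc context-descriptor hunk above derives the descriptor's launch_time field from the skb's transmit timestamp by taking the nanosecond part of the timestamp and dividing by 32, i.e. the field is expressed in 32 ns units. A minimal userspace sketch of just that arithmetic, using plain C types instead of the kernel's ktime/timespec64 helpers (the helper name below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Split a nanosecond timestamp the way ns_to_timespec64() would and
 * express the sub-second part in 32 ns units, as the igc hunk does when
 * filling the advanced context descriptor's launch_time field. */
static uint32_t launch_time_from_ns(uint64_t tstamp_ns)
{
        uint64_t tv_nsec = tstamp_ns % NSEC_PER_SEC;    /* sub-second remainder */

        return (uint32_t)(tv_nsec / 32);                /* 32 ns granularity */
}

int main(void)
{
        uint64_t tstamp = 5ULL * NSEC_PER_SEC + 123456; /* 5.000123456 s */

        printf("launch_time field: %u\n", launch_time_from_ns(tstamp));
        return 0;
}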
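
The new igc_tx_csum() identifies the L4 protocol purely from skb->csum_offset, i.e. from where the checksum field sits inside the L4 header, and falls back to skb_checksum_help() for anything it cannot offload. The sketch below shows that dispatch idea as a simplified classification; the header structs are illustrative stand-ins rather than the kernel's definitions, and the driver's actual fall-through control flow (TCP sharing the UDP break) is not reproduced:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified L4 headers; only the position of the checksum field matters. */
struct tcp_hdr { uint16_t source, dest; uint32_t seq, ack_seq; uint16_t flags, window, check, urg_ptr; };
struct udp_hdr { uint16_t source, dest, len, check; };
struct sctp_hdr { uint16_t source, dest; uint32_t vtag, checksum; };

enum l4_offload { OFFLOAD_TCP, OFFLOAD_UDP, OFFLOAD_SCTP, OFFLOAD_NONE };

/* Mirror the switch in the igc hunk: the checksum-field offset alone
 * selects the offload type; anything else goes to software checksumming. */
static enum l4_offload classify(size_t csum_offset)
{
        switch (csum_offset) {
        case offsetof(struct tcp_hdr, check):
                return OFFLOAD_TCP;
        case offsetof(struct udp_hdr, check):
                return OFFLOAD_UDP;
        case offsetof(struct sctp_hdr, checksum):
                return OFFLOAD_SCTP;
        default:
                return OFFLOAD_NONE;    /* driver would call skb_checksum_help() */
        }
}

int main(void)
{
        printf("tcp   -> %d\n", classify(offsetof(struct tcp_hdr, check)));
        printf("udp   -> %d\n", classify(offsetof(struct udp_hdr, check)));
        printf("sctp  -> %d\n", classify(offsetof(struct sctp_hdr, checksum)));
        printf("other -> %d\n", classify(2));
        return 0;
}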
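
Several hunks replace direct reads of frags[f].size with skb_frag_size() when estimating how many Tx descriptors a frame needs before the ring is checked for space. The estimate itself is a ceiling division of each fragment length by the per-descriptor data limit. A small sketch of that counting, with made-up fragment sizes and an assumed 16 KiB per-descriptor limit (the driver's real constant is not restated here):

#include <stdio.h>

#define MAX_DATA_PER_TXD 16384u        /* assumed per-descriptor data limit */
#define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
        /* Hypothetical fragment lengths of one skb, in bytes. */
        unsigned int frag_size[] = { 1500, 32768, 9000 };
        unsigned int count = 0;

        for (unsigned int f = 0; f < sizeof(frag_size) / sizeof(frag_size[0]); f++)
                count += TXD_USE_COUNT(frag_size[f]);   /* ceiling division */

        printf("descriptors needed for frags: %u\n", count);    /* 1 + 2 + 1 = 4 */
        return 0;
}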
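
igc_wait_autoneg(), moved below igc_phy_setup_autoneg() so the forward declarations could be dropped, is a bounded polling loop: read the PHY status register twice (MII status bits are latching, so a single read can report stale state), test the completion bit, and sleep between attempts; expiry without the bit set is not treated as an error. A generic userspace sketch of that pattern, with a stubbed status source standing in for the PHY register read:

#include <stdio.h>
#include <unistd.h>

#define POLL_LIMIT      10
#define COMPLETE_BIT    0x20
static int fake_reads;                  /* stub: pretend completion after a few reads */

/* Stand-in for the PHY status register read; returns 0 on success. */
static int read_status(unsigned int *status)
{
        *status = (++fake_reads >= 6) ? COMPLETE_BIT : 0;
        return 0;
}

/* Bounded poll mirroring the double-read-then-check structure of the
 * driver's wait loop; running out of attempts is not an error, matching
 * the comment in the hunk above. */
static int wait_for_completion_bit(void)
{
        unsigned int status;

        for (int i = POLL_LIMIT; i > 0; i--) {
                if (read_status(&status))
                        return -1;
                if (read_status(&status))       /* second read: latched bits */
                        return -1;
                if (status & COMPLETE_BIT)
                        break;
                usleep(100 * 1000);             /* ~100 ms, like msleep(100) */
        }
        return 0;
}

int main(void)
{
        printf("wait result: %d (reads issued: %d)\n",
               wait_for_completion_bit(), fake_reads);
        return 0;
}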
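
The ixgbe_ipsec hunks collapse memset()-then-kfree() pairs into kzfree(), which zeroes an allocation before freeing it so key material does not linger in freed memory. There is no exact userspace equivalent; the analogue below illustrates the same intent, and zero_then_free() is a hypothetical helper, not a libc function:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct sa_state {
        unsigned char key[32];          /* sensitive material */
        unsigned int spi;
};

/* Userspace analogue of the kzfree() pattern: wipe, then free. A plain
 * memset() right before free() can be optimized away as a dead store,
 * so the wipe goes through a volatile pointer. */
static void zero_then_free(void *ptr, size_t len)
{
        if (!ptr)
                return;
        volatile unsigned char *p = ptr;
        while (len--)
                *p++ = 0;
        free(ptr);
}

int main(void)
{
        struct sa_state *sa = calloc(1, sizeof(*sa));

        if (!sa)
                return 1;
        memset(sa->key, 0xaa, sizeof(sa->key));
        sa->spi = 0x1234;
        zero_then_free(sa, sizeof(*sa));        /* instead of memset() + free() */
        printf("freed SA state after wiping it\n");
        return 0;
}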
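
The ixgbe_xsk.c hunks stop open-coding UMEM handle arithmetic (masking with chunk_mask and re-adding headroom) and route every adjustment through xsk_umem_adjust_offset(). As I read the AF_XDP work this tracks, the reason is that with unaligned chunks a handle may carry its offset in the upper bits of the 64-bit value rather than by plain addition, so one helper has to pick the right encoding. The model below is a simplified illustration of that idea only; the shift value and flag are assumptions, not the kernel's constants:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define UNALIGNED_OFFSET_SHIFT  48      /* assumed: offset packed into upper bits */

struct umem_model {
        bool unaligned_chunks;          /* stands in for an unaligned-chunk flag */
};

/* Aligned mode adds the offset to the address; unaligned mode keeps the
 * base address intact and records the offset in the handle's upper bits. */
static uint64_t adjust_offset(const struct umem_model *umem, uint64_t handle,
                              uint64_t offset)
{
        if (umem->unaligned_chunks)
                return handle + (offset << UNALIGNED_OFFSET_SHIFT);
        return handle + offset;
}

int main(void)
{
        struct umem_model aligned = { .unaligned_chunks = false };
        struct umem_model unaligned = { .unaligned_chunks = true };
        uint64_t handle = 0x4000;       /* hypothetical chunk address */

        printf("aligned:   0x%llx\n",
               (unsigned long long)adjust_offset(&aligned, handle, 256));
        printf("unaligned: 0x%llx\n",
               (unsigned long long)adjust_offset(&unaligned, handle, 256));
        return 0;
}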
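
The rename from ndo_xsk_async_xmit to ndo_xsk_wakeup, together with the xsk_set/clear_*_need_wakeup() calls added to the clean routines, implements the AF_XDP need_wakeup handshake: the driver raises a per-ring flag whenever it needs the application to issue a syscall to make progress (ring drained or an allocation failure), and clears it while it can keep running on its own, so the application only pays for syscalls when they are useful. A toy model of that handshake follows; the ring and flag layout are invented for illustration and mirror the Rx-side condition in the hunk above:

#include <stdbool.h>
#include <stdio.h>

struct ring_model {
        unsigned int next_to_use;
        unsigned int next_to_clean;
        bool need_wakeup;               /* advertised to the application */
};

/* Driver side: after cleaning, decide whether the application must kick
 * us again (ring fully drained or a buffer-allocation failure occurred). */
static void update_need_wakeup(struct ring_model *ring, bool failure)
{
        if (failure || ring->next_to_clean == ring->next_to_use)
                ring->need_wakeup = true;
        else
                ring->need_wakeup = false;
}

/* Application side: only issue the wakeup syscall when the flag says so. */
static void app_poll(const struct ring_model *ring)
{
        if (ring->need_wakeup)
                printf("app: flag set, issuing wakeup syscall\n");
        else
                printf("app: driver still busy, no syscall needed\n");
}

int main(void)
{
        struct ring_model ring = { .next_to_use = 8, .next_to_clean = 4 };

        update_need_wakeup(&ring, false);
        app_poll(&ring);                /* no syscall: work remains */

        ring.next_to_clean = 8;
        update_need_wakeup(&ring, false);
        app_poll(&ring);                /* syscall: ring drained */
        return 0;
}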