Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.c	735
1 file changed, 530 insertions, 205 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b7e8b7883c8..fa147865e33f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
 #include "bnxt_debugfs.h"
 
 #define BNXT_TX_TIMEOUT		(5 * HZ)
+#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -254,6 +255,7 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
 	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
 };
 
 static struct workqueue_struct *bnxt_pf_wq;
@@ -1172,7 +1174,10 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
 	if (!rxr->bnapi->in_reset) {
 		rxr->bnapi->in_reset = true;
-		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		else
+			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
 		bnxt_queue_sp_work(bp);
 	}
 	rxr->rx_next_cons = 0xffff;
@@ -1738,8 +1743,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	if (unlikely(cons != rxr->rx_next_cons)) {
 		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
 
-		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
-			    cons, rxr->rx_next_cons);
+		/* 0xffff is forced error, don't print it */
+		if (rxr->rx_next_cons != 0xffff)
+			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+				    cons, rxr->rx_next_cons);
 		bnxt_sched_reset(bp, rxr);
 		return rc1;
 	}
@@ -1772,9 +1779,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		rc = -EIO;
 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
 			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
-			if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
-				netdev_warn(bp->dev, "RX buffer error %x\n",
-					    rx_err);
+			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
+				netdev_warn_once(bp->dev, "RX buffer error %x\n",
+						 rx_err);
 				bnxt_sched_reset(bp, rxr);
 			}
 		}
@@ -1941,19 +1949,43 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
 	return val;
 }
 
+static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
+{
+	int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		u16 grp_idx = bp->rx_ring[i].bnapi->index;
+		struct bnxt_ring_grp_info *grp_info;
+
+		grp_info = &bp->grp_info[grp_idx];
+		if (grp_info->agg_fw_ring_id == ring_id)
+			return grp_idx;
+	}
+	return INVALID_HW_RING_ID;
+}
+
 #define BNXT_GET_EVENT_PORT(data)	\
 	((data) &			\
 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
 
+#define BNXT_EVENT_RING_TYPE(data2)	\
+	((data2) &			\
+	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
+
+#define BNXT_EVENT_RING_TYPE_RX(data2)	\
+	(BNXT_EVENT_RING_TYPE(data2) ==	\
+	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+
 static int bnxt_async_event_process(struct bnxt *bp,
 				    struct hwrm_async_event_cmpl *cmpl)
 {
 	u16 event_id = le16_to_cpu(cmpl->event_id);
+	u32 data1 = le32_to_cpu(cmpl->event_data1);
+	u32 data2 = le32_to_cpu(cmpl->event_data2);
 
 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
 	switch (event_id) {
 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
-		u32 data1 = le32_to_cpu(cmpl->event_data1);
 		struct bnxt_link_info *link_info = &bp->link_info;
 
 		if (BNXT_VF(bp))
@@ -1983,7 +2015,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
 		break;
 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
-		u32 data1 = le32_to_cpu(cmpl->event_data1);
 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
 
 		if (BNXT_VF(bp))
@@ -2000,9 +2031,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			goto async_event_process_exit;
 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
 		break;
-	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
-		u32 data1 = le32_to_cpu(cmpl->event_data1);
-
+	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+		if (netif_msg_hw(bp))
+			netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
+				    data1, data2);
 		if (!bp->fw_health)
 			goto async_event_process_exit;
 
@@ -2022,10 +2054,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		}
 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
 		break;
-	}
 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
 		struct bnxt_fw_health *fw_health = bp->fw_health;
-		u32 data1 = le32_to_cpu(cmpl->event_data1);
 
 		if (!fw_health)
 			goto async_event_process_exit;
@@ -2052,6 +2082,28 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
 		goto async_event_process_exit;
 	}
+	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+		struct bnxt_rx_ring_info *rxr;
+		u16 grp_idx;
+
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			goto async_event_process_exit;
+
+		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
+			    BNXT_EVENT_RING_TYPE(data2), data1);
+		if (!BNXT_EVENT_RING_TYPE_RX(data2))
+			goto async_event_process_exit;
+
+		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
+		if (grp_idx == INVALID_HW_RING_ID) {
+			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
+				    data1);
+			goto async_event_process_exit;
+		}
+		rxr = bp->bnapi[grp_idx]->rx_ring;
+		bnxt_sched_reset(bp, rxr);
+		goto async_event_process_exit;
+	}
 	default:
 		goto async_event_process_exit;
 	}
@@ -2250,7 +2302,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
 		bnapi->tx_pkts = 0;
 	}
 
-	if (bnapi->events & BNXT_RX_EVENT) {
+	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
 		if (bnapi->events & BNXT_AGG_EVENT)
@@ -2540,93 +2592,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }
 
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 {
-	int i, max_idx, max_agg_idx;
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct pci_dev *pdev = bp->pdev;
-
-	if (!bp->rx_ring)
-		return;
+	struct bnxt_tpa_idx_map *map;
+	int i, max_idx, max_agg_idx;
 
 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_tpa_idx_map *map;
-		int j;
-
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				struct bnxt_tpa_info *tpa_info =
-							&rxr->rx_tpa[j];
-				u8 *data = tpa_info->data;
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
 
-				if (!data)
-					continue;
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
 
-				dma_unmap_single_attrs(&pdev->dev,
-						       tpa_info->mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
+		if (!data)
+			continue;
 
-				tpa_info->data = NULL;
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
 
-				kfree(data);
-			}
-		}
+		tpa_info->data = NULL;
 
-		for (j = 0; j < max_idx; j++) {
-			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-			dma_addr_t mapping = rx_buf->mapping;
-			void *data = rx_buf->data;
+		kfree(data);
+	}
 
-			if (!data)
-				continue;
+skip_rx_tpa_free:
+	for (i = 0; i < max_idx; i++) {
+		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+		dma_addr_t mapping = rx_buf->mapping;
+		void *data = rx_buf->data;
 
-			rx_buf->data = NULL;
+		if (!data)
+			continue;
 
-			if (BNXT_RX_PAGE_MODE(bp)) {
-				mapping -= bp->rx_dma_offset;
-				dma_unmap_page_attrs(&pdev->dev, mapping,
-						     PAGE_SIZE, bp->rx_dir,
-						     DMA_ATTR_WEAK_ORDERING);
-				page_pool_recycle_direct(rxr->page_pool, data);
-			} else {
-				dma_unmap_single_attrs(&pdev->dev, mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
-				kfree(data);
-			}
+		rx_buf->data = NULL;
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			mapping -= bp->rx_dma_offset;
+			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+					     bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			page_pool_recycle_direct(rxr->page_pool, data);
+		} else {
+			dma_unmap_single_attrs(&pdev->dev, mapping,
+					       bp->rx_buf_use_size, bp->rx_dir,
+					       DMA_ATTR_WEAK_ORDERING);
+			kfree(data);
 		}
+	}
+	for (i = 0; i < max_agg_idx; i++) {
+		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+		struct page *page = rx_agg_buf->page;
 
-		for (j = 0; j < max_agg_idx; j++) {
-			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
-						&rxr->rx_agg_ring[j];
-			struct page *page = rx_agg_buf->page;
-
-			if (!page)
-				continue;
+		if (!page)
+			continue;
 
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE,
-					     PCI_DMA_FROMDEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 
-			rx_agg_buf->page = NULL;
-			__clear_bit(j, rxr->rx_agg_bmap);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
 
-			__free_page(page);
-		}
-		if (rxr->rx_page) {
-			__free_page(rxr->rx_page);
-			rxr->rx_page = NULL;
-		}
-		map = rxr->rx_tpa_idx_map;
-		if (map)
-			memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+		__free_page(page);
+	}
+	if (rxr->rx_page) {
+		__free_page(rxr->rx_page);
+		rxr->rx_page = NULL;
 	}
+	map = rxr->rx_tpa_idx_map;
+	if (map)
+		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->rx_ring)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		bnxt_free_one_rx_ring_skbs(bp, i);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)
@@ -3165,31 +3215,16 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
 	}
 }
 
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 {
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct net_device *dev = bp->dev;
-	struct bnxt_rx_ring_info *rxr;
-	struct bnxt_ring_struct *ring;
-	u32 prod, type;
+	u32 prod;
 	int i;
 
-	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
-		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
-
-	if (NET_IP_ALIGN == 2)
-		type |= RX_BD_FLAGS_SOP;
-
-	rxr = &bp->rx_ring[ring_nr];
-	ring = &rxr->rx_ring_struct;
-	bnxt_init_rxbd_pages(ring, type);
-
-	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
-		bpf_prog_add(bp->xdp_prog, 1);
-		rxr->xdp_prog = bp->xdp_prog;
-	}
 	prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
@@ -3197,22 +3232,13 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 		prod = NEXT_RX(prod);
 	}
 	rxr->rx_prod = prod;
-	ring->fw_ring_id = INVALID_HW_RING_ID;
-
-	ring = &rxr->rx_agg_ring_struct;
-	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
 		return 0;
 
-	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
-		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
-
-	bnxt_init_rxbd_pages(ring, type);
-
 	prod = rxr->rx_agg_prod;
 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
-		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
 			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
@@ -3221,30 +3247,58 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 		prod = NEXT_RX_AGG(prod);
 	}
 	rxr->rx_agg_prod = prod;
 
-	if (bp->flags & BNXT_FLAG_TPA) {
-		if (rxr->rx_tpa) {
-			u8 *data;
-			dma_addr_t mapping;
+	if (rxr->rx_tpa) {
+		dma_addr_t mapping;
+		u8 *data;
 
-			for (i = 0; i < bp->max_tpa; i++) {
-				data = __bnxt_alloc_rx_data(bp, &mapping,
-							    GFP_KERNEL);
-				if (!data)
-					return -ENOMEM;
+		for (i = 0; i < bp->max_tpa; i++) {
+			data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+			if (!data)
+				return -ENOMEM;
 
-				rxr->rx_tpa[i].data = data;
-				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
-				rxr->rx_tpa[i].mapping = mapping;
-			}
-		} else {
-			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
-			return -ENOMEM;
+			rxr->rx_tpa[i].data = data;
+			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+			rxr->rx_tpa[i].mapping = mapping;
 		}
 	}
-
 	return 0;
 }
 
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+	u32 type;
+
+	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+	if (NET_IP_ALIGN == 2)
+		type |= RX_BD_FLAGS_SOP;
+
+	rxr = &bp->rx_ring[ring_nr];
+	ring = &rxr->rx_ring_struct;
+	bnxt_init_rxbd_pages(ring, type);
+
+	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+		bpf_prog_add(bp->xdp_prog, 1);
+		rxr->xdp_prog = bp->xdp_prog;
+	}
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	ring = &rxr->rx_agg_ring_struct;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
+		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+		bnxt_init_rxbd_pages(ring, type);
+	}
+
+	return bnxt_alloc_one_rx_ring(bp, ring_nr);
+}
+
 static void bnxt_init_cp_rings(struct bnxt *bp)
 {
 	int i, j;
@@ -4269,6 +4323,8 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
 	switch (hwrm_err) {
 	case HWRM_ERR_CODE_SUCCESS:
 		return 0;
+	case HWRM_ERR_CODE_RESOURCE_LOCKED:
+		return -EROFS;
 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
 		return -EACCES;
 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
 		return -ENOMEM;
@@ -5343,13 +5399,16 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 		 * VLAN_STRIP_CAP properly.
 		 */
 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
-		    ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+		    (BNXT_CHIP_P5_THOR(bp) &&
 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
-		if (bp->max_tpa_v2)
-			bp->hw_ring_stats_size =
-				sizeof(struct ctx_hw_stats_ext);
+		if (bp->max_tpa_v2) {
+			if (BNXT_CHIP_P5_THOR(bp))
+				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
+			else
+				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+		}
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
@@ -6639,6 +6698,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	}
 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
 		bp->flags |= BNXT_FLAG_MULTI_HOST;
+	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
+		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
 
 	switch (resp->port_partition_type) {
 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -7333,6 +7394,77 @@ hwrm_cfa_adv_qcaps_exit:
 	return rc;
 }
 
+static int __bnxt_alloc_fw_health(struct bnxt *bp)
+{
+	if (bp->fw_health)
+		return 0;
+
+	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+	if (!bp->fw_health)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int bnxt_alloc_fw_health(struct bnxt *bp)
+{
+	int rc;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+		return 0;
+
+	rc = __bnxt_alloc_fw_health(bp);
+	if (rc) {
+		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
+{
+	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
+					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+					 BNXT_FW_HEALTH_WIN_MAP_OFF);
+}
+
+static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
+{
+	void __iomem *hs;
+	u32 status_loc;
+	u32 reg_type;
+	u32 sig;
+
+	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
+	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
+
+	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
+	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
+		if (bp->fw_health)
+			bp->fw_health->status_reliable = false;
+		return;
+	}
+
+	if (__bnxt_alloc_fw_health(bp)) {
+		netdev_warn(bp->dev, "no memory for firmware status checks\n");
+		return;
+	}
+
+	status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
+	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
+	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
+	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
+		__bnxt_map_fw_health_reg(bp, status_loc);
+		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
+			BNXT_FW_HEALTH_WIN_OFF(status_loc);
+	}
+
+	bp->fw_health->status_reliable = true;
+}
+
 static int bnxt_map_fw_health_regs(struct bnxt *bp)
 {
 	struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -7349,14 +7481,12 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
 		reg_base = reg & BNXT_GRC_BASE_MASK;
 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
 			return -ERANGE;
-		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
-					    (reg & BNXT_GRC_OFFSET_MASK);
+		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
 	}
 	if (reg_base == 0xffffffff)
 		return 0;
 
-	writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
-	       BNXT_FW_HEALTH_WIN_MAP_OFF);
+	__bnxt_map_fw_health_reg(bp, reg_base);
 	return 0;
 }
 
@@ -7432,6 +7562,16 @@ static int bnxt_hwrm_func_reset(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
 }
 
+static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
+{
+	struct hwrm_nvm_get_dev_info_output nvm_info;
+
+	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
+		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+			 nvm_info.nvm_cfg_ver_upd);
+}
+
 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
 	int rc = 0;
@@ -8635,10 +8775,9 @@ static void bnxt_del_napi(struct bnxt *bp)
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 
-		napi_hash_del(&bnapi->napi);
-		netif_napi_del(&bnapi->napi);
+		__netif_napi_del(&bnapi->napi);
 	}
-	/* We called napi_hash_del() before netif_napi_del(), we need
+	/* We called __netif_napi_del(), we need
 	 * to respect an RCU grace period before freeing napi structures.
 	 */
 	synchronize_net();
@@ -8694,14 +8833,19 @@ static void bnxt_enable_napi(struct bnxt *bp)
 	int i;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
-		bp->bnapi[i]->in_reset = false;
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+
+		cpr = &bnapi->cp_ring;
+		if (bnapi->in_reset)
+			cpr->sw_stats.rx.rx_resets++;
+		bnapi->in_reset = false;
 
-		if (bp->bnapi[i]->rx_ring) {
+		if (bnapi->rx_ring) {
 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 		}
-		napi_enable(&bp->bnapi[i]->napi);
+		napi_enable(&bnapi->napi);
 	}
 }
 
@@ -8735,6 +8879,30 @@ void bnxt_tx_enable(struct bnxt *bp)
 	netif_carrier_on(bp->dev);
 }
 
+static char *bnxt_report_fec(struct bnxt_link_info *link_info)
+{
+	u8 active_fec = link_info->active_fec_sig_mode &
+			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+
+	switch (active_fec) {
+	default:
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+		return "None";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+		return "Clause 74 BaseR";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+		return "Clause 91 RS(528,514)";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+		return "Clause 91 RS544_1XN";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+		return "Clause 91 RS(544,514)";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+		return "Clause 91 RS272_1XN";
+	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+		return "Clause 91 RS(272,257)";
+	}
+}
+
 static void bnxt_report_link(struct bnxt *bp)
 {
 	if (bp->link_info.link_up) {
@@ -8744,6 +8912,11 @@ static void bnxt_report_link(struct bnxt *bp)
 		u16 fec;
 
 		netif_carrier_on(bp->dev);
+		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+		if (speed == SPEED_UNKNOWN) {
+			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
+			return;
+		}
 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
 			duplex = "full";
 		else
@@ -8756,7 +8929,6 @@ static void bnxt_report_link(struct bnxt *bp)
 			flow_ctrl = "ON - receive";
 		else
 			flow_ctrl = "none";
-		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
 			    speed, duplex, flow_ctrl);
 		if (bp->flags & BNXT_FLAG_EEE_CAP)
@@ -8765,16 +8937,25 @@ static void bnxt_report_link(struct bnxt *bp)
 				    "not active");
 		fec = bp->link_info.fec_cfg;
 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
-			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
-				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
-				    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
+				    bnxt_report_fec(&bp->link_info));
 	} else {
 		netif_carrier_off(bp->dev);
 		netdev_err(bp->dev, "NIC Link is Down\n");
 	}
 }
 
+static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
+{
+	if (!resp->supported_speeds_auto_mode &&
+	    !resp->supported_speeds_force_mode &&
+	    !resp->supported_pam4_speeds_auto_mode &&
+	    !resp->supported_pam4_speeds_force_mode)
+		return true;
+	return false;
+}
+
 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
@@ -8822,9 +9003,24 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
 		bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
 
+	if (bp->hwrm_spec_code >= 0x10a01) {
+		if (bnxt_phy_qcaps_no_speed(resp)) {
+			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
+			netdev_warn(bp->dev, "Ethernet link disabled\n");
+		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
+			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
+			netdev_info(bp->dev, "Ethernet link enabled\n");
+			/* Phy re-enabled, reprobe the speeds */
+			link_info->support_auto_speeds = 0;
+			link_info->support_pam4_auto_speeds = 0;
+		}
+	}
 	if (resp->supported_speeds_auto_mode)
 		link_info->support_auto_speeds =
 			le16_to_cpu(resp->supported_speeds_auto_mode);
+	if (resp->supported_pam4_speeds_auto_mode)
+		link_info->support_pam4_auto_speeds =
+			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
 
 	bp->port_count = resp->port_cnt;
 
@@ -8833,14 +9029,21 @@ hwrm_phy_qcaps_exit:
 	return rc;
 }
 
-static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+static bool bnxt_support_dropped(u16 advertising, u16 supported)
+{
+	u16 diff = advertising ^ supported;
+
+	return ((supported | diff) != supported);
+}
+
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 {
 	int rc = 0;
 	struct bnxt_link_info *link_info = &bp->link_info;
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	u8 link_up = link_info->link_up;
-	u16 diff;
+	bool support_changed = false;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
 
@@ -8867,10 +9070,17 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 	else
 		link_info->link_speed = 0;
 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+	link_info->force_pam4_link_speed =
+		le16_to_cpu(resp->force_pam4_link_speed);
 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+	link_info->auto_pam4_link_speeds =
+		le16_to_cpu(resp->auto_pam4_link_speed_mask);
 	link_info->lp_auto_link_speeds =
 		le16_to_cpu(resp->link_partner_adv_speeds);
+	link_info->lp_auto_pam4_link_speeds =
+		resp->link_partner_pam4_adv_speeds;
 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
 	link_info->phy_ver[0] = resp->phy_maj;
 	link_info->phy_ver[1] = resp->phy_min;
@@ -8919,9 +9129,10 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 
 	}
 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
-	if (bp->hwrm_spec_code >= 0x10504)
+	if (bp->hwrm_spec_code >= 0x10504) {
 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
-
+		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
+	}
 	/* TODO: need to add more logic to report VF link */
 	if (chng_link_state) {
 		if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -8939,17 +9150,21 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 	if (!BNXT_PHY_CFG_ABLE(bp))
 		return 0;
 
-	diff = link_info->support_auto_speeds ^ link_info->advertising;
-	if ((link_info->support_auto_speeds | diff) !=
-	    link_info->support_auto_speeds) {
-		/* An advertised speed is no longer supported, so we need to
-		 * update the advertisement settings. Caller holds RTNL
-		 * so we can modify link settings.
-		 */
+	/* Check if any advertised speeds are no longer supported. The caller
+	 * holds the link_lock mutex, so we can modify link_info settings.
+	 */
+	if (bnxt_support_dropped(link_info->advertising,
+				 link_info->support_auto_speeds)) {
 		link_info->advertising = link_info->support_auto_speeds;
-		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
-			bnxt_hwrm_set_link_setting(bp, true, false);
+		support_changed = true;
+	}
+	if (bnxt_support_dropped(link_info->advertising_pam4,
+				 link_info->support_pam4_auto_speeds)) {
+		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+		support_changed = true;
 	}
+	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
+		bnxt_hwrm_set_link_setting(bp, true, false);
 	return 0;
 }
 
@@ -9008,27 +9223,30 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
 	}
 }
 
-static void bnxt_hwrm_set_link_common(struct bnxt *bp,
-				      struct hwrm_port_phy_cfg_input *req)
+static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
 {
-	u8 autoneg = bp->link_info.autoneg;
-	u16 fw_link_speed = bp->link_info.req_link_speed;
-	u16 advertising = bp->link_info.advertising;
-
-	if (autoneg & BNXT_AUTONEG_SPEED) {
-		req->auto_mode |=
-			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
-
-		req->enables |= cpu_to_le32(
-			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
-		req->auto_link_speed_mask = cpu_to_le16(advertising);
-
+	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
+		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
+		if (bp->link_info.advertising) {
+			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
+		}
+		if (bp->link_info.advertising_pam4) {
+			req->enables |=
+				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
+			req->auto_link_pam4_speed_mask =
+				cpu_to_le16(bp->link_info.advertising_pam4);
+		}
 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
-		req->flags |=
-			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
 	} else {
-		req->force_link_speed = cpu_to_le16(fw_link_speed);
 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+		if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
+		} else {
+			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+		}
 	}
 
 	/* tell chimp that the setting takes effect immediately */
@@ -9424,14 +9642,19 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
 		if (BNXT_AUTO_MODE(link_info->auto_mode))
 			update_link = true;
-		if (link_info->req_link_speed != link_info->force_link_speed)
+		if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+		    link_info->req_link_speed != link_info->force_link_speed)
+			update_link = true;
+		else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+			 link_info->req_link_speed != link_info->force_pam4_link_speed)
 			update_link = true;
 		if (link_info->req_duplex != link_info->duplex_setting)
 			update_link = true;
 	} else {
 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
 			update_link = true;
-		if (link_info->advertising != link_info->auto_link_speeds)
+		if (link_info->advertising != link_info->auto_link_speeds ||
+		    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
 			update_link = true;
 	}
 
@@ -10362,6 +10585,23 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
 	}
 }
 
+static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+	struct hwrm_ring_reset_input req = {0};
+	struct bnxt_napi *bnapi = rxr->bnapi;
+	struct bnxt_cp_ring_info *cpr;
+	u16 cp_ring_id;
+
+	cpr = &bnapi->cp_ring;
+	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
+	req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
+	req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
+	return hwrm_send_message_silent(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+}
+
 static void bnxt_reset_task(struct bnxt *bp, bool silent)
 {
 	if (!silent)
@@ -10497,6 +10737,55 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 	bnxt_rtnl_unlock_sp(bp);
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_rx_ring_reset(struct bnxt *bp)
+{
+	int i;
+
+	bnxt_rtnl_lock_sp(bp);
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		bnxt_rtnl_unlock_sp(bp);
+		return;
+	}
+	/* Disable and flush TPA before resetting the RX ring */
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, false);
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+		struct bnxt_cp_ring_info *cpr;
+		int rc;
+
+		if (!rxr->bnapi->in_reset)
+			continue;
+
+		rc = bnxt_hwrm_rx_ring_reset(bp, i);
+		if (rc) {
+			if (rc == -EINVAL || rc == -EOPNOTSUPP)
+				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
+			else
+				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
+					    rc);
+			bnxt_reset_task(bp, true);
+			break;
+		}
+		bnxt_free_one_rx_ring_skbs(bp, i);
+		rxr->rx_prod = 0;
+		rxr->rx_agg_prod = 0;
+		rxr->rx_sw_agg_prod = 0;
+		rxr->rx_next_cons = 0;
+		rxr->bnapi->in_reset = false;
+		bnxt_alloc_one_rx_ring(bp, i);
+		cpr = &rxr->bnapi->cp_ring;
+		cpr->sw_stats.rx.rx_resets++;
+		if (bp->flags & BNXT_FLAG_AGG_RINGS)
+			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+	}
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, true);
+	bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_fw_reset_close(struct bnxt *bp)
 {
 	bnxt_ulp_stop(bp);
@@ -10691,8 +10980,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
 		}
 		link_info->advertising = link_info->auto_link_speeds;
+		link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
 	} else {
 		link_info->req_link_speed = link_info->force_link_speed;
+		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+		if (link_info->force_pam4_link_speed) {
+			link_info->req_link_speed =
+				link_info->force_pam4_link_speed;
+			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+		}
 		link_info->req_duplex = link_info->duplex_setting;
 	}
 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -10778,6 +11074,9 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, true);
 
+	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
+		bnxt_rx_ring_reset(bp);
+
 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT,
			       &bp->sp_event))
 		bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
@@ -10882,21 +11181,19 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
 }
 
-static void bnxt_alloc_fw_health(struct bnxt *bp)
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
 {
-	if (bp->fw_health)
-		return;
+#ifdef CONFIG_TEE_BNXT_FW
+	int rc = tee_bnxt_fw_load();
 
-	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
-	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
-		return;
+	if (rc)
+		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
 
-	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
-	if (!bp->fw_health) {
-		netdev_warn(bp->dev, "Failed to allocate fw_health\n");
-		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
-		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
-	}
+	return rc;
+#else
+	netdev_err(bp->dev, "OP-TEE not supported\n");
+	return -ENODEV;
+#endif
 }
 
 static int bnxt_fw_init_one_p1(struct bnxt *bp)
@@ -10905,8 +11202,24 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
 
 	bp->fw_cap = 0;
 	rc = bnxt_hwrm_ver_get(bp);
-	if (rc)
-		return rc;
+	bnxt_try_map_fw_health_reg(bp);
+	if (rc) {
+		if (bp->fw_health && bp->fw_health->status_reliable) {
+			u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+			netdev_err(bp->dev,
+				   "Firmware not responding, status: 0x%x\n",
+				   sts);
+			if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+				netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+				rc = bnxt_fw_reset_via_optee(bp);
+				if (!rc)
+					rc = bnxt_hwrm_ver_get(bp);
+			}
+		}
+		if (rc)
+			return rc;
+	}
 
 	if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
 		rc = bnxt_alloc_kong_hwrm_resources(bp);
@@ -10920,6 +11233,8 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
 		if (rc)
 			return rc;
 	}
+	bnxt_nvm_cfg_ver_get(bp);
+
 	rc = bnxt_hwrm_func_reset(bp);
 	if (rc)
 		return -ENODEV;
@@ -10945,11 +11260,14 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
 			    rc);
 
-	bnxt_alloc_fw_health(bp);
-	rc = bnxt_hwrm_error_recovery_qcfg(bp);
-	if (rc)
-		netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
-			    rc);
+	if (bnxt_alloc_fw_health(bp)) {
+		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
+	} else {
+		rc = bnxt_hwrm_error_recovery_qcfg(bp);
+		if (rc)
+			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+				    rc);
+	}
 
 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
 	if (rc)
@@ -11075,12 +11393,8 @@ static void bnxt_reset_all(struct bnxt *bp)
 	int i, rc;
 
 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
-#ifdef CONFIG_TEE_BNXT_FW
-		rc = tee_bnxt_fw_load();
-		if (rc)
-			netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+		bnxt_fw_reset_via_optee(bp);
 		bp->fw_reset_timestamp = jiffies;
-#endif
 		return;
 	}
 
@@ -11199,7 +11513,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		if (time_after(jiffies, bp->fw_reset_timestamp +
			       (bp->fw_reset_max_dsecs * HZ / 10))) {
 			netdev_err(bp->dev, "Firmware reset aborted\n");
-			goto fw_reset_abort;
+			goto fw_reset_abort_status;
 		}
 		bnxt_queue_fw_reset_work(bp, HZ / 5);
 		return;
@@ -11233,6 +11547,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 	}
 	return;
 
+fw_reset_abort_status:
+	if (bp->fw_health->status_reliable ||
+	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
+	}
 fw_reset_abort:
 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
@@ -12203,6 +12524,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENOMEM;
 
 	bp = netdev_priv(dev);
+	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
 	bnxt_set_max_func_irqs(bp, max_irqs);
 
 	if (bnxt_vf_pciid(ent->driver_data))
@@ -12234,8 +12556,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
-	if (BNXT_CHIP_P5(bp))
+	if (BNXT_CHIP_P5(bp)) {
 		bp->flags |= BNXT_FLAG_CHIP_P5;
+		if (BNXT_CHIP_SR2(bp))
+			bp->flags |= BNXT_FLAG_CHIP_SR2;
+	}
 
 	rc = bnxt_alloc_rss_indir_tbl(bp);
 	if (rc)
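The net effect of the ring-monitor hunks above can be hard to follow across the whole diff, so here is a minimal, hypothetical C sketch of the event-to-reset flow they implement. This is NOT driver code: every type and function name below is a simplified stand-in for the bnxt structures visible in the diff (bnxt_agg_ring_id_to_grp_idx(), the RING_MONITOR_MSG case, and the in_reset flag consumed later by bnxt_rx_ring_reset()).

/*
 * Illustrative model only -- not part of bnxt.c. A RING_MONITOR_MSG async
 * event carries the aggregation ring ID in data1; the handler maps it back
 * to a ring group and flags that ring, and a deferred worker performs the
 * per-ring reset in process context.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define INVALID_HW_RING_ID ((uint16_t)-1)

struct rx_ring {
	uint16_t agg_fw_ring_id;	/* firmware ID of the agg ring */
	bool in_reset;			/* set when queued for per-ring reset */
};

/* Models bnxt_agg_ring_id_to_grp_idx(): linear scan of the ring groups */
static uint16_t agg_ring_id_to_grp_idx(const struct rx_ring *rings,
				       int nr_rings, uint16_t ring_id)
{
	int i;

	for (i = 0; i < nr_rings; i++)
		if (rings[i].agg_fw_ring_id == ring_id)
			return (uint16_t)i;
	return INVALID_HW_RING_ID;
}

/* Models the RING_MONITOR_MSG case: resolve the ring, then only mark it;
 * a worker (bnxt_rx_ring_reset() in the driver) resets the flagged rings.
 */
static void handle_ring_monitor_event(struct rx_ring *rings, int nr_rings,
				      uint32_t data1)
{
	uint16_t grp_idx = agg_ring_id_to_grp_idx(rings, nr_rings,
						  (uint16_t)data1);

	if (grp_idx == INVALID_HW_RING_ID) {
		fprintf(stderr, "Unknown RX agg ring id 0x%x\n",
			(unsigned int)data1);
		return;
	}
	rings[grp_idx].in_reset = true;
}

int main(void)
{
	struct rx_ring rings[2] = {
		{ .agg_fw_ring_id = 0x101 },
		{ .agg_fw_ring_id = 0x102 },
	};

	handle_ring_monitor_event(rings, 2, 0x102);
	printf("ring 1 in_reset = %d\n", rings[1].in_reset);	/* prints 1 */
	return 0;
}

The design point mirrored here is that the event handler only flags the affected ring; the actual teardown and refill (bnxt_free_one_rx_ring_skbs() and bnxt_alloc_one_rx_ring() in the diff) run later from the service task under RTNL, where sleeping allocations are allowed, and fall back to a global reset if the firmware rejects the per-ring HWRM_RING_RESET.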