Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt')
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.c	| 804
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt.h	|  59
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c	| 181
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c	| 159
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h	|  44
5 files changed, 885 insertions, 362 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 43952689bfb0..bb3be33c1bbd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -456,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int length, pad = 0;
 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
-	u16 prod, last_frag;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	struct pci_dev *pdev = bp->pdev;
+	u16 prod, last_frag, txts_prod;
 	struct bnxt_tx_ring_info *txr;
 	struct bnxt_sw_tx_bd *tx_buf;
 	__le32 lflags = 0;
@@ -509,23 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
 	}

-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
-		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+	    ptp->tx_tstamp_en) {
+		if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+			lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+			tx_buf->is_ts_pkt = 1;
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		} else if (!skb_is_gso(skb)) {
+			u16 seq_id, hdr_off;

-		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
-		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
-			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
-					    &ptp->tx_hdr_off)) {
+			if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+			    !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
 				if (vlan_tag_flags)
-					ptp->tx_hdr_off += VLAN_HLEN;
+					hdr_off += VLAN_HLEN;
 				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+				tx_buf->is_ts_pkt = 1;
 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-			} else {
-				atomic_inc(&bp->ptp_cfg->tx_avail);
+
+				ptp->txts_req[txts_prod].tx_seqid = seq_id;
+				ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+				tx_buf->txts_prod = txts_prod;
 			}
 		}
 	}
-
 	if (unlikely(skb->no_fcs))
 		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
@@ -753,8 +760,13 @@ tx_dma_error:
 tx_free:
 	dev_kfree_skb_any(skb);
 tx_kick_pending:
-	if (BNXT_TX_PTP_IS_SET(lflags))
-		atomic_inc(&bp->ptp_cfg->tx_avail);
+	if (BNXT_TX_PTP_IS_SET(lflags)) {
+		txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+		atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+		if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+			/* set SKB to err so PTP worker will clean up */
+			ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+	}
 	if (txr->kick_pending)
 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
 	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -762,7 +774,8 @@ tx_kick_pending:
 	return NETDEV_TX_OK;
 }

-static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+/* Returns true if some remaining TX packets not processed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 			  int budget)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -771,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 	unsigned int tx_bytes = 0;
 	u16 cons = txr->tx_cons;
 	int tx_pkts = 0;
+	bool rc = false;

 	while (RING_TX(bp, cons) != hw_cons) {
 		struct bnxt_sw_tx_bd *tx_buf;
 		struct sk_buff *skb;
+		bool is_ts_pkt;
 		int j, last;

 		tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
-		cons = NEXT_TX(cons);
 		skb = tx_buf->skb;
-		tx_buf->skb = NULL;

 		if (unlikely(!skb)) {
 			bnxt_sched_reset_txr(bp, txr, cons);
-			return;
+			return rc;
+		}
+
+		is_ts_pkt = tx_buf->is_ts_pkt;
+		if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+			rc = true;
+			break;
 		}

+		cons = NEXT_TX(cons);
 		tx_pkts++;
 		tx_bytes += skb->len;
+		tx_buf->skb = NULL;
+		tx_buf->is_ts_pkt = 0;

 		if (tx_buf->is_push) {
 			tx_buf->is_push = 0;
@@ -808,13 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 					 skb_frag_size(&skb_shinfo(skb)->frags[j]),
 					 DMA_TO_DEVICE);
 		}
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+		if (unlikely(is_ts_pkt)) {
 			if (BNXT_CHIP_P5(bp)) {
 				/* PTP worker takes ownership of the skb */
-				if (!bnxt_get_tx_ts_p5(bp, skb))
-					skb = NULL;
-				else
-					atomic_inc(&bp->ptp_cfg->tx_avail);
+				bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+				skb = NULL;
 			}
 		}

@@ -829,18 +849,22 @@ next_tx_int:
 	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
 				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
 				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+	return rc;
 }

 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 {
 	struct bnxt_tx_ring_info *txr;
+	bool more = false;
 	int i;

 	bnxt_for_each_napi_tx(i, bnapi, txr) {
 		if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
-			__bnxt_tx_int(bp, txr, budget);
+			more |= __bnxt_tx_int(bp, txr, budget);
 	}
-	bnapi->events &= ~BNXT_TX_CMP_EVENT;
+	if (!more)
+		bnapi->events &= ~BNXT_TX_CMP_EVENT;
 }
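When the firmware reports TX-timestamp completions (BNXT_FW_CAP_TX_TS_CMP), __bnxt_tx_int() above stops reclaiming at the first timestamp packet and returns true, so BNXT_TX_CMP_EVENT stays set and NAPI revisits the ring once the separate timestamp completion arrives. A minimal sketch of that deferral pattern, using hypothetical types rather than the driver's real structures:

	#include <stdbool.h>
	#include <stdint.h>

	struct entry {
		void *skb;       /* packet, NULL if slot unused */
		bool  is_ts_pkt; /* waiting for a timestamp completion */
	};

	/* Reclaim completed entries; stop at the first one still waiting
	 * for its hardware timestamp.  Returning true tells the caller to
	 * keep the completion event pending so the poll loop revisits
	 * this ring after the timestamp completion shows up.
	 */
	static bool reclaim(struct entry *ring, uint16_t *cons,
			    uint16_t hw_cons, uint16_t mask, bool hw_ts_cmp)
	{
		while ((*cons & mask) != hw_cons) {
			struct entry *e = &ring[*cons & mask];

			if (e->is_ts_pkt && hw_ts_cmp)
				return true;	/* defer this slot */
			e->skb = NULL;		/* normal reclaim */
			e->is_ts_pkt = false;
			(*cons)++;
		}
		return false;
	}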
@@ -2906,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				cpr->has_more_work = 1;
 				break;
 			}
+		} else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+			bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
 		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
 			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
 			if (likely(budget))
@@ -2937,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		}
 	}

-	if (event & BNXT_REDIRECT_EVENT)
+	if (event & BNXT_REDIRECT_EVENT) {
 		xdp_do_flush();
+		event &= ~BNXT_REDIRECT_EVENT;
+	}

 	if (event & BNXT_TX_EVENT) {
 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2948,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 		wmb();

 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+		event &= ~BNXT_TX_EVENT;
 	}

 	cpr->cp_raw_cons = raw_cons;
@@ -2965,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+		bnapi->events &= ~BNXT_RX_EVENT;
 	}
 	if (bnapi->events & BNXT_AGG_EVENT) {
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+		bnapi->events &= ~BNXT_AGG_EVENT;
 	}
-	bnapi->events &= BNXT_TX_CMP_EVENT;
 }

 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3308,37 +3338,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }

-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct pci_dev *pdev = bp->pdev;
-	struct bnxt_tpa_idx_map *map;
-	int i, max_idx, max_agg_idx;
+	int i, max_idx;

 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
-	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-	if (!rxr->rx_tpa)
-		goto skip_rx_tpa_free;
-
-	for (i = 0; i < bp->max_tpa; i++) {
-		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
-		u8 *data = tpa_info->data;
-
-		if (!data)
-			continue;
-
-		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
-				       bp->rx_buf_use_size, bp->rx_dir,
-				       DMA_ATTR_WEAK_ORDERING);
-
-		tpa_info->data = NULL;
-
-		skb_free_frag(data);
-	}
-
-skip_rx_tpa_free:
-	if (!rxr->rx_buf_ring)
-		goto skip_rx_buf_free;

 	for (i = 0; i < max_idx; i++) {
 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
@@ -3358,12 +3363,15 @@ skip_rx_tpa_free:
 			skb_free_frag(data);
 		}
 	}
+}

-skip_rx_buf_free:
-	if (!rxr->rx_agg_ring)
-		goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	int i, max_idx;

-	for (i = 0; i < max_agg_idx; i++) {
+	max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+
+	for (i = 0; i < max_idx; i++) {
 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
 		struct page *page = rx_agg_buf->page;

@@ -3375,6 +3383,45 @@ skip_rx_buf_free:

 		page_pool_recycle_direct(rxr->page_pool, page);
 	}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_tpa_idx_map *map;
+	int i;
+
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
+
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
+
+		if (!data)
+			continue;
+
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
+
+		tpa_info->data = NULL;
+
+		skb_free_frag(data);
+	}
+
+skip_rx_tpa_free:
+	if (!rxr->rx_buf_ring)
+		goto skip_rx_buf_free;
+
+	bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+	if (!rxr->rx_agg_ring)
+		goto skip_rx_agg_free;
+
+	bnxt_free_one_rx_agg_ring(bp, rxr);

 skip_rx_agg_free:
 	map = rxr->rx_tpa_idx_map;
@@ -3971,6 +4018,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 	return 0;
 }

+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_mem_info *rmem;
+	struct bnxt_ring_struct *ring;
+
+	ring = &rxr->rx_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->nr_pages = bp->rx_nr_pages;
+	rmem->page_size = HW_RXBD_RING_SIZE;
+	rmem->pg_arr = (void **)rxr->rx_desc_ring;
+	rmem->dma_arr = rxr->rx_desc_mapping;
+	rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+	rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+	ring = &rxr->rx_agg_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->nr_pages = bp->rx_agg_nr_pages;
+	rmem->page_size = HW_RXBD_RING_SIZE;
+	rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+	rmem->dma_arr = rxr->rx_agg_desc_mapping;
+	rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+	rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+				      struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_mem_info *rmem;
+	struct bnxt_ring_struct *ring;
+	int i;
+
+	rxr->page_pool->p.napi = NULL;
+	rxr->page_pool = NULL;
+
+	ring = &rxr->rx_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->pg_tbl = NULL;
+	rmem->pg_tbl_map = 0;
+	for (i = 0; i < rmem->nr_pages; i++) {
+		rmem->pg_arr[i] = NULL;
+		rmem->dma_arr[i] = 0;
+	}
+	*rmem->vmem = NULL;
+
+	ring = &rxr->rx_agg_ring_struct;
+	rmem = &ring->ring_mem;
+	rmem->pg_tbl = NULL;
+	rmem->pg_tbl_map = 0;
+	for (i = 0; i < rmem->nr_pages; i++) {
+		rmem->pg_arr[i] = NULL;
+		rmem->dma_arr[i] = 0;
+	}
+	*rmem->vmem = NULL;
+}
+
 static void bnxt_init_ring_struct(struct bnxt *bp)
 {
 	int i, j;
@@ -4053,37 +4156,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
 	}
 }

-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr,
+				       int ring_nr)
 {
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
-	struct net_device *dev = bp->dev;
 	u32 prod;
 	int i;

 	prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
-			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
 		}
 		prod = NEXT_RX(prod);
 	}
 	rxr->rx_prod = prod;
+}

-	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
-		return 0;
+static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+					struct bnxt_rx_ring_info *rxr,
+					int ring_nr)
+{
+	u32 prod;
+	int i;

 	prod = rxr->rx_agg_prod;
 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
-			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
 				    ring_nr, i, bp->rx_ring_size);
 			break;
 		}
 		prod = NEXT_RX_AGG(prod);
 	}
 	rxr->rx_agg_prod = prod;
+}
+
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+	int i;
+
+	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return 0;
+
+	bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);

 	if (rxr->rx_tpa) {
 		dma_addr_t mapping;
@@ -4102,9 +4223,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
 	return 0;
 }

-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
 {
-	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring_struct *ring;
 	u32 type;

@@ -4114,28 +4235,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	if (NET_IP_ALIGN == 2)
 		type |= RX_BD_FLAGS_SOP;

-	rxr = &bp->rx_ring[ring_nr];
 	ring = &rxr->rx_ring_struct;
 	bnxt_init_rxbd_pages(ring, type);
-
-	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
-			     &rxr->bnapi->napi);
-
-	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
-		bpf_prog_add(bp->xdp_prog, 1);
-		rxr->xdp_prog = bp->xdp_prog;
-	}
 	ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+					   struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring;
+	u32 type;

 	ring = &rxr->rx_agg_ring_struct;
 	ring->fw_ring_id = INVALID_HW_RING_ID;
-
 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

 		bnxt_init_rxbd_pages(ring, type);
 	}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct bnxt_rx_ring_info *rxr;
+
+	rxr = &bp->rx_ring[ring_nr];
+	bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+			     &rxr->bnapi->napi);
+
+	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+		bpf_prog_add(bp->xdp_prog, 1);
+		rxr->xdp_prog = bp->xdp_prog;
+	}
+
+	bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);

 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
 }
@@ -5834,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
 			  struct bnxt_ntuple_filter *fltr)
 {
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
 	u16 rxq = fltr->base.rxq;

 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
-		list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
-			if (rss_ctx->index == fltr->base.fw_vnic_id) {
-				struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+		struct ethtool_rxfh_context *ctx;
+		struct bnxt_rss_ctx *rss_ctx;
+		struct bnxt_vnic_info *vnic;

-				req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
-				break;
-			}
+		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+			      fltr->base.fw_vnic_id);
+		if (ctx) {
+			rss_ctx = ethtool_rxfh_context_priv(ctx);
+			vnic = &rss_ctx->vnic;
+
+			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
 		}
 		return;
 	}
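The hunk above replaces the driver's private rss_ctx_list walk with a lookup in the xarray the ethtool core now keeps per netdev; ethtool_rxfh_context_priv() returns the rxfh_priv_size bytes the core reserved for the driver inside each context. The pattern, reduced to a sketch (equivalent to the bnxt_get_rss_ctx_from_index() helper that appears in the bnxt_ethtool.c part of this diff):

	/* Sketch: resolving a context ID to driver state under the new
	 * model.  The core owns the ethtool_rxfh_context; the driver's
	 * state lives in its private tail.
	 */
	static struct bnxt_rss_ctx *lookup_rss_ctx(struct net_device *dev, u32 id)
	{
		struct ethtool_rxfh_context *ctx;

		ctx = xa_load(&dev->ethtool->rss_ctx, id);
		return ctx ? ethtool_rxfh_context_priv(ctx) : NULL;
	}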
@@ -6083,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
 	return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
 }

-int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
 {
 	int entries;
-	u16 *tbl;

 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
@@ -6094,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
 		entries = HW_HASH_INDEX_SIZE;

 	bp->rss_indir_tbl_entries = entries;
-	tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
-	if (!tbl)
+	bp->rss_indir_tbl =
+		kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
+	if (!bp->rss_indir_tbl)
 		return -ENOMEM;

-	if (rss_ctx)
-		rss_ctx->rss_indir_tbl = tbl;
-	else
-		bp->rss_indir_tbl = tbl;
-
 	return 0;
 }

-void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+				 struct ethtool_rxfh_context *rss_ctx)
 {
 	u16 max_rings, max_entries, pad, i;
-	u16 *rss_indir_tbl;
+	u32 *rss_indir_tbl;

 	if (!bp->rx_nr_rings)
 		return;
@@ -6121,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);

 	if (rss_ctx)
-		rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
+		rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
 	else
 		rss_indir_tbl = &bp->rss_indir_tbl[0];

@@ -6130,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)

 	pad = bp->rss_indir_tbl_entries - max_entries;
 	if (pad)
-		memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
+		memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
 }

 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 {
-	u16 i, tbl_size, max_ring = 0;
+	u32 i, tbl_size, max_ring = 0;

 	if (!bp->rss_indir_tbl)
 		return 0;
@@ -6146,24 +6281,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 	return max_ring;
 }

-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp)
-{
-	u16 i, tbl_size, max_ring = 0;
-	struct bnxt_rss_ctx *rss_ctx;
-
-	if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
-		return 0;
-
-	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
-
-	list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) {
-		for (i = 0; i < tbl_size; i++)
-			max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]);
-	}
-
-	return max_ring;
-}
-
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -6205,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
 		if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
 			j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
 		else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
-			j = vnic->rss_ctx->rss_indir_tbl[i];
+			j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
 		else
 			j = bp->rss_indir_tbl[i];
 		rxr = &bp->rx_ring[j];
@@ -6692,6 +6809,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	switch (ring_type) {
 	case HWRM_RING_ALLOC_TX: {
 		struct bnxt_tx_ring_info *txr;
+		u16 flags = 0;

 		txr = container_of(ring, struct bnxt_tx_ring_info,
 				   tx_ring_struct);
@@ -6705,6 +6823,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 		if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
 			req->cmpl_coal_cnt =
 				RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+		if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+			flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+		req->flags = cpu_to_le16(flags);
 		break;
 	}
 	case HWRM_RING_ALLOC_RX:
@@ -6878,6 +6999,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
 		bnxt_set_db_mask(bp, db, ring_type);
 }

+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+	struct bnxt_napi *bnapi = rxr->bnapi;
+	u32 type = HWRM_RING_ALLOC_RX;
+	u32 map_idx = bnapi->index;
+	int rc;
+
+	rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+	if (rc)
+		return rc;
+
+	bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+	bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+	return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+	u32 type = HWRM_RING_ALLOC_AGG;
+	u32 grp_idx = ring->grp_idx;
+	u32 map_idx;
+	int rc;
+
+	map_idx = grp_idx + bp->rx_nr_rings;
+	rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+	if (rc)
+		return rc;
+
+	bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+		    ring->fw_ring_id);
+	bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+	bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+	return 0;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6943,24 +7106,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
 	}

-	type = HWRM_RING_ALLOC_RX;
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
-		struct bnxt_napi *bnapi = rxr->bnapi;
-		u32 map_idx = bnapi->index;

-		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+		rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
 		if (rc)
 			goto err_out;
-		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
 		/* If we have agg rings, post agg buffers first. */
 		if (!agg_rings)
 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
-		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
 		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 			struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
+			struct bnxt_napi *bnapi = rxr->bnapi;
 			u32 type2 = HWRM_RING_ALLOC_CMPL;
+			struct bnxt_ring_struct *ring;
+			u32 map_idx = bnapi->index;

 			ring = &cpr2->cp_ring_struct;
 			ring->handle = BNXT_SET_NQ_HDL(cpr2);
@@ -6974,23 +7134,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 	}

 	if (agg_rings) {
-		type = HWRM_RING_ALLOC_AGG;
 		for (i = 0; i < bp->rx_nr_rings; i++) {
-			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-			struct bnxt_ring_struct *ring =
-						&rxr->rx_agg_ring_struct;
-			u32 grp_idx = ring->grp_idx;
-			u32 map_idx = grp_idx + bp->rx_nr_rings;
-
-			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+			rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
 			if (rc)
 				goto err_out;
-
-			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
-				    ring->fw_ring_id);
-			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
-			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
-			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
 		}
 	}
 err_out:
@@ -7030,6 +7177,50 @@ exit:
 	return 0;
 }

+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr,
+				   bool close_path)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+	u32 grp_idx = rxr->bnapi->index;
+	u32 cmpl_ring_id;
+
+	if (ring->fw_ring_id == INVALID_HW_RING_ID)
+		return;
+
+	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+	hwrm_ring_free_send_msg(bp, ring,
+				RING_FREE_REQ_RING_TYPE_RX,
+				close_path ? cmpl_ring_id :
+				INVALID_HW_RING_ID);
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+	bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+				       struct bnxt_rx_ring_info *rxr,
+				       bool close_path)
+{
+	struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+	u32 grp_idx = rxr->bnapi->index;
+	u32 type, cmpl_ring_id;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+	else
+		type = RING_FREE_REQ_RING_TYPE_RX;
+
+	if (ring->fw_ring_id == INVALID_HW_RING_ID)
+		return;
+
+	cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+	hwrm_ring_free_send_msg(bp, ring, type,
+				close_path ? cmpl_ring_id :
+				INVALID_HW_RING_ID);
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+	bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 {
 	u32 type;
@@ -7054,42 +7245,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}

 	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
-		u32 grp_idx = rxr->bnapi->index;
-
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
-			hwrm_ring_free_send_msg(bp, ring,
-						RING_FREE_REQ_RING_TYPE_RX,
-						close_path ? cmpl_ring_id :
-						INVALID_HW_RING_ID);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			bp->grp_info[grp_idx].rx_fw_ring_id =
-				INVALID_HW_RING_ID;
-		}
-	}
-
-	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
-	else
-		type = RING_FREE_REQ_RING_TYPE_RX;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
-		u32 grp_idx = rxr->bnapi->index;
-
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
-			hwrm_ring_free_send_msg(bp, ring, type,
-						close_path ? cmpl_ring_id :
-						INVALID_HW_RING_ID);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			bp->grp_info[grp_idx].agg_fw_ring_id =
-				INVALID_HW_RING_ID;
-		}
+		bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+		bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
 	}

 	/* The completion rings are about to be freed.  After that the
@@ -8849,7 +9006,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	u8 flags;
 	int rc;

-	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
+	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
 		rc = -ENODEV;
 		goto no_ptp;
 	}
@@ -8865,7 +9022,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 		goto exit;

 	flags = resp->flags;
-	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+	if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+	    !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
 		rc = -ENODEV;
 		goto exit;
 	}
@@ -8878,10 +9036,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 		ptp->bp = bp;
 		bp->ptp_cfg = ptp;
 	}
-	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
+	if (flags &
+	    (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+	     PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
-	} else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+	} else if (BNXT_CHIP_P5(bp)) {
 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
 	} else {
@@ -8963,6 +9124,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
 		bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;

 	bp->tx_push_thresh = 0;
 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -10031,10 +10194,12 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 	struct bnxt_ntuple_filter *ntp_fltr;
 	int i;

-	bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
-	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
-		if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
-			bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+	if (netif_running(bp->dev)) {
+		bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+		for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+			if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+				bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+		}
 	}
 	if (!all)
 		return;
@@ -10055,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 		dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
 				  vnic->rss_table,
 				  vnic->rss_table_dma_addr);
-	kfree(rss_ctx->rss_indir_tbl);
-	list_del(&rss_ctx->list);
 	bp->num_rss_ctx--;
-	clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
-	kfree(rss_ctx);
 }

 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 {
 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
+	struct ethtool_rxfh_context *ctx;
+	unsigned long context;

-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;

 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
@@ -10076,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 			netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
 				   rss_ctx->index);
 			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+			ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
 		}
 	}
 }

-struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
+void bnxt_clear_rss_ctxs(struct bnxt *bp)
 {
-	struct bnxt_rss_ctx *rss_ctx = NULL;
+	struct ethtool_rxfh_context *ctx;
+	unsigned long context;

-	rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
-	if (rss_ctx) {
-		rss_ctx->vnic.rss_ctx = rss_ctx;
-		list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
-		bp->num_rss_ctx++;
-	}
-	return rss_ctx;
-}
+	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);

-void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
-{
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
-
-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
-		bnxt_del_one_rss_ctx(bp, rss_ctx, all);
-
-	if (all)
-		bitmap_free(bp->rss_ctx_bmap);
-}
-
-static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
-{
-	bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
-	if (bp->rss_ctx_bmap) {
-		/* burn index 0 since we cannot have context 0 */
-		__set_bit(0, bp->rss_ctx_bmap);
-		INIT_LIST_HEAD(&bp->rss_ctx_list);
-		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+		bnxt_del_one_rss_ctx(bp, rss_ctx, false);
 	}
 }
@@ -12004,8 +12145,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	/* VF-reps may need to be re-opened after the PF is re-opened */
 	if (BNXT_PF(bp))
 		bnxt_vf_reps_open(bp);
-	if (bp->ptp_cfg)
-		atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+	if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+		WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
 	bnxt_ptp_init_rtc(bp, true);
 	bnxt_ptp_cfg_tstamp_filters(bp);
 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -12158,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
 	msleep(20);

 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
-		bnxt_clear_rss_ctxs(bp, false);
+		bnxt_clear_rss_ctxs(bp);
 	/* Flush rings and disable interrupts */
 	bnxt_shutdown_nic(bp, irq_re_init);
@@ -14842,6 +14983,220 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
 	.get_base_stats = bnxt_get_base_stats,
 };

+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	u16 mem_size;
+
+	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+	mem_size = rxr->rx_agg_bmap_size / 8;
+	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+	if (!rxr->rx_agg_bmap)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ring_struct *ring;
+	int rc;
+
+	rxr = &bp->rx_ring[idx];
+	clone = qmem;
+	memcpy(clone, rxr, sizeof(*rxr));
+	bnxt_init_rx_ring_struct(bp, clone);
+	bnxt_reset_rx_ring_struct(bp, clone);
+
+	clone->rx_prod = 0;
+	clone->rx_agg_prod = 0;
+	clone->rx_sw_agg_prod = 0;
+	clone->rx_next_cons = 0;
+
+	rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+	if (rc)
+		return rc;
+
+	ring = &clone->rx_ring_struct;
+	rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+	if (rc)
+		goto err_free_rx_ring;
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		ring = &clone->rx_agg_ring_struct;
+		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+		if (rc)
+			goto err_free_rx_agg_ring;
+
+		rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+		if (rc)
+			goto err_free_rx_agg_ring;
+	}
+
+	bnxt_init_one_rx_ring_rxbd(bp, clone);
+	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+
+	return 0;
+
+err_free_rx_agg_ring:
+	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+	bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+	clone->page_pool->p.napi = NULL;
+	page_pool_destroy(clone->page_pool);
+	clone->page_pool = NULL;
+	return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+	struct bnxt_rx_ring_info *rxr = qmem;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ring_struct *ring;
+
+	bnxt_free_one_rx_ring(bp, rxr);
+	bnxt_free_one_rx_agg_ring(bp, rxr);
+
+	page_pool_destroy(rxr->page_pool);
+	rxr->page_pool = NULL;
+
+	ring = &rxr->rx_ring_struct;
+	bnxt_free_ring(bp, &ring->ring_mem);
+
+	ring = &rxr->rx_agg_ring_struct;
+	bnxt_free_ring(bp, &ring->ring_mem);
+
+	kfree(rxr->rx_agg_bmap);
+	rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+			      struct bnxt_rx_ring_info *dst,
+			      struct bnxt_rx_ring_info *src)
+{
+	struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+	struct bnxt_ring_struct *dst_ring, *src_ring;
+	int i;
+
+	dst_ring = &dst->rx_ring_struct;
+	dst_rmem = &dst_ring->ring_mem;
+	src_ring = &src->rx_ring_struct;
+	src_rmem = &src_ring->ring_mem;
+
+	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+	WARN_ON(dst_rmem->flags != src_rmem->flags);
+	WARN_ON(dst_rmem->depth != src_rmem->depth);
+	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+	dst_rmem->pg_tbl = src_rmem->pg_tbl;
+	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+	*dst_rmem->vmem = *src_rmem->vmem;
+	for (i = 0; i < dst_rmem->nr_pages; i++) {
+		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+	}
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return;
+
+	dst_ring = &dst->rx_agg_ring_struct;
+	dst_rmem = &dst_ring->ring_mem;
+	src_ring = &src->rx_agg_ring_struct;
+	src_rmem = &src_ring->ring_mem;
+
+	WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+	WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+	WARN_ON(dst_rmem->flags != src_rmem->flags);
+	WARN_ON(dst_rmem->depth != src_rmem->depth);
+	WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+	WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+	WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+	dst_rmem->pg_tbl = src_rmem->pg_tbl;
+	dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+	*dst_rmem->vmem = *src_rmem->vmem;
+	for (i = 0; i < dst_rmem->nr_pages; i++) {
+		dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+		dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+	}
+
+	dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt_cp_ring_info *cpr;
+	int rc;
+
+	rxr = &bp->rx_ring[idx];
+	clone = qmem;
+
+	rxr->rx_prod = clone->rx_prod;
+	rxr->rx_agg_prod = clone->rx_agg_prod;
+	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+	rxr->rx_next_cons = clone->rx_next_cons;
+	rxr->page_pool = clone->page_pool;
+
+	bnxt_copy_rx_ring(bp, rxr, clone);
+
+	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+	if (rc)
+		return rc;
+	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+	if (rc)
+		goto err_free_hwrm_rx_ring;
+
+	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+	napi_enable(&rxr->bnapi->napi);
+
+	cpr = &rxr->bnapi->cp_ring;
+	cpr->sw_stats->rx.rx_resets++;
+
+	return 0;
+
+err_free_hwrm_rx_ring:
+	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+	return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rx_ring_info *rxr;
+
+	rxr = &bp->rx_ring[idx];
+	napi_disable(&rxr->bnapi->napi);
+	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+	rxr->rx_next_cons = 0;
+	page_pool_disable_direct_recycling(rxr->page_pool);
+
+	memcpy(qmem, rxr, sizeof(*rxr));
+	bnxt_init_rx_ring_struct(bp, qmem);
+
+	return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+	.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
+	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
+	.ndo_queue_mem_free	= bnxt_queue_mem_free,
+	.ndo_queue_start	= bnxt_queue_start,
+	.ndo_queue_stop		= bnxt_queue_stop,
+};
+
 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
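bnxt_queue_mem_alloc() fills a parallel bnxt_rx_ring_info ("clone") while the old queue keeps running, and queue_stop/queue_start swap it in. The core-side caller of these hooks is not part of this diff; roughly, a restart is expected to sequence the ops as in the sketch below (error recovery elided, names hypothetical):

	/* Illustration of a core-driven single-queue restart built on the
	 * netdev_queue_mgmt_ops above.
	 */
	static int restart_rx_queue(struct net_device *dev, int idx)
	{
		const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
		void *new_mem, *old_mem;
		int rc = -ENOMEM;

		new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
		old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
		if (!new_mem || !old_mem)
			goto out;

		rc = ops->ndo_queue_mem_alloc(dev, new_mem, idx); /* prefill */
		if (rc)
			goto out;

		rc = ops->ndo_queue_stop(dev, old_mem, idx); /* detach live queue */
		if (rc) {
			ops->ndo_queue_mem_free(dev, new_mem);
			goto out;
		}

		rc = ops->ndo_queue_start(dev, new_mem, idx); /* attach clone */
		if (!rc)
			ops->ndo_queue_mem_free(dev, old_mem); /* old buffers */
	out:
		kfree(new_mem);
		kfree(old_mem);
		return rc;
	}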
@@ -14859,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_free_l2_filters(bp, true);
 	bnxt_free_ntp_fltrs(bp, true);
-	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
-		bnxt_clear_rss_ctxs(bp, true);
+	WARN_ON(bp->num_rss_ctx);
 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
 	/* Flush any pending tasks */
 	cancel_work_sync(&bp->sp_task);
@@ -15307,6 +15661,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->stat_ops = &bnxt_stat_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
+	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
 	pci_set_drvdata(pdev, dev);

 	rc = bnxt_alloc_hwrm_resources(bp);
@@ -15329,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		bp->flags |= BNXT_FLAG_CHIP_P7;
 	}

-	rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
+	rc = bnxt_alloc_rss_indir_tbl(bp);
 	if (rc)
 		goto init_err_pci_clean;
@@ -15486,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_LIST_HEAD(&bp->usr_fltr_list);

 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
-		bnxt_init_multi_rss_ctx(bp);
-
+		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;

 	rc = register_netdev(dev);
 	if (rc)
@@ -15510,8 +15864,6 @@ init_err_dl:
 	bnxt_clear_int_mode(bp);

 init_err_pci_clean:
-	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
-		bnxt_clear_rss_ctxs(bp, true);
 	bnxt_hwrm_func_drv_unrgtr(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_hwmon_uninit(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 6b10a09ee1af..6bbdc718c3a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -181,6 +181,32 @@ struct tx_cmp {
 #define TX_CMP_SQ_CONS_IDX(txcmp)					\
 	(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)

+struct tx_ts_cmp {
+	__le32 tx_ts_cmp_flags_type;
+	#define TX_TS_CMP_FLAGS_ERROR		(1 << 6)
+	#define TX_TS_CMP_FLAGS_TS_TYPE		(1 << 7)
+	 #define TX_TS_CMP_FLAGS_TS_TYPE_PM	 (0 << 7)
+	 #define TX_TS_CMP_FLAGS_TS_TYPE_PA	 (1 << 7)
+	#define TX_TS_CMP_FLAGS_TS_FALLBACK	(1 << 8)
+	#define TX_TS_CMP_TS_SUB_NS		(0xf << 12)
+	#define TX_TS_CMP_TS_NS_MID		(0xffff << 16)
+	#define TX_TS_CMP_TS_NS_MID_SFT		16
+	u32 tx_ts_cmp_opaque;
+	__le32 tx_ts_cmp_errors_v;
+	#define TX_TS_CMP_V			(1 << 0)
+	#define TX_TS_CMP_TS_INVALID_ERR	(1 << 10)
+	__le32 tx_ts_cmp_ts_ns_lo;
+};
+
+#define BNXT_GET_TX_TS_48B_NS(tscmp)					\
+	(le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) |			\
+	 ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) &		\
+		TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
+
+#define BNXT_TX_TS_ERR(tscmp)						\
+	(((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+	 ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
 struct rx_cmp {
 	__le32 rx_cmp_len_flags_type;
 	#define RX_CMP_CMP_TYPE			(0x3f << 0)
@@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd {
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	DEFINE_DMA_UNMAP_LEN(len);
 	struct page		*page;
-	u8			is_gso;
+	u8			is_ts_pkt;
 	u8			is_push;
 	u8			action;
 	unsigned short		nr_frags;
-	u16			rx_prod;
+	union {
+		u16		rx_prod;
+		u16		txts_prod;
+	};
 };

 struct bnxt_sw_rx_bd {
@@ -1257,19 +1286,16 @@ struct bnxt_vnic_info {
 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
 #define BNXT_VNIC_NTUPLE_FLAG		0x20
 #define BNXT_VNIC_RSSCTX_FLAG		0x40
-	struct bnxt_rss_ctx	*rss_ctx;
+	struct ethtool_rxfh_context *rss_ctx;
 	u32			vnic_id;
 };

 struct bnxt_rss_ctx {
-	struct list_head list;
 	struct bnxt_vnic_info vnic;
-	u16	*rss_indir_tbl;
 	u8	index;
 };

 #define BNXT_MAX_ETH_RSS_CTX	32
-#define BNXT_RSS_CTX_BMAP_LEN	(BNXT_MAX_ETH_RSS_CTX + 1)
 #define BNXT_VNIC_ID_INVALID	0xffffffff

 struct bnxt_hw_rings {
@@ -2237,9 +2263,17 @@ struct bnxt {
 			 (BNXT_CHIP_NUM_58700((bp)->chip_num) &&	\
 			  !BNXT_CHIP_TYPE_NITRO_A0(bp)))

+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp)					\
+	(BNXT_CHIP_NUM_57X0X((bp)->chip_num) ||			\
+	 BNXT_CHIP_TYPE_NITRO_A0(bp))
+
 #define BNXT_CHIP_P4_PLUS(bp)			\
 	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))

+#define BNXT_CHIP_P5_AND_MINUS(bp)				\
+	(BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
 	struct bnxt_aux_priv	*aux_priv;
 	struct bnxt_en_dev	*edev;
@@ -2294,11 +2328,9 @@ struct bnxt {
 	/* grp_info indexed by completion ring index */
 	struct bnxt_ring_grp_info	*grp_info;
 	struct bnxt_vnic_info	*vnic_info;
-	struct list_head	rss_ctx_list;
-	unsigned long		*rss_ctx_bmap;
 	u32			num_rss_ctx;
 	int			nr_vnics;
-	u16			*rss_indir_tbl;
+	u32			*rss_indir_tbl;
 	u16			rss_indir_tbl_entries;
 	u32			rss_hash_cfg;
 	u32			rss_hash_delta;
@@ -2384,6 +2416,7 @@ struct bnxt {
 	#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2	BIT_ULL(16)
 	#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED	BIT_ULL(17)
 	#define BNXT_FW_CAP_EXT_STATS_SUPPORTED		BIT_ULL(18)
+	#define BNXT_FW_CAP_TX_TS_CMP			BIT_ULL(19)
 	#define BNXT_FW_CAP_ERR_RECOVER_RELOAD		BIT_ULL(20)
 	#define BNXT_FW_CAP_HOT_RESET			BIT_ULL(21)
 	#define BNXT_FW_CAP_PTP_RTC			BIT_ULL(22)
@@ -2774,9 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 			   u32 tpa_flags);
 void bnxt_fill_ipv6_mask(__be32 mask[4]);
-int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
-void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+				 struct ethtool_rxfh_context *rss_ctx);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -2810,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 			  bool all);
-struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
-void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all);
+void bnxt_clear_rss_ctxs(struct bnxt *bp);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_half_open_nic(struct bnxt *bp);
 void bnxt_half_close_nic(struct bnxt *bp);
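BNXT_GET_TX_TS_48B_NS() splices a 48-bit nanosecond timestamp out of two words of the completion: the low 32 bits come from tx_ts_cmp_ts_ns_lo, and bits 32..47 come from the NS_MID field (bits 16..31) of tx_ts_cmp_flags_type. A worked example, with the le32_to_cpu() byte-swapping omitted for clarity:

	#include <stdint.h>

	/* Mirrors the BNXT_GET_TX_TS_48B_NS() bit layout. */
	static uint64_t ts48_ns(uint32_t flags_type, uint32_t ts_ns_lo)
	{
		return (uint64_t)ts_ns_lo |
		       ((uint64_t)(flags_type & (0xffffu << 16)) << 16);
	}

	/* ts48_ns(0x45670000, 0x89abcdef) == 0x456789abcdefULL:
	 * NS_MID = 0x4567 lands in bits 32..47, ts_ns_lo in bits 0..31.
	 */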
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 79c09c1cdf93..d00ef0063820 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev,
 			return rc;
 	}

-	if (req_rx_rings < bp->rx_nr_rings &&
-	    req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) {
-		netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n");
-		return -EINVAL;
-	}
-
 	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
 	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
 	    netif_is_rxfh_configured(dev)) {
@@ -976,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev,

 	bnxt_clear_usr_fltrs(bp, true);
 	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
-		bnxt_clear_rss_ctxs(bp, false);
+		bnxt_clear_rss_ctxs(bp);
 	if (netif_running(dev)) {
 		if (BNXT_PF(bp)) {
 			/* TODO CHIMP_FW: Send message to all VF's
@@ -1216,19 +1210,18 @@ fltr_err:

 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
 							u32 index)
 {
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
+	struct ethtool_rxfh_context *ctx;

-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
-		if (rss_ctx->index == index)
-			return rss_ctx;
-	return NULL;
+	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+	if (!ctx)
+		return NULL;
+	return ethtool_rxfh_context_priv(ctx);
 }

-static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
-					struct bnxt_rss_ctx *rss_ctx)
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+				     struct bnxt_vnic_info *vnic)
 {
 	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
-	struct bnxt_vnic_info *vnic = &rss_ctx->vnic;

 	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
 	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
@@ -1807,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
 static int bnxt_get_rxfh(struct net_device *dev,
 			 struct ethtool_rxfh_param *rxfh)
 {
-	u32 rss_context = rxfh->rss_context;
 	struct bnxt_rss_ctx *rss_ctx = NULL;
 	struct bnxt *bp = netdev_priv(dev);
-	u16 *indir_tbl = bp->rss_indir_tbl;
+	u32 *indir_tbl = bp->rss_indir_tbl;
 	struct bnxt_vnic_info *vnic;
 	u32 i, tbl_size;

@@ -1821,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev,
 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
 	if (rxfh->rss_context) {
-		rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
-		if (!rss_ctx)
+		struct ethtool_rxfh_context *ctx;
+
+		ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+		if (!ctx)
 			return -EINVAL;
-		indir_tbl = rss_ctx->rss_indir_tbl;
+		indir_tbl = ethtool_rxfh_context_indir(ctx);
+		rss_ctx = ethtool_rxfh_context_priv(ctx);
 		vnic = &rss_ctx->vnic;
 	}

@@ -1840,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev,
 	return 0;
 }

-static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
-			    struct ethtool_rxfh_param *rxfh)
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+			    struct bnxt_rss_ctx *rss_ctx,
+			    const struct ethtool_rxfh_param *rxfh)
 {
 	if (rxfh->key) {
 		if (rss_ctx) {
@@ -1854,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 	}
 	if (rxfh->indir) {
 		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
-		u16 *indir_tbl = bp->rss_indir_tbl;
+		u32 *indir_tbl = bp->rss_indir_tbl;

 		if (rss_ctx)
-			indir_tbl = rss_ctx->rss_indir_tbl;
+			indir_tbl = ethtool_rxfh_context_indir(ctx);
 		for (i = 0; i < tbl_size; i++)
 			indir_tbl[i] = rxfh->indir[i];
 		pad = bp->rss_indir_tbl_entries - tbl_size;
 		if (pad)
-			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+			memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
 	}
 }

-static int bnxt_set_rxfh_context(struct bnxt *bp,
-				 struct ethtool_rxfh_param *rxfh,
-				 struct netlink_ext_ack *extack)
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+				   struct netlink_ext_ack *extack)
 {
-	u32 *rss_context = &rxfh->rss_context;
-	struct bnxt_rss_ctx *rss_ctx;
-	struct bnxt_vnic_info *vnic;
-	bool modify = false;
-	int bit_id;
-	int rc;
-
 	if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
 		NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
 		return -EOPNOTSUPP;
@@ -1887,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
 		return -EAGAIN;
 	}

-	if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
-		rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
-		if (!rss_ctx) {
-			NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
-					       *rss_context);
-			return -EINVAL;
-		}
-		if (*rss_context && rxfh->rss_delete) {
-			bnxt_del_one_rss_ctx(bp, rss_ctx, true);
-			return 0;
-		}
-		modify = true;
-		vnic = &rss_ctx->vnic;
-		goto modify_context;
-	}
+	return 0;
+}
+
+static int bnxt_create_rxfh_context(struct net_device *dev,
+				    struct ethtool_rxfh_context *ctx,
+				    const struct ethtool_rxfh_param *rxfh,
+				    struct netlink_ext_ack *extack)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rss_ctx *rss_ctx;
+	struct bnxt_vnic_info *vnic;
+	int rc;
+
+	rc = bnxt_rxfh_context_check(bp, extack);
+	if (rc)
+		return rc;

 	if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
 		NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
@@ -1914,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
 		return -ENOMEM;
 	}

-	rss_ctx = bnxt_alloc_rss_ctx(bp);
-	if (!rss_ctx)
-		return -ENOMEM;
+	rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+	bp->num_rss_ctx++;

 	vnic = &rss_ctx->vnic;
+	vnic->rss_ctx = ctx;
 	vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
 	vnic->vnic_id = BNXT_VNIC_ID_INVALID;
-	rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
+	rc = bnxt_alloc_vnic_rss_table(bp, vnic);
 	if (rc)
 		goto out;

-	rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
-	if (rc)
-		goto out;
-
-	bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
+	bnxt_set_dflt_rss_indir_tbl(bp, ctx);
 	memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);

 	rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
@@ -1943,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
 		NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
 		goto out;
 	}
-modify_context:
-	bnxt_modify_rss(bp, rss_ctx, rxfh);
-
-	if (modify)
-		return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+	bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);

 	rc = __bnxt_setup_vnic_p5(bp, vnic);
 	if (rc) {
@@ -1955,21 +1937,47 @@ modify_context:
 		goto out;
 	}

-	bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
-					 BNXT_RSS_CTX_BMAP_LEN, 0);
-	if (bit_id < 0) {
-		rc = -ENOMEM;
-		goto out;
-	}
-	rss_ctx->index = (u16)bit_id;
-	*rss_context = rss_ctx->index;
-
+	rss_ctx->index = rxfh->rss_context;
 	return 0;
 out:
 	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
 	return rc;
 }

+static int bnxt_modify_rxfh_context(struct net_device *dev,
+				    struct ethtool_rxfh_context *ctx,
+				    const struct ethtool_rxfh_param *rxfh,
+				    struct netlink_ext_ack *extack)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rss_ctx *rss_ctx;
+	int rc;
+
+	rc = bnxt_rxfh_context_check(bp, extack);
+	if (rc)
+		return rc;
+
+	rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+	bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+	return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+				    struct ethtool_rxfh_context *ctx,
+				    u32 rss_context,
+				    struct netlink_ext_ack *extack)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rss_ctx *rss_ctx;
+
+	rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+	return 0;
+}
+
 static int bnxt_set_rxfh(struct net_device *dev,
 			 struct ethtool_rxfh_param *rxfh,
 			 struct netlink_ext_ack *extack)
@@ -1980,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;

-	if (rxfh->rss_context)
-		return bnxt_set_rxfh_context(bp, rxfh, extack);
-
-	bnxt_modify_rss(bp, NULL, rxfh);
+	bnxt_modify_rss(bp, NULL, NULL, rxfh);

 	bnxt_clear_usr_fltrs(bp, false);
 	if (netif_running(bp->dev)) {
@@ -5019,7 +5024,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
 }

 static int bnxt_get_ts_info(struct net_device *dev,
-			    struct ethtool_ts_info *info)
+			    struct kernel_ethtool_ts_info *info)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_ptp_cfg *ptp;
@@ -5239,6 +5244,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
 	*ranges = bnxt_rmon_ranges;
 }

+static void bnxt_get_ptp_stats(struct net_device *dev,
+			       struct ethtool_ts_stats *ts_stats)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+	if (ptp) {
+		ts_stats->pkts = ptp->stats.ts_pkts;
+		ts_stats->lost = ptp->stats.ts_lost;
+		ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+	}
+}
+
 static void bnxt_get_link_ext_stats(struct net_device *dev,
 				    struct ethtool_link_ext_stats *stats)
 {
@@ -5262,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp)
 const struct ethtool_ops bnxt_ethtool_ops = {
 	.cap_link_lanes_supported	= 1,
 	.cap_rss_ctx_supported		= 1,
+	.rxfh_max_context_id		= BNXT_MAX_ETH_RSS_CTX,
+	.rxfh_indir_space		= BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+	.rxfh_priv_size			= sizeof(struct bnxt_rss_ctx),
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_MAX_FRAMES |
 				     ETHTOOL_COALESCE_USECS_IRQ |
@@ -5299,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
 	.get_rxfh_key_size	= bnxt_get_rxfh_key_size,
 	.get_rxfh		= bnxt_get_rxfh,
 	.set_rxfh		= bnxt_set_rxfh,
+	.create_rxfh_context	= bnxt_create_rxfh_context,
+	.modify_rxfh_context	= bnxt_modify_rxfh_context,
+	.remove_rxfh_context	= bnxt_remove_rxfh_context,
 	.flash_device		= bnxt_flash_device,
 	.get_eeprom_len		= bnxt_get_eeprom_len,
 	.get_eeprom		= bnxt_get_eeprom,
@@ -5322,4 +5346,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
 	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
 	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
 	.get_rmon_stats		= bnxt_get_rmon_stats,
+	.get_ts_stats		= bnxt_get_ptp_stats,
 };
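With the create/modify/remove_rxfh_context ops plus rxfh_priv_size, the ethtool core allocates each context, assigns its ID, and stores the indirection table and key; the driver only programs the hardware and keeps its state in the core-reserved private tail. A minimal sketch of that contract for a hypothetical "foo" driver (signatures taken from the ops used above):

	struct foo_rss_ctx {
		u32 hw_vnic_id;		/* hardware object for this context */
	};

	static int foo_create_rxfh_context(struct net_device *dev,
					   struct ethtool_rxfh_context *ctx,
					   const struct ethtool_rxfh_param *rxfh,
					   struct netlink_ext_ack *extack)
	{
		struct foo_rss_ctx *priv = ethtool_rxfh_context_priv(ctx);
		u32 *indir = ethtool_rxfh_context_indir(ctx); /* core-owned */

		/* Program indir[]/key into hardware here, then record the
		 * resulting hardware id in the core-reserved priv area.
		 */
		priv->hw_vnic_id = indir[0];	/* placeholder, not real hw */
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.cap_rss_ctx_supported	= 1,
		.rxfh_priv_size		= sizeof(struct foo_rss_ctx),
		.create_rxfh_context	= foo_create_rxfh_context,
		/* .modify_rxfh_context / .remove_rxfh_context likewise */
	};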
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index e661ab154d6b..37d42423459c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -110,7 +110,7 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
 }

 static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
-				   u32 txts_tmo)
+				   u32 txts_tmo, int slot)
 {
 	struct hwrm_port_ts_query_output *resp;
 	struct hwrm_port_ts_query_input *req;
@@ -123,11 +123,12 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
 	req->flags = cpu_to_le32(flags);
 	if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
 	    PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+		struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
 		u32 tmo_us = txts_tmo * 1000;

 		req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
-		req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
-		req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
+		req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+		req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
 		if (!tmo_us)
 			tmo_us = BNXT_PTP_QTS_TIMEOUT;
 		tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
@@ -656,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
 				(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
 		return 0;
 	}
+	if (bp->flags & BNXT_FLAG_CHIP_P7) {
+		for (i = 0; i < 2; i++) {
+			if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+				return -EINVAL;
+			ptp->refclk_mapped_regs[i] = reg_arr[i];
+		}
+		return 0;
+	}
 	return -ENODEV;
 }
@@ -674,41 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
 	return ns;
 }

-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
 {
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 	struct skb_shared_hwtstamps timestamp;
+	struct bnxt_ptp_tx_req *txts_req;
 	unsigned long now = jiffies;
 	u64 ts = 0, ns = 0;
 	u32 tmo = 0;
 	int rc;

-	if (!ptp->txts_pending)
-		ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo);
-	if (!time_after_eq(now, ptp->abs_txts_tmo))
-		tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now);
+	txts_req = &ptp->txts_req[slot];
+	/* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+	smp_rmb();
+	if (!time_after_eq(now, txts_req->abs_txts_tmo))
+		tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
 	rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
-				     tmo);
+				     tmo, slot);
 	if (!rc) {
 		memset(&timestamp, 0, sizeof(timestamp));
 		spin_lock_bh(&ptp->ptp_lock);
 		ns = timecounter_cyc2time(&ptp->tc, ts);
 		spin_unlock_bh(&ptp->ptp_lock);
 		timestamp.hwtstamp = ns_to_ktime(ns);
-		skb_tstamp_tx(ptp->tx_skb, &timestamp);
+		skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+		ptp->stats.ts_pkts++;
 	} else {
-		if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) {
-			ptp->txts_pending = true;
-			return;
-		}
+		if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+			return -EAGAIN;
+
+		ptp->stats.ts_lost++;
 		netdev_warn_once(bp->dev,
 				 "TS query for TX timer failed rc = %x\n", rc);
 	}

-	dev_kfree_skb_any(ptp->tx_skb);
-	ptp->tx_skb = NULL;
-	atomic_inc(&ptp->tx_avail);
-	ptp->txts_pending = false;
+	dev_kfree_skb_any(txts_req->tx_skb);
+	txts_req->tx_skb = NULL;
+
+	return 0;
 }
@@ -717,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
 						ptp_info);
 	unsigned long now = jiffies;
 	struct bnxt *bp = ptp->bp;
+	u16 cons = ptp->txts_cons;
+	u32 num_requests;
+	int rc = 0;
+
+	num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+	while (num_requests--) {
+		if (IS_ERR(ptp->txts_req[cons].tx_skb))
+			goto next_slot;
+		if (!ptp->txts_req[cons].tx_skb)
+			break;
+		rc = bnxt_stamp_tx_skb(bp, cons);
+		if (rc == -EAGAIN)
+			break;
+next_slot:
+		BNXT_PTP_INC_TX_AVAIL(ptp);
+		cons = NEXT_TXTS(cons);
+	}
+	ptp->txts_cons = cons;

-	if (ptp->tx_skb)
-		bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
-	if (!time_after_eq(now, ptp->next_period))
+	if (!time_after_eq(now, ptp->next_period)) {
+		if (rc == -EAGAIN)
+			return 0;
 		return ptp->next_period - now;
+	}

 	bnxt_ptp_get_current_time(bp);
 	ptp->next_period = now + HZ;
@@ -732,22 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
 		spin_unlock_bh(&ptp->ptp_lock);
 		ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
 	}
-	if (ptp->txts_pending)
+	if (rc == -EAGAIN)
 		return 0;
 	return HZ;
 }

-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+	spin_lock_bh(&ptp->ptp_tx_lock);
+	if (ptp->tx_avail) {
+		*prod = ptp->txts_prod;
+		ptp->txts_prod = NEXT_TXTS(*prod);
+		ptp->tx_avail--;
+		spin_unlock_bh(&ptp->ptp_tx_lock);
+		return 0;
+	}
+	spin_unlock_bh(&ptp->ptp_tx_lock);
+	atomic64_inc(&ptp->stats.ts_err);
+	return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
 {
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	struct bnxt_ptp_tx_req *txts_req;

-	if (ptp->tx_skb) {
-		netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
-		return -EBUSY;
-	}
-	ptp->tx_skb = skb;
+	txts_req = &ptp->txts_req[prod];
+	txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+	/* make sure abs_txts_tmo is written first */
+	smp_wmb();
+	txts_req->tx_skb = skb;
 	ptp_schedule_worker(ptp->ptp_clock, 0);
-	return 0;
 }

 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
@@ -766,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
 	return 0;
 }

+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+		    struct tx_ts_cmp *tscmp)
+{
+	struct skb_shared_hwtstamps timestamp = {};
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	u32 opaque = tscmp->tx_ts_cmp_opaque;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_sw_tx_bd *tx_buf;
+	u64 ts, ns;
+	u16 cons;
+
+	txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+	ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+	cons = TX_OPAQUE_IDX(opaque);
+	tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+	if (tx_buf->is_ts_pkt) {
+		if (BNXT_TX_TS_ERR(tscmp)) {
+			netdev_err(bp->dev,
+				   "timestamp completion error 0x%x 0x%x\n",
+				   le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+				   le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+		} else {
+			spin_lock_bh(&ptp->ptp_lock);
+			ns = timecounter_cyc2time(&ptp->tc, ts);
+			spin_unlock_bh(&ptp->ptp_lock);
+			timestamp.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(tx_buf->skb, &timestamp);
+		}
+		tx_buf->is_ts_pkt = 0;
+	}
+}
+
 static const struct ptp_clock_info bnxt_ptp_caps = {
 	.owner		= THIS_MODULE,
 	.name		= "bnxt clock",
@@ -912,7 +989,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
 			return rc;
 	} else {
 		rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
-					     &ns, 0);
+					     &ns, 0, 0);
 		if (rc)
 			return rc;
 	}
@@ -952,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)

 	bnxt_ptp_free(bp);

-	atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+	WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
 	spin_lock_init(&ptp->ptp_lock);
+	spin_lock_init(&ptp->ptp_tx_lock);

 	if (BNXT_PTP_USE_RTC(bp)) {
 		bnxt_ptp_timecounter_init(bp, false);
@@ -979,7 +1057,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
 		rc = err;
 		goto out;
 	}
-	if (BNXT_CHIP_P5(bp)) {
+
+	ptp->stats.ts_pkts = 0;
+	ptp->stats.ts_lost = 0;
+	atomic64_set(&ptp->stats.ts_err, 0);
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		spin_lock_bh(&ptp->ptp_lock);
 		bnxt_refclk_read(bp, NULL, &ptp->current_time);
 		WRITE_ONCE(ptp->old_time, ptp->current_time);
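bnxt_get_tx_ts_p5() publishes a slot to the PTP worker with an smp_wmb()/smp_rmb() pair: abs_txts_tmo must be globally visible before tx_skb, because the worker treats a non-NULL tx_skb as "slot ready" and then reads the deadline. The generic publish/consume shape, with hypothetical field names:

	struct slot {
		unsigned long deadline;
		void *pkt;
	};

	static void publish(struct slot *s, void *pkt, unsigned long deadline)
	{
		s->deadline = deadline;
		smp_wmb();		/* deadline visible before pkt */
		WRITE_ONCE(s->pkt, pkt);
	}

	static void *consume(struct slot *s, unsigned long *deadline)
	{
		void *pkt = READ_ONCE(s->pkt);

		if (!pkt)
			return NULL;
		smp_rmb();		/* pairs with smp_wmb() in publish() */
		*deadline = s->deadline;
		return pkt;
	}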
@@ -998,6 +1081,7 @@ out:
 void bnxt_ptp_clear(struct bnxt *bp)
 {
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	int i;

 	if (!ptp)
 		return;
@@ -1009,9 +1093,12 @@ void bnxt_ptp_clear(struct bnxt *bp)
 	kfree(ptp->ptp_info.pin_config);
 	ptp->ptp_info.pin_config = NULL;

-	if (ptp->tx_skb) {
-		dev_kfree_skb_any(ptp->tx_skb);
-		ptp->tx_skb = NULL;
+	for (i = 0; i < BNXT_MAX_TX_TS; i++) {
+		if (ptp->txts_req[i].tx_skb) {
+			dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
+			ptp->txts_req[i].tx_skb = NULL;
+		}
 	}
+
 	bnxt_unmap_ptp_regs(bp);
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 2c3415c8fc03..a9a2f9a18c9c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -79,6 +79,22 @@ struct bnxt_pps {
 	struct pps_pin pins[BNXT_MAX_TSIO_PINS];
 };

+struct bnxt_ptp_stats {
+	u64		ts_pkts;
+	u64		ts_lost;
+	atomic64_t	ts_err;
+};
+
+#define BNXT_MAX_TX_TS	4
+#define NEXT_TXTS(idx)	(((idx) + 1) & (BNXT_MAX_TX_TS - 1))
+
+struct bnxt_ptp_tx_req {
+	struct sk_buff		*tx_skb;
+	u16			tx_seqid;
+	u16			tx_hdr_off;
+	unsigned long		abs_txts_tmo;
+};
+
 struct bnxt_ptp_cfg {
 	struct ptp_clock_info	ptp_info;
 	struct ptp_clock	*ptp_clock;
@@ -87,7 +103,8 @@ struct bnxt_ptp_cfg {
 	struct bnxt_pps		pps_info;
 	/* serialize timecounter access */
 	spinlock_t		ptp_lock;
-	struct sk_buff		*tx_skb;
+	/* serialize ts tx request queuing */
+	spinlock_t		ptp_tx_lock;
 	u64			current_time;
 	u64			old_time;
 	unsigned long		next_period;
@@ -96,11 +113,10 @@ struct bnxt_ptp_cfg {
 	/* a 23b shift cyclecounter will overflow in ~36 mins.  Check overflow every 18 mins. */
 	#define BNXT_PHC_OVERFLOW_PERIOD	(18 * 60 * HZ)

-	u16			tx_seqid;
-	u16			tx_hdr_off;
+	struct bnxt_ptp_tx_req	txts_req[BNXT_MAX_TX_TS];
+
 	struct bnxt		*bp;
-	atomic_t		tx_avail;
-#define BNXT_MAX_TX_TS	1
+	u32			tx_avail;
 	u16			rxctl;
 #define BNXT_PTP_MSG_SYNC			(1 << 0)
 #define BNXT_PTP_MSG_DELAY_REQ			(1 << 1)
@@ -117,14 +133,16 @@ struct bnxt_ptp_cfg {
 					 BNXT_PTP_MSG_PDELAY_REQ |	\
 					 BNXT_PTP_MSG_PDELAY_RESP)
 	u8			tx_tstamp_en:1;
-	u8			txts_pending:1;
 	int			rx_filter;
 	u32			tstamp_filters;

 	u32			refclk_regs[2];
 	u32			refclk_mapped_regs[2];
 	u32			txts_tmo;
-	unsigned long		abs_txts_tmo;
+	u16			txts_prod;
+	u16			txts_cons;
+
+	struct bnxt_ptp_stats	stats;
 };

 #if BITS_PER_LONG == 32
@@ -139,6 +157,13 @@ do {						\
 	((dst) = READ_ONCE(src))
 #endif

+#define BNXT_PTP_INC_TX_AVAIL(ptp)		\
+do {						\
+	spin_lock_bh(&(ptp)->ptp_tx_lock);	\
+	(ptp)->tx_avail++;			\
+	spin_unlock_bh(&(ptp)->ptp_tx_lock);	\
+} while (0)
+
 int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
 void bnxt_ptp_update_current_time(struct bnxt *bp);
 void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
@@ -146,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
 void bnxt_ptp_reapply_pps(struct bnxt *bp);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+		    struct tx_ts_cmp *tscmp);
 void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
 int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
 int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);