Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3_enet.c	565
1 file changed, 327 insertions(+), 238 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac..f2b31d278bc9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -25,6 +25,9 @@
 #include "hnae3.h"
 #include "hns3_enet.h"
 
+static void hns3_clear_all_ring(struct hnae3_handle *h);
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+
 static const char hns3_driver_name[] = "hns3";
 const char hns3_driver_version[] = VERMAGIC_STRING;
 static const char hns3_driver_string[] =
@@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	int i, j;
 	int ret;
 
+	ret = hns3_nic_reset_all_ring(h);
+	if (ret)
+		return ret;
+
 	/* get irq resource for all vectors */
 	ret = hns3_nic_init_irq(priv);
 	if (ret) {
@@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 		return;
 
+	/* disable vectors */
+	for (i = 0; i < priv->vector_num; i++)
+		hns3_vector_disable(&priv->tqp_vector[i]);
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
 		ops->stop(priv->ae_handle);
 
-	/* disable vectors */
-	for (i = 0; i < priv->vector_num; i++)
-		hns3_vector_disable(&priv->tqp_vector[i]);
-
 	/* free irq resources */
 	hns3_nic_uninit_irq(priv);
+
+	hns3_clear_all_ring(priv->ae_handle);
 }
 
 static int hns3_nic_net_stop(struct net_device *netdev)
@@ -406,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
 
 	if (h->ae_algo->ops->set_promisc_mode) {
 		if (netdev->flags & IFF_PROMISC)
-			h->ae_algo->ops->set_promisc_mode(h, 1);
+			h->ae_algo->ops->set_promisc_mode(h, true, true);
+		else if (netdev->flags & IFF_ALLMULTI)
+			h->ae_algo->ops->set_promisc_mode(h, false, true);
 		else
-			h->ae_algo->ops->set_promisc_mode(h, 0);
+			h->ae_algo->ops->set_promisc_mode(h, false, false);
 	}
 	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
 		netdev_err(netdev, "sync uc address fail\n");
-	if (netdev->flags & IFF_MULTICAST)
+	if (netdev->flags & IFF_MULTICAST) {
 		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
 			netdev_err(netdev, "sync mc address fail\n");
+
+		if (h->ae_algo->ops->update_mta_status)
+			h->ae_algo->ops->update_mta_status(h);
+	}
 }
 
 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
@@ -502,7 +517,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
 
 	/* find outer header point */
 	l3.hdr = skb_network_header(skb);
-	l4_hdr = skb_inner_transport_header(skb);
+	l4_hdr = skb_transport_header(skb);
 
 	if (skb->protocol == htons(ETH_P_IPV6)) {
 		exthdr = l3.hdr + sizeof(*l3.v6);
@@ -644,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 	}
 }
 
+/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
+ * and it is udp packet, which has a dest port as the IANA assigned.
+ * the hardware is expected to do the checksum offload, but the
+ * hardware will not do the checksum offload when udp dest port is
+ * 4789.
+ */
+static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+{
+#define IANA_VXLAN_PORT	4789
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		struct gre_base_hdr *gre;
+		unsigned char *hdr;
+	} l4;
+
+	l4.hdr = skb_transport_header(skb);
+
+	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+		return false;
+
+	skb_checksum_help(skb);
+
+	return true;
+}
+
 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 				   u8 il4_proto, u32 *type_cs_vlan_tso,
 				   u32 *ol_type_vlan_len_msec)
@@ -732,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 				       HNS3_L4T_TCP);
 			break;
 		case IPPROTO_UDP:
+			if (hns3_tunnel_csum_bug(skb))
+				break;
+
 			hnae_set_field(*type_cs_vlan_tso,
 				       HNS3_TXD_L4T_M,
 				       HNS3_TXD_L4T_S,
@@ -1121,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
 	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
+		netdev_info(netdev, "already using mac address %pM\n",
+			    mac_addr->sa_data);
+		return 0;
+	}
+
 	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
 	if (ret) {
 		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1244,93 +1294,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 	stats->tx_compressed = netdev->stats.tx_compressed;
 }
 
-static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
-				 enum hns3_udp_tnl_type type)
-{
-	struct hns3_nic_priv *priv = netdev_priv(netdev);
-	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
-	struct hnae3_handle *h = priv->ae_handle;
-
-	if (udp_tnl->used && udp_tnl->dst_port == port) {
-		udp_tnl->used++;
-		return;
-	}
-
-	if (udp_tnl->used) {
-		netdev_warn(netdev,
-			    "UDP tunnel [%d], port [%d] offload\n", type, port);
-		return;
-	}
-
-	udp_tnl->dst_port = port;
-	udp_tnl->used = 1;
-	/* TBD send command to hardware to add port */
-	if (h->ae_algo->ops->add_tunnel_udp)
-		h->ae_algo->ops->add_tunnel_udp(h, port);
-}
-
-static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
-				 enum hns3_udp_tnl_type type)
-{
-	struct hns3_nic_priv *priv = netdev_priv(netdev);
-	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
-	struct hnae3_handle *h = priv->ae_handle;
-
-	if (!udp_tnl->used || udp_tnl->dst_port != port) {
-		netdev_warn(netdev,
-			    "Invalid UDP tunnel port %d\n", port);
-		return;
-	}
-
-	udp_tnl->used--;
-	if (udp_tnl->used)
-		return;
-
-	udp_tnl->dst_port = 0;
-	/* TBD send command to hardware to del port */
-	if (h->ae_algo->ops->del_tunnel_udp)
-		h->ae_algo->ops->del_tunnel_udp(h, port);
-}
-
-/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
- * @netdev: This physical ports's netdev
- * @ti: Tunnel information
- */
-static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
-				    struct udp_tunnel_info *ti)
-{
-	u16 port_n = ntohs(ti->port);
-
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
-		break;
-	default:
-		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
-		break;
-	}
-}
-
-static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
-				    struct udp_tunnel_info *ti)
-{
-	u16 port_n = ntohs(ti->port);
-
-	switch (ti->type) {
-	case UDP_TUNNEL_TYPE_VXLAN:
-		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
-		break;
-	case UDP_TUNNEL_TYPE_GENEVE:
-		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
-		break;
-	default:
-		break;
-	}
-}
-
 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
 {
 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
@@ -1569,13 +1532,50 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_get_stats64	= hns3_nic_get_stats64,
 	.ndo_setup_tc		= hns3_nic_setup_tc,
 	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
-	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
 	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
 	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
 };
 
+static bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+	u32 dev_id = pdev->device;
+
+	switch (dev_id) {
+	case HNAE3_DEV_ID_GE:
+	case HNAE3_DEV_ID_25GE:
+	case HNAE3_DEV_ID_25GE_RDMA:
+	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+	case HNAE3_DEV_ID_50GE_RDMA:
+	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+		return true;
+	case HNAE3_DEV_ID_100G_VF:
+	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+		return false;
+	default:
+		dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+			 dev_id);
+	}
+
+	return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+	/* If our VFs are assigned we cannot shut down SR-IOV
+	 * without causing issues, so just leave the hardware
+	 * available but disabled
+	 */
+	if (pci_vfs_assigned(pdev)) {
+		dev_warn(&pdev->dev,
+			 "disabling driver while VFs are assigned\n");
+		return;
+	}
+
+	pci_disable_sriov(pdev);
+}
+
 /* hns3_probe - Device initialization routine
  * @pdev: PCI device information struct
  * @ent: entry in hns3_pci_tbl
@@ -1603,7 +1603,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ae_dev->dev_type = HNAE3_DEV_KNIC;
 	pci_set_drvdata(pdev, ae_dev);
 
-	return hnae3_register_ae_dev(ae_dev);
+	hnae3_register_ae_dev(ae_dev);
+
+	return 0;
 }
 
 /* hns3_remove - Device removal routine
@@ -1613,21 +1615,56 @@ static void hns3_remove(struct pci_dev *pdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 
+	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+		hns3_disable_sriov(pdev);
+
 	hnae3_unregister_ae_dev(ae_dev);
 }
 
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	int ret;
+
+	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+		dev_warn(&pdev->dev, "Can not config SRIOV\n");
+		return -EINVAL;
+	}
+
+	if (num_vfs) {
+		ret = pci_enable_sriov(pdev, num_vfs);
+		if (ret)
+			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+		else
+			return num_vfs;
+	} else if (!pci_vfs_assigned(pdev)) {
+		pci_disable_sriov(pdev);
+	} else {
+		dev_warn(&pdev->dev,
+			 "Unable to free VFs because some are assigned to VMs.\n");
+	}
+
+	return 0;
+}
+
 static struct pci_driver hns3_driver = {
 	.name     = hns3_driver_name,
 	.id_table = hns3_pci_tbl,
 	.probe    = hns3_probe,
 	.remove   = hns3_remove,
+	.sriov_configure = hns3_pci_sriov_configure,
 };
 
 /* set default feature to hns3 */
 static void hns3_set_default_feature(struct net_device *netdev)
 {
-	struct hnae3_handle *h = hns3_get_handle(netdev);
-
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1656,15 +1693,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_HW_VLAN_CTAG_TX |
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-	if (!(h->flags & HNAE3_SUPPORT_VF))
-		netdev->hw_features |=
-			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -1836,6 +1869,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+	ring->desc[i].rx.bd_base_info = 0;
 }
 
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1843,6 +1877,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 	ring->desc_cb[i].reuse_flag = 0;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
 		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].rx.bd_base_info = 0;
 }
 
 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -1971,106 +2006,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
 }
 
-/* hns3_nic_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- */
-static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
-					 unsigned int max_size)
-{
-	unsigned char *network;
-	u8 hlen;
-
-	/* This should never happen, but better safe than sorry */
-	if (max_size < ETH_HLEN)
-		return max_size;
-
-	/* Initialize network frame pointer */
-	network = data;
-
-	/* Set first protocol and move network header forward */
-	network += ETH_HLEN;
-
-	/* Handle any vlan tag if present */
-	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
-	    == HNS3_RX_FLAG_VLAN_PRESENT) {
-		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-			return max_size;
-
-		network += VLAN_HLEN;
-	}
-
-	/* Handle L3 protocols */
-	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
-	    == HNS3_RX_FLAG_L3ID_IPV4) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct iphdr)))
-			return max_size;
-
-		/* Access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[0] & 0x0F) << 2;
-
-		/* Verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return network - data;
-
-		/* Record next protocol if header is present */
-	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
-		   == HNS3_RX_FLAG_L3ID_IPV6) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct ipv6hdr)))
-			return max_size;
-
-		/* Record next protocol */
-		hlen = sizeof(struct ipv6hdr);
-	} else {
-		return network - data;
-	}
-
-	/* Relocate pointer to start of L4 header */
-	network += hlen;
-
-	/* Finally sort out TCP/UDP */
-	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
-	    == HNS3_RX_FLAG_L4ID_TCP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct tcphdr)))
-			return max_size;
-
-		/* Access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (network[12] & 0xF0) >> 2;
-
-		/* Verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return network - data;
-
-		network += hlen;
-	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
-		   == HNS3_RX_FLAG_L4ID_UDP) {
-		if ((typeof(max_size))(network - data) >
-		    (max_size - sizeof(struct udphdr)))
-			return max_size;
-
-		network += sizeof(struct udphdr);
-	}
-
-	/* If everything has gone correctly network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((typeof(max_size))(network - data) < max_size)
-		return network - data;
-	else
-		return max_size;
-}
-
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -2183,6 +2118,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
 	napi_gro_receive(&ring->tqp_vector->napi, skb);
 }
 
+static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+			       struct hns3_desc *desc, u32 l234info)
+{
+	struct pci_dev *pdev = ring->tqp->handle->pdev;
+	u16 vlan_tag;
+
+	if (pdev->revision == 0x20) {
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		if (!(vlan_tag & VLAN_VID_MASK))
+			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+		return vlan_tag;
+	}
+
+#define HNS3_STRP_OUTER_VLAN	0x1
+#define HNS3_STRP_INNER_VLAN	0x2
+
+	switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+			       HNS3_RXD_STRP_TAGP_S)) {
+	case HNS3_STRP_OUTER_VLAN:
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		break;
+	case HNS3_STRP_INNER_VLAN:
+		vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+		break;
+	default:
+		vlan_tag = 0;
+		break;
+	}
+
+	return vlan_tag;
+}
+
 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 			     struct sk_buff **out_skb, int *out_bnum)
 {
@@ -2202,9 +2170,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetch(desc);
 
-	length = le16_to_cpu(desc->rx.pkt_len);
+	length = le16_to_cpu(desc->rx.size);
 	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-	l234info = le32_to_cpu(desc->rx.l234_info);
 
 	/* Check valid BD */
 	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
@@ -2238,22 +2205,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetchw(skb->data);
 
-	/* Based on hw strategy, the tag offloaded will be stored at
-	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
-	 * in one layer tag case.
-	 */
-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-		u16 vlan_tag;
-
-		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
-		if (!(vlan_tag & VLAN_VID_MASK))
-			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
-		if (vlan_tag & VLAN_VID_MASK)
-			__vlan_hwaccel_put_tag(skb,
-					       htons(ETH_P_8021Q),
-					       vlan_tag);
-	}
-
 	bnum = 1;
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2270,8 +2221,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 		ring->stats.seg_pkt_cnt++;
 		u64_stats_update_end(&ring->syncp);
 
-		pull_len = hns3_nic_get_headlen(va, l234info,
-						HNS3_RX_HEAD_SIZE);
+		pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+
 		memcpy(__skb_put(skb, pull_len), va,
 		       ALIGN(pull_len, sizeof(long)));
 
@@ -2290,6 +2241,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	*out_bnum = bnum;
 
+	l234info = le32_to_cpu(desc->rx.l234_info);
+
+	/* Based on hw strategy, the tag offloaded will be stored at
+	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+	 * in one layer tag case.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
+		if (vlan_tag & VLAN_VID_MASK)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
 	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
 		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
 			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
@@ -3022,8 +2989,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 			goto out_when_alloc_ring_memory;
 		}
 
-		hns3_init_ring_hw(priv->ring_data[i].ring);
-
 		u64_stats_init(&priv->ring_data[i].ring->syncp);
 	}
 
@@ -3052,13 +3017,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 }
 
 /* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev)
+static void hns3_init_mac_addr(struct net_device *netdev, bool init)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
 	u8 mac_addr_temp[ETH_ALEN];
 
-	if (h->ae_algo->ops->get_mac_addr) {
+	if (h->ae_algo->ops->get_mac_addr && init) {
 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
 		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
 	}
@@ -3075,6 +3040,15 @@
 
 }
 
+static void hns3_uninit_mac_addr(struct net_device *netdev)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (h->ae_algo->ops->rm_uc_addr)
+		h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+}
+
 static void hns3_nic_set_priv_ops(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3112,7 +3086,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	handle->kinfo.netdev = netdev;
 	handle->priv = (void *)priv;
 
-	hns3_init_mac_addr(netdev);
+	hns3_init_mac_addr(netdev, true);
 
 	hns3_set_default_feature(netdev);
 
@@ -3185,6 +3159,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	if (netdev->reg_state != NETREG_UNINITIALIZED)
 		unregister_netdev(netdev);
 
+	hns3_force_clear_all_rx_ring(handle);
+
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret)
 		netdev_err(netdev, "uninit vector error\n");
@@ -3201,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
 	priv->ring_data = NULL;
 
+	hns3_uninit_mac_addr(netdev);
+
 	free_netdev(netdev);
 }
 
@@ -3298,9 +3276,76 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
 		hns3_nic_mc_sync(ndev, ha->addr);
 }
 
-static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
 {
-	dev_kfree_skb_any(skb);
+	while (ring->next_to_clean != ring->next_to_use) {
+		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
+		hns3_free_buffer_detach(ring, ring->next_to_clean);
+		ring_ptr_move_fw(ring, next_to_clean);
+	}
+}
+
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+	struct hns3_desc_cb res_cbs;
+	int ret;
+
+	while (ring->next_to_use != ring->next_to_clean) {
+		/* When a buffer is not reused, it's memory has been
+		 * freed in hns3_handle_rx_bd or will be freed by
+		 * stack, so we need to replace the buffer here.
+		 */
+		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+			ret = hns3_reserve_buffer_map(ring, &res_cbs);
+			if (ret) {
+				u64_stats_update_begin(&ring->syncp);
+				ring->stats.sw_err_cnt++;
+				u64_stats_update_end(&ring->syncp);
+				/* if alloc new buffer fail, exit directly
+				 * and reclear in up flow.
+				 */
+				netdev_warn(ring->tqp->handle->kinfo.netdev,
+					    "reserve buffer map failed, ret = %d\n",
+					    ret);
+				return ret;
+			}
+			hns3_replace_buffer(ring, ring->next_to_use,
+					    &res_cbs);
+		}
+		ring_ptr_move_fw(ring, next_to_use);
+	}
+
+	return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+	while (ring->next_to_use != ring->next_to_clean) {
+		/* When a buffer is not reused, it's memory has been
+		 * freed in hns3_handle_rx_bd or will be freed by
+		 * stack, so only need to unmap the buffer here.
+		 */
+		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+			hns3_unmap_buffer(ring,
+					  &ring->desc_cb[ring->next_to_use]);
+			ring->desc_cb[ring->next_to_use].dma = 0;
+		}
+
+		ring_ptr_move_fw(ring, next_to_use);
+	}
+}
+
+static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+{
+	struct net_device *ndev = h->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hns3_enet_ring *ring;
+	u32 i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		hns3_force_clear_rx_ring(ring);
+	}
 }
 
 static void hns3_clear_all_ring(struct hnae3_handle *h)
@@ -3314,14 +3359,55 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
 		struct hns3_enet_ring *ring;
 
 		ring = priv->ring_data[i].ring;
-		hns3_clean_tx_ring(ring, ring->desc_num);
+		hns3_clear_tx_ring(ring);
 		dev_queue = netdev_get_tx_queue(ndev,
 						priv->ring_data[i].queue_index);
 		netdev_tx_reset_queue(dev_queue);
 
 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
-		hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
+		/* Continue to clear other rings even if clearing some
+		 * rings failed.
+		 */
+		hns3_clear_rx_ring(ring);
+	}
+}
+
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+	struct net_device *ndev = h->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hns3_enet_ring *rx_ring;
+	int i, j;
+	int ret;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		h->ae_algo->ops->reset_queue(h, i);
+		hns3_init_ring_hw(priv->ring_data[i].ring);
+
+		/* We need to clear tx ring here because self test will
+		 * use the ring and will not run down before up
+		 */
+		hns3_clear_tx_ring(priv->ring_data[i].ring);
+		priv->ring_data[i].ring->next_to_clean = 0;
+		priv->ring_data[i].ring->next_to_use = 0;
+
+		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		hns3_init_ring_hw(rx_ring);
+		ret = hns3_clear_rx_ring(rx_ring);
+		if (ret)
+			return ret;
+
+		/* We can not know the hardware head and tail when this
+		 * function is called in reset flow, so we reuse all desc.
+		 */
+		for (j = 0; j < rx_ring->desc_num; j++)
+			hns3_reuse_buffer(rx_ring, j);
+
+		rx_ring->next_to_clean = 0;
+		rx_ring->next_to_use = 0;
 	}
+
+	return 0;
 }
 
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
@@ -3359,7 +3445,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
-	hns3_init_mac_addr(netdev);
+	hns3_init_mac_addr(netdev, false);
 	hns3_nic_set_rx_mode(netdev);
 	hns3_recover_hw_addr(netdev);
 
@@ -3393,7 +3479,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
-	hns3_clear_all_ring(handle);
+	hns3_force_clear_all_rx_ring(handle);
 
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret) {
@@ -3409,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 
 	priv->ring_data = NULL;
 
+	hns3_uninit_mac_addr(netdev);
+
 	return ret;
 }
 
@@ -3529,8 +3617,6 @@ int hns3_set_channels(struct net_device *netdev,
 	if (if_running)
 		hns3_nic_net_stop(netdev);
 
-	hns3_clear_all_ring(h);
-
 	ret = hns3_nic_uninit_vector_data(priv);
 	if (ret) {
 		dev_err(&netdev->dev,
@@ -3600,6 +3686,8 @@ static int __init hns3_init_module(void)
 
 	client.ops = &client_ops;
 
+	INIT_LIST_HEAD(&client.node);
+
 	ret = hnae3_register_client(&client);
 	if (ret)
 		return ret;
@@ -3627,3 +3715,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("pci:hns-nic");
+MODULE_VERSION(HNS3_MOD_VERSION);
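Note on the hns3_tunnel_csum_bug() addition above: its guard is a double negative and easy to misread. The sketch below is a minimal standalone userspace model of the same decision; the fake_pkt struct and needs_sw_csum() are hypothetical stand-ins for the real sk_buff/udphdr fields, not kernel API. The workaround fires exactly when a packet is not marked as encapsulated yet its UDP destination port is the IANA-assigned VXLAN port 4789, in which case the driver falls back to software checksumming via skb_checksum_help().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htons() */

#define IANA_VXLAN_PORT 4789

struct fake_pkt {
	bool encapsulation;   /* models skb->encapsulation */
	uint16_t udp_dest;    /* models l4.udp->dest, network byte order */
};

/* Returns true when the driver must compute the checksum in software. */
static bool needs_sw_csum(const struct fake_pkt *p)
{
	return !p->encapsulation && p->udp_dest == htons(IANA_VXLAN_PORT);
}

int main(void)
{
	struct fake_pkt plain_vxlan = { false, htons(IANA_VXLAN_PORT) };
	struct fake_pkt encap_vxlan = { true,  htons(IANA_VXLAN_PORT) };
	struct fake_pkt plain_dns   = { false, htons(53) };

	printf("plain 4789: %d\n", needs_sw_csum(&plain_vxlan)); /* 1 */
	printf("encap 4789: %d\n", needs_sw_csum(&encap_vxlan)); /* 0 */
	printf("plain 53:   %d\n", needs_sw_csum(&plain_dns));   /* 0 */
	return 0;
}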
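Similarly, the new hns3_parse_vlan_tag() selects where the stripped VLAN tag lives based on the PCI revision of the device. The standalone model below restates that selection under stated assumptions: fake_rx_desc and the strp_tagp parameter are stand-ins for the real descriptor fields and the HNS3_RXD_STRP_TAGP_M/S bitfield extraction, and byte-order conversion is omitted for brevity. Only the HNS3_STRP_* values come from the patch itself.

#include <stdint.h>
#include <stdio.h>

#define HNS3_STRP_OUTER_VLAN 0x1   /* values from the patch */
#define HNS3_STRP_INNER_VLAN 0x2
#define VLAN_VID_MASK 0x0fff

/* Stand-ins for the RX descriptor fields used by the driver,
 * already in CPU byte order here for simplicity.
 */
struct fake_rx_desc {
	uint16_t ot_vlan_tag;  /* outer tag */
	uint16_t vlan_tag;     /* inner tag */
};

/* On revision 0x20 the hardware has no "stripped tag position" field,
 * so the driver probes ot_vlan_tag first; newer revisions report the
 * position explicitly (strp_tagp models that report).
 */
static uint16_t parse_vlan_tag(const struct fake_rx_desc *d,
			       uint8_t pci_revision, uint8_t strp_tagp)
{
	if (pci_revision == 0x20) {
		uint16_t tag = d->ot_vlan_tag;

		if (!(tag & VLAN_VID_MASK))
			tag = d->vlan_tag;
		return tag;
	}

	switch (strp_tagp) {
	case HNS3_STRP_OUTER_VLAN:
		return d->ot_vlan_tag;
	case HNS3_STRP_INNER_VLAN:
		return d->vlan_tag;
	default:
		return 0;
	}
}

int main(void)
{
	struct fake_rx_desc d = { .ot_vlan_tag = 0, .vlan_tag = 100 };

	/* Revision 0x20: outer tag empty, falls back to inner tag. */
	printf("rev 0x20 -> %u\n", (unsigned)parse_vlan_tag(&d, 0x20, 0));
	/* Newer revision: hardware reports the inner tag was stripped. */
	printf("rev 0x21 -> %u\n",
	       (unsigned)parse_vlan_tag(&d, 0x21, HNS3_STRP_INNER_VLAN));
	return 0;
}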
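Finally, the new clear/reset helpers (hns3_clear_tx_ring(), hns3_clear_rx_ring(), hns3_nic_reset_all_ring()) all walk the descriptor ring by advancing one index until it catches up with the other, wrapping at the ring size. A small model of that pointer arithmetic, with a hypothetical fake_ring and ptr_move_fw() standing in for hns3_enet_ring and ring_ptr_move_fw():

#include <stdio.h>

#define DESC_NUM 8   /* illustrative ring size */

struct fake_ring {
	int next_to_use;    /* producer index */
	int next_to_clean;  /* consumer index */
};

/* Models ring_ptr_move_fw(): advance an index with wrap-around. */
static void ptr_move_fw(int *idx)
{
	*idx = (*idx + 1) % DESC_NUM;
}

/* Models hns3_clear_tx_ring(): walk next_to_clean forward until it
 * meets next_to_use, releasing one descriptor per step.
 */
static int clear_ring(struct fake_ring *r)
{
	int freed = 0;

	while (r->next_to_clean != r->next_to_use) {
		/* the real driver zeroes the BD and detaches the buffer here */
		ptr_move_fw(&r->next_to_clean);
		freed++;
	}
	return freed;
}

int main(void)
{
	struct fake_ring r = { .next_to_use = 2, .next_to_clean = 6 };

	/* wraps past the end of the ring: 6,7,0,1 -> 4 descriptors */
	printf("freed %d descriptors\n", clear_ring(&r));
	return 0;
}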