Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa/dpaa_eth.c')
-rw-r--r--	drivers/net/ethernet/freescale/dpaa/dpaa_eth.c	76
1 file changed, 45 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index baa0b3c2ce6f..cfe6b57b1da0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 			 void *type_data)
 {
 	struct dpaa_priv *priv = netdev_priv(net_dev);
+	int num_txqs_per_tc = dpaa_num_txqs_per_tc();
 	struct tc_mqprio_qopt *mqprio = type_data;
 	u8 num_tc;
 	int i;
@@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 	netdev_set_num_tc(net_dev, num_tc);
 
 	for (i = 0; i < num_tc; i++)
-		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
-				    i * DPAA_TC_TXQ_NUM);
+		netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+				    i * num_txqs_per_tc);
 
 out:
 	priv->num_tc = num_tc ? : 1;
-	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+	netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
 	return 0;
 }
 
@@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 		fq->wq = 6;
 		break;
 	case FQ_TYPE_TX:
-		switch (idx / DPAA_TC_TXQ_NUM) {
+		switch (idx / dpaa_num_txqs_per_tc()) {
 		case 0:
 			/* Low priority (best effort) */
 			fq->wq = 6;
@@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
 			fq->wq = 0;
 			break;
 		default:
-			WARN(1, "Too many TX FQs: more than %d!\n",
-			     DPAA_ETH_TXQ_NUM);
+			WARN(1, "Too many TX FQs: more than %zu!\n",
+			     dpaa_max_num_txqs());
 		}
 		break;
 	default:
@@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 
 	port_fqs->rx_pcdq = &dpaa_fq[0];
 
-	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+	if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+			   FQ_TYPE_TX_CONF_MQ))
 		goto fq_alloc_failed;
 
 	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 
 	port_fqs->tx_defq = &dpaa_fq[0];
 
-	if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+	if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
 		goto fq_alloc_failed;
 
 	return 0;
@@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
 	}
 }
 
-static void dpaa_fq_setup(struct dpaa_priv *priv,
-			  const struct dpaa_fq_cbs *fq_cbs,
-			  struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+			 const struct dpaa_fq_cbs *fq_cbs,
+			 struct fman_port *tx_port)
 {
 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
 	const cpumask_t *affine_cpus = qman_affine_cpus();
-	u16 channels[NR_CPUS];
 	struct dpaa_fq *fq;
+	u16 *channels;
+
+	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
 
 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
 		channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 		case FQ_TYPE_TX:
 			dpaa_setup_egress(priv, fq, tx_port,
 					  &fq_cbs->egress_ern);
-			/* If we have more Tx queues than the number of cores,
-			 * just ignore the extra ones.
-			 */
-			if (egress_cnt < DPAA_ETH_TXQ_NUM)
-				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
 			break;
 		case FQ_TYPE_TX_CONF_MQ:
 			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
 		}
 	}
 
-	/* Make sure all CPUs receive a corresponding Tx queue. */
-	while (egress_cnt < DPAA_ETH_TXQ_NUM) {
-		list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
-			if (fq->fq_type != FQ_TYPE_TX)
-				continue;
-			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-			if (egress_cnt == DPAA_ETH_TXQ_NUM)
-				break;
-		}
-	}
+	kfree(channels);
+
+	return 0;
 }
 
 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
 {
 	int i;
 
-	for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+	for (i = 0; i < dpaa_max_num_txqs(); i++)
 		if (priv->egress_fqs[i] == tx_fq)
 			return i;
 
@@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	/* Allocate this early, so we can store relevant information in
 	 * the private area
 	 */
-	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+	net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
 	if (!net_dev) {
 		dev_err(dev, "alloc_etherdev_mq() failed\n");
 		return -ENOMEM;
@@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
 	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
 
+	priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+					sizeof(*priv->egress_fqs),
+					GFP_KERNEL);
+	if (!priv->egress_fqs) {
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+
+	priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+				      sizeof(*priv->conf_fqs),
+				      GFP_KERNEL);
+	if (!priv->conf_fqs) {
+		err = -ENOMEM;
+		goto free_netdev;
+	}
+
 	mac_dev = dpaa_mac_dev_get(pdev);
 	if (IS_ERR(mac_dev)) {
 		netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	 */
 	dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
-	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+	if (err)
+		goto free_dpaa_bps;
 
 	/* Create a congestion group for this netdev, with
 	 * dynamically-allocated CGR ID.
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	}
 
 	priv->num_tc = 1;
-	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+	netif_set_real_num_tx_queues(net_dev,
+				     priv->num_tc * dpaa_num_txqs_per_tc());
 
 	/* Initialize NAPI */
 	err = dpaa_napi_add(net_dev);
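Note: the helpers dpaa_num_txqs_per_tc() and dpaa_max_num_txqs(), which replace the old DPAA_TC_TXQ_NUM and DPAA_ETH_TXQ_NUM compile-time constants, are defined outside this file, so their bodies do not appear in the diff above (the diffstat is limited to dpaa_eth.c). The following is a minimal sketch of what they plausibly look like, inferred only from the call sites: a per-TC count based on num_possible_cpus() (matching the kcalloc() sizing in dpaa_fq_setup()), and a size_t total (matching the %zu format in the WARN). The DPAA_TC_NUM value and the exact signatures are assumptions.

/* Hedged sketch -- not part of this diff. The real definitions live
 * elsewhere (presumably dpaa_eth.h); everything here is inferred from
 * the call sites above.
 */
#include <linux/cpumask.h>
#include <linux/types.h>

#define DPAA_TC_NUM	4	/* assumed: number of traffic classes */

/* Per-TC Tx queue count: one queue per possible CPU, computed at run
 * time instead of the old NR_CPUS-sized compile-time constant.
 */
static inline int dpaa_num_txqs_per_tc(void)
{
	return num_possible_cpus();
}

/* Upper bound on Tx queues across all traffic classes; a size_t
 * return would match the %zu format in the WARN above.
 */
static inline size_t dpaa_max_num_txqs(void)
{
	return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
}

Under this reading, sizing priv->egress_fqs from num_possible_cpus() at probe time is also what lets dpaa_fq_setup() drop the old "make sure all CPUs receive a corresponding Tx queue" recycling loop, and it turns the on-stack u16 channels[NR_CPUS] array (e.g. 4 KiB with NR_CPUS=2048) into a heap allocation whose failure can be reported as -ENOMEM.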