path: root/drivers/net/ethernet/mellanox/mlxsw/pci.c
author    Amit Cohen <amcohen@nvidia.com>  2024-04-02 15:54:25 +0200
committer Jakub Kicinski <kuba@kernel.org>  2024-04-03 19:50:40 -0700
commit    1df7d871e3491de2f53afafedef8383185a3f613 (patch)
tree      83a188ba2c2afc90a3ec59b0751242f7c68cbe9f /drivers/net/ethernet/mellanox/mlxsw/pci.c
parent    a0639236d42060c8f0e994ddf87fbb0f3cb2b048 (diff)
mlxsw: pci: Break mlxsw_pci_cq_tasklet() into tasklets per queue type
Completion queues are used for completions of RDQs or SDQs. Each completion queue serves one DQ. The first CQs are used for SDQs and the rest are used for RDQs.

Currently, for each CQE (completion queue element), we check the 'sr' (send/receive) value to know whether it is a completion of an RDQ or an SDQ. We do not really have to check it: the queue number already tells us whether the queue handles completions of Rx or Tx.

Break the tasklet into two - one for Rx (RDQ) and one for Tx (SDQ). Then, set up the appropriate tasklet for each queue as part of queue initialization. Use the 'sr' value only for the unlikely case that we get a completion of an unexpected type, and call WARN_ON_ONCE() only after checking the value, to avoid calling this method for each completion.

A next patch set will use NAPI to handle events, and we will then have separate poll methods for Rx and Tx. This change is a preparation for NAPI usage.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/50fbc366f8de54cb5dc72a7c4f394333ef71f1d0.1712062203.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
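In essence, the patch moves the Rx/Tx decision from the per-CQE hot path to a one-time choice at queue initialization. A minimal stand-alone C sketch of that dispatch scheme follows; NUM_CQS, NUM_SDQ_CQS, cq_handler_t, and the two handlers are hypothetical illustrations, not the driver's symbols:

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's CQ bookkeeping; the real
     * driver reads the SDQ CQ count from its private structure.
     */
    #define NUM_CQS     8
    #define NUM_SDQ_CQS 4   /* the first CQs serve SDQs (Tx), the rest RDQs (Rx) */

    typedef void (*cq_handler_t)(int cq_num);

    static void tx_handler(int cq_num) { printf("CQ %d: Tx completions\n", cq_num); }
    static void rx_handler(int cq_num) { printf("CQ %d: Rx completions\n", cq_num); }

    int main(void)
    {
            cq_handler_t handlers[NUM_CQS];
            int i;

            /* Chosen once at queue init, as mlxsw_pci_cq_tasklet_setup()
             * does with tasklet_setup() in the patch below.
             */
            for (i = 0; i < NUM_CQS; i++)
                    handlers[i] = (i < NUM_SDQ_CQS) ? tx_handler : rx_handler;

            /* The completion path then dispatches without inspecting a
             * per-element send/receive flag.
             */
            for (i = 0; i < NUM_CQS; i++)
                    handlers[i](i);
            return 0;
    }

Resolving the queue type once at setup keeps the per-completion loop free of the send/receive branch, which is also what later allows registering distinct NAPI poll functions for Rx and Tx.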
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/pci.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c  |  86
1 file changed, 74 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 84bad681021e..c10e6f22f818 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -35,6 +35,11 @@ enum mlxsw_pci_queue_type {
 
 #define MLXSW_PCI_QUEUE_TYPE_COUNT	4
 
+enum mlxsw_pci_cq_type {
+	MLXSW_PCI_CQ_SDQ,
+	MLXSW_PCI_CQ_RDQ,
+};
+
 static const u16 mlxsw_pci_doorbell_type_offset[] = {
 	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
 	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
@@ -658,7 +663,7 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
 	return elem;
 }
 
-static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
+static void mlxsw_pci_cq_rx_tasklet(struct tasklet_struct *t)
 {
 	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
 	struct mlxsw_pci *mlxsw_pci = q->pci;
@@ -671,23 +676,54 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
 		u8 sendq = mlxsw_pci_cqe_sr_get(q->cq.v, cqe);
 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->cq.v, cqe);
 		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+		struct mlxsw_pci_queue *rdq;
+
+		if (unlikely(sendq)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
 
 		memcpy(ncqe, cqe, q->elem_size);
 		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 
-		if (sendq) {
-			struct mlxsw_pci_queue *sdq;
+		rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+		mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+					 wqe_counter, q->cq.v, ncqe);
+
+		if (++items == credits)
+			break;
+	}
 
-			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
-			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-						 wqe_counter, q->cq.v, ncqe);
-		} else {
-			struct mlxsw_pci_queue *rdq;
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+}
 
-			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
-			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, q->cq.v, ncqe);
+static void mlxsw_pci_cq_tx_tasklet(struct tasklet_struct *t)
+{
+	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	int credits = q->count >> 1;
+	int items = 0;
+	char *cqe;
+
+	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(q->cq.v, cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(q->cq.v, cqe);
+		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+		struct mlxsw_pci_queue *sdq;
+
+		if (unlikely(!sendq)) {
+			WARN_ON_ONCE(1);
+			continue;
 		}
+
+		memcpy(ncqe, cqe, q->elem_size);
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+
+		sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+		mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+					 wqe_counter, q->cq.v, ncqe);
+
 		if (++items == credits)
 			break;
 	}
@@ -695,6 +731,32 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
 }
 
+static enum mlxsw_pci_cq_type
+mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
+		  const struct mlxsw_pci_queue *q)
+{
+	/* Each CQ is mapped to one DQ. The first 'num_sdq_cqs' queues are used
+	 * for SDQs and the rest are used for RDQs.
+	 */
+	if (q->num < mlxsw_pci->num_sdq_cqs)
+		return MLXSW_PCI_CQ_SDQ;
+
+	return MLXSW_PCI_CQ_RDQ;
+}
+
+static void mlxsw_pci_cq_tasklet_setup(struct mlxsw_pci_queue *q,
+				       enum mlxsw_pci_cq_type cq_type)
+{
+	switch (cq_type) {
+	case MLXSW_PCI_CQ_SDQ:
+		tasklet_setup(&q->tasklet, mlxsw_pci_cq_tx_tasklet);
+		break;
+	case MLXSW_PCI_CQ_RDQ:
+		tasklet_setup(&q->tasklet, mlxsw_pci_cq_rx_tasklet);
+		break;
+	}
+}
+
 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			     struct mlxsw_pci_queue *q)
 {
@@ -727,7 +789,7 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
 	if (err)
 		return err;
-	tasklet_setup(&q->tasklet, mlxsw_pci_cq_tasklet);
+	mlxsw_pci_cq_tasklet_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q));
 	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
 	return 0;
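
For context, a tasklet set up this way is kicked with the standard kernel tasklet API from the driver's event handling; a hedged sketch of that pattern (the wrapper function below is illustrative, not the driver's actual EQ code):

    /* Illustrative only: tasklet_schedule() marks the tasklet pending, and it
     * later runs in softirq context, invoking whichever of
     * mlxsw_pci_cq_rx_tasklet() or mlxsw_pci_cq_tx_tasklet() was chosen for
     * this queue at init time.
     */
    static void example_cq_event(struct mlxsw_pci_queue *q)
    {
    	tasklet_schedule(&q->tasklet);
    }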