author     Alexander Lobakin <aleksander.lobakin@intel.com>   2024-06-20 15:53:43 +0200
committer  Tony Nguyen <anthony.l.nguyen@intel.com>           2024-07-10 10:43:57 -0700
commit     1b1b26208515482d8bc3d875267076f61f3355f7 (patch)
tree       14085ad7654d96426474e4503a1aef875ae8f014 /drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
parent     f771314d6b75181de7079c3c7d666293e4ed2b22 (diff)
idpf: reuse libeth's definitions of parsed ptype structures
idpf's in-kernel parsed ptype structure is almost identical to the one used in the previous Intel drivers, which means it can be converted to use libeth's definitions and even helpers. The only difference is that it doesn't use a constant table (as libie does), but one obtained from the device.

Remove the driver counterpart and use libeth's helpers for hashes and checksums. This slightly optimizes skb field processing thanks to faster checks. Also, don't define a big static array of ptypes in &idpf_vport -- allocate it dynamically; the pointer to it is cached in &idpf_rx_queue anyway.

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
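For readers new to libeth's parsed-ptype API, here is a minimal sketch (not part of the patch) of how a single lookup entry can be filled using only the struct libeth_rx_pt fields and constants that appear in the diff below; the helper name example_fill_ipv4_tcp and the idx parameter are made up for illustration.

#include <linux/types.h>
#include <net/libeth/rx.h>

/* Illustrative only: fill one parsed-ptype entry for a plain
 * (non-tunneled) IPv4/TCP packet and let libeth derive the skb
 * hash type from the filled fields.
 */
static void example_fill_ipv4_tcp(struct libeth_rx_pt *lkup, u32 idx)
{
	struct libeth_rx_pt pt = {
		.outer_ip	= LIBETH_RX_PT_OUTER_IPV4,
		.inner_prot	= LIBETH_RX_PT_INNER_TCP,
		.payload_layer	= LIBETH_RX_PT_PAYLOAD_L4,
	};

	libeth_rx_pt_gen_hash_type(&pt);
	lkup[idx] = pt;
}

The hot path can then index such a table by the ptype reported in the Rx descriptor instead of decoding the packet type per packet.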
Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_virtchnl.c')
-rw-r--r--   drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 72
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 1aa4770dfe18..83f3543a30e1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
@@ -2483,39 +2485,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
+
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2526,7 +2541,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2534,12 +2549,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2597,16 +2617,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2614,18 +2631,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2652,23 +2669,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2722,9 +2739,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
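
The "ptype_lkup __free(kfree)" annotation together with the no_free_ptr() call at the out: label relies on the kernel's scope-based cleanup helpers from <linux/cleanup.h> and <linux/slab.h>. A minimal sketch of that pattern, assuming nothing beyond kcalloc()/kfree() semantics (the function name alloc_ptype_lookup is hypothetical):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <net/libeth/rx.h>

static struct libeth_rx_pt *alloc_ptype_lookup(u16 max_ptype)
{
	/* Freed automatically by kfree() on any return path ... */
	struct libeth_rx_pt *lkup __free(kfree) = NULL;

	lkup = kcalloc(max_ptype, sizeof(*lkup), GFP_KERNEL);
	if (!lkup)
		return NULL;

	/* ... unless ownership is explicitly handed out with
	 * no_free_ptr(), as idpf_send_get_rx_ptype_msg() does above.
	 */
	return no_free_ptr(lkup);
}

On every early return the table is freed automatically; only the successful path at out: transfers ownership of the allocation to vport->rx_ptype_lkup.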