/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>

#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"

/* Number of prioritised traffic classes */
#define DPAA_TC_NUM		4

/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};

struct dpaa_fq {
	struct qman_fq fq_base;
	struct list_head list;
	struct net_device *net_dev;
	bool init;		/* set once the FQ has been initialized in QMan */
	u32 fqid;		/* frame queue ID */
	u32 flags;		/* QMAN_FQ_FLAG_* flags used at FQ creation */
	u16 channel;		/* QMan channel the FQ is scheduled to */
	u8 wq;			/* work queue (priority) within the channel */
	enum dpaa_fq_type fq_type;
	struct xdp_rxq_info xdp_rxq;
};

/* Template frame queues carrying the QMan callbacks for each FQ type */
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;
	struct qman_fq tx_defq;
	struct qman_fq rx_errq;
	struct qman_fq tx_errq;
	struct qman_fq egress_ern;
};
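
/* Illustrative sketch (hypothetical names, example only): the template
 * matching an FQ's type is copied wholesale, callbacks included, into the
 * new FQ before it is created in QMan.
 */
#if 0	/* example only, not compiled */
	dpaa_fq->fq_base = fq_cbs->rx_defq;	/* Rx default FQ inherits cbs */
#endif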

struct dpaa_priv;

struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool allotted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	u8 bpid;
	struct bman_pool *pool;
	/* bpool can be seeded before use by this cb */
	int (*seed_cb)(struct dpaa_bp *);
	/* bpool can be emptied before freeing by this cb */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	refcount_t refs;
};
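
/* Illustrative sketch (hypothetical helpers, example only): the pool's
 * lifetime is governed by 'refs'; the last user to drop its reference
 * frees the underlying BMan pool.
 */
#if 0	/* example only, not compiled */
static inline void dpaa_bp_example_get(struct dpaa_bp *dpaa_bp)
{
	refcount_inc(&dpaa_bp->refs);
}

static inline void dpaa_bp_example_put(struct dpaa_bp *dpaa_bp)
{
	if (refcount_dec_and_test(&dpaa_bp->refs))
		bman_free_pool(dpaa_bp->pool);	/* last reference gone */
}
#endif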

struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
	u64 phe;		/* Header Error */
};

/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};

struct dpaa_napi_portal {
	struct napi_struct napi;
	struct qman_portal *p;
	bool down;		/* NAPI disabled for this portal */
	int xdp_act;		/* XDP actions returned during the current poll */
};

struct dpaa_percpu_priv {
	struct net_device *net_dev;
	struct dpaa_napi_portal np;
	u64 in_interrupt;
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};

struct dpaa_buffer_layout {
	u16 priv_data_size;
};

/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	struct sk_buff *skb;
	struct xdp_frame *xdpf;
};
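
/* Illustrative sketch (example only): on Tx confirmation, the back-pointers
 * are read back from the buffer headroom. 'vaddr' stands for the virtual
 * address of the buffer start and is hypothetical here.
 */
#if 0	/* example only, not compiled */
	struct dpaa_eth_swbp *swbp = (struct dpaa_eth_swbp *)vaddr;

	if (swbp->skb)
		napi_consume_skb(swbp->skb, 1);	/* skb-backed frame */
	else if (swbp->xdpf)
		xdp_return_frame(swbp->xdpf);	/* XDP frame */
#endif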

struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	struct qman_fq **egress_fqs;
	struct qman_fq **conf_fqs;

	u16 channel;
	struct list_head dpaa_fq_list;

	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;	/* net_device message level */

	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;

	struct dpaa_buffer_layout buf_layout[2];	/* Rx and Tx layouts */
	u16 rx_headroom;

	bool tx_tstamp; /* Tx timestamping enabled */
	bool rx_tstamp; /* Rx timestamping enabled */

	struct bpf_prog *xdp_prog;
};
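
/* Illustrative sketch (hypothetical callback name, example only): cgr_data
 * is meant to be updated from the CGR state-change callback. Entering
 * congestion records the start time; leaving it accumulates the elapsed
 * jiffies, while the counter tracks how often congestion was entered.
 */
#if 0	/* example only, not compiled */
static void dpaa_example_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
			       int congested)
{
	struct dpaa_priv *priv = container_of(cgr, struct dpaa_priv,
					      cgr_data.cgr);

	if (congested) {
		priv->cgr_data.congestion_start_jiffies = jiffies;
		priv->cgr_data.cgr_congested_count++;
	} else {
		priv->cgr_data.congested_jiffies +=
			jiffies - priv->cgr_data.congestion_start_jiffies;
	}
}
#endif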

/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);

static inline size_t dpaa_num_txqs_per_tc(void)
{
	return num_possible_cpus();
}

/* Total number of Tx queues */
static inline size_t dpaa_max_num_txqs(void)
{
	return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
}
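
/* Illustrative sketch (hypothetical helper, example only): the Tx queues
 * form DPAA_TC_NUM contiguous blocks of dpaa_num_txqs_per_tc() queues, so
 * a (traffic class, cpu) pair maps to a flat queue index like this:
 */
#if 0	/* example only, not compiled */
static inline size_t dpaa_example_txq_idx(u8 tc, unsigned int cpu)
{
	return tc * dpaa_num_txqs_per_tc() + cpu;
}
#endif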

#endif	/* __DPAA_H */