/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
#ifndef _IDXD_H_
#define _IDXD_H_
#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};
#define IDXD_NAME_SIZE 128
struct idxd_device_driver {
	struct device_driver drv;
};
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access to the lists above between the irq thread
	 * processing completed descriptors and the irq thread processing
	 * error descriptors.
	 */
	spinlock_t list_lock;
};
struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};
#define IDXD_MAX_PRIORITY 0xf
enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_wq {
	void __iomem *portal;
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct dma_chan dma_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};
struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};
/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};
struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;
	int compl_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
	struct workqueue_struct *wq;
	struct work_struct work;
};
/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}
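/*
 * Usage sketch (illustrative, not a contract defined by this header): a
 * shared WQ is only usable when the CPU supports ENQCMD and PASID is
 * enabled on the device, so a caller might gate its submission path with:
 *
 *	if (wq_shared(wq) && !device_swq_supported(idxd))
 *		return -EOPNOTSUPP;
 */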
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
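/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4KB pages, as the helpers
 * above imply): each WQ owns four portal pages and the limited portal sits
 * 0x1000 past the unlimited one, so WQ 1 with IDXD_PORTAL_LIMITED resolves
 * to ((1 * 4) << 12) + 0x1000 = 0x5000 from the device's WQ portal base.
 */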
static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
		idxd->type = IDXD_TYPE_IAX;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
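/*
 * Note (an assumption based on the non-atomic counter above): client_count
 * is a plain int, so callers are expected to adjust it under wq->wq_lock,
 * e.g.:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 */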
const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
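/*
 * Bring-up ordering sketch (assumed typical flow for the calls above; the
 * .c files are authoritative):
 *
 *	rc = idxd_device_init_reset(idxd);
 *	rc = idxd_device_config(idxd);
 *	rc = idxd_device_enable(idxd);
 */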
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
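/*
 * Descriptor lifecycle sketch (illustrative use of the submission API; the
 * error handling shown is an assumption, not a guarantee of this header):
 *
 *	struct idxd_desc *desc;
 *	int rc;
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	... fill in desc->hw with the hardware descriptor contents ...
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 */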
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
#endif