/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#ifndef __PANTHOR_GEM_H__
#define __PANTHOR_GEM_H__

#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

#include <linux/iosys-map.h>
#include <linux/rwsem.h>

struct panthor_device;
struct panthor_vm;

/**
 * struct panthor_gem_object - Driver specific GEM object.
 */
struct panthor_gem_object {
	/** @base: Inherit from drm_gem_shmem_object. */
	struct drm_gem_shmem_object base;

	/**
	 * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
	 * is attached to.
	 *
	 * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
	 * different VM will fail.
	 *
	 * All FW memory objects have this field set to the root GEM of the MCU
	 * VM.
	 */
	struct drm_gem_object *exclusive_vm_root_gem;

	/**
	 * @gpuva_list_lock: Custom GPUVA lock.
	 *
	 * Used to protect insertion of drm_gpuva elements to the
	 * drm_gem_object.gpuva.list list.
	 *
	 * We can't use the GEM resv for that, because drm_gpuva_link() is
	 * called in a dma-signaling path, where we're not allowed to take
	 * resv locks.
	 */
	struct mutex gpuva_list_lock;

	/** @flags: Combination of drm_panthor_bo_flags flags. */
	u32 flags;
};
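
/*
 * Illustrative use of @gpuva_list_lock (a sketch, not code from this driver):
 * VA list insertions are serialized with the custom mutex instead of the GEM
 * resv, so they can happen from the dma-signaling path. Here, bo, va and
 * vm_bo are hypothetical locals, and the drm_gpuva_link() arguments follow
 * the drm_gpuvm API.
 *
 *	mutex_lock(&bo->gpuva_list_lock);
 *	drm_gpuva_link(va, vm_bo);
 *	mutex_unlock(&bo->gpuva_list_lock);
 */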

/**
 * struct panthor_kernel_bo - Kernel buffer object.
 *
 * These objects are only manipulated by the kernel driver and not
 * directly exposed to userspace. The GPU address of a kernel
 * BO might be passed to userspace though.
 */
struct panthor_kernel_bo {
	/**
	 * @obj: The GEM object backing this kernel buffer object.
	 */
	struct drm_gem_object *obj;

	/**
	 * @vm: VM this private buffer is attached to.
	 */
	struct panthor_vm *vm;

	/**
	 * @va_node: VA space allocated to this GEM.
	 */
	struct drm_mm_node va_node;

	/**
	 * @kmap: Kernel CPU mapping of @obj.
	 */
	void *kmap;
};

static inline
struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
}
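
/**
 * panthor_gem_create_object() - Allocate a driver-specific GEM object.
 * @ddev: DRM device.
 * @size: Size of the object to allocate, in bytes.
 *
 * The signature matches the &drm_driver.gem_create_object hook.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise (assuming the
 * usual semantics of this hook).
 */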
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);
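
/**
 * panthor_gem_prime_import_sg_table() - Import a dma-buf into a GEM object.
 * @ddev: DRM device.
 * @attach: dma-buf attachment the scatter-gather table comes from.
 * @sgt: Scatter-gather table of the imported buffer.
 *
 * The signature matches the &drm_driver.gem_prime_import_sg_table hook;
 * presumably a thin wrapper around the shmem helper implementation.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */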
struct drm_gem_object *
panthor_gem_prime_import_sg_table(struct drm_device *ddev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt);
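
/**
 * panthor_gem_create_with_handle() - Create a GEM object and a handle to it.
 * @file: DRM file the handle is created for.
 * @ddev: DRM device.
 * @exclusive_vm: VM the object is exclusively bound to, or NULL. See
 * &panthor_gem_object.exclusive_vm_root_gem.
 * @size: Requested size; presumably updated to reflect the size actually
 * allocated, given it is passed by pointer.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Returned handle.
 *
 * Return: 0 on success, a negative error code otherwise.
 */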
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle);
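
/**
 * panthor_kernel_bo_gpuva() - GPU virtual address of a kernel BO.
 * @bo: Kernel BO to get the GPU address of.
 *
 * Return: The start of the VA range reserved for @bo.
 */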
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
	return bo->va_node.start;
}
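
/**
 * panthor_kernel_bo_size() - Size of a kernel BO.
 * @bo: Kernel BO to get the size of.
 *
 * Return: The size of the backing GEM object, in bytes.
 */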
static inline size_t
panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
{
	return bo->obj->size;
}
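
/**
 * panthor_kernel_bo_vmap() - Map a kernel BO in the kernel address space.
 * @bo: Kernel BO to map.
 *
 * The mapping is cached in &panthor_kernel_bo.kmap, so mapping an
 * already-mapped BO is a no-op.
 *
 * Return: 0 on success, a negative error code otherwise.
 */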
static inline int
panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
{
	struct iosys_map map;
	int ret;

	if (bo->kmap)
		return 0;

	ret = drm_gem_vmap_unlocked(bo->obj, &map);
	if (ret)
		return ret;

	bo->kmap = map.vaddr;
	return 0;
}
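
/**
 * panthor_kernel_bo_vunmap() - Unmap a kernel BO from the kernel address space.
 * @bo: Kernel BO to unmap. No-op if the BO is not currently mapped.
 */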
static inline void
panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
{
	if (bo->kmap) {
		struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);

		drm_gem_vunmap_unlocked(bo->obj, &map);
		bo->kmap = NULL;
	}
}
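
/*
 * Illustrative usage of the map helpers above (a sketch, not code from this
 * driver): map once, access the buffer through kmap, unmap when done.
 *
 *	if (!panthor_kernel_bo_vmap(bo)) {
 *		memset(bo->kmap, 0, panthor_kernel_bo_size(bo));
 *		panthor_kernel_bo_vunmap(bo);
 *	}
 */

/**
 * panthor_kernel_bo_create() - Allocate and GPU-map a kernel BO.
 * @ptdev: Device the BO belongs to.
 * @vm: VM the BO is mapped in.
 * @size: Size of the BO, in bytes.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of the VM map flags (presumably the MAP-related
 * drm_panthor_vm_bind_op_flags) to use for the GPU mapping.
 * @gpu_va: GPU virtual address to map the BO at, or an auto-VA sentinel if
 * the driver defines one (assumption; not visible from this header).
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */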
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va);
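
/**
 * panthor_kernel_bo_destroy() - Unmap and release a kernel BO.
 * @bo: Kernel BO to destroy. Undoes panthor_kernel_bo_create(), presumably
 * releasing the VA range and dropping the reference on the backing GEM
 * object.
 */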
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);

#endif /* __PANTHOR_GEM_H__ */