// SPDX-License-Identifier: GPL-2.0
//! Kernel page allocation and management.
//!
//! This module currently provides limited support: most operations are only
//! implemented for pages of order 0, and the page allocation flags are fixed
//! (`GFP_KERNEL | __GFP_ZERO | __GFP_HIGHMEM`).
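//!
//! # Examples
//!
//! A minimal usage sketch (error handling elided; this assumes the module is
//! reachable as `kernel::pages` and that the caller is allowed to sleep):
//!
//! ```
//! use kernel::pages::Pages;
//!
//! fn copy_through_page() -> kernel::error::Result {
//!     // Allocate 2^0 = 1 zeroed page.
//!     let pages = Pages::<0>::new()?;
//!     let src = [1u8, 2, 3, 4];
//!     let mut dst = [0u8; 4];
//!     // SAFETY: `src` and `dst` are valid for 4 bytes and neither points
//!     // into the mapped page.
//!     unsafe {
//!         pages.write(src.as_ptr(), 0, src.len())?;
//!         pages.read(dst.as_mut_ptr(), 0, dst.len())?;
//!     }
//!     assert_eq!(src, dst);
//!     Ok(())
//! }
//! ```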
use crate::{bindings, error::code::*, error::Result, PAGE_SIZE};
use core::{marker::PhantomData, ptr};
/// A set of physical pages.
///
/// `Pages` owns a set of contiguous pages of order `ORDER`. Having the order as a generic
/// const allows the struct to have the same size as a pointer.
///
/// # Invariants
///
/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.
pub struct Pages<const ORDER: u32> {
pub(crate) pages: *mut bindings::page,
}
impl<const ORDER: u32> Pages<ORDER> {
/// Allocates a new set of contiguous pages.
pub fn new() -> Result<Self> {
let pages = unsafe {
bindings::alloc_pages(
bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::___GFP_HIGHMEM,
ORDER,
)
};
if pages.is_null() {
return Err(ENOMEM);
}
// INVARIANT: The allocation above succeeded, so `pages` is valid and
// points to 2^ORDER contiguous pages.
// SAFETY: We own the pages we just allocated, so ownership can be
// transferred to the new `Pages`, which will free them on drop.
Ok(unsafe { Self::from_raw(pages) })
}
/// Creates a `Pages` from a raw `struct page` pointer.
///
/// # Safety
///
/// `ptr` must point to a valid allocation of 2^ORDER pages, and the caller
/// must own those pages: they will be freed when the returned `Pages` is
/// dropped.
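///
/// A minimal sketch of the intended use, where `raw_page` stands in for a
/// hypothetical pointer whose ownership is being transferred from C code:
///
/// ```ignore
/// // SAFETY: Ownership of `raw_page` moves into the returned `Pages<0>`,
/// // which frees the page when dropped.
/// let pages = unsafe { Pages::<0>::from_raw(raw_page) };
/// ```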
pub unsafe fn from_raw(ptr: *mut bindings::page) -> Self {
Self { pages: ptr }
}
}
impl Pages<0> {
#[inline(always)]
fn check_offset_and_map<I: MappingInfo>(
&self,
offset: usize,
len: usize,
) -> Result<PageMapping<'_, I>>
where
Pages<0>: MappingActions<I>,
{
let end = offset.checked_add(len).ok_or(EINVAL)?;
// A mapping covers a single page, so the requested range must lie
// entirely within it.
if end > PAGE_SIZE {
return Err(EINVAL);
}
let mapping = <Self as MappingActions<I>>::map(self);
Ok(mapping)
}
#[inline(always)]
unsafe fn read_internal<I: MappingInfo>(
&self,
dest: *mut u8,
offset: usize,
len: usize,
) -> Result
where
Pages<0>: MappingActions<I>,
{
let mapping = self.check_offset_and_map::<I>(offset, len)?;
unsafe { ptr::copy_nonoverlapping((mapping.ptr as *mut u8).add(offset), dest, len) };
Ok(())
}
/// Maps the pages and reads from them into the given buffer.
///
/// # Safety
///
/// Callers must ensure that the destination buffer is valid for the given
/// length. Additionally, if the raw buffer is intended to be recast, they
/// must ensure that the data can be safely cast;
/// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
/// `dest` must not point into the source page.
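///
/// # Examples
///
/// A sketch of reading the first bytes of the page into a local buffer:
///
/// ```
/// # use kernel::pages::Pages;
/// # fn f(pages: &Pages<0>) -> kernel::error::Result {
/// let mut buf = [0u8; 8];
/// // SAFETY: `buf` is valid for 8 bytes and is not part of the page.
/// unsafe { pages.read(buf.as_mut_ptr(), 0, buf.len())? };
/// # Ok(())
/// # }
/// ```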
#[inline(always)]
pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
unsafe { self.read_internal::<NormalMappingInfo>(dest, offset, len) }
}
/// Maps the pages and reads from them into the given buffer. The page is
/// mapped atomically.
///
/// # Safety
///
/// Callers must ensure that the destination buffer is valid for the given
/// length. Additionally, if the raw buffer is intended to be recast, they
/// must ensure that the data can be safely cast;
/// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
/// `dest` must not point into the source page.
#[inline(always)]
pub unsafe fn read_atomic(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
unsafe { self.read_internal::<AtomicMappingInfo>(dest, offset, len) }
}
#[inline(always)]
unsafe fn write_internal<I: MappingInfo>(
&self,
src: *const u8,
offset: usize,
len: usize,
) -> Result
where
Pages<0>: MappingActions<I>,
{
let mapping = self.check_offset_and_map::<I>(offset, len)?;
unsafe { ptr::copy_nonoverlapping(src, (mapping.ptr as *mut u8).add(offset), len) };
Ok(())
}
/// Maps the pages and writes into them from the given buffer.
///
/// # Safety
///
/// Callers must ensure that the buffer is valid for the given length.
/// Additionally, if the page is (or will be) mapped by userspace, they must
/// ensure that no kernel data is leaked through padding if it was cast from
/// another type; [`crate::io_buffer::WritableToBytes`] has more details
/// about it. `src` must not point to the destination page.
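///
/// # Examples
///
/// A sketch of writing a small buffer at a byte offset within the page:
///
/// ```
/// # use kernel::pages::Pages;
/// # fn f(pages: &Pages<0>) -> kernel::error::Result {
/// let buf = [0xffu8; 8];
/// // SAFETY: `buf` is valid for 8 bytes and is not part of the page.
/// unsafe { pages.write(buf.as_ptr(), 16, buf.len())? };
/// # Ok(())
/// # }
/// ```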
#[inline(always)]
pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
unsafe { self.write_internal::<NormalMappingInfo>(src, offset, len) }
}
/// Maps the pages and writes into them from the given buffer. The page is
/// mapped atomically.
///
/// # Safety
///
/// Callers must ensure that the buffer is valid for the given length.
/// Additionally, if the page is (or will be) mapped by userspace, they must
/// ensure that no kernel data is leaked through padding if it was cast from
/// another type; [`crate::io_buffer::WritableToBytes`] has more details
/// about it. `src` must not point to the destination page.
#[inline(always)]
pub unsafe fn write_atomic(&self, src: *const u8, offset: usize, len: usize) -> Result {
unsafe { self.write_internal::<AtomicMappingInfo>(src, offset, len) }
}
/// Maps the page at index 0.
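///
/// The page is unmapped when the returned [`PageMapping`] is dropped. A
/// minimal sketch:
///
/// ```
/// # use kernel::pages::Pages;
/// # fn f(pages: &Pages<0>) {
/// let mapping = pages.kmap();
/// // `get_ptr` returns the kernel virtual address of the mapping.
/// let _addr = mapping.get_ptr();
/// // The page is unmapped here, when `mapping` goes out of scope.
/// # }
/// ```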
#[inline(always)]
pub fn kmap(&self) -> PageMapping<'_, NormalMappingInfo> {
// SAFETY: By the type invariant, `self.pages` points to a valid page, so
// it can be mapped.
let ptr = unsafe { bindings::kmap(self.pages) };
PageMapping {
page: self.pages,
ptr,
_phantom: PhantomData,
_phantom2: PhantomData,
}
}
/// Atomically maps the page at index 0.
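///
/// Because the mapping is atomic, the caller must not sleep while the
/// returned [`PageMapping`] is held. A minimal sketch:
///
/// ```
/// # use kernel::pages::Pages;
/// # fn f(pages: &Pages<0>) {
/// let mapping = pages.kmap_atomic();
/// let _addr = mapping.get_ptr();
/// // No sleeping allowed between here and the drop of `mapping`.
/// # }
/// ```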
#[inline(always)]
pub fn kmap_atomic(&self) -> PageMapping<'_, AtomicMappingInfo> {
// SAFETY: By the type invariant, `self.pages` points to a valid page, so
// it can be mapped.
let ptr = unsafe { bindings::kmap_atomic(self.pages) };
PageMapping {
page: self.pages,
ptr,
_phantom: PhantomData,
_phantom2: PhantomData,
}
}
}
impl<const ORDER: u32> Drop for Pages<ORDER> {
fn drop(&mut self) {
// SAFETY: By the type invariants, we know the pages are allocated with the given order.
unsafe { bindings::__free_pages(self.pages, ORDER) };
}
}
/// Specifies the type of page mapping
pub trait MappingInfo {}
/// Encapsulates methods to map and unmap pages
pub trait MappingActions<I: MappingInfo>
where
Pages<0>: MappingActions<I>,
{
/// Maps a page into the kernel address space
fn map(pages: &Pages<0>) -> PageMapping<'_, I>;
/// Unmaps the page specified by `mapping`
///
/// # Safety
///
/// Must only be called by `PageMapping::drop()`.
unsafe fn unmap(mapping: &PageMapping<'_, I>);
}
/// A type state indicating that pages were mapped atomically
pub struct AtomicMappingInfo;
impl MappingInfo for AtomicMappingInfo {}
/// A type state indicating that pages were not mapped atomically
pub struct NormalMappingInfo;
impl MappingInfo for NormalMappingInfo {}
impl MappingActions<AtomicMappingInfo> for Pages<0> {
#[inline(always)]
fn map(pages: &Pages<0>) -> PageMapping<'_, AtomicMappingInfo> {
pages.kmap_atomic()
}
#[inline(always)]
unsafe fn unmap(mapping: &PageMapping<'_, AtomicMappingInfo>) {
// SAFETY: A `PageMapping<'_, AtomicMappingInfo>` is only created via
// `kmap_atomic`, so the page is currently mapped atomically and it is
// safe to unmap it here.
unsafe { bindings::kunmap_atomic(mapping.ptr) };
}
}
impl MappingActions<NormalMappingInfo> for Pages<0> {
#[inline(always)]
fn map(pages: &Pages<0>) -> PageMapping<'_, NormalMappingInfo> {
pages.kmap()
}
#[inline(always)]
unsafe fn unmap(mapping: &PageMapping<'_, NormalMappingInfo>) {
// SAFETY: A `PageMapping<'_, NormalMappingInfo>` is only created via
// `kmap`, so the page is currently mapped and it is safe to unmap it
// here.
unsafe { bindings::kunmap(mapping.page) };
}
}
/// An owned page mapping. When this struct is dropped, the page is unmapped.
pub struct PageMapping<'a, I: MappingInfo>
where
Pages<0>: MappingActions<I>,
{
page: *mut bindings::page,
ptr: *mut core::ffi::c_void,
_phantom: PhantomData<&'a i32>,
_phantom2: PhantomData<I>,
}
impl<'a, I: MappingInfo> PageMapping<'a, I>
where
Pages<0>: MappingActions<I>,
{
/// Returns the kernel virtual address of the mapped page
#[inline(always)]
pub fn get_ptr(&self) -> *mut core::ffi::c_void {
self.ptr
}
}
// Because we do not have `Drop` specialization, we have to do this dance. Life
// would be much simpler if we could have `impl Drop for PageMapping<'_,
// AtomicMappingInfo>` and `impl Drop for PageMapping<'_, NormalMappingInfo>`.
impl<I: MappingInfo> Drop for PageMapping<'_, I>
where
Pages<0>: MappingActions<I>,
{
#[inline(always)]
fn drop(&mut self) {
// SAFETY: This is `PageMapping::drop`, the only place `unmap` may be
// called from, per its safety requirements.
unsafe { <Pages<0> as MappingActions<I>>::unmap(self) }
}
}