author     Andreas Hindborg <a.hindborg@samsung.com>    2023-09-04 13:34:41 +0200
committer  Danilo Krummrich <dakr@redhat.com>           2023-11-22 22:00:54 +0100
commit     c48755fbe45ab49eeacc4f5afccacd2d45be0671
tree       2b0ba6253667c66dda91822bc9820b938f24e184
parent     91cfdd8be6d059ec6a4d719ed284f72b8999214a
rust: add dma pool and coherent allocator
Based on https://github.com/wedsonaf/linux/commit/02541e65a7e778c0049fed86ae49302bc07abed3
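
A minimal usage sketch (illustrative only: `dev` is assumed to be a
`&dyn RawDevice` borrowed from a bus driver, `Desc` a hypothetical `Copy`
descriptor type, and `desc` a value of it):

    // One-off coherent allocation holding 128 descriptors.
    let alloc = kernel::dma::try_alloc_coherent::<Desc>(dev, 128, false)?;
    alloc.write(0, &desc);
    let first = alloc.read(0);

    // A pool handing out allocations of 16 descriptors each; `true`
    // selects GFP_ATOMIC so it can be called from atomic context.
    let pool = kernel::dma::Pool::<Desc>::try_new(c_str!("desc-pool"), dev, 16, 64, 0)?;
    let entry = pool.try_alloc(true)?;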
-rw-r--r--  rust/bindings/bindings_helper.h    1
-rw-r--r--  rust/bindings/lib.rs               1
-rw-r--r--  rust/kernel/dma.rs               318
-rw-r--r--  rust/kernel/lib.rs                 1
4 files changed, 321 insertions, 0 deletions
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 02d2b68bac36..34c4ccee3fc8 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -21,4 +21,5 @@
/* `bindgen` gets confused at certain things. */
const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
+const gfp_t BINDINGS_GFP_ATOMIC = GFP_ATOMIC;
const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 9bcbea04dac3..54d9b86ba56e 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -49,5 +49,6 @@ mod bindings_helper {
pub use bindings_raw::*;
+pub const GFP_ATOMIC: gfp_t = BINDINGS_GFP_ATOMIC;
pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
new file mode 100644
index 000000000000..9f7cbff5cf24
--- /dev/null
+++ b/rust/kernel/dma.rs
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Direct memory access (DMA).
+//!
+//! C header: [`include/linux/dma-mapping.h`](../../../../include/linux/dma-mapping.h)
+
+use crate::{
+ bindings,
+ device::{Device, RawDevice},
+ error::code::*,
+ error::Result,
+ str::CStr,
+ sync::Arc,
+};
+use core::marker::PhantomData;
+
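+/// An allocator of DMA coherent memory regions.
+///
+/// The data required to free an allocation differs between allocators, so it
+/// is captured in the associated types below.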
+pub trait Allocator {
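+    /// The data required to free an allocation made by this allocator.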
+ type AllocationData;
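+    /// The type from which [`Self::AllocationData`] can be derived.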
+ type DataSource;
+
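+    /// Frees the allocation at `cpu_addr`/`dma_handle` of the given `size`.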
+ fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, alloc_data: &mut Self::AllocationData);
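+
+    /// Derives the data needed to free an allocation from its source.
+    ///
+    /// # Safety
+    ///
+    /// The returned value must not be used after `data` has been dropped.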
+ unsafe fn allocation_data(data: &Self::DataSource) -> Self::AllocationData;
+}
+
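+/// An [`Allocator`] that allocates coherent memory with `dma_alloc_attrs` and
+/// frees it with `dma_free_attrs`.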
+pub struct CoherentAllocator;
+
+impl Allocator for CoherentAllocator {
+ type AllocationData = Device;
+ type DataSource = Device;
+
+ fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, dev: &mut Device) {
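+        // SAFETY: `cpu_addr` and `dma_handle` were returned by a previous
+        // successful call to `dma_alloc_attrs` on this device.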
+ unsafe { bindings::dma_free_attrs(dev.ptr, size, cpu_addr as _, dma_handle, 0) };
+ }
+
+ unsafe fn allocation_data(data: &Device) -> Device {
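+        // SAFETY: Per the safety requirements of this function, the returned
+        // `Device` does not outlive `data`, so no reference needs to be taken.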
+ unsafe { Device::from_dev_no_reference(data) }
+ }
+}
+
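+/// Allocates a coherent memory region large enough for `count` values of type
+/// `T` from device `dev`, using `GFP_ATOMIC` if `atomic` is set.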
+pub fn try_alloc_coherent<T>(
+ dev: &dyn RawDevice,
+ count: usize,
+ atomic: bool,
+) -> Result<CoherentAllocation<T, CoherentAllocator>> {
+ let t_size = core::mem::size_of::<T>();
+ let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
+ let mut dma_handle = 0;
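+    // SAFETY: `dev.raw_device()` is a valid pointer to a `struct device` and
+    // `dma_handle` is a valid place to write the bus address to.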
+ let ret = unsafe {
+ bindings::dma_alloc_attrs(
+ dev.raw_device(),
+ size,
+ &mut dma_handle,
+ if atomic {
+ bindings::GFP_ATOMIC
+ } else {
+ bindings::GFP_KERNEL
+ },
+ 0,
+ )
+ };
+ if ret.is_null() {
+ Err(ENOMEM)
+ } else {
+ Ok(CoherentAllocation::new(
+ ret as _,
+ dma_handle,
+ count,
+ Device::from_dev(dev),
+ ))
+ }
+}
+
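+/// A DMA memory pool, backed by the kernel's `dma_pool` allocator.
+///
+/// Each allocation made from the pool holds `count` values of type `T`.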
+pub struct Pool<T> {
+ ptr: *mut bindings::dma_pool,
+ dev: Device,
+ count: usize,
+ _p: PhantomData<T>,
+}
+
+impl<T> Pool<T> {
+ /// Creates a new DMA memory pool.
+ pub fn try_new(
+ name: &CStr,
+ dev: &dyn RawDevice,
+ count: usize,
+ align: usize,
+ boundary: usize,
+ ) -> Result<Arc<Self>> {
+ let t_size = core::mem::size_of::<T>();
+ let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
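+        // SAFETY: `name` is a valid C string and `dev.raw_device()` is a
+        // valid pointer to a `struct device`.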
+ let ptr = unsafe {
+ bindings::dma_pool_create(name.as_char_ptr(), dev.raw_device(), size, align, boundary)
+ };
+ if ptr.is_null() {
+ Err(ENOMEM)
+ } else {
+ Arc::try_new(Self {
+ ptr,
+ count,
+ dev: Device::from_dev(dev),
+ _p: PhantomData,
+ })
+ .map_err(|e| e.into())
+ }
+ }
+
+ /// Allocates some memory from the pool.
+ pub fn try_alloc(&self, atomic: bool) -> Result<CoherentAllocation<T, Self>> {
+ let flags = if atomic {
+ bindings::GFP_ATOMIC
+ } else {
+ bindings::GFP_KERNEL
+ };
+
+ let mut dma_handle = 0;
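+        // SAFETY: `self.ptr` is a valid pool pointer and `dma_handle` is a
+        // valid place to write the bus address to.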
+ let ptr = unsafe { bindings::dma_pool_alloc(self.ptr, flags, &mut dma_handle) };
+ if ptr.is_null() {
+ Err(ENOMEM)
+ } else {
+ Ok(CoherentAllocation::new(
+ ptr as _, dma_handle, self.count, self.ptr,
+ ))
+ }
+ }
+}
+
+impl<T> Allocator for Pool<T> {
+ type AllocationData = *mut bindings::dma_pool;
+ type DataSource = Arc<Pool<T>>;
+
+ fn free(cpu_addr: *mut (), dma_handle: u64, _size: usize, pool: &mut *mut bindings::dma_pool) {
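+        // SAFETY: `cpu_addr` and `dma_handle` were returned by a previous
+        // successful call to `dma_pool_alloc` on `pool`.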
+ unsafe { bindings::dma_pool_free(*pool, cpu_addr as _, dma_handle) };
+ }
+
+ unsafe fn allocation_data(data: &Arc<Pool<T>>) -> *mut bindings::dma_pool {
+ data.ptr
+ }
+}
+
+impl<T> Drop for Pool<T> {
+ fn drop(&mut self) {
+        // SAFETY: `self.ptr` was returned by a successful `dma_pool_create`
+        // and, per the safety requirements of `allocation_data`, no
+        // allocation outlives the pool, so all have been freed by now.
+ unsafe { bindings::dma_pool_destroy(self.ptr) };
+ }
+}
+
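+/// A region of DMA coherent memory obtained from an [`Allocator`].
+///
+/// The region holds `count` values of type `T` and is accessible to both the
+/// CPU, through `first_ptr`, and the device, through `dma_handle`.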
+pub struct CoherentAllocation<T, A: Allocator> {
+ alloc_data: A::AllocationData,
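+    /// The bus address of the region, for use by the device.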
+ pub dma_handle: u64,
+ count: usize,
+ cpu_addr: *mut T,
+}
+
+impl<T, A: Allocator> CoherentAllocation<T, A> {
+ fn new(cpu_addr: *mut T, dma_handle: u64, count: usize, alloc_data: A::AllocationData) -> Self {
+ Self {
+ dma_handle,
+ count,
+ cpu_addr,
+ alloc_data,
+ }
+ }
+
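+    /// Reads the value at `index`, returning `None` if it is out of bounds.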
+    pub fn read(&self, index: usize) -> Option<T>
+    where
+        T: Copy,
+    {
+ if index >= self.count {
+ return None;
+ }
+
+ let ptr = self.cpu_addr.wrapping_add(index);
+        // SAFETY: The index is within bounds and the allocation is live, so
+        // `ptr` is valid for reads.
+ Some(unsafe { ptr.read() })
+ }
+
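+    /// Reads the value at `index` with volatile semantics, returning `None`
+    /// if it is out of bounds.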
+    pub fn read_volatile(&self, index: usize) -> Option<T>
+    where
+        T: Copy,
+    {
+ if index >= self.count {
+ return None;
+ }
+
+ let ptr = self.cpu_addr.wrapping_add(index);
+        // SAFETY: The index is within bounds and the allocation is live, so
+        // `ptr` is valid for volatile reads.
+ Some(unsafe { ptr.read_volatile() })
+ }
+
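+    /// Writes `value` at `index`, returning `false` if it is out of bounds.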
+ pub fn write(&self, index: usize, value: &T) -> bool
+ where
+ T: Copy,
+ {
+ if index >= self.count {
+ return false;
+ }
+
+ let ptr = self.cpu_addr.wrapping_add(index);
+        // SAFETY: The index is within bounds and the allocation is live, so
+        // `ptr` is valid for writes.
+ unsafe { ptr.write(*value) };
+ true
+ }
+
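+    /// Writes `value` at `index` and returns the value that was previously
+    /// stored there, or `None` if `index` is out of bounds.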
+ pub fn read_write(&self, index: usize, value: T) -> Option<T> {
+ if index >= self.count {
+ return None;
+ }
+
+ let ptr = self.cpu_addr.wrapping_add(index);
+        // SAFETY: The index is within bounds and the allocation is live, so
+        // `ptr` is valid for both reads and writes.
+        let ret = unsafe { ptr.read() };
+        unsafe { ptr.write(value) };
+ Some(ret)
+ }
+
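+    /// Reconstructs a `CoherentAllocation` from parts previously returned by
+    /// [`Self::into_parts`].
+    ///
+    /// # Safety
+    ///
+    /// `ptr`, `dma_handle` and `count` must have been returned by a call to
+    /// `into_parts` on an allocation made from `data`, and `data` must
+    /// outlive the returned allocation.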
+ pub unsafe fn from_parts(
+ data: &A::DataSource,
+ ptr: usize,
+ dma_handle: u64,
+ count: usize,
+ ) -> Self {
+ Self {
+ dma_handle,
+ count,
+ cpu_addr: ptr as _,
+ // SAFETY: The safety requirements of the current function satisfy those of
+ // `allocation_data`.
+ alloc_data: unsafe { A::allocation_data(data) },
+ }
+ }
+
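+    /// Decomposes the allocation into its raw parts (the CPU address and the
+    /// DMA handle) without freeing it.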
+ pub fn into_parts(self) -> (usize, u64) {
+ let ret = (self.cpu_addr as _, self.dma_handle);
+ core::mem::forget(self);
+ ret
+ }
+
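+    /// Returns a pointer to the first element of the region.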
+ pub fn first_ptr(&self) -> *const T {
+ self.cpu_addr
+ }
+
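+    /// Returns a mutable pointer to the first element of the region.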
+ pub fn first_ptr_mut(&self) -> *mut T {
+ self.cpu_addr
+ }
+}
+
+impl<T, A: Allocator> Drop for CoherentAllocation<T, A> {
+ fn drop(&mut self) {
+ let size = self.count * core::mem::size_of::<T>();
+ A::free(
+ self.cpu_addr as _,
+ self.dma_handle,
+ size,
+ &mut self.alloc_data,
+ );
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 3fff4cc54fe3..8673c792e5db 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -35,6 +35,7 @@ extern crate self as kernel;
mod allocator;
mod build_assert;
pub mod device;
+pub mod dma;
pub mod driver;
pub mod error;
pub mod init;