author | Danilo Krummrich <dakr@kernel.org> | 2024-07-23 18:59:10 +0200 |
---|---|---|
committer | Danilo Krummrich <dakr@kernel.org> | 2024-10-08 16:27:50 +0200 |
commit | 15c87cbdcf5a0ff990f591676c1557d43f80f4e0 (patch) | |
tree | 241c169c22f953523a9ea8f1381408e4074586c6 | |
parent | 09403321710d94abaa3e12a07eb5391490117bd4 (diff) | |
rust: alloc: implement `collect` for `IntoIter`
Currently, we can't implement `FromIterator`. There are a couple of
issues with this trait in the kernel, namely:
- Rust's specialization feature is unstable. This prevents us from
  optimizing for the special case where `I::IntoIter` equals `Vec`'s
  `IntoIter` type.
- We can't use `I::IntoIter`'s type ID to work around this either,
  since `FromIterator` doesn't require this type to be `'static`.
- `FromIterator::from_iter` returns `Self` instead of
  `Result<Self, AllocError>`, hence we can't properly handle allocation
  failures.
- Neither `Iterator::collect` nor `FromIterator::from_iter` can handle
additional allocation flags.
Instead, provide `IntoIter::collect`, such that we can at least convert
an `IntoIter` into a `Vec` again (see the sketch after the diffstat
below).
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Reviewed-by: Benno Lossin <benno.lossin@proton.me>
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
-rw-r--r-- | rust/kernel/alloc/kvec.rs | 94 |
1 file changed, 94 insertions, 0 deletions
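To make the mismatch described in the commit message concrete, here is a minimal sketch in plain Rust. The `FromIterator` definition quoted in the comment is the standard library's; everything else is hypothetical, including the trait name `TryFromIteratorWithFlags` and the stand-in `Flags`/`AllocError` types, which only mimic the shape of the kernel's own types.

```rust
// The standard library trait is infallible and takes no allocation flags;
// `Iterator::collect` simply defers to it:
//
//     pub trait FromIterator<A>: Sized {
//         fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self;
//     }

// Stand-in types for this sketch only; the kernel has its own `Flags` and
// `AllocError` types.
pub struct Flags(u32);
pub struct AllocError;

/// Hypothetical shape of a kernel-usable counterpart: fallible, and threading
/// allocation flags through to the allocator. Upstream `Iterator::collect`
/// cannot drive a trait like this, which is why the patch adds an inherent
/// `IntoIter::collect` instead.
pub trait TryFromIteratorWithFlags<A>: Sized {
    fn try_from_iter<I>(iter: I, flags: Flags) -> Result<Self, AllocError>
    where
        I: IntoIterator<Item = A>;
}
```

Because neither stable specialization nor a `'static` bound on `I::IntoIter` is available to special-case the `Vec`-to-`Vec` conversion, the patch sidesteps the trait machinery entirely and puts the conversion on `IntoIter` itself.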
````diff
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 86382827f576..119b5a4dbf77 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -694,6 +694,100 @@ pub struct IntoIter<T, A: Allocator> {
     _p: PhantomData<A>,
 }
 
+impl<T, A> IntoIter<T, A>
+where
+    A: Allocator,
+{
+    fn into_raw_parts(self) -> (*mut T, NonNull<T>, usize, usize) {
+        let me = ManuallyDrop::new(self);
+        let ptr = me.ptr;
+        let buf = me.buf;
+        let len = me.len;
+        let cap = me.layout.len();
+        (ptr, buf, len, cap)
+    }
+
+    /// Same as `Iterator::collect` but specialized for `Vec`'s `IntoIter`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let v = kernel::kvec![1, 2, 3]?;
+    /// let mut it = v.into_iter();
+    ///
+    /// assert_eq!(it.next(), Some(1));
+    ///
+    /// let v = it.collect(GFP_KERNEL);
+    /// assert_eq!(v, [2, 3]);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    /// # Implementation Details
+    ///
+    /// Currently, we can't implement `FromIterator`. There are a couple of issues with this trait
+    /// in the kernel, namely:
+    ///
+    /// - Rust's specialization feature is unstable. This prevents us from optimizing for the
+    ///   special case where `I::IntoIter` equals `Vec`'s `IntoIter` type.
+    /// - We can't use `I::IntoIter`'s type ID to work around this either, since `FromIterator`
+    ///   doesn't require this type to be `'static`.
+    /// - `FromIterator::from_iter` returns `Self` instead of `Result<Self, AllocError>`, hence
+    ///   we can't properly handle allocation failures.
+    /// - Neither `Iterator::collect` nor `FromIterator::from_iter` can handle additional allocation
+    ///   flags.
+    ///
+    /// Instead, provide `IntoIter::collect`, such that we can at least convert an `IntoIter` into a
+    /// `Vec` again.
+    ///
+    /// Note that `IntoIter::collect` doesn't require `Flags`, since it re-uses the existing backing
+    /// buffer. However, this backing buffer may be shrunk to the actual count of elements.
+    pub fn collect(self, flags: Flags) -> Vec<T, A> {
+        let old_layout = self.layout;
+        let (mut ptr, buf, len, mut cap) = self.into_raw_parts();
+        let has_advanced = ptr != buf.as_ptr();
+
+        if has_advanced {
+            // Copy the contents we have advanced to at the beginning of the buffer.
+            //
+            // SAFETY:
+            // - `ptr` is valid for reads of `len * size_of::<T>()` bytes,
+            // - `buf.as_ptr()` is valid for writes of `len * size_of::<T>()` bytes,
+            // - `ptr` and `buf.as_ptr()` are not subject to aliasing restrictions relative to
+            //   each other,
+            // - both `ptr` and `buf.as_ptr()` are properly aligned.
+            unsafe { ptr::copy(ptr, buf.as_ptr(), len) };
+            ptr = buf.as_ptr();
+
+            // SAFETY: `len` is guaranteed to be smaller than `self.layout.len()`.
+            let layout = unsafe { ArrayLayout::<T>::new_unchecked(len) };
+
+            // SAFETY: `buf` points to the start of the backing buffer and `len` is guaranteed to
+            // be smaller than `cap`. Depending on `alloc` this operation may shrink the buffer or
+            // leave it as it is.
+            ptr = match unsafe {
+                A::realloc(Some(buf.cast()), layout.into(), old_layout.into(), flags)
+            } {
+                // If we fail to shrink, which likely can't even happen, continue with the existing
+                // buffer.
+                Err(_) => ptr,
+                Ok(ptr) => {
+                    cap = len;
+                    ptr.as_ptr().cast()
+                }
+            };
+        }
+
+        // SAFETY: If the iterator has been advanced, the advanced elements have been copied to
+        // the beginning of the buffer and `len` has been adjusted accordingly.
+        //
+        // - `ptr` is guaranteed to point to the start of the backing buffer.
+        // - `cap` is either the original capacity or, after shrinking the buffer, equal to `len`.
+        // - `alloc` is guaranteed to be unchanged since `into_iter` has been called on the original
+        //   `Vec`.
+        unsafe { Vec::from_raw_parts(ptr, len, cap) }
+    }
+}
+
 impl<T, A> Iterator for IntoIter<T, A>
 where
     A: Allocator,
````
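For a usage-level view of the new method, the sketch below is written in the same style as the doc test in the patch, so it assumes the kernel's doc-test environment (`kernel::kvec!`, `GFP_KERNEL`, `Error`, and the hidden `# Ok::<(), Error>(())` return line). It shows the intended round trip: partially drain the iterator, then turn the remainder back into a `Vec`; the flags are only used for the optional shrink of the existing backing buffer.

```rust
let v = kernel::kvec![10, 20, 30, 40]?;
let mut it = v.into_iter();

// Drain the first two elements through the iterator.
assert_eq!(it.next(), Some(10));
assert_eq!(it.next(), Some(20));

// Hand the remaining elements back as a `Vec`. The existing backing buffer is
// reused; `GFP_KERNEL` only matters for the attempt to shrink it to two elements.
let rest = it.collect(GFP_KERNEL);
assert_eq!(rest, [30, 40]);

# Ok::<(), Error>(())
```

If the iterator has not been advanced at all, `collect` simply reassembles the original `Vec` from its raw parts without touching the allocator.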