author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-30 11:20:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-30 11:20:22 -0700
commit    310897659cf056016e2c772a028f9b8abc934928 (patch)
tree      ca5f122d871a4e54026884bcc98a6309e3fd4069 /rust
parent    825a0714d2b3883d4f8ff64f6933fb73ee3f1834 (diff)
parent    ea76e08f4d901a450619831a255e9e0a4c0ed162 (diff)
Merge tag 'rust-6.4' of https://github.com/Rust-for-Linux/linux
Pull rust updates from Miguel Ojeda:
 "More additions to the Rust core. Importantly, this adds the pin-init
  API, which will be used by other abstractions, such as the
  synchronization ones added here too:

   - pin-init API: a solution for the safe pinned initialization
     problem. This allows reducing the need for 'unsafe' code in the
     kernel when dealing with data structures that require a stable
     address. Commit 90e53c5e70a6 ("rust: add pin-init API core")
     contains a nice introduction -- here is an example of how it
     looks:

        #[pin_data]
        struct Example {
            #[pin]
            value: Mutex<u32>,
            #[pin]
            value_changed: CondVar,
        }

        impl Example {
            fn new() -> impl PinInit<Self> {
                pin_init!(Self {
                    value <- new_mutex!(0),
                    value_changed <- new_condvar!(),
                })
            }
        }

        // In a `Box`.
        let b = Box::pin_init(Example::new())?;

        // In the stack.
        stack_pin_init!(let s = Example::new());

   - 'sync' module: New types 'LockClassKey' ('struct lock_class_key'),
     'Lock', 'Guard', 'Mutex' ('struct mutex'), 'SpinLock'
     ('spinlock_t'), 'LockedBy' and 'CondVar' (uses
     'wait_queue_head_t'), plus macros such as 'static_lock_class!'
     and 'new_spinlock!'.

     In particular, 'Lock' and 'Guard' are generic implementations
     that contain code that is common to all locks. Then, different
     backends (the new 'Backend' trait) are implemented and used to
     define types like 'Mutex':

        type Mutex<T> = Lock<T, MutexBackend>;

     In addition, new methods 'assume_init()', 'init_with()' and
     'pin_init_with()' for 'UniqueArc<MaybeUninit<T>>' and 'downcast()'
     for 'Arc<dyn Any + Send + Sync>'; as well as 'Debug' and 'Display'
     implementations for 'Arc' and 'UniqueArc'. Reduced stack usage of
     'UniqueArc::try_new_uninit()', too.

   - 'types' module: New trait 'AlwaysRefCounted' and new type 'ARef'
     (an owned reference to an always-reference-counted object, meant
     to be used in wrappers for C types that have their own ref
     counting functions). Moreover, new associated functions
     'raw_get()' and 'ffi_init()' for 'Opaque'.

   - New 'task' module with a new type 'Task' ('struct task_struct'),
     and a new macro 'current!' to safely get a reference to the
     current one.

   - New 'ioctl' module with new '_IOC*' const functions (equivalent
     to the C macros).

   - New 'uapi' crate, intended to be accessible by drivers directly.

   - 'macros' crate: new 'quote!' macro (similar to the one provided
     in userspace by the 'quote' crate); and the 'module!' macro now
     allows specifying multiple module aliases.

   - 'error' module: New associated functions for the 'Error' type,
     such as 'from_errno()', and new functions such as 'to_result()'.

   - 'alloc' crate: More fallible 'Vec' methods: 'try_resize' and
     'try_extend_from_slice', and the infrastructure (imported from
     the Rust standard library) they need"

* tag 'rust-6.4' of https://github.com/Rust-for-Linux/linux: (44 commits)
  rust: ioctl: Add ioctl number manipulation functions
  rust: uapi: Add UAPI crate
  rust: sync: introduce `CondVar`
  rust: lock: add `Guard::do_unlocked`
  rust: sync: introduce `LockedBy`
  rust: introduce `current`
  rust: add basic `Task`
  rust: introduce `ARef`
  rust: lock: introduce `SpinLock`
  rust: lock: introduce `Mutex`
  rust: sync: introduce `Lock` and `Guard`
  rust: sync: introduce `LockClassKey`
  MAINTAINERS: add Benno Lossin as Rust reviewer
  rust: init: broaden the blanket impl of `Init`
  rust: sync: add functions for initializing `UniqueArc<MaybeUninit<T>>`
  rust: sync: reduce stack usage of `UniqueArc::try_new_uninit`
  rust: types: add `Opaque::ffi_init`
  rust: prelude: add `pin-init` API items to prelude
  rust: init: add `Zeroable` trait and `init::zeroed` function
  rust: init: add `stack_pin_init!` macro
  ...
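As a rough sketch of how the new 'sync' pieces compose at a call site,
building on the 'Example' type above (hedged: 'lock()' returning a guard
appears in the patches below, while 'wait()' and 'notify_all()' are
paraphrased from the CondVar abstraction rather than quoted from it):

    // Sketch only, not part of the patch set.
    fn set_value(e: &Example, v: u32) {
        let mut guard = e.value.lock();  // the `Guard` unlocks on drop
        *guard = v;
        e.value_changed.notify_all();    // assumed notification method
    }

    fn wait_for_nonzero(e: &Example) -> u32 {
        let mut guard = e.value.lock();
        while *guard == 0 {
            // Assumed signature: atomically releases the lock while
            // sleeping and reacquires it on wakeup; the return value
            // (whether a signal is pending) is ignored in this sketch.
            let _ = e.value_changed.wait(&mut guard);
        }
        *guard
    }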
Diffstat (limited to 'rust')
-rw-r--r--  rust/.gitignore                      |    1
-rw-r--r--  rust/Makefile                        |   28
-rw-r--r--  rust/alloc/vec/mod.rs                |  137
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs    |   30
-rw-r--r--  rust/alloc/vec/spec_extend.rs        |  174
-rw-r--r--  rust/bindings/bindings_helper.h      |    2
-rw-r--r--  rust/helpers.c                       |   82
-rw-r--r--  rust/kernel/error.rs                 |  137
-rw-r--r--  rust/kernel/init.rs                  | 1427
-rw-r--r--  rust/kernel/init/__internal.rs       |  235
-rw-r--r--  rust/kernel/init/macros.rs           |  971
-rw-r--r--  rust/kernel/ioctl.rs                 |   72
-rw-r--r--  rust/kernel/lib.rs                   |   10
-rw-r--r--  rust/kernel/prelude.rs               |    8
-rw-r--r--  rust/kernel/sync.rs                  |   50
-rw-r--r--  rust/kernel/sync/arc.rs              |  108
-rw-r--r--  rust/kernel/sync/arc/std_vendor.rs   |   28
-rw-r--r--  rust/kernel/sync/condvar.rs          |  174
-rw-r--r--  rust/kernel/sync/lock.rs             |  191
-rw-r--r--  rust/kernel/sync/lock/mutex.rs       |  118
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs    |  117
-rw-r--r--  rust/kernel/sync/locked_by.rs        |  156
-rw-r--r--  rust/kernel/task.rs                  |  155
-rw-r--r--  rust/kernel/types.rs                 |  135
-rw-r--r--  rust/macros/helpers.rs               |   10
-rw-r--r--  rust/macros/lib.rs                   |   80
-rw-r--r--  rust/macros/module.rs                |   32
-rw-r--r--  rust/macros/pin_data.rs              |   79
-rw-r--r--  rust/macros/pinned_drop.rs           |   49
-rw-r--r--  rust/macros/quote.rs                 |  143
-rw-r--r--  rust/uapi/lib.rs                     |   27
-rw-r--r--  rust/uapi/uapi_helper.h              |    9
32 files changed, 4952 insertions(+), 23 deletions(-)
diff --git a/rust/.gitignore b/rust/.gitignore
index 168cb26a31b9..21552992b401 100644
--- a/rust/.gitignore
+++ b/rust/.gitignore
@@ -2,6 +2,7 @@
bindings_generated.rs
bindings_helpers_generated.rs
+uapi_generated.rs
exports_*_generated.h
doc/
test/
diff --git a/rust/Makefile b/rust/Makefile
index aef85e9e8eeb..7c9d9f11aec5 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -16,6 +16,9 @@ obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
exports_kernel_generated.h
+always-$(CONFIG_RUST) += uapi/uapi_generated.rs
+obj-$(CONFIG_RUST) += uapi.o
+
ifdef CONFIG_RUST_BUILD_ASSERT_ALLOW
obj-$(CONFIG_RUST) += build_error.o
else
@@ -113,7 +116,7 @@ rustdoc-alloc: $(src)/alloc/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
rustdoc-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
- --extern bindings
+ --extern bindings --extern uapi
rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
$(obj)/bindings.o FORCE
@@ -141,6 +144,9 @@ rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
$(call if_changed,rustc_test_library)
+rusttestlib-uapi: $(src)/uapi/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
quiet_cmd_rustdoc_test = RUSTDOC T $<
cmd_rustdoc_test = \
OBJTREE=$(abspath $(objtree)) \
@@ -223,9 +229,10 @@ rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
$(call if_changed,rustdoc_test)
rusttest-kernel: private rustc_target_flags = --extern alloc \
- --extern build_error --extern macros --extern bindings
+ --extern build_error --extern macros --extern bindings --extern uapi
rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
- rusttestlib-build_error rusttestlib-macros rusttestlib-bindings FORCE
+ rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
+ rusttestlib-uapi FORCE
$(call if_changed,rustc_test)
$(call if_changed,rustc_test_library)
@@ -302,6 +309,12 @@ $(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
$(src)/bindgen_parameters FORCE
$(call if_changed_dep,bindgen)
+$(obj)/uapi/uapi_generated.rs: private bindgen_target_flags = \
+ $(shell grep -v '^#\|^$$' $(srctree)/$(src)/bindgen_parameters)
+$(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
+ $(src)/bindgen_parameters FORCE
+ $(call if_changed_dep,bindgen)
+
# See `CFLAGS_REMOVE_helpers.o` above. In addition, Clang on C does not warn
# with `-Wmissing-declarations` (unlike GCC), so it is not strictly needed here
# given it is `libclang`; but for consistency, future Clang changes and/or
@@ -402,10 +415,15 @@ $(obj)/bindings.o: $(src)/bindings/lib.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
$(call if_changed_dep,rustc_library)
+$(obj)/uapi.o: $(src)/uapi/lib.rs \
+ $(obj)/compiler_builtins.o \
+ $(obj)/uapi/uapi_generated.rs FORCE
+ $(call if_changed_dep,rustc_library)
+
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
- --extern build_error --extern macros --extern bindings
+ --extern build_error --extern macros --extern bindings --extern uapi
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
- $(obj)/libmacros.so $(obj)/bindings.o FORCE
+ $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
$(call if_changed_dep,rustc_library)
endif # CONFIG_RUST
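The net effect of the wiring above is a third generated crate, 'uapi',
built like 'bindings' but from 'uapi_helper.h' and linked into
'kernel.o', so kernel Rust code can name UAPI items directly. A hedged
sketch of a consumer (the constant is an illustrative stand-in, not
something this patch generates):

    // Hypothetical driver-side use; `SOME_UAPI_FLAG` is a stand-in
    // for whatever bindgen emits from the UAPI headers.
    fn flags() -> u32 {
        uapi::SOME_UAPI_FLAG
    }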
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index f77c7368d534..fe4fff5064bc 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -122,10 +122,8 @@ use self::spec_from_elem::SpecFromElem;
#[cfg(not(no_global_oom_handling))]
mod spec_from_elem;
-#[cfg(not(no_global_oom_handling))]
use self::set_len_on_drop::SetLenOnDrop;
-#[cfg(not(no_global_oom_handling))]
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
@@ -149,7 +147,8 @@ mod spec_from_iter;
#[cfg(not(no_global_oom_handling))]
use self::spec_extend::SpecExtend;
-#[cfg(not(no_global_oom_handling))]
+use self::spec_extend::TrySpecExtend;
+
mod spec_extend;
/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
@@ -1919,6 +1918,17 @@ impl<T, A: Allocator> Vec<T, A> {
self.len += count;
}
+ /// Tries to append elements to `self` from another buffer.
+ #[inline]
+ unsafe fn try_append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
+ let count = unsafe { (*other).len() };
+ self.try_reserve(count)?;
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ Ok(())
+ }
+
/// Removes the specified range from the vector in bulk, returning all
/// removed elements as an iterator. If the iterator is dropped before
/// being fully consumed, it drops the remaining removed elements.
@@ -2340,6 +2350,45 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
}
}
+ /// Tries to resize the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ /// If you only need to resize to a smaller size, use [`Vec::truncate`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.try_resize(3, "world").unwrap();
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.try_resize(2, 0).unwrap();
+ /// assert_eq!(vec, [1, 2]);
+ ///
+ /// let mut vec = vec![42];
+ /// let result = vec.try_resize(usize::MAX, 0);
+ /// assert!(result.is_err());
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
+ let len = self.len();
+
+ if new_len > len {
+ self.try_extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ Ok(())
+ }
+ }
+
/// Clones and appends all elements in a slice to the `Vec`.
///
/// Iterates over the slice `other`, clones each element, and then appends
@@ -2365,6 +2414,30 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.spec_extend(other.iter())
}
+ /// Tries to clone and append all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.try_extend_from_slice(&[2, 3, 4]).unwrap();
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
+ self.try_spec_extend(other.iter())
+ }
+
/// Copies elements from `src` range to the end of the vector.
///
/// # Panics
@@ -2504,6 +2577,36 @@ impl<T, A: Allocator> Vec<T, A> {
// len set by scope guard
}
}
+
+ /// Try to extend the vector by `n` values, using the given generator.
+ fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around a bug where the compiler
+ // might not realize that the stores through `ptr` and through
+ // `self.set_len()` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ Ok(())
+ }
+ }
}
impl<T: PartialEq, A: Allocator> Vec<T, A> {
@@ -2838,6 +2941,34 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ fn try_extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.try_reserve(lower.saturating_add(1))?;
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+
+ Ok(())
+ }
+
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
new file mode 100644
index 000000000000..448bf5076a0b
--- /dev/null
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: The length field in SetLenOnDrop is a local variable
+// that the optimizer will see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for alias analysis issue #32155
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop { local_len: *len, len }
+ }
+
+ #[inline]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
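The guard above can be exercised standalone; a minimal userspace sketch
(not kernel code) of why deferring the length store helps:

    // Demonstration of the SetLenOnDrop idea: track the length in a
    // local, commit it through the borrow exactly once in `drop`.
    struct SetLenOnDrop<'a> {
        len: &'a mut usize,
        local_len: usize,
    }

    impl Drop for SetLenOnDrop<'_> {
        fn drop(&mut self) {
            *self.len = self.local_len;
        }
    }

    fn main() {
        let mut len = 0usize;
        {
            let mut guard = SetLenOnDrop { local_len: len, len: &mut len };
            for _ in 0..3 {
                // Cheap local increment: no store through the `&mut usize`,
                // so the optimizer does not have to assume it aliases the
                // element writes a real `Vec` would be doing here.
                guard.local_len += 1;
            }
        } // drop() commits 3 back through the borrow
        assert_eq!(len, 3);
    }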
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
new file mode 100644
index 000000000000..5ce2d00991bc
--- /dev/null
+++ b/rust/alloc/vec/spec_extend.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::Allocator;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+#[cfg(not(no_global_oom_handling))]
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+// Specialization trait used for Vec::try_extend
+pub(super) trait TrySpecExtend<T, I> {
+ fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter)
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
+ self.try_extend_desugared(iter)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // Since the loop executes user code which can panic we have to bump the pointer
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.try_reserve(additional)?;
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // Since the loop executes user code which can panic we have to bump the pointer
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ Ok(())
+ } else {
+ Err(TryReserveErrorKind::CapacityOverflow.into())
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+ unsafe {
+ self.append_elements(iterator.as_slice() as _);
+ }
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
+ unsafe {
+ self.try_append_elements(iterator.as_slice() as _)?;
+ }
+ iterator.forget_remaining_elements();
+ Ok(())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ self.try_spec_extend(iterator.cloned())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ unsafe { self.append_elements(slice) };
+ }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
+ let slice = iterator.as_slice();
+ unsafe { self.try_append_elements(slice) }
+ }
+}
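From a caller's perspective the dispatch above is invisible; a hedged
sketch of kernel-side call sites that land in these impls (the `try_*`
methods exist only in this vendored `alloc`, not in upstream stable
Rust; `TryReserveError` is `alloc::collections::TryReserveError`):

    // Sketch: each call resolves to the most specific impl above.
    fn fill(v: &mut Vec<u8>) -> Result<(), TryReserveError> {
        // `u8: Copy`, so this takes the `slice::Iter` fast path:
        // one try_reserve() plus a bulk copy in try_append_elements().
        v.try_extend_from_slice(&[1, 2, 3])?;
        // Grows with clones of `0` via try_extend_with(), or truncates.
        v.try_resize(8, 0)?;
        Ok(())
    }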
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 75d85bd6c592..50e7a76d5455 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -8,6 +8,8 @@
#include <linux/slab.h>
#include <linux/refcount.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
/* `bindgen` gets confused at certain things. */
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
diff --git a/rust/helpers.c b/rust/helpers.c
index 09a4d93f9d62..81e80261d597 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -20,7 +20,12 @@
#include <linux/bug.h>
#include <linux/build_bug.h>
+#include <linux/err.h>
#include <linux/refcount.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched/signal.h>
+#include <linux/wait.h>
__noreturn void rust_helper_BUG(void)
{
@@ -28,6 +33,47 @@ __noreturn void rust_helper_BUG(void)
}
EXPORT_SYMBOL_GPL(rust_helper_BUG);
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+#else
+ spin_lock_init(lock);
+#endif
+}
+EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_wait);
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+
refcount_t rust_helper_REFCOUNT_INIT(int n)
{
return (refcount_t)REFCOUNT_INIT(n);
@@ -46,6 +92,42 @@ bool rust_helper_refcount_dec_and_test(refcount_t *r)
}
EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_current);
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+
/*
* We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
* as the Rust `usize` type, so we can use it in contexts where Rust
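These wrappers exist because bindgen cannot generate bindings for C
macros or static inline functions ('current', 'signal_pending', the
lock initializers, and so on); each helper turns one into a real
exported symbol. On the Rust side the generated bindings drop the
prefix again, roughly in this shape (a sketch of the generated code,
not copied from the tree):

    // Approximate shape of bindings_helpers_generated.rs:
    extern "C" {
        #[link_name = "rust_helper_signal_pending"]
        pub fn signal_pending(t: *mut task_struct) -> core::ffi::c_int;
    }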
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 5b9751d7ff1d..5f4114b30b94 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -72,10 +72,47 @@ pub mod code {
pub struct Error(core::ffi::c_int);
impl Error {
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// It is a bug to pass an out-of-range `errno`. `EINVAL` would
+ /// be returned in such a case.
+ pub(crate) fn from_errno(errno: core::ffi::c_int) -> Error {
+ if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ // TODO: Make it a `WARN_ONCE` once available.
+ crate::pr_warn!(
+ "attempted to create `Error` with out of range `errno`: {}",
+ errno
+ );
+ return code::EINVAL;
+ }
+
+ // INVARIANT: The check above ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// # Safety
+ ///
+ /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
+ unsafe fn from_errno_unchecked(errno: core::ffi::c_int) -> Error {
+ // INVARIANT: The contract ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
/// Returns the kernel error code.
- pub fn to_kernel_errno(self) -> core::ffi::c_int {
+ pub fn to_errno(self) -> core::ffi::c_int {
self.0
}
+
+ /// Returns the error encoded as a pointer.
+ #[allow(dead_code)]
+ pub(crate) fn to_ptr<T>(self) -> *mut T {
+ // SAFETY: self.0 is a valid error due to its invariant.
+ unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ }
}
impl From<AllocError> for Error {
@@ -141,3 +178,101 @@ impl From<core::convert::Infallible> for Error {
/// it should still be modeled as returning a `Result` rather than
/// just an [`Error`].
pub type Result<T = ()> = core::result::Result<T, Error>;
+
+/// Converts an integer as returned by a C kernel function to an error if it's negative, and
+/// `Ok(())` otherwise.
+pub fn to_result(err: core::ffi::c_int) -> Result {
+ if err < 0 {
+ Err(Error::from_errno(err))
+ } else {
+ Ok(())
+ }
+}
+
+/// Transform a kernel "error pointer" to a normal pointer.
+///
+/// Some kernel C API functions return an "error pointer" which optionally
+/// embeds an `errno`. Callers are supposed to check the returned pointer
+/// for errors. This function performs the check and converts the "error pointer"
+/// to a normal pointer in an idiomatic fashion.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_err_ptr;
+/// # use kernel::bindings;
+/// fn devm_platform_ioremap_resource(
+/// pdev: &mut PlatformDevice,
+/// index: u32,
+/// ) -> Result<*mut core::ffi::c_void> {
+/// // SAFETY: FFI call.
+/// unsafe {
+/// from_err_ptr(bindings::devm_platform_ioremap_resource(
+/// pdev.to_ptr(),
+/// index,
+/// ))
+/// }
+/// }
+/// ```
+// TODO: Remove `dead_code` marker once an in-kernel client is available.
+#[allow(dead_code)]
+pub(crate) fn from_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid.
+ let const_ptr: *const core::ffi::c_void = ptr.cast();
+ // SAFETY: The FFI function does not deref the pointer.
+ if unsafe { bindings::IS_ERR(const_ptr) } {
+ // SAFETY: The FFI function does not deref the pointer.
+ let err = unsafe { bindings::PTR_ERR(const_ptr) };
+ // CAST: If `IS_ERR()` returns `true`,
+ // then `PTR_ERR()` is guaranteed to return a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`,
+ // which always fits in an `i16`, as per the invariant above.
+ // And an `i16` always fits in an `i32`. So casting `err` to
+ // an `i32` can never overflow, and is always valid.
+ //
+ // SAFETY: `IS_ERR()` ensures `err` is a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`.
+ #[allow(clippy::unnecessary_cast)]
+ return Err(unsafe { Error::from_errno_unchecked(err as core::ffi::c_int) });
+ }
+ Ok(ptr)
+}
+
+/// Calls a closure returning a [`crate::error::Result<T>`] and converts the result to
+/// a C integer result.
+///
+/// This is useful when calling Rust functions that return [`crate::error::Result<T>`]
+/// from inside `extern "C"` functions that need to return an integer error result.
+///
+/// `T` should be convertible from an `i16` via `From<i16>`.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_result;
+/// # use kernel::bindings;
+/// unsafe extern "C" fn probe_callback(
+/// pdev: *mut bindings::platform_device,
+/// ) -> core::ffi::c_int {
+/// from_result(|| {
+/// let ptr = devm_alloc(pdev)?;
+/// bindings::platform_set_drvdata(pdev, ptr);
+/// Ok(0)
+/// })
+/// }
+/// ```
+// TODO: Remove `dead_code` marker once an in-kernel client is available.
+#[allow(dead_code)]
+pub(crate) fn from_result<T, F>(f: F) -> T
+where
+ T: From<i16>,
+ F: FnOnce() -> Result<T>,
+{
+ match f() {
+ Ok(v) => v,
+ // NO-OVERFLOW: negative `errno`s are no smaller than `-bindings::MAX_ERRNO`,
+ // `-bindings::MAX_ERRNO` fits in an `i16` as per invariant above,
+ // therefore a negative `errno` always fits in an `i16` and will not overflow.
+ Err(e) => T::from(e.to_errno() as i16),
+ }
+}
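Together these establish the two standard conventions for calling C
from Rust; a hedged sketch of both directions (`bindings::foo_*` are
stand-in names, not real bindings):

    // Integer convention: the C function returns 0 or a negative errno.
    fn enable(dev: *mut bindings::foo_device) -> Result {
        // SAFETY: illustrative only; a real caller must uphold the C
        // function's requirements on `dev`.
        to_result(unsafe { bindings::foo_enable(dev) })
    }

    // Pointer convention: a valid pointer or an ERR_PTR-encoded errno.
    fn get_buf(dev: *mut bindings::foo_device) -> Result<*mut core::ffi::c_void> {
        // SAFETY: illustrative only.
        from_err_ptr(unsafe { bindings::foo_get_buf(dev) })
    }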
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
new file mode 100644
index 000000000000..4ebfb08dab11
--- /dev/null
+++ b/rust/kernel/init.rs
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! API to safely and fallibly initialize pinned `struct`s using in-place constructors.
+//!
+//! It also allows in-place initialization of big `struct`s that would otherwise produce a stack
+//! overflow.
+//!
+//! Most `struct`s from the [`sync`] module need to be pinned, because they contain self-referential
+//! `struct`s from C. [Pinning][pinning] is Rust's way of ensuring data does not move.
+//!
+//! # Overview
+//!
+//! To initialize a `struct` with an in-place constructor you will need two things:
+//! - an in-place constructor,
+//! - a memory location that can hold your `struct` (this can be the [stack], an [`Arc<T>`],
+//! [`UniqueArc<T>`], [`Box<T>`] or any other smart pointer that implements [`InPlaceInit`]).
+//!
+//! To get an in-place constructor there are generally three options:
+//! - directly creating an in-place constructor using the [`pin_init!`] macro,
+//! - a custom function/macro returning an in-place constructor provided by someone else,
+//! - using the unsafe function [`pin_init_from_closure()`] to manually create an initializer.
+//!
+//! Aside from pinned initialization, this API also supports in-place construction without pinning,
+//! the macros/types/functions are generally named like the pinned variants without the `pin`
+//! prefix.
+//!
+//! # Examples
+//!
+//! ## Using the [`pin_init!`] macro
+//!
+//! If you want to use [`PinInit`], then you will have to annotate your `struct` with
+//! `#[`[`pin_data`]`]`. It is a macro that uses `#[pin]` as a marker for
+//! [structurally pinned fields]. After doing this, you can then create an in-place constructor via
+//! [`pin_init!`]. The syntax is almost the same as normal `struct` initializers. The difference is
+//! that you need to write `<-` instead of `:` for fields that you want to initialize in-place.
+//!
+//! ```rust
+//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! use kernel::{prelude::*, sync::Mutex, new_mutex};
+//! # use core::pin::Pin;
+//! #[pin_data]
+//! struct Foo {
+//! #[pin]
+//! a: Mutex<usize>,
+//! b: u32,
+//! }
+//!
+//! let foo = pin_init!(Foo {
+//! a <- new_mutex!(42, "Foo::a"),
+//! b: 24,
+//! });
+//! ```
+//!
+//! `foo` is now of the type [`impl PinInit<Foo>`]. We can use any smart pointer that we like
+//! (or just the stack) to actually initialize a `Foo`:
+//!
+//! ```rust
+//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! # use kernel::{prelude::*, sync::Mutex, new_mutex};
+//! # use core::pin::Pin;
+//! # #[pin_data]
+//! # struct Foo {
+//! # #[pin]
+//! # a: Mutex<usize>,
+//! # b: u32,
+//! # }
+//! # let foo = pin_init!(Foo {
+//! # a <- new_mutex!(42, "Foo::a"),
+//! # b: 24,
+//! # });
+//! let foo: Result<Pin<Box<Foo>>> = Box::pin_init(foo);
+//! ```
+//!
+//! For more information see the [`pin_init!`] macro.
+//!
+//! ## Using a custom function/macro that returns an initializer
+//!
+//! Many types from the kernel supply a function/macro that returns an initializer, because the
+//! above method only works for types where you can access the fields.
+//!
+//! ```rust
+//! # use kernel::{new_mutex, sync::{Arc, Mutex}};
+//! let mtx: Result<Arc<Mutex<usize>>> = Arc::pin_init(new_mutex!(42, "example::mtx"));
+//! ```
+//!
+//! To declare an init macro/function you just return an [`impl PinInit<T, E>`]:
+//!
+//! ```rust
+//! # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+//! # use kernel::{sync::Mutex, prelude::*, new_mutex, init::PinInit, try_pin_init};
+//! #[pin_data]
+//! struct DriverData {
+//! #[pin]
+//! status: Mutex<i32>,
+//! buffer: Box<[u8; 1_000_000]>,
+//! }
+//!
+//! impl DriverData {
+//! fn new() -> impl PinInit<Self, Error> {
+//! try_pin_init!(Self {
+//! status <- new_mutex!(0, "DriverData::status"),
+//! buffer: Box::init(kernel::init::zeroed())?,
+//! })
+//! }
+//! }
+//! ```
+//!
+//! ## Manual creation of an initializer
+//!
+//! Often when working with primitives the previous approaches are not sufficient. That is where
+//! [`pin_init_from_closure()`] comes in. This `unsafe` function allows you to create an
+//! [`impl PinInit<T, E>`] directly from a closure. Of course you have to ensure that the closure
+//! actually does the initialization in the correct way. Here are the things to look out for
+//! (we are calling the parameter to the closure `slot`):
+//! - when the closure returns `Ok(())`, then it has completed the initialization successfully, so
+//! `slot` now contains a valid bit pattern for the type `T`,
+//! - when the closure returns `Err(e)`, then the caller may deallocate the memory at `slot`, so
+//! you need to take care to clean up anything if your initialization fails mid-way,
+//! - you may assume that `slot` will stay pinned even after the closure returns until `drop` of
+//! `slot` gets called.
+//!
+//! ```rust
+//! use kernel::{prelude::*, init};
+//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
+//! # mod bindings {
+//! # pub struct foo;
+//! # pub unsafe fn init_foo(_ptr: *mut foo) {}
+//! # pub unsafe fn destroy_foo(_ptr: *mut foo) {}
+//! # pub unsafe fn enable_foo(_ptr: *mut foo, _flags: u32) -> i32 { 0 }
+//! # }
+//! /// # Invariants
+//! ///
+//! /// `foo` is always initialized
+//! #[pin_data(PinnedDrop)]
+//! pub struct RawFoo {
+//! #[pin]
+//! foo: Opaque<bindings::foo>,
+//! #[pin]
+//! _p: PhantomPinned,
+//! }
+//!
+//! impl RawFoo {
+//! pub fn new(flags: u32) -> impl PinInit<Self, Error> {
+//! // SAFETY:
+//! // - when the closure returns `Ok(())`, then it has successfully initialized and
+//! // enabled `foo`,
+//! // - when it returns `Err(e)`, then it has cleaned up before
+//! unsafe {
+//! init::pin_init_from_closure(move |slot: *mut Self| {
+//! // `slot` contains uninit memory, avoid creating a reference.
+//! let foo = addr_of_mut!((*slot).foo);
+//!
+//! // Initialize the `foo`
+//! bindings::init_foo(Opaque::raw_get(foo));
+//!
+//! // Try to enable it.
+//! let err = bindings::enable_foo(Opaque::raw_get(foo), flags);
+//! if err != 0 {
+//! // Enabling has failed, first clean up the foo and then return the error.
+//! bindings::destroy_foo(Opaque::raw_get(foo));
+//! return Err(Error::from_errno(err));
+//! }
+//!
+//! // All fields of `RawFoo` have been initialized, since `_p` is a ZST.
+//! Ok(())
+//! })
+//! }
+//! }
+//! }
+//!
+//! #[pinned_drop]
+//! impl PinnedDrop for RawFoo {
+//! fn drop(self: Pin<&mut Self>) {
+//! // SAFETY: Since `foo` is initialized, destroying is safe.
+//! unsafe { bindings::destroy_foo(self.foo.get()) };
+//! }
+//! }
+//! ```
+//!
+//! For the special case where initializing a field is a single FFI-function call that cannot fail,
+//! there exists the helper function [`Opaque::ffi_init`]. This function initializes a single
+//! [`Opaque`] field by just delegating to the supplied closure. You can use it in combination
+//! with [`pin_init!`].
+//!
+//! For more information on how to use [`pin_init_from_closure()`], take a look at the uses inside
+//! the `kernel` crate. The [`sync`] module is a good starting point.
+//!
+//! [`sync`]: kernel::sync
+//! [pinning]: https://doc.rust-lang.org/std/pin/index.html
+//! [structurally pinned fields]:
+//! https://doc.rust-lang.org/std/pin/index.html#pinning-is-structural-for-field
+//! [stack]: crate::stack_pin_init
+//! [`Arc<T>`]: crate::sync::Arc
+//! [`impl PinInit<Foo>`]: PinInit
+//! [`impl PinInit<T, E>`]: PinInit
+//! [`impl Init<T, E>`]: Init
+//! [`Opaque`]: kernel::types::Opaque
+//! [`Opaque::ffi_init`]: kernel::types::Opaque::ffi_init
+//! [`pin_data`]: ::macros::pin_data
+
+use crate::{
+ error::{self, Error},
+ sync::UniqueArc,
+};
+use alloc::boxed::Box;
+use core::{
+ alloc::AllocError,
+ cell::Cell,
+ convert::Infallible,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ num::*,
+ pin::Pin,
+ ptr::{self, NonNull},
+};
+
+#[doc(hidden)]
+pub mod __internal;
+#[doc(hidden)]
+pub mod macros;
+
+/// Initialize and pin a type directly on the stack.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
+/// # use macros::pin_data;
+/// # use core::pin::Pin;
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: Mutex<usize>,
+/// b: Bar,
+/// }
+///
+/// #[pin_data]
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_pin_init!(let foo = pin_init!(Foo {
+/// a <- new_mutex!(42),
+/// b: Bar {
+/// x: 64,
+/// },
+/// }));
+/// let foo: Pin<&mut Foo> = foo;
+/// pr_info!("a: {}", &*foo.a.lock());
+/// ```
+///
+/// # Syntax
+///
+/// A normal `let` binding with optional type annotation. The expression is expected to implement
+/// [`PinInit`]/[`Init`] with the error type [`Infallible`]. If you want to use a different error
+/// type, then use [`stack_try_pin_init!`].
+#[macro_export]
+macro_rules! stack_pin_init {
+ (let $var:ident $(: $t:ty)? = $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = match $crate::init::__internal::StackInit::init($var, val) {
+ Ok(res) => res,
+ Err(x) => {
+ let x: ::core::convert::Infallible = x;
+ match x {}
+ }
+ };
+ };
+}
+
+/// Initialize and pin a type directly on the stack.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
+/// # use macros::pin_data;
+/// # use core::{alloc::AllocError, pin::Pin};
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: Mutex<usize>,
+/// b: Box<Bar>,
+/// }
+///
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {
+/// a <- new_mutex!(42),
+/// b: Box::try_new(Bar {
+/// x: 64,
+/// })?,
+/// }));
+/// let foo = foo.unwrap();
+/// pr_info!("a: {}", &*foo.a.lock());
+/// ```
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
+/// # use macros::pin_data;
+/// # use core::{alloc::AllocError, pin::Pin};
+/// #[pin_data]
+/// struct Foo {
+/// #[pin]
+/// a: Mutex<usize>,
+/// b: Box<Bar>,
+/// }
+///
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {
+/// a <- new_mutex!(42),
+/// b: Box::try_new(Bar {
+/// x: 64,
+/// })?,
+/// }));
+/// pr_info!("a: {}", &*foo.a.lock());
+/// # Ok::<_, AllocError>(())
+/// ```
+///
+/// # Syntax
+///
+/// A normal `let` binding with optional type annotation. The expression is expected to implement
+/// [`PinInit`]/[`Init`]. This macro assigns a `Result` to the given variable; adding a `?` after the
+/// `=` will propagate the error.
+#[macro_export]
+macro_rules! stack_try_pin_init {
+ (let $var:ident $(: $t:ty)? = $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = $crate::init::__internal::StackInit::init($var, val);
+ };
+ (let $var:ident $(: $t:ty)? =? $val:expr) => {
+ let val = $val;
+ let mut $var = ::core::pin::pin!($crate::init::__internal::StackInit$(::<$t>)?::uninit());
+ let mut $var = $crate::init::__internal::StackInit::init($var, val)?;
+ };
+}
+
+/// Construct an in-place, pinned initializer for `struct`s.
+///
+/// This macro defaults the error to [`Infallible`]. If you need [`Error`], then use
+/// [`try_pin_init!`].
+///
+/// The syntax is almost identical to that of a normal `struct` initializer:
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, macros::pin_data, init::*};
+/// # use core::pin::Pin;
+/// #[pin_data]
+/// struct Foo {
+/// a: usize,
+/// b: Bar,
+/// }
+///
+/// #[pin_data]
+/// struct Bar {
+/// x: u32,
+/// }
+///
+/// # fn demo() -> impl PinInit<Foo> {
+/// let a = 42;
+///
+/// let initializer = pin_init!(Foo {
+/// a,
+/// b: Bar {
+/// x: 64,
+/// },
+/// });
+/// # initializer }
+/// # Box::pin_init(demo()).unwrap();
+/// ```
+///
+/// Arbitrary Rust expressions can be used to set the value of a field.
+///
+/// The fields are initialized in the order that they appear in the initializer. So it is possible
+/// to read already initialized fields using raw pointers.
+///
+/// IMPORTANT: You are not allowed to create references to fields of the struct inside of the
+/// initializer.
+///
+/// # Init-functions
+///
+/// When working with this API it is often desired to let others construct your types without
+/// giving access to all fields. This is where you would normally write a plain function `new`
+/// that would return a new instance of your type. With this API that is also possible.
+/// However, there are a few extra things to keep in mind.
+///
+/// To create an initializer function, simply declare it like this:
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, prelude::*, init::*};
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// impl Foo {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// a: 42,
+/// b: Bar {
+/// x: 64,
+/// },
+/// })
+/// }
+/// }
+/// ```
+///
+/// Users of `Foo` can now create it like this:
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, macros::pin_data, init::*};
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// # impl Foo {
+/// # fn new() -> impl PinInit<Self> {
+/// # pin_init!(Self {
+/// # a: 42,
+/// # b: Bar {
+/// # x: 64,
+/// # },
+/// # })
+/// # }
+/// # }
+/// let foo = Box::pin_init(Foo::new());
+/// ```
+///
+/// They can also easily embed it into their own `struct`s:
+///
+/// ```rust
+/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
+/// # use kernel::{init, pin_init, macros::pin_data, init::*};
+/// # use core::pin::Pin;
+/// # #[pin_data]
+/// # struct Foo {
+/// # a: usize,
+/// # b: Bar,
+/// # }
+/// # #[pin_data]
+/// # struct Bar {
+/// # x: u32,
+/// # }
+/// # impl Foo {
+/// # fn new() -> impl PinInit<Self> {
+/// # pin_init!(Self {
+/// # a: 42,
+/// # b: Bar {
+/// # x: 64,
+/// # },
+/// # })
+/// # }
+/// # }
+/// #[pin_data]
+/// struct FooContainer {
+/// #[pin]
+/// foo1: Foo,
+/// #[pin]
+/// foo2: Foo,
+/// other: u32,
+/// }
+///
+/// impl FooContainer {
+/// fn new(other: u32) -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// foo1 <- Foo::new(),
+/// foo2 <- Foo::new(),
+/// other,
+/// })
+/// }
+/// }
+/// ```
+///
+/// Here we see that when using `pin_init!` with `PinInit`, one needs to write `<-` instead of `:`.
+/// This signifies that the given field is initialized in-place. As with `struct` initializers, just
+/// writing the field (in this case `other`) without `:` or `<-` means `other: other,`.
+///
+/// # Syntax
+///
+/// As already mentioned in the examples above, inside of `pin_init!` a `struct` initializer with
+/// the following modifications is expected:
+/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
+/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
+/// pointer named `this` inside of the initializer.
+///
+/// For instance:
+///
+/// ```rust
+/// # use kernel::pin_init;
+/// # use macros::pin_data;
+/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
+/// #[pin_data]
+/// struct Buf {
+/// // `ptr` points into `buf`.
+/// ptr: *mut u8,
+/// buf: [u8; 64],
+/// #[pin]
+/// pin: PhantomPinned,
+/// }
+/// pin_init!(&this in Buf {
+/// buf: [0; 64],
+/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
+/// pin: PhantomPinned,
+/// });
+/// ```
+///
+/// [`try_pin_init!`]: kernel::try_pin_init
+/// [`NonNull<Self>`]: core::ptr::NonNull
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `__internal` inside of `init/__internal.rs`.
+#[macro_export]
+macro_rules! pin_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_pin_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)?),
+ @fields($($fields)*),
+ @error(::core::convert::Infallible),
+ )
+ };
+}
+
+/// Construct an in-place, fallible pinned initializer for `struct`s.
+///
+/// If the initialization can complete without error (or [`Infallible`]), then use [`pin_init!`].
+///
+/// You can use the `?` operator or use `return Err(err)` inside the initializer to stop
+/// initialization and return the error.
+///
+/// IMPORTANT: if you have `unsafe` code inside of the initializer you have to ensure that when
+/// initialization fails, the memory can be safely deallocated without any further modifications.
+///
+/// This macro defaults the error to [`Error`].
+///
+/// The syntax is identical to [`pin_init!`] with the following exception: you can append `? $type`
+/// after the `struct` initializer to specify the error type you want to use.
+///
+/// # Examples
+///
+/// ```rust
+/// # #![feature(new_uninit)]
+/// use kernel::{init::{self, PinInit}, error::Error};
+/// #[pin_data]
+/// struct BigBuf {
+/// big: Box<[u8; 1024 * 1024 * 1024]>,
+/// small: [u8; 1024 * 1024],
+/// ptr: *mut u8,
+/// }
+///
+/// impl BigBuf {
+/// fn new() -> impl PinInit<Self, Error> {
+/// try_pin_init!(Self {
+/// big: Box::init(init::zeroed())?,
+/// small: [0; 1024 * 1024],
+/// ptr: core::ptr::null_mut(),
+/// }? Error)
+/// }
+/// }
+/// ```
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `__internal` inside of `init/__internal.rs`.
+#[macro_export]
+macro_rules! try_pin_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_pin_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)? ),
+ @fields($($fields)*),
+ @error($crate::error::Error),
+ )
+ };
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }? $err:ty) => {
+ $crate::try_pin_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)? ),
+ @fields($($fields)*),
+ @error($err),
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:ident $(::<$($generics:ty),*>)?),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ ) => {{
+ // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
+ // type and shadow it later when we insert the arbitrary user code. That way there will be
+ // no possibility of returning without `unsafe`.
+ struct __InitOk;
+ // Get the pin data from the supplied type.
+ let data = unsafe {
+ use $crate::init::__internal::HasPinData;
+ $t$(::<$($generics),*>)?::__pin_data()
+ };
+ // Ensure that `data` really is of type `PinData` and help with type inference:
+ let init = $crate::init::__internal::PinData::make_closure::<_, __InitOk, $err>(
+ data,
+ move |slot| {
+ {
+ // Shadow the structure so it cannot be used to return early.
+ struct __InitOk;
+ // Create the `this` so it can be referenced by the user inside of the
+ // expressions creating the individual fields.
+ $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
+ // Initialize every field.
+ $crate::try_pin_init!(init_slot:
+ @data(data),
+ @slot(slot),
+ @munch_fields($($fields)*,),
+ );
+ // We use unreachable code to ensure that all fields have been mentioned exactly
+ // once; this struct initializer will still be type-checked and complain with a
+ // very natural error message if a field is forgotten/mentioned more than once.
+ #[allow(unreachable_code, clippy::diverging_sub_expression)]
+ if false {
+ $crate::try_pin_init!(make_initializer:
+ @slot(slot),
+ @type_name($t),
+ @munch_fields($($fields)*,),
+ @acc(),
+ );
+ }
+ // Forget all guards, since initialization was a success.
+ $crate::try_pin_init!(forget_guards:
+ @munch_fields($($fields)*,),
+ );
+ }
+ Ok(__InitOk)
+ }
+ );
+ let init = move |slot| -> ::core::result::Result<(), $err> {
+ init(slot).map(|__InitOk| ())
+ };
+ let init = unsafe { $crate::init::pin_init_from_closure::<_, $err>(init) };
+ init
+ }};
+ (init_slot:
+ @data($data:ident),
+ @slot($slot:ident),
+ @munch_fields($(,)?),
+ ) => {
+ // Endpoint of munching, no fields are left.
+ };
+ (init_slot:
+ @data($data:ident),
+ @slot($slot:ident),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let $field = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure; we
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), $field)? };
+ // Create the drop guard.
+ //
+ // We only give access to `&DropGuard`, so it cannot be forgotten via safe code.
+ //
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let $field = &unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::try_pin_init!(init_slot:
+ @data($data),
+ @slot($slot),
+ @munch_fields($($rest)*),
+ );
+ };
+ (init_slot:
+ @data($data:ident),
+ @slot($slot:ident),
+ // Direct value init, this is safe for every field.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ // Create the drop guard:
+ //
+ // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
+ //
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let $field = &unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::try_pin_init!(init_slot:
+ @data($data),
+ @slot($slot),
+ @munch_fields($($rest)*),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields($(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer.
+ // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
+ // get the correct type inference here:
+ unsafe {
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ });
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::try_pin_init!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::try_pin_init!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+ (forget_guards:
+ @munch_fields($(,)?),
+ ) => {
+ // Munching finished.
+ };
+ (forget_guards:
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ unsafe { $crate::init::__internal::DropGuard::forget($field) };
+
+ $crate::try_pin_init!(forget_guards:
+ @munch_fields($($rest)*),
+ );
+ };
+ (forget_guards:
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ unsafe { $crate::init::__internal::DropGuard::forget($field) };
+
+ $crate::try_pin_init!(forget_guards:
+ @munch_fields($($rest)*),
+ );
+ };
+}
+
+/// Construct an in-place initializer for `struct`s.
+///
+/// This macro defaults the error to [`Infallible`]. If you need [`Error`], then use
+/// [`try_init!`].
+///
+/// The syntax is identical to [`pin_init!`] and its safety caveats also apply:
+/// - `unsafe` code must guarantee either full initialization or return an error and allow
+/// deallocation of the memory.
+/// - the fields are initialized in the order given in the initializer.
+/// - no references to fields are allowed to be created inside of the initializer.
+///
+/// This initializer is for initializing data in-place that might later be moved. If you want to
+/// pin-initialize, use [`pin_init!`].
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `__internal` inside of `init/__internal.rs`.
+#[macro_export]
+macro_rules! init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)?),
+ @fields($($fields)*),
+ @error(::core::convert::Infallible),
+ )
+ }
+}
+
+/// Construct an in-place fallible initializer for `struct`s.
+///
+/// This macro defaults the error to [`Error`]. If you need [`Infallible`], then use
+/// [`init!`].
+///
+/// The syntax is identical to [`try_pin_init!`]. If you want to specify a custom error,
+/// append `? $type` after the `struct` initializer.
+/// The safety caveats from [`try_pin_init!`] also apply:
+/// - `unsafe` code must guarantee either full initialization or return an error and allow
+/// deallocation of the memory.
+/// - the fields are initialized in the order given in the initializer.
+/// - no references to fields are allowed to be created inside of the initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{init::PinInit, error::Error, InPlaceInit};
+/// struct BigBuf {
+/// big: Box<[u8; 1024 * 1024 * 1024]>,
+/// small: [u8; 1024 * 1024],
+/// }
+///
+/// impl BigBuf {
+/// fn new() -> impl Init<Self, Error> {
+/// try_init!(Self {
+/// big: Box::init(zeroed())?,
+/// small: [0; 1024 * 1024],
+/// }? Error)
+/// }
+/// }
+/// ```
+// For a detailed example of how this macro works, see the module documentation of the hidden
+// module `__internal` inside of `init/__internal.rs`.
+#[macro_export]
+macro_rules! try_init {
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }) => {
+ $crate::try_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)?),
+ @fields($($fields)*),
+ @error($crate::error::Error),
+ )
+ };
+ ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+ $($fields:tt)*
+ }? $err:ty) => {
+ $crate::try_init!(
+ @this($($this)?),
+ @typ($t $(::<$($generics),*>)?),
+ @fields($($fields)*),
+ @error($err),
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:ident $(::<$($generics:ty),*>)?),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ ) => {{
+ // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
+ // type and shadow it later when we insert the arbitrary user code. That way there will be
+ // no possibility of returning without `unsafe`.
+ struct __InitOk;
+ // Get the init data from the supplied type.
+ let data = unsafe {
+ use $crate::init::__internal::HasInitData;
+ $t$(::<$($generics),*>)?::__init_data()
+ };
+ // Ensure that `data` really is of type `InitData` and help with type inference:
+ let init = $crate::init::__internal::InitData::make_closure::<_, __InitOk, $err>(
+ data,
+ move |slot| {
+ {
+ // Shadow the structure so it cannot be used to return early.
+ struct __InitOk;
+ // Create the `this` so it can be referenced by the user inside of the
+ // expressions creating the individual fields.
+ $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
+ // Initialize every field.
+ $crate::try_init!(init_slot:
+ @slot(slot),
+ @munch_fields($($fields)*,),
+ );
+                // We use unreachable code to ensure that all fields have been mentioned exactly
+                // once; this struct initializer will still be type-checked and complain with a
+                // very natural error message if a field is forgotten/mentioned more than once.
+ #[allow(unreachable_code, clippy::diverging_sub_expression)]
+ if false {
+ $crate::try_init!(make_initializer:
+ @slot(slot),
+ @type_name($t),
+ @munch_fields($($fields)*,),
+ @acc(),
+ );
+ }
+ // Forget all guards, since initialization was a success.
+ $crate::try_init!(forget_guards:
+ @munch_fields($($fields)*,),
+ );
+ }
+ Ok(__InitOk)
+ }
+ );
+ let init = move |slot| -> ::core::result::Result<(), $err> {
+ init(slot).map(|__InitOk| ())
+ };
+ let init = unsafe { $crate::init::init_from_closure::<_, $err>(init) };
+ init
+ }};
+ (init_slot:
+ @slot($slot:ident),
+ @munch_fields( $(,)?),
+ ) => {
+ // Endpoint of munching, no fields are left.
+ };
+ (init_slot:
+ @slot($slot:ident),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let $field = $val;
+ // Call the initializer.
+ //
+        // SAFETY: `slot` is valid, because we are inside of an initializer closure; we
+        // return when an error/panic occurs.
+ unsafe {
+ $crate::init::Init::__init($field, ::core::ptr::addr_of_mut!((*$slot).$field))?;
+ }
+ // Create the drop guard.
+ //
+ // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
+ //
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let $field = &unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::try_init!(init_slot:
+ @slot($slot),
+ @munch_fields($($rest)*),
+ );
+ };
+ (init_slot:
+ @slot($slot:ident),
+ // Direct value init.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ $(let $field = $val;)?
+ // Call the initializer.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ // Create the drop guard.
+ //
+ // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
+ //
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let $field = &unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::try_init!(init_slot:
+ @slot($slot),
+ @munch_fields($($rest)*),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields( $(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer.
+ // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
+ // get the correct type inference here:
+ unsafe {
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ });
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::try_init!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)*$field: ::core::panic!(),),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:ident),
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::try_init!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)*$field: ::core::panic!(),),
+ );
+ };
+ (forget_guards:
+ @munch_fields($(,)?),
+ ) => {
+ // Munching finished.
+ };
+ (forget_guards:
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ unsafe { $crate::init::__internal::DropGuard::forget($field) };
+
+ $crate::try_init!(forget_guards:
+ @munch_fields($($rest)*),
+ );
+ };
+ (forget_guards:
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ unsafe { $crate::init::__internal::DropGuard::forget($field) };
+
+ $crate::try_init!(forget_guards:
+ @munch_fields($($rest)*),
+ );
+ };
+}
+
+/// A pin-initializer for the type `T`.
+///
+/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
+/// [`InPlaceInit::pin_init`] function of a smart pointer like [`Arc<T>`] on this.
+///
+/// Also see the [module description](self).
+///
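+/// # Examples
+///
+/// A short sketch of typical use, assuming a struct `Foo` annotated with `#[pin_data]` and a
+/// constructor returning `impl PinInit<Self>` (both hypothetical):
+///
+/// ```rust,ignore
+/// let foo = Arc::pin_init(Foo::new())?;
+/// stack_pin_init!(let bar = Foo::new());
+/// ```
+///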
+/// # Safety
+///
+/// When implementing this trait you will need to take great care. There are also probably very
+/// few cases where a manual implementation is necessary. Use [`pin_init_from_closure`] where
+/// possible.
+///
+/// The [`PinInit::__pinned_init`] function:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned `slot`; this means:
+///     - `slot` can be deallocated without UB occurring,
+///     - `slot` does not need to be dropped,
+///     - `slot` is not partially initialized.
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
+///
+/// [`Arc<T>`]: crate::sync::Arc
+/// [`Arc::pin_init`]: crate::sync::Arc::pin_init
+#[must_use = "An initializer must be used in order to create its value."]
+pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+ /// Initializes `slot`.
+ ///
+ /// # Safety
+ ///
+ /// - `slot` is a valid pointer to uninitialized memory.
+    /// - the caller does not touch `slot` when `Err` is returned; they are only permitted to
+    ///   deallocate.
+ /// - `slot` will not move until it is dropped, i.e. it will be pinned.
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
+}
+
+/// An initializer for `T`.
+///
+/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
+/// be [`Box<T>`], [`Arc<T>`], [`UniqueArc<T>`] or even the stack (see [`stack_pin_init!`]). Use the
+/// [`InPlaceInit::init`] function of a smart pointer like [`Arc<T>`] on this. Because
+/// [`PinInit<T, E>`] is a super trait, you can use every function that takes it as well.
+///
+/// Also see the [module description](self).
+///
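+/// # Examples
+///
+/// A short sketch, reusing the `BigBuf` type from the [`try_init!`] example (usage inside a
+/// function returning a kernel `Result` is assumed):
+///
+/// ```rust,ignore
+/// let buf: Box<BigBuf> = Box::init(BigBuf::new())?;
+/// ```
+///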
+/// # Safety
+///
+/// When implementing this trait you will need to take great care. There are also probably very
+/// few cases where a manual implementation is necessary. Use [`init_from_closure`] where possible.
+///
+/// The [`Init::__init`] function:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned `slot`; this means:
+///     - `slot` can be deallocated without UB occurring,
+///     - `slot` does not need to be dropped,
+///     - `slot` is not partially initialized.
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
+///
+/// The `__pinned_init` function from the supertrait [`PinInit`] needs to execute the exact same
+/// code as `__init`.
+///
+/// Contrary to its supertrait [`PinInit<T, E>`], the caller is allowed to
+/// move the pointee after initialization.
+///
+/// [`Arc<T>`]: crate::sync::Arc
+#[must_use = "An initializer must be used in order to create its value."]
+pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
+ /// Initializes `slot`.
+ ///
+ /// # Safety
+ ///
+ /// - `slot` is a valid pointer to uninitialized memory.
+    /// - the caller does not touch `slot` when `Err` is returned; they are only permitted to
+    ///   deallocate.
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
+}
+
+// SAFETY: Every in-place initializer can also be used as a pin-initializer.
+unsafe impl<T: ?Sized, E, I> PinInit<T, E> for I
+where
+ I: Init<T, E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: `__init` meets the same requirements as `__pinned_init`, except that it does not
+ // require `slot` to not move after init.
+ unsafe { self.__init(slot) }
+ }
+}
+
+/// Creates a new [`PinInit<T, E>`] from the given closure.
+///
+/// # Safety
+///
+/// The closure:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned `slot`; this means:
+///     - `slot` can be deallocated without UB occurring,
+///     - `slot` does not need to be dropped,
+///     - `slot` is not partially initialized.
+/// - may assume that `slot` does not move if `T: !Unpin`,
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
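+///
+/// # Examples
+///
+/// A hedged sketch of a manual initializer that zero-initializes a `u64` (purely illustrative;
+/// [`zeroed`] would normally be used for this):
+///
+/// ```rust,ignore
+/// let closure = |slot: *mut u64| {
+///     // SAFETY: `slot` is valid for writes, per the contract documented above.
+///     unsafe { slot.write(0) };
+///     Ok(())
+/// };
+/// // SAFETY: The closure above fully initializes `slot` and never errors.
+/// let init = unsafe { pin_init_from_closure::<u64, core::convert::Infallible>(closure) };
+/// ```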
+#[inline]
+pub const unsafe fn pin_init_from_closure<T: ?Sized, E>(
+ f: impl FnOnce(*mut T) -> Result<(), E>,
+) -> impl PinInit<T, E> {
+ __internal::InitClosure(f, PhantomData)
+}
+
+/// Creates a new [`Init<T, E>`] from the given closure.
+///
+/// # Safety
+///
+/// The closure:
+/// - returns `Ok(())` if it initialized every field of `slot`,
+/// - returns `Err(err)` if it encountered an error and then cleaned `slot`; this means:
+///     - `slot` can be deallocated without UB occurring,
+///     - `slot` does not need to be dropped,
+///     - `slot` is not partially initialized.
+/// - the `slot` may move after initialization,
+/// - while constructing the `T` at `slot` it upholds the pinning invariants of `T`.
+#[inline]
+pub const unsafe fn init_from_closure<T: ?Sized, E>(
+ f: impl FnOnce(*mut T) -> Result<(), E>,
+) -> impl Init<T, E> {
+ __internal::InitClosure(f, PhantomData)
+}
+
+/// An initializer that leaves the memory uninitialized.
+///
+/// The initializer is a no-op. The `slot` memory is not changed.
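+///
+/// This is mostly useful for reserving [`MaybeUninit`] storage in-place; a sketch (the buffer
+/// size is arbitrary):
+///
+/// ```rust,ignore
+/// let buf: Box<MaybeUninit<[u8; 4096]>> =
+///     Box::try_init(uninit::<[u8; 4096], AllocError>())?;
+/// ```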
+#[inline]
+pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
+ // SAFETY: The memory is allowed to be uninitialized.
+ unsafe { init_from_closure(|_| Ok(())) }
+}
+
+// SAFETY: Every type can be initialized by-value.
+unsafe impl<T, E> Init<T, E> for T {
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ unsafe { slot.write(self) };
+ Ok(())
+ }
+}
+
+/// Smart pointer that can initialize memory in-place.
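+///
+/// A short sketch of the entry points; `Foo` stands in for any type with suitable constructors:
+///
+/// ```rust,ignore
+/// // Pin-initialize, mapping the error into the kernel `Error` type:
+/// let p: Pin<Box<Foo>> = Box::pin_init(Foo::new())?;
+/// // In-place initialize, keeping a caller-chosen error type:
+/// let b: Box<Foo> = Box::try_init::<Error>(Foo::new_fallible())?;
+/// ```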
+pub trait InPlaceInit<T>: Sized {
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
+ /// type.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Pin<Self>>
+ where
+ Error: From<E>,
+ {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ pin_init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
+ };
+ Self::try_pin_init(init)
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>;
+
+ /// Use the given initializer to in-place initialize a `T`.
+ fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ // SAFETY: We delegate to `init` and only change the error type.
+ let init = unsafe {
+ init_from_closure(|slot| init.__pinned_init(slot).map_err(|e| Error::from(e)))
+ };
+ Self::try_init(init)
+ }
+}
+
+impl<T> InPlaceInit<T> for Box<T> {
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = Box::try_new_uninit()?;
+ let slot = this.as_mut_ptr();
+        // SAFETY: When `init` errors/panics, `slot` will get deallocated but not dropped;
+        // `slot` is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { this.assume_init() }.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = Box::try_new_uninit()?;
+ let slot = this.as_mut_ptr();
+        // SAFETY: When `init` errors/panics, `slot` will get deallocated but not dropped;
+        // `slot` is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { this.assume_init() })
+ }
+}
+
+impl<T> InPlaceInit<T> for UniqueArc<T> {
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>) -> Result<Pin<Self>, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = UniqueArc::try_new_uninit()?;
+ let slot = this.as_mut_ptr();
+        // SAFETY: When `init` errors/panics, `slot` will get deallocated but not dropped;
+        // `slot` is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { this.assume_init() }.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ let mut this = UniqueArc::try_new_uninit()?;
+ let slot = this.as_mut_ptr();
+        // SAFETY: When `init` errors/panics, `slot` will get deallocated but not dropped;
+        // `slot` is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { this.assume_init() })
+ }
+}
+
+/// Trait facilitating pinned destruction.
+///
+/// Use [`pinned_drop`] to implement this trait safely:
+///
+/// ```rust
+/// # use kernel::sync::Mutex;
+/// use kernel::macros::pinned_drop;
+/// use core::pin::Pin;
+/// #[pin_data(PinnedDrop)]
+/// struct Foo {
+///     #[pin]
+///     mtx: Mutex<usize>,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for Foo {
+///     fn drop(self: Pin<&mut Self>) {
+///         pr_info!("Foo is being dropped!");
+///     }
+/// }
+/// ```
+///
+/// # Safety
+///
+/// This trait must be implemented via the [`pinned_drop`] proc-macro attribute on the impl.
+///
+/// [`pinned_drop`]: kernel::macros::pinned_drop
+pub unsafe trait PinnedDrop: __internal::HasPinData {
+ /// Executes the pinned destructor of this type.
+ ///
+    /// While this function is marked safe, it is actually unsafe to call it manually. For this
+    /// reason it takes an additional parameter whose type can only be constructed by `unsafe`
+    /// code; this prevents the function from being called where it should not be.
+ ///
+ /// This extra parameter will be generated by the `#[pinned_drop]` proc-macro attribute
+ /// automatically.
+ fn drop(self: Pin<&mut Self>, only_call_from_drop: __internal::OnlyCallFromDrop);
+}
+
+/// Marker trait for types that can be initialized by writing just zeroes.
+///
+/// # Safety
+///
+/// The bit pattern consisting of only zeroes is a valid bit pattern for this type. In other words,
+/// this is not UB:
+///
+/// ```rust,ignore
+/// let val: Self = unsafe { core::mem::zeroed() };
+/// ```
+pub unsafe trait Zeroable {}
+
+/// Create a new zeroed T.
+///
+/// The returned initializer will write `0x00` to every byte of the given `slot`.
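+///
+/// # Examples
+///
+/// A small sketch (the `Flags` struct is hypothetical and is marked [`Zeroable`] by hand):
+///
+/// ```rust,ignore
+/// struct Flags {
+///     bits: u64,
+/// }
+///
+/// // SAFETY: `Flags` only contains a `u64`, for which all zeros is a valid bit pattern.
+/// unsafe impl Zeroable for Flags {}
+///
+/// let flags = Box::init(zeroed::<Flags>())?;
+/// assert_eq!(flags.bits, 0);
+/// ```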
+#[inline]
+pub fn zeroed<T: Zeroable>() -> impl Init<T> {
+ // SAFETY: Because `T: Zeroable`, all bytes zero is a valid bit pattern for `T`
+ // and because we write all zeroes, the memory is initialized.
+ unsafe {
+ init_from_closure(|slot: *mut T| {
+ slot.write_bytes(0, 1);
+ Ok(())
+ })
+ }
+}
+
+macro_rules! impl_zeroable {
+ ($($({$($generics:tt)*})? $t:ty, )*) => {
+ $(unsafe impl$($($generics)*)? Zeroable for $t {})*
+ };
+}
+
+impl_zeroable! {
+    // SAFETY: For all of these primitive types, the all-zero bit pattern is a valid value.
+ bool,
+ char,
+ u8, u16, u32, u64, u128, usize,
+ i8, i16, i32, i64, i128, isize,
+ f32, f64,
+
+ // SAFETY: These are ZSTs, there is nothing to zero.
+ {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, Infallible, (),
+
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} MaybeUninit<T>,
+
+ // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
+ Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
+ Option<NonZeroU128>, Option<NonZeroUsize>,
+ Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
+ Option<NonZeroI128>, Option<NonZeroIsize>,
+
+ // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
+ //
+ // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant.
+ {<T: ?Sized>} Option<NonNull<T>>,
+ {<T: ?Sized>} Option<Box<T>>,
+
+ // SAFETY: `null` pointer is valid.
+ //
+ // We cannot use `T: ?Sized`, since the VTABLE pointer part of fat pointers is not allowed to be
+ // null.
+ //
+ // When `Pointee` gets stabilized, we could use
+ // `T: ?Sized where <T as Pointee>::Metadata: Zeroable`
+ {<T>} *mut T, {<T>} *const T,
+
+ // SAFETY: `null` pointer is valid and the metadata part of these fat pointers is allowed to be
+ // zero.
+ {<T>} *mut [T], {<T>} *const [T], *mut str, *const str,
+
+ // SAFETY: `T` is `Zeroable`.
+ {<const N: usize, T: Zeroable>} [T; N], {<T: Zeroable>} Wrapping<T>,
+}
+
+macro_rules! impl_tuple_zeroable {
+ ($(,)?) => {};
+ ($first:ident, $($t:ident),* $(,)?) => {
+ // SAFETY: All elements are zeroable and padding can be zero.
+ unsafe impl<$first: Zeroable, $($t: Zeroable),*> Zeroable for ($first, $($t),*) {}
+ impl_tuple_zeroable!($($t),* ,);
+ }
+}
+
+impl_tuple_zeroable!(A, B, C, D, E, F, G, H, I, J);
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
new file mode 100644
index 000000000000..44751fb62b51
--- /dev/null
+++ b/rust/kernel/init/__internal.rs
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! This module contains API-internal items for pin-init.
+//!
+//! These items must not be used outside of
+//! - `kernel/init.rs`
+//! - `macros/pin_data.rs`
+//! - `macros/pinned_drop.rs`
+
+use super::*;
+
+/// See the [nomicon] for what subtyping is. See also [this table].
+///
+/// [nomicon]: https://doc.rust-lang.org/nomicon/subtyping.html
+/// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
+type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+
+/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
+/// type, since the closure needs to fulfill the same safety requirement as the
+/// `__pinned_init`/`__init` functions.
+pub(crate) struct InitClosure<F, T: ?Sized, E>(pub(crate) F, pub(crate) Invariant<(E, T)>);
+
+// SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
+// `__init` invariants.
+unsafe impl<T: ?Sized, F, E> Init<T, E> for InitClosure<F, T, E>
+where
+ F: FnOnce(*mut T) -> Result<(), E>,
+{
+ #[inline]
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ (self.0)(slot)
+ }
+}
+
+/// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate
+/// the pin projections within the initializers.
+///
+/// # Safety
+///
+/// Only the `init` module is allowed to use this trait.
+pub unsafe trait HasPinData {
+ type PinData: PinData;
+
+ unsafe fn __pin_data() -> Self::PinData;
+}
+
+/// Marker trait for pinning data of structs.
+///
+/// # Safety
+///
+/// Only the `init` module is allowed to use this trait.
+pub unsafe trait PinData: Copy {
+ type Datee: ?Sized + HasPinData;
+
+ /// Type inference helper function.
+ fn make_closure<F, O, E>(self, f: F) -> F
+ where
+ F: FnOnce(*mut Self::Datee) -> Result<O, E>,
+ {
+ f
+ }
+}
+
+/// This trait is automatically implemented for every type. It aims to provide the same type
+/// inference help as `HasPinData`.
+///
+/// # Safety
+///
+/// Only the `init` module is allowed to use this trait.
+pub unsafe trait HasInitData {
+ type InitData: InitData;
+
+ unsafe fn __init_data() -> Self::InitData;
+}
+
+/// Same functionality as `PinData`, but for arbitrary data.
+///
+/// # Safety
+///
+/// Only the `init` module is allowed to use this trait.
+pub unsafe trait InitData: Copy {
+ type Datee: ?Sized + HasInitData;
+
+ /// Type inference helper function.
+ fn make_closure<F, O, E>(self, f: F) -> F
+ where
+ F: FnOnce(*mut Self::Datee) -> Result<O, E>,
+ {
+ f
+ }
+}
+
+pub struct AllData<T: ?Sized>(PhantomData<fn(Box<T>) -> Box<T>>);
+
+impl<T: ?Sized> Clone for AllData<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T: ?Sized> Copy for AllData<T> {}
+
+unsafe impl<T: ?Sized> InitData for AllData<T> {
+ type Datee = T;
+}
+
+unsafe impl<T: ?Sized> HasInitData for T {
+ type InitData = AllData<T>;
+
+ unsafe fn __init_data() -> Self::InitData {
+ AllData(PhantomData)
+ }
+}
+
+/// Stack initializer helper type. Use [`stack_pin_init`] instead of this primitive.
+///
+/// # Invariants
+///
+/// If `self.is_init` is true, then `self.value` is initialized.
+///
+/// [`stack_pin_init`]: kernel::stack_pin_init
+pub struct StackInit<T> {
+ value: MaybeUninit<T>,
+ is_init: bool,
+}
+
+impl<T> Drop for StackInit<T> {
+ #[inline]
+ fn drop(&mut self) {
+ if self.is_init {
+ // SAFETY: As we are being dropped, we only call this once. And since `self.is_init` is
+ // true, `self.value` is initialized.
+ unsafe { self.value.assume_init_drop() };
+ }
+ }
+}
+
+impl<T> StackInit<T> {
+ /// Creates a new [`StackInit<T>`] that is uninitialized. Use [`stack_pin_init`] instead of this
+ /// primitive.
+ ///
+ /// [`stack_pin_init`]: kernel::stack_pin_init
+ #[inline]
+ pub fn uninit() -> Self {
+ Self {
+ value: MaybeUninit::uninit(),
+ is_init: false,
+ }
+ }
+
+ /// Initializes the contents and returns the result.
+ #[inline]
+ pub fn init<E>(self: Pin<&mut Self>, init: impl PinInit<T, E>) -> Result<Pin<&mut T>, E> {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { Pin::into_inner_unchecked(self) };
+        // If the value is currently initialized, then it needs to be dropped before we can
+        // reuse the memory (this is a safety guarantee of `Pin`).
+ if this.is_init {
+ this.is_init = false;
+ // SAFETY: `this.is_init` was true and therefore `this.value` is initialized.
+ unsafe { this.value.assume_init_drop() };
+ }
+ // SAFETY: The memory slot is valid and this type ensures that it will stay pinned.
+ unsafe { init.__pinned_init(this.value.as_mut_ptr())? };
+ // INVARIANT: `this.value` is initialized above.
+ this.is_init = true;
+ // SAFETY: The slot is now pinned, since we will never give access to `&mut T`.
+ Ok(unsafe { Pin::new_unchecked(this.value.assume_init_mut()) })
+ }
+}
+
+/// When a value of this type is dropped, it drops a `T`.
+///
+/// Can be forgotten to prevent the drop.
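+///
+/// A sketch of the intended pattern, mirroring what the init macros expand to (`slot` and the
+/// `field` name are illustrative):
+///
+/// ```rust,ignore
+/// // SAFETY: `(*slot).field` was just initialized and is not dropped by anyone else.
+/// let field = &unsafe { DropGuard::new(::core::ptr::addr_of_mut!((*slot).field)) };
+/// // ... initialize the remaining fields; an early `?` return drops the guard, which in
+/// // turn drops the already initialized field ...
+/// // SAFETY: Initialization succeeded, so the field must not be dropped by the guard.
+/// unsafe { DropGuard::forget(field) };
+/// ```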
+pub struct DropGuard<T: ?Sized> {
+ ptr: *mut T,
+ do_drop: Cell<bool>,
+}
+
+impl<T: ?Sized> DropGuard<T> {
+ /// Creates a new [`DropGuard<T>`]. It will [`ptr::drop_in_place`] `ptr` when it gets dropped.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be a valid pointer.
+ ///
+    /// It is the caller's responsibility to ensure that `self` will only get dropped if the pointee of `ptr`:
+ /// - has not been dropped,
+ /// - is not accessible by any other means,
+ /// - will not be dropped by any other means.
+ #[inline]
+ pub unsafe fn new(ptr: *mut T) -> Self {
+ Self {
+ ptr,
+ do_drop: Cell::new(true),
+ }
+ }
+
+ /// Prevents this guard from dropping the supplied pointer.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe in order to prevent safe code from forgetting this guard. It should
+ /// only be called by the macros in this module.
+ #[inline]
+ pub unsafe fn forget(&self) {
+ self.do_drop.set(false);
+ }
+}
+
+impl<T: ?Sized> Drop for DropGuard<T> {
+ #[inline]
+ fn drop(&mut self) {
+ if self.do_drop.get() {
+ // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+ // ensuring that this operation is safe.
+ unsafe { ptr::drop_in_place(self.ptr) }
+ }
+ }
+}
+
+/// Token used by `PinnedDrop` to ensure that its `drop` function cannot be called without first
+/// creating this struct via `unsafe` code. This is needed because the `drop` function is safe,
+/// but should not be called manually.
+pub struct OnlyCallFromDrop(());
+
+impl OnlyCallFromDrop {
+ /// # Safety
+ ///
+ /// This function should only be called from the [`Drop::drop`] function and only be used to
+ /// delegate the destruction to the pinned destructor [`PinnedDrop::drop`] of the same type.
+ pub unsafe fn new() -> Self {
+ Self(())
+ }
+}
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
new file mode 100644
index 000000000000..541cfad1d8be
--- /dev/null
+++ b/rust/kernel/init/macros.rs
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! This module provides the macros that actually implement the proc-macros `pin_data` and
+//! `pinned_drop`.
+//!
+//! These macros should never be called directly, since they expect their input to be
+//! in a certain format which is internal. Use the proc-macros instead.
+//!
+//! This architecture has been chosen because the kernel does not yet have access to `syn` which
+//! would make matters a lot easier for implementing these as proc-macros.
+//!
+//! # Macro expansion example
+//!
+//! This section is intended for readers trying to understand the macros in this module and the
+//! `pin_init!` macros from `init.rs`.
+//!
+//! We will look at the following example:
+//!
+//! ```rust
+//! # use kernel::init::*;
+//! #[pin_data]
+//! #[repr(C)]
+//! struct Bar<T> {
+//!     #[pin]
+//!     t: T,
+//!     pub x: usize,
+//! }
+//!
+//! impl<T> Bar<T> {
+//!     fn new(t: T) -> impl PinInit<Self> {
+//!         pin_init!(Self { t, x: 0 })
+//!     }
+//! }
+//!
+//! #[pin_data(PinnedDrop)]
+//! struct Foo {
+//!     a: usize,
+//!     #[pin]
+//!     b: Bar<u32>,
+//! }
+//!
+//! #[pinned_drop]
+//! impl PinnedDrop for Foo {
+//!     fn drop(self: Pin<&mut Self>) {
+//!         println!("{self:p} is getting dropped.");
+//!     }
+//! }
+//!
+//! let a = 42;
+//! let initializer = pin_init!(Foo {
+//!     a,
+//!     b <- Bar::new(36),
+//! });
+//! ```
+//!
+//! This example includes the most common and important features of the pin-init API.
+//!
+//! Below you can find individual sections about the different macro invocations. Here are some
+//! general things we need to take into account when designing macros:
+//! - use global paths; similarly to file paths, these start with the separator: `::core::panic!()`.
+//!   This ensures that the correct item is used, since users could define their own `mod core {}`
+//!   and then their own `panic!` inside, to execute arbitrary code inside of our macro (see the
+//!   sketch below).
+//! - macro `unsafe` hygiene: we need to ensure that we do not expand arbitrary, user-supplied
+//!   expressions inside of an `unsafe` block in the macro, because this would allow users to do
+//!   `unsafe` operations without an associated `unsafe` block.
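+//!
+//! As a contrived sketch of the shadowing problem that the global paths avoid (the `panic`
+//! override below is purely illustrative):
+//!
+//! ```rust,ignore
+//! macro_rules! panic {
+//!     ($($t:tt)*) => { /* arbitrary user code */ };
+//! }
+//! // A later, unqualified `panic!()` inside a macro expansion now invokes the definition
+//! // above, while `::core::panic!()` still refers to the real macro.
+//! ```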
+//!
+//! ## `#[pin_data]` on `Bar`
+//!
+//! This macro is used to specify which fields are structurally pinned and which fields are not. It
+//! is placed on the struct definition and allows `#[pin]` to be placed on the fields.
+//!
+//! Here is the definition of `Bar` from our example:
+//!
+//! ```rust
+//! # use kernel::init::*;
+//! #[pin_data]
+//! #[repr(C)]
+//! struct Bar<T> {
+//!     t: T,
+//!     pub x: usize,
+//! }
+//! ```
+//!
+//! This expands to the following code:
+//!
+//! ```rust
+//! // Firstly the normal definition of the struct, attributes are preserved:
+//! #[repr(C)]
+//! struct Bar<T> {
+//! t: T,
+//! pub x: usize,
+//! }
+//! // Then an anonymous constant is defined, this is because we do not want any code to access the
+//! // types that we define inside:
+//! const _: () = {
+//! // We define the pin-data carrying struct, it is a ZST and needs to have the same generics,
+//! // since we need to implement access functions for each field and thus need to know its
+//! // type.
+//! struct __ThePinData<T> {
+//! __phantom: ::core::marker::PhantomData<fn(Bar<T>) -> Bar<T>>,
+//! }
+//! // We implement `Copy` for the pin-data struct, since all functions it defines will take
+//! // `self` by value.
+//! impl<T> ::core::clone::Clone for __ThePinData<T> {
+//! fn clone(&self) -> Self {
+//! *self
+//! }
+//! }
+//! impl<T> ::core::marker::Copy for __ThePinData<T> {}
+//! // For every field of `Bar`, the pin-data struct will define a function with the same name
+//! // and accessor (`pub` or `pub(crate)` etc.). This function will take a pointer to the
+//! // field (`slot`) and a `PinInit` or `Init` depending on the projection kind of the field
+//! // (if pinning is structural for the field, then `PinInit` otherwise `Init`).
+//! #[allow(dead_code)]
+//! impl<T> __ThePinData<T> {
+//! unsafe fn t<E>(
+//! self,
+//! slot: *mut T,
+//! init: impl ::kernel::init::Init<T, E>,
+//! ) -> ::core::result::Result<(), E> {
+//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! }
+//! pub unsafe fn x<E>(
+//! self,
+//! slot: *mut usize,
+//! init: impl ::kernel::init::Init<usize, E>,
+//! ) -> ::core::result::Result<(), E> {
+//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! }
+//! }
+//! // Implement the internal `HasPinData` trait that associates `Bar` with the pin-data struct
+//! // that we constructed beforehand.
+//! unsafe impl<T> ::kernel::init::__internal::HasPinData for Bar<T> {
+//! type PinData = __ThePinData<T>;
+//! unsafe fn __pin_data() -> Self::PinData {
+//! __ThePinData {
+//! __phantom: ::core::marker::PhantomData,
+//! }
+//! }
+//! }
+//! // Implement the internal `PinData` trait that marks the pin-data struct as a pin-data
+//! // struct. This is important to ensure that no user can implement a rouge `__pin_data`
+//! // function without using `unsafe`.
+//! unsafe impl<T> ::kernel::init::__internal::PinData for __ThePinData<T> {
+//! type Datee = Bar<T>;
+//! }
+//! // Now we only want to implement `Unpin` for `Bar` when every structurally pinned field is
+//! // `Unpin`. In other words, whether `Bar` is `Unpin` only depends on structurally pinned
+//! // fields (those marked with `#[pin]`). These fields will be listed in this struct, in our
+//! // case no such fields exist, hence this is almost empty. The two phantomdata fields exist
+//! // for two reasons:
+//! // - `__phantom`: every generic must be used, since we cannot really know which generics
+//! // are used, we declere all and then use everything here once.
+//! // - `__phantom_pin`: uses the `'__pin` lifetime and ensures that this struct is invariant
+//! // over it. The lifetime is needed to work around the limitation that trait bounds must
+//! // not be trivial, e.g. the user has a `#[pin] PhantomPinned` field -- this is
+//! // unconditionally `!Unpin` and results in an error. The lifetime tricks the compiler
+//! // into accepting these bounds regardless.
+//! #[allow(dead_code)]
+//! struct __Unpin<'__pin, T> {
+//! __phantom_pin: ::core::marker::PhantomData<fn(&'__pin ()) -> &'__pin ()>,
+//! __phantom: ::core::marker::PhantomData<fn(Bar<T>) -> Bar<T>>,
+//! }
+//! #[doc(hidden)]
+//! impl<'__pin, T>
+//! ::core::marker::Unpin for Bar<T> where __Unpin<'__pin, T>: ::core::marker::Unpin {}
+//! // Now we need to ensure that `Bar` does not implement `Drop`, since that would give users
+//! // access to `&mut self` inside of `drop` even if the struct was pinned. This could lead to
+//! // UB with only safe code, so we disallow this by giving a trait implementation error using
+//! // a direct impl and a blanket implementation.
+//! trait MustNotImplDrop {}
+//! // Normally `Drop` bounds do not have the correct semantics, but for this purpose they do
+//! // (normally people want to know if a type has any kind of drop glue at all, here we want
+//! // to know if it has any kind of custom drop glue, which is exactly what this bound does).
+//! #[allow(drop_bounds)]
+//! impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
+//! impl<T> MustNotImplDrop for Bar<T> {}
+//! // Here comes a convenience check, if one implemented `PinnedDrop`, but forgot to add it to
+//! // `#[pin_data]`, then this will error with the same mechanic as above, this is not needed
+//! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
+//! #[allow(non_camel_case_types)]
+//! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
+//! impl<T: ::kernel::init::PinnedDrop>
+//! UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+//! impl<T> UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for Bar<T> {}
+//! };
+//! ```
+//!
+//! ## `pin_init!` in `impl Bar`
+//!
+//! This macro creates a pin-initializer for the given struct. It requires that the struct is
+//! annotated with `#[pin_data]`.
+//!
+//! Here is the impl on `Bar` defining the new function:
+//!
+//! ```rust
+//! impl<T> Bar<T> {
+//!     fn new(t: T) -> impl PinInit<Self> {
+//!         pin_init!(Self { t, x: 0 })
+//!     }
+//! }
+//! ```
+//!
+//! This expands to the following code:
+//!
+//! ```rust
+//! impl<T> Bar<T> {
+//! fn new(t: T) -> impl PinInit<Self> {
+//! {
+//! // We do not want to allow arbitrary returns, so we declare this type as the `Ok`
+//! // return type and shadow it later when we insert the arbitrary user code. That way
+//! // there will be no possibility of returning without `unsafe`.
+//! struct __InitOk;
+//! // Get the pin-data type from the initialized type.
+//! // - the function is unsafe, hence the unsafe block
+//! // - we `use` the `HasPinData` trait in the block, it is only available in that
+//! // scope.
+//! let data = unsafe {
+//! use ::kernel::init::__internal::HasPinData;
+//! Self::__pin_data()
+//! };
+//! // Use `data` to help with type inference, the closure supplied will have the type
+//! // `FnOnce(*mut Self) -> Result<__InitOk, Infallible>`.
+//! let init = ::kernel::init::__internal::PinData::make_closure::<
+//! _,
+//! __InitOk,
+//! ::core::convert::Infallible,
+//! >(data, move |slot| {
+//! {
+//! // Shadow the structure so it cannot be used to return early. If a user
+//! // tries to write `return Ok(__InitOk)`, then they get a type error, since
+//! // that will refer to this struct instead of the one defined above.
+//! struct __InitOk;
+//! // This is the expansion of `t,`, which is syntactic sugar for `t: t,`.
+//! unsafe { ::core::ptr::write(&raw mut (*slot).t, t) };
+//! // Since initialization could fail later (not in this case, since the error
+//! // type is `Infallible`) we will need to drop this field if it fails. This
+//! // `DropGuard` will drop the field when it gets dropped and has not yet
+//! // been forgotten. We make a reference to it, so users cannot `mem::forget`
+//! // it from the initializer, since the name is the same as the field.
+//! let t = &unsafe {
+//! ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).t)
+//! };
+//! // Expansion of `x: 0,`:
+//! // Since this can be an arbitrary expression we cannot place it inside of
+//! // the `unsafe` block, so we bind it here.
+//! let x = 0;
+//! unsafe { ::core::ptr::write(&raw mut (*slot).x, x) };
+//! let x = &unsafe {
+//! ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).x)
+//! };
+//!
+//! // Here we use the type checker to ensuer that every field has been
+//! // initialized exactly once, since this is `if false` it will never get
+//! // executed, but still type-checked.
+//! // Additionally we abuse `slot` to automatically infer the correct type for
+//! // the struct. This is also another check that every field is accessible
+//! // from this scope.
+//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
+//! if false {
+//! unsafe {
+//! ::core::ptr::write(
+//! slot,
+//! Self {
+//! // We only care about typecheck finding every field here,
+//! // the expression does not matter, just conjure one using
+//! // `panic!()`:
+//! t: ::core::panic!(),
+//! x: ::core::panic!(),
+//! },
+//! );
+//! };
+//! }
+//! // Since initialization has successfully completed, we can now forget the
+//! // guards.
+//! unsafe { ::kernel::init::__internal::DropGuard::forget(t) };
+//! unsafe { ::kernel::init::__internal::DropGuard::forget(x) };
+//! }
+//! // We leave the scope above and gain access to the previously shadowed
+//! // `__InitOk` that we need to return.
+//! Ok(__InitOk)
+//! });
+//! // Change the return type of the closure.
+//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! init(slot).map(|__InitOk| ())
+//! };
+//! // Construct the initializer.
+//! let init = unsafe {
+//! ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//! };
+//! init
+//! }
+//! }
+//! }
+//! ```
+//!
+//! ## `#[pin_data]` on `Foo`
+//!
+//! Since we already took a look at `#[pin_data]` on `Bar`, this section will only explain the
+//! differences/new things in the expansion of the `Foo` definition:
+//!
+//! ```rust
+//! #[pin_data(PinnedDrop)]
+//! struct Foo {
+//!     a: usize,
+//!     #[pin]
+//!     b: Bar<u32>,
+//! }
+//! ```
+//!
+//! This expands to the following code:
+//!
+//! ```rust
+//! struct Foo {
+//! a: usize,
+//! b: Bar<u32>,
+//! }
+//! const _: () = {
+//! struct __ThePinData {
+//! __phantom: ::core::marker::PhantomData<fn(Foo) -> Foo>,
+//! }
+//! impl ::core::clone::Clone for __ThePinData {
+//! fn clone(&self) -> Self {
+//! *self
+//! }
+//! }
+//! impl ::core::marker::Copy for __ThePinData {}
+//! #[allow(dead_code)]
+//! impl __ThePinData {
+//! unsafe fn b<E>(
+//! self,
+//! slot: *mut Bar<u32>,
+//! // Note that this is `PinInit` instead of `Init`, this is because `b` is
+//! // structurally pinned, as marked by the `#[pin]` attribute.
+//! init: impl ::kernel::init::PinInit<Bar<u32>, E>,
+//! ) -> ::core::result::Result<(), E> {
+//! unsafe { ::kernel::init::PinInit::__pinned_init(init, slot) }
+//! }
+//! unsafe fn a<E>(
+//! self,
+//! slot: *mut usize,
+//! init: impl ::kernel::init::Init<usize, E>,
+//! ) -> ::core::result::Result<(), E> {
+//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! }
+//! }
+//! unsafe impl ::kernel::init::__internal::HasPinData for Foo {
+//! type PinData = __ThePinData;
+//! unsafe fn __pin_data() -> Self::PinData {
+//! __ThePinData {
+//! __phantom: ::core::marker::PhantomData,
+//! }
+//! }
+//! }
+//! unsafe impl ::kernel::init::__internal::PinData for __ThePinData {
+//! type Datee = Foo;
+//! }
+//! #[allow(dead_code)]
+//! struct __Unpin<'__pin> {
+//! __phantom_pin: ::core::marker::PhantomData<fn(&'__pin ()) -> &'__pin ()>,
+//! __phantom: ::core::marker::PhantomData<fn(Foo) -> Foo>,
+//! // Since this field is `#[pin]`, it is listed here.
+//! b: Bar<u32>,
+//! }
+//! #[doc(hidden)]
+//! impl<'__pin> ::core::marker::Unpin for Foo where __Unpin<'__pin>: ::core::marker::Unpin {}
+//! // Since we specified `PinnedDrop` as the argument to `#[pin_data]`, we expect `Foo` to
+//! // implement `PinnedDrop`. Thus we do not need to prevent `Drop` implementations like
+//! // before, instead we implement it here and delegate to `PinnedDrop`.
+//! impl ::core::ops::Drop for Foo {
+//! fn drop(&mut self) {
+//! // Since we are getting dropped, no one else has a reference to `self` and thus we
+//! // can assume that we never move.
+//! let pinned = unsafe { ::core::pin::Pin::new_unchecked(self) };
+//! // Create the unsafe token that proves that we are inside of a destructor, this
+//! // type is only allowed to be created in a destructor.
+//! let token = unsafe { ::kernel::init::__internal::OnlyCallFromDrop::new() };
+//! ::kernel::init::PinnedDrop::drop(pinned, token);
+//! }
+//! }
+//! };
+//! ```
+//!
+//! ## `#[pinned_drop]` on `impl PinnedDrop for Foo`
+//!
+//! This macro is used to implement the `PinnedDrop` trait, since that trait is `unsafe` and has an
+//! extra parameter that should not be used at all. The macro hides that parameter.
+//!
+//! Here is the `PinnedDrop` impl for `Foo`:
+//!
+//! ```rust
+//! #[pinned_drop]
+//! impl PinnedDrop for Foo {
+//!     fn drop(self: Pin<&mut Self>) {
+//!         println!("{self:p} is getting dropped.");
+//!     }
+//! }
+//! ```
+//!
+//! This expands to the following code:
+//!
+//! ```rust
+//! // `unsafe`, full path and the token parameter are added, everything else stays the same.
+//! unsafe impl ::kernel::init::PinnedDrop for Foo {
+//!     fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
+//!         println!("{self:p} is getting dropped.");
+//!     }
+//! }
+//! ```
+//!
+//! ## `pin_init!` on `Foo`
+//!
+//! Since we already took a look at `pin_init!` on `Bar`, this section will only explain the
+//! differences/new things in the expansion of `pin_init!` on `Foo`:
+//!
+//! ```rust
+//! let a = 42;
+//! let initializer = pin_init!(Foo {
+//!     a,
+//!     b <- Bar::new(36),
+//! });
+//! ```
+//!
+//! This expands to the following code:
+//!
+//! ```rust
+//! let a = 42;
+//! let initializer = {
+//!     struct __InitOk;
+//!     let data = unsafe {
+//!         use ::kernel::init::__internal::HasPinData;
+//!         Foo::__pin_data()
+//!     };
+//!     let init = ::kernel::init::__internal::PinData::make_closure::<
+//!         _,
+//!         __InitOk,
+//!         ::core::convert::Infallible,
+//!     >(data, move |slot| {
+//!         {
+//!             struct __InitOk;
+//!             unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*slot).a), a) };
+//!             let a = &unsafe {
+//!                 ::kernel::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*slot).a))
+//!             };
+//!             let b = Bar::new(36);
+//!             // Here we use `data` to access the correct field and require that `b` is of type
+//!             // `PinInit<Bar<u32>, Infallible>`.
+//!             unsafe { data.b(::core::ptr::addr_of_mut!((*slot).b), b)? };
+//!             let b = &unsafe {
+//!                 ::kernel::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*slot).b))
+//!             };
+//!
+//!             #[allow(unreachable_code, clippy::diverging_sub_expression)]
+//!             if false {
+//!                 unsafe {
+//!                     ::core::ptr::write(
+//!                         slot,
+//!                         Foo {
+//!                             a: ::core::panic!(),
+//!                             b: ::core::panic!(),
+//!                         },
+//!                     );
+//!                 };
+//!             }
+//!             unsafe { ::kernel::init::__internal::DropGuard::forget(a) };
+//!             unsafe { ::kernel::init::__internal::DropGuard::forget(b) };
+//!         }
+//!         Ok(__InitOk)
+//!     });
+//!     let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//!         init(slot).map(|__InitOk| ())
+//!     };
+//!     let init = unsafe {
+//!         ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//!     };
+//!     init
+//! };
+//! ```
+
+/// Creates an `unsafe impl<...> PinnedDrop for $type` block.
+///
+/// See [`PinnedDrop`] for more information.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __pinned_drop {
+ (
+ @impl_sig($($impl_sig:tt)*),
+ @impl_body(
+ $(#[$($attr:tt)*])*
+ fn drop($($sig:tt)*) {
+ $($inner:tt)*
+ }
+ ),
+ ) => {
+ unsafe $($impl_sig)* {
+ // Inherit all attributes and the type/ident tokens for the signature.
+ $(#[$($attr)*])*
+ fn drop($($sig)*, _: $crate::init::__internal::OnlyCallFromDrop) {
+ $($inner)*
+ }
+ }
+ }
+}
+
+/// This macro first parses the struct definition such that it separates pinned and not pinned
+/// fields. Afterwards it declares the struct and implements the `PinData` trait safely.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __pin_data {
+ // Proc-macro entry point, this is supplied by the proc-macro pre-parsing.
+ (parse_input:
+ @args($($pinned_drop:ident)?),
+ @sig(
+ $(#[$($struct_attr:tt)*])*
+ $vis:vis struct $name:ident
+ $(where $($whr:tt)*)?
+ ),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @body({ $($fields:tt)* }),
+ ) => {
+ // We now use token munching to iterate through all of the fields. While doing this we
+        // identify fields marked with `#[pin]`; these fields are the 'pinned fields'. The user
+ // wants these to be structurally pinned. The rest of the fields are the
+ // 'not pinned fields'. Additionally we collect all fields, since we need them in the right
+ // order to declare the struct.
+ //
+ // In this call we also put some explaining comments for the parameters.
+ $crate::__pin_data!(find_pinned_fields:
+ // Attributes on the struct itself, these will just be propagated to be put onto the
+ // struct definition.
+ @struct_attrs($(#[$($struct_attr)*])*),
+ // The visibility of the struct.
+ @vis($vis),
+ // The name of the struct.
+ @name($name),
+            // The 'impl generics', the generics that will need to be specified inside of an
+            // `impl<$impl_generics>` block, i.e. they include the bounds.
+            @impl_generics($($impl_generics)*),
+            // The 'ty generics', the generics that will need to be specified on the type itself,
+            // i.e. `$name<$ty_generics>`, without any bounds.
+ @ty_generics($($ty_generics)*),
+ // The where clause of any impl block and the declaration.
+ @where($($($whr)*)?),
+ // The remaining fields tokens that need to be processed.
+ // We add a `,` at the end to ensure correct parsing.
+ @fields_munch($($fields)* ,),
+ // The pinned fields.
+ @pinned(),
+ // The not pinned fields.
+ @not_pinned(),
+ // All fields.
+ @fields(),
+ // The accumulator containing all attributes already parsed.
+ @accum(),
+ // Contains `yes` or `` to indicate if `#[pin]` was found on the current field.
+ @is_pinned(),
+ // The proc-macro argument, this should be `PinnedDrop` or ``.
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+        // We found a `PhantomPinned` field; this should generally be pinned!
+ @fields_munch($field:ident : $($($(::)?core::)?marker::)?PhantomPinned, $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ // This field is not pinned.
+ @is_pinned(),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+        ::core::compile_error!(concat!(
+            "The field `",
+            stringify!($field),
+            "` of type `PhantomPinned` only has an effect if it has the `#[pin]` attribute.",
+        ));
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($($rest)*),
+ @pinned($($pinned)* $($accum)* $field: ::core::marker::PhantomPinned,),
+ @not_pinned($($not_pinned)*),
+ @fields($($fields)* $($accum)* $field: ::core::marker::PhantomPinned,),
+ @accum(),
+ @is_pinned(),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ // We reached the field declaration.
+ @fields_munch($field:ident : $type:ty, $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ // This field is pinned.
+ @is_pinned(yes),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($($rest)*),
+ @pinned($($pinned)* $($accum)* $field: $type,),
+ @not_pinned($($not_pinned)*),
+ @fields($($fields)* $($accum)* $field: $type,),
+ @accum(),
+ @is_pinned(),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ // We reached the field declaration.
+ @fields_munch($field:ident : $type:ty, $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ // This field is not pinned.
+ @is_pinned(),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($($rest)*),
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)* $($accum)* $field: $type,),
+ @fields($($fields)* $($accum)* $field: $type,),
+ @accum(),
+ @is_pinned(),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ // We found the `#[pin]` attr.
+ @fields_munch(#[pin] $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ @is_pinned($($is_pinned:ident)?),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($($rest)*),
+ // We do not include `#[pin]` in the list of attributes, since it is not actually an
+ // attribute that is defined somewhere.
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)*),
+ @fields($($fields)*),
+ @accum($($accum)*),
+ // Set this to `yes`.
+ @is_pinned(yes),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+        // We reached a field declaration with a visibility; for simplicity we only munch the
+        // visibility and put it into `$accum`.
+ @fields_munch($fvis:vis $field:ident $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ @is_pinned($($is_pinned:ident)?),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($field $($rest)*),
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)*),
+ @fields($($fields)*),
+ @accum($($accum)* $fvis),
+ @is_pinned($($is_pinned)?),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ // Some other attribute, just put it into `$accum`.
+ @fields_munch(#[$($attr:tt)*] $($rest:tt)*),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum($($accum:tt)*),
+ @is_pinned($($is_pinned:ident)?),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ $crate::__pin_data!(find_pinned_fields:
+ @struct_attrs($($struct_attrs)*),
+ @vis($vis),
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @fields_munch($($rest)*),
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)*),
+ @fields($($fields)*),
+ @accum($($accum)* #[$($attr)*]),
+ @is_pinned($($is_pinned)?),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ (find_pinned_fields:
+ @struct_attrs($($struct_attrs:tt)*),
+ @vis($vis:vis),
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ // We reached the end of the fields, plus an optional additional comma, since we added one
+ // before and the user is also allowed to put a trailing comma.
+ @fields_munch($(,)?),
+ @pinned($($pinned:tt)*),
+ @not_pinned($($not_pinned:tt)*),
+ @fields($($fields:tt)*),
+ @accum(),
+ @is_pinned(),
+ @pinned_drop($($pinned_drop:ident)?),
+ ) => {
+ // Declare the struct with all fields in the correct order.
+ $($struct_attrs)*
+ $vis struct $name <$($impl_generics)*>
+ where $($whr)*
+ {
+ $($fields)*
+ }
+
+        // We put the rest into this const item, because then it will not be accessible to
+        // anything outside.
+ const _: () = {
+            // We declare this struct which will host all of the projection functions for our
+            // type. It will be invariant over all generic parameters which are inherited from
+            // the struct.
+ $vis struct __ThePinData<$($impl_generics)*>
+ where $($whr)*
+ {
+ __phantom: ::core::marker::PhantomData<
+ fn($name<$($ty_generics)*>) -> $name<$($ty_generics)*>
+ >,
+ }
+
+ impl<$($impl_generics)*> ::core::clone::Clone for __ThePinData<$($ty_generics)*>
+ where $($whr)*
+ {
+ fn clone(&self) -> Self { *self }
+ }
+
+ impl<$($impl_generics)*> ::core::marker::Copy for __ThePinData<$($ty_generics)*>
+ where $($whr)*
+ {}
+
+ // Make all projection functions.
+ $crate::__pin_data!(make_pin_data:
+ @pin_data(__ThePinData),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @pinned($($pinned)*),
+ @not_pinned($($not_pinned)*),
+ );
+
+ // SAFETY: We have added the correct projection functions above to `__ThePinData` and
+ // we also use the least restrictive generics possible.
+ unsafe impl<$($impl_generics)*>
+ $crate::init::__internal::HasPinData for $name<$($ty_generics)*>
+ where $($whr)*
+ {
+ type PinData = __ThePinData<$($ty_generics)*>;
+
+ unsafe fn __pin_data() -> Self::PinData {
+ __ThePinData { __phantom: ::core::marker::PhantomData }
+ }
+ }
+
+ unsafe impl<$($impl_generics)*>
+ $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*>
+ where $($whr)*
+ {
+ type Datee = $name<$($ty_generics)*>;
+ }
+
+            // This struct will be used for the unpin analysis, since only the structurally pinned
+            // fields are relevant for whether the struct should implement `Unpin`.
+ #[allow(dead_code)]
+ struct __Unpin <'__pin, $($impl_generics)*>
+ where $($whr)*
+ {
+ __phantom_pin: ::core::marker::PhantomData<fn(&'__pin ()) -> &'__pin ()>,
+ __phantom: ::core::marker::PhantomData<
+ fn($name<$($ty_generics)*>) -> $name<$($ty_generics)*>
+ >,
+ // Only the pinned fields.
+ $($pinned)*
+ }
+
+ #[doc(hidden)]
+ impl<'__pin, $($impl_generics)*> ::core::marker::Unpin for $name<$($ty_generics)*>
+ where
+ __Unpin<'__pin, $($ty_generics)*>: ::core::marker::Unpin,
+ $($whr)*
+ {}
+
+            // We need to disallow normal `Drop` implementations; the exact behavior depends on
+            // whether `PinnedDrop` was specified as the parameter.
+ $crate::__pin_data!(drop_prevention:
+ @name($name),
+ @impl_generics($($impl_generics)*),
+ @ty_generics($($ty_generics)*),
+ @where($($whr)*),
+ @pinned_drop($($pinned_drop)?),
+ );
+ };
+ };
+ // When no `PinnedDrop` was specified, then we have to prevent implementing drop.
+ (drop_prevention:
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ @pinned_drop(),
+ ) => {
+        // We prevent this by creating a trait that will be implemented for all types implementing
+        // `Drop`. Additionally we will implement this trait for the struct, leading to a conflict
+        // if it also implements `Drop`.
+ trait MustNotImplDrop {}
+ #[allow(drop_bounds)]
+ impl<T: ::core::ops::Drop> MustNotImplDrop for T {}
+ impl<$($impl_generics)*> MustNotImplDrop for $name<$($ty_generics)*>
+ where $($whr)* {}
+ // We also take care to prevent users from writing a useless `PinnedDrop` implementation.
+ // They might implement `PinnedDrop` correctly for the struct, but forget to give
+ // `PinnedDrop` as the parameter to `#[pin_data]`.
+ #[allow(non_camel_case_types)]
+ trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
+ impl<T: $crate::init::PinnedDrop>
+ UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+ impl<$($impl_generics)*>
+ UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for $name<$($ty_generics)*>
+ where $($whr)* {}
+ };
+ // When `PinnedDrop` was specified we just implement `Drop` and delegate.
+ (drop_prevention:
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ @pinned_drop(PinnedDrop),
+ ) => {
+ impl<$($impl_generics)*> ::core::ops::Drop for $name<$($ty_generics)*>
+ where $($whr)*
+ {
+ fn drop(&mut self) {
+ // SAFETY: Since this is a destructor, `self` will not move after this function
+ // terminates, since it is inaccessible.
+ let pinned = unsafe { ::core::pin::Pin::new_unchecked(self) };
+ // SAFETY: Since this is a drop function, we can create this token to call the
+ // pinned destructor of this type.
+ let token = unsafe { $crate::init::__internal::OnlyCallFromDrop::new() };
+ $crate::init::PinnedDrop::drop(pinned, token);
+ }
+ }
+ };
+ // If some other parameter was specified, we emit a readable error.
+ (drop_prevention:
+ @name($name:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ @pinned_drop($($rest:tt)*),
+ ) => {
+        ::core::compile_error!(concat!(
+            "Wrong parameters to `#[pin_data]`, expected nothing or `PinnedDrop`, got '",
+            stringify!($($rest)*),
+            "'.",
+        ));
+ };
+ (make_pin_data:
+ @pin_data($pin_data:ident),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @where($($whr:tt)*),
+ @pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
+ @not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
+ ) => {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`; if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
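+ //
+ // For example (illustrative), a pinned field `a: Mutex<u32>` yields roughly:
+ //
+ // unsafe fn a<E>(self, slot: *mut Mutex<u32>, init: impl PinInit<Mutex<u32>, E>)
+ // -> Result<(), E>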
+ #[allow(dead_code)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::init::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
+ }
+ )*
+ $(
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::init::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ unsafe { $crate::init::Init::__init(init, slot) }
+ }
+ )*
+ }
+ };
+}
diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs
new file mode 100644
index 000000000000..c49e1a8d3fd0
--- /dev/null
+++ b/rust/kernel/ioctl.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! ioctl() number definitions
+//!
+//! C header: [`include/asm-generic/ioctl.h`](../../../../include/asm-generic/ioctl.h)
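+//!
+//! Illustrative usage (the `b'X'` magic number and the `MyArgs` type are hypothetical):
+//!
+//! ```ignore
+//! use kernel::ioctl::{_IOR, _IOW};
+//!
+//! #[repr(C)]
+//! struct MyArgs { x: u64 }
+//!
+//! const MY_GET: u32 = _IOR::<MyArgs>(b'X' as u32, 0x01);
+//! const MY_SET: u32 = _IOW::<MyArgs>(b'X' as u32, 0x02);
+//! ```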
+
+#![allow(non_snake_case)]
+
+use crate::build_assert;
+
+/// Build an ioctl number, analogous to the C macro of the same name.
+#[inline(always)]
+const fn _IOC(dir: u32, ty: u32, nr: u32, size: usize) -> u32 {
+ build_assert!(dir <= uapi::_IOC_DIRMASK);
+ build_assert!(ty <= uapi::_IOC_TYPEMASK);
+ build_assert!(nr <= uapi::_IOC_NRMASK);
+ build_assert!(size <= (uapi::_IOC_SIZEMASK as usize));
+
+ (dir << uapi::_IOC_DIRSHIFT)
+ | (ty << uapi::_IOC_TYPESHIFT)
+ | (nr << uapi::_IOC_NRSHIFT)
+ | ((size as u32) << uapi::_IOC_SIZESHIFT)
+}
+
+/// Build an ioctl number for an argumentless ioctl.
+#[inline(always)]
+pub const fn _IO(ty: u32, nr: u32) -> u32 {
+ _IOC(uapi::_IOC_NONE, ty, nr, 0)
+}
+
+/// Build an ioctl number for a read-only ioctl.
+#[inline(always)]
+pub const fn _IOR<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(uapi::_IOC_READ, ty, nr, core::mem::size_of::<T>())
+}
+
+/// Build an ioctl number for a write-only ioctl.
+#[inline(always)]
+pub const fn _IOW<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(uapi::_IOC_WRITE, ty, nr, core::mem::size_of::<T>())
+}
+
+/// Build an ioctl number for a read-write ioctl.
+#[inline(always)]
+pub const fn _IOWR<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(
+ uapi::_IOC_READ | uapi::_IOC_WRITE,
+ ty,
+ nr,
+ core::mem::size_of::<T>(),
+ )
+}
+
+/// Get the ioctl direction from an ioctl number.
+pub const fn _IOC_DIR(nr: u32) -> u32 {
+ (nr >> uapi::_IOC_DIRSHIFT) & uapi::_IOC_DIRMASK
+}
+
+/// Get the ioctl type from an ioctl number.
+pub const fn _IOC_TYPE(nr: u32) -> u32 {
+ (nr >> uapi::_IOC_TYPESHIFT) & uapi::_IOC_TYPEMASK
+}
+
+/// Get the ioctl sequence number (the `nr` field) from an ioctl number.
+pub const fn _IOC_NR(nr: u32) -> u32 {
+ (nr >> uapi::_IOC_NRSHIFT) & uapi::_IOC_NRMASK
+}
+
+/// Get the ioctl size from an ioctl number.
+pub const fn _IOC_SIZE(nr: u32) -> usize {
+ ((nr >> uapi::_IOC_SIZESHIFT) & uapi::_IOC_SIZEMASK) as usize
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 223564f9f0cc..676995d4e460 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -16,7 +16,10 @@
#![feature(coerce_unsized)]
#![feature(core_ffi_c)]
#![feature(dispatch_from_dyn)]
+#![feature(explicit_generic_args_with_impl_trait)]
#![feature(generic_associated_types)]
+#![feature(new_uninit)]
+#![feature(pin_macro)]
#![feature(receiver_trait)]
#![feature(unsize)]
@@ -25,11 +28,16 @@
#[cfg(not(CONFIG_RUST))]
compile_error!("Missing kernel configuration for conditional compilation");
+// Allow proc-macros to refer to `::kernel` inside the `kernel` crate (this crate).
+extern crate self as kernel;
+
#[cfg(not(test))]
#[cfg(not(testlib))]
mod allocator;
mod build_assert;
pub mod error;
+pub mod init;
+pub mod ioctl;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -37,11 +45,13 @@ mod static_assert;
pub mod std_vendor;
pub mod str;
pub mod sync;
+pub mod task;
pub mod types;
#[doc(hidden)]
pub use bindings;
pub use macros;
+pub use uapi;
#[doc(hidden)]
pub use build_error::build_error;
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index 0bc1c97e5604..c28587d68ebc 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -18,7 +18,7 @@ pub use core::pin::Pin;
pub use alloc::{boxed::Box, vec::Vec};
#[doc(no_inline)]
-pub use macros::{module, vtable};
+pub use macros::{module, pin_data, pinned_drop, vtable};
pub use super::build_assert;
@@ -27,8 +27,14 @@ pub use super::build_assert;
pub use super::dbg;
pub use super::{pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
+pub use super::{init, pin_init, try_init, try_pin_init};
+
pub use super::static_assert;
pub use super::error::{code::*, Error, Result};
pub use super::{str::CStr, ThisModule};
+
+pub use super::init::{InPlaceInit, Init, PinInit};
+
+pub use super::current;
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 33da23e3076d..d219ee518eff 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -5,6 +5,56 @@
//! This module contains the kernel APIs related to synchronisation that have been ported or
//! wrapped for usage by Rust code in the kernel.
+use crate::types::Opaque;
+
mod arc;
+mod condvar;
+pub mod lock;
+mod locked_by;
pub use arc::{Arc, ArcBorrow, UniqueArc};
+pub use condvar::CondVar;
+pub use lock::{mutex::Mutex, spinlock::SpinLock};
+pub use locked_by::LockedBy;
+
+/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
+#[repr(transparent)]
+pub struct LockClassKey(Opaque<bindings::lock_class_key>);
+
+// SAFETY: `bindings::lock_class_key` is designed to be used concurrently from multiple threads and
+// provides its own synchronization.
+unsafe impl Sync for LockClassKey {}
+
+impl LockClassKey {
+ /// Creates a new lock class key.
+ pub const fn new() -> Self {
+ Self(Opaque::uninit())
+ }
+
+ pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
+ self.0.get()
+ }
+}
+
+/// Defines a new static lock class and returns a pointer to it.
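+///
+/// For example, `let key: &'static LockClassKey = static_lock_class!();`. It is normally used
+/// indirectly via `new_mutex!`, `new_spinlock!` and `new_condvar!`.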
+#[doc(hidden)]
+#[macro_export]
+macro_rules! static_lock_class {
+ () => {{
+ static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ &CLASS
+ }};
+}
+
+/// Returns the given string, if one is provided; otherwise, generates one based on the source code
+/// location.
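+///
+/// For example, when invoked from `foo.rs` at line 10, `optional_name!()` expands to the C
+/// string `"foo.rs:10"`, while `optional_name!("my_lock")` expands to `"my_lock"`.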
+#[doc(hidden)]
+#[macro_export]
+macro_rules! optional_name {
+ () => {
+ $crate::c_str!(::core::concat!(::core::file!(), ":", ::core::line!()))
+ };
+ ($name:literal) => {
+ $crate::c_str!($name)
+ };
+}
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index f2f1c83d72ba..e6d206242465 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -17,17 +17,24 @@
use crate::{
bindings,
- error::Result,
+ error::{self, Error},
+ init::{self, InPlaceInit, Init, PinInit},
+ try_init,
types::{ForeignOwnable, Opaque},
};
use alloc::boxed::Box;
use core::{
+ alloc::AllocError,
+ fmt,
marker::{PhantomData, Unsize},
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
pin::Pin,
ptr::NonNull,
};
+use macros::pin_data;
+
+mod std_vendor;
/// A reference-counted pointer to an instance of `T`.
///
@@ -120,6 +127,7 @@ pub struct Arc<T: ?Sized> {
_p: PhantomData<ArcInner<T>>,
}
+#[pin_data]
#[repr(C)]
struct ArcInner<T: ?Sized> {
refcount: Opaque<bindings::refcount_t>,
@@ -149,7 +157,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
impl<T> Arc<T> {
/// Constructs a new reference counted instance of `T`.
- pub fn try_new(contents: T) -> Result<Self> {
+ pub fn try_new(contents: T) -> Result<Self, AllocError> {
// INVARIANT: The refcount is initialised to a non-zero value.
let value = ArcInner {
// SAFETY: There are no safety requirements for this FFI call.
@@ -163,6 +171,28 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
}
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ UniqueArc::pin_init(init).map(|u| u.into())
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`pin_init`], since an [`Arc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
+ where
+ Error: From<E>,
+ {
+ UniqueArc::init(init).map(|u| u.into())
+ }
}
impl<T: ?Sized> Arc<T> {
@@ -469,7 +499,7 @@ pub struct UniqueArc<T: ?Sized> {
impl<T> UniqueArc<T> {
/// Tries to allocate a new [`UniqueArc`] instance.
- pub fn try_new(value: T) -> Result<Self> {
+ pub fn try_new(value: T) -> Result<Self, AllocError> {
Ok(Self {
// INVARIANT: The newly-created object has a ref-count of 1.
inner: Arc::try_new(value)?,
@@ -477,10 +507,17 @@ impl<T> UniqueArc<T> {
}
/// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
- pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>> {
- Ok(UniqueArc::<MaybeUninit<T>> {
+ pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ let inner = Box::try_init::<AllocError>(try_init!(ArcInner {
+ // SAFETY: There are no safety requirements for this FFI call.
+ refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ data <- init::uninit::<T, AllocError>(),
+ }? AllocError))?;
+ Ok(UniqueArc {
// INVARIANT: The newly-created object has a ref-count of 1.
- inner: Arc::try_new(MaybeUninit::uninit())?,
+ // SAFETY: The pointer from the `Box` is valid.
+ inner: unsafe { Arc::from_inner(Box::leak(inner).into()) },
})
}
}
@@ -489,6 +526,17 @@ impl<T> UniqueArc<MaybeUninit<T>> {
/// Converts a `UniqueArc<MaybeUninit<T>>` into a `UniqueArc<T>` by writing a value into it.
pub fn write(mut self, value: T) -> UniqueArc<T> {
self.deref_mut().write(value);
+ // SAFETY: We just wrote the value to be initialized.
+ unsafe { self.assume_init() }
+ }
+
+ /// Unsafely assume that `self` is initialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller guarantees that the value behind this pointer has been initialized. It is
+ /// *immediate* UB to call this when the value is not initialized.
+ pub unsafe fn assume_init(self) -> UniqueArc<T> {
let inner = ManuallyDrop::new(self).inner.ptr;
UniqueArc {
// SAFETY: The new `Arc` is taking over `ptr` from `self.inner` (which won't be
@@ -496,6 +544,30 @@ impl<T> UniqueArc<MaybeUninit<T>> {
inner: unsafe { Arc::from_inner(inner.cast()) },
}
}
+
+ /// Initialize `self` using the given initializer.
+ pub fn init_with<E>(mut self, init: impl Init<T, E>) -> core::result::Result<UniqueArc<T>, E> {
+ // SAFETY: The supplied pointer is valid for initialization.
+ match unsafe { init.__init(self.as_mut_ptr()) } {
+ // SAFETY: Initialization completed successfully.
+ Ok(()) => Ok(unsafe { self.assume_init() }),
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Pin-initialize `self` using the given pin-initializer.
+ pub fn pin_init_with<E>(
+ mut self,
+ init: impl PinInit<T, E>,
+ ) -> core::result::Result<Pin<UniqueArc<T>>, E> {
+ // SAFETY: The supplied pointer is valid for initialization and we will later pin the value
+ // to ensure it does not move.
+ match unsafe { init.__pinned_init(self.as_mut_ptr()) } {
+ // SAFETY: Initialization completed successfully.
+ Ok(()) => Ok(unsafe { self.assume_init() }.into()),
+ Err(err) => Err(err),
+ }
+ }
}
impl<T: ?Sized> From<UniqueArc<T>> for Pin<UniqueArc<T>> {
@@ -522,3 +594,27 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> {
unsafe { &mut self.inner.ptr.as_mut().data }
}
}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
diff --git a/rust/kernel/sync/arc/std_vendor.rs b/rust/kernel/sync/arc/std_vendor.rs
new file mode 100644
index 000000000000..a66a0c2831b3
--- /dev/null
+++ b/rust/kernel/sync/arc/std_vendor.rs
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! The contents of this file come from the Rust standard library, hosted in
+//! the <https://github.com/rust-lang/rust> repository, licensed under
+//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+//! see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
+
+use crate::sync::{arc::ArcInner, Arc};
+use core::any::Any;
+
+impl Arc<dyn Any + Send + Sync> {
+ /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
+ pub fn downcast<T>(self) -> core::result::Result<Arc<T>, Self>
+ where
+ T: Any + Send + Sync,
+ {
+ if (*self).is::<T>() {
+ // SAFETY: We have just checked that the type is correct, so we can cast the pointer.
+ unsafe {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ core::mem::forget(self);
+ Ok(Arc::from_inner(ptr))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 000000000000..ed353399c4e5
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{lock::Backend, lock::Guard, LockClassKey};
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use core::marker::PhantomPinned;
+use macros::pin_data;
+
+/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_condvar {
+ ($($name:literal)?) => {
+ $crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A condition variable.
+///
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock, go to sleep and reacquire the lock on wake-up. A thread
+/// wakes up when notified by another thread (via [`CondVar::notify_one`] or
+/// [`CondVar::notify_all`]) or when it receives a signal. It may also wake up spuriously.
+///
+/// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_condvar`] macros.
+///
+/// # Examples
+///
+/// The following is an example of using a condvar with a mutex:
+///
+/// ```
+/// use kernel::sync::{CondVar, Mutex};
+/// use kernel::{new_condvar, new_mutex};
+///
+/// #[pin_data]
+/// pub struct Example {
+/// #[pin]
+/// value: Mutex<u32>,
+///
+/// #[pin]
+/// value_changed: CondVar,
+/// }
+///
+/// /// Waits for `e.value` to become `v`.
+/// fn wait_for_value(e: &Example, v: u32) {
+/// let mut guard = e.value.lock();
+/// while *guard != v {
+/// e.value_changed.wait_uninterruptible(&mut guard);
+/// }
+/// }
+///
+/// /// Increments `e.value` and notifies all potential waiters.
+/// fn increment(e: &Example) {
+/// *e.value.lock() += 1;
+/// e.value_changed.notify_all();
+/// }
+///
+/// /// Allocates a new boxed `Example`.
+/// fn new_example() -> Result<Pin<Box<Example>>> {
+/// Box::pin_init(pin_init!(Example {
+/// value <- new_mutex!(0),
+/// value_changed <- new_condvar!(),
+/// }))
+/// }
+/// ```
+///
+/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
+#[pin_data]
+pub struct CondVar {
+ #[pin]
+ pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+
+ /// A condvar needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for CondVar {}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+// concurrently.
+unsafe impl Sync for CondVar {}
+
+impl CondVar {
+ /// Constructs a new condvar initialiser.
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pin_init!(Self {
+ _pin: PhantomPinned,
+ // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
+ // static lifetimes so they live indefinitely.
+ wait_list <- Opaque::ffi_init(|slot| unsafe {
+ bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr())
+ }),
+ })
+ }
+
+ fn wait_internal<T: ?Sized, B: Backend>(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) {
+ let wait = Opaque::<bindings::wait_queue_entry>::uninit();
+
+ // SAFETY: `wait` points to valid memory.
+ unsafe { bindings::init_wait(wait.get()) };
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe {
+ bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _)
+ };
+
+ // SAFETY: No arguments, switches to another thread.
+ guard.do_unlocked(|| unsafe { bindings::schedule() });
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+ }
+
+ /// Releases the lock and waits for a notification in interruptible mode.
+ ///
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep, reacquiring the lock on wake up. It wakes up when notified by
+ /// [`CondVar::notify_one`] or [`CondVar::notify_all`], or when the thread receives a signal.
+ /// It may also wake up spuriously.
+ ///
+ /// Returns whether there is a signal pending.
+ #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) -> bool {
+ self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard);
+ crate::current!().signal_pending()
+ }
+
+ /// Releases the lock and waits for a notification in uninterruptible mode.
+ ///
+ /// Similar to [`CondVar::wait`], except that the wait is not interruptible. That is, the
+ /// thread won't wake up due to signals. It may, however, wake up spuriously.
+ pub fn wait_uninterruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
+ self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard)
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+ fn notify(&self, count: i32, flags: u32) {
+ // SAFETY: `wait_list` points to valid memory.
+ unsafe {
+ bindings::__wake_up(
+ self.wait_list.get(),
+ bindings::TASK_NORMAL,
+ count,
+ flags as _,
+ )
+ };
+ }
+
+ /// Wakes a single waiter up, if any.
+ ///
+ /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+ /// completely (as opposed to automatically waking up the next waiter).
+ pub fn notify_one(&self) {
+ self.notify(1, 0);
+ }
+
+ /// Wakes all waiters up, if any.
+ ///
+ /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+ /// completely (as opposed to automatically waking up the next waiter).
+ pub fn notify_all(&self) {
+ self.notify(0, 0);
+ }
+}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
new file mode 100644
index 000000000000..a2216325632d
--- /dev/null
+++ b/rust/kernel/sync/lock.rs
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic kernel lock and guard.
+//!
+//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
+//! spinlocks, raw spinlocks) to be provided with minimal effort.
+
+use super::LockClassKey;
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
+use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
+use macros::pin_data;
+
+pub mod mutex;
+pub mod spinlock;
+
+/// The "backend" of a lock.
+///
+/// It is the actual implementation of the lock, without the need to repeat patterns used in all
+/// locks.
+///
+/// # Safety
+///
+/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
+/// is owned, that is, between calls to `lock` and `unlock`.
+/// - Implementers must also ensure that `relock` uses the same locking method as the original
+/// lock operation.
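+///
+/// See [`mutex::MutexBackend`] and [`spinlock::SpinLockBackend`] for concrete
+/// implementations.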
+pub unsafe trait Backend {
+ /// The state required by the lock.
+ type State;
+
+ /// The state required to be kept between lock and unlock.
+ type GuardState;
+
+ /// Initialises the lock.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be valid for writes for the duration of the call, while `name` and `key` must
+ /// remain valid for read indefinitely.
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ );
+
+ /// Acquires the lock, making the caller its owner.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that [`Backend::init`] has been previously called.
+ #[must_use]
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+
+ /// Releases the lock, giving up its ownership.
+ ///
+ /// # Safety
+ ///
+ /// It must only be called by the current owner of the lock.
+ unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);
+
+ /// Reacquires the lock, making the caller its owner.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
+ /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
+ unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
+ // SAFETY: The safety requirements ensure that the lock is initialised.
+ *guard_state = unsafe { Self::lock(ptr) };
+ }
+}
+
+/// A mutual exclusion primitive.
+///
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
+/// specified as the generic parameter `B`.
+#[pin_data]
+pub struct Lock<T: ?Sized, B: Backend> {
+ /// The kernel lock object.
+ #[pin]
+ state: Opaque<B::State>,
+
+ /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
+ /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
+ /// some architecture uses self-references now or in the future.
+ #[pin]
+ _pin: PhantomPinned,
+
+ /// The data protected by the lock.
+ pub(crate) data: UnsafeCell<T>,
+}
+
+// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}
+
+// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
+
+impl<T, B: Backend> Lock<T, B> {
+ /// Constructs a new lock initialiser.
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+ pin_init!(Self {
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
+ // static lifetimes so they live indefinitely.
+ state <- Opaque::ffi_init(|slot| unsafe {
+ B::init(slot, name.as_char_ptr(), key.as_ptr())
+ }),
+ })
+ }
+}
+
+impl<T: ?Sized, B: Backend> Lock<T, B> {
+ /// Acquires the lock and gives the caller access to the data protected by it.
+ pub fn lock(&self) -> Guard<'_, T, B> {
+ // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
+ // that `init` was called.
+ let state = unsafe { B::lock(self.state.get()) };
+ // SAFETY: The lock was just acquired.
+ unsafe { Guard::new(self, state) }
+ }
+}
+
+/// A lock guard.
+///
+/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
+/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
+/// protected by the lock.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct Guard<'a, T: ?Sized, B: Backend> {
+ pub(crate) lock: &'a Lock<T, B>,
+ pub(crate) state: B::GuardState,
+ _not_send: PhantomData<*mut ()>,
+}
+
+// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
+unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
+
+impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
+ pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { B::unlock(self.lock.state.get(), &self.state) };
+
+ // SAFETY: The lock was just unlocked above and is being relocked now.
+ let _relock =
+ ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });
+
+ cb();
+ }
+}
+
+impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ fn drop(&mut self) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { B::unlock(self.lock.state.get(), &self.state) };
+ }
+}
+
+impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
+ /// Constructs a new lock guard.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that it owns the lock.
+ pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+ Self {
+ lock,
+ state,
+ _not_send: PhantomData,
+ }
+ }
+}
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
new file mode 100644
index 000000000000..923472f04af4
--- /dev/null
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel mutex.
+//!
+//! This module allows Rust code to use the kernel's `struct mutex`.
+
+use crate::bindings;
+
+/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class.
+///
+/// It uses the name if one is given, otherwise it generates one based on the file name and line
+/// number.
+#[macro_export]
+macro_rules! new_mutex {
+ ($inner:expr $(, $name:literal)? $(,)?) => {
+ $crate::sync::Mutex::new(
+ $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A mutual exclusion primitive.
+///
+/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
+/// only one at a time is allowed to progress, the others will block (sleep) until the mutex is
+/// unlocked, at which point another thread will be allowed to wake up and make progress.
+///
+/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
+///
+/// Instances of [`Mutex`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_mutex`] macros.
+///
+/// # Examples
+///
+/// The following example shows how to declare, allocate and initialise a struct (`Example`) that
+/// contains an inner struct (`Inner`) that is protected by a mutex.
+///
+/// ```
+/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
+///
+/// struct Inner {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// #[pin_data]
+/// struct Example {
+/// c: u32,
+/// #[pin]
+/// d: Mutex<Inner>,
+/// }
+///
+/// impl Example {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// c: 10,
+/// d <- new_mutex!(Inner { a: 20, b: 30 }),
+/// })
+/// }
+/// }
+///
+/// // Allocate a boxed `Example`.
+/// let e = Box::pin_init(Example::new())?;
+/// assert_eq!(e.c, 10);
+/// assert_eq!(e.d.lock().a, 20);
+/// assert_eq!(e.d.lock().b, 30);
+/// ```
+///
+/// The following example shows how to use interior mutability to modify the contents of a struct
+/// protected by a mutex despite only having a shared reference:
+///
+/// ```
+/// use kernel::sync::Mutex;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn example(m: &Mutex<Example>) {
+/// let mut guard = m.lock();
+/// guard.a += 10;
+/// guard.b += 20;
+/// }
+/// ```
+///
+/// [`struct mutex`]: ../../../../include/linux/mutex.h
+pub type Mutex<T> = super::Lock<T, MutexBackend>;
+
+/// A kernel `struct mutex` lock backend.
+pub struct MutexBackend;
+
+// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion.
+unsafe impl super::Backend for MutexBackend {
+ type State = bindings::mutex;
+ type GuardState = ();
+
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ ) {
+ // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+ // `key` are valid for read indefinitely.
+ unsafe { bindings::__mutex_init(ptr, name, key) }
+ }
+
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
+ // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+ // memory, and that it has been initialised before.
+ unsafe { bindings::mutex_lock(ptr) };
+ }
+
+ unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
+ // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
+ // caller is the owner of the mutex.
+ unsafe { bindings::mutex_unlock(ptr) };
+ }
+}
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
new file mode 100644
index 000000000000..979b56464a4e
--- /dev/null
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel spinlock.
+//!
+//! This module allows Rust code to use the kernel's `spinlock_t`.
+
+use crate::bindings;
+
+/// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class.
+///
+/// It uses the name if one is given, otherwise it generates one based on the file name and line
+/// number.
+#[macro_export]
+macro_rules! new_spinlock {
+ ($inner:expr $(, $name:literal)? $(,)?) => {
+ $crate::sync::SpinLock::new(
+ $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
+ };
+}
+
+/// A spinlock.
+///
+/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock, only
+/// one at a time is allowed to progress, the others will block (spinning) until the spinlock is
+/// unlocked, at which point another CPU will be allowed to make progress.
+///
+/// Instances of [`SpinLock`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_spinlock`] macros.
+///
+/// # Examples
+///
+/// The following example shows how to declare, allocate and initialise a struct (`Example`) that
+/// contains an inner struct (`Inner`) that is protected by a spinlock.
+///
+/// ```
+/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock};
+///
+/// struct Inner {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// #[pin_data]
+/// struct Example {
+/// c: u32,
+/// #[pin]
+/// d: SpinLock<Inner>,
+/// }
+///
+/// impl Example {
+/// fn new() -> impl PinInit<Self> {
+/// pin_init!(Self {
+/// c: 10,
+/// d <- new_spinlock!(Inner { a: 20, b: 30 }),
+/// })
+/// }
+/// }
+///
+/// // Allocate a boxed `Example`.
+/// let e = Box::pin_init(Example::new())?;
+/// assert_eq!(e.c, 10);
+/// assert_eq!(e.d.lock().a, 20);
+/// assert_eq!(e.d.lock().b, 30);
+/// ```
+///
+/// The following example shows how to use interior mutability to modify the contents of a struct
+/// protected by a spinlock despite only having a shared reference:
+///
+/// ```
+/// use kernel::sync::SpinLock;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn example(m: &SpinLock<Example>) {
+/// let mut guard = m.lock();
+/// guard.a += 10;
+/// guard.b += 20;
+/// }
+/// ```
+///
+/// [`spinlock_t`]: ../../../../include/linux/spinlock.h
+pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
+
+/// A kernel `spinlock_t` lock backend.
+pub struct SpinLockBackend;
+
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
+// default implementation that always calls the same locking method.
+unsafe impl super::Backend for SpinLockBackend {
+ type State = bindings::spinlock_t;
+ type GuardState = ();
+
+ unsafe fn init(
+ ptr: *mut Self::State,
+ name: *const core::ffi::c_char,
+ key: *mut bindings::lock_class_key,
+ ) {
+ // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and
+ // `key` are valid for read indefinitely.
+ unsafe { bindings::__spin_lock_init(ptr, name, key) }
+ }
+
+ unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
+ // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+ // memory, and that it has been initialised before.
+ unsafe { bindings::spin_lock(ptr) }
+ }
+
+ unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
+ // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
+ // caller is the owner of the spinlock.
+ unsafe { bindings::spin_unlock(ptr) }
+ }
+}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
new file mode 100644
index 000000000000..b17ee5cd98f3
--- /dev/null
+++ b/rust/kernel/sync/locked_by.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A wrapper for data protected by a lock that does not wrap it.
+
+use super::{lock::Backend, lock::Lock};
+use crate::build_assert;
+use core::{cell::UnsafeCell, mem::size_of, ptr};
+
+/// Allows access to some data to be serialised by a lock that does not wrap it.
+///
+/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
+/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
+/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// to be protected by the same lock.
+///
+/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
+/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence
+/// refers to the wrong instance of the lock.
+///
+/// # Examples
+///
+/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
+/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in
+/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at
+/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also
+/// locked; we enforce at run time that the right `InnerDirectory` is locked.
+///
+/// ```
+/// use kernel::sync::{LockedBy, Mutex};
+///
+/// struct InnerFile {
+/// bytes_used: u64,
+/// }
+///
+/// struct File {
+/// _ino: u32,
+/// inner: LockedBy<InnerFile, InnerDirectory>,
+/// }
+///
+/// struct InnerDirectory {
+/// /// The sum of the bytes used by all files.
+/// bytes_used: u64,
+/// _files: Vec<File>,
+/// }
+///
+/// struct Directory {
+/// _ino: u32,
+/// inner: Mutex<InnerDirectory>,
+/// }
+///
+/// /// Prints `bytes_used` from both the directory and file.
+/// fn print_bytes_used(dir: &Directory, file: &File) {
+/// let guard = dir.inner.lock();
+/// let inner_file = file.inner.access(&guard);
+/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used);
+/// }
+///
+/// /// Increments `bytes_used` for both the directory and file.
+/// fn inc_bytes_used(dir: &Directory, file: &File) {
+/// let mut guard = dir.inner.lock();
+/// guard.bytes_used += 10;
+///
+/// let file_inner = file.inner.access_mut(&mut guard);
+/// file_inner.bytes_used += 10;
+/// }
+///
+/// /// Creates a new file.
+/// fn new_file(ino: u32, dir: &Directory) -> File {
+/// File {
+/// _ino: ino,
+/// inner: LockedBy::new(&dir.inner, InnerFile { bytes_used: 0 }),
+/// }
+/// }
+/// ```
+pub struct LockedBy<T: ?Sized, U: ?Sized> {
+ owner: *const U,
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, U: ?Sized> Send for LockedBy<T, U> {}
+
+// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {}
+
+impl<T, U> LockedBy<T, U> {
+ /// Constructs a new instance of [`LockedBy`].
+ ///
+ /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+ /// that the right owner is being used to access the protected data. If the owner is freed, the
+ /// data becomes inaccessible; if another instance of the owner is allocated *on the same
+ /// memory location*, the data becomes accessible again: none of this affects memory safety
+ /// because in any case at most one thread (or CPU) can access the protected data at a time.
+ pub fn new<B: Backend>(owner: &Lock<U, B>, data: T) -> Self {
+ build_assert!(
+ size_of::<Lock<U, B>>() > 0,
+ "The lock type cannot be a ZST because it may be impossible to distinguish instances"
+ );
+ Self {
+ owner: owner.data.get(),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized, U> LockedBy<T, U> {
+ /// Returns a reference to the protected data when the caller provides evidence (via a
+ /// reference) that the owner is locked.
+ ///
+ /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&U` that matches
+ /// the data protected by the lock without actually holding it.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `owner` is different from the data protected by the lock used in
+ /// [`new`](LockedBy::new).
+ pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
+ build_assert!(
+ size_of::<U>() > 0,
+ "`U` cannot be a ZST because `owner` wouldn't be unique"
+ );
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that the owner is locked.
+ unsafe { &*self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable owner) that the owner is locked mutably.
+ ///
+ /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&mut U` that
+ /// matches the data protected by the lock without actually holding it.
+ ///
+ /// Showing a mutable reference to the owner is sufficient because we know no other references
+ /// can exist to it.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `owner` is different from the data protected by the lock used in
+ /// [`new`](LockedBy::new).
+ pub fn access_mut<'a>(&'a self, owner: &'a mut U) -> &'a mut T {
+ build_assert!(
+ size_of::<U>() > 0,
+ "`U` cannot be a ZST because `owner` wouldn't be unique"
+ );
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that there is only one reference to the owner.
+ unsafe { &mut *self.data.get() }
+ }
+}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
new file mode 100644
index 000000000000..526d29a0ae27
--- /dev/null
+++ b/rust/kernel/task.rs
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Tasks (threads and processes).
+//!
+//! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h).
+
+use crate::{bindings, types::Opaque};
+use core::{marker::PhantomData, ops::Deref, ptr};
+
+/// Returns the currently running task.
+#[macro_export]
+macro_rules! current {
+ () => {
+ // SAFETY: Deref + addr-of below create a temporary `TaskRef` that cannot outlive the
+ // caller.
+ unsafe { &*$crate::task::Task::current() }
+ };
+}
+
+/// Wraps the kernel's `struct task_struct`.
+///
+/// # Invariants
+///
+/// All instances are valid tasks created by the C portion of the kernel.
+///
+/// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures
+/// that the allocation remains valid at least until the matching call to `put_task_struct`.
+///
+/// # Examples
+///
+/// The following is an example of getting the PID of the current thread with zero additional cost
+/// when compared to the C version:
+///
+/// ```
+/// let pid = current!().pid();
+/// ```
+///
+/// Getting the PID of the current process, also zero additional cost:
+///
+/// ```
+/// let pid = current!().group_leader().pid();
+/// ```
+///
+/// Getting the current task and storing it in some struct. The reference count is automatically
+/// incremented when creating `State` and decremented when it is dropped:
+///
+/// ```
+/// use kernel::{task::Task, types::ARef};
+///
+/// struct State {
+/// creator: ARef<Task>,
+/// index: u32,
+/// }
+///
+/// impl State {
+/// fn new() -> Self {
+/// Self {
+/// creator: current!().into(),
+/// index: 0,
+/// }
+/// }
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Task(pub(crate) Opaque<bindings::task_struct>);
+
+// SAFETY: It's OK to access `Task` through references from other threads because we're either
+// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
+// synchronised by C code (e.g., `signal_pending`).
+unsafe impl Sync for Task {}
+
+/// The type of process identifiers (PIDs).
+type Pid = bindings::pid_t;
+
+impl Task {
+ /// Returns a task reference for the currently executing task/thread.
+ ///
+ /// The recommended way to get the current task/thread is to use the
+ /// [`current`](crate::current) macro because it is safe.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the returned object doesn't outlive the current task/thread.
+ pub unsafe fn current() -> impl Deref<Target = Task> {
+ struct TaskRef<'a> {
+ task: &'a Task,
+ _not_send: PhantomData<*mut ()>,
+ }
+
+ impl Deref for TaskRef<'_> {
+ type Target = Task;
+
+ fn deref(&self) -> &Self::Target {
+ self.task
+ }
+ }
+
+ // SAFETY: Just an FFI call with no additional safety requirements.
+ let ptr = unsafe { bindings::get_current() };
+
+ TaskRef {
+ // SAFETY: If the current thread is still running, the current task is valid. Given
+ // that `TaskRef` is not `Send`, we know it cannot be transferred to another thread
+ // (where it could potentially outlive the caller).
+ task: unsafe { &*ptr.cast() },
+ _not_send: PhantomData,
+ }
+ }
+
+ /// Returns the group leader of the given task.
+ pub fn group_leader(&self) -> &Task {
+ // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
+ // have a valid group_leader.
+ let ptr = unsafe { *ptr::addr_of!((*self.0.get()).group_leader) };
+
+ // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
+ // and given that a task has a reference to its group leader, we know it must be valid for
+ // the lifetime of the returned task reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns the PID of the given task.
+ pub fn pid(&self) -> Pid {
+ // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always
+ // have a valid pid.
+ unsafe { *ptr::addr_of!((*self.0.get()).pid) }
+ }
+
+ /// Determines whether the given task has pending signals.
+ pub fn signal_pending(&self) -> bool {
+ // SAFETY: By the type invariant, we know that `self.0` is valid.
+ unsafe { bindings::signal_pending(self.0.get()) != 0 }
+ }
+
+ /// Wakes up the task.
+ pub fn wake_up(&self) {
+ // SAFETY: By the type invariant, we know that `self.0.get()` is non-null and valid.
+ // And `wake_up_process` is safe to be called for any valid task, even if the task is
+ // running.
+ unsafe { bindings::wake_up_process(self.0.get()) };
+ }
+}
+
+// SAFETY: The type invariants guarantee that `Task` is always ref-counted.
+unsafe impl crate::types::AlwaysRefCounted for Task {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_task_struct(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
+ }
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 9d0fdbc55843..29db59d6119a 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -2,11 +2,14 @@
//! Kernel types.
+use crate::init::{self, PinInit};
use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
+ marker::PhantomData,
mem::MaybeUninit,
ops::{Deref, DerefMut},
+ ptr::NonNull,
};
/// Used to transfer ownership to and from foreign (non-Rust) languages.
@@ -234,10 +237,142 @@ impl<T> Opaque<T> {
Self(MaybeUninit::uninit())
}
+ /// Creates a pin-initializer from the given initializer closure.
+ ///
+ /// The returned initializer calls the given closure with the pointer to the inner `T` of this
+ /// `Opaque`. Since this memory is uninitialized, the closure is not allowed to read from it.
+ ///
+ /// This function is safe, because the `T` inside of an `Opaque` is allowed to be
+ /// uninitialized. Additionally, access to the inner `T` requires `unsafe`, so the caller needs
+ /// to verify at that point that the inner value is valid.
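+ ///
+ /// For instance (mirroring `CondVar::new` in this series), one can write
+ /// `Opaque::ffi_init(|slot| unsafe { bindings::__init_waitqueue_head(slot, name, key) })`
+ /// to let C code perform the initialisation.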
+ pub fn ffi_init(init_func: impl FnOnce(*mut T)) -> impl PinInit<Self> {
+ // SAFETY: We contain a `MaybeUninit`, so it is OK for the `init_func` to not fully
+ // initialize the `T`.
+ unsafe {
+ init::pin_init_from_closure::<_, ::core::convert::Infallible>(move |slot| {
+ init_func(Self::raw_get(slot));
+ Ok(())
+ })
+ }
+ }
+
/// Returns a raw pointer to the opaque data.
pub fn get(&self) -> *mut T {
UnsafeCell::raw_get(self.0.as_ptr())
}
+
+ /// Gets the value behind `this`.
+ ///
+ /// This function is useful to get access to the value without creating intermediate
+ /// references.
+ pub const fn raw_get(this: *const Self) -> *mut T {
+ UnsafeCell::raw_get(this.cast::<UnsafeCell<T>>())
+ }
+}
+
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own custom ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef<T>`].
+///
+/// This is usually implemented by wrappers to existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
+/// instances of a type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in memory
+/// at least until matching decrements are performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
+pub unsafe trait AlwaysRefCounted {
+ /// Increments the reference count on the object.
+ fn inc_ref(&self);
+
+ /// Decrements the reference count on the object.
+ ///
+ /// Frees the object when the count reaches zero.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there was a previous matching increment to the reference count,
+ /// and that the object is no longer used after its reference count is decremented (as it may
+ /// result in the object being freed), unless the caller owns another increment on the refcount
+ /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+ /// [`AlwaysRefCounted::dec_ref`] once).
+ unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped. It is also automatically incremented when a new instance is created via
+/// [`ARef::clone`].
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
+/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
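+///
+/// # Examples
+///
+/// A sketch, assuming some `T: AlwaysRefCounted` and a shared reference `t: &T`:
+///
+/// ```ignore
+/// let owned: ARef<T> = t.into(); // Increments the refcount.
+/// drop(owned); // Decrements the refcount, possibly freeing the object.
+/// ```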
+pub struct ARef<T: AlwaysRefCounted> {
+ ptr: NonNull<T>,
+ _p: PhantomData<T>,
+}
+
+impl<T: AlwaysRefCounted> ARef<T> {
+ /// Creates a new instance of [`ARef`].
+ ///
+ /// It takes over an increment of the reference count on the underlying object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the reference count was incremented at least once, and that they
+ /// are properly relinquishing one increment. That is, if there is only one increment, callers
+ /// must not use the underlying object anymore -- it is only safe to do so via the newly
+ /// created [`ARef`].
+ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+ // INVARIANT: The safety requirements guarantee that the new instance now owns the
+ // increment on the refcount.
+ Self {
+ ptr,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: AlwaysRefCounted> Clone for ARef<T> {
+ fn clone(&self) -> Self {
+ self.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(self.ptr) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Deref for ARef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the object is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
+ fn from(b: &T) -> Self {
+ b.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(NonNull::from(b)) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Drop for ARef<T> {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
+ // decrement.
+ unsafe { T::dec_ref(self.ptr) };
+ }
}
/// A sum type that always holds either a value of type `L` or `R`.
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
index cf7ad950dc1e..b2bdd4d8c958 100644
--- a/rust/macros/helpers.rs
+++ b/rust/macros/helpers.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use proc_macro::{token_stream, TokenTree};
+use proc_macro::{token_stream, Group, TokenTree};
pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
if let Some(TokenTree::Ident(ident)) = it.next() {
@@ -56,6 +56,14 @@ pub(crate) fn expect_string_ascii(it: &mut token_stream::IntoIter) -> String {
string
}
+pub(crate) fn expect_group(it: &mut token_stream::IntoIter) -> Group {
+ if let TokenTree::Group(group) = it.next().expect("Reached end of token stream for Group") {
+ group
+ } else {
+ panic!("Expected Group");
+ }
+}
+
pub(crate) fn expect_end(it: &mut token_stream::IntoIter) {
if it.next().is_some() {
panic!("Expected end");
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index c1d385e345b9..3fc74cb4ea19 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -2,9 +2,13 @@
//! Crate for all kernel procedural macros.
+#[macro_use]
+mod quote;
mod concat_idents;
mod helpers;
mod module;
+mod pin_data;
+mod pinned_drop;
mod vtable;
use proc_macro::TokenStream;
@@ -166,3 +170,79 @@ pub fn vtable(attr: TokenStream, ts: TokenStream) -> TokenStream {
pub fn concat_idents(ts: TokenStream) -> TokenStream {
concat_idents::concat_idents(ts)
}
+
+/// Used to specify the pinning information of the fields of a struct.
+///
+/// This is somewhat similar in purpose to
+/// [pin-project-lite](https://crates.io/crates/pin-project-lite).
+/// Place this macro on a struct definition and then `#[pin]` in front of the attributes of each
+/// field you want to structurally pin.
+///
+/// This macro enables the use of the [`pin_init!`] macro. When pin-initializing a `struct`,
+/// `#[pin]` determines which kind of initializer each field requires.
+///
+/// If your `struct` implements `Drop`, then you need to pass `PinnedDrop` as the argument to this
+/// macro, and change your `Drop` implementation to `PinnedDrop` annotated with
+/// `#[`[`macro@pinned_drop`]`]`, since dropping pinned values requires extra care.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// #[pin_data]
+/// struct DriverData {
+/// #[pin]
+/// queue: Mutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// }
+/// ```
+///
+/// ```rust,ignore
+/// #[pin_data(PinnedDrop)]
+/// struct DriverData {
+/// #[pin]
+/// queue: Mutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// raw_info: *mut Info,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for DriverData {
+/// fn drop(self: Pin<&mut Self>) {
+/// unsafe { bindings::destroy_info(self.raw_info) };
+/// }
+/// }
+/// ```
+///
+/// [`pin_init!`]: ../kernel/macro.pin_init.html
+// ^ cannot use direct link, since `kernel` is not a dependency of `macros`.
+#[proc_macro_attribute]
+pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
+ pin_data::pin_data(inner, item)
+}
+
+/// Used to implement `PinnedDrop` safely.
+///
+/// Only works on structs that are annotated via `#[`[`macro@pin_data`]`]`.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// #[pin_data(PinnedDrop)]
+/// struct DriverData {
+/// #[pin]
+/// queue: Mutex<Vec<Command>>,
+/// buf: Box<[u8; 1024 * 1024]>,
+/// raw_info: *mut Info,
+/// }
+///
+/// #[pinned_drop]
+/// impl PinnedDrop for DriverData {
+/// fn drop(self: Pin<&mut Self>) {
+/// unsafe { bindings::destroy_info(self.raw_info) };
+/// }
+/// }
+/// ```
+#[proc_macro_attribute]
+pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
+ pinned_drop::pinned_drop(args, input)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index a7e363c2b044..fb1244f8c2e6 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -1,9 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
use crate::helpers::*;
-use proc_macro::{token_stream, Literal, TokenStream, TokenTree};
+use proc_macro::{token_stream, Delimiter, Literal, TokenStream, TokenTree};
use std::fmt::Write;
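+/// Parses a bracketed, comma-separated list of ASCII string literals, e.g. `["foo", "bar"]`.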
+fn expect_string_array(it: &mut token_stream::IntoIter) -> Vec<String> {
+ let group = expect_group(it);
+ assert_eq!(group.delimiter(), Delimiter::Bracket);
+ let mut values = Vec::new();
+ let mut it = group.stream().into_iter();
+
+ while let Some(val) = try_string(&mut it) {
+ assert!(val.is_ascii(), "Expected ASCII string");
+ values.push(val);
+ match it.next() {
+ Some(TokenTree::Punct(punct)) => assert_eq!(punct.as_char(), ','),
+ None => break,
+ _ => panic!("Expected ',' or end of array"),
+ }
+ }
+ values
+}
+
struct ModInfoBuilder<'a> {
module: &'a str,
counter: usize,
@@ -78,7 +96,7 @@ struct ModuleInfo {
name: String,
author: Option<String>,
description: Option<String>,
- alias: Option<String>,
+ alias: Option<Vec<String>>,
}
impl ModuleInfo {
@@ -112,7 +130,7 @@ impl ModuleInfo {
"author" => info.author = Some(expect_string(it)),
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
- "alias" => info.alias = Some(expect_string_ascii(it)),
+ "alias" => info.alias = Some(expect_string_array(it)),
_ => panic!(
"Unknown key \"{}\". Valid keys are: {:?}.",
key, EXPECTED_KEYS
@@ -163,8 +181,10 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
modinfo.emit("description", &description);
}
modinfo.emit("license", &info.license);
- if let Some(alias) = info.alias {
- modinfo.emit("alias", &alias);
+ if let Some(aliases) = info.alias {
+ for alias in aliases {
+ modinfo.emit("alias", &alias);
+ }
}
// Built-in modules also export the `file` modinfo string.
@@ -258,7 +278,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
return 0;
}}
Err(e) => {{
- return e.to_kernel_errno();
+ return e.to_errno();
}}
}}
}}
diff --git a/rust/macros/pin_data.rs b/rust/macros/pin_data.rs
new file mode 100644
index 000000000000..954149d77181
--- /dev/null
+++ b/rust/macros/pin_data.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use proc_macro::{Punct, Spacing, TokenStream, TokenTree};
+
+pub(crate) fn pin_data(args: TokenStream, input: TokenStream) -> TokenStream {
+ // This proc-macro only does some pre-parsing and then delegates the actual parsing to
+ // `kernel::__pin_data!`.
+ //
+ // Here we only collect the generics, since parsing them in declarative macros is very
+ // elaborate. We also do not need to analyse their structure; we only need to collect them.
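+ //
+ // For example (illustrative), for `struct Foo<'a, T: Bound>` this collects
+ // `impl_generics = 'a, T: Bound` and `ty_generics = 'a, T,`, while `rest` receives the
+ // remaining tokens, such as the `struct` keyword, the name and the body.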
+
+ // `impl_generics`, the declared generics with their bounds.
+ let mut impl_generics = vec![];
+ // Only the names of the generics, without any bounds.
+ let mut ty_generics = vec![];
+ // Tokens not related to the generics, e.g. the `struct` token.
+ let mut rest = vec![];
+ // The current nesting depth of `<`/`>` pairs.
+ let mut nesting = 0;
+ let mut toks = input.into_iter();
+ // Whether we are at the beginning of a generic parameter.
+ let mut at_start = true;
+ for tt in &mut toks {
+ match tt.clone() {
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ if nesting >= 1 {
+ impl_generics.push(tt);
+ }
+ nesting += 1;
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ if nesting == 0 {
+ break;
+ } else {
+ nesting -= 1;
+ if nesting >= 1 {
+ impl_generics.push(tt);
+ }
+ if nesting == 0 {
+ break;
+ }
+ }
+ }
+ tt => {
+ if nesting == 1 {
+ match &tt {
+ TokenTree::Ident(i) if i.to_string() == "const" => {}
+ TokenTree::Ident(_) if at_start => {
+ ty_generics.push(tt.clone());
+ ty_generics.push(TokenTree::Punct(Punct::new(',', Spacing::Alone)));
+ at_start = false;
+ }
+ TokenTree::Punct(p) if p.as_char() == ',' => at_start = true,
+ TokenTree::Punct(p) if p.as_char() == '\'' && at_start => {
+ ty_generics.push(tt.clone());
+ }
+ _ => {}
+ }
+ }
+ if nesting >= 1 {
+ impl_generics.push(tt);
+ } else if nesting == 0 {
+ rest.push(tt);
+ }
+ }
+ }
+ }
+ rest.extend(toks);
+ // This should be the body of the struct `{...}`.
+ let last = rest.pop();
+ quote!(::kernel::__pin_data! {
+ parse_input:
+ @args(#args),
+ @sig(#(#rest)*),
+ @impl_generics(#(#impl_generics)*),
+ @ty_generics(#(#ty_generics)*),
+ @body(#last),
+ })
+}
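
To make the collection above concrete, a hedged sketch (hypothetical struct,
no `where` clause) of how the tokens end up split before being handed to
`kernel::__pin_data!`:

    // Input: struct Foo<'a, T: Bar> { x: &'a T }
    //
    // @sig(struct Foo)            tokens outside the generics
    // @impl_generics('a, T: Bar)  the declared generics with their bounds
    // @ty_generics('a, T,)        the names only, as used in Foo<'a, T>
    // @body({ x: &'a T })         the final group popped off `rest`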
diff --git a/rust/macros/pinned_drop.rs b/rust/macros/pinned_drop.rs
new file mode 100644
index 000000000000..88fb72b20660
--- /dev/null
+++ b/rust/macros/pinned_drop.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use proc_macro::{TokenStream, TokenTree};
+
+pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream {
+ let mut toks = input.into_iter().collect::<Vec<_>>();
+ assert!(!toks.is_empty());
+ // Ensure that we have an `impl` item.
+ assert!(matches!(&toks[0], TokenTree::Ident(i) if i.to_string() == "impl"));
+ // Ensure that we are implementing `PinnedDrop`.
+ let mut nesting: usize = 0;
+ let mut pinned_drop_idx = None;
+ for (i, tt) in toks.iter().enumerate() {
+ match tt {
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ nesting += 1;
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ nesting = nesting.checked_sub(1).unwrap();
+ continue;
+ }
+ _ => {}
+ }
+ if i >= 1 && nesting == 0 {
+ // Found the end of the generics; this should be `PinnedDrop`.
+ assert!(
+ matches!(tt, TokenTree::Ident(i) if i.to_string() == "PinnedDrop"),
+ "expected 'PinnedDrop', found: '{:?}'",
+ tt
+ );
+ pinned_drop_idx = Some(i);
+ break;
+ }
+ }
+ let idx = pinned_drop_idx
+ .expect("Expected an `impl` block implementing `PinnedDrop`.");
+ // Fully qualify the `PinnedDrop` trait path, so as to avoid any tampering.
+ toks.splice(idx..idx, quote!(::kernel::init::));
+ // Take the `{}` body and call the declarative macro.
+ if let Some(TokenTree::Group(last)) = toks.pop() {
+ let last = last.stream();
+ quote!(::kernel::__pinned_drop! {
+ @impl_sig(#(#toks)*),
+ @impl_body(#last),
+ })
+ } else {
+ TokenStream::from_iter(toks)
+ }
+}
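
In token terms, a hedged sketch of the rewrite (hypothetical type `Foo`):

    // Input:
    //     impl PinnedDrop for Foo { fn drop(self: Pin<&mut Self>) { ... } }
    // Output:
    //     ::kernel::__pinned_drop! {
    //         @impl_sig(impl ::kernel::init::PinnedDrop for Foo),
    //         @impl_body(fn drop(self: Pin<&mut Self>) { ... }),
    //     }

Fully qualifying the trait means the declarative macro always sees the
kernel's `PinnedDrop` rather than a user-defined look-alike.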
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
new file mode 100644
index 000000000000..c8e08b3c1e4c
--- /dev/null
+++ b/rust/macros/quote.rs
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use proc_macro::{TokenStream, TokenTree};
+
+pub(crate) trait ToTokens {
+ fn to_tokens(&self, tokens: &mut TokenStream);
+}
+
+impl<T: ToTokens> ToTokens for Option<T> {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ if let Some(v) = self {
+ v.to_tokens(tokens);
+ }
+ }
+}
+
+impl ToTokens for proc_macro::Group {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.extend([TokenTree::from(self.clone())]);
+ }
+}
+
+impl ToTokens for TokenTree {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.extend([self.clone()]);
+ }
+}
+
+impl ToTokens for TokenStream {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.extend(self.clone());
+ }
+}
+
+/// Converts tokens into [`proc_macro::TokenStream`] and performs variable interpolations with
+/// the given span.
+///
+/// This is similar to the
+/// [`quote_spanned!`](https://docs.rs/quote/latest/quote/macro.quote_spanned.html) macro from the
+/// `quote` crate, but it provides only the functionality needed by the current `macros` crate.
+macro_rules! quote_spanned {
+ ($span:expr => $($tt:tt)*) => {
+ #[allow(clippy::vec_init_then_push)]
+ {
+ let mut tokens = ::std::vec::Vec::new();
+ let span = $span;
+ quote_spanned!(@proc tokens span $($tt)*);
+ ::proc_macro::TokenStream::from_iter(tokens)
+ }};
+ (@proc $v:ident $span:ident) => {};
+ (@proc $v:ident $span:ident #$id:ident $($tt:tt)*) => {
+ let mut ts = ::proc_macro::TokenStream::new();
+ $crate::quote::ToTokens::to_tokens(&$id, &mut ts);
+ $v.extend(ts);
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident #(#$id:ident)* $($tt:tt)*) => {
+ for token in $id {
+ let mut ts = ::proc_macro::TokenStream::new();
+ $crate::quote::ToTokens::to_tokens(&token, &mut ts);
+ $v.extend(ts);
+ }
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident ( $($inner:tt)* ) $($tt:tt)*) => {
+ let mut tokens = ::std::vec::Vec::new();
+ quote_spanned!(@proc tokens $span $($inner)*);
+ $v.push(::proc_macro::TokenTree::Group(::proc_macro::Group::new(
+ ::proc_macro::Delimiter::Parenthesis,
+ ::proc_macro::TokenStream::from_iter(tokens)
+ )));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident [ $($inner:tt)* ] $($tt:tt)*) => {
+ let mut tokens = ::std::vec::Vec::new();
+ quote_spanned!(@proc tokens $span $($inner)*);
+ $v.push(::proc_macro::TokenTree::Group(::proc_macro::Group::new(
+ ::proc_macro::Delimiter::Bracket,
+ ::proc_macro::TokenStream::from_iter(tokens)
+ )));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident { $($inner:tt)* } $($tt:tt)*) => {
+ let mut tokens = ::std::vec::Vec::new();
+ quote_spanned!(@proc tokens $span $($inner)*);
+ $v.push(::proc_macro::TokenTree::Group(::proc_macro::Group::new(
+ ::proc_macro::Delimiter::Brace,
+ ::proc_macro::TokenStream::from_iter(tokens)
+ )));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident :: $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(':', ::proc_macro::Spacing::Joint)
+ ));
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(':', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident : $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(':', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident , $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(',', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident @ $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('@', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident ! $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('!', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+}
+
+/// Converts tokens into [`proc_macro::TokenStream`] and performs variable interpolations with
+/// the mixed-site span ([`Span::mixed_site()`]).
+///
+/// This is similar to the [`quote!`](https://docs.rs/quote/latest/quote/macro.quote.html) macro
+/// from the `quote` crate, but it provides only the functionality needed by the current
+/// `macros` crate.
+///
+/// [`Span::mixed_site()`]: https://doc.rust-lang.org/proc_macro/struct.Span.html#method.mixed_site
+macro_rules! quote {
+ ($($tt:tt)*) => {
+ quote_spanned!(::proc_macro::Span::mixed_site() => $($tt)*)
+ }
+}
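
A hedged usage sketch (hypothetical identifiers; like all `proc_macro` code,
this only runs inside a proc-macro crate). Note that the macro deliberately
accepts only the token kinds matched above, such as identifiers, groups,
`#var` interpolations, `::`, `,`, `@` and `!`:

    use proc_macro::{Ident, Span, TokenStream, TokenTree};

    let name = TokenTree::Ident(Ident::new("foo", Span::mixed_site()));
    let fields: Vec<TokenTree> = vec![];
    // Expands to `mod foo { struct Bar { } }` since `fields` is empty.
    let ts: TokenStream = quote!(mod #name { struct Bar { #(#fields)* } });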
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
new file mode 100644
index 000000000000..29f69f3a52de
--- /dev/null
+++ b/rust/uapi/lib.rs
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! UAPI Bindings.
+//!
+//! Contains the bindings generated by `bindgen` for UAPI interfaces.
+//!
+//! This crate may be used directly by drivers that need to interact with
+//! userspace APIs.
+
+#![no_std]
+#![feature(core_ffi_c)]
+// See <https://github.com/rust-lang/rust-bindgen/issues/1651>.
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+#![allow(
+ clippy::all,
+ missing_docs,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn
+)]
+
+include!(concat!(env!("OBJTREE"), "/rust/uapi/uapi_generated.rs"));
diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h
new file mode 100644
index 000000000000..301f5207f023
--- /dev/null
+++ b/rust/uapi/uapi_helper.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header that gathers the UAPI headers for which Rust bindings
+ * will be automatically generated by `bindgen`.
+ *
+ * Sorted alphabetically.
+ */
+
+#include <uapi/asm-generic/ioctl.h>
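
This pulls in the `_IOC_*` definitions so that the generated bindings and the
new `kernel::ioctl` helpers agree on the ioctl number layout. A hedged sketch
of the Rust side (the magic and number values are made up):

    // Hypothetical: a read-only ioctl transferring a u64, magic 'x', number 0x01.
    const MY_GET_VALUE: u32 = kernel::ioctl::_IOR::<u64>(b'x' as u32, 0x01);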