Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                      74
-rw-r--r--  rust/bindings/bindings_helper.h     1
-rw-r--r--  rust/bindings/lib.rs                1
-rw-r--r--  rust/helpers.c                     34
-rw-r--r--  rust/kernel/alloc.rs               17
-rw-r--r--  rust/kernel/init.rs                13
-rw-r--r--  rust/kernel/lib.rs                  2
-rw-r--r--  rust/kernel/page.rs               250
-rw-r--r--  rust/kernel/types.rs               64
-rw-r--r--  rust/kernel/uaccess.rs            388
-rw-r--r--  rust/kernel/workqueue.rs           16
-rw-r--r--  rust/macros/lib.rs                 45
-rw-r--r--  rust/macros/module.rs              18
-rw-r--r--  rust/uapi/lib.rs                    1
14 files changed, 835 insertions, 89 deletions
diff --git a/rust/Makefile b/rust/Makefile
index 83f675adbfab..1f10f92737f2 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -44,17 +44,10 @@ rustc_sysroot := $(shell MAKEFLAGS= $(RUSTC) $(rust_flags) --print sysroot)
rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
-ifeq ($(quiet),silent_)
-cargo_quiet=-q
+ifneq ($(quiet),)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
rustdoc_test_kernel_quiet=>/dev/null
-else ifeq ($(quiet),quiet_)
-rust_test_quiet=-q
-rustdoc_test_quiet=--test-args -q
-rustdoc_test_kernel_quiet=>/dev/null
-else
-cargo_quiet=--verbose
endif
core-cfgs = \
@@ -135,22 +128,21 @@ quiet_cmd_rustc_test_library = RUSTC TL $<
@$(objtree)/include/generated/rustc_cfg $(rustc_target_flags) \
--crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
--out-dir $(objtree)/$(obj)/test --cfg testlib \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
-rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
+rusttestlib-build_error: $(src)/build_error.rs FORCE
+$(call if_changed,rustc_test_library)
rusttestlib-macros: private rustc_target_flags = --extern proc_macro
rusttestlib-macros: private rustc_test_library_proc = yes
-rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttestlib-macros: $(src)/macros/lib.rs FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
+rusttestlib-bindings: $(src)/bindings/lib.rs FORCE
+$(call if_changed,rustc_test_library)
-rusttestlib-uapi: $(src)/uapi/lib.rs rusttest-prepare FORCE
+rusttestlib-uapi: $(src)/uapi/lib.rs FORCE
+$(call if_changed,rustc_test_library)
quiet_cmd_rustdoc_test = RUSTDOC T $<
@@ -159,7 +151,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
$(RUSTDOC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
- --sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
+ $(rustdoc_test_quiet) \
-L$(objtree)/$(obj)/test --output $(rustdoc_output) \
--crate-name $(subst rusttest-,,$@) $<
@@ -192,7 +184,6 @@ quiet_cmd_rustc_test = RUSTC T $<
$(RUSTC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) --out-dir $(objtree)/$(obj)/test \
- --sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$@) $<; \
$(objtree)/$(obj)/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
@@ -200,60 +191,15 @@ quiet_cmd_rustc_test = RUSTC T $<
rusttest: rusttest-macros rusttest-kernel
-# This prepares a custom sysroot with our custom `alloc` instead of
-# the standard one.
-#
-# This requires several hacks:
-# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
-# including third-party crates that need to be downloaded, plus custom
-# `build.rs` steps. Thus hardcoding things here is not maintainable.
-# - `cargo` knows how to build the standard library, but it is an unstable
-# feature so far (`-Zbuild-std`).
-# - `cargo` only considers the use case of building the standard library
-# to use it in a given package. Thus we need to create a dummy package
-# and pick the generated libraries from there.
-# - The usual ways of modifying the dependency graph in `cargo` do not seem
-# to apply for the `-Zbuild-std` steps, thus we have to mislead it
-# by modifying the sources in the sysroot.
-# - To avoid messing with the user's Rust installation, we create a clone
-# of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
-# steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
-#
-# In the future, we hope to avoid the whole ordeal by either:
-# - Making the `test` crate not depend on `std` (either improving upstream
-# or having our own custom crate).
-# - Making the tests run in kernel space (requires the previous point).
-# - Making `std` and friends be more like a "normal" crate, so that
-# `-Zbuild-std` and related hacks are not needed.
-quiet_cmd_rustsysroot = RUSTSYSROOT
- cmd_rustsysroot = \
- rm -rf $(objtree)/$(obj)/test; \
- mkdir -p $(objtree)/$(obj)/test; \
- cp -a $(rustc_sysroot) $(objtree)/$(obj)/test/sysroot; \
- echo '\#!/bin/sh' > $(objtree)/$(obj)/test/rustc_sysroot; \
- echo "$(RUSTC) --sysroot=$(abspath $(objtree)/$(obj)/test/sysroot) \"\$$@\"" \
- >> $(objtree)/$(obj)/test/rustc_sysroot; \
- chmod u+x $(objtree)/$(obj)/test/rustc_sysroot; \
- $(CARGO) -q new $(objtree)/$(obj)/test/dummy; \
- RUSTC=$(objtree)/$(obj)/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
- test -Zbuild-std --target $(rustc_host_target) \
- --manifest-path $(objtree)/$(obj)/test/dummy/Cargo.toml; \
- rm $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
- cp $(objtree)/$(obj)/test/dummy/target/$(rustc_host_target)/debug/deps/* \
- $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
-
-rusttest-prepare: FORCE
- +$(call if_changed,rustsysroot)
-
rusttest-macros: private rustc_target_flags = --extern proc_macro
rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
-rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+rusttest-macros: $(src)/macros/lib.rs FORCE
+$(call if_changed,rustc_test)
+$(call if_changed,rustdoc_test)
rusttest-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
-rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
+rusttest-kernel: $(src)/kernel/lib.rs \
rusttestlib-build_error rusttestlib-macros rusttestlib-bindings \
rusttestlib-uapi FORCE
+$(call if_changed,rustc_test)
@@ -421,7 +367,7 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
endif
$(obj)/core.o: private skip_clippy = 1
-$(obj)/core.o: private skip_flags = -Dunreachable_pub
+$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
@@ -435,7 +381,7 @@ $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+$(call if_changed_dep,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
-$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
+$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
+$(call if_changed_dep,rustc_library)
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 53c996e4bedf..b940a5777330 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -30,4 +30,5 @@ const gfp_t RUST_CONST_HELPER_GFP_KERNEL = GFP_KERNEL;
const gfp_t RUST_CONST_HELPER_GFP_KERNEL_ACCOUNT = GFP_KERNEL_ACCOUNT;
const gfp_t RUST_CONST_HELPER_GFP_NOWAIT = GFP_NOWAIT;
const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
+const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 40ddaee50d8b..93a1a3fc97bc 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -24,6 +24,7 @@
unsafe_op_in_unsafe_fn
)]
+#[allow(dead_code)]
mod bindings_raw {
// Use glob import here to expose all helpers.
// Symbols defined within the module will take precedence to the glob import.
diff --git a/rust/helpers.c b/rust/helpers.c
index 87ed0a5b6099..92d3c03ae1bd 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -26,6 +26,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errname.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/sched/signal.h>
@@ -40,6 +42,20 @@ __noreturn void rust_helper_BUG(void)
}
EXPORT_SYMBOL_GPL(rust_helper_BUG);
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
+
void rust_helper_mutex_lock(struct mutex *lock)
{
mutex_lock(lock);
@@ -81,6 +97,24 @@ int rust_helper_signal_pending(struct task_struct *t)
}
EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
+
refcount_t rust_helper_REFCOUNT_INIT(int n)
{
return (refcount_t)REFCOUNT_INIT(n);
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index 531b5e471cb1..1966bd407017 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -20,6 +20,13 @@ pub struct AllocError;
#[derive(Clone, Copy)]
pub struct Flags(u32);
+impl Flags {
+ /// Get the raw representation of this flag.
+ pub(crate) fn as_raw(self) -> u32 {
+ self.0
+ }
+}
+
impl core::ops::BitOr for Flags {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
@@ -52,6 +59,14 @@ pub mod flags {
/// This is normally or'd with other flags.
pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);
+ /// Allow the allocation to be in high memory.
+ ///
+ /// Allocations in high memory may not be mapped into the kernel's address space, so this can't
+ /// be used with `kmalloc` and other similar methods.
+ ///
+ /// This is normally or'd with other flags.
+ pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);
+
/// Users can not sleep and need the allocation to succeed.
///
/// A lower watermark is applied to allow access to "atomic reserves". The current
@@ -66,7 +81,7 @@ pub mod flags {
/// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);
- /// Ror kernel allocations that should not stall for direct reclaim, start physical IO or
+ /// For kernel allocations that should not stall for direct reclaim, start physical IO or
/// use any filesystem callback. It is very likely to fail to allocate memory, even for very
/// small allocations.
pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);
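
The new `__GFP_HIGHMEM` flag and the `as_raw()` accessor plug into the existing `Flags` type, whose `BitOr` impl ORs the underlying `gfp_t` values just as `|` does in C. A minimal sketch of how a caller might combine these flags with the `Page` API added later in this series (the helper function name is illustrative, not part of the patch):

```rust
use kernel::alloc::flags::{GFP_KERNEL, __GFP_HIGHMEM, __GFP_ZERO};
use kernel::alloc::AllocError;
use kernel::page::Page;

/// Hypothetical helper: allocate a zeroed page that is allowed to live in highmem.
fn alloc_zeroed_page() -> Result<Page, AllocError> {
    // `Flags | Flags` just ORs the raw gfp_t bits, mirroring
    // `GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO` on the C side.
    Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)
}
```
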
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 68605b633e73..495c09ebe3a3 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -843,11 +843,8 @@ where
let val = unsafe { &mut *slot };
// SAFETY: `slot` is considered pinned.
let val = unsafe { Pin::new_unchecked(val) };
- (self.1)(val).map_err(|e| {
- // SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ // SAFETY: `slot` was initialized above.
+ (self.1)(val).inspect_err(|_| unsafe { core::ptr::drop_in_place(slot) })
}
}
@@ -941,11 +938,9 @@ where
// SAFETY: All requirements fulfilled since this function is `__init`.
unsafe { self.0.__pinned_init(slot)? };
// SAFETY: The above call initialized `slot` and we still have unique access.
- (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ (self.1)(unsafe { &mut *slot }).inspect_err(|_|
// SAFETY: `slot` was initialized above.
- unsafe { core::ptr::drop_in_place(slot) };
- e
- })
+ unsafe { core::ptr::drop_in_place(slot) })
}
}
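
The two hunks above replace the `map_err(|e| { ...; e })` pattern with `Result::inspect_err`, which runs a closure on a reference to the error and passes the error through unchanged. An illustrative, userspace-only sketch of the same refactor (not kernel code):

```rust
fn parse_port(input: &str) -> Result<u16, std::num::ParseIntError> {
    input
        .trim()
        .parse::<u16>()
        // Runs only on the error path and returns the error untouched, so the
        // explicit `|e| { ...; e }` rebinding removed by this patch is unnecessary.
        .inspect_err(|e| eprintln!("failed to parse {input:?}: {e}"))
}

fn main() {
    assert_eq!(parse_port("8080"), Ok(8080));
    assert!(parse_port("not-a-port").is_err());
}
```
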
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index e6b7d3a80bbc..274bdc1b0a82 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -40,6 +40,7 @@ pub mod ioctl;
pub mod kunit;
#[cfg(CONFIG_NET)]
pub mod net;
+pub mod page;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -50,6 +51,7 @@ pub mod sync;
pub mod task;
pub mod time;
pub mod types;
+pub mod uaccess;
pub mod workqueue;
#[doc(hidden)]
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
new file mode 100644
index 000000000000..208a006d587c
--- /dev/null
+++ b/rust/kernel/page.rs
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+
+use crate::{
+ alloc::{AllocError, Flags},
+ bindings,
+ error::code::*,
+ error::Result,
+ uaccess::UserSliceReader,
+};
+use core::ptr::{self, NonNull};
+
+/// A bitwise shift for the page size.
+pub const PAGE_SHIFT: usize = bindings::PAGE_SHIFT as usize;
+
+/// The number of bytes in a page.
+pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;
+
+/// A bitmask that gives the page containing a given address.
+pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
+
+/// A pointer to a page that owns the page allocation.
+///
+/// # Invariants
+///
+/// The pointer is valid, and has ownership over the page.
+pub struct Page {
+ page: NonNull<bindings::page>,
+}
+
+// SAFETY: Pages have no logic that relies on them staying on a given thread, so moving them across
+// threads is safe.
+unsafe impl Send for Page {}
+
+// SAFETY: Pages have no logic that relies on them not being accessed concurrently, so accessing
+// them concurrently is safe.
+unsafe impl Sync for Page {}
+
+impl Page {
+ /// Allocates a new page.
+ ///
+ /// # Examples
+ ///
+ /// Allocate memory for a page.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL)?;
+ /// # Ok(()) }
+ /// ```
+ ///
+ /// Allocate memory for a page and zero its contents.
+ ///
+ /// ```
+ /// use kernel::page::Page;
+ ///
+ /// # fn dox() -> Result<(), kernel::alloc::AllocError> {
+ /// let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+ /// # Ok(()) }
+ /// ```
+ pub fn alloc_page(flags: Flags) -> Result<Self, AllocError> {
+ // SAFETY: Depending on the value of `gfp_flags`, this call may sleep. Other than that, it
+ // is always safe to call this method.
+ let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };
+ let page = NonNull::new(page).ok_or(AllocError)?;
+ // INVARIANT: We just successfully allocated a page, so we now have ownership of the newly
+ // allocated page. We transfer that ownership to the new `Page` object.
+ Ok(Self { page })
+ }
+
+ /// Returns a raw pointer to the page.
+ pub fn as_ptr(&self) -> *mut bindings::page {
+ self.page.as_ptr()
+ }
+
+ /// Runs a piece of code with this page mapped to an address.
+ ///
+ /// The page is unmapped when this call returns.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `PAGE_SIZE` bytes and for the duration in which the closure is called. The pointer might
+ /// only be mapped on the current thread, and when that is the case, dereferencing it on other
+ /// threads is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't
+ /// cause data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then they may see
+ /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
+ // SAFETY: `page` is valid due to the type invariants on `Page`.
+ let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };
+
+ let res = f(mapped_addr.cast());
+
+ // This unmaps the page mapped above.
+ //
+ // SAFETY: Since this API takes the user code as a closure, it can only be used in a manner
+ // where the pages are unmapped in reverse order. This is as required by `kunmap_local`.
+ //
+ // In other words, if this call to `kunmap_local` happens when a different page should be
+ // unmapped first, then there must necessarily be a call to `kmap_local_page` other than the
+ // call just above in `with_page_mapped` that made that possible. In this case, it is the
+ // unsafe block that wraps that other call that is incorrect.
+ unsafe { bindings::kunmap_local(mapped_addr) };
+
+ res
+ }
+
+ /// Runs a piece of code with a raw pointer to a slice of this page, with bounds checking.
+ ///
+ /// If `f` is called, then it will be called with a pointer that points at `off` bytes into the
+ /// page, and the pointer will be valid for at least `len` bytes. The pointer is only valid on
+ /// this task, as this method uses a local mapping.
+ ///
+ /// If `off` and `len` refer to a region outside of this page, then this method returns
+ /// [`EINVAL`] and does not call `f`.
+ ///
+ /// # Using the raw pointer
+ ///
+ /// It is up to the caller to use the provided raw pointer correctly. The pointer is valid for
+ /// `len` bytes and for the duration in which the closure is called. The pointer might only be
+ /// mapped on the current thread, and when that is the case, dereferencing it on other threads
+ /// is UB. Other than that, the usual rules for dereferencing a raw pointer apply: don't cause
+ /// data races, the memory may be uninitialized, and so on.
+ ///
+ /// If multiple threads map the same page at the same time, then they may see
+ /// different addresses. However, even if the addresses are different, the underlying memory is
+ /// still the same for these purposes (e.g., it's still a data race if they both write to the
+ /// same underlying byte at the same time).
+ fn with_pointer_into_page<T>(
+ &self,
+ off: usize,
+ len: usize,
+ f: impl FnOnce(*mut u8) -> Result<T>,
+ ) -> Result<T> {
+ let bounds_ok = off <= PAGE_SIZE && len <= PAGE_SIZE && (off + len) <= PAGE_SIZE;
+
+ if bounds_ok {
+ self.with_page_mapped(move |page_addr| {
+ // SAFETY: The `off` integer is at most `PAGE_SIZE`, so this pointer offset will
+ // result in a pointer that is in bounds or one off the end of the page.
+ f(unsafe { page_addr.add(off) })
+ })
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Maps the page and reads from it into the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `dst` is valid for writing `len` bytes.
+ /// * Callers must ensure that this call does not race with a write to the same page that
+ /// overlaps with this read.
+ pub unsafe fn read_raw(&self, dst: *mut u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |src| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then
+ // it has performed a bounds check and guarantees that `src` is
+ // valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and writes into it from the given buffer.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// * Callers must ensure that `src` is valid for reading `len` bytes.
+ /// * Callers must ensure that this call does not race with a read or write to the same page
+ /// that overlaps with this write.
+ pub unsafe fn write_raw(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::copy_nonoverlapping(src, dst, len) };
+ Ok(())
+ })
+ }
+
+ /// Maps the page and zeroes the given slice.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn fill_zero_raw(&self, offset: usize, len: usize) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes.
+ //
+ // The caller guarantees that there is no data race.
+ unsafe { ptr::write_bytes(dst, 0u8, len) };
+ Ok(())
+ })
+ }
+
+ /// Copies data from userspace into this page.
+ ///
+ /// This method will perform bounds checks on the page offset. If `offset .. offset+len` goes
+ /// outside of the page, then this call returns [`EINVAL`].
+ ///
+ /// Like the other `UserSliceReader` methods, data races are allowed on the userspace address.
+ /// However, they are not allowed on the page you are copying into.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that this call does not race with a read or write to the same page that
+ /// overlaps with this write.
+ pub unsafe fn copy_from_user_slice_raw(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ self.with_pointer_into_page(offset, len, move |dst| {
+ // SAFETY: If `with_pointer_into_page` calls into this closure, then it has performed a
+ // bounds check and guarantees that `dst` is valid for `len` bytes. Furthermore, we have
+ // exclusive access to the slice since the caller guarantees that there are no races.
+ reader.read_raw(unsafe { core::slice::from_raw_parts_mut(dst.cast(), len) })
+ })
+ }
+}
+
+impl Drop for Page {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we have ownership of the page and can free it.
+ unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
+ }
+}
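
A minimal sketch of how the new `Page` API might be used from driver code, assuming the page is not shared with any other thread so the no-data-race preconditions of the raw accessors hold (the function is illustrative, not part of this patch):

```rust
use kernel::alloc::flags::{GFP_KERNEL, __GFP_ZERO};
use kernel::page::{Page, PAGE_SIZE};
use kernel::prelude::*;

/// Hypothetical round-trip through a freshly allocated page.
fn page_roundtrip() -> Result {
    let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;

    let src = [1u8, 2, 3, 4];
    // SAFETY: `src` is valid for reading 4 bytes, and `page` is owned by this
    // function only, so the write cannot race with anything.
    unsafe { page.write_raw(src.as_ptr(), 0, src.len())? };

    let mut dst = [0u8; 4];
    // SAFETY: `dst` is valid for writing 4 bytes, and nothing else writes to
    // this page concurrently.
    unsafe { page.read_raw(dst.as_mut_ptr(), 0, dst.len())? };
    assert_eq!(src, dst);

    // Offsets are bounds-checked against `PAGE_SIZE`, so an out-of-page range
    // is rejected with EINVAL instead of touching memory.
    // SAFETY: same exclusivity argument as above.
    assert!(unsafe { page.fill_zero_raw(PAGE_SIZE, 1) }.is_err());

    Ok(())
}
```
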
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 2e7c9008621f..bd189d646adb 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -409,3 +409,67 @@ pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `R`.
Right(R),
}
+
+/// Types for which any bit pattern is valid.
+///
+/// Not all types are valid for all values. For example, a `bool` must be either zero or one, so
+/// reading arbitrary bytes into something that contains a `bool` is not okay.
+///
+/// It's okay for the type to have padding, as initializing those bytes has no effect.
+///
+/// # Safety
+///
+/// All bit-patterns must be valid for this type. This type must not have interior mutability.
+pub unsafe trait FromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl FromBytes for u8 {}
+unsafe impl FromBytes for u16 {}
+unsafe impl FromBytes for u32 {}
+unsafe impl FromBytes for u64 {}
+unsafe impl FromBytes for usize {}
+unsafe impl FromBytes for i8 {}
+unsafe impl FromBytes for i16 {}
+unsafe impl FromBytes for i32 {}
+unsafe impl FromBytes for i64 {}
+unsafe impl FromBytes for isize {}
+// SAFETY: If all bit patterns are acceptable for individual values in an array, then all bit
+// patterns are also acceptable for arrays of that type.
+unsafe impl<T: FromBytes> FromBytes for [T] {}
+unsafe impl<T: FromBytes, const N: usize> FromBytes for [T; N] {}
+
+/// Types that can be viewed as an immutable slice of initialized bytes.
+///
+/// If a struct implements this trait, then it is okay to copy it byte-for-byte to userspace. This
+/// means that it should not have any padding, as padding bytes are uninitialized. Reading
+/// uninitialized memory is not just undefined behavior, it may even lead to leaking sensitive
+/// information on the stack to userspace.
+///
+/// The struct should also not hold kernel pointers, as kernel pointer addresses are also considered
+/// sensitive. However, leaking kernel pointers is not considered undefined behavior by Rust, so
+/// this is a correctness requirement, but not a safety requirement.
+///
+/// # Safety
+///
+/// Values of this type may not contain any uninitialized bytes. This type must not have interior
+/// mutability.
+pub unsafe trait AsBytes {}
+
+// SAFETY: Instances of the following types have no uninitialized portions.
+unsafe impl AsBytes for u8 {}
+unsafe impl AsBytes for u16 {}
+unsafe impl AsBytes for u32 {}
+unsafe impl AsBytes for u64 {}
+unsafe impl AsBytes for usize {}
+unsafe impl AsBytes for i8 {}
+unsafe impl AsBytes for i16 {}
+unsafe impl AsBytes for i32 {}
+unsafe impl AsBytes for i64 {}
+unsafe impl AsBytes for isize {}
+unsafe impl AsBytes for bool {}
+unsafe impl AsBytes for char {}
+unsafe impl AsBytes for str {}
+// SAFETY: If individual values in an array have no uninitialized portions, then the array itself
+// does not have any uninitialized portions either.
+unsafe impl<T: AsBytes> AsBytes for [T] {}
+unsafe impl<T: AsBytes, const N: usize> AsBytes for [T; N] {}
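
Driver-defined types opt in to these marker traits with unsafe impls, which is what lets the typed `uaccess` reads and writes added later in this series accept them. A sketch with a hypothetical `#[repr(C)]` argument struct (not part of this patch):

```rust
use kernel::types::{AsBytes, FromBytes};

/// Hypothetical ioctl argument block: two naturally aligned `u32` fields,
/// so the layout has no padding bytes.
#[repr(C)]
struct SampleArgs {
    offset: u32,
    len: u32,
}

// SAFETY: `SampleArgs` contains only `u32` fields, every bit pattern of which
// is valid, and it has no interior mutability.
unsafe impl FromBytes for SampleArgs {}

// SAFETY: `SampleArgs` has no padding and no uninitialized bytes, so it can be
// copied to userspace byte-for-byte without leaking kernel memory.
unsafe impl AsBytes for SampleArgs {}
```
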
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
new file mode 100644
index 000000000000..e9347cff99ab
--- /dev/null
+++ b/rust/kernel/uaccess.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Slices to user space memory regions.
+//!
+//! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
+
+use crate::{
+ alloc::Flags,
+ bindings,
+ error::Result,
+ prelude::*,
+ types::{AsBytes, FromBytes},
+};
+use alloc::vec::Vec;
+use core::ffi::{c_ulong, c_void};
+use core::mem::{size_of, MaybeUninit};
+
+/// The type used for userspace addresses.
+pub type UserPtr = usize;
+
+/// A pointer to an area in userspace memory, which can be either read-only or read-write.
+///
+/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
+/// the bound of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
+/// *including data races to/from userspace memory*, is permitted, because fundamentally another
+/// userspace thread/process could always be modifying memory at the same time (in the same way that
+/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
+/// presence of a race, the exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
+/// expect that multiple reads of the same address will return the same value.
+///
+/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
+/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
+/// the read length and the next read will start from there. This helps prevent accidentally reading
+/// the same location twice and causing a TOCTOU bug.
+///
+/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
+/// ensure that there aren't multiple readers or writers to the same location.
+///
+/// If double-fetching a memory location is necessary for some reason, then that is done by creating
+/// multiple readers to the same memory location, e.g. using [`clone_reader`].
+///
+/// # Examples
+///
+/// Takes a region of userspace memory from the current process, and modifies it by adding one to
+/// every byte in the region.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::Result;
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result<()> {
+/// let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// for b in &mut buf {
+/// *b = b.wrapping_add(1);
+/// }
+///
+/// write.write_slice(&buf)?;
+/// Ok(())
+/// }
+/// ```
+///
+/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
+///
+/// ```no_run
+/// use alloc::vec::Vec;
+/// use core::ffi::c_void;
+/// use kernel::error::{code::EINVAL, Result};
+/// use kernel::uaccess::{UserPtr, UserSlice};
+///
+/// /// Returns whether the data in this region is valid.
+/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// todo!()
+/// }
+///
+/// /// Returns the bytes behind this user pointer if they are valid.
+/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<Vec<u8>> {
+/// if !is_valid(uptr, len)? {
+/// return Err(EINVAL);
+/// }
+///
+/// let read = UserSlice::new(uptr, len).reader();
+///
+/// let mut buf = Vec::new();
+/// read.read_all(&mut buf, GFP_KERNEL)?;
+///
+/// // THIS IS A BUG! The bytes could have changed since we checked them.
+/// //
+/// // To avoid this kind of bug, don't call `UserSlice::new` multiple
+/// // times with the same address.
+/// Ok(buf)
+/// }
+/// ```
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
+/// [`clone_reader`]: UserSliceReader::clone_reader
+pub struct UserSlice {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSlice {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// Constructing a [`UserSlice`] performs no checks on the provided address and length, it can
+ /// safely be constructed inside a kernel thread with no current userspace process. Reads and
+ /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
+ /// of the current process and enforce that the address range is within the user range (no
+ /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
+ /// attempt to read or write, not in the call to `UserSlice::new`.
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use (TOCTOU) issues. The simplest way
+ /// is to create a single instance of [`UserSlice`] per user memory block as it reads each byte
+ /// at most once.
+ pub fn new(ptr: UserPtr, length: usize) -> Self {
+ UserSlice { ptr, length }
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ self.reader().read_all(buf, flags)
+ }
+
+ /// Constructs a [`UserSliceReader`].
+ pub fn reader(self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs a [`UserSliceWriter`].
+ pub fn writer(self) -> UserSliceWriter {
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
+ ///
+ /// Usually when this is used, you will first read the data, and then overwrite it afterwards.
+ pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
+ (
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ UserSliceWriter {
+ ptr: self.ptr,
+ length: self.length,
+ },
+ )
+ }
+}
+
+/// A reader for [`UserSlice`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSliceReader {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceReader {
+ /// Skip the provided number of bytes.
+ ///
+ /// Returns an error if skipping more than the length of the buffer.
+ pub fn skip(&mut self, num_skip: usize) -> Result {
+ // Update `self.length` first since that's the fallible part of this operation.
+ self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
+ self.ptr = self.ptr.wrapping_add(num_skip);
+ Ok(())
+ }
+
+ /// Create a reader that can access the same range of data.
+ ///
+ /// Reading from the clone does not advance the current reader.
+ ///
+ /// The caller should take care to not introduce TOCTOU issues, as described in the
+ /// documentation for [`UserSlice`].
+ pub fn clone_reader(&self) -> UserSliceReader {
+ UserSliceReader {
+ ptr: self.ptr,
+ length: self.length,
+ }
+ }
+
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no data is available in the io buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ ///
+ /// # Guarantees
+ ///
+ /// After a successful call to this method, all bytes in `out` are initialized.
+ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+ let len = out.len();
+ let out_ptr = out.as_mut_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write
+ // that many bytes to it.
+ let res =
+ unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Reads raw data from the user slice into a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
+ pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
+ // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
+ // `out`.
+ let out = unsafe { &mut *(out as *mut [u8] as *mut [MaybeUninit<u8>]) };
+ self.read_raw(out)
+ }
+
+ /// Reads a value of the specified type.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
+ /// bounds of this [`UserSliceReader`].
+ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+ // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+ //
+ // By using the _copy_from_user variant, we skip the check_object_size check that verifies
+ // the kernel pointer. This mirrors the logic on the C side that skips the check when the
+ // length is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_from_user(
+ out.as_mut_ptr().cast::<c_void>(),
+ self.ptr as *const c_void,
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
+ // `FromBytes`, any bit-pattern is a valid value for this type.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
+ ///
+ /// Fails with [`EFAULT`] if the read happens on a bad address.
+ pub fn read_all(mut self, buf: &mut Vec<u8>, flags: Flags) -> Result {
+ let len = self.length;
+ VecExt::<u8>::reserve(buf, len, flags)?;
+
+ // The call to `reserve` was successful, so the spare capacity is at least `len` bytes
+ // long.
+ self.read_raw(&mut buf.spare_capacity_mut()[..len])?;
+
+ // SAFETY: Since the call to `read_raw` was successful, the next `len` bytes of the
+ // vector have been initialized.
+ unsafe { buf.set_len(buf.len() + len) };
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlice`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSliceWriter {
+ ptr: UserPtr,
+ length: usize,
+}
+
+impl UserSliceWriter {
+ /// Returns the amount of space remaining in this buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ pub fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if no more data can be written to this buffer.
+ pub fn is_empty(&self) -> bool {
+ self.length == 0
+ }
+
+ /// Writes raw data to this user pointer from a kernel buffer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+ let len = data.len();
+ let data_ptr = data.as_ptr().cast::<c_void>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read
+ // that many bytes from it.
+ let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+
+ /// Writes the provided Rust value to this userspace pointer.
+ ///
+ /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
+ /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
+ /// if it returns an error.
+ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+ let len = size_of::<T>();
+ if len > self.length {
+ return Err(EFAULT);
+ }
+ let Ok(len_ulong) = c_ulong::try_from(len) else {
+ return Err(EFAULT);
+ };
+ // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+ // `size_of::<T>()` bytes.
+ //
+ // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
+ // kernel pointer. This mirrors the logic on the C side that skips the check when the length
+ // is a compile-time constant.
+ let res = unsafe {
+ bindings::_copy_to_user(
+ self.ptr as *mut c_void,
+ (value as *const T).cast::<c_void>(),
+ len_ulong,
+ )
+ };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ self.ptr = self.ptr.wrapping_add(len);
+ self.length -= len;
+ Ok(())
+ }
+}
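
Beyond the `read_all`-based examples in the doc comments above, the typed `read`/`write` methods pair with the `FromBytes`/`AsBytes` traits. A minimal sketch, assuming a hypothetical handler that owns a user pointer to an 8-byte counter:

```rust
use kernel::prelude::*;
use kernel::uaccess::{UserPtr, UserSlice};

/// Hypothetical handler: read a `u64` from userspace and write back the
/// incremented value to the same location.
fn bump_counter(uptr: UserPtr, len: usize) -> Result {
    let (mut reader, mut writer) = UserSlice::new(uptr, len).reader_writer();

    // Typed read; fails with EFAULT if the slice is shorter than 8 bytes or
    // the address is bad. `u64: FromBytes`, so any bit pattern is acceptable.
    let value: u64 = reader.read()?;

    // Typed write back to the start of the same slice (`u64: AsBytes`).
    writer.write(&value.wrapping_add(1))?;
    Ok(())
}
```
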
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 1cec63a2aea8..553a5cba2adc 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -482,24 +482,26 @@ pub unsafe trait HasWork<T, const ID: u64 = 0> {
/// use kernel::sync::Arc;
/// use kernel::workqueue::{self, impl_has_work, Work};
///
-/// struct MyStruct {
-/// work_field: Work<MyStruct, 17>,
+/// struct MyStruct<'a, T, const N: usize> {
+/// work_field: Work<MyStruct<'a, T, N>, 17>,
+/// f: fn(&'a [T; N]),
/// }
///
/// impl_has_work! {
-/// impl HasWork<MyStruct, 17> for MyStruct { self.work_field }
+/// impl{'a, T, const N: usize} HasWork<MyStruct<'a, T, N>, 17>
+/// for MyStruct<'a, T, N> { self.work_field }
/// }
/// ```
#[macro_export]
macro_rules! impl_has_work {
- ($(impl$(<$($implarg:ident),*>)?
+ ($(impl$({$($generics:tt)*})?
HasWork<$work_type:ty $(, $id:tt)?>
- for $self:ident $(<$($selfarg:ident),*>)?
+ for $self:ty
{ self.$field:ident }
)*) => {$(
// SAFETY: The implementation of `raw_get_work` only compiles if the field has the right
// type.
- unsafe impl$(<$($implarg),*>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self $(<$($selfarg),*>)? {
+ unsafe impl$(<$($generics)+>)? $crate::workqueue::HasWork<$work_type $(, $id)?> for $self {
const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
#[inline]
@@ -515,7 +517,7 @@ macro_rules! impl_has_work {
pub use impl_has_work;
impl_has_work! {
- impl<T> HasWork<Self> for ClosureWork<T> { self.work }
+ impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
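
The brace form forwards arbitrary generic parameter tokens into the generated `unsafe impl<...>`, so lifetimes and const generics now work where the old comma-separated identifier list did not. A sketch with a hypothetical const-generic work container (assuming the usual `Work` field initialization elsewhere):

```rust
use kernel::workqueue::{impl_has_work, Work};

/// Hypothetical per-transfer state embedding a work item; the const generic
/// `N` is what the previous `impl<...>` macro syntax could not express.
struct Transfer<const N: usize> {
    buf: [u8; N],
    work: Work<Transfer<N>>,
}

impl_has_work! {
    impl{const N: usize} HasWork<Transfer<N>> for Transfer<N> { self.work }
}
```
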
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 520eae5fd792..159e75292970 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -35,6 +35,7 @@ use proc_macro::TokenStream;
/// author: "Rust for Linux Contributors",
/// description: "My very own kernel module!",
/// license: "GPL",
+/// alias: ["alternate_module_name"],
/// }
///
/// struct MyModule;
@@ -55,13 +56,45 @@ use proc_macro::TokenStream;
/// }
/// ```
///
+/// ## Firmware
+///
+/// The following example shows how to declare a kernel module that needs
+/// to load binary firmware files. You need to specify the file names of
+/// the firmware in the `firmware` field. The information is embedded
+/// in the `modinfo` section of the kernel module. For example, a tool to
+/// build an initramfs uses this information to put the firmware files into
+/// the initramfs image.
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module!{
+/// type: MyDeviceDriverModule,
+/// name: "my_device_driver_module",
+/// author: "Rust for Linux Contributors",
+/// description: "My device driver requires firmware",
+/// license: "GPL",
+/// firmware: ["my_device_firmware1.bin", "my_device_firmware2.bin"],
+/// }
+///
+/// struct MyDeviceDriverModule;
+///
+/// impl kernel::Module for MyDeviceDriverModule {
+/// fn init() -> Result<Self> {
+/// Ok(Self)
+/// }
+/// }
+/// ```
+///
/// # Supported argument types
/// - `type`: type which implements the [`Module`] trait (required).
-/// - `name`: byte array of the name of the kernel module (required).
-/// - `author`: byte array of the author of the kernel module.
-/// - `description`: byte array of the description of the kernel module.
-/// - `license`: byte array of the license of the kernel module (required).
-/// - `alias`: byte array of alias name of the kernel module.
+/// - `name`: ASCII string literal of the name of the kernel module (required).
+/// - `author`: string literal of the author of the kernel module.
+/// - `description`: string literal of the description of the kernel module.
+/// - `license`: ASCII string literal of the license of the kernel module (required).
+/// - `alias`: array of ASCII string literals of the alias names of the kernel module.
+/// - `firmware`: array of ASCII string literals of the firmware files of
+/// the kernel module.
#[proc_macro]
pub fn module(ts: TokenStream) -> TokenStream {
module::module(ts)
@@ -312,7 +345,7 @@ pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
///
/// Currently supported modifiers are:
/// * `span`: change the span of concatenated identifier to the span of the specified token. By
-/// default the span of the `[< >]` group is used.
+/// default the span of the `[< >]` group is used.
/// * `lower`: change the identifier to lower case.
/// * `upper`: change the identifier to upper case.
///
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index acd0393b5095..411dc103d82e 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -97,14 +97,22 @@ struct ModuleInfo {
author: Option<String>,
description: Option<String>,
alias: Option<Vec<String>>,
+ firmware: Option<Vec<String>>,
}
impl ModuleInfo {
fn parse(it: &mut token_stream::IntoIter) -> Self {
let mut info = ModuleInfo::default();
- const EXPECTED_KEYS: &[&str] =
- &["type", "name", "author", "description", "license", "alias"];
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "description",
+ "license",
+ "alias",
+ "firmware",
+ ];
const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
let mut seen_keys = Vec::new();
@@ -131,6 +139,7 @@ impl ModuleInfo {
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
"alias" => info.alias = Some(expect_string_array(it)),
+ "firmware" => info.firmware = Some(expect_string_array(it)),
_ => panic!(
"Unknown key \"{}\". Valid keys are: {:?}.",
key, EXPECTED_KEYS
@@ -186,6 +195,11 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
modinfo.emit("alias", &alias);
}
}
+ if let Some(firmware) = info.firmware {
+ for fw in firmware {
+ modinfo.emit("firmware", &fw);
+ }
+ }
// Built-in modules also export the `file` modinfo string.
let file =
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index 0caad902ba40..80a00260e3e7 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -14,6 +14,7 @@
#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
#![allow(
clippy::all,
+ dead_code,
missing_docs,
non_camel_case_types,
non_upper_case_globals,