rust: slab: add basic slab module
From: Elijah Wright <git-AT-elijahs.space>
To: Miguel Ojeda <ojeda-AT-kernel.org>, Alex Gaynor <alex.gaynor-AT-gmail.com>, Boqun Feng <boqun.feng-AT-gmail.com>, Gary Guo <gary-AT-garyguo.net>, Björn Roy Baron <bjorn3_gh-AT-protonmail.com>, Benno Lossin <lossin-AT-kernel.org>, Andreas Hindborg <a.hindborg-AT-kernel.org>, Alice Ryhl <aliceryhl-AT-google.com>, Trevor Gross <tmgross-AT-umich.edu>, Danilo Krummrich <dakr-AT-kernel.org>, rust-for-linux-AT-vger.kernel.org, linux-kernel-AT-vger.kernel.org
Subject: [PATCH] rust: slab: add basic slab module
Date: Wed, 24 Sep 2025 12:36:35 -0700
Message-ID: <20250924193643.4001-1-git@elijahs.space>
Cc: Elijah Wright <git-AT-elijahs.space>
Add a basic slab module for kmem_cache, primarily wrapping
kmem_cache_create, kmem_cache_alloc, kmem_cache_free, and
kmem_cache_destroy.

Signed-off-by: Elijah Wright <git@elijahs.space>
---
 rust/helpers/slab.c | 10 ++++++
 rust/kernel/lib.rs  |  1 +
 rust/kernel/slab.rs | 85 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+)
 create mode 100644 rust/kernel/slab.rs

diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
index a842bfbddcba..799de7bc1405 100644
--- a/rust/helpers/slab.c
+++ b/rust/helpers/slab.c
@@ -13,3 +13,13 @@ rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
 {
 	return kvrealloc(p, size, flags);
 }
+
+struct kmem_cache * rust_helper_kmem_cache_create(const char *name, unsigned int size, unsigned int align, gfp_t flags, void (*ctor)(void *))
+{
+	return kmem_cache_create(name, size, align, flags, NULL);
+}
+
+void * rust_helper_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
\ No newline at end of file
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index fef97f2a5098..bd76eadbe297 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -116,6 +116,7 @@
 pub mod security;
 pub mod seq_file;
 pub mod sizes;
+pub mod slab;
 mod static_assert;
 #[doc(hidden)]
 pub mod std_vendor;
diff --git a/rust/kernel/slab.rs b/rust/kernel/slab.rs
new file mode 100644
index 000000000000..8b418f9db7cb
--- /dev/null
+++ b/rust/kernel/slab.rs
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Slab bindings.
+//!
+//! C header: [`include/linux/slab.h`](srctree/include/linux/slab.h)
+
+use core::{marker::PhantomData, mem, ptr::NonNull};
+
+use crate::{
+    alloc::Flags,
+    bindings,
+    error::{code::ENOMEM, Result},
+    str::CStr,
+};
+
+/// A wrapper for kmem_cache that allocates objects of type `T`.
+#[repr(transparent)]
+pub struct Slab<T> {
+    cache: NonNull<bindings::kmem_cache>,
+    _p: PhantomData<T>,
+}
+
+impl<T> Slab<T> {
+    /// Creates a cache for objects of type `T`.
+    pub fn try_new(name: &CStr, flags: Flags) -> Result<Self> {
+        let size = mem::size_of::<T>();
+        let align = mem::align_of::<T>();
+        debug_assert!(size <= usize::MAX);
+        debug_assert!(align <= usize::MAX);
+
+        // SAFETY: `flags` is a valid impl, `name` is a valid C string, and
+        // other arguments are plain values.
+        let cache = unsafe {
+            bindings::kmem_cache_create(
+                name.as_char_ptr(),
+                size as u32,
+                align as u32,
+                flags.as_raw(),
+                None,
+            )
+        };
+
+        NonNull::new(cache)
+            .map(|c| Slab {
+                cache: c,
+                _p: PhantomData,
+            })
+            .ok_or(ENOMEM)
+    }
+
+    /// Allocates one object from the cache with the given gfp flags.
+    #[inline]
+    pub fn alloc(&self, flags: Flags) -> Result<NonNull<T>> {
+        // SAFETY: `self.cache` is a valid pointer obtained from
+        // `kmem_cache_create` and still alive because `self` is borrowed.
+        let ptr = unsafe { bindings::kmem_cache_alloc(self.cache.as_ptr(), flags.as_raw()) };
+        NonNull::new(ptr.cast()).ok_or(ENOMEM)
+    }
+
+    /// Frees an object previously returned by `alloc()`.
+    ///
+    /// # Safety
+    /// The caller must guarantee that `obj` was allocated from this cache and
+    /// is no longer accessed afterwards.
+    #[inline]
+    pub unsafe fn free(&self, obj: NonNull<T>) {
+        // SAFETY: By the safety contract the pointer is valid and unique at
+        // this point.
+        unsafe { bindings::kmem_cache_free(self.cache.as_ptr(), obj.cast().as_ptr()) };
+    }
+
+    /// Returns the raw mutable pointer to the cache.
+    #[inline]
+    pub fn as_ptr(&self) -> *mut bindings::kmem_cache {
+        self.cache.as_ptr()
+    }
+}
+
+impl<T> Drop for Slab<T> {
+    fn drop(&mut self) {
+        // SAFETY: `self.cache` is valid and we are the final owner because
+        // of ownership rules.
+        unsafe { bindings::kmem_cache_destroy(self.cache.as_ptr()) };
+    }
+}
-- 
2.49.1
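
For illustration only, a rough sketch of how driver code might use the proposed Slab<T> API, assuming it lands as posted above; the MyData type, the cache name "my_data_cache", and slab_demo() are made up for this example, and try_new() is passed the same gfp-style Flags as alloc() because that is the signature in this version of the patch:

use kernel::alloc::flags::GFP_KERNEL;
use kernel::c_str;
use kernel::error::Result;
use kernel::slab::Slab;

/// Example payload type; made up for this sketch.
struct MyData {
    counter: u64,
}

fn slab_demo() -> Result {
    // Create a dedicated cache for `MyData` objects; the name is what
    // would show up in /proc/slabinfo.
    let cache = Slab::<MyData>::try_new(c_str!("my_data_cache"), GFP_KERNEL)?;

    // Allocate one object's worth of storage from the cache; the returned
    // `NonNull<MyData>` points at uninitialised memory.
    let obj = cache.alloc(GFP_KERNEL)?;

    // ... initialise and use the object through raw-pointer writes ...

    // SAFETY: `obj` was allocated from `cache` above and is not accessed
    // again after this call.
    unsafe { cache.free(obj) };

    Ok(())
    // Dropping `cache` here calls kmem_cache_destroy().
}

Note that, as posted, Slab<T> hands out raw NonNull<T> pointers to uninitialised storage, so callers are responsible for initialising the object and for pairing every alloc() with a free() before the cache is dropped.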