use core::{
    cell::{Cell, UnsafeCell},
    mem::MaybeUninit,
};

pub use crate::hw::Mutex;
pub use avr_device::interrupt::CriticalSection;

macro_rules! define_context {
    ($name:ident) => {
        pub struct $name<'cs>(CriticalSection<'cs>);

        impl<'cs> $name<'cs> {
            /// Create a new context.
            ///
            /// # SAFETY
            ///
            /// This may only be called from the corresponding context.
            /// `MainCtx` may only be constructed from `main()`
            /// and `IrqCtx` may only be constructed from ISRs.
            #[inline(always)]
            pub unsafe fn new() -> Self {
                // SAFETY: This cs is used with the low level PAC primitives.
                // The IRQ safety is upheld by the context machinery instead.
                //
                // If a function takes a `MainCtx` argument, it can only be
                // called from `main()` context. Correspondingly for `IrqCtx`.
                //
                // At the low level the `MutexCell` and `MutexRefCell` ensure
                // that they can only be used from the main context.
                // With this mechanism we can run the main context with IRQs
                // enabled. There cannot be any concurrency in safe code.
                let cs = unsafe { CriticalSection::new() };
                fence();
                Self(cs)
            }

            /// Get the `CriticalSection` that belongs to this context.
            #[inline(always)]
            #[allow(dead_code)]
            pub fn cs(&self) -> CriticalSection<'cs> {
                self.0
            }

            /// Convert this to a generic context.
            #[inline(always)]
            pub fn to_any(&self) -> AnyCtx {
                AnyCtx::new()
            }
        }

        impl<'cs> Drop for $name<'cs> {
            #[inline(always)]
            fn drop(&mut self) {
                fence();
            }
        }
    };
}

define_context!(MainCtx);
define_context!(IrqCtx);

/// Main context initialization marker.
///
/// This marker does not have a pub constructor.
/// It is only created by [MainCtx].
pub struct MainInitCtx(());

impl<'cs> MainCtx<'cs> {
    /// SAFETY: The safety contract of [MainCtx::new] must be upheld.
    #[inline(always)]
    pub unsafe fn new_with_init<F: FnOnce(&MainInitCtx)>(f: F) -> Self {
        // SAFETY: We are creating the MainCtx.
        // Therefore, it's safe to construct the MainInitCtx marker.
        f(&MainInitCtx(()));
        // SAFETY: Safety contract of MainCtx::new is upheld.
        unsafe { Self::new() }
    }
}

pub struct AnyCtx(());

impl AnyCtx {
    /// Create a new generic context.
    #[inline(always)]
    pub fn new() -> Self {
        Self(())
    }

    /// Convert this into a [MainCtx].
    ///
    /// # SAFETY
    ///
    /// You must ensure that either:
    ///
    /// - We actually are running in main context, or
    /// - If we are running in interrupt context, then
    ///   all things done with this MainCtx must be safe w.r.t.
    ///   the interrupted main context.
    ///   E.g. atomic accesses have to be used.
    #[inline(always)]
    pub unsafe fn to_main_ctx<'cs>(&self) -> MainCtx<'cs> {
        // SAFETY: See function doc.
        unsafe { MainCtx::new() }
    }
}

/// Lazy initialization of static variables.
pub struct LazyMainInit<T>(UnsafeCell<MaybeUninit<T>>);

impl<T> LazyMainInit<T> {
    /// # SAFETY
    ///
    /// It must be ensured that the returned instance is initialized
    /// with a call to [Self::init] during construction of the [MainCtx].
    /// See [MainCtx::new_with_init].
    ///
    /// Using this object in any way before initializing it will
    /// result in Undefined Behavior.
    #[inline(always)]
    pub const unsafe fn uninit() -> Self {
        Self(UnsafeCell::new(MaybeUninit::uninit()))
    }

    #[inline(always)]
    pub fn init(&self, _m: &MainInitCtx, inner: T) {
        // SAFETY: Initialization is required for the `assume_init` calls.
        unsafe { *self.0.get() = MaybeUninit::new(inner) };
    }

    #[inline(always)]
    #[allow(dead_code)]
    pub fn deref(&self, _m: &MainCtx) -> &T {
        // SAFETY: The `Self::uninit` safety contract ensures that `Self::init` is called before us.
        unsafe { (*self.0.get()).assume_init_ref() }
    }

    #[inline(always)]
    #[allow(dead_code)]
    fn deref_mut(&mut self, _m: &MainCtx) -> &mut T {
        // SAFETY: The `Self::uninit` safety contract ensures that `Self::init` is called before us.
        unsafe { (*self.0.get()).assume_init_mut() }
    }
}

// SAFETY: If T is Send, then we can Send the whole object. The object only contains T state.
unsafe impl<T: Send> Send for LazyMainInit<T> {}

// SAFETY: The `deref` and `deref_mut` functions ensure that they can only be called
// from `MainCtx` compatible contexts.
unsafe impl<T: Send> Sync for LazyMainInit<T> {}

/// Optimization and reordering fence.
#[inline(always)]
pub fn fence() {
    core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
}

pub struct MutexCell<T> {
    inner: Mutex<Cell<T>>,
}

impl<T> MutexCell<T> {
    #[inline]
    pub const fn new(inner: T) -> Self {
        Self {
            inner: Mutex::new(Cell::new(inner)),
        }
    }

    #[inline]
    #[allow(dead_code)]
    pub fn replace(&self, m: &MainCtx<'_>, inner: T) -> T {
        self.inner.borrow(m.cs()).replace(inner)
    }

    #[inline]
    #[allow(dead_code)]
    pub fn as_ref<'cs>(&self, m: &MainCtx<'cs>) -> &'cs T {
        // SAFETY: The returned reference is bound to the
        // lifetime of the CriticalSection.
        unsafe { &*(self.inner.borrow(m.cs()).as_ptr() as *const T) }
    }
}

impl<T: Copy> MutexCell<T> {
    #[inline]
    pub fn get(&self, m: &MainCtx<'_>) -> T {
        self.inner.borrow(m.cs()).get()
    }

    #[inline]
    pub fn set(&self, m: &MainCtx<'_>, inner: T) {
        self.inner.borrow(m.cs()).set(inner);
    }
}

/// Cheaper Option::unwrap() alternative.
///
/// This is cheaper, because it doesn't call into the panic unwind path.
/// Therefore, it does not impose caller-saves overhead onto the calling function.
#[inline(always)]
#[allow(dead_code)]
pub fn unwrap_option<T>(value: Option<T>) -> T {
    match value {
        Some(value) => value,
        None => reset_system(),
    }
}

/// Cheaper Result::unwrap() alternative.
///
/// This is cheaper, because it doesn't call into the panic unwind path.
/// Therefore, it does not impose caller-saves overhead onto the calling function.
#[inline(always)]
#[allow(dead_code)]
pub fn unwrap_result<T, E>(value: Result<T, E>) -> T {
    match value {
        Ok(value) => value,
        Err(_) => reset_system(),
    }
}

/// Reset the system.
#[inline(always)]
#[allow(clippy::empty_loop)]
pub fn reset_system() -> ! {
    loop {
        // Wait for the watchdog timer to trigger and reset the system.
        // We don't need to disable interrupts here.
        // No interrupt will reset the watchdog timer.
    }
}

#[inline(always)]
#[panic_handler]
fn panic(_: &core::panic::PanicInfo) -> ! {
    reset_system();
}

// vim: ts=4 sw=4 expandtab
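
// Usage sketch: an illustrative example of how the pieces above are meant to
// fit together, not part of the original module. The names `EXAMPLE_STATE`,
// `EXAMPLE_COUNTER` and `example_main` are hypothetical; the real firmware
// wires these types up in its own main() and ISRs.

// SAFETY: `EXAMPLE_STATE.init()` is called below, inside the
// `MainCtx::new_with_init` closure, before any other use.
#[allow(dead_code)]
static EXAMPLE_STATE: LazyMainInit<u16> = unsafe { LazyMainInit::uninit() };

#[allow(dead_code)]
static EXAMPLE_COUNTER: MutexCell<u8> = MutexCell::new(0);

#[allow(dead_code)]
fn example_main() {
    // SAFETY: This runs from main() context only, and the init closure
    // initializes every `LazyMainInit` static before the `MainCtx` exists.
    let m = unsafe {
        MainCtx::new_with_init(|init| {
            EXAMPLE_STATE.init(init, 42);
        })
    };

    // All `MutexCell` accesses require a `MainCtx`, so safe code cannot race
    // with the main context even though IRQs stay enabled.
    EXAMPLE_COUNTER.set(&m, EXAMPLE_COUNTER.get(&m).wrapping_add(1));
    let _value: u16 = *EXAMPLE_STATE.deref(&m);
}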