Merge #1105
1105: embassy-rp: Add multicore support r=Dirbaio a=kalkyl

This PR adds multicore support plus a critical-section implementation that uses the RP2040 hardware spinlocks, based on the rp2040-hal implementation.

Co-authored-by: kalkyl <henrik.alser@me.com>
Co-authored-by: Henrik Alsér <henrik.alser@me.com>

Commit 47747d3b73
9 changed files with 590 additions and 12 deletions
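For a quick picture of what this enables: core 1 gets its own stack and its own embassy executor, and the two cores can share data through `embassy-sync` primitives once the new `critical-section-impl` feature is enabled (the example and test manifests below switch to it and drop `critical-section-single-core` from cortex-m). A condensed usage sketch, lifted from the module docs and example added in this PR; `core0_task`/`core1_task` are placeholder tasks and the imports are as in the full example:

static mut CORE1_STACK: Stack<4096> = Stack::new();
static EXECUTOR0: StaticCell<Executor> = StaticCell::new();
static EXECUTOR1: StaticCell<Executor> = StaticCell::new();

#[cortex_m_rt::entry]
fn main() -> ! {
    let p = embassy_rp::init(Default::default());

    // Start a second executor on core 1 with its own dedicated stack.
    spawn_core1(p.CORE1, unsafe { &mut CORE1_STACK }, move || {
        let executor1 = EXECUTOR1.init(Executor::new());
        executor1.run(|spawner| unwrap!(spawner.spawn(core1_task())));
    });

    // Core 0 keeps running its own executor as usual.
    let executor0 = EXECUTOR0.init(Executor::new());
    executor0.run(|spawner| unwrap!(spawner.spawn(core0_task())));
}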
embassy-rp/Cargo.toml
@@ -15,6 +15,9 @@ flavors = [
 [features]
 defmt = ["dep:defmt", "embassy-usb-driver?/defmt", "embassy-hal-common/defmt"]
 
+# critical section that is safe for multicore use
+critical-section-impl = ["critical-section/restore-state-u8"]
+
 # Reexport the PAC for the currently enabled chip at `embassy_rp::pac`.
 # This is unstable because semver-minor (non-breaking) releases of embassy-rp may major-bump (breaking) the PAC version.
 # If this is an issue for you, you're encouraged to directly depend on a fixed version of the PAC.
embassy-rp/src/critical_section_impl.rs (new file, 142 lines)
@@ -0,0 +1,142 @@
use core::sync::atomic::{AtomicU8, Ordering};

use crate::pac;

struct RpSpinlockCs;
critical_section::set_impl!(RpSpinlockCs);

/// Marker value to indicate no-one has the lock.
///
/// Initialising `LOCK_OWNER` to 0 means cheaper static initialisation so it's the best choice
const LOCK_UNOWNED: u8 = 0;

/// Indicates which core owns the lock so that we can call critical_section recursively.
///
/// 0 = no one has the lock, 1 = core0 has the lock, 2 = core1 has the lock
static LOCK_OWNER: AtomicU8 = AtomicU8::new(LOCK_UNOWNED);

/// Marker value to indicate that we already owned the lock when we started the `critical_section`.
///
/// Since we can't take the spinlock when we already have it, we need some other way to keep track of `critical_section` ownership.
/// `critical_section` provides a token for communicating between `acquire` and `release` so we use that.
/// If we're the outermost call to `critical_section` we use the values 0 and 1 to indicate we should release the spinlock and set the interrupts back to disabled and enabled, respectively.
/// The value 2 indicates that we aren't the outermost call, and should not release the spinlock or re-enable interrupts in `release`
const LOCK_ALREADY_OWNED: u8 = 2;

unsafe impl critical_section::Impl for RpSpinlockCs {
    unsafe fn acquire() -> u8 {
        RpSpinlockCs::acquire()
    }

    unsafe fn release(token: u8) {
        RpSpinlockCs::release(token);
    }
}

impl RpSpinlockCs {
    unsafe fn acquire() -> u8 {
        // Store the initial interrupt state and current core id in stack variables
        let interrupts_active = cortex_m::register::primask::read().is_active();
        // We reserved 0 as our `LOCK_UNOWNED` value, so add 1 to core_id so we get 1 for core0, 2 for core1.
        let core = pac::SIO.cpuid().read() as u8 + 1;
        // Do we already own the spinlock?
        if LOCK_OWNER.load(Ordering::Acquire) == core {
            // We already own the lock, so we must have called acquire within a critical_section.
            // Return the magic inner-loop value so that we know not to re-enable interrupts in release()
            LOCK_ALREADY_OWNED
        } else {
            // Spin until we get the lock
            loop {
                // Need to disable interrupts to ensure that we will not deadlock
                // if an interrupt enters critical_section::Impl after we acquire the lock
                cortex_m::interrupt::disable();
                // Ensure the compiler doesn't re-order accesses and violate safety here
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                // Read the spinlock reserved for `critical_section`
                if let Some(lock) = Spinlock31::try_claim() {
                    // We just acquired the lock.
                    // 1. Forget it, so we don't immediately unlock
                    core::mem::forget(lock);
                    // 2. Store which core we are so we can tell if we're called recursively
                    LOCK_OWNER.store(core, Ordering::Relaxed);
                    break;
                }
                // We didn't get the lock, enable interrupts if they were enabled before we started
                if interrupts_active {
                    cortex_m::interrupt::enable();
                }
            }
            // If we broke out of the loop we have just acquired the lock
            // As the outermost loop, we want to return the interrupt status to restore later
            interrupts_active as _
        }
    }

    unsafe fn release(token: u8) {
        // Did we already own the lock at the start of the `critical_section`?
        if token != LOCK_ALREADY_OWNED {
            // No, it wasn't owned at the start of this `critical_section`, so this core no longer owns it.
            // Set `LOCK_OWNER` back to `LOCK_UNOWNED` to ensure the next critical section tries to obtain the spinlock instead
            LOCK_OWNER.store(LOCK_UNOWNED, Ordering::Relaxed);
            // Ensure the compiler doesn't re-order accesses and violate safety here
            core::sync::atomic::compiler_fence(Ordering::SeqCst);
            // Release the spinlock to allow others to enter critical_section again
            Spinlock31::release();
            // Re-enable interrupts if they were enabled when we first called acquire()
            // We only do this on the outermost `critical_section` to ensure interrupts stay disabled
            // for the whole time that we have the lock
            if token != 0 {
                cortex_m::interrupt::enable();
            }
        }
    }
}

pub struct Spinlock<const N: usize>(core::marker::PhantomData<()>)
where
    Spinlock<N>: SpinlockValid;

impl<const N: usize> Spinlock<N>
where
    Spinlock<N>: SpinlockValid,
{
    /// Try to claim the spinlock. Will return `Some(Self)` if the lock is obtained, and `None` if the lock is
    /// already in use somewhere else.
    pub fn try_claim() -> Option<Self> {
        // Safety: We're only reading from this register
        unsafe {
            let lock = pac::SIO.spinlock(N).read();
            if lock > 0 {
                Some(Self(core::marker::PhantomData))
            } else {
                None
            }
        }
    }

    /// Clear a locked spin-lock.
    ///
    /// # Safety
    ///
    /// Only call this function if you hold the spin-lock.
    pub unsafe fn release() {
        unsafe {
            // Write (any value): release the lock
            pac::SIO.spinlock(N).write_value(1);
        }
    }
}

impl<const N: usize> Drop for Spinlock<N>
where
    Spinlock<N>: SpinlockValid,
{
    fn drop(&mut self) {
        // This is safe because we own the object, and hence hold the lock.
        unsafe { Self::release() }
    }
}

pub(crate) type Spinlock31 = Spinlock<31>;
pub trait SpinlockValid {}
impl SpinlockValid for Spinlock<31> {}
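A note on what this impl provides (illustration, not part of the diff): with the `critical-section-impl` feature enabled, the standard `critical_section::with` entry point goes through this spinlock-backed implementation, so a critical section excludes the other core as well as local interrupts, and it nests on the same core thanks to the ownership token above. A minimal sketch of state shared across cores, assuming the `critical-section` 1.x `Mutex` API:

use core::cell::Cell;
use critical_section::Mutex;

// A counter that may be bumped from either core (or from interrupt context).
static COUNTER: Mutex<Cell<u32>> = Mutex::new(Cell::new(0));

fn bump() -> u32 {
    // acquire() disables interrupts and claims hardware spinlock 31,
    // so this closure runs exclusively across both cores.
    critical_section::with(|cs| {
        let c = COUNTER.borrow(cs);
        c.set(c.get() + 1);
        c.get()
    })
}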
embassy-rp/src/flash.rs
@@ -6,6 +6,7 @@ use embedded_storage::nor_flash::{
     ReadNorFlash,
 };
 
+use crate::pac;
 use crate::peripherals::FLASH;
 
 pub const FLASH_BASE: usize = 0x10000000;
@@ -28,6 +29,7 @@ pub enum Error {
     OutOfBounds,
     /// Unaligned operation or using unaligned buffers.
     Unaligned,
+    InvalidCore,
     Other,
 }
 
@@ -46,7 +48,7 @@ impl NorFlashError for Error {
         match self {
             Self::OutOfBounds => NorFlashErrorKind::OutOfBounds,
             Self::Unaligned => NorFlashErrorKind::NotAligned,
-            Self::Other => NorFlashErrorKind::Other,
+            _ => NorFlashErrorKind::Other,
         }
     }
 }
@@ -87,7 +89,7 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
 
         let len = to - from;
 
-        unsafe { self.in_ram(|| ram_helpers::flash_range_erase(from, len, true)) };
+        unsafe { self.in_ram(|| ram_helpers::flash_range_erase(from, len, true))? };
 
         Ok(())
     }
@@ -112,7 +114,7 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
 
             let unaligned_offset = offset as usize - start;
 
-            unsafe { self.in_ram(|| ram_helpers::flash_range_program(unaligned_offset as u32, &pad_buf, true)) }
+            unsafe { self.in_ram(|| ram_helpers::flash_range_program(unaligned_offset as u32, &pad_buf, true))? }
         }
 
         let remaining_len = bytes.len() - start_padding;
@@ -130,12 +132,12 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
             if bytes.as_ptr() as usize >= 0x2000_0000 {
                 let aligned_data = &bytes[start_padding..end_padding];
 
-                unsafe { self.in_ram(|| ram_helpers::flash_range_program(aligned_offset as u32, aligned_data, true)) }
+                unsafe { self.in_ram(|| ram_helpers::flash_range_program(aligned_offset as u32, aligned_data, true))? }
             } else {
                 for chunk in bytes[start_padding..end_padding].chunks_exact(PAGE_SIZE) {
                     let mut ram_buf = [0xFF_u8; PAGE_SIZE];
                     ram_buf.copy_from_slice(chunk);
-                    unsafe { self.in_ram(|| ram_helpers::flash_range_program(aligned_offset as u32, &ram_buf, true)) }
+                    unsafe { self.in_ram(|| ram_helpers::flash_range_program(aligned_offset as u32, &ram_buf, true))? }
                     aligned_offset += PAGE_SIZE;
                 }
             }
@@ -150,7 +152,7 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
 
             let unaligned_offset = end_offset - (PAGE_SIZE - rem_offset);
 
-            unsafe { self.in_ram(|| ram_helpers::flash_range_program(unaligned_offset as u32, &pad_buf, true)) }
+            unsafe { self.in_ram(|| ram_helpers::flash_range_program(unaligned_offset as u32, &pad_buf, true))? }
         }
 
         Ok(())
@@ -159,10 +161,17 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
     /// Make sure to uphold the contract points with rp2040-flash.
     /// - interrupts must be disabled
    /// - DMA must not access flash memory
-    unsafe fn in_ram(&mut self, operation: impl FnOnce()) {
+    unsafe fn in_ram(&mut self, operation: impl FnOnce()) -> Result<(), Error> {
         let dma_status = &mut [false; crate::dma::CHANNEL_COUNT];
 
-        // TODO: Make sure CORE1 is paused during the entire duration of the RAM function
+        // Make sure we're running on CORE0
+        let core_id: u32 = unsafe { pac::SIO.cpuid().read() };
+        if core_id != 0 {
+            return Err(Error::InvalidCore);
+        }
+
+        // Make sure CORE1 is paused during the entire duration of the RAM function
+        crate::multicore::pause_core1();
 
         critical_section::with(|_| {
             // Pause all DMA channels for the duration of the ram operation
@@ -185,6 +194,10 @@ impl<'d, T: Instance, const FLASH_SIZE: usize> Flash<'d, T, FLASH_SIZE> {
                 }
             }
         });
+
+        // Resume CORE1 execution
+        crate::multicore::resume_core1();
+        Ok(())
     }
 }
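A caller-side note (illustration, not part of the diff): since `in_ram` now returns `Result`, calling the flash erase/write methods from core 1 surfaces as the new `Error::InvalidCore` rather than proceeding while core 1 may still be executing from flash. A hedged sketch of handling that result; the `report` helper and its messages are made up for illustration:

use embassy_rp::flash::Error;

// `result` would come from one of the erase/write methods shown in the diff above.
fn report(result: Result<(), Error>) {
    match result {
        Ok(()) => defmt::info!("flash operation completed"),
        // New in this PR: flash operations are rejected unless running on core 0.
        Err(Error::InvalidCore) => defmt::warn!("flash operations must run on core 0"),
        Err(_) => defmt::warn!("flash operation failed"),
    }
}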
embassy-rp/src/lib.rs
@@ -5,6 +5,9 @@
 // This mod MUST go first, so that the others see its macros.
 pub(crate) mod fmt;
 
+#[cfg(feature = "critical-section-impl")]
+mod critical_section_impl;
+
 mod intrinsics;
 
 pub mod adc;
@@ -31,6 +34,7 @@ pub mod usb;
 
 pub mod clocks;
 pub mod flash;
+pub mod multicore;
 mod reset;
 
 // Reexports
@@ -111,6 +115,8 @@ embassy_hal_common::peripherals! {
 
     ADC,
 
+    CORE1,
+
     PIO0,
     PIO1,
 }
embassy-rp/src/multicore.rs (new file, 306 lines)
@@ -0,0 +1,306 @@
//! Multicore support
//!
//! This module handles setup of the 2nd cpu core on the rp2040, which we refer to as core1.
//! It provides functionality for setting up the stack, and starting core1.
//!
//! The entrypoint for core1 can be any function that never returns, including closures.
//!
//! Enable the `critical-section-impl` feature in embassy-rp when sharing data across cores using
//! the `embassy-sync` primitives and `CriticalSectionRawMutex`.
//!
//! # Usage
//! ```no_run
//! static mut CORE1_STACK: Stack<4096> = Stack::new();
//! static EXECUTOR0: StaticCell<Executor> = StaticCell::new();
//! static EXECUTOR1: StaticCell<Executor> = StaticCell::new();
//!
//! #[cortex_m_rt::entry]
//! fn main() -> ! {
//!     let p = embassy_rp::init(Default::default());
//!
//!     spawn_core1(p.CORE1, unsafe { &mut CORE1_STACK }, move || {
//!         let executor1 = EXECUTOR1.init(Executor::new());
//!         executor1.run(|spawner| unwrap!(spawner.spawn(core1_task())));
//!     });
//!
//!     let executor0 = EXECUTOR0.init(Executor::new());
//!     executor0.run(|spawner| unwrap!(spawner.spawn(core0_task())));
//! }
//! ```

use core::mem::ManuallyDrop;
use core::sync::atomic::{compiler_fence, Ordering};

use atomic_polyfill::AtomicBool;

use crate::interrupt::{Interrupt, InterruptExt};
use crate::peripherals::CORE1;
use crate::{interrupt, pac};

const PAUSE_TOKEN: u32 = 0xDEADBEEF;
const RESUME_TOKEN: u32 = !0xDEADBEEF;
static IS_CORE1_INIT: AtomicBool = AtomicBool::new(false);

#[inline(always)]
fn install_stack_guard(stack_bottom: *mut usize) {
    let core = unsafe { cortex_m::Peripherals::steal() };

    // Trap if MPU is already configured
    if core.MPU.ctrl.read() != 0 {
        cortex_m::asm::udf();
    }

    // The minimum we can protect is 32 bytes on a 32 byte boundary, so round up which will
    // just shorten the valid stack range a tad.
    let addr = (stack_bottom as u32 + 31) & !31;
    // Mask is 1 bit per 32 bytes of the 256 byte range... clear the bit for the segment we want
    let subregion_select = 0xff ^ (1 << ((addr >> 5) & 7));
    unsafe {
        core.MPU.ctrl.write(5); // enable mpu with background default map
        core.MPU.rbar.write((addr & !0xff) | 0x8);
        core.MPU.rasr.write(
            1 // enable region
               | (0x7 << 1) // size 2^(7 + 1) = 256
               | (subregion_select << 8)
               | 0x10000000, // XN = disable instruction fetch; no other bits means no permissions
        );
    }
}

#[inline(always)]
fn core1_setup(stack_bottom: *mut usize) {
    install_stack_guard(stack_bottom);
}

/// Data type for a properly aligned stack of N bytes
#[repr(C, align(32))]
pub struct Stack<const SIZE: usize> {
    /// Memory to be used for the stack
    pub mem: [u8; SIZE],
}

impl<const SIZE: usize> Stack<SIZE> {
    /// Construct a stack of length SIZE, initialized to 0
    pub const fn new() -> Stack<SIZE> {
        Stack { mem: [0_u8; SIZE] }
    }
}

#[interrupt]
#[link_section = ".data.ram_func"]
unsafe fn SIO_IRQ_PROC1() {
    let sio = pac::SIO;
    // Clear IRQ
    sio.fifo().st().write(|w| w.set_wof(false));

    while sio.fifo().st().read().vld() {
        // Pause CORE1 execution and disable interrupts
        if fifo_read_wfe() == PAUSE_TOKEN {
            cortex_m::interrupt::disable();
            // Signal to CORE0 that execution is paused
            fifo_write(PAUSE_TOKEN);
            // Wait for `resume` signal from CORE0
            while fifo_read_wfe() != RESUME_TOKEN {
                cortex_m::asm::nop();
            }
            cortex_m::interrupt::enable();
            // Signal to CORE0 that execution is resumed
            fifo_write(RESUME_TOKEN);
        }
    }
}

/// Spawn a function on this core
pub fn spawn_core1<F, const SIZE: usize>(_core1: CORE1, stack: &'static mut Stack<SIZE>, entry: F)
where
    F: FnOnce() -> bad::Never + Send + 'static,
{
    // The first two ignored `u64` parameters are there to take up all of the registers,
    // which means that the rest of the arguments are taken from the stack,
    // where we're able to put them from core 0.
    extern "C" fn core1_startup<F: FnOnce() -> bad::Never>(
        _: u64,
        _: u64,
        entry: &mut ManuallyDrop<F>,
        stack_bottom: *mut usize,
    ) -> ! {
        core1_setup(stack_bottom);
        let entry = unsafe { ManuallyDrop::take(entry) };
        // Signal that it's safe for core 0 to get rid of the original value now.
        fifo_write(1);

        IS_CORE1_INIT.store(true, Ordering::Release);
        // Enable fifo interrupt on CORE1 for `pause` functionality.
        let irq = unsafe { interrupt::SIO_IRQ_PROC1::steal() };
        irq.enable();

        entry()
    }

    // Reset the core
    unsafe {
        let psm = pac::PSM;
        psm.frce_off().modify(|w| w.set_proc1(true));
        while !psm.frce_off().read().proc1() {
            cortex_m::asm::nop();
        }
        psm.frce_off().modify(|w| w.set_proc1(false));
    }

    let mem = unsafe { core::slice::from_raw_parts_mut(stack.mem.as_mut_ptr() as *mut usize, stack.mem.len() / 4) };

    // Set up the stack
    let mut stack_ptr = unsafe { mem.as_mut_ptr().add(mem.len()) };

    // We don't want to drop this, since it's getting moved to the other core.
    let mut entry = ManuallyDrop::new(entry);

    // Push the arguments to `core1_startup` onto the stack.
    unsafe {
        // Push `stack_bottom`.
        stack_ptr = stack_ptr.sub(1);
        stack_ptr.cast::<*mut usize>().write(mem.as_mut_ptr());

        // Push `entry`.
        stack_ptr = stack_ptr.sub(1);
        stack_ptr.cast::<&mut ManuallyDrop<F>>().write(&mut entry);
    }

    // Make sure the compiler does not reorder the stack writes to after the
    // below FIFO writes, which would result in them not being seen by the second
    // core.
    //
    // From the compiler perspective, this doesn't guarantee that the second core
    // actually sees those writes. However, we know that the RP2040 doesn't have
    // memory caches, and writes happen in-order.
    compiler_fence(Ordering::Release);

    let p = unsafe { cortex_m::Peripherals::steal() };
    let vector_table = p.SCB.vtor.read();

    // After reset, core 1 is waiting to receive commands over FIFO.
    // This is the sequence to have it jump to some code.
    let cmd_seq = [
        0,
        0,
        1,
        vector_table as usize,
        stack_ptr as usize,
        core1_startup::<F> as usize,
    ];

    let mut seq = 0;
    let mut fails = 0;
    loop {
        let cmd = cmd_seq[seq] as u32;
        if cmd == 0 {
            fifo_drain();
            cortex_m::asm::sev();
        }
        fifo_write(cmd);

        let response = fifo_read();
        if cmd == response {
            seq += 1;
        } else {
            seq = 0;
            fails += 1;
            if fails > 16 {
                // The second core isn't responding, and isn't going to take the entrypoint
                panic!("CORE1 not responding");
            }
        }
        if seq >= cmd_seq.len() {
            break;
        }
    }

    // Wait until the other core has copied `entry` before returning.
    fifo_read();
}

/// Pause execution on CORE1.
pub fn pause_core1() {
    if IS_CORE1_INIT.load(Ordering::Acquire) {
        fifo_write(PAUSE_TOKEN);
        // Wait for CORE1 to signal it has paused execution.
        while fifo_read() != PAUSE_TOKEN {}
    }
}

/// Resume CORE1 execution.
pub fn resume_core1() {
    if IS_CORE1_INIT.load(Ordering::Acquire) {
        fifo_write(RESUME_TOKEN);
        // Wait for CORE1 to signal it has resumed execution.
        while fifo_read() != RESUME_TOKEN {}
    }
}

// Push a value to the inter-core FIFO, block until space is available
#[inline(always)]
fn fifo_write(value: u32) {
    unsafe {
        let sio = pac::SIO;
        // Wait for the FIFO to have enough space
        while !sio.fifo().st().read().rdy() {
            cortex_m::asm::nop();
        }
        sio.fifo().wr().write_value(value);
    }
    // Fire off an event to the other core.
    // This is required as the other core may be `wfe` (waiting for event)
    cortex_m::asm::sev();
}

// Pop a value from inter-core FIFO, block until available
#[inline(always)]
fn fifo_read() -> u32 {
    unsafe {
        let sio = pac::SIO;
        // Wait until FIFO has data
        while !sio.fifo().st().read().vld() {
            cortex_m::asm::nop();
        }
        sio.fifo().rd().read()
    }
}

// Pop a value from inter-core FIFO, `wfe` until available
#[inline(always)]
fn fifo_read_wfe() -> u32 {
    unsafe {
        let sio = pac::SIO;
        // Wait until FIFO has data
        while !sio.fifo().st().read().vld() {
            cortex_m::asm::wfe();
        }
        sio.fifo().rd().read()
    }
}

// Drain inter-core FIFO
#[inline(always)]
fn fifo_drain() {
    unsafe {
        let sio = pac::SIO;
        while sio.fifo().st().read().vld() {
            let _ = sio.fifo().rd().read();
        }
    }
}

// https://github.com/nvzqz/bad-rs/blob/master/src/never.rs
mod bad {
    pub(crate) type Never = <F as HasOutput>::Output;

    pub trait HasOutput {
        type Output;
    }

    impl<O> HasOutput for fn() -> O {
        type Output = O;
    }

    type F = fn() -> !;
}
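One further aside (illustration, not part of the diff): `pause_core1`/`resume_core1` are exactly what the updated flash `in_ram` uses, and any other operation that core 1 must not run concurrently with can be bracketed the same way. A small sketch; the `with_core1_paused` helper is hypothetical:

// Illustrative only: hold core 1 in its FIFO pause loop while `f` runs.
// Both calls are no-ops until core 1 has actually been spawned.
fn with_core1_paused<R>(f: impl FnOnce() -> R) -> R {
    embassy_rp::multicore::pause_core1();
    let result = f();
    embassy_rp::multicore::resume_core1();
    result
}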
examples/rp/Cargo.toml
@@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0"
 embassy-sync = { version = "0.1.0", path = "../../embassy-sync", features = ["defmt"] }
 embassy-executor = { version = "0.1.0", path = "../../embassy-executor", features = ["defmt", "integrated-timers"] }
 embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt", "defmt-timestamp-uptime"] }
-embassy-rp = { version = "0.1.0", path = "../../embassy-rp", features = ["defmt", "unstable-traits", "nightly", "unstable-pac", "time-driver", "pio"] }
+embassy-rp = { version = "0.1.0", path = "../../embassy-rp", features = ["defmt", "unstable-traits", "nightly", "unstable-pac", "time-driver", "pio", "critical-section-impl"] }
 embassy-usb = { version = "0.1.0", path = "../../embassy-usb", features = ["defmt"] }
 embassy-net = { version = "0.1.0", path = "../../embassy-net", features = ["defmt", "nightly", "tcp", "dhcpv4", "medium-ethernet", "pool-16"] }
 embassy-futures = { version = "0.1.0", path = "../../embassy-futures" }
@@ -18,7 +18,8 @@ embassy-usb-logger = { version = "0.1.0", path = "../../embassy-usb-logger" }
 defmt = "0.3"
 defmt-rtt = "0.4"
 
-cortex-m = { version = "0.7.6", features = ["critical-section-single-core"] }
+#cortex-m = { version = "0.7.6", features = ["critical-section-single-core"] }
+cortex-m = { version = "0.7.6" }
 cortex-m-rt = "0.7.0"
 panic-probe = { version = "0.3", features = ["print-defmt"] }
 futures = { version = "0.3.17", default-features = false, features = ["async-await", "cfg-target-has-atomic", "unstable"] }
examples/rp/src/bin/multicore.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]

use defmt::*;
use embassy_executor::Executor;
use embassy_executor::_export::StaticCell;
use embassy_rp::gpio::{Level, Output};
use embassy_rp::multicore::{spawn_core1, Stack};
use embassy_rp::peripherals::PIN_25;
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::channel::Channel;
use embassy_time::{Duration, Timer};
use {defmt_rtt as _, panic_probe as _};

static mut CORE1_STACK: Stack<4096> = Stack::new();
static EXECUTOR0: StaticCell<Executor> = StaticCell::new();
static EXECUTOR1: StaticCell<Executor> = StaticCell::new();
static CHANNEL: Channel<CriticalSectionRawMutex, LedState, 1> = Channel::new();

enum LedState {
    On,
    Off,
}

#[cortex_m_rt::entry]
fn main() -> ! {
    let p = embassy_rp::init(Default::default());
    let led = Output::new(p.PIN_25, Level::Low);

    spawn_core1(p.CORE1, unsafe { &mut CORE1_STACK }, move || {
        let executor1 = EXECUTOR1.init(Executor::new());
        executor1.run(|spawner| unwrap!(spawner.spawn(core1_task(led))));
    });

    let executor0 = EXECUTOR0.init(Executor::new());
    executor0.run(|spawner| unwrap!(spawner.spawn(core0_task())));
}

#[embassy_executor::task]
async fn core0_task() {
    info!("Hello from core 0");
    loop {
        CHANNEL.send(LedState::On).await;
        Timer::after(Duration::from_millis(100)).await;
        CHANNEL.send(LedState::Off).await;
        Timer::after(Duration::from_millis(400)).await;
    }
}

#[embassy_executor::task]
async fn core1_task(mut led: Output<'static, PIN_25>) {
    info!("Hello from core 1");
    loop {
        match CHANNEL.recv().await {
            LedState::On => led.set_high(),
            LedState::Off => led.set_low(),
        }
    }
}
tests/rp/Cargo.toml
@@ -8,13 +8,13 @@ license = "MIT OR Apache-2.0"
 embassy-sync = { version = "0.1.0", path = "../../embassy-sync", features = ["defmt"] }
 embassy-executor = { version = "0.1.0", path = "../../embassy-executor", features = ["defmt", "integrated-timers"] }
 embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt"] }
-embassy-rp = { version = "0.1.0", path = "../../embassy-rp", features = ["nightly", "defmt", "unstable-pac", "unstable-traits", "time-driver"] }
+embassy-rp = { version = "0.1.0", path = "../../embassy-rp", features = ["nightly", "defmt", "unstable-pac", "unstable-traits", "time-driver", "critical-section-impl"] }
 embassy-futures = { version = "0.1.0", path = "../../embassy-futures" }
 
 defmt = "0.3.0"
 defmt-rtt = "0.4"
 
-cortex-m = { version = "0.7.6", features = ["critical-section-single-core"] }
+cortex-m = { version = "0.7.6" }
 cortex-m-rt = "0.7.0"
 embedded-hal = "0.2.6"
 embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.9" }
tests/rp/src/bin/multicore.rs (new file, 47 lines)
@@ -0,0 +1,47 @@
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]

use defmt::{info, unwrap};
use embassy_executor::Executor;
use embassy_executor::_export::StaticCell;
use embassy_rp::multicore::{spawn_core1, Stack};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::channel::Channel;
use {defmt_rtt as _, panic_probe as _};

static mut CORE1_STACK: Stack<1024> = Stack::new();
static EXECUTOR0: StaticCell<Executor> = StaticCell::new();
static EXECUTOR1: StaticCell<Executor> = StaticCell::new();
static CHANNEL0: Channel<CriticalSectionRawMutex, bool, 1> = Channel::new();
static CHANNEL1: Channel<CriticalSectionRawMutex, bool, 1> = Channel::new();

#[cortex_m_rt::entry]
fn main() -> ! {
    let p = embassy_rp::init(Default::default());
    spawn_core1(p.CORE1, unsafe { &mut CORE1_STACK }, move || {
        let executor1 = EXECUTOR1.init(Executor::new());
        executor1.run(|spawner| unwrap!(spawner.spawn(core1_task())));
    });
    let executor0 = EXECUTOR0.init(Executor::new());
    executor0.run(|spawner| unwrap!(spawner.spawn(core0_task())));
}

#[embassy_executor::task]
async fn core0_task() {
    info!("CORE0 is running");
    let ping = true;
    CHANNEL0.send(ping).await;
    let pong = CHANNEL1.recv().await;
    assert_eq!(ping, pong);

    info!("Test OK");
    cortex_m::asm::bkpt();
}

#[embassy_executor::task]
async fn core1_task() {
    info!("CORE1 is running");
    let ping = CHANNEL0.recv().await;
    CHANNEL1.send(ping).await;
}