+ 'a,
+ _request: Request,
+ peri_addr: *mut W,
+ buffer: &'a mut [W],
+ _options: TransferOptions,
+ ) -> Self {
+ into_ref!(channel);
+
+ let len = buffer.len();
+ assert!(len > 0 && len <= 0xFFFF);
+
+ let dir = Dir::PeripheralToMemory;
+ let data_size = W::size();
+
+ let channel_number = channel.num();
+ let dma = channel.regs();
+
+ // "Preceding reads and writes cannot be moved past subsequent writes."
+ fence(Ordering::SeqCst);
+
+ #[cfg(bdma_v2)]
+ critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
+
+ let mut w = regs::Cr(0);
+ w.set_psize(data_size.into());
+ w.set_msize(data_size.into());
+ w.set_minc(vals::Inc::ENABLED);
+ w.set_dir(dir.into());
+ w.set_teie(true);
+ w.set_htie(true);
+ w.set_tcie(true);
+ w.set_circ(vals::Circ::ENABLED);
+ w.set_pl(vals::Pl::VERYHIGH);
+ w.set_en(true);
+
+ let buffer_ptr = buffer.as_mut_ptr();
+ let mut this = Self {
+ channel,
+ cr: w,
+ ringbuf: DmaRingBuffer::new(buffer),
+ };
+ this.clear_irqs();
+
+ #[cfg(dmamux)]
+ super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+ let ch = dma.ch(channel_number);
+ ch.par().write_value(peri_addr as u32);
+ ch.mar().write_value(buffer_ptr as u32);
+ ch.ndtr().write(|w| w.set_ndt(len as u16));
+
+ this
+ }
+
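+ /// Start the circular transfer by writing the control register value
+ /// prepared in `new_read` (the enable bit is already set in that value).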
+ pub fn start(&mut self) {
+ let ch = self.channel.regs().ch(self.channel.num());
+ unsafe { ch.cr().write_value(self.cr) }
+ }
+
+ pub fn clear(&mut self) {
+ self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+ }
+
+ /// Read bytes from the ring buffer
+ /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
+ pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
+ self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.ringbuf.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.ringbuf.len()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.ringbuf.dma_buf.len()
+ }
+
+ pub fn set_waker(&mut self, waker: &Waker) {
+ STATE.ch_wakers[self.channel.index()].register(waker);
+ }
+
+ fn clear_irqs(&mut self) {
+ let dma = self.channel.regs();
+ unsafe {
+ dma.ifcr().write(|w| {
+ w.set_htif(self.channel.num(), true);
+ w.set_tcif(self.channel.num(), true);
+ w.set_teif(self.channel.num(), true);
+ })
+ }
+ }
+
+ pub fn request_stop(&mut self) {
+ let ch = self.channel.regs().ch(self.channel.num());
+
+ // Disable the channel. Keep the IEs enabled so the irqs still fire.
+ unsafe {
+ ch.cr().write(|w| {
+ w.set_teie(true);
+ w.set_htie(true);
+ w.set_tcie(true);
+ })
+ }
+ }
+
+ pub fn is_running(&mut self) -> bool {
+ let ch = self.channel.regs().ch(self.channel.num());
+ unsafe { ch.cr().read() }.en()
+ }
+
+ /// Synchronize the position of the ring buffer to the actual DMA controller position
+ pub fn reload_position(&mut self) {
+ let ch = self.channel.regs().ch(self.channel.num());
+ self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
+ }
+}
+
+impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+ fn drop(&mut self) {
+ self.request_stop();
+ while self.is_running() {}
+
+ // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+ fence(Ordering::SeqCst);
+ }
+}
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
index ef1d27573..7b17d9e49 100644
--- a/embassy-stm32/src/dma/dma.rs
+++ b/embassy-stm32/src/dma/dma.rs
@@ -4,16 +4,17 @@ use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll, Waker};
+use atomic_polyfill::AtomicUsize;
use embassy_cortex_m::interrupt::Priority;
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
-use pac::dma::regs;
+use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
use super::word::{Word, WordSize};
use super::Dir;
use crate::_generated::DMA_CHANNEL_COUNT;
use crate::interrupt::{Interrupt, InterruptExt};
-use crate::pac::dma::vals;
+use crate::pac::dma::{regs, vals};
use crate::{interrupt, pac};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -128,13 +129,16 @@ impl From<FifoThreshold> for vals::Fth {
struct State {
ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
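+ // Count of transfer-complete interrupts per channel; ring buffers use it to
+ // detect when the DMA writer has wrapped around the buffer.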
+ complete_count: [AtomicUsize; DMA_CHANNEL_COUNT],
}
impl State {
const fn new() -> Self {
+ const ZERO: AtomicUsize = AtomicUsize::new(0);
const AW: AtomicWaker = AtomicWaker::new();
Self {
ch_wakers: [AW; DMA_CHANNEL_COUNT],
+ complete_count: [ZERO; DMA_CHANNEL_COUNT],
}
}
}
@@ -183,9 +187,22 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index:
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
+ let mut wake = false;
+
+ if isr.htif(channel_num % 4) && cr.read().htie() {
+ // Acknowledge half transfer complete interrupt
+ dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
+ wake = true;
+ }
+
if isr.tcif(channel_num % 4) && cr.read().tcie() {
- /* acknowledge transfer complete interrupt */
+ // Acknowledge transfer complete interrupt
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
+ STATE.complete_count[index].fetch_add(1, Ordering::Release);
+ wake = true;
+ }
+
+ if wake {
STATE.ch_wakers[index].wake();
}
}
@@ -445,7 +462,6 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
// ==================================
-#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct DoubleBuffered<'a, C: Channel, W: Word> {
channel: PeripheralRef<'a, C>,
_phantom: PhantomData<W>,
@@ -530,6 +546,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
unsafe {
dma.ifcr(isrn).write(|w| {
+ w.set_htif(isrbit, true);
w.set_tcif(isrbit, true);
w.set_teif(isrbit, true);
})
@@ -578,15 +595,6 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.ndtr().read() }.ndt()
}
-
- pub fn blocking_wait(mut self) {
- while self.is_running() {}
-
- // "Subsequent reads and writes cannot be moved ahead of preceding reads."
- fence(Ordering::SeqCst);
-
- core::mem::forget(self);
- }
}
impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
@@ -598,3 +606,180 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
fence(Ordering::SeqCst);
}
}
+
+// ==============================
+
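+/// Adapter that exposes a channel's NDTR register and per-channel
+/// transfer-complete counter to the ring buffer through the `DmaCtrl` trait.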
+struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
+
+impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
+ fn ndtr(&self) -> usize {
+ let ch = self.0.regs().st(self.0.num());
+ unsafe { ch.ndtr().read() }.ndt() as usize
+ }
+
+ fn get_complete_count(&self) -> usize {
+ STATE.complete_count[self.0.index()].load(Ordering::Acquire)
+ }
+
+ fn reset_complete_count(&mut self) -> usize {
+ STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
+ }
+}
+
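+/// A circular peripheral-to-memory DMA transfer paired with a software reader
+/// that follows how far the DMA controller has written.
+///
+/// A rough usage sketch (`p.DMA2_CH2`, `request`, `peri_dr` and `options` are
+/// placeholders for values obtained elsewhere):
+///
+/// ```ignore
+/// let mut storage = [0u8; 256];
+/// let mut ring = unsafe { RingBuffer::new_read(p.DMA2_CH2, request, peri_dr, &mut storage, options) };
+/// ring.start();
+/// loop {
+///     // Sync the reader with the hardware write position, then copy out data.
+///     ring.reload_position();
+///     let mut out = [0u8; 64];
+///     match ring.read(&mut out) {
+///         Ok(n) => { /* `n` elements were copied into `out` */ }
+///         Err(OverrunError) => ring.clear(), // the DMA writer lapped the reader
+///     }
+/// }
+/// ```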
+pub struct RingBuffer<'a, C: Channel, W: Word> {
+ cr: regs::Cr,
+ channel: PeripheralRef<'a, C>,
+ ringbuf: DmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
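+ /// Configure (but do not start) a circular peripheral-to-memory transfer
+ /// into `buffer`; `peri_addr` should point at the data register of the
+ /// peripheral that issues `_request`. Call `start` to enable the channel.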
+ pub unsafe fn new_read(
+ channel: impl Peripheral<P = C> + 'a,
+ _request: Request,
+ peri_addr: *mut W,
+ buffer: &'a mut [W],
+ options: TransferOptions,
+ ) -> Self {
+ into_ref!(channel);
+
+ let len = buffer.len();
+ assert!(len > 0 && len <= 0xFFFF);
+
+ let dir = Dir::PeripheralToMemory;
+ let data_size = W::size();
+
+ let channel_number = channel.num();
+ let dma = channel.regs();
+
+ // "Preceding reads and writes cannot be moved past subsequent writes."
+ fence(Ordering::SeqCst);
+
+ let mut w = regs::Cr(0);
+ w.set_dir(dir.into());
+ w.set_msize(data_size.into());
+ w.set_psize(data_size.into());
+ w.set_pl(vals::Pl::VERYHIGH);
+ w.set_minc(vals::Inc::INCREMENTED);
+ w.set_pinc(vals::Inc::FIXED);
+ w.set_teie(true);
+ w.set_htie(true);
+ w.set_tcie(true);
+ w.set_circ(vals::Circ::ENABLED);
+ #[cfg(dma_v1)]
+ w.set_trbuff(true);
+ #[cfg(dma_v2)]
+ w.set_chsel(_request);
+ w.set_pburst(options.pburst.into());
+ w.set_mburst(options.mburst.into());
+ w.set_pfctrl(options.flow_ctrl.into());
+ w.set_en(true);
+
+ let buffer_ptr = buffer.as_mut_ptr();
+ let mut this = Self {
+ channel,
+ cr: w,
+ ringbuf: DmaRingBuffer::new(buffer),
+ };
+ this.clear_irqs();
+
+ #[cfg(dmamux)]
+ super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+ let ch = dma.st(channel_number);
+ ch.par().write_value(peri_addr as u32);
+ ch.m0ar().write_value(buffer_ptr as u32);
+ ch.ndtr().write_value(regs::Ndtr(len as _));
+ ch.fcr().write(|w| {
+ if let Some(fth) = options.fifo_threshold {
+ // FIFO mode
+ w.set_dmdis(vals::Dmdis::DISABLED);
+ w.set_fth(fth.into());
+ } else {
+ // Direct mode
+ w.set_dmdis(vals::Dmdis::ENABLED);
+ }
+ });
+
+ this
+ }
+
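+ /// Write the preconfigured control register value (EN already set) to start
+ /// the circular transfer.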
+ pub fn start(&mut self) {
+ let ch = self.channel.regs().st(self.channel.num());
+ unsafe { ch.cr().write_value(self.cr) }
+ }
+
+ pub fn clear(&mut self) {
+ self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+ }
+
+ /// Read bytes from the ring buffer
+ /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
+ pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
+ self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.ringbuf.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.ringbuf.len()
+ }
+
+ pub fn capacity(&self) -> usize {
+ self.ringbuf.dma_buf.len()
+ }
+
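+ /// Register a waker that the DMA interrupt wakes on half-transfer and
+ /// transfer-complete events.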
+ pub fn set_waker(&mut self, waker: &Waker) {
+ STATE.ch_wakers[self.channel.index()].register(waker);
+ }
+
+ fn clear_irqs(&mut self) {
+ let channel_number = self.channel.num();
+ let dma = self.channel.regs();
+ let isrn = channel_number / 4;
+ let isrbit = channel_number % 4;
+
+ unsafe {
+ dma.ifcr(isrn).write(|w| {
+ w.set_htif(isrbit, true);
+ w.set_tcif(isrbit, true);
+ w.set_teif(isrbit, true);
+ })
+ }
+ }
+
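+ /// Request the transfer to stop: the enable bit is cleared, while the
+ /// interrupt enables stay set so pending flags still fire.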
+ pub fn request_stop(&mut self) {
+ let ch = self.channel.regs().st(self.channel.num());
+
+ // Disable the channel. Keep the IEs enabled so the irqs still fire.
+ unsafe {
+ ch.cr().write(|w| {
+ w.set_teie(true);
+ w.set_htie(true);
+ w.set_tcie(true);
+ })
+ }
+ }
+
+ pub fn is_running(&mut self) -> bool {
+ let ch = self.channel.regs().st(self.channel.num());
+ unsafe { ch.cr().read() }.en()
+ }
+
+ /// Synchronize the position of the ring buffer to the actual DMA controller position
+ pub fn reload_position(&mut self) {
+ let ch = self.channel.regs().st(self.channel.num());
+ self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
+ }
+}
+
+impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+ fn drop(&mut self) {
+ self.request_stop();
+ while self.is_running() {}
+
+ // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+ fence(Ordering::SeqCst);
+ }
+}
diff --git a/embassy-stm32/src/dma/mod.rs b/embassy-stm32/src/dma/mod.rs
index 3312ca752..3ac0d1b3d 100644
--- a/embassy-stm32/src/dma/mod.rs
+++ b/embassy-stm32/src/dma/mod.rs
@@ -21,6 +21,7 @@ pub use gpdma::*;
#[cfg(dmamux)]
mod dmamux;
+pub(crate) mod ringbuffer;
pub mod word;
use core::mem;
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
new file mode 100644
index 000000000..38cc87ae9
--- /dev/null
+++ b/embassy-stm32/src/dma/ringbuffer.rs
@@ -0,0 +1,420 @@
+#![cfg_attr(gpdma, allow(unused))]
+
+use core::ops::Range;
+use core::sync::atomic::{compiler_fence, Ordering};
+
+use super::word::Word;
+
+/// A "read-only" ring-buffer to be used together with the DMA controller which
+/// writes in a circular way, "uncontrolled" to the buffer.
+///
+/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
+/// to the current register value. `ndtr` describes the current position of the DMA
+/// write.
+///
+/// # Buffer layout
+///
+/// ```text
+/// Without wraparound:                          With wraparound:
+///
+/// + buf                       +---- NDTR ---+     + buf         +----------- NDTR ----------+
+/// |                           |             |     |             |                           |
+/// v                           v             v     v             v                           v
+/// +-----------------------------------------+     +-----------------------------------------+
+/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo|     |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
+/// +-----------------------------------------+     +-----------------------------------------+
+/// ^           ^               ^                   ^             ^           ^
+/// |           |               |                   |             |           |
+/// +- first ---+               |                   +- end -------+           |
+/// |                           |                   |                         |
+/// +- end ---------------------+                   +- first -----------------+
+/// ```
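+///
+/// `first` is the next element the software reader will take out, while the
+/// write position (`end`) is derived from the hardware as
+/// `dma_buf.len() - ndtr`, i.e. how far the DMA has advanced since it last
+/// wrapped.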
+pub struct DmaRingBuffer<'a, W: Word> {
+ pub(crate) dma_buf: &'a mut [W],
+ first: usize,
+ pub ndtr: usize,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct OverrunError;
+
+pub trait DmaCtrl {
+ /// Get the NDTR register value, i.e. the space left in the underlying
+ /// buffer until the dma writer wraps.
+ fn ndtr(&self) -> usize;
+
+ /// Get the transfer completed counter.
+ /// This counter is incremented in the transfer-complete interrupt, i.e. each
+ /// time NDTR is reloaded and the DMA writer wraps.
+ fn get_complete_count(&self) -> usize;
+
+ /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
+ fn reset_complete_count(&mut self) -> usize;
+}
+
+impl<'a, W: Word> DmaRingBuffer<'a, W> {
+ pub fn new(dma_buf: &'a mut [W]) -> Self {
+ let ndtr = dma_buf.len();
+ Self {
+ dma_buf,
+ first: 0,
+ ndtr,
+ }
+ }
+
+ /// Reset the ring buffer to its initial state
+ pub fn clear(&mut self, mut dma: impl DmaCtrl) {
+ self.first = 0;
+ self.ndtr = self.dma_buf.len();
+ dma.reset_complete_count();
+ }
+
+ /// The buffer end position
+ fn end(&self) -> usize {
+ self.dma_buf.len() - self.ndtr
+ }
+
+ /// Returns whether the buffer is empty
+ pub fn is_empty(&self) -> bool {
+ self.first == self.end()
+ }
+
+ /// The current number of elements in the buffer.
+ /// This may change at any time while the DMA is active.
+ pub fn len(&self) -> usize {
+ // Read out a stable end (the DMA peripheral can change it at any time)
+ let end = self.end();
+ if self.first <= end {
+ // No wrap
+ end - self.first
+ } else {
+ self.dma_buf.len() - self.first + end
+ }
+ }
+
+ /// Read bytes from the ring buffer
+ /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
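+ /// Three cases are distinguished: the buffer is empty (`first == end`),
+ /// the unread data is contiguous (`first < end`), or the unread data wraps
+ /// past the end of the buffer (`first > end`).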
+ pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> {
+ let end = self.end();
+
+ compiler_fence(Ordering::SeqCst);
+
+ if self.first == end {
+ // The buffer is currently empty
+
+ if dma.get_complete_count() > 0 {
+ // The DMA has written such that the ring buffer wraps at least once
+ self.ndtr = dma.ndtr();
+ if self.end() > self.first || dma.get_complete_count() > 1 {
+ return Err(OverrunError);
+ }
+ }
+
+ Ok(0)
+ } else if self.first < end {
+ // The available, unread portion in the ring buffer DOES NOT wrap
+
+ if dma.get_complete_count() > 1 {
+ return Err(OverrunError);
+ }
+
+ // Copy out the bytes from the dma buffer
+ let len = self.copy_to(buf, self.first..end);
+
+ compiler_fence(Ordering::SeqCst);
+
+ match dma.get_complete_count() {
+ 0 => {
+ // The DMA writer has not wrapped before nor after the copy
+ }
+ 1 => {
+ // The DMA writer has written such that the ring buffer now wraps
+ self.ndtr = dma.ndtr();
+ if self.end() > self.first || dma.get_complete_count() > 1 {
+ // The bytes we just copied out may have been overwritten,
+ // since the writer has wrapped and is now writing within
+ // the region we copied from
+ return Err(OverrunError);
+ }
+ }
+ _ => {
+ return Err(OverrunError);
+ }
+ }
+
+ self.first = (self.first + len) % self.dma_buf.len();
+ Ok(len)
+ } else {
+ // The available, unread portion in the ring buffer DOES wrap
+ // The DMA writer has wrapped since we last read and is currently
+ // writing (or will write next) at the beginning of the ring buffer.
+
+ let complete_count = dma.get_complete_count();
+ if complete_count > 1 {
+ return Err(OverrunError);
+ }
+
+ // If the unread portion wraps then the writer must also have wrapped
+ assert!(complete_count == 1);
+
+ if self.first + buf.len() < self.dma_buf.len() {
+ // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
+
+ // Copy out from the dma buffer
+ let len = self.copy_to(buf, self.first..self.dma_buf.len());
+
+ compiler_fence(Ordering::SeqCst);
+
+ // We have now copied out the data from dma_buf
+ // Make sure that the just read part was not overwritten during the copy
+ self.ndtr = dma.ndtr();
+ if self.end() > self.first || dma.get_complete_count() > 1 {
+ // The writer has entered the region we just read, between when we sampled `end` at the start of this call and now.
+ return Err(OverrunError);
+ }
+
+ self.first = (self.first + len) % self.dma_buf.len();
+ Ok(len)
+ } else {
+ // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
+ // so the next read will not have any unread tail bytes in the ring buffer.
+
+ // Copy out from the dma buffer
+ let tail = self.copy_to(buf, self.first..self.dma_buf.len());
+ let head = self.copy_to(&mut buf[tail..], 0..end);
+
+ compiler_fence(Ordering::SeqCst);
+
+ // We have now copied out the data from dma_buf
+ // Reset complete counter and make sure that the just read part was not overwritten during the copy
+ self.ndtr = dma.ndtr();
+ let complete_count = dma.reset_complete_count();
+ if self.end() > self.first || complete_count > 1 {
+ return Err(OverrunError);
+ }
+
+ self.first = head;
+ Ok(tail + head)
+ }
+ }
+ }
+
+ /// Copy from the dma buffer at `data_range` into `buf`
+ fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
+ // Limit the number of bytes that can be copied
+ let length = usize::min(data_range.len(), buf.len());
+
+ // Copy from dma buffer into read buffer
+ // We need to do it like this instead of a simple copy_from_slice() because
+ // reading from a part of memory that may be simultaneously written to is unsafe
+ unsafe {
+ let dma_buf = self.dma_buf.as_ptr();
+
+ for i in 0..length {
+ buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
+ }
+ }
+
+ length
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use core::array;
+ use core::cell::RefCell;
+
+ use super::*;
+
+ struct TestCtrl {
+ next_ndtr: RefCell