Merge #1404
1404: feat(stm32): Add DMA based, ring-buffer based rx uart, v3 r=Dirbaio a=rmja

This PR replaces #1150. Compared to that PR, this one has the following changes:

* The implementation now aligns with the new stm32 dma module, thanks `@Dirbaio!`
* Calls to `read()` now return on either 1) an idle line, or 2) the ring buffer being at most half full. This is different from the previous PR, which would return a lot of 1-byte reads. Thank you `@chemicstry` for making me realize that it was actually not what I wanted. This is accomplished using the half-transfer-complete and full-transfer-complete interrupts, both of which seem to be supported on both DMA and BDMA.

The implementation still has the issue mentioned here: https://github.com/embassy-rs/embassy/pull/1150#discussion_r1094627035

Regarding the todos here: https://github.com/embassy-rs/embassy/pull/1150#issuecomment-1513905925. I have removed the exposure of ndtr from `dma::RingBuffer` to the uart, so the uart now simply calls `ringbuf::reload_position()` to align the position within the ring buffer with that of the actual running DMA controller.

BDMA and GPDMA are not implemented. I do not have any chips with those DMA controllers, so maybe someone else should do this so that it can be tested.

The `saturate_serial` test utility inside `tests/utils` has an `--idles` switch which can be used to saturate the uart from a PC, but with random idles.

Because embassy-stm32 can now have tests, we should probably run them in CI. I do this locally to test the DmaRingBuffer: `cargo test --no-default-features --features stm32f429ig`.

cc `@chemicstry` `@Dirbaio`

Co-authored-by: Rasmus Melchior Jacobsen <rmja@laesoe.org>
Co-authored-by: Dario Nieuwenhuis <dirbaio@dirbaio.net>
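For reference, here is a minimal usage sketch of the new ring-buffered receiver, modeled on the `usart_rx_ringbuffered` test added further down. The USART instance, pins (PB6/PB7), DMA channels and the defmt/nightly setup are placeholders taken from one of the test boards and will differ per chip:

```rust
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]

use defmt::info;
use embassy_executor::Spawner;
use embassy_stm32::interrupt;
use embassy_stm32::usart::{Config, Uart};
use {defmt_rtt as _, panic_probe as _};

#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    let p = embassy_stm32::init(Default::default());

    // USART instance, pins and DMA channels are chip-specific placeholders.
    let irq = interrupt::take!(USART1);
    let usart = Uart::new(p.USART1, p.PB7, p.PB6, irq, p.DMA1_CH1, p.DMA1_CH2, Config::default());
    let (_tx, rx) = usart.split();

    // The DMA buffer must outlive the receiver and be at most 65535 bytes.
    static mut DMA_BUF: [u8; 256] = [0; 256];
    let dma_buf = unsafe { DMA_BUF.as_mut() };

    // Turn the plain UartRx into the new ring-buffered receiver.
    let mut rx = rx.into_ring_buffered(dma_buf);

    let mut buf = [0u8; 64];
    loop {
        // Returns on an idle line or once enough data has accumulated in the ring buffer.
        match rx.read(&mut buf).await {
            Ok(n) => info!("received {} bytes", n),
            Err(e) => {
                // Background reception stops on error; the next read() (or start()) restarts it.
                info!("uart error: {:?}", e);
            }
        }
    }
}
```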
This commit is contained in commit 6096f0cf4b.
12 changed files with 1416 additions and 60 deletions.
@ -3,18 +3,20 @@
|
|||
use core::future::Future;
|
||||
use core::pin::Pin;
|
||||
use core::sync::atomic::{fence, Ordering};
|
||||
use core::task::{Context, Poll};
|
||||
use core::task::{Context, Poll, Waker};
|
||||
|
||||
use atomic_polyfill::AtomicUsize;
|
||||
use embassy_cortex_m::interrupt::Priority;
|
||||
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
|
||||
use embassy_sync::waitqueue::AtomicWaker;
|
||||
|
||||
use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
|
||||
use super::word::{Word, WordSize};
|
||||
use super::Dir;
|
||||
use crate::_generated::BDMA_CHANNEL_COUNT;
|
||||
use crate::interrupt::{Interrupt, InterruptExt};
|
||||
use crate::pac;
|
||||
use crate::pac::bdma::vals;
|
||||
use crate::pac::bdma::{regs, vals};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
|
@ -48,13 +50,16 @@ impl From<Dir> for vals::Dir {
|
|||
|
||||
struct State {
|
||||
ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
|
||||
complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
|
||||
}
|
||||
|
||||
impl State {
|
||||
const fn new() -> Self {
|
||||
const ZERO: AtomicUsize = AtomicUsize::new(0);
|
||||
const AW: AtomicWaker = AtomicWaker::new();
|
||||
Self {
|
||||
ch_wakers: [AW; BDMA_CHANNEL_COUNT],
|
||||
complete_count: [ZERO; BDMA_CHANNEL_COUNT],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -105,8 +110,23 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index
|
|||
if isr.teif(channel_num) {
|
||||
panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num);
|
||||
}
|
||||
|
||||
let mut wake = false;
|
||||
|
||||
if isr.htif(channel_num) && cr.read().htie() {
|
||||
// Acknowledge half transfer complete interrupt
|
||||
dma.ifcr().write(|w| w.set_htif(channel_num, true));
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if isr.tcif(channel_num) && cr.read().tcie() {
|
||||
cr.write(|_| ()); // Disable channel interrupts with the default value.
|
||||
// Acknowledge transfer complete interrupt
|
||||
dma.ifcr().write(|w| w.set_tcif(channel_num, true));
|
||||
STATE.complete_count[index].fetch_add(1, Ordering::Release);
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if wake {
|
||||
STATE.ch_wakers[index].wake();
|
||||
}
|
||||
}
|
||||
|
@ -252,6 +272,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
|
|||
|
||||
let mut this = Self { channel };
|
||||
this.clear_irqs();
|
||||
STATE.complete_count[this.channel.index()].store(0, Ordering::Release);
|
||||
|
||||
#[cfg(dmamux)]
|
||||
super::dmamux::configure_dmamux(&mut *this.channel, _request);
|
||||
|
@ -299,7 +320,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
|
|||
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
let ch = self.channel.regs().ch(self.channel.num());
|
||||
unsafe { ch.cr().read() }.en()
|
||||
let en = unsafe { ch.cr().read() }.en();
|
||||
let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
|
||||
en && !tcif
|
||||
}
|
||||
|
||||
/// Gets the total remaining transfers for the channel
|
||||
|
@ -342,3 +365,161 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ==============================
|
||||
|
||||
struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
|
||||
|
||||
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
|
||||
fn ndtr(&self) -> usize {
|
||||
let ch = self.0.regs().ch(self.0.num());
|
||||
unsafe { ch.ndtr().read() }.ndt() as usize
|
||||
}
|
||||
|
||||
fn get_complete_count(&self) -> usize {
|
||||
STATE.complete_count[self.0.index()].load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
fn reset_complete_count(&mut self) -> usize {
|
||||
STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RingBuffer<'a, C: Channel, W: Word> {
|
||||
cr: regs::Cr,
|
||||
channel: PeripheralRef<'a, C>,
|
||||
ringbuf: DmaRingBuffer<'a, W>,
|
||||
}
|
||||
|
||||
impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
|
||||
pub unsafe fn new_read(
|
||||
channel: impl Peripheral<P = C> + 'a,
|
||||
_request: Request,
|
||||
peri_addr: *mut W,
|
||||
buffer: &'a mut [W],
|
||||
_options: TransferOptions,
|
||||
) -> Self {
|
||||
into_ref!(channel);
|
||||
|
||||
let len = buffer.len();
|
||||
assert!(len > 0 && len <= 0xFFFF);
|
||||
|
||||
let dir = Dir::PeripheralToMemory;
|
||||
let data_size = W::size();
|
||||
|
||||
let channel_number = channel.num();
|
||||
let dma = channel.regs();
|
||||
|
||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
#[cfg(bdma_v2)]
|
||||
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
|
||||
|
||||
let mut w = regs::Cr(0);
|
||||
w.set_psize(data_size.into());
|
||||
w.set_msize(data_size.into());
|
||||
w.set_minc(vals::Inc::ENABLED);
|
||||
w.set_dir(dir.into());
|
||||
w.set_teie(true);
|
||||
w.set_htie(true);
|
||||
w.set_tcie(true);
|
||||
w.set_circ(vals::Circ::ENABLED);
|
||||
w.set_pl(vals::Pl::VERYHIGH);
|
||||
w.set_en(true);
|
||||
|
||||
let buffer_ptr = buffer.as_mut_ptr();
|
||||
let mut this = Self {
|
||||
channel,
|
||||
cr: w,
|
||||
ringbuf: DmaRingBuffer::new(buffer),
|
||||
};
|
||||
this.clear_irqs();
|
||||
|
||||
#[cfg(dmamux)]
|
||||
super::dmamux::configure_dmamux(&mut *this.channel, _request);
|
||||
|
||||
let ch = dma.ch(channel_number);
|
||||
ch.par().write_value(peri_addr as u32);
|
||||
ch.mar().write_value(buffer_ptr as u32);
|
||||
ch.ndtr().write(|w| w.set_ndt(len as u16));
|
||||
|
||||
this
|
||||
}
|
||||
|
||||
pub fn start(&mut self) {
|
||||
let ch = self.channel.regs().ch(self.channel.num());
|
||||
unsafe { ch.cr().write_value(self.cr) }
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
|
||||
}
|
||||
|
||||
/// Read bytes from the ring buffer
|
||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
||||
pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
|
||||
self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.ringbuf.is_empty()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.ringbuf.len()
|
||||
}
|
||||
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.ringbuf.dma_buf.len()
|
||||
}
|
||||
|
||||
pub fn set_waker(&mut self, waker: &Waker) {
|
||||
STATE.ch_wakers[self.channel.index()].register(waker);
|
||||
}
|
||||
|
||||
fn clear_irqs(&mut self) {
|
||||
let dma = self.channel.regs();
|
||||
unsafe {
|
||||
dma.ifcr().write(|w| {
|
||||
w.set_htif(self.channel.num(), true);
|
||||
w.set_tcif(self.channel.num(), true);
|
||||
w.set_teif(self.channel.num(), true);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn request_stop(&mut self) {
|
||||
let ch = self.channel.regs().ch(self.channel.num());
|
||||
|
||||
// Disable the channel. Keep the IEs enabled so the irqs still fire.
|
||||
unsafe {
|
||||
ch.cr().write(|w| {
|
||||
w.set_teie(true);
|
||||
w.set_htie(true);
|
||||
w.set_tcie(true);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
let ch = self.channel.regs().ch(self.channel.num());
|
||||
unsafe { ch.cr().read() }.en()
|
||||
}
|
||||
|
||||
/// Synchronize the position of the ring buffer to the actual DMA controller position
|
||||
pub fn reload_position(&mut self) {
|
||||
let ch = self.channel.regs().ch(self.channel.num());
|
||||
self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
|
||||
fn drop(&mut self) {
|
||||
self.request_stop();
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,16 +4,17 @@ use core::pin::Pin;
|
|||
use core::sync::atomic::{fence, Ordering};
|
||||
use core::task::{Context, Poll, Waker};
|
||||
|
||||
use atomic_polyfill::AtomicUsize;
|
||||
use embassy_cortex_m::interrupt::Priority;
|
||||
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
|
||||
use embassy_sync::waitqueue::AtomicWaker;
|
||||
use pac::dma::regs;
|
||||
|
||||
use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
|
||||
use super::word::{Word, WordSize};
|
||||
use super::Dir;
|
||||
use crate::_generated::DMA_CHANNEL_COUNT;
|
||||
use crate::interrupt::{Interrupt, InterruptExt};
|
||||
use crate::pac::dma::vals;
|
||||
use crate::pac::dma::{regs, vals};
|
||||
use crate::{interrupt, pac};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
|
@ -128,13 +129,16 @@ impl From<FifoThreshold> for vals::Fth {
|
|||
|
||||
struct State {
|
||||
ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
|
||||
complete_count: [AtomicUsize; DMA_CHANNEL_COUNT],
|
||||
}
|
||||
|
||||
impl State {
|
||||
const fn new() -> Self {
|
||||
const ZERO: AtomicUsize = AtomicUsize::new(0);
|
||||
const AW: AtomicWaker = AtomicWaker::new();
|
||||
Self {
|
||||
ch_wakers: [AW; DMA_CHANNEL_COUNT],
|
||||
complete_count: [ZERO; DMA_CHANNEL_COUNT],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -183,9 +187,22 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index:
|
|||
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
|
||||
}
|
||||
|
||||
let mut wake = false;
|
||||
|
||||
if isr.htif(channel_num % 4) && cr.read().htie() {
|
||||
// Acknowledge half transfer complete interrupt
|
||||
dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if isr.tcif(channel_num % 4) && cr.read().tcie() {
|
||||
/* acknowledge transfer complete interrupt */
|
||||
// Acknowledge transfer complete interrupt
|
||||
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
|
||||
STATE.complete_count[index].fetch_add(1, Ordering::Release);
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if wake {
|
||||
STATE.ch_wakers[index].wake();
|
||||
}
|
||||
}
|
||||
|
@ -445,7 +462,6 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
|
|||
|
||||
// ==================================
|
||||
|
||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
||||
pub struct DoubleBuffered<'a, C: Channel, W: Word> {
|
||||
channel: PeripheralRef<'a, C>,
|
||||
_phantom: PhantomData<W>,
|
||||
|
@ -530,6 +546,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
|
|||
|
||||
unsafe {
|
||||
dma.ifcr(isrn).write(|w| {
|
||||
w.set_htif(isrbit, true);
|
||||
w.set_tcif(isrbit, true);
|
||||
w.set_teif(isrbit, true);
|
||||
})
|
||||
|
@ -578,15 +595,6 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
|
|||
let ch = self.channel.regs().st(self.channel.num());
|
||||
unsafe { ch.ndtr().read() }.ndt()
|
||||
}
|
||||
|
||||
pub fn blocking_wait(mut self) {
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
core::mem::forget(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
|
||||
|
@ -598,3 +606,180 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
|
|||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
// ==============================
|
||||
|
||||
struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
|
||||
|
||||
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
|
||||
fn ndtr(&self) -> usize {
|
||||
let ch = self.0.regs().st(self.0.num());
|
||||
unsafe { ch.ndtr().read() }.ndt() as usize
|
||||
}
|
||||
|
||||
fn get_complete_count(&self) -> usize {
|
||||
STATE.complete_count[self.0.index()].load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
fn reset_complete_count(&mut self) -> usize {
|
||||
STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RingBuffer<'a, C: Channel, W: Word> {
|
||||
cr: regs::Cr,
|
||||
channel: PeripheralRef<'a, C>,
|
||||
ringbuf: DmaRingBuffer<'a, W>,
|
||||
}
|
||||
|
||||
impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
|
||||
pub unsafe fn new_read(
|
||||
channel: impl Peripheral<P = C> + 'a,
|
||||
_request: Request,
|
||||
peri_addr: *mut W,
|
||||
buffer: &'a mut [W],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
into_ref!(channel);
|
||||
|
||||
let len = buffer.len();
|
||||
assert!(len > 0 && len <= 0xFFFF);
|
||||
|
||||
let dir = Dir::PeripheralToMemory;
|
||||
let data_size = W::size();
|
||||
|
||||
let channel_number = channel.num();
|
||||
let dma = channel.regs();
|
||||
|
||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
let mut w = regs::Cr(0);
|
||||
w.set_dir(dir.into());
|
||||
w.set_msize(data_size.into());
|
||||
w.set_psize(data_size.into());
|
||||
w.set_pl(vals::Pl::VERYHIGH);
|
||||
w.set_minc(vals::Inc::INCREMENTED);
|
||||
w.set_pinc(vals::Inc::FIXED);
|
||||
w.set_teie(true);
|
||||
w.set_htie(true);
|
||||
w.set_tcie(true);
|
||||
w.set_circ(vals::Circ::ENABLED);
|
||||
#[cfg(dma_v1)]
|
||||
w.set_trbuff(true);
|
||||
#[cfg(dma_v2)]
|
||||
w.set_chsel(_request);
|
||||
w.set_pburst(options.pburst.into());
|
||||
w.set_mburst(options.mburst.into());
|
||||
w.set_pfctrl(options.flow_ctrl.into());
|
||||
w.set_en(true);
|
||||
|
||||
let buffer_ptr = buffer.as_mut_ptr();
|
||||
let mut this = Self {
|
||||
channel,
|
||||
cr: w,
|
||||
ringbuf: DmaRingBuffer::new(buffer),
|
||||
};
|
||||
this.clear_irqs();
|
||||
|
||||
#[cfg(dmamux)]
|
||||
super::dmamux::configure_dmamux(&mut *this.channel, _request);
|
||||
|
||||
let ch = dma.st(channel_number);
|
||||
ch.par().write_value(peri_addr as u32);
|
||||
ch.m0ar().write_value(buffer_ptr as u32);
|
||||
ch.ndtr().write_value(regs::Ndtr(len as _));
|
||||
ch.fcr().write(|w| {
|
||||
if let Some(fth) = options.fifo_threshold {
|
||||
// FIFO mode
|
||||
w.set_dmdis(vals::Dmdis::DISABLED);
|
||||
w.set_fth(fth.into());
|
||||
} else {
|
||||
// Direct mode
|
||||
w.set_dmdis(vals::Dmdis::ENABLED);
|
||||
}
|
||||
});
|
||||
|
||||
this
|
||||
}
|
||||
|
||||
pub fn start(&mut self) {
|
||||
let ch = self.channel.regs().st(self.channel.num());
|
||||
unsafe { ch.cr().write_value(self.cr) }
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
|
||||
}
|
||||
|
||||
/// Read bytes from the ring buffer
|
||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
||||
pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
|
||||
self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.ringbuf.is_empty()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.ringbuf.len()
|
||||
}
|
||||
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.ringbuf.dma_buf.len()
|
||||
}
|
||||
|
||||
pub fn set_waker(&mut self, waker: &Waker) {
|
||||
STATE.ch_wakers[self.channel.index()].register(waker);
|
||||
}
|
||||
|
||||
fn clear_irqs(&mut self) {
|
||||
let channel_number = self.channel.num();
|
||||
let dma = self.channel.regs();
|
||||
let isrn = channel_number / 4;
|
||||
let isrbit = channel_number % 4;
|
||||
|
||||
unsafe {
|
||||
dma.ifcr(isrn).write(|w| {
|
||||
w.set_htif(isrbit, true);
|
||||
w.set_tcif(isrbit, true);
|
||||
w.set_teif(isrbit, true);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn request_stop(&mut self) {
|
||||
let ch = self.channel.regs().st(self.channel.num());
|
||||
|
||||
// Disable the channel. Keep the IEs enabled so the irqs still fire.
|
||||
unsafe {
|
||||
ch.cr().write(|w| {
|
||||
w.set_teie(true);
|
||||
w.set_htie(true);
|
||||
w.set_tcie(true);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
let ch = self.channel.regs().st(self.channel.num());
|
||||
unsafe { ch.cr().read() }.en()
|
||||
}
|
||||
|
||||
/// Synchronize the position of the ring buffer to the actual DMA controller position
|
||||
pub fn reload_position(&mut self) {
|
||||
let ch = self.channel.regs().st(self.channel.num());
|
||||
self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
|
||||
fn drop(&mut self) {
|
||||
self.request_stop();
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ pub use gpdma::*;
|
|||
#[cfg(dmamux)]
|
||||
mod dmamux;
|
||||
|
||||
pub(crate) mod ringbuffer;
|
||||
pub mod word;
|
||||
|
||||
use core::mem;
|
||||
|
|
embassy-stm32/src/dma/ringbuffer.rs (new file, 420 lines)
@ -0,0 +1,420 @@
|
|||
#![cfg_attr(gpdma, allow(unused))]
|
||||
|
||||
use core::ops::Range;
|
||||
use core::sync::atomic::{compiler_fence, Ordering};
|
||||
|
||||
use super::word::Word;
|
||||
|
||||
/// A "read-only" ring-buffer to be used together with the DMA controller which
|
||||
/// writes in a circular way, "uncontrolled" to the buffer.
|
||||
///
|
||||
/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
|
||||
/// to the current register value. `ndtr` describes the current position of the DMA
|
||||
/// write.
|
||||
///
|
||||
/// # Buffer layout
|
||||
///
|
||||
/// ```text
|
||||
/// Without wraparound: With wraparound:
|
||||
///
|
||||
/// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+
|
||||
/// | | | | | |
|
||||
/// v v v v v v
|
||||
/// +-----------------------------------------+ +-----------------------------------------+
|
||||
/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
|
||||
/// +-----------------------------------------+ +-----------------------------------------+
|
||||
/// ^ ^ ^ ^ ^ ^
|
||||
/// | | | | | |
|
||||
/// +- first --+ | +- end ------+ |
|
||||
/// | | | |
|
||||
/// +- end --------------------+ +- first ----------------+
|
||||
/// ```
|
||||
pub struct DmaRingBuffer<'a, W: Word> {
|
||||
pub(crate) dma_buf: &'a mut [W],
|
||||
first: usize,
|
||||
pub ndtr: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct OverrunError;
|
||||
|
||||
pub trait DmaCtrl {
|
||||
/// Get the NDTR register value, i.e. the space left in the underlying
|
||||
/// buffer until the dma writer wraps.
|
||||
fn ndtr(&self) -> usize;
|
||||
|
||||
/// Get the transfer completed counter.
|
||||
/// This counter is incremented by the dma controller when NDTR is reloaded,
|
||||
/// i.e. when the writing wraps.
|
||||
fn get_complete_count(&self) -> usize;
|
||||
|
||||
/// Reset the transfer completed counter to 0 and return the value just prior to the reset.
|
||||
fn reset_complete_count(&mut self) -> usize;
|
||||
}
|
||||
|
||||
impl<'a, W: Word> DmaRingBuffer<'a, W> {
|
||||
pub fn new(dma_buf: &'a mut [W]) -> Self {
|
||||
let ndtr = dma_buf.len();
|
||||
Self {
|
||||
dma_buf,
|
||||
first: 0,
|
||||
ndtr,
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset the ring buffer to its initial state
|
||||
pub fn clear(&mut self, mut dma: impl DmaCtrl) {
|
||||
self.first = 0;
|
||||
self.ndtr = self.dma_buf.len();
|
||||
dma.reset_complete_count();
|
||||
}
|
||||
|
||||
/// The buffer end position
|
||||
fn end(&self) -> usize {
|
||||
self.dma_buf.len() - self.ndtr
|
||||
}
|
||||
|
||||
/// Returns whether the buffer is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.first == self.end()
|
||||
}
|
||||
|
||||
/// The current number of bytes in the buffer
|
||||
/// This may change at any time if dma is currently active
|
||||
pub fn len(&self) -> usize {
|
||||
// Read out a stable end (the dma peripheral can change it at any time)
|
||||
let end = self.end();
|
||||
if self.first <= end {
|
||||
// No wrap
|
||||
end - self.first
|
||||
} else {
|
||||
self.dma_buf.len() - self.first + end
|
||||
}
|
||||
}
|
||||
|
||||
/// Read bytes from the ring buffer
|
||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
||||
pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> {
|
||||
let end = self.end();
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
if self.first == end {
|
||||
// The buffer is currently empty
|
||||
|
||||
if dma.get_complete_count() > 0 {
|
||||
// The DMA has written such that the ring buffer wraps at least once
|
||||
self.ndtr = dma.ndtr();
|
||||
if self.end() > self.first || dma.get_complete_count() > 1 {
|
||||
return Err(OverrunError);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
} else if self.first < end {
|
||||
// The available, unread portion in the ring buffer DOES NOT wrap
|
||||
|
||||
if dma.get_complete_count() > 1 {
|
||||
return Err(OverrunError);
|
||||
}
|
||||
|
||||
// Copy out the bytes from the dma buffer
|
||||
let len = self.copy_to(buf, self.first..end);
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
match dma.get_complete_count() {
|
||||
0 => {
|
||||
// The DMA writer has not wrapped before nor after the copy
|
||||
}
|
||||
1 => {
|
||||
// The DMA writer has written such that the ring buffer now wraps
|
||||
self.ndtr = dma.ndtr();
|
||||
if self.end() > self.first || dma.get_complete_count() > 1 {
|
||||
// The bytes that we have copied out have overflowed
|
||||
// as the writer has now both wrapped and is currently writing
|
||||
// within the region that we have just copied out
|
||||
return Err(OverrunError);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(OverrunError);
|
||||
}
|
||||
}
|
||||
|
||||
self.first = (self.first + len) % self.dma_buf.len();
|
||||
Ok(len)
|
||||
} else {
|
||||
// The available, unread portion in the ring buffer DOES wrap
|
||||
// The DMA writer has wrapped since we last read and is currently
|
||||
// writing (or the next byte added will be) in the beginning of the ring buffer.
|
||||
|
||||
let complete_count = dma.get_complete_count();
|
||||
if complete_count > 1 {
|
||||
return Err(OverrunError);
|
||||
}
|
||||
|
||||
// If the unread portion wraps then the writer must also have wrapped
|
||||
assert!(complete_count == 1);
|
||||
|
||||
if self.first + buf.len() < self.dma_buf.len() {
|
||||
// The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
|
||||
|
||||
// Copy out from the dma buffer
|
||||
let len = self.copy_to(buf, self.first..self.dma_buf.len());
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
// We have now copied out the data from dma_buf
|
||||
// Make sure that the just read part was not overwritten during the copy
|
||||
self.ndtr = dma.ndtr();
|
||||
if self.end() > self.first || dma.get_complete_count() > 1 {
|
||||
// The writer has entered the region that we just read, between when we read out `end` at the beginning and now.
|
||||
return Err(OverrunError);
|
||||
}
|
||||
|
||||
self.first = (self.first + len) % self.dma_buf.len();
|
||||
Ok(len)
|
||||
} else {
|
||||
// The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
|
||||
// so the next read will not have any unread tail bytes in the ring buffer.
|
||||
|
||||
// Copy out from the dma buffer
|
||||
let tail = self.copy_to(buf, self.first..self.dma_buf.len());
|
||||
let head = self.copy_to(&mut buf[tail..], 0..end);
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
// We have now copied out the data from dma_buf
|
||||
// Reset complete counter and make sure that the just read part was not overwritten during the copy
|
||||
self.ndtr = dma.ndtr();
|
||||
let complete_count = dma.reset_complete_count();
|
||||
if self.end() > self.first || complete_count > 1 {
|
||||
return Err(OverrunError);
|
||||
}
|
||||
|
||||
self.first = head;
|
||||
Ok(tail + head)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Copy from the dma buffer at `data_range` into `buf`
|
||||
fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
|
||||
// Limit the number of bytes that can be copied
|
||||
let length = usize::min(data_range.len(), buf.len());
|
||||
|
||||
// Copy from dma buffer into read buffer
|
||||
// We need to do it like this instead of a simple copy_from_slice() because
|
||||
// reading from a part of memory that may be simultaneously written to is unsafe
|
||||
unsafe {
|
||||
let dma_buf = self.dma_buf.as_ptr();
|
||||
|
||||
for i in 0..length {
|
||||
buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
|
||||
}
|
||||
}
|
||||
|
||||
length
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::array;
|
||||
use core::cell::RefCell;
|
||||
|
||||
use super::*;
|
||||
|
||||
struct TestCtrl {
|
||||
next_ndtr: RefCell<Option<usize>>,
|
||||
complete_count: usize,
|
||||
}
|
||||
|
||||
impl TestCtrl {
|
||||
pub const fn new() -> Self {
|
||||
Self {
|
||||
next_ndtr: RefCell::new(None),
|
||||
complete_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_next_ndtr(&mut self, ndtr: usize) {
|
||||
self.next_ndtr.borrow_mut().replace(ndtr);
|
||||
}
|
||||
}
|
||||
|
||||
impl DmaCtrl for &mut TestCtrl {
|
||||
fn ndtr(&self) -> usize {
|
||||
self.next_ndtr.borrow_mut().unwrap()
|
||||
}
|
||||
|
||||
fn get_complete_count(&self) -> usize {
|
||||
self.complete_count
|
||||
}
|
||||
|
||||
fn reset_complete_count(&mut self) -> usize {
|
||||
let old = self.complete_count;
|
||||
self.complete_count = 0;
|
||||
old
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
let mut dma_buf = [0u8; 16];
|
||||
let ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
|
||||
assert!(ringbuf.is_empty());
|
||||
assert_eq!(0, ringbuf.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_read() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.ndtr = 6;
|
||||
|
||||
assert!(!ringbuf.is_empty());
|
||||
assert_eq!(10, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([0, 1], buf);
|
||||
assert_eq!(8, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([2, 3], buf);
|
||||
assert_eq!(6, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 8];
|
||||
assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
|
||||
assert_eq!(0, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_read_with_wrap() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 12;
|
||||
ringbuf.ndtr = 10;
|
||||
|
||||
// The dma controller has written 4 + 6 bytes and has reloaded NDTR
|
||||
ctrl.complete_count = 1;
|
||||
ctrl.set_next_ndtr(10);
|
||||
|
||||
assert!(!ringbuf.is_empty());
|
||||
assert_eq!(6 + 4, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([12, 13], buf);
|
||||
assert_eq!(6 + 2, ringbuf.len());
|
||||
|
||||
let mut buf = [0; 4];
|
||||
assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([14, 15, 0, 1], buf);
|
||||
assert_eq!(4, ringbuf.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 2;
|
||||
ringbuf.ndtr = 6;
|
||||
|
||||
// The dma controller has written 6 + 2 bytes and has reloaded NDTR
|
||||
ctrl.complete_count = 1;
|
||||
ctrl.set_next_ndtr(14);
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([2, 3], buf);
|
||||
|
||||
assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_read_when_dma_writer_is_wrapped_and_read_wraps() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 12;
|
||||
ringbuf.ndtr = 10;
|
||||
|
||||
// The dma controller has written 6 + 2 bytes and has reloaded NDTR
|
||||
ctrl.complete_count = 1;
|
||||
ctrl.set_next_ndtr(14);
|
||||
|
||||
let mut buf = [0; 10];
|
||||
assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
|
||||
assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);
|
||||
|
||||
assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_read_when_dma_writer_wraps_with_same_ndtr() {
|
||||
let mut dma_buf = [0u8; 16];
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 6;
|
||||
ringbuf.ndtr = 10;
|
||||
ctrl.set_next_ndtr(9);
|
||||
|
||||
assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty
|
||||
|
||||
// The dma controller has written exactly 16 bytes
|
||||
ctrl.complete_count = 1;
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
|
||||
|
||||
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 2;
|
||||
ringbuf.ndtr = 6;
|
||||
|
||||
// The dma controller has written 6 + 3 bytes and has reloaded NDTR
|
||||
ctrl.complete_count = 1;
|
||||
ctrl.set_next_ndtr(13);
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
|
||||
|
||||
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
|
||||
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
|
||||
let mut ctrl = TestCtrl::new();
|
||||
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
|
||||
ringbuf.first = 12;
|
||||
ringbuf.ndtr = 10;
|
||||
|
||||
// The dma controller has written 6 + 13 bytes and has reloaded NDTR
|
||||
ctrl.complete_count = 1;
|
||||
ctrl.set_next_ndtr(3);
|
||||
|
||||
let mut buf = [0; 2];
|
||||
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
|
||||
|
||||
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
|
||||
}
|
||||
}
|
|
@ -283,8 +283,8 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
|
|||
|
||||
let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) };
|
||||
|
||||
let mut wake = false;
|
||||
let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie());
|
||||
|
||||
if has_errors {
|
||||
// clear all interrupts and DMA Rx Request
|
||||
unsafe {
|
||||
|
@ -304,22 +304,30 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
|
|||
});
|
||||
}
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
s.rx_waker.wake();
|
||||
} else if cr1.idleie() && sr.idle() {
|
||||
wake = true;
|
||||
} else {
|
||||
if cr1.idleie() && sr.idle() {
|
||||
// IDLE detected: no more data will come
|
||||
unsafe {
|
||||
r.cr1().modify(|w| {
|
||||
// disable idle line detection
|
||||
w.set_idleie(false);
|
||||
});
|
||||
|
||||
r.cr3().modify(|w| {
|
||||
// disable DMA Rx Request
|
||||
w.set_dmar(false);
|
||||
});
|
||||
}
|
||||
|
||||
wake = true;
|
||||
}
|
||||
|
||||
if cr1.rxneie() {
|
||||
// We cannot check the RXNE flag as it is auto-cleared by the DMA controller
|
||||
|
||||
// It is up to the listener to determine if this in fact was an RX event and disable the RXNE detection
|
||||
|
||||
wake = true;
|
||||
}
|
||||
}
|
||||
|
||||
if wake {
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
s.rx_waker.wake();
|
||||
|
@ -973,6 +981,11 @@ pub use buffered::*;
|
|||
#[cfg(feature = "nightly")]
|
||||
mod buffered;
|
||||
|
||||
#[cfg(not(gpdma))]
|
||||
mod rx_ringbuffered;
|
||||
#[cfg(not(gpdma))]
|
||||
pub use rx_ringbuffered::RingBufferedUartRx;
|
||||
|
||||
#[cfg(usart_v1)]
|
||||
fn tdr(r: crate::pac::usart::Usart) -> *mut u8 {
|
||||
r.dr().ptr() as _
|
||||
|
|
embassy-stm32/src/usart/rx_ringbuffered.rs (new file, 286 lines)
@ -0,0 +1,286 @@
|
|||
use core::future::poll_fn;
|
||||
use core::sync::atomic::{compiler_fence, Ordering};
|
||||
use core::task::Poll;
|
||||
|
||||
use embassy_hal_common::drop::OnDrop;
|
||||
use embassy_hal_common::PeripheralRef;
|
||||
use futures::future::{select, Either};
|
||||
|
||||
use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx};
|
||||
use crate::dma::ringbuffer::OverrunError;
|
||||
use crate::dma::RingBuffer;
|
||||
|
||||
pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
|
||||
_peri: PeripheralRef<'d, T>,
|
||||
ring_buf: RingBuffer<'d, RxDma, u8>,
|
||||
}
|
||||
|
||||
impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
|
||||
/// Turn the `UartRx` into a buffered uart which can continuously receive in the background
|
||||
/// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
|
||||
/// DMA controller, and must be sufficiently large, such that it will not overflow.
|
||||
pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T, RxDma> {
|
||||
assert!(dma_buf.len() > 0 && dma_buf.len() <= 0xFFFF);
|
||||
|
||||
let request = self.rx_dma.request();
|
||||
let opts = Default::default();
|
||||
let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
|
||||
RingBufferedUartRx {
|
||||
_peri: self._peri,
|
||||
ring_buf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
|
||||
pub fn start(&mut self) -> Result<(), Error> {
|
||||
// Clear the ring buffer so that it is ready to receive data
|
||||
self.ring_buf.clear();
|
||||
|
||||
self.setup_uart();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start uart background receive
|
||||
fn setup_uart(&mut self) {
|
||||
// fence before starting DMA.
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
self.ring_buf.start();
|
||||
|
||||
let r = T::regs();
|
||||
// configure Rx interrupt enables and enable the DMA Rx Request
|
||||
// SAFETY: only clears Rx related flags
|
||||
unsafe {
|
||||
r.cr1().modify(|w| {
|
||||
// disable RXNE interrupt
|
||||
w.set_rxneie(false);
|
||||
// enable parity interrupt if not ParityNone
|
||||
w.set_peie(w.pce());
|
||||
// disable idle line interrupt
|
||||
w.set_idleie(false);
|
||||
});
|
||||
r.cr3().modify(|w| {
|
||||
// enable Error Interrupt: (Frame error, Noise error, Overrun error)
|
||||
w.set_eie(true);
|
||||
// enable DMA Rx Request
|
||||
w.set_dmar(true);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop uart background receive
|
||||
fn teardown_uart(&mut self) {
|
||||
let r = T::regs();
|
||||
// clear all interrupts and DMA Rx Request
|
||||
// SAFETY: only clears Rx related flags
|
||||
unsafe {
|
||||
r.cr1().modify(|w| {
|
||||
// disable RXNE interrupt
|
||||
w.set_rxneie(false);
|
||||
// disable parity interrupt
|
||||
w.set_peie(false);
|
||||
// disable idle line interrupt
|
||||
w.set_idleie(false);
|
||||
});
|
||||
r.cr3().modify(|w| {
|
||||
// disable Error Interrupt: (Frame error, Noise error, Overrun error)
|
||||
w.set_eie(false);
|
||||
// disable DMA Rx Request
|
||||
w.set_dmar(false);
|
||||
});
|
||||
}
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
self.ring_buf.request_stop();
|
||||
while self.ring_buf.is_running() {}
|
||||
}
|
||||
|
||||
/// Read bytes that are readily available in the ring buffer.
|
||||
/// If no bytes are currently available in the buffer, the call waits until some
|
||||
/// bytes are available (at least one byte and at most half the buffer size)
|
||||
///
|
||||
/// Background receive is started if `start()` has not been previously called.
|
||||
///
|
||||
/// Background reception is terminated if an error is returned.
|
||||
/// It must then manually be started again by calling `start()` or by re-calling `read()`.
|
||||
pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
|
||||
let r = T::regs();
|
||||
|
||||
// Start background receive if it was not already started
|
||||
// SAFETY: read only
|
||||
let is_started = unsafe { r.cr3().read().dmar() };
|
||||
if !is_started {
|
||||
self.start()?;
|
||||
}
|
||||
|
||||
// SAFETY: read only and we only use Rx related flags
|
||||
let s = unsafe { sr(r).read() };
|
||||
let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
|
||||
if has_errors {
|
||||
self.teardown_uart();
|
||||
|
||||
if s.pe() {
|
||||
return Err(Error::Parity);
|
||||
} else if s.fe() {
|
||||
return Err(Error::Framing);
|
||||
} else if s.ne() {
|
||||
return Err(Error::Noise);
|
||||
} else {
|
||||
return Err(Error::Overrun);
|
||||
}
|
||||
}
|
||||
|
||||
self.ring_buf.reload_position();
|
||||
match self.ring_buf.read(buf) {
|
||||
Ok(len) if len == 0 => {}
|
||||
Ok(len) => {
|
||||
assert!(len > 0);
|
||||
return Ok(len);
|
||||
}
|
||||
Err(OverrunError) => {
|
||||
// Stop any transfer from now on
|
||||
// The user must re-start to receive any more data
|
||||
self.teardown_uart();
|
||||
return Err(Error::Overrun);
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
self.wait_for_data_or_idle().await?;
|
||||
|
||||
self.ring_buf.reload_position();
|
||||
if !self.ring_buf.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
|
||||
assert!(len > 0);
|
||||
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
/// Wait for uart idle or dma half-full or full
|
||||
async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
|
||||
let r = T::regs();
|
||||
|
||||
// make sure USART state is restored to neutral state
|
||||
let _on_drop = OnDrop::new(move || {
|
||||
// SAFETY: only clears Rx related flags
|
||||
unsafe {
|
||||
r.cr1().modify(|w| {
|
||||
// disable idle line interrupt
|
||||
w.set_idleie(false);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// SAFETY: only sets Rx related flags
|
||||
unsafe {
|
||||
r.cr1().modify(|w| {
|
||||
// enable idle line interrupt
|
||||
w.set_idleie(true);
|
||||
});
|
||||
}
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
// Future which completes when the dma buffer is half full or full
|
||||
let dma = poll_fn(|cx| {
|
||||
self.ring_buf.set_waker(cx.waker());
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
self.ring_buf.reload_position();
|
||||
if !self.ring_buf.is_empty() {
|
||||
// Some data is now available
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
});
|
||||
|
||||
// Future which completes when idle line is detected
|
||||
let uart = poll_fn(|cx| {
|
||||
let s = T::state();
|
||||
s.rx_waker.register(cx.waker());
|
||||
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
// SAFETY: read only and we only use Rx related flags
|
||||
let sr = unsafe { sr(r).read() };
|
||||
|
||||
// SAFETY: only clears Rx related flags
|
||||
unsafe {
|
||||
// This read also clears the error and idle interrupt flags on v1.
|
||||
rdr(r).read_volatile();
|
||||
clear_interrupt_flags(r, sr);
|
||||
}
|
||||
|
||||
let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore();
|
||||
if has_errors {
|
||||
if sr.pe() {
|
||||
return Poll::Ready(Err(Error::Parity));
|
||||
} else if sr.fe() {
|
||||
return Poll::Ready(Err(Error::Framing));
|
||||
} else if sr.ne() {
|
||||
return Poll::Ready(Err(Error::Noise));
|
||||
} else {
|
||||
return Poll::Ready(Err(Error::Overrun));
|
||||
}
|
||||
}
|
||||
|
||||
if sr.idle() {
|
||||
// Idle line is detected
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
});
|
||||
|
||||
match select(dma, uart).await {
|
||||
Either::Left(((), _)) => Ok(()),
|
||||
Either::Right((Ok(()), _)) => Ok(()),
|
||||
Either::Right((Err(e), _)) => {
|
||||
self.teardown_uart();
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
|
||||
fn drop(&mut self) {
|
||||
self.teardown_uart();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable-traits", feature = "nightly"))]
|
||||
mod eio {
|
||||
use embedded_io::asynch::Read;
|
||||
use embedded_io::Io;
|
||||
|
||||
use super::RingBufferedUartRx;
|
||||
use crate::usart::{BasicInstance, Error, RxDma};
|
||||
|
||||
impl<T, Rx> Io for RingBufferedUartRx<'_, T, Rx>
|
||||
where
|
||||
T: BasicInstance,
|
||||
Rx: RxDma<T>,
|
||||
{
|
||||
type Error = Error;
|
||||
}
|
||||
|
||||
impl<T, Rx> Read for RingBufferedUartRx<'_, T, Rx>
|
||||
where
|
||||
T: BasicInstance,
|
||||
Rx: RxDma<T>,
|
||||
{
|
||||
async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
self.read(buf).await
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,24 +5,26 @@ version = "0.1.0"
|
|||
license = "MIT OR Apache-2.0"
|
||||
|
||||
[features]
|
||||
stm32f103c8 = ["embassy-stm32/stm32f103c8"] # Blue Pill
|
||||
stm32f429zi = ["embassy-stm32/stm32f429zi", "sdmmc", "chrono"] # Nucleo
|
||||
stm32g071rb = ["embassy-stm32/stm32g071rb"] # Nucleo
|
||||
stm32c031c6 = ["embassy-stm32/stm32c031c6"] # Nucleo
|
||||
stm32g491re = ["embassy-stm32/stm32g491re"] # Nucleo
|
||||
stm32h755zi = ["embassy-stm32/stm32h755zi-cm7"] # Nucleo
|
||||
stm32wb55rg = ["embassy-stm32/stm32wb55rg"] # Nucleo
|
||||
stm32f103c8 = ["embassy-stm32/stm32f103c8", "not-gpdma"] # Blue Pill
|
||||
stm32f429zi = ["embassy-stm32/stm32f429zi", "sdmmc", "chrono", "not-gpdma"] # Nucleo
|
||||
stm32g071rb = ["embassy-stm32/stm32g071rb", "not-gpdma"] # Nucleo
|
||||
stm32c031c6 = ["embassy-stm32/stm32c031c6", "not-gpdma"] # Nucleo
|
||||
stm32g491re = ["embassy-stm32/stm32g491re", "not-gpdma"] # Nucleo
|
||||
stm32h755zi = ["embassy-stm32/stm32h755zi-cm7", "not-gpdma"] # Nucleo
|
||||
stm32wb55rg = ["embassy-stm32/stm32wb55rg", "not-gpdma"] # Nucleo
|
||||
stm32h563zi = ["embassy-stm32/stm32h563zi"] # Nucleo
|
||||
stm32u585ai = ["embassy-stm32/stm32u585ai"] # IoT board
|
||||
|
||||
sdmmc = []
|
||||
chrono = ["embassy-stm32/chrono", "dep:chrono"]
|
||||
not-gpdma = []
|
||||
|
||||
[dependencies]
|
||||
embassy-sync = { version = "0.2.0", path = "../../embassy-sync", features = ["defmt"] }
|
||||
embassy-executor = { version = "0.2.0", path = "../../embassy-executor", features = ["arch-cortex-m", "executor-thread", "defmt", "integrated-timers"] }
|
||||
embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt", "tick-hz-32_768"] }
|
||||
embassy-time = { version = "0.1.0", path = "../../embassy-time", features = ["defmt", "tick-hz-32_768", "defmt-timestamp-uptime"] }
|
||||
embassy-stm32 = { version = "0.1.0", path = "../../embassy-stm32", features = ["nightly", "defmt", "unstable-pac", "memory-x", "time-driver-any"] }
|
||||
embassy-futures = { version = "0.1.0", path = "../../embassy-futures" }
|
||||
|
||||
defmt = "0.3.0"
|
||||
defmt-rtt = "0.4"
|
||||
|
@ -33,6 +35,8 @@ embedded-hal = "0.2.6"
|
|||
embedded-hal-1 = { package = "embedded-hal", version = "=1.0.0-alpha.10" }
|
||||
embedded-hal-async = { version = "=0.2.0-alpha.1" }
|
||||
panic-probe = { version = "0.3.0", features = ["print-defmt"] }
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
rand_chacha = { version = "0.3", default-features = false }
|
||||
|
||||
chrono = { version = "^0.4", default-features = false, optional = true}
|
||||
|
||||
|
@ -78,6 +82,11 @@ name = "usart_dma"
|
|||
path = "src/bin/usart_dma.rs"
|
||||
required-features = []
|
||||
|
||||
[[bin]]
|
||||
name = "usart_rx_ringbuffered"
|
||||
path = "src/bin/usart_rx_ringbuffered.rs"
|
||||
required-features = [ "not-gpdma",]
|
||||
|
||||
# END TESTS
|
||||
|
||||
[profile.dev]
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
mod example_common;
|
||||
use defmt::assert_eq;
|
||||
use embassy_executor::Spawner;
|
||||
use embassy_futures::join::join;
|
||||
use embassy_stm32::interrupt;
|
||||
use embassy_stm32::usart::{Config, Uart};
|
||||
use example_common::*;
|
||||
|
@ -76,18 +77,26 @@ async fn main(_spawner: Spawner) {
|
|||
(p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
|
||||
|
||||
let config = Config::default();
|
||||
let mut usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config);
|
||||
let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config);
|
||||
|
||||
// We can't send too many bytes, they have to fit in the FIFO.
|
||||
// This is because we aren't sending+receiving at the same time.
|
||||
// For whatever reason, blocking works with 2 bytes but DMA only with 1??
|
||||
const LEN: usize = 128;
|
||||
let mut tx_buf = [0; LEN];
|
||||
let mut rx_buf = [0; LEN];
|
||||
for i in 0..LEN {
|
||||
tx_buf[i] = i as u8;
|
||||
}
|
||||
|
||||
let data = [0x42];
|
||||
usart.write(&data).await.unwrap();
|
||||
let (mut tx, mut rx) = usart.split();
|
||||
|
||||
let mut buf = [0; 1];
|
||||
usart.read(&mut buf).await.unwrap();
|
||||
assert_eq!(buf, data);
|
||||
let tx_fut = async {
|
||||
tx.write(&tx_buf).await.unwrap();
|
||||
};
|
||||
let rx_fut = async {
|
||||
rx.read(&mut rx_buf).await.unwrap();
|
||||
};
|
||||
join(rx_fut, tx_fut).await;
|
||||
|
||||
assert_eq!(tx_buf, rx_buf);
|
||||
|
||||
info!("Test OK");
|
||||
cortex_m::asm::bkpt();
|
||||
|
|
tests/stm32/src/bin/usart_rx_ringbuffered.rs (new file, 200 lines)
@ -0,0 +1,200 @@
|
|||
// required-features: not-gpdma
|
||||
|
||||
#![no_std]
|
||||
#![no_main]
|
||||
#![feature(type_alias_impl_trait)]
|
||||
|
||||
#[path = "../example_common.rs"]
|
||||
mod example_common;
|
||||
use defmt::{assert_eq, panic};
|
||||
use embassy_executor::Spawner;
|
||||
use embassy_stm32::interrupt;
|
||||
use embassy_stm32::usart::{Config, DataBits, Parity, RingBufferedUartRx, StopBits, Uart, UartTx};
|
||||
use embassy_time::{Duration, Timer};
|
||||
use example_common::*;
|
||||
use rand_chacha::ChaCha8Rng;
|
||||
use rand_core::{RngCore, SeedableRng};
|
||||
|
||||
#[cfg(feature = "stm32f103c8")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH4;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH5;
|
||||
}
|
||||
#[cfg(feature = "stm32g491re")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
|
||||
}
|
||||
#[cfg(feature = "stm32g071rb")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
|
||||
}
|
||||
#[cfg(feature = "stm32f429zi")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART6;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA2_CH6;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA2_CH1;
|
||||
}
|
||||
#[cfg(feature = "stm32wb55rg")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::LPUART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
|
||||
}
|
||||
#[cfg(feature = "stm32h755zi")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH0;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH1;
|
||||
}
|
||||
#[cfg(feature = "stm32u585ai")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART3;
|
||||
pub type TxDma = embassy_stm32::peripherals::GPDMA1_CH0;
|
||||
pub type RxDma = embassy_stm32::peripherals::GPDMA1_CH1;
|
||||
}
|
||||
#[cfg(feature = "stm32c031c6")]
|
||||
mod board {
|
||||
pub type Uart = embassy_stm32::peripherals::USART1;
|
||||
pub type TxDma = embassy_stm32::peripherals::DMA1_CH1;
|
||||
pub type RxDma = embassy_stm32::peripherals::DMA1_CH2;
|
||||
}
|
||||
|
||||
const DMA_BUF_SIZE: usize = 256;
|
||||
|
||||
#[embassy_executor::main]
|
||||
async fn main(spawner: Spawner) {
|
||||
let p = embassy_stm32::init(config());
|
||||
info!("Hello World!");
|
||||
|
||||
// Arduino pins D0 and D1
|
||||
// They're connected together with a 1K resistor.
|
||||
#[cfg(feature = "stm32f103c8")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) = (
|
||||
p.PA9,
|
||||
p.PA10,
|
||||
p.USART1,
|
||||
interrupt::take!(USART1),
|
||||
p.DMA1_CH4,
|
||||
p.DMA1_CH5,
|
||||
);
|
||||
#[cfg(feature = "stm32g491re")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) =
|
||||
(p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
|
||||
#[cfg(feature = "stm32g071rb")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) =
|
||||
(p.PC4, p.PC5, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
|
||||
#[cfg(feature = "stm32f429zi")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) = (
|
||||
p.PG14,
|
||||
p.PG9,
|
||||
p.USART6,
|
||||
interrupt::take!(USART6),
|
||||
p.DMA2_CH6,
|
||||
p.DMA2_CH1,
|
||||
);
|
||||
#[cfg(feature = "stm32wb55rg")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) = (
|
||||
p.PA2,
|
||||
p.PA3,
|
||||
p.LPUART1,
|
||||
interrupt::take!(LPUART1),
|
||||
p.DMA1_CH1,
|
||||
p.DMA1_CH2,
|
||||
);
|
||||
#[cfg(feature = "stm32h755zi")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) =
|
||||
(p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH0, p.DMA1_CH1);
|
||||
#[cfg(feature = "stm32u585ai")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) = (
|
||||
p.PD8,
|
||||
p.PD9,
|
||||
p.USART3,
|
||||
interrupt::take!(USART3),
|
||||
p.GPDMA1_CH0,
|
||||
p.GPDMA1_CH1,
|
||||
);
|
||||
#[cfg(feature = "stm32c031c6")]
|
||||
let (tx, rx, usart, irq, tx_dma, rx_dma) =
|
||||
(p.PB6, p.PB7, p.USART1, interrupt::take!(USART1), p.DMA1_CH1, p.DMA1_CH2);
|
||||
|
||||
// To run this test, use the saturate_serial test utility to saturate the serial port
|
||||
|
||||
let mut config = Config::default();
|
||||
// this is the fastest we can go without tuning RCC
|
||||
// some chips have default pclk=8mhz, and uart can run at max pclk/16
|
||||
config.baudrate = 500_000;
|
||||
config.data_bits = DataBits::DataBits8;
|
||||
config.stop_bits = StopBits::STOP1;
|
||||
config.parity = Parity::ParityNone;
|
||||
|
||||
let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config);
|
||||
let (tx, rx) = usart.split();
|
||||
static mut DMA_BUF: [u8; DMA_BUF_SIZE] = [0; DMA_BUF_SIZE];
|
||||
let dma_buf = unsafe { DMA_BUF.as_mut() };
|
||||
let rx = rx.into_ring_buffered(dma_buf);
|
||||
|
||||
info!("Spawning tasks");
|
||||
spawner.spawn(transmit_task(tx)).unwrap();
|
||||
spawner.spawn(receive_task(rx)).unwrap();
|
||||
}
|
||||
|
||||
#[embassy_executor::task]
|
||||
async fn transmit_task(mut tx: UartTx<'static, board::Uart, board::TxDma>) {
|
||||
let mut rng = ChaCha8Rng::seed_from_u64(1337);
|
||||
|
||||
info!("Starting random transmissions into void...");
|
||||
|
||||
let mut i: u8 = 0;
|
||||
loop {
|
||||
let mut buf = [0; 32];
|
||||
let len = 1 + (rng.next_u32() as usize % buf.len());
|
||||
for b in &mut buf[..len] {
|
||||
*b = i;
|
||||
i = i.wrapping_add(1);
|
||||
}
|
||||
|
||||
tx.write(&buf[..len]).await.unwrap();
|
||||
Timer::after(Duration::from_micros((rng.next_u32() % 1000) as _)).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[embassy_executor::task]
|
||||
async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::RxDma>) {
|
||||
info!("Ready to receive...");
|
||||
|
||||
let mut rng = ChaCha8Rng::seed_from_u64(1337);
|
||||
|
||||
let mut i = 0;
|
||||
let mut expected = 0;
|
||||
loop {
|
||||
let mut buf = [0; 100];
|
||||
let max_len = 1 + (rng.next_u32() as usize % buf.len());
|
||||
let received = match rx.read(&mut buf[..max_len]).await {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
panic!("Test fail! read error: {:?}", e);
|
||||
}
|
||||
};
|
||||
|
||||
for byte in &buf[..received] {
|
||||
assert_eq!(*byte, expected);
|
||||
expected = expected.wrapping_add(1);
|
||||
}
|
||||
|
||||
if received < max_len {
|
||||
Timer::after(Duration::from_micros((rng.next_u32() % 1000) as _)).await;
|
||||
}
|
||||
|
||||
i += received;
|
||||
|
||||
if i > 100000 {
|
||||
info!("Test OK!");
|
||||
cortex_m::asm::bkpt();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,22 +1,11 @@
|
|||
#![macro_use]
|
||||
|
||||
use core::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
pub use defmt::*;
|
||||
#[allow(unused)]
|
||||
use embassy_stm32::time::Hertz;
|
||||
use embassy_stm32::Config;
|
||||
use {defmt_rtt as _, panic_probe as _};
|
||||
|
||||
defmt::timestamp! {"{=u64}", {
|
||||
static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
// NOTE(no-CAS) `timestamps` runs with interrupts disabled
|
||||
let n = COUNT.load(Ordering::Relaxed);
|
||||
COUNT.store(n + 1, Ordering::Relaxed);
|
||||
n as u64
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config() -> Config {
|
||||
#[allow(unused_mut)]
|
||||
let mut config = Config::default();
|
||||
|
|
tests/utils/Cargo.toml (new file, 10 lines)
@ -0,0 +1,10 @@
|
|||
[package]
|
||||
name = "test-utils"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
rand = "0.8"
|
||||
serial = "0.4"
|
tests/utils/src/bin/saturate_serial.rs (new file, 53 lines)
@ -0,0 +1,53 @@
|
|||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
use std::{env, io, process, thread};
|
||||
|
||||
use rand::random;
|
||||
use serial::SerialPort;
|
||||
|
||||
pub fn main() {
|
||||
if let Some(port_name) = env::args().nth(1) {
|
||||
let idles = env::args().position(|x| x == "--idles").is_some();
|
||||
|
||||
println!("Saturating port {:?} with 115200 8N1", port_name);
|
||||
println!("Idles: {}", idles);
|
||||
println!("Process ID: {}", process::id());
|
||||
let mut port = serial::open(&port_name).unwrap();
|
||||
if saturate(&mut port, idles).is_err() {
|
||||
eprintln!("Unable to saturate port");
|
||||
}
|
||||
} else {
|
||||
let path = env::args().next().unwrap();
|
||||
let basepath = Path::new(&path).with_extension("");
|
||||
let basename = basepath.file_name().unwrap();
|
||||
eprintln!("USAGE: {} <port-name>", basename.to_string_lossy());
|
||||
}
|
||||
}
|
||||
|
||||
fn saturate<T: SerialPort>(port: &mut T, idles: bool) -> io::Result<()> {
|
||||
port.reconfigure(&|settings| {
|
||||
settings.set_baud_rate(serial::Baud115200)?;
|
||||
settings.set_char_size(serial::Bits8);
|
||||
settings.set_parity(serial::ParityNone);
|
||||
settings.set_stop_bits(serial::Stop1);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
let mut written = 0;
|
||||
loop {
|
||||
let len = random::<usize>() % 0x1000;
|
||||
let buf: Vec<u8> = (written..written + len).map(|x| x as u8).collect();
|
||||
|
||||
port.write_all(&buf)?;
|
||||
|
||||
if idles {
|
||||
let micros = (random::<usize>() % 1000) as u64;
|
||||
println!("Sleeping {}us", micros);
|
||||
port.flush().unwrap();
|
||||
thread::sleep(Duration::from_micros(micros));
|
||||
}
|
||||
|
||||
written += len;
|
||||
println!("Written: {}", written);
|
||||
}
|
||||
}
|