embassy-stm32/eth: consolidate v1a/v1c and add v1b

The only differences between v1a and v1c were clock enables and GPIO configuration; v1b will
likely work out of the box (or need only minor tweaks).
David Lenfesty 2022-04-26 10:25:04 -06:00 committed by Dario Nieuwenhuis
parent 2e7b42fc5b
commit 5e6c4ae024
10 changed files with 43 additions and 934 deletions

View file

@@ -1,9 +1,7 @@
 #![macro_use]

-#[cfg_attr(eth_v1a, path = "v1a/mod.rs")]
-#[cfg_attr(eth_v1c, path = "v1c/mod.rs")]
+#[cfg_attr(any(eth_v1a, eth_v1b, eth_v1c), path = "v1/mod.rs")]
 #[cfg_attr(eth_v2, path = "v2/mod.rs")]
-#[cfg_attr(eth_v1, path = "v1.rs")]
 mod _version;

 pub mod generic_smi;
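
The version selection works via cfg flags (eth_v1a, eth_v1b, eth_v1c, eth_v2) that embassy-stm32's build script derives from the selected chip, combined with `#[cfg_attr]` path rewriting. A minimal sketch of the same pattern, with hypothetical cfg names:

    // Exactly one of these cfgs is expected to be set at build time;
    // cfg_attr then points `_version` at the matching implementation file.
    #[cfg_attr(backend_a, path = "backend_a.rs")]
    #[cfg_attr(backend_b, path = "backend_b.rs")]
    mod _version;

The rest of the crate refers to `_version::...` uniformly, so consolidating v1a/v1b/v1c behind one path only touches these attributes.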

View file

@@ -1 +0,0 @@

View file

@@ -12,7 +12,11 @@ use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};
 use crate::gpio::sealed::Pin as __GpioPin;
 use crate::gpio::{sealed::AFType, AnyPin, Speed};
-use crate::pac::{AFIO, ETH, RCC};
+#[cfg(eth_v1a)]
+use crate::pac::AFIO;
+#[cfg(any(eth_v1b, eth_v1c))]
+use crate::pac::SYSCFG;
+use crate::pac::{ETH, RCC};

 mod descriptors;
 mod rx_desc;
@@ -42,6 +46,7 @@ pub struct Ethernet<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> {
     mac_addr: [u8; 6],
 }
+#[cfg(eth_v1a)]
 macro_rules! config_in_pins {
     ($($pin:ident),*) => {
         // NOTE(unsafe) Exclusive access to the registers
@@ -54,6 +59,7 @@ macro_rules! config_in_pins {
     }
 }
+#[cfg(eth_v1a)]
 macro_rules! config_af_pins {
     ($($pin:ident),*) => {
         // NOTE(unsafe) Exclusive access to the registers
@@ -66,6 +72,19 @@ macro_rules! config_af_pins {
     };
 }
+#[cfg(any(eth_v1b, eth_v1c))]
+macro_rules! config_pins {
+    ($($pin:ident),*) => {
+        // NOTE(unsafe) Exclusive access to the registers
+        critical_section::with(|_| {
+            $(
+                $pin.set_as_af($pin.af_num(), AFType::OutputPushPull);
+                $pin.set_speed(Speed::VeryHigh);
+            )*
+        })
+    };
+}
 impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
     /// safety: the returned instance is not leak-safe
     pub unsafe fn new(
@@ -89,6 +108,7 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T,
         // Enable the necessary Clocks
         // NOTE(unsafe) We have exclusive access to the registers
+        #[cfg(eth_v1a)]
         critical_section::with(|_| {
             RCC.apb2enr().modify(|w| w.set_afioen(true));
@@ -103,8 +123,27 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T,
             });
         });
-        config_in_pins!(ref_clk, rx_d0, rx_d1);
-        config_af_pins!(mdio, mdc, tx_d0, tx_d1, tx_en);
+        #[cfg(any(eth_v1b, eth_v1c))]
+        critical_section::with(|_| {
+            RCC.apb2enr().modify(|w| w.set_syscfgen(true));
+            RCC.ahb1enr().modify(|w| {
+                w.set_ethen(true);
+                w.set_ethtxen(true);
+                w.set_ethrxen(true);
+            });
+
+            // RMII (Reduced Media Independent Interface)
+            SYSCFG.pmc().modify(|w| w.set_mii_rmii_sel(true));
+        });
+
+        #[cfg(eth_v1a)]
+        {
+            config_in_pins!(ref_clk, rx_d0, rx_d1);
+            config_af_pins!(mdio, mdc, tx_d0, tx_d1, tx_en);
+        }
+        #[cfg(any(eth_v1b, eth_v1c))]
+        config_pins!(ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

         // NOTE(unsafe) We are ourselves not leak-safe.
         let state = PeripheralMutex::new_unchecked(interrupt, &mut state.0, || Inner::new(peri));
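
For context, a rough bring-up sketch for the consolidated v1 driver on an RMII board (pin choices, descriptor counts, and the PHY are illustrative, not part of this commit; `p` is the result of `embassy_stm32::init`, and real code typically places `State` in a static so the self-referential lifetime works out):

    use embassy_stm32::eth::{generic_smi::GenericSMI, Ethernet, State};
    use embassy_stm32::interrupt;

    let mut state = State::new();
    let mac_addr = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01]; // locally administered
    let eth = unsafe {
        Ethernet::<_, _, 4, 4>::new(
            &mut state, p.ETH, interrupt::take!(ETH),
            p.PA1, p.PA2, p.PC1, p.PA7, p.PC4, p.PC5, // REF_CLK, MDIO, MDC, CRS_DV, RXD0, RXD1
            p.PB12, p.PB13, p.PB11,                   // TXD0, TXD1, TX_EN
            GenericSMI, mac_addr, 0,
        )
    };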

View file

@@ -1,21 +0,0 @@
use crate::eth::_version::rx_desc::RDesRing;
use crate::eth::_version::tx_desc::TDesRing;

pub struct DescriptorRing<const T: usize, const R: usize> {
    pub(crate) tx: TDesRing<T>,
    pub(crate) rx: RDesRing<R>,
}

impl<const T: usize, const R: usize> DescriptorRing<T, R> {
    pub const fn new() -> Self {
        Self {
            tx: TDesRing::new(),
            rx: RDesRing::new(),
        }
    }

    pub fn init(&mut self) {
        self.tx.init();
        self.rx.init();
    }
}

View file

@@ -1,359 +0,0 @@
// The v1c ethernet driver was ported to embassy from the awesome stm32-eth project (https://github.com/stm32-rs/stm32-eth).

use core::marker::PhantomData;
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;

use embassy::util::Unborrow;
use embassy::waitqueue::AtomicWaker;
use embassy_hal_common::peripheral::{PeripheralMutex, PeripheralState, StateStorage};
use embassy_hal_common::unborrow;
use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};

use crate::gpio::sealed::Pin as __GpioPin;
use crate::gpio::{sealed::AFType, AnyPin, Speed};
use crate::pac::{ETH, RCC, SYSCFG};

mod descriptors;
mod rx_desc;
mod tx_desc;

use super::*;
use descriptors::DescriptorRing;
use stm32_metapac::eth::vals::{
    Apcs, Cr, Dm, DmaomrSr, Fes, Ftf, Ifg, MbProgress, Mw, Pbl, Rsf, St, Tsf,
};

pub struct State<'d, T: Instance, const TX: usize, const RX: usize>(
    StateStorage<Inner<'d, T, TX, RX>>,
);

impl<'d, T: Instance, const TX: usize, const RX: usize> State<'d, T, TX, RX> {
    pub fn new() -> Self {
        Self(StateStorage::new())
    }
}

pub struct Ethernet<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> {
    state: PeripheralMutex<'d, Inner<'d, T, TX, RX>>,
    pins: [AnyPin; 9],
    _phy: P,
    clock_range: Cr,
    phy_addr: u8,
    mac_addr: [u8; 6],
}

macro_rules! config_pins {
    ($($pin:ident),*) => {
        // NOTE(unsafe) Exclusive access to the registers
        critical_section::with(|_| {
            $(
                $pin.set_as_af($pin.af_num(), AFType::OutputPushPull);
                $pin.set_speed(Speed::VeryHigh);
            )*
        })
    };
}
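
// For a single pin, e.g. `config_pins!(ref_clk)`, the macro expands to roughly
// (illustrative expansion, not part of the original source):
//
//     critical_section::with(|_| {
//         ref_clk.set_as_af(ref_clk.af_num(), AFType::OutputPushPull);
//         ref_clk.set_speed(Speed::VeryHigh);
//     })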
impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
    /// safety: the returned instance is not leak-safe
    pub unsafe fn new(
        state: &'d mut State<'d, T, TX, RX>,
        peri: impl Unborrow<Target = T> + 'd,
        interrupt: impl Unborrow<Target = crate::interrupt::ETH> + 'd,
        ref_clk: impl Unborrow<Target = impl RefClkPin<T>> + 'd,
        mdio: impl Unborrow<Target = impl MDIOPin<T>> + 'd,
        mdc: impl Unborrow<Target = impl MDCPin<T>> + 'd,
        crs: impl Unborrow<Target = impl CRSPin<T>> + 'd,
        rx_d0: impl Unborrow<Target = impl RXD0Pin<T>> + 'd,
        rx_d1: impl Unborrow<Target = impl RXD1Pin<T>> + 'd,
        tx_d0: impl Unborrow<Target = impl TXD0Pin<T>> + 'd,
        tx_d1: impl Unborrow<Target = impl TXD1Pin<T>> + 'd,
        tx_en: impl Unborrow<Target = impl TXEnPin<T>> + 'd,
        phy: P,
        mac_addr: [u8; 6],
        phy_addr: u8,
    ) -> Self {
        unborrow!(interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

        // Enable the necessary Clocks
        // NOTE(unsafe) We have exclusive access to the registers
        critical_section::with(|_| {
            RCC.apb2enr().modify(|w| w.set_syscfgen(true));
            RCC.ahb1enr().modify(|w| {
                w.set_ethen(true);
                w.set_ethtxen(true);
                w.set_ethrxen(true);
            });

            // RMII (Reduced Media Independent Interface)
            SYSCFG.pmc().modify(|w| w.set_mii_rmii_sel(true));
        });

        config_pins!(ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

        // NOTE(unsafe) We are ourselves not leak-safe.
        let state = PeripheralMutex::new_unchecked(interrupt, &mut state.0, || Inner::new(peri));

        // NOTE(unsafe) We have exclusive access to the registers
        let dma = ETH.ethernet_dma();
        let mac = ETH.ethernet_mac();

        // Reset and wait
        dma.dmabmr().modify(|w| w.set_sr(true));
        while dma.dmabmr().read().sr() {}

        mac.maccr().modify(|w| {
            w.set_ifg(Ifg::IFG96); // inter frame gap 96 bit times
            w.set_apcs(Apcs::STRIP); // automatic padding and crc stripping
            w.set_fes(Fes::FES100); // fast ethernet speed
            w.set_dm(Dm::FULLDUPLEX); // full duplex
            // TODO: Carrier sense ? ECRSFD
        });

        // Note: Writing to LR triggers synchronisation of both LR and HR into the MAC core,
        // so the LR write must happen after the HR write.
        mac.maca0hr()
            .modify(|w| w.set_maca0h(u16::from(mac_addr[4]) | (u16::from(mac_addr[5]) << 8)));
        mac.maca0lr().write(|w| {
            w.set_maca0l(
                u32::from(mac_addr[0])
                    | (u32::from(mac_addr[1]) << 8)
                    | (u32::from(mac_addr[2]) << 16)
                    | (u32::from(mac_addr[3]) << 24),
            )
        });
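
        // Worked example (illustrative, not in the original source): for
        // mac_addr = [0x02, 0x00, 0x00, 0x00, 0x00, 0x01], MACA0LR is written
        // with 0x0000_0002 and the low half of MACA0HR with 0x0100.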
        // pause time
        mac.macfcr().modify(|w| w.set_pt(0x100));

        // Transfer and Forward, Receive and Forward
        dma.dmaomr().modify(|w| {
            w.set_tsf(Tsf::STOREFORWARD);
            w.set_rsf(Rsf::STOREFORWARD);
        });

        dma.dmabmr().modify(|w| {
            w.set_pbl(Pbl::PBL32) // programmable burst length - 32 ?
        });

        // TODO MTU size setting not found for v1 ethernet, check if correct

        // NOTE(unsafe) We got the peripheral singleton, which means that `rcc::init` was called
        let hclk = crate::rcc::get_freqs().ahb1;
        let hclk_mhz = hclk.0 / 1_000_000;

        // Set the MDC clock frequency in the range 1MHz - 2.5MHz
        let clock_range = match hclk_mhz {
            0..=24 => panic!("Invalid HCLK frequency - should be at least 25 MHz."),
            25..=34 => Cr::CR_20_35,     // Divide by 16
            35..=59 => Cr::CR_35_60,     // Divide by 26
            60..=99 => Cr::CR_60_100,    // Divide by 42
            100..=149 => Cr::CR_100_150, // Divide by 62
            150..=216 => Cr::CR_150_168, // Divide by 102
            _ => {
                panic!("HCLK results in MDC clock > 2.5MHz even for the highest CSR clock divider")
            }
        };
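
        // Worked example (illustrative): with HCLK = 168 MHz the 150..=216 arm
        // is taken, so MDC = 168 MHz / 102 ≈ 1.65 MHz, inside the 1-2.5 MHz window.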
        let pins = [
            ref_clk.degrade(),
            mdio.degrade(),
            mdc.degrade(),
            crs.degrade(),
            rx_d0.degrade(),
            rx_d1.degrade(),
            tx_d0.degrade(),
            tx_d1.degrade(),
            tx_en.degrade(),
        ];

        let mut this = Self {
            state,
            pins,
            _phy: phy,
            clock_range,
            phy_addr,
            mac_addr,
        };

        this.state.with(|s| {
            s.desc_ring.init();

            fence(Ordering::SeqCst);

            let mac = ETH.ethernet_mac();
            let dma = ETH.ethernet_dma();

            mac.maccr().modify(|w| {
                w.set_re(true);
                w.set_te(true);
            });
            dma.dmaomr().modify(|w| {
                w.set_ftf(Ftf::FLUSH); // flush transmit fifo (queue)
                w.set_st(St::STARTED); // start transmitting channel
                w.set_sr(DmaomrSr::STARTED); // start receiving channel
            });

            // Enable interrupts
            dma.dmaier().modify(|w| {
                w.set_nise(true);
                w.set_rie(true);
                w.set_tie(true);
            });
        });
        P::phy_reset(&mut this);
        P::phy_init(&mut this);

        this
    }
}

unsafe impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> StationManagement
    for Ethernet<'d, T, P, TX, RX>
{
    fn smi_read(&mut self, reg: u8) -> u16 {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr);
                w.set_mr(reg);
                w.set_mw(Mw::READ); // read operation
                w.set_cr(self.clock_range);
                w.set_mb(MbProgress::BUSY); // indicate that operation is in progress
            });
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
            mac.macmiidr().read().md()
        }
    }

    fn smi_write(&mut self, reg: u8, val: u16) {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            mac.macmiidr().write(|w| w.set_md(val));
            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr);
                w.set_mr(reg);
                w.set_mw(Mw::WRITE); // write
                w.set_cr(self.clock_range);
                w.set_mb(MbProgress::BUSY);
            });
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
        }
    }
}

impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Device
    for Ethernet<'d, T, P, TX, RX>
{
    fn is_transmit_ready(&mut self) -> bool {
        self.state.with(|s| s.desc_ring.tx.available())
    }

    fn transmit(&mut self, pkt: PacketBuf) {
        self.state.with(|s| unwrap!(s.desc_ring.tx.transmit(pkt)));
    }

    fn receive(&mut self) -> Option<PacketBuf> {
        self.state.with(|s| s.desc_ring.rx.pop_packet())
    }

    fn register_waker(&mut self, waker: &Waker) {
        WAKER.register(waker);
    }

    fn capabilities(&mut self) -> DeviceCapabilities {
        let mut caps = DeviceCapabilities::default();
        caps.max_transmission_unit = MTU;
        caps.max_burst_size = Some(TX.min(RX));
        caps
    }

    fn link_state(&mut self) -> LinkState {
        if P::poll_link(self) {
            LinkState::Up
        } else {
            LinkState::Down
        }
    }

    fn ethernet_address(&mut self) -> [u8; 6] {
        self.mac_addr
    }
}
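
// Flow note (orientation comment, not in the original source): embassy-net
// checks `is_transmit_ready()` before calling `transmit()`, calls `receive()`
// after the waker registered via `register_waker()` is woken from the ETH
// interrupt, and sizes its buffering from `capabilities()` (MTU, burst size
// min(TX, RX) descriptors).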
impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Drop
    for Ethernet<'d, T, P, TX, RX>
{
    fn drop(&mut self) {
        // NOTE(unsafe) We have `&mut self` and the interrupt doesn't use these registers
        unsafe {
            let dma = ETH.ethernet_dma();
            let mac = ETH.ethernet_mac();

            // Disable the TX DMA and wait for any previous transmissions to be completed
            dma.dmaomr().modify(|w| w.set_st(St::STOPPED));

            // Disable MAC transmitter and receiver
            mac.maccr().modify(|w| {
                w.set_re(false);
                w.set_te(false);
            });

            dma.dmaomr().modify(|w| w.set_sr(DmaomrSr::STOPPED));
        }

        // NOTE(unsafe) Exclusive access to the regs
        critical_section::with(|_| unsafe {
            for pin in self.pins.iter_mut() {
                pin.set_as_disconnected();
            }
        })
    }
}

//----------------------------------------------------------------------

struct Inner<'d, T: Instance, const TX: usize, const RX: usize> {
    _peri: PhantomData<&'d mut T>,
    desc_ring: DescriptorRing<TX, RX>,
}

impl<'d, T: Instance, const TX: usize, const RX: usize> Inner<'d, T, TX, RX> {
    pub fn new(_peri: impl Unborrow<Target = T> + 'd) -> Self {
        Self {
            _peri: PhantomData,
            desc_ring: DescriptorRing::new(),
        }
    }
}

impl<'d, T: Instance, const TX: usize, const RX: usize> PeripheralState for Inner<'d, T, TX, RX> {
    type Interrupt = crate::interrupt::ETH;

    fn on_interrupt(&mut self) {
        unwrap!(self.desc_ring.tx.on_interrupt());
        self.desc_ring.rx.on_interrupt();

        WAKER.wake();

        // TODO: Check and clear more flags
        unsafe {
            let dma = ETH.ethernet_dma();

            dma.dmasr().modify(|w| {
                w.set_ts(true);
                w.set_rs(true);
                w.set_nis(true);
            });
            // Delay for two peripheral clock cycles
            dma.dmasr().read();
            dma.dmasr().read();
        }
    }
}

static WAKER: AtomicWaker = AtomicWaker::new();

View file

@@ -1,309 +0,0 @@
use core::sync::atomic::{compiler_fence, fence, Ordering};

use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
use stm32_metapac::eth::vals::{DmaomrSr, Rpd, Rps};
use vcell::VolatileCell;

use crate::pac::ETH;

mod rx_consts {
    /// Owned by DMA engine
    pub const RXDESC_0_OWN: u32 = 1 << 31;
    /// First descriptor
    pub const RXDESC_0_FS: u32 = 1 << 9;
    /// Last descriptor
    pub const RXDESC_0_LS: u32 = 1 << 8;
    /// Error summary
    pub const RXDESC_0_ES: u32 = 1 << 15;
    /// Frame length
    pub const RXDESC_0_FL_MASK: u32 = 0x3FFF;
    pub const RXDESC_0_FL_SHIFT: usize = 16;
    pub const RXDESC_1_RBS_MASK: u32 = 0x0fff;
    /// Second address chained
    pub const RXDESC_1_RCH: u32 = 1 << 14;
    /// End Of Ring
    pub const RXDESC_1_RER: u32 = 1 << 15;
}
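
// Decode example (illustrative, not in the original source): a write-back word
// rdes0 = 0x0040_0300 has FS | LS set and OWN | ES clear, so it describes a
// valid single-buffer frame of (0x0040_0300 >> 16) & 0x3FFF = 64 bytes.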
use rx_consts::*;

/// Receive Descriptor representation
///
/// * rdes0: OWN and Status
/// * rdes1: allocated buffer length
/// * rdes2: data buffer address
/// * rdes3: next descriptor address
#[repr(C)]
struct RDes {
    rdes0: VolatileCell<u32>,
    rdes1: VolatileCell<u32>,
    rdes2: VolatileCell<u32>,
    rdes3: VolatileCell<u32>,
}

impl RDes {
    pub const fn new() -> Self {
        Self {
            rdes0: VolatileCell::new(0),
            rdes1: VolatileCell::new(0),
            rdes2: VolatileCell::new(0),
            rdes3: VolatileCell::new(0),
        }
    }

    /// Return true if this RDes is acceptable to us
    #[inline(always)]
    pub fn valid(&self) -> bool {
        // Write-back descriptor is valid if:
        //
        // Contains first buffer of packet AND contains last buf of
        // packet AND no errors
        (self.rdes0.get() & (RXDESC_0_ES | RXDESC_0_FS | RXDESC_0_LS))
            == (RXDESC_0_FS | RXDESC_0_LS)
    }

    /// Return true if this RDes is not currently owned by the DMA
    #[inline(always)]
    pub fn available(&self) -> bool {
        self.rdes0.get() & RXDESC_0_OWN == 0 // Owned by us
    }

    /// Configures the reception buffer address and length and passes descriptor ownership to the DMA
    #[inline(always)]
    pub fn set_ready(&mut self, buf_addr: u32, buf_len: usize) {
        self.rdes1
            .set(self.rdes1.get() | (buf_len as u32) & RXDESC_1_RBS_MASK);
        self.rdes2.set(buf_addr);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);
        compiler_fence(Ordering::Release);

        self.rdes0.set(self.rdes0.get() | RXDESC_0_OWN);

        // Used to flush the store buffer as fast as possible to make the buffer available for the
        // DMA.
        fence(Ordering::SeqCst);
    }

    // points to next descriptor (RCH)
    #[inline(always)]
    fn set_buffer2(&mut self, buffer: *const u8) {
        self.rdes3.set(buffer as u32);
    }

    #[inline(always)]
    fn set_end_of_ring(&mut self) {
        self.rdes1.set(self.rdes1.get() | RXDESC_1_RER);
    }

    #[inline(always)]
    fn packet_len(&self) -> usize {
        ((self.rdes0.get() >> RXDESC_0_FL_SHIFT) & RXDESC_0_FL_MASK) as usize
    }

    pub fn setup(&mut self, next: Option<&Self>) {
        // Defer this initialization to this function, so we can have `RingEntry` on bss.
        self.rdes1.set(self.rdes1.get() | RXDESC_1_RCH);
        match next {
            Some(next) => self.set_buffer2(next as *const _ as *const u8),
            None => {
                self.set_buffer2(0 as *const u8);
                self.set_end_of_ring();
            }
        }
    }
}

/// Running state of the `RxRing`
#[derive(PartialEq, Eq, Debug)]
pub enum RunningState {
    Unknown,
    Stopped,
    Running,
}

impl RunningState {
    /// whether self equals `RunningState::Running`
    pub fn is_running(&self) -> bool {
        *self == RunningState::Running
    }
}

/// Rx ring of descriptors and packets
///
/// This ring has three major locations that work in lock-step. The DMA will never write to the tail
/// index, so the `read_index` must never pass the tail index. The `next_tail_index` is always 1
/// slot ahead of the real tail index, and it must never pass the `read_index` or it could overwrite
/// a packet still to be passed to the application.
///
/// nt can't pass r (no alloc)
/// +---+---+---+---+ Read ok +---+---+---+---+ No Read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation ok +---+---+---+---+ +---+---+---+---+
/// ^ ^t ^t ^ ^t ^
/// |r |r |r
/// |nt |nt |nt
///
///
/// +---+---+---+---+ Read ok +---+---+---+---+ Can't read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation fail +---+---+---+---+ Allocation ok +---+---+---+---+
/// ^ ^t ^ ^t ^ ^ ^ ^t
/// |r | |r | | |r
/// |nt |nt |nt
///
pub(crate) struct RDesRing<const N: usize> {
    descriptors: [RDes; N],
    buffers: [Option<PacketBox>; N],
    read_index: usize,
    next_tail_index: usize,
}

impl<const N: usize> RDesRing<N> {
    pub const fn new() -> Self {
        const RDES: RDes = RDes::new();
        const BUFFERS: Option<PacketBox> = None;

        Self {
            descriptors: [RDES; N],
            buffers: [BUFFERS; N],
            read_index: 0,
            next_tail_index: 0,
        }
    }

    pub(crate) fn init(&mut self) {
        assert!(N > 1);

        let mut last_index = 0;
        for (index, buf) in self.buffers.iter_mut().enumerate() {
            let pkt = match PacketBox::new(Packet::new()) {
                Some(p) => p,
                None => {
                    if index == 0 {
                        panic!("Could not allocate at least one buffer for Ethernet receiving");
                    } else {
                        break;
                    }
                }
            };
            self.descriptors[index].set_ready(pkt.as_ptr() as u32, pkt.len());
            *buf = Some(pkt);
            last_index = index;
        }
        self.next_tail_index = (last_index + 1) % N;
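
        // Worked example (illustrative): with N = 4 and all four allocations
        // succeeding, last_index = 3 and next_tail_index wraps back to 0.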
        // Not sure if this is supposed to span all of the descriptors or just those that contain buffers
        {
            let mut previous: Option<&mut RDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }
            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }

        // Register rx descriptor start
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmardlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        };

        // We already have fences in `set_owned`, which is called in `setup`

        // Start receive
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_sr(DmaomrSr::STARTED))
        };

        self.demand_poll();
    }

    fn demand_poll(&self) {
        unsafe { ETH.ethernet_dma().dmarpdr().write(|w| w.set_rpd(Rpd::POLL)) };
    }

    pub(crate) fn on_interrupt(&mut self) {
        // XXX: Do we need to do anything here? Maybe we should try to advance the tail ptr, but it
        // would soon hit the read ptr anyway, and we will wake smoltcp's stack on the interrupt
        // which should try to pop a packet...
    }

    /// Get current `RunningState`
    fn running_state(&self) -> RunningState {
        match unsafe { ETH.ethernet_dma().dmasr().read().rps() } {
            // Reset or Stop Receive Command issued
            Rps::STOPPED => RunningState::Stopped,
            // Fetching receive transfer descriptor
            Rps::RUNNINGFETCHING => RunningState::Running,
            // Waiting for receive packet
            Rps::RUNNINGWAITING => RunningState::Running,
            // Receive descriptor unavailable
            Rps::SUSPENDED => RunningState::Stopped,
            // Closing receive descriptor
            Rps(0b101) => RunningState::Running,
            // Transferring the receive packet data from receive buffer to host memory
            Rps::RUNNINGWRITING => RunningState::Running,
            _ => RunningState::Unknown,
        }
    }

    pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
        if !self.running_state().is_running() {
            self.demand_poll();
        }

        // Not sure if the contents of the write buffer on the M7 can affect reads, so we are using
        // a DMB here just in case; it also serves as a hint to the compiler that we're syncing the
        // buffer (I think .-.)
        fence(Ordering::SeqCst);

        let read_available = self.descriptors[self.read_index].available();
        let tail_index = (self.next_tail_index + N - 1) % N;
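
        // Worked example (illustrative): with N = 4 and next_tail_index = 0,
        // the real tail is descriptor 3, so popping stops once read_index
        // reaches 3.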
        let pkt = if read_available && self.read_index != tail_index {
            let pkt = self.buffers[self.read_index].take();
            let len = self.descriptors[self.read_index].packet_len();

            assert!(pkt.is_some());
            let valid = self.descriptors[self.read_index].valid();

            self.read_index = (self.read_index + 1) % N;
            if valid {
                pkt.map(|p| p.slice(0..len))
            } else {
                None
            }
        } else {
            None
        };

        // Try to advance the tail_index
        if self.next_tail_index != self.read_index {
            match PacketBox::new(Packet::new()) {
                Some(b) => {
                    let addr = b.as_ptr() as u32;
                    let buffer_len = b.len();
                    self.buffers[self.next_tail_index].replace(b);
                    self.descriptors[self.next_tail_index].set_ready(addr, buffer_len);

                    // "Preceding reads and writes cannot be moved past subsequent writes."
                    fence(Ordering::Release);

                    self.next_tail_index = (self.next_tail_index + 1) % N;
                }
                None => {}
            }
        }
        pkt
    }
}

View file

@@ -1,238 +0,0 @@
use core::sync::atomic::{compiler_fence, fence, Ordering};

use embassy_net::PacketBuf;
use stm32_metapac::eth::vals::St;
use vcell::VolatileCell;

use crate::pac::ETH;

#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    NoBufferAvailable,
    // TODO: Break down this error into several others
    TransmissionError,
}

/// Transmit and Receive Descriptor fields
#[allow(dead_code)]
mod tx_consts {
    pub const TXDESC_0_OWN: u32 = 1 << 31;
    pub const TXDESC_0_IOC: u32 = 1 << 30;
    // First segment of frame
    pub const TXDESC_0_FS: u32 = 1 << 28;
    // Last segment of frame
    pub const TXDESC_0_LS: u32 = 1 << 29;
    // Transmit end of ring
    pub const TXDESC_0_TER: u32 = 1 << 21;
    // Second address chained
    pub const TXDESC_0_TCH: u32 = 1 << 20;
    // Error status
    pub const TXDESC_0_ES: u32 = 1 << 15;

    // Transmit buffer size
    pub const TXDESC_1_TBS_SHIFT: usize = 0;
    pub const TXDESC_1_TBS_MASK: u32 = 0x0fff << TXDESC_1_TBS_SHIFT;
}
use tx_consts::*;

/// Transmit Descriptor representation
///
/// * tdes0: control
/// * tdes1: buffer lengths
/// * tdes2: data buffer address
/// * tdes3: next descriptor address
#[repr(C)]
struct TDes {
    tdes0: VolatileCell<u32>,
    tdes1: VolatileCell<u32>,
    tdes2: VolatileCell<u32>,
    tdes3: VolatileCell<u32>,
}

impl TDes {
    pub const fn new() -> Self {
        Self {
            tdes0: VolatileCell::new(0),
            tdes1: VolatileCell::new(0),
            tdes2: VolatileCell::new(0),
            tdes3: VolatileCell::new(0),
        }
    }

    /// Return true if this TDes is not currently owned by the DMA
    pub fn available(&self) -> bool {
        (self.tdes0.get() & TXDESC_0_OWN) == 0
    }

    /// Pass ownership to the DMA engine
    fn set_owned(&mut self) {
        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);
        compiler_fence(Ordering::Release);

        self.tdes0.set(self.tdes0.get() | TXDESC_0_OWN);

        // Used to flush the store buffer as fast as possible to make the buffer available for the
        // DMA.
        fence(Ordering::SeqCst);
    }

    fn set_buffer1(&mut self, buffer: *const u8) {
        self.tdes2.set(buffer as u32);
    }

    fn set_buffer1_len(&mut self, len: usize) {
        self.tdes1
            .set((self.tdes1.get() & !TXDESC_1_TBS_MASK) | ((len as u32) << TXDESC_1_TBS_SHIFT));
    }

    // points to next descriptor (TCH)
    fn set_buffer2(&mut self, buffer: *const u8) {
        self.tdes3.set(buffer as u32);
    }

    fn set_end_of_ring(&mut self) {
        self.tdes0.set(self.tdes0.get() | TXDESC_0_TER);
    }

    // set up as part of the ring buffer - configures the tdes
    pub fn setup(&mut self, next: Option<&Self>) {
        // Defer this initialization to this function, so we can have `RingEntry` on bss.
        self.tdes0
            .set(TXDESC_0_TCH | TXDESC_0_IOC | TXDESC_0_FS | TXDESC_0_LS);
        match next {
            Some(next) => self.set_buffer2(next as *const TDes as *const u8),
            None => {
                self.set_buffer2(0 as *const u8);
                self.set_end_of_ring();
            }
        }
    }
}

pub(crate) struct TDesRing<const N: usize> {
    descriptors: [TDes; N],
    buffers: [Option<PacketBuf>; N],
    next_entry: usize,
}

impl<const N: usize> TDesRing<N> {
    pub const fn new() -> Self {
        const TDES: TDes = TDes::new();
        const BUFFERS: Option<PacketBuf> = None;

        Self {
            descriptors: [TDES; N],
            buffers: [BUFFERS; N],
            next_entry: 0,
        }
    }

    /// Initialise this TDesRing. Assumes the ring may be in an arbitrary (corrupt) state.
    ///
    /// The current memory address of the buffers inside this TDesRing
    /// will be stored in the descriptors, so ensure the TDesRing is
    /// not moved after initialisation.
    pub(crate) fn init(&mut self) {
        assert!(N > 0);

        {
            let mut previous: Option<&mut TDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }
            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }
        self.next_entry = 0;

        // Register tx descriptor start
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmatdlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        }

        // "Preceding reads and writes cannot be moved past subsequent writes."
        #[cfg(feature = "fence")]
        fence(Ordering::Release);

        // We don't need a compiler fence here because all interactions with `Descriptor` are
        // volatiles

        // Start transmission
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_st(St::STARTED))
        };
    }

    /// Return true if a TDes is available for use
    pub(crate) fn available(&self) -> bool {
        self.descriptors[self.next_entry].available()
    }

    pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
        if !self.available() {
            return Err(Error::NoBufferAvailable);
        }

        let descriptor = &mut self.descriptors[self.next_entry];

        let pkt_len = pkt.len();
        let address = pkt.as_ptr() as *const u8;

        descriptor.set_buffer1(address);
        descriptor.set_buffer1_len(pkt_len);

        self.buffers[self.next_entry].replace(pkt);

        descriptor.set_owned();

        // Ensure changes to the descriptor are committed before the DMA engine sees the tail pointer store.
        // This will generate a DMB instruction.
        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);

        // Move the tail pointer (TPR) to the next descriptor
        self.next_entry = (self.next_entry + 1) % N;

        // Request the DMA engine to poll the latest tx descriptor
        unsafe { ETH.ethernet_dma().dmatpdr().modify(|w| w.0 = 1) }

        Ok(())
    }

    pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
        let previous = (self.next_entry + N - 1) % N;
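
        // Worked example (illustrative): with N = 8 and next_entry already
        // advanced to 0, previous = 7, i.e. the descriptor most recently
        // handed to the DMA.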
        let td = &self.descriptors[previous];

        // DMB to ensure that we are reading an updated value, probably not needed at the hardware
        // level, but this is also a hint to the compiler that we're syncing on the buffer.
        fence(Ordering::SeqCst);

        let tdes0 = td.tdes0.get();

        if tdes0 & TXDESC_0_OWN != 0 {
            // Transmission isn't done yet, probably a receive interrupt that fired this
            return Ok(());
        }

        // Release the buffer
        self.buffers[previous].take();

        if tdes0 & TXDESC_0_ES != 0 {
            Err(Error::TransmissionError)
        } else {
            Ok(())
        }
    }
}