diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs
index d956047d5..60f4fbd09 100644
--- a/embassy-stm32/src/dma/bdma.rs
+++ b/embassy-stm32/src/dma/bdma.rs
@@ -9,7 +9,7 @@ use atomic_polyfill::AtomicUsize;
 use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
 
-use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
+use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
 use super::word::{Word, WordSize};
 use super::Dir;
 use crate::_generated::BDMA_CHANNEL_COUNT;
@@ -395,13 +395,13 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
     }
 }
 
-pub struct RingBuffer<'a, C: Channel, W: Word> {
+pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
     cr: regs::Cr,
     channel: PeripheralRef<'a, C>,
-    ringbuf: DmaRingBuffer<'a, W>,
+    ringbuf: ReadableDmaRingBuffer<'a, W>,
 }
 
-impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
+impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
     pub unsafe fn new_read(
         channel: impl Peripheral<P = C> + 'a,
         _request: Request,
@@ -442,7 +442,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         let mut this = Self {
             channel,
             cr: w,
-            ringbuf: DmaRingBuffer::new(buffer),
+            ringbuf: ReadableDmaRingBuffer::new(buffer),
         };
         this.clear_irqs();
 
@@ -513,7 +513,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         .await
     }
 
-    /// The capacity of the ringbuffer
+    /// The capacity of the ringbuffer.
     pub fn cap(&self) -> usize {
         self.ringbuf.cap()
     }
@@ -550,7 +550,159 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
     }
 }
 
-impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
+
+pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
+    cr: regs::Cr,
+    channel: PeripheralRef<'a, C>,
+    ringbuf: WritableDmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
+    pub unsafe fn new_write(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buffer: &'a mut [W],
+        _options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+
+        let len = buffer.len();
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::MemoryToPeripheral;
+        let data_size = W::size();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        #[cfg(bdma_v2)]
+        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
+
+        let mut w = regs::Cr(0);
+        w.set_psize(data_size.into());
+        w.set_msize(data_size.into());
+        w.set_minc(vals::Inc::ENABLED);
+        w.set_dir(dir.into());
+        w.set_teie(true);
+        w.set_htie(true);
+        w.set_tcie(true);
+        w.set_circ(vals::Circ::ENABLED);
+        w.set_pl(vals::Pl::VERYHIGH);
+        w.set_en(true);
+
+        let buffer_ptr = buffer.as_mut_ptr();
+        let mut this = Self {
+            channel,
+            cr: w,
+            ringbuf: WritableDmaRingBuffer::new(buffer),
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.ch(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.mar().write_value(buffer_ptr as u32);
+        ch.ndtr().write(|w| w.set_ndt(len as u16));
+
+        this
+    }
+
+    pub fn start(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        ch.cr().write_value(self.cr)
+    }
+
+    pub fn clear(&mut self) {
+        self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+    }
+
+    /// Write elements to the ring buffer
+    /// Return a tuple of the length written and the length remaining in the buffer
+    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
+        self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf)
+    }
+
+    /// Write an exact number of elements to the ringbuffer.
+    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
+        use core::future::poll_fn;
+        use core::sync::atomic::compiler_fence;
+
+        let mut written_data = 0;
+        let buffer_len = buffer.len();
+
+        poll_fn(|cx| {
+            self.set_waker(cx.waker());
+
+            compiler_fence(Ordering::SeqCst);
+
+            match self.write(&buffer[written_data..buffer_len]) {
+                Ok((len, remaining)) => {
+                    written_data += len;
+                    if written_data == buffer_len {
+                        Poll::Ready(Ok(remaining))
+                    } else {
+                        Poll::Pending
+                    }
+                }
+                Err(e) => Poll::Ready(Err(e)),
+            }
+        })
+        .await
+    }
+
+    /// The capacity of the ringbuffer.
+    pub fn cap(&self) -> usize {
+        self.ringbuf.cap()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    fn clear_irqs(&mut self) {
+        let dma = self.channel.regs();
+        dma.ifcr().write(|w| {
+            w.set_htif(self.channel.num(), true);
+            w.set_tcif(self.channel.num(), true);
+            w.set_teif(self.channel.num(), true);
+        });
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        // If the channel is enabled and transfer is not completed, we need to perform
+        // two separate write accesses to the CR register to disable the channel.
+        ch.cr().write(|w| {
+            w.set_teie(true);
+            w.set_htie(true);
+            w.set_tcie(true);
+        });
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().ch(self.channel.num());
+        ch.cr().read().en()
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
     fn drop(&mut self) {
         self.request_stop();
         while self.is_running() {}
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
index 219ef2eb0..9cd7aa8d5 100644
--- a/embassy-stm32/src/dma/dma.rs
+++ b/embassy-stm32/src/dma/dma.rs
@@ -7,7 +7,7 @@ use core::task::{Context, Poll, Waker};
 use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
 
-use super::ringbuffer::{DmaCtrl, DmaRingBuffer, OverrunError};
+use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
 use super::word::{Word, WordSize};
 use super::Dir;
 use crate::_generated::DMA_CHANNEL_COUNT;
@@ -625,13 +625,13 @@ impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
     }
 }
 
-pub struct RingBuffer<'a, C: Channel, W: Word> {
+pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
     cr: regs::Cr,
     channel: PeripheralRef<'a, C>,
-    ringbuf: DmaRingBuffer<'a, W>,
+    ringbuf: ReadableDmaRingBuffer<'a, W>,
 }
 
-impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
+impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
     pub unsafe fn new_read(
         channel: impl Peripheral<P = C> + 'a,
         _request: Request,
@@ -677,7 +677,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         let mut this = Self {
             channel,
             cr: w,
-            ringbuf: DmaRingBuffer::new(buffer),
+            ringbuf: ReadableDmaRingBuffer::new(buffer),
         };
         this.clear_irqs();
 
@@ -797,7 +797,176 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
     }
 }
 
-impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {
+impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
+
+pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
+    cr: regs::Cr,
+    channel: PeripheralRef<'a, C>,
+    ringbuf: WritableDmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
+    pub unsafe fn new_write(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buffer: &'a mut [W],
+        options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+
+        let len = buffer.len();
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::MemoryToPeripheral;
+        let data_size = W::size();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        let mut w = regs::Cr(0);
+        w.set_dir(dir.into());
+        w.set_msize(data_size.into());
+        w.set_psize(data_size.into());
+        w.set_pl(vals::Pl::VERYHIGH);
+        w.set_minc(vals::Inc::INCREMENTED);
+        w.set_pinc(vals::Inc::FIXED);
+        w.set_teie(true);
+        w.set_htie(options.half_transfer_ir);
+        w.set_tcie(true);
+        w.set_circ(vals::Circ::ENABLED);
+        #[cfg(dma_v1)]
+        w.set_trbuff(true);
+        #[cfg(dma_v2)]
+        w.set_chsel(_request);
+        w.set_pburst(options.pburst.into());
+        w.set_mburst(options.mburst.into());
+        w.set_pfctrl(options.flow_ctrl.into());
+        w.set_en(true);
+
+        let buffer_ptr = buffer.as_mut_ptr();
+        let mut this = Self {
+            channel,
+            cr: w,
+            ringbuf: WritableDmaRingBuffer::new(buffer),
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.st(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.m0ar().write_value(buffer_ptr as u32);
+        ch.ndtr().write_value(regs::Ndtr(len as _));
+        ch.fcr().write(|w| {
+            if let Some(fth) = options.fifo_threshold {
+                // FIFO mode
+                w.set_dmdis(vals::Dmdis::DISABLED);
+                w.set_fth(fth.into());
+            } else {
+                // Direct mode
+                w.set_dmdis(vals::Dmdis::ENABLED);
+            }
+        });
+
+        this
+    }
+
+    pub fn start(&mut self) {
+        let ch = self.channel.regs().st(self.channel.num());
+        ch.cr().write_value(self.cr);
+    }
+
+    pub fn clear(&mut self) {
+        self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+    }
+
+    /// Write elements to the ring buffer
+    /// Return a tuple of the length written and the length remaining in the buffer
+    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
+        self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf)
+    }
+
+    /// Write an exact number of elements to the ringbuffer.
+    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
+        use core::future::poll_fn;
+        use core::sync::atomic::compiler_fence;
+
+        let mut written_data = 0;
+        let buffer_len = buffer.len();
+
+        poll_fn(|cx| {
+            self.set_waker(cx.waker());
+
+            compiler_fence(Ordering::SeqCst);
+
+            match self.write(&buffer[written_data..buffer_len]) {
+                Ok((len, remaining)) => {
+                    written_data += len;
+                    if written_data == buffer_len {
+                        Poll::Ready(Ok(remaining))
+                    } else {
+                        Poll::Pending
+                    }
+                }
+                Err(e) => Poll::Ready(Err(e)),
+            }
+        })
+        .await
+    }
+
+    /// The capacity of the ringbuffer.
+    pub fn cap(&self) -> usize {
+        self.ringbuf.cap()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    fn clear_irqs(&mut self) {
+        let channel_number = self.channel.num();
+        let dma = self.channel.regs();
+        let isrn = channel_number / 4;
+        let isrbit = channel_number % 4;
+
+        dma.ifcr(isrn).write(|w| {
+            w.set_htif(isrbit, true);
+            w.set_tcif(isrbit, true);
+            w.set_teif(isrbit, true);
+        });
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().st(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        ch.cr().write(|w| {
+            w.set_teie(true);
+            w.set_htie(true);
+            w.set_tcie(true);
+        });
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().st(self.channel.num());
+        ch.cr().read().en()
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
     fn drop(&mut self) {
         self.request_stop();
         while self.is_running() {}
diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs
index 190793974..945c7508c 100644
--- a/embassy-stm32/src/dma/ringbuffer.rs
+++ b/embassy-stm32/src/dma/ringbuffer.rs
@@ -29,7 +29,7 @@ use super::word::Word;
 ///  |                          |                    |                        |
 ///  +- end --------------------+                    +- start ----------------+
 /// ```
-pub struct DmaRingBuffer<'a, W: Word> {
+pub struct ReadableDmaRingBuffer<'a, W: Word> {
     pub(crate) dma_buf: &'a mut [W],
     start: usize,
 }
@@ -51,7 +51,7 @@ pub trait DmaCtrl {
     fn reset_complete_count(&mut self) -> usize;
 }
 
-impl<'a, W: Word> DmaRingBuffer<'a, W> {
+impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
     pub fn new(dma_buf: &'a mut [W]) -> Self {
         Self { dma_buf, start: 0 }
     }
@@ -197,6 +197,112 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
         length
     }
 }
+
+pub struct WritableDmaRingBuffer<'a, W: Word> {
+    pub(crate) dma_buf: &'a mut [W],
+    end: usize,
+}
+
+impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
+    pub fn new(dma_buf: &'a mut [W]) -> Self {
+        Self { dma_buf, end: 0 }
+    }
+
+    /// Reset the ring buffer to its initial state
+    pub fn clear(&mut self, mut dma: impl DmaCtrl) {
+        self.end = 0;
+        dma.reset_complete_count();
+    }
+
+    /// The capacity of the ringbuffer
+    pub const fn cap(&self) -> usize {
+        self.dma_buf.len()
+    }
+
+    /// The current position of the ringbuffer
+    fn pos(&self, remaining_transfers: usize) -> usize {
+        self.cap() - remaining_transfers
+    }
+
+    /// Write elements to the ring buffer
+    /// Return a tuple of the length written and the capacity remaining to be written in the buffer
+    pub fn write(&mut self, mut dma: impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
+        let start = self.pos(dma.get_remaining_transfers());
+        if start > self.end {
+            // The occupied portion in the ring buffer DOES wrap
+            let len = self.copy_from(buf, self.end..start);
+
+            compiler_fence(Ordering::SeqCst);
+
+            // Confirm that the DMA is not inside data we could have written
+            let (pos, complete_count) =
+                critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count()));
+            if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 {
+                Err(OverrunError)
+            } else {
+                self.end = (self.end + len) % self.cap();
+
+                Ok((len, self.cap() - (start - self.end)))
+            }
+        } else if start == self.end && dma.get_complete_count() == 0 {
+            Ok((0, 0))
+        } else if start <= self.end && self.end + buf.len() < self.cap() {
+            // The occupied portion in the ring buffer DOES NOT wrap
+            // and copying elements into the buffer WILL NOT cause it to wrap
+
+            // Copy into the dma buffer
+            let len = self.copy_from(buf, self.end..self.cap());
+
+            compiler_fence(Ordering::SeqCst);
+
+            // Confirm that the DMA is not inside data we could have written
+            let pos = self.pos(dma.get_remaining_transfers());
+            if pos > self.end || pos < start || dma.get_complete_count() > 1 {
+                Err(OverrunError)
+            } else {
+                self.end = (self.end + len) % self.cap();
+
+                Ok((len, self.cap() - (self.end - start)))
+            }
+        } else {
+            // The occupied portion in the ring buffer DOES NOT wrap
+            // and copying elements into the buffer WILL cause it to wrap
+
+            let tail = self.copy_from(buf, self.end..self.cap());
+            let head = self.copy_from(&buf[tail..], 0..start);
+
+            compiler_fence(Ordering::SeqCst);
+
+            // Confirm that the DMA is not inside data we could have written
+            let pos = self.pos(dma.get_remaining_transfers());
+            if pos > self.end || pos < start || dma.reset_complete_count() > 1 {
+                Err(OverrunError)
+            } else {
+                self.end = head;
+
+                Ok((tail + head, self.cap() - (start - self.end)))
+            }
+        }
+    }
+    /// Copy into the dma buffer at `data_range` from `buf`
+    fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
+        // Limit the number of elements that can be copied
+        let length = usize::min(data_range.len(), buf.len());
+
+        // Copy into dma buffer from read buffer
+        // We need to do it like this instead of a simple copy_from_slice() because
+        // reading from a part of memory that may be simultaneously written to is unsafe
+        unsafe {
+            let dma_buf = self.dma_buf.as_mut_ptr();
+
+            for i in 0..length {
+                core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
+            }
+        }
+
+        length
+    }
+}
 #[cfg(test)]
 mod tests {
     use core::array;
@@ -263,7 +369,7 @@ mod tests {
     #[test]
     fn empty_and_read_not_started() {
         let mut dma_buf = [0u8; 16];
-        let ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
     }
@@ -273,7 +379,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
@@ -314,7 +420,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
@@ -349,7 +455,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
@@ -384,7 +490,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
@@ -420,7 +526,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
@@ -454,7 +560,7 @@ mod tests {
         let mut dma = TestCircularTransfer::new(16);
 
         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
-        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
+        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
 
         assert_eq!(0, ringbuf.start);
         assert_eq!(16, ringbuf.cap());
diff --git a/embassy-stm32/src/usart/ringbuffered.rs b/embassy-stm32/src/usart/ringbuffered.rs
index 80261d048..71077c070 100644
--- a/embassy-stm32/src/usart/ringbuffered.rs
+++ b/embassy-stm32/src/usart/ringbuffered.rs
@@ -6,12 +6,12 @@ use embassy_hal_internal::PeripheralRef;
 use futures::future::{select, Either};
 
 use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx};
-use crate::dma::RingBuffer;
+use crate::dma::ReadableRingBuffer;
 use crate::usart::{Regs, Sr};
 
 pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
     _peri: PeripheralRef<'d, T>,
-    ring_buf: RingBuffer<'d, RxDma, u8>,
+    ring_buf: ReadableRingBuffer<'d, RxDma, u8>,
 }
 
 impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
@@ -24,7 +24,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
         let request = self.rx_dma.request();
         let opts = Default::default();
 
-        let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
+        let ring_buf = unsafe { ReadableRingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
 
         RingBufferedUartRx {
             _peri: self._peri,