diff --git a/embassy-stm32/build.rs b/embassy-stm32/build.rs index 84e8be25d..70f4515db 100644 --- a/embassy-stm32/build.rs +++ b/embassy-stm32/build.rs @@ -1121,6 +1121,8 @@ fn main() { (("dac", "CH2"), quote!(crate::dac::DacDma2)), (("timer", "UP"), quote!(crate::timer::UpDma)), (("hash", "IN"), quote!(crate::hash::Dma)), + (("cryp", "IN"), quote!(crate::cryp::DmaIn)), + (("cryp", "OUT"), quote!(crate::cryp::DmaOut)), (("timer", "CH1"), quote!(crate::timer::Ch1Dma)), (("timer", "CH2"), quote!(crate::timer::Ch2Dma)), (("timer", "CH3"), quote!(crate::timer::Ch3Dma)), diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 8f259520a..74b095b6f 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -2,14 +2,39 @@ #[cfg(any(cryp_v2, cryp_v3))] use core::cmp::min; use core::marker::PhantomData; +use core::ptr; use embassy_hal_internal::{into_ref, PeripheralRef}; +use embassy_sync::waitqueue::AtomicWaker; +use crate::dma::{NoDma, Priority, Transfer, TransferOptions}; +use crate::interrupt::typelevel::Interrupt; use crate::{interrupt, pac, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits +static CRYP_WAKER: AtomicWaker = AtomicWaker::new(); + +/// CRYP interrupt handler. +pub struct InterruptHandler<T: Instance> { + _phantom: PhantomData<T>, +} + +impl<T: Instance> interrupt::typelevel::Handler<T::Interrupt> for InterruptHandler<T> { + unsafe fn on_interrupt() { + let bits = T::regs().misr().read(); + if bits.inmis() { + T::regs().imscr().modify(|w| w.set_inim(false)); + CRYP_WAKER.wake(); + } + if bits.outmis() { + T::regs().imscr().modify(|w| w.set_outim(false)); + CRYP_WAKER.wake(); + } + } +} + /// This trait encapsulates all cipher-specific behavior/ pub trait Cipher<'c> { /// Processing block size. Determined by the processor and the algorithm. @@ -32,17 +57,26 @@ pub trait Cipher<'c> { fn prepare_key(&self, _p: &pac::cryp::Cryp) {} /// Performs any cipher-specific initialization. - fn init_phase(&self, _p: &pac::cryp::Cryp) {} + fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, _p: &pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) {} + + /// Performs any cipher-specific initialization. + async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, _p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) + where + DmaIn: crate::cryp::DmaIn<T>, + DmaOut: crate::cryp::DmaOut<T>, + { + } /// Called prior to processing the last data block for cipher-specific operations. - fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { return [0; 4]; } /// Called after processing the last data block for cipher-specific operations. - fn post_final_block( + fn post_final_blocking<T: Instance, DmaIn, DmaOut>( &self, _p: &pac::cryp::Cryp, + _cryp: &Cryp<T, DmaIn, DmaOut>, _dir: Direction, _int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -50,7 +84,22 @@ ) { } - /// Called prior to processing the first associated data block for cipher-specific operations. + /// Called after processing the last data block for cipher-specific operations. + async fn post_final<T: Instance, DmaIn, DmaOut>( + &self, + _p: &pac::cryp::Cryp, + _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + _dir: Direction, + _int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + _padding_mask: [u8; 16], + ) where + DmaIn: crate::cryp::DmaIn<T>, + DmaOut: crate::cryp::DmaOut<T>, + { + } + + /// Returns the AAD header block as required by the cipher.
fn get_header_block(&self) -> &[u8] { return [0; 0].as_slice(); } @@ -425,14 +474,24 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase_blocking(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + async fn init_phase( + &self, + p: &pac::cryp::Cryp, + _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + ) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. if dir == Direction::Encrypt { p.cr().modify(|w| w.set_crypen(false)); @@ -446,16 +505,17 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking( &self, p: &pac::cryp::Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -471,17 +531,44 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } p.cr().modify(|w| w.set_crypen(true)); p.cr().modify(|w| w.set_gcm_ccmph(3)); - let mut index = 0; - let end_index = Self::BLOCK_SIZE; - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&int_data[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - for _ in 0..4 { - p.dout().read(); + + cryp.write_bytes_blocking(Self::BLOCK_SIZE, int_data); + cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); + } + } + + #[cfg(cryp_v2)] + async fn post_final( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + if dir == Direction::Encrypt { + // Handle special GCM partial block process. 
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + + let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + + let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); + let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); + + embassy_futures::join::join(read, write).await; + + int_data.copy_from_slice(&out_data); } } } @@ -532,14 +619,24 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + async fn init_phase<T: Instance, DmaIn, DmaOut>( + &self, + p: &pac::cryp::Cryp, + _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + ) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. if dir == Direction::Encrypt { p.cr().modify(|w| w.set_crypen(false)); @@ -553,16 +650,17 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking<T: Instance, DmaIn, DmaOut>( &self, p: &pac::cryp::Cryp, + cryp: &Cryp<T, DmaIn, DmaOut>, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -578,17 +676,42 @@ } p.cr().modify(|w| w.set_crypen(true)); p.cr().modify(|w| w.set_gcm_ccmph(3)); - let mut index = 0; - let end_index = Self::BLOCK_SIZE; - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&int_data[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - for _ in 0..4 { - p.dout().read(); + + cryp.write_bytes_blocking(Self::BLOCK_SIZE, int_data); + cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); + } + } + + #[cfg(cryp_v2)] + async fn post_final<T: Instance, DmaIn, DmaOut>( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) where + DmaIn: crate::cryp::DmaIn<T>, + DmaOut: crate::cryp::DmaOut<T>, + { + if dir == Direction::Encrypt { + // Handle special GCM partial block process.
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + + let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + + let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); + let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); + + embassy_futures::join::join(read, write).await; } } } @@ -697,18 +820,24 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, cryp: &Cryp<T, DmaIn, DmaOut>) { p.cr().modify(|w| w.set_gcm_ccmph(0)); - let mut index = 0; - let end_index = index + Self::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&self.block0[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + cryp.write_bytes_blocking(Self::BLOCK_SIZE, &self.block0); + + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) + where + DmaIn: crate::cryp::DmaIn<T>, + DmaOut: crate::cryp::DmaOut<T>, + { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + + Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, &self.block0).await; + p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } @@ -718,7 +847,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special CCM partial block process. let mut temp1 = [0; 4]; if dir == Direction::Decrypt { @@ -737,16 +866,17 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking<T: Instance, DmaIn, DmaOut>( &self, p: &pac::cryp::Cryp, + cryp: &Cryp<T, DmaIn, DmaOut>, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], temp1: [u32; 4], @@ -774,8 +904,48 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip let int_word = u32::from_le_bytes(int_bytes); in_data[i] = int_word; in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; - p.din().write_value(in_data[i]); } + cryp.write_words_blocking(Self::BLOCK_SIZE, &in_data); + } + } + + #[cfg(cryp_v2)] + async fn post_final<T: Instance, DmaIn, DmaOut>( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + temp1: [u32; 4], + padding_mask: [u8; 16], + ) where + DmaIn: crate::cryp::DmaIn<T>, + DmaOut: crate::cryp::DmaOut<T>, + { + if dir == Direction::Decrypt { + //Handle special CCM partial block process.
+ let mut temp2 = [0; 4]; + temp2[0] = p.csgcmccmr(0).read().swap_bytes(); + temp2[1] = p.csgcmccmr(1).read().swap_bytes(); + temp2[2] = p.csgcmccmr(2).read().swap_bytes(); + temp2[3] = p.csgcmccmr(3).read().swap_bytes(); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(1)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + // Header phase + p.cr().modify(|w| w.set_gcm_ccmph(1)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + let mut in_data: [u32; 4] = [0; 4]; + for i in 0..in_data.len() { + let mut int_bytes: [u8; 4] = [0; 4]; + int_bytes.copy_from_slice(&int_data[(i * 4)..(i * 4) + 4]); + let int_word = u32::from_le_bytes(int_bytes); + in_data[i] = int_word; + in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; + } + Cryp::::write_words(&mut cryp.indma, Self::BLOCK_SIZE, &in_data).await; } } } @@ -845,24 +1015,40 @@ pub enum Direction { } /// Crypto Accelerator Driver -pub struct Cryp<'d, T: Instance> { +pub struct Cryp<'d, T: Instance, DmaIn = NoDma, DmaOut = NoDma> { _peripheral: PeripheralRef<'d, T>, + indma: PeripheralRef<'d, DmaIn>, + outdma: PeripheralRef<'d, DmaOut>, } -impl<'d, T: Instance> Cryp<'d, T> { +impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { /// Create a new CRYP driver. - pub fn new(peri: impl Peripheral
<P = T> + 'd) -> Self { + pub fn new( + peri: impl Peripheral<P = T> + 'd, + indma: impl Peripheral<P = DmaIn> + 'd, + outdma: impl Peripheral<P = DmaOut>
+ 'd, + _irq: impl interrupt::typelevel::Binding> + 'd, + ) -> Self { T::enable_and_reset(); - into_ref!(peri); - let instance = Self { _peripheral: peri }; + into_ref!(peri, indma, outdma); + let instance = Self { + _peripheral: peri, + indma: indma, + outdma: outdma, + }; + + T::Interrupt::unpend(); + unsafe { T::Interrupt::enable() }; + instance } - /// Start a new cipher operation. - /// Key size must be 128, 192, or 256 bits. - /// Initialization vector must only be supplied if necessary. - /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. - pub fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> { + /// Start a new encrypt or decrypt operation for the given cipher. + pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>( + &self, + cipher: &'c C, + dir: Direction, + ) -> Context<'c, C> { let mut ctx: Context<'c, C> = Context { dir, last_block_processed: false, @@ -929,7 +1115,90 @@ impl<'d, T: Instance> Cryp<'d, T> { // Flush in/out FIFOs T::regs().cr().modify(|w| w.fflush()); - ctx.cipher.init_phase(&T::regs()); + ctx.cipher.init_phase_blocking(&T::regs(), self); + + self.store_context(&mut ctx); + + ctx + } + + /// Start a new encrypt or decrypt operation for the given cipher. + pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>( + &mut self, + cipher: &'c C, + dir: Direction, + ) -> Context<'c, C> + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + let mut ctx: Context<'c, C> = Context { + dir, + last_block_processed: false, + cr: 0, + iv: [0; 4], + csgcmccm: [0; 8], + csgcm: [0; 8], + aad_complete: false, + header_len: 0, + payload_len: 0, + cipher: cipher, + phantom_data: PhantomData, + header_processed: false, + aad_buffer: [0; 16], + aad_buffer_len: 0, + }; + + T::regs().cr().modify(|w| w.set_crypen(false)); + + let key = ctx.cipher.key(); + + if key.len() == (128 / 8) { + T::regs().cr().modify(|w| w.set_keysize(0)); + } else if key.len() == (192 / 8) { + T::regs().cr().modify(|w| w.set_keysize(1)); + } else if key.len() == (256 / 8) { + T::regs().cr().modify(|w| w.set_keysize(2)); + } + + self.load_key(key); + + // Set data type to 8-bit. This will match software implementations. + T::regs().cr().modify(|w| w.set_datatype(2)); + + ctx.cipher.prepare_key(&T::regs()); + + ctx.cipher.set_algomode(&T::regs()); + + // Set encrypt/decrypt + if dir == Direction::Encrypt { + T::regs().cr().modify(|w| w.set_algodir(false)); + } else { + T::regs().cr().modify(|w| w.set_algodir(true)); + } + + // Load the IV into the registers. 
+ let iv = ctx.cipher.iv(); + let mut full_iv: [u8; 16] = [0; 16]; + full_iv[0..iv.len()].copy_from_slice(iv); + let mut iv_idx = 0; + let mut iv_word: [u8; 4] = [0; 4]; + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); + + // Flush in/out FIFOs + T::regs().cr().modify(|w| w.fflush()); + + ctx.cipher.init_phase(&T::regs(), self).await; self.store_context(&mut ctx); @@ -938,10 +1207,9 @@ impl<'d, T: Instance> Cryp<'d, T> { #[cfg(any(cryp_v2, cryp_v3))] /// Controls the header phase of cipher processing. - /// This function is only valid for GCM, CCM, and GMAC modes. - /// It only needs to be called if using one of these modes and there is associated data. - /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. - /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. + /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload_blocking`. + /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. /// When supplying the last block of AAD, `last_aad_block` must be `true`. pub fn aad_blocking< 'c, @@ -985,15 +1253,7 @@ impl<'d, T: Instance> Cryp<'d, T> { if ctx.aad_buffer_len < C::BLOCK_SIZE { // The buffer isn't full and this is the last buffer, so process it as is (already padded). if last_aad_block { - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); // Block until input FIFO is empty. while !T::regs().sr().read().ifem() {} @@ -1008,15 +1268,7 @@ impl<'d, T: Instance> Cryp<'d, T> { } } else { // Load the full block from the buffer. - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); // Block until input FIFO is empty. while !T::regs().sr().read().ifem() {} } @@ -1032,33 +1284,108 @@ impl<'d, T: Instance> Cryp<'d, T> { // Load full data blocks into core. let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; - for block in 0..num_full_blocks { - let mut index = len_to_copy + (block * C::BLOCK_SIZE); - let end_index = index + C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&aad[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - // Block until input FIFO is empty. 
- while !T::regs().sr().read().ifem() {} - } + let start_index = len_to_copy; + let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); + self.write_bytes_blocking(C::BLOCK_SIZE, &aad[start_index..end_index]); if last_aad_block { if leftovers > 0 { - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - // Block until input FIFO is empty. - while !T::regs().sr().read().ifem() {} + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); + } + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } + + self.store_context(ctx); + } + + #[cfg(any(cryp_v2, cryp_v3))] + /// Controls the header phase of cipher processing. + /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. + /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload`. + /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. + /// When supplying the last block of AAD, `last_aad_block` must be `true`. + pub async fn aad<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated>( + &mut self, + ctx: &mut Context<'c, C>, + aad: &[u8], + last_aad_block: bool, + ) where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(ctx); + + // Perform checks for correctness. + if ctx.aad_complete { + panic!("Cannot update AAD after starting payload!") + } + + ctx.header_len += aad.len() as u64; + + // Header phase + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + // First write the header B1 block if not yet written. + if !ctx.header_processed { + ctx.header_processed = true; + let header = ctx.cipher.get_header_block(); + ctx.aad_buffer[0..header.len()].copy_from_slice(header); + ctx.aad_buffer_len += header.len(); + } + + // Fill the header block to make a full block. + let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len); + ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]); + ctx.aad_buffer_len += len_to_copy; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + let mut aad_len_remaining = aad.len() - len_to_copy; + + if ctx.aad_buffer_len < C::BLOCK_SIZE { + // The buffer isn't full and this is the last buffer, so process it as is (already padded). + if last_aad_block { + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); + + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } else { + // Just return because we don't yet have a full block to process. + return; + } + } else { + // Load the full block from the buffer. + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); + } + + // Handle a partial block that is passed in. 
+ ctx.aad_buffer_len = 0; + let leftovers = aad_len_remaining % C::BLOCK_SIZE; + ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]); + ctx.aad_buffer_len += leftovers; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + aad_len_remaining -= leftovers; + assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0); + + // Load full data blocks into core. + let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; + let start_index = len_to_copy; + let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &aad[start_index..end_index]).await; + + if last_aad_block { + if leftovers > 0 { + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); } // Switch to payload phase. ctx.aad_complete = true; @@ -1074,7 +1401,7 @@ impl<'d, T: Instance> Cryp<'d, T> { /// The context determines algorithm, mode, and state of the crypto accelerator. /// When the last piece of data is supplied, `last_block` should be `true`. /// This function panics under various mismatches of parameters. - /// Input and output buffer lengths must match. + /// Output buffer must be at least as long as the input buffer. /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. /// Padding or ciphertext stealing must be managed by the application for these modes. /// Data must also be a multiple of block size unless `last_block` is `true`. @@ -1125,54 +1452,23 @@ impl<'d, T: Instance> Cryp<'d, T> { // Load data into core, block by block. let num_full_blocks = input.len() / C::BLOCK_SIZE; for block in 0..num_full_blocks { - let mut index = block * C::BLOCK_SIZE; - let end_index = index + C::BLOCK_SIZE; + let index = block * C::BLOCK_SIZE; // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&input[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - let mut index = block * C::BLOCK_SIZE; - let end_index = index + C::BLOCK_SIZE; - // Block until there is output to read. - while !T::regs().sr().read().ofne() {} + self.write_bytes_blocking(C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]); // Read block out - while index < end_index { - let out_word: u32 = T::regs().dout().read(); - output[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); - index += 4; - } + self.read_bytes_blocking(C::BLOCK_SIZE, &mut output[index..index + C::BLOCK_SIZE]); } // Handle the final block, which is incomplete. if last_block_remainder > 0 { let padding_len = C::BLOCK_SIZE - last_block_remainder; - let temp1 = ctx.cipher.pre_final_block(&T::regs(), ctx.dir, padding_len); + let temp1 = ctx.cipher.pre_final(&T::regs(), ctx.dir, padding_len); let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&last_block[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Block until there is output to read. 
- while !T::regs().sr().read().ofne() {} - // Read block out - while index < end_index { - let out_word: u32 = T::regs().dout().read(); - intermediate_data[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &last_block); + self.read_bytes_blocking(C::BLOCK_SIZE, &mut intermediate_data); // Handle the last block depending on mode. let output_len = output.len(); @@ -1182,7 +1478,106 @@ impl<'d, T: Instance> Cryp<'d, T> { let mut mask: [u8; 16] = [0; 16]; mask[..last_block_remainder].fill(0xFF); ctx.cipher - .post_final_block(&T::regs(), ctx.dir, &mut intermediate_data, temp1, mask); + .post_final_blocking(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask); + } + + ctx.payload_len += input.len() as u64; + + self.store_context(ctx); + } + + /// Performs encryption/decryption on the provided context. + /// The context determines algorithm, mode, and state of the crypto accelerator. + /// When the last piece of data is supplied, `last_block` should be `true`. + /// This function panics under various mismatches of parameters. + /// Output buffer must be at least as long as the input buffer. + /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. + /// Padding or ciphertext stealing must be managed by the application for these modes. + /// Data must also be a multiple of block size unless `last_block` is `true`. + pub async fn payload<'c, C: Cipher<'c> + CipherSized + IVSized>( + &mut self, + ctx: &mut Context<'c, C>, + input: &[u8], + output: &mut [u8], + last_block: bool, + ) where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(ctx); + + let last_block_remainder = input.len() % C::BLOCK_SIZE; + + // Perform checks for correctness. + if !ctx.aad_complete && ctx.header_len > 0 { + panic!("Additional associated data must be processed first!"); + } else if !ctx.aad_complete { + #[cfg(any(cryp_v2, cryp_v3))] + { + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + T::regs().cr().modify(|w| w.set_crypen(true)); + } + } + if ctx.last_block_processed { + panic!("The last block has already been processed!"); + } + if input.len() > output.len() { + panic!("Output buffer length must match input length."); + } + if !last_block { + if last_block_remainder != 0 { + panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE); + } + } + if C::REQUIRES_PADDING { + if last_block_remainder != 0 { + panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE); + } + } + if last_block { + ctx.last_block_processed = true; + } + + // Load data into core, block by block. + let num_full_blocks = input.len() / C::BLOCK_SIZE; + for block in 0..num_full_blocks { + let index = block * C::BLOCK_SIZE; + // Read block out + let read = Self::read_bytes( + &mut self.outdma, + C::BLOCK_SIZE, + &mut output[index..index + C::BLOCK_SIZE], + ); + // Write block in + let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]); + embassy_futures::join::join(read, write).await; + } + + // Handle the final block, which is incomplete. 
+ if last_block_remainder > 0 { + let padding_len = C::BLOCK_SIZE - last_block_remainder; + let temp1 = ctx.cipher.pre_final(&T::regs(), ctx.dir, padding_len); + + let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut intermediate_data); + let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &last_block); + embassy_futures::join::join(read, write).await; + + // Handle the last block depending on mode. + let output_len = output.len(); + output[output_len - last_block_remainder..output_len] + .copy_from_slice(&intermediate_data[0..last_block_remainder]); + + let mut mask: [u8; 16] = [0; 16]; + mask[..last_block_remainder].fill(0xFF); + ctx.cipher + .post_final(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask) + .await; } ctx.payload_len += input.len() as u64; @@ -1191,8 +1586,8 @@ impl<'d, T: Instance> Cryp<'d, T> { } #[cfg(any(cryp_v2, cryp_v3))] - /// This function only needs to be called for GCM, CCM, and GMAC modes to - /// generate an authentication tag. + /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. + /// Called after the all data has been encrypted/decrypted by `payload`. pub fn finish_blocking< 'c, const TAG_SIZE: usize, @@ -1213,28 +1608,72 @@ impl<'d, T: Instance> Cryp<'d, T> { let payloadlen2: u32 = (ctx.payload_len * 8) as u32; #[cfg(cryp_v2)] - { - T::regs().din().write_value(headerlen1.swap_bytes()); - T::regs().din().write_value(headerlen2.swap_bytes()); - T::regs().din().write_value(payloadlen1.swap_bytes()); - T::regs().din().write_value(payloadlen2.swap_bytes()); - } - + let footer: [u32; 4] = [ + headerlen1.swap_bytes(), + headerlen2.swap_bytes(), + payloadlen1.swap_bytes(), + payloadlen2.swap_bytes(), + ]; #[cfg(cryp_v3)] - { - T::regs().din().write_value(headerlen1); - T::regs().din().write_value(headerlen2); - T::regs().din().write_value(payloadlen1); - T::regs().din().write_value(payloadlen2); - } + let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; + + self.write_words_blocking(C::BLOCK_SIZE, &footer); while !T::regs().sr().read().ofne() {} let mut full_tag: [u8; 16] = [0; 16]; - full_tag[0..4].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[4..8].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[8..12].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[12..16].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + self.read_bytes_blocking(C::BLOCK_SIZE, &mut full_tag); + let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; + tag.copy_from_slice(&full_tag[0..TAG_SIZE]); + + T::regs().cr().modify(|w| w.set_crypen(false)); + + tag + } + + #[cfg(any(cryp_v2, cryp_v3))] + // Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. + /// Called after the all data has been encrypted/decrypted by `payload`. 
+ pub async fn finish< + 'c, + const TAG_SIZE: usize, + C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated, + >( + &mut self, + mut ctx: Context<'c, C>, + ) -> [u8; TAG_SIZE] + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(&mut ctx); + + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + let headerlen1: u32 = ((ctx.header_len * 8) >> 32) as u32; + let headerlen2: u32 = (ctx.header_len * 8) as u32; + let payloadlen1: u32 = ((ctx.payload_len * 8) >> 32) as u32; + let payloadlen2: u32 = (ctx.payload_len * 8) as u32; + + #[cfg(cryp_v2)] + let footer: [u32; 4] = [ + headerlen1.swap_bytes(), + headerlen2.swap_bytes(), + payloadlen1.swap_bytes(), + payloadlen2.swap_bytes(), + ]; + #[cfg(cryp_v3)] + let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; + + let write = Self::write_words(&mut self.indma, C::BLOCK_SIZE, &footer); + + let mut full_tag: [u8; 16] = [0; 16]; + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut full_tag); + + embassy_futures::join::join(read, write).await; + let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; tag.copy_from_slice(&full_tag[0..TAG_SIZE]); @@ -1325,6 +1764,125 @@ impl<'d, T: Instance> Cryp<'d, T> { // Enable crypto processor. T::regs().cr().modify(|w| w.set_crypen(true)); } + + fn write_bytes_blocking(&self, block_size: usize, blocks: &[u8]) { + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + let mut index = 0; + let end_index = blocks.len(); + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&blocks[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + if index % block_size == 0 { + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + } + } + + async fn write_bytes(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u8]) + where + DmaIn: crate::cryp::DmaIn, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + // Configure DMA to transfer input to crypto core. + let dma_request = dma.request(); + let dst_ptr = T::regs().din().as_ptr(); + let num_words = blocks.len() / 4; + let src_ptr = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::High, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_dien(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } + + #[cfg(any(cryp_v2, cryp_v3))] + fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) { + assert_eq!((blocks.len() * 4) % block_size, 0); + let mut byte_counter: usize = 0; + for word in blocks { + T::regs().din().write_value(*word); + byte_counter += 4; + if byte_counter % block_size == 0 { + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + } + } + + #[cfg(any(cryp_v2, cryp_v3))] + async fn write_words(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u32]) + where + DmaIn: crate::cryp::DmaIn, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!((blocks.len() * 4) % block_size, 0); + // Configure DMA to transfer input to crypto core. 
+ let dma_request = dma.request(); + let dst_ptr = T::regs().din().as_ptr(); + let num_words = blocks.len(); + let src_ptr = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::High, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_dien(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } + + fn read_bytes_blocking(&self, block_size: usize, blocks: &mut [u8]) { + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + // Read block out + let mut index = 0; + let end_index = blocks.len(); + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + blocks[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + } + + async fn read_bytes(dma: &mut PeripheralRef<'_, DmaOut>, block_size: usize, blocks: &mut [u8]) + where + DmaOut: crate::cryp::DmaOut, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + // Configure DMA to get output from crypto core. + let dma_request = dma.request(); + let src_ptr = T::regs().dout().as_ptr(); + let num_words = blocks.len() / 4; + let dst_ptr = ptr::slice_from_raw_parts_mut(blocks.as_mut_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::VeryHigh, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_read_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_doen(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } } pub(crate) mod sealed { @@ -1354,3 +1912,6 @@ foreach_interrupt!( } }; ); + +dma_trait!(DmaIn, Instance); +dma_trait!(DmaOut, Instance); diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs index 04927841a..235853cb9 100644 --- a/examples/stm32f7/src/bin/cryp.rs +++ b/examples/stm32f7/src/bin/cryp.rs @@ -6,11 +6,15 @@ use aes_gcm::aead::{AeadInPlace, KeyInit}; use aes_gcm::Aes128Gcm; use defmt::info; use embassy_executor::Spawner; -use embassy_stm32::cryp::*; -use embassy_stm32::Config; +use embassy_stm32::cryp::{self, *}; +use embassy_stm32::{bind_interrupts, peripherals, Config}; use embassy_time::Instant; use {defmt_rtt as _, panic_probe as _}; +bind_interrupts!(struct Irqs { + CRYP => cryp::InterruptHandler; +}); + #[embassy_executor::main] async fn main(_spawner: Spawner) -> ! { let config = Config::default(); @@ -19,7 +23,7 @@ async fn main(_spawner: Spawner) -> ! { let payload: &[u8] = b"hello world"; let aad: &[u8] = b"additional data"; - let hw_cryp = Cryp::new(p.CRYP); + let mut hw_cryp = Cryp::new(p.CRYP, p.DMA2_CH6, p.DMA2_CH5, Irqs); let key: [u8; 16] = [0; 16]; let mut ciphertext: [u8; 11] = [0; 11]; let mut plaintext: [u8; 11] = [0; 11]; @@ -29,16 +33,18 @@ async fn main(_spawner: Spawner) -> ! 
{ // Encrypt in hardware using AES-GCM 128-bit let aes_gcm = AesGcm::new(&key, &iv); - let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); - hw_cryp.aad_blocking(&mut gcm_encrypt, aad, true); - hw_cryp.payload_blocking(&mut gcm_encrypt, payload, &mut ciphertext, true); - let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); + let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt).await; + hw_cryp.aad(&mut gcm_encrypt, aad, true).await; + hw_cryp.payload(&mut gcm_encrypt, payload, &mut ciphertext, true).await; + let encrypt_tag = hw_cryp.finish(gcm_encrypt).await; // Decrypt in hardware using AES-GCM 128-bit - let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); - hw_cryp.aad_blocking(&mut gcm_decrypt, aad, true); - hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); - let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt).await; + hw_cryp.aad(&mut gcm_decrypt, aad, true).await; + hw_cryp + .payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true) + .await; + let decrypt_tag = hw_cryp.finish(gcm_decrypt).await; let hw_end_time = Instant::now(); let hw_execution_time = hw_end_time - hw_start_time; diff --git a/tests/stm32/src/bin/cryp.rs b/tests/stm32/src/bin/cryp.rs index f105abf26..60778bdaa 100644 --- a/tests/stm32/src/bin/cryp.rs +++ b/tests/stm32/src/bin/cryp.rs @@ -10,9 +10,14 @@ use aes_gcm::aead::{AeadInPlace, KeyInit}; use aes_gcm::Aes128Gcm; use common::*; use embassy_executor::Spawner; -use embassy_stm32::cryp::*; +use embassy_stm32::cryp::{self, *}; +use embassy_stm32::{bind_interrupts, peripherals}; use {defmt_rtt as _, panic_probe as _}; +bind_interrupts!(struct Irqs { + CRYP => cryp::InterruptHandler; +}); + #[embassy_executor::main] async fn main(_spawner: Spawner) { let p: embassy_stm32::Peripherals = embassy_stm32::init(config()); @@ -22,27 +27,32 @@ async fn main(_spawner: Spawner) { const AAD1: &[u8] = b"additional data 1 stdargadrhaethaethjatjatjaetjartjstrjsfkk;'jopofyuisrteytweTASTUIKFUKIXTRDTEREharhaeryhaterjartjarthaethjrtjarthaetrhartjatejatrjsrtjartjyt1"; const AAD2: &[u8] = b"additional data 2 stdhthsthsthsrthsrthsrtjdykjdukdyuldadfhsdghsdghsdghsadghjk'hioethjrtjarthaetrhartjatecfgjhzdfhgzdfhzdfghzdfhzdfhzfhjatrjsrtjartjytjfytjfyg"; - let hw_cryp = Cryp::new(p.CRYP); + let in_dma = peri!(p, CRYP_IN_DMA); + let out_dma = peri!(p, CRYP_OUT_DMA); + + let mut hw_cryp = Cryp::new(p.CRYP, in_dma, out_dma, Irqs); let key: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let mut ciphertext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; let mut plaintext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; let iv: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - // Encrypt in hardware using AES-GCM 128-bit + // Encrypt in hardware using AES-GCM 128-bit in blocking mode. 
let aes_gcm = AesGcm::new(&key, &iv); - let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); + let mut gcm_encrypt = hw_cryp.start_blocking(&aes_gcm, Direction::Encrypt); hw_cryp.aad_blocking(&mut gcm_encrypt, AAD1, false); hw_cryp.aad_blocking(&mut gcm_encrypt, AAD2, true); hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD1, &mut ciphertext[..PAYLOAD1.len()], false); hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD2, &mut ciphertext[PAYLOAD1.len()..], true); let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); - // Decrypt in hardware using AES-GCM 128-bit - let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); - hw_cryp.aad_blocking(&mut gcm_decrypt, AAD1, false); - hw_cryp.aad_blocking(&mut gcm_decrypt, AAD2, true); - hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); - let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + // Decrypt in hardware using AES-GCM 128-bit in async (DMA) mode. + let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt).await; + hw_cryp.aad(&mut gcm_decrypt, AAD1, false).await; + hw_cryp.aad(&mut gcm_decrypt, AAD2, true).await; + hw_cryp + .payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true) + .await; + let decrypt_tag = hw_cryp.finish(gcm_decrypt).await; info!("AES-GCM Ciphertext: {:?}", ciphertext); info!("AES-GCM Plaintext: {:?}", plaintext); diff --git a/tests/stm32/src/common.rs b/tests/stm32/src/common.rs index 3297ea7e2..c379863a8 100644 --- a/tests/stm32/src/common.rs +++ b/tests/stm32/src/common.rs @@ -140,6 +140,7 @@ define_peris!( ); #[cfg(any(feature = "stm32h755zi", feature = "stm32h753zi"))] define_peris!( + CRYP_IN_DMA = DMA1_CH0, CRYP_OUT_DMA = DMA1_CH1, UART = USART1, UART_TX = PB6, UART_RX = PB7, UART_TX_DMA = DMA1_CH0, UART_RX_DMA = DMA1_CH1, SPI = SPI1, SPI_SCK = PA5, SPI_MOSI = PB5, SPI_MISO = PA6, SPI_TX_DMA = DMA1_CH0, SPI_RX_DMA = DMA1_CH1, ADC = ADC1, DAC = DAC1, DAC_PIN = PA4,
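For reference, a minimal usage sketch of the async API introduced by this change, condensed from the stm32f7 example above. The DMA channel pair (DMA2_CH6 for input, DMA2_CH5 for output) and the AES-GCM parameters are taken from that example and are chip/board specific; this is not part of the diff itself.

#![no_std]
#![no_main]

use embassy_executor::Spawner;
use embassy_stm32::cryp::{self, AesGcm, Cryp, Direction};
use embassy_stm32::{bind_interrupts, peripherals};
use {defmt_rtt as _, panic_probe as _};

// Route the CRYP interrupt to the driver's handler.
bind_interrupts!(struct Irqs {
    CRYP => cryp::InterruptHandler<peripherals::CRYP>;
});

#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    let p = embassy_stm32::init(Default::default());

    // IN and OUT DMA channels come from the chip's DMA request map.
    let mut hw_cryp = Cryp::new(p.CRYP, p.DMA2_CH6, p.DMA2_CH5, Irqs);

    let key: [u8; 16] = [0; 16];
    let iv: [u8; 12] = [0; 12];
    let aes_gcm = AesGcm::new(&key, &iv);

    let payload: &[u8] = b"hello world";
    let aad: &[u8] = b"additional data";
    let mut ciphertext: [u8; 11] = [0; 11];

    // AAD first, then payload, then collect the authentication tag.
    let mut ctx = hw_cryp.start(&aes_gcm, Direction::Encrypt).await;
    hw_cryp.aad(&mut ctx, aad, true).await;
    hw_cryp.payload(&mut ctx, payload, &mut ciphertext, true).await;
    let _tag: [u8; 16] = hw_cryp.finish(ctx).await;
}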