From 6e9e8eeb5f6458833b28a08e7480b2630107d79c Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:25:56 -0500 Subject: [PATCH 1/5] Refactored cryp din/dout into functions. --- embassy-stm32/src/cryp/mod.rs | 276 +++++++++++++++---------------- examples/stm32f7/src/bin/cryp.rs | 14 +- 2 files changed, 144 insertions(+), 146 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 8f259520a..12353baa0 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -4,12 +4,35 @@ use core::cmp::min; use core::marker::PhantomData; use embassy_hal_internal::{into_ref, PeripheralRef}; +use embassy_sync::waitqueue::AtomicWaker; -use crate::{interrupt, pac, peripherals, Peripheral}; +use crate::interrupt::typelevel::Interrupt; +use crate::{dma::NoDma, interrupt, pac, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits +static CRYP_WAKER: AtomicWaker = AtomicWaker::new(); + +/// CRYP interrupt handler. +pub struct InterruptHandler { + _phantom: PhantomData, +} + +impl interrupt::typelevel::Handler for InterruptHandler { + unsafe fn on_interrupt() { + let bits = T::regs().misr().read(); + if bits.inmis() { + T::regs().imscr().modify(|w| w.set_inim(false)); + CRYP_WAKER.wake(); + } + if bits.outmis() { + T::regs().imscr().modify(|w| w.set_outim(false)); + CRYP_WAKER.wake(); + } + } +} + /// This trait encapsulates all cipher-specific behavior/ pub trait Cipher<'c> { /// Processing block size. Determined by the processor and the algorithm. @@ -32,7 +55,7 @@ pub trait Cipher<'c> { fn prepare_key(&self, _p: &pac::cryp::Cryp) {} /// Performs any cipher-specific initialization. - fn init_phase(&self, _p: &pac::cryp::Cryp) {} + fn init_phase(&self, _p: &pac::cryp::Cryp, _cryp: &Cryp) {} /// Called prior to processing the last data block for cipher-specific operations. fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { @@ -40,9 +63,10 @@ pub trait Cipher<'c> { } /// Called after processing the last data block for cipher-specific operations. 
- fn post_final_block( + fn post_final_block( &self, _p: &pac::cryp::Cryp, + _cryp: &Cryp, _dir: Direction, _int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -425,7 +449,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} @@ -453,9 +477,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_block( &self, p: &pac::cryp::Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -471,17 +496,9 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } p.cr().modify(|w| w.set_crypen(true)); p.cr().modify(|w| w.set_gcm_ccmph(3)); - let mut index = 0; - let end_index = Self::BLOCK_SIZE; - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&int_data[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - for _ in 0..4 { - p.dout().read(); - } + + cryp.write_bytes_blocking(Self::BLOCK_SIZE, int_data); + cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); } } } @@ -532,7 +549,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} @@ -560,9 +577,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_block( &self, p: &pac::cryp::Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -578,17 +596,9 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { } p.cr().modify(|w| w.set_crypen(true)); p.cr().modify(|w| w.set_gcm_ccmph(3)); - let mut index = 0; - let end_index = Self::BLOCK_SIZE; - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&int_data[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - for _ in 0..4 { - p.dout().read(); - } + + cryp.write_bytes_blocking(Self::BLOCK_SIZE, int_data); + cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); } } } @@ -697,18 +707,11 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp) { + fn init_phase(&self, p: &pac::cryp::Cryp, cryp: &Cryp) { p.cr().modify(|w| w.set_gcm_ccmph(0)); - let mut index = 0; - let end_index = index + Self::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&self.block0[index..index + 4]); - p.din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + cryp.write_bytes_blocking(Self::BLOCK_SIZE, &self.block0); + p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } @@ -744,9 +747,10 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_block( &self, p: &pac::cryp::Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], temp1: [u32; 4], @@ 
-774,8 +778,8 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip let int_word = u32::from_le_bytes(int_bytes); in_data[i] = int_word; in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; - p.din().write_value(in_data[i]); } + cryp.write_words_blocking(Self::BLOCK_SIZE, &in_data); } } } @@ -845,16 +849,31 @@ pub enum Direction { } /// Crypto Accelerator Driver -pub struct Cryp<'d, T: Instance> { +pub struct Cryp<'d, T: Instance, D = NoDma> { _peripheral: PeripheralRef<'d, T>, + indma: PeripheralRef<'d, D>, + outdma: PeripheralRef<'d, D>, } -impl<'d, T: Instance> Cryp<'d, T> { +impl<'d, T: Instance, D> Cryp<'d, T, D> { /// Create a new CRYP driver. - pub fn new(peri: impl Peripheral
<P = T>
+ 'd) -> Self { + pub fn new( + peri: impl Peripheral
<P = T>
+ 'd, + indma: impl Peripheral
<P = D>
+ 'd, + outdma: impl Peripheral
<P = D>
+ 'd, + _irq: impl interrupt::typelevel::Binding> + 'd, + ) -> Self { T::enable_and_reset(); - into_ref!(peri); - let instance = Self { _peripheral: peri }; + into_ref!(peri, indma, outdma); + let instance = Self { + _peripheral: peri, + indma: indma, + outdma: outdma, + }; + + T::Interrupt::unpend(); + unsafe { T::Interrupt::enable() }; + instance } @@ -929,7 +948,7 @@ impl<'d, T: Instance> Cryp<'d, T> { // Flush in/out FIFOs T::regs().cr().modify(|w| w.fflush()); - ctx.cipher.init_phase(&T::regs()); + ctx.cipher.init_phase(&T::regs(), self); self.store_context(&mut ctx); @@ -985,15 +1004,7 @@ impl<'d, T: Instance> Cryp<'d, T> { if ctx.aad_buffer_len < C::BLOCK_SIZE { // The buffer isn't full and this is the last buffer, so process it as is (already padded). if last_aad_block { - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); // Block until input FIFO is empty. while !T::regs().sr().read().ifem() {} @@ -1008,15 +1019,7 @@ impl<'d, T: Instance> Cryp<'d, T> { } } else { // Load the full block from the buffer. - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); // Block until input FIFO is empty. while !T::regs().sr().read().ifem() {} } @@ -1032,33 +1035,13 @@ impl<'d, T: Instance> Cryp<'d, T> { // Load full data blocks into core. let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; - for block in 0..num_full_blocks { - let mut index = len_to_copy + (block * C::BLOCK_SIZE); - let end_index = index + C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&aad[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - // Block until input FIFO is empty. - while !T::regs().sr().read().ifem() {} - } + let start_index = len_to_copy; + let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); + self.write_bytes_blocking(C::BLOCK_SIZE, &aad[start_index..end_index]); if last_aad_block { if leftovers > 0 { - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - // Block until input FIFO is empty. - while !T::regs().sr().read().ifem() {} + self.write_bytes_blocking(C::BLOCK_SIZE, &ctx.aad_buffer); } // Switch to payload phase. ctx.aad_complete = true; @@ -1125,25 +1108,11 @@ impl<'d, T: Instance> Cryp<'d, T> { // Load data into core, block by block. 
let num_full_blocks = input.len() / C::BLOCK_SIZE; for block in 0..num_full_blocks { - let mut index = block * C::BLOCK_SIZE; - let end_index = index + C::BLOCK_SIZE; + let index = block * C::BLOCK_SIZE; // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&input[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - let mut index = block * C::BLOCK_SIZE; - let end_index = index + C::BLOCK_SIZE; - // Block until there is output to read. - while !T::regs().sr().read().ofne() {} + self.write_bytes_blocking(C::BLOCK_SIZE, &input[index..index + 4]); // Read block out - while index < end_index { - let out_word: u32 = T::regs().dout().read(); - output[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); - index += 4; - } + self.read_bytes_blocking(C::BLOCK_SIZE, &mut output[index..index + 4]); } // Handle the final block, which is incomplete. @@ -1154,25 +1123,8 @@ impl<'d, T: Instance> Cryp<'d, T> { let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Write block in - while index < end_index { - let mut in_word: [u8; 4] = [0; 4]; - in_word.copy_from_slice(&last_block[index..index + 4]); - T::regs().din().write_value(u32::from_ne_bytes(in_word)); - index += 4; - } - let mut index = 0; - let end_index = C::BLOCK_SIZE; - // Block until there is output to read. - while !T::regs().sr().read().ofne() {} - // Read block out - while index < end_index { - let out_word: u32 = T::regs().dout().read(); - intermediate_data[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); - index += 4; - } + self.write_bytes_blocking(C::BLOCK_SIZE, &last_block); + self.read_bytes_blocking(C::BLOCK_SIZE, &mut intermediate_data); // Handle the last block depending on mode. 
let output_len = output.len(); @@ -1182,7 +1134,7 @@ impl<'d, T: Instance> Cryp<'d, T> { let mut mask: [u8; 16] = [0; 16]; mask[..last_block_remainder].fill(0xFF); ctx.cipher - .post_final_block(&T::regs(), ctx.dir, &mut intermediate_data, temp1, mask); + .post_final_block(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask); } ctx.payload_len += input.len() as u64; @@ -1213,28 +1165,21 @@ impl<'d, T: Instance> Cryp<'d, T> { let payloadlen2: u32 = (ctx.payload_len * 8) as u32; #[cfg(cryp_v2)] - { - T::regs().din().write_value(headerlen1.swap_bytes()); - T::regs().din().write_value(headerlen2.swap_bytes()); - T::regs().din().write_value(payloadlen1.swap_bytes()); - T::regs().din().write_value(payloadlen2.swap_bytes()); - } - + let footer: [u32; 4] = [ + headerlen1.swap_bytes(), + headerlen2.swap_bytes(), + payloadlen1.swap_bytes(), + payloadlen2.swap_bytes(), + ]; #[cfg(cryp_v3)] - { - T::regs().din().write_value(headerlen1); - T::regs().din().write_value(headerlen2); - T::regs().din().write_value(payloadlen1); - T::regs().din().write_value(payloadlen2); - } + let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; + + self.write_words_blocking(C::BLOCK_SIZE, &footer); while !T::regs().sr().read().ofne() {} let mut full_tag: [u8; 16] = [0; 16]; - full_tag[0..4].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[4..8].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[8..12].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); - full_tag[12..16].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + self.read_bytes_blocking(C::BLOCK_SIZE, &mut full_tag); let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; tag.copy_from_slice(&full_tag[0..TAG_SIZE]); @@ -1325,6 +1270,51 @@ impl<'d, T: Instance> Cryp<'d, T> { // Enable crypto processor. T::regs().cr().modify(|w| w.set_crypen(true)); } + + fn write_bytes_blocking(&self, block_size: usize, blocks: &[u8]) { + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + let mut index = 0; + let end_index = blocks.len(); + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&blocks[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + if index % block_size == 0 { + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + } + } + + fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) { + assert_eq!((blocks.len() * 4) % block_size, 0); + let mut byte_counter: usize = 0; + for word in blocks { + T::regs().din().write_value(*word); + byte_counter += 4; + if byte_counter % block_size == 0 { + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + } + } + + fn read_bytes_blocking(&self, block_size: usize, blocks: &mut [u8]) { + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Ensure input is a multiple of block size. 
+ assert_eq!(blocks.len() % block_size, 0); + // Read block out + let mut index = 0; + let end_index = blocks.len(); + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + blocks[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + } } pub(crate) mod sealed { diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs index 04927841a..79b74e569 100644 --- a/examples/stm32f7/src/bin/cryp.rs +++ b/examples/stm32f7/src/bin/cryp.rs @@ -6,11 +6,19 @@ use aes_gcm::aead::{AeadInPlace, KeyInit}; use aes_gcm::Aes128Gcm; use defmt::info; use embassy_executor::Spawner; -use embassy_stm32::cryp::*; -use embassy_stm32::Config; +use embassy_stm32::dma::NoDma; +use embassy_stm32::{ + bind_interrupts, + cryp::{self, *}, +}; +use embassy_stm32::{peripherals, Config}; use embassy_time::Instant; use {defmt_rtt as _, panic_probe as _}; +bind_interrupts!(struct Irqs { + CRYP => cryp::InterruptHandler; +}); + #[embassy_executor::main] async fn main(_spawner: Spawner) -> ! { let config = Config::default(); @@ -19,7 +27,7 @@ async fn main(_spawner: Spawner) -> ! { let payload: &[u8] = b"hello world"; let aad: &[u8] = b"additional data"; - let hw_cryp = Cryp::new(p.CRYP); + let hw_cryp = Cryp::new(p.CRYP, NoDma, NoDma, Irqs); let key: [u8; 16] = [0; 16]; let mut ciphertext: [u8; 11] = [0; 11]; let mut plaintext: [u8; 11] = [0; 11]; From 61050a16d5f02a7db718c6e39c811e6e434b032b Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:01:14 -0400 Subject: [PATCH 2/5] Add CRYP DMA support. Updated example. --- embassy-stm32/build.rs | 2 + embassy-stm32/src/cryp/mod.rs | 613 +++++++++++++++++++++++++++++-- examples/stm32f7/src/bin/cryp.rs | 19 +- 3 files changed, 597 insertions(+), 37 deletions(-) diff --git a/embassy-stm32/build.rs b/embassy-stm32/build.rs index 84e8be25d..70f4515db 100644 --- a/embassy-stm32/build.rs +++ b/embassy-stm32/build.rs @@ -1121,6 +1121,8 @@ fn main() { (("dac", "CH2"), quote!(crate::dac::DacDma2)), (("timer", "UP"), quote!(crate::timer::UpDma)), (("hash", "IN"), quote!(crate::hash::Dma)), + (("cryp", "IN"), quote!(crate::cryp::DmaIn)), + (("cryp", "OUT"), quote!(crate::cryp::DmaOut)), (("timer", "CH1"), quote!(crate::timer::Ch1Dma)), (("timer", "CH2"), quote!(crate::timer::Ch2Dma)), (("timer", "CH3"), quote!(crate::timer::Ch3Dma)), diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 12353baa0..1a601533d 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -2,12 +2,14 @@ #[cfg(any(cryp_v2, cryp_v3))] use core::cmp::min; use core::marker::PhantomData; +use core::ptr; use embassy_hal_internal::{into_ref, PeripheralRef}; use embassy_sync::waitqueue::AtomicWaker; +use crate::dma::{NoDma, Priority, Transfer, TransferOptions}; use crate::interrupt::typelevel::Interrupt; -use crate::{dma::NoDma, interrupt, pac, peripherals, Peripheral}; +use crate::{interrupt, pac, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits @@ -55,18 +57,25 @@ pub trait Cipher<'c> { fn prepare_key(&self, _p: &pac::cryp::Cryp) {} /// Performs any cipher-specific initialization. - fn init_phase(&self, _p: &pac::cryp::Cryp, _cryp: &Cryp) {} + fn init_phase_blocking(&self, _p: &pac::cryp::Cryp, _cryp: &Cryp) {} + + /// Performs any cipher-specific initialization. 
+ async fn init_phase(&self, _p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + {} /// Called prior to processing the last data block for cipher-specific operations. - fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] { return [0; 4]; } /// Called after processing the last data block for cipher-specific operations. - fn post_final_block( + fn post_final_blocking( &self, _p: &pac::cryp::Cryp, - _cryp: &Cryp, + _cryp: &Cryp, _dir: Direction, _int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -74,6 +83,21 @@ pub trait Cipher<'c> { ) { } + /// Called after processing the last data block for cipher-specific operations. + async fn post_final( + &self, + _p: &pac::cryp::Cryp, + _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + _dir: Direction, + _int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + _padding_mask: [u8; 16], + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + {} + /// Called prior to processing the first associated data block for cipher-specific operations. fn get_header_block(&self) -> &[u8] { return [0; 0].as_slice(); @@ -449,14 +473,20 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { + fn init_phase_blocking(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + async fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. if dir == Direction::Encrypt { p.cr().modify(|w| w.set_crypen(false)); @@ -477,10 +507,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking( &self, p: &pac::cryp::Cryp, - cryp: &Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -501,6 +531,43 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); } } + + #[cfg(cryp_v2)] + async fn post_final( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + + if dir == Direction::Encrypt { + // Handle special GCM partial block process. 
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + + let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + + let read = Cryp::::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); + let write = Cryp::::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); + + embassy_futures::join::join(read, write).await; + + int_data.copy_from_slice(&out_data); + } + } } #[cfg(any(cryp_v2, cryp_v3))] @@ -549,14 +616,20 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { + fn init_phase_blocking(&self, p: &pac::cryp::Cryp, _cryp: &Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + async fn init_phase(&self, p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) { p.cr().modify(|w| w.set_gcm_ccmph(0)); p.cr().modify(|w| w.set_crypen(true)); while p.cr().read().crypen() {} } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. if dir == Direction::Encrypt { p.cr().modify(|w| w.set_crypen(false)); @@ -577,10 +650,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking( &self, p: &pac::cryp::Cryp, - cryp: &Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], _temp1: [u32; 4], @@ -601,6 +674,41 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { cryp.read_bytes_blocking(Self::BLOCK_SIZE, int_data); } } + + #[cfg(cryp_v2)] + async fn post_final( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + + if dir == Direction::Encrypt { + // Handle special GCM partial block process. 
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + + let mut out_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + + let read = Cryp::::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data); + let write = Cryp::::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data); + + embassy_futures::join::join(read, write).await; + } + } } #[cfg(any(cryp_v2, cryp_v3))] @@ -707,7 +815,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip p.cr().modify(|w| w.set_algomode3(true)); } - fn init_phase(&self, p: &pac::cryp::Cryp, cryp: &Cryp) { + fn init_phase_blocking(&self, p: &pac::cryp::Cryp, cryp: &Cryp) { p.cr().modify(|w| w.set_gcm_ccmph(0)); cryp.write_bytes_blocking(Self::BLOCK_SIZE, &self.block0); @@ -716,12 +824,25 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip while p.cr().read().crypen() {} } + async fn init_phase(&self, p: &pac::cryp::Cryp, cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + + Cryp::::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, &self.block0).await; + + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + fn get_header_block(&self) -> &[u8] { return &self.aad_header[0..self.aad_header_len]; } #[cfg(cryp_v2)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { //Handle special CCM partial block process. let mut temp1 = [0; 4]; if dir == Direction::Decrypt { @@ -747,10 +868,10 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip } #[cfg(cryp_v2)] - fn post_final_block( + fn post_final_blocking( &self, p: &pac::cryp::Cryp, - cryp: &Cryp, + cryp: &Cryp, dir: Direction, int_data: &mut [u8; AES_BLOCK_SIZE], temp1: [u32; 4], @@ -782,6 +903,47 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip cryp.write_words_blocking(Self::BLOCK_SIZE, &in_data); } } + + #[cfg(cryp_v2)] + async fn post_final( + &self, + p: &pac::cryp::Cryp, + cryp: &mut Cryp<'_, T, DmaIn, DmaOut>, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + temp1: [u32; 4], + padding_mask: [u8; 16], + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + if dir == Direction::Decrypt { + //Handle special CCM partial block process. 
+ let mut temp2 = [0; 4]; + temp2[0] = p.csgcmccmr(0).read().swap_bytes(); + temp2[1] = p.csgcmccmr(1).read().swap_bytes(); + temp2[2] = p.csgcmccmr(2).read().swap_bytes(); + temp2[3] = p.csgcmccmr(3).read().swap_bytes(); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(1)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + // Header phase + p.cr().modify(|w| w.set_gcm_ccmph(1)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + let mut in_data: [u32; 4] = [0; 4]; + for i in 0..in_data.len() { + let mut int_bytes: [u8; 4] = [0; 4]; + int_bytes.copy_from_slice(&int_data[(i * 4)..(i * 4) + 4]); + let int_word = u32::from_le_bytes(int_bytes); + in_data[i] = int_word; + in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i]; + } + Cryp::::write_words(&mut cryp.indma, Self::BLOCK_SIZE, &in_data).await; + } + } } #[cfg(any(cryp_v2, cryp_v3))] @@ -849,18 +1011,18 @@ pub enum Direction { } /// Crypto Accelerator Driver -pub struct Cryp<'d, T: Instance, D = NoDma> { +pub struct Cryp<'d, T: Instance, DmaIn = NoDma, DmaOut = NoDma> { _peripheral: PeripheralRef<'d, T>, - indma: PeripheralRef<'d, D>, - outdma: PeripheralRef<'d, D>, + indma: PeripheralRef<'d, DmaIn>, + outdma: PeripheralRef<'d, DmaOut>, } -impl<'d, T: Instance, D> Cryp<'d, T, D> { +impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { /// Create a new CRYP driver. pub fn new( peri: impl Peripheral
<P = T>
+ 'd, - indma: impl Peripheral
<P = D>
+ 'd, - outdma: impl Peripheral
<P = D>
+ 'd, + indma: impl Peripheral
<P = DmaIn>
+ 'd, + outdma: impl Peripheral
<P = DmaOut>
+ 'd, _irq: impl interrupt::typelevel::Binding> + 'd, ) -> Self { T::enable_and_reset(); @@ -881,7 +1043,7 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { /// Key size must be 128, 192, or 256 bits. /// Initialization vector must only be supplied if necessary. /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. - pub fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> { + pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> { let mut ctx: Context<'c, C> = Context { dir, last_block_processed: false, @@ -948,7 +1110,89 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { // Flush in/out FIFOs T::regs().cr().modify(|w| w.fflush()); - ctx.cipher.init_phase(&T::regs(), self); + ctx.cipher.init_phase_blocking(&T::regs(), self); + + self.store_context(&mut ctx); + + ctx + } + + /// Start a new cipher operation. + /// Key size must be 128, 192, or 256 bits. + /// Initialization vector must only be supplied if necessary. + /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. + pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&mut self, cipher: &'c C, dir: Direction) -> Context<'c, C> + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + let mut ctx: Context<'c, C> = Context { + dir, + last_block_processed: false, + cr: 0, + iv: [0; 4], + csgcmccm: [0; 8], + csgcm: [0; 8], + aad_complete: false, + header_len: 0, + payload_len: 0, + cipher: cipher, + phantom_data: PhantomData, + header_processed: false, + aad_buffer: [0; 16], + aad_buffer_len: 0, + }; + + T::regs().cr().modify(|w| w.set_crypen(false)); + + let key = ctx.cipher.key(); + + if key.len() == (128 / 8) { + T::regs().cr().modify(|w| w.set_keysize(0)); + } else if key.len() == (192 / 8) { + T::regs().cr().modify(|w| w.set_keysize(1)); + } else if key.len() == (256 / 8) { + T::regs().cr().modify(|w| w.set_keysize(2)); + } + + self.load_key(key); + + // Set data type to 8-bit. This will match software implementations. + T::regs().cr().modify(|w| w.set_datatype(2)); + + ctx.cipher.prepare_key(&T::regs()); + + ctx.cipher.set_algomode(&T::regs()); + + // Set encrypt/decrypt + if dir == Direction::Encrypt { + T::regs().cr().modify(|w| w.set_algodir(false)); + } else { + T::regs().cr().modify(|w| w.set_algodir(true)); + } + + // Load the IV into the registers. + let iv = ctx.cipher.iv(); + let mut full_iv: [u8; 16] = [0; 16]; + full_iv[0..iv.len()].copy_from_slice(iv); + let mut iv_idx = 0; + let mut iv_word: [u8; 4] = [0; 4]; + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); + + // Flush in/out FIFOs + T::regs().cr().modify(|w| w.fflush()); + + ctx.cipher.init_phase(&T::regs(), self).await; self.store_context(&mut ctx); @@ -1053,6 +1297,107 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { self.store_context(ctx); } + #[cfg(any(cryp_v2, cryp_v3))] + /// Controls the header phase of cipher processing. 
+ /// This function is only valid for GCM, CCM, and GMAC modes. + /// It only needs to be called if using one of these modes and there is associated data. + /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. + /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// When supplying the last block of AAD, `last_aad_block` must be `true`. + pub async fn aad< + 'c, + const TAG_SIZE: usize, + C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated, + >( + &mut self, + ctx: &mut Context<'c, C>, + aad: &[u8], + last_aad_block: bool, + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(ctx); + + // Perform checks for correctness. + if ctx.aad_complete { + panic!("Cannot update AAD after starting payload!") + } + + ctx.header_len += aad.len() as u64; + + // Header phase + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + // First write the header B1 block if not yet written. + if !ctx.header_processed { + ctx.header_processed = true; + let header = ctx.cipher.get_header_block(); + ctx.aad_buffer[0..header.len()].copy_from_slice(header); + ctx.aad_buffer_len += header.len(); + } + + // Fill the header block to make a full block. + let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len); + ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]); + ctx.aad_buffer_len += len_to_copy; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + let mut aad_len_remaining = aad.len() - len_to_copy; + + if ctx.aad_buffer_len < C::BLOCK_SIZE { + // The buffer isn't full and this is the last buffer, so process it as is (already padded). + if last_aad_block { + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); + + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } else { + // Just return because we don't yet have a full block to process. + return; + } + } else { + // Load the full block from the buffer. + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); + } + + // Handle a partial block that is passed in. + ctx.aad_buffer_len = 0; + let leftovers = aad_len_remaining % C::BLOCK_SIZE; + ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]); + ctx.aad_buffer_len += leftovers; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + aad_len_remaining -= leftovers; + assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0); + + // Load full data blocks into core. + let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; + let start_index = len_to_copy; + let end_index = start_index + (C::BLOCK_SIZE * num_full_blocks); + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &aad[start_index..end_index]).await; + + if last_aad_block { + if leftovers > 0 { + Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &ctx.aad_buffer).await; + assert_eq!(T::regs().sr().read().ifem(), true); + } + // Switch to payload phase. 
+ ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } + + self.store_context(ctx); + } + /// Performs encryption/decryption on the provided context. /// The context determines algorithm, mode, and state of the crypto accelerator. /// When the last piece of data is supplied, `last_block` should be `true`. @@ -1118,7 +1463,7 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { // Handle the final block, which is incomplete. if last_block_remainder > 0 { let padding_len = C::BLOCK_SIZE - last_block_remainder; - let temp1 = ctx.cipher.pre_final_block(&T::regs(), ctx.dir, padding_len); + let temp1 = ctx.cipher.pre_final(&T::regs(), ctx.dir, padding_len); let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; @@ -1134,7 +1479,102 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { let mut mask: [u8; 16] = [0; 16]; mask[..last_block_remainder].fill(0xFF); ctx.cipher - .post_final_block(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask); + .post_final_blocking(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask); + } + + ctx.payload_len += input.len() as u64; + + self.store_context(ctx); + } + + /// Performs encryption/decryption on the provided context. + /// The context determines algorithm, mode, and state of the crypto accelerator. + /// When the last piece of data is supplied, `last_block` should be `true`. + /// This function panics under various mismatches of parameters. + /// Input and output buffer lengths must match. + /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. + /// Padding or ciphertext stealing must be managed by the application for these modes. + /// Data must also be a multiple of block size unless `last_block` is `true`. + pub async fn payload<'c, C: Cipher<'c> + CipherSized + IVSized>( + &mut self, + ctx: &mut Context<'c, C>, + input: &[u8], + output: &mut [u8], + last_block: bool, + ) + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(ctx); + + let last_block_remainder = input.len() % C::BLOCK_SIZE; + + // Perform checks for correctness. + if !ctx.aad_complete && ctx.header_len > 0 { + panic!("Additional associated data must be processed first!"); + } else if !ctx.aad_complete { + #[cfg(any(cryp_v2, cryp_v3))] + { + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + T::regs().cr().modify(|w| w.set_crypen(true)); + } + } + if ctx.last_block_processed { + panic!("The last block has already been processed!"); + } + if input.len() > output.len() { + panic!("Output buffer length must match input length."); + } + if !last_block { + if last_block_remainder != 0 { + panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE); + } + } + if C::REQUIRES_PADDING { + if last_block_remainder != 0 { + panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE); + } + } + if last_block { + ctx.last_block_processed = true; + } + + // Load data into core, block by block. 
+ let num_full_blocks = input.len() / C::BLOCK_SIZE; + for block in 0..num_full_blocks { + let index = block * C::BLOCK_SIZE; + // Read block out + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut output[index..index + 4]); + // Write block in + let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + 4]); + embassy_futures::join::join(read, write).await; + } + + // Handle the final block, which is incomplete. + if last_block_remainder > 0 { + let padding_len = C::BLOCK_SIZE - last_block_remainder; + let temp1 = ctx.cipher.pre_final(&T::regs(), ctx.dir, padding_len); + + let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut intermediate_data); + let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &last_block); + embassy_futures::join::join(read, write).await; + + // Handle the last block depending on mode. + let output_len = output.len(); + output[output_len - last_block_remainder..output_len] + .copy_from_slice(&intermediate_data[0..last_block_remainder]); + + let mut mask: [u8; 16] = [0; 16]; + mask[..last_block_remainder].fill(0xFF); + ctx.cipher + .post_final(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask).await; } ctx.payload_len += input.len() as u64; @@ -1188,6 +1628,50 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { tag } + #[cfg(any(cryp_v2, cryp_v3))] + /// This function only needs to be called for GCM, CCM, and GMAC modes to + /// generate an authentication tag. + pub async fn finish<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated>(&mut self, mut ctx: Context<'c, C>) -> [u8; TAG_SIZE] + where + DmaIn: crate::cryp::DmaIn, + DmaOut: crate::cryp::DmaOut, + { + self.load_context(&mut ctx); + + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + let headerlen1: u32 = ((ctx.header_len * 8) >> 32) as u32; + let headerlen2: u32 = (ctx.header_len * 8) as u32; + let payloadlen1: u32 = ((ctx.payload_len * 8) >> 32) as u32; + let payloadlen2: u32 = (ctx.payload_len * 8) as u32; + + #[cfg(cryp_v2)] + let footer: [u32; 4] = [ + headerlen1.swap_bytes(), + headerlen2.swap_bytes(), + payloadlen1.swap_bytes(), + payloadlen2.swap_bytes(), + ]; + #[cfg(cryp_v3)] + let footer: [u32; 4] = [headerlen1, headerlen2, payloadlen1, payloadlen2]; + + let write = Self::write_words(&mut self.indma, C::BLOCK_SIZE, &footer); + + let mut full_tag: [u8; 16] = [0; 16]; + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut full_tag); + + embassy_futures::join::join(read, write).await; + + let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; + tag.copy_from_slice(&full_tag[0..TAG_SIZE]); + + T::regs().cr().modify(|w| w.set_crypen(false)); + + tag + } + fn load_key(&self, key: &[u8]) { // Load the key into the registers. let mut keyidx = 0; @@ -1288,6 +1772,30 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { } } + async fn write_bytes(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u8]) + where + DmaIn: crate::cryp::DmaIn, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + // Configure DMA to transfer input to crypto core. 
+ let dma_request = dma.request(); + let dst_ptr = T::regs().din().as_ptr(); + let num_words = blocks.len() / 4; + let src_ptr = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::High, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_dien(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } + fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) { assert_eq!((blocks.len() * 4) % block_size, 0); let mut byte_counter: usize = 0; @@ -1301,6 +1809,30 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { } } + async fn write_words(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u32]) + where + DmaIn: crate::cryp::DmaIn, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!((blocks.len() * 4) % block_size, 0); + // Configure DMA to transfer input to crypto core. + let dma_request = dma.request(); + let dst_ptr = T::regs().din().as_ptr(); + let num_words = blocks.len(); + let src_ptr = ptr::slice_from_raw_parts(blocks.as_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::High, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_write_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_dien(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } + fn read_bytes_blocking(&self, block_size: usize, blocks: &mut [u8]) { // Block until there is output to read. while !T::regs().sr().read().ofne() {} @@ -1315,6 +1847,30 @@ impl<'d, T: Instance, D> Cryp<'d, T, D> { index += 4; } } + + async fn read_bytes(dma: &mut PeripheralRef<'_, DmaOut>, block_size: usize, blocks: &mut [u8]) + where + DmaOut: crate::cryp::DmaOut, + { + if blocks.len() == 0 { + return; + } + // Ensure input is a multiple of block size. + assert_eq!(blocks.len() % block_size, 0); + // Configure DMA to get output from crypto core. + let dma_request = dma.request(); + let src_ptr = T::regs().dout().as_ptr(); + let num_words = blocks.len() / 4; + let dst_ptr = ptr::slice_from_raw_parts_mut(blocks.as_mut_ptr().cast(), num_words); + let options = TransferOptions { + priority: Priority::VeryHigh, + ..Default::default() + }; + let dma_transfer = unsafe { Transfer::new_read_raw(dma, dma_request, src_ptr, dst_ptr, options) }; + T::regs().dmacr().modify(|w| w.set_doen(true)); + // Wait for the transfer to complete. + dma_transfer.await; + } } pub(crate) mod sealed { @@ -1344,3 +1900,6 @@ foreach_interrupt!( } }; ); + +dma_trait!(DmaIn, Instance); +dma_trait!(DmaOut, Instance); \ No newline at end of file diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs index 79b74e569..a5418765b 100644 --- a/examples/stm32f7/src/bin/cryp.rs +++ b/examples/stm32f7/src/bin/cryp.rs @@ -6,7 +6,6 @@ use aes_gcm::aead::{AeadInPlace, KeyInit}; use aes_gcm::Aes128Gcm; use defmt::info; use embassy_executor::Spawner; -use embassy_stm32::dma::NoDma; use embassy_stm32::{ bind_interrupts, cryp::{self, *}, @@ -27,7 +26,7 @@ async fn main(_spawner: Spawner) -> ! 
{ let payload: &[u8] = b"hello world"; let aad: &[u8] = b"additional data"; - let hw_cryp = Cryp::new(p.CRYP, NoDma, NoDma, Irqs); + let mut hw_cryp = Cryp::new(p.CRYP, p.DMA2_CH6, p.DMA2_CH5, Irqs); let key: [u8; 16] = [0; 16]; let mut ciphertext: [u8; 11] = [0; 11]; let mut plaintext: [u8; 11] = [0; 11]; @@ -37,16 +36,16 @@ async fn main(_spawner: Spawner) -> ! { // Encrypt in hardware using AES-GCM 128-bit let aes_gcm = AesGcm::new(&key, &iv); - let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); - hw_cryp.aad_blocking(&mut gcm_encrypt, aad, true); - hw_cryp.payload_blocking(&mut gcm_encrypt, payload, &mut ciphertext, true); - let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); + let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt).await; + hw_cryp.aad(&mut gcm_encrypt, aad, true).await; + hw_cryp.payload(&mut gcm_encrypt, payload, &mut ciphertext, true).await; + let encrypt_tag = hw_cryp.finish(gcm_encrypt).await; // Decrypt in hardware using AES-GCM 128-bit - let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); - hw_cryp.aad_blocking(&mut gcm_decrypt, aad, true); - hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); - let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt).await; + hw_cryp.aad(&mut gcm_decrypt, aad, true).await; + hw_cryp.payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true).await; + let decrypt_tag = hw_cryp.finish(gcm_decrypt).await; let hw_end_time = Instant::now(); let hw_execution_time = hw_end_time - hw_start_time; From 1ec9fc58f44987c11ac1e093f117679c56dbe2ed Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Tue, 12 Mar 2024 14:52:34 -0400 Subject: [PATCH 3/5] Add async CRYP to test. --- embassy-stm32/src/cryp/mod.rs | 52 +++++++++++++++-------------------- tests/stm32/src/bin/cryp.rs | 31 ++++++++++++++------- tests/stm32/src/common.rs | 1 + 3 files changed, 44 insertions(+), 40 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 1a601533d..aa4c2a024 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -98,7 +98,7 @@ pub trait Cipher<'c> { DmaOut: crate::cryp::DmaOut, {} - /// Called prior to processing the first associated data block for cipher-specific operations. + /// Returns the AAD header block as required by the cipher. fn get_header_block(&self) -> &[u8] { return [0; 0].as_slice(); } @@ -500,7 +500,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] @@ -643,7 +643,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. 
p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] @@ -861,7 +861,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip } #[cfg(cryp_v3)] - fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + fn pre_final(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { //Handle special GCM partial block process. p.cr().modify(|w| w.set_npblb(padding_len as u8)); [0; 4] @@ -1039,10 +1039,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { instance } - /// Start a new cipher operation. - /// Key size must be 128, 192, or 256 bits. - /// Initialization vector must only be supplied if necessary. - /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. + /// Start a new encrypt or decrypt operation for the given cipher. pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> { let mut ctx: Context<'c, C> = Context { dir, @@ -1117,10 +1114,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { ctx } - /// Start a new cipher operation. - /// Key size must be 128, 192, or 256 bits. - /// Initialization vector must only be supplied if necessary. - /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. + /// Start a new encrypt or decrypt operation for the given cipher. pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&mut self, cipher: &'c C, dir: Direction) -> Context<'c, C> where DmaIn: crate::cryp::DmaIn, @@ -1201,10 +1195,9 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { #[cfg(any(cryp_v2, cryp_v3))] /// Controls the header phase of cipher processing. - /// This function is only valid for GCM, CCM, and GMAC modes. - /// It only needs to be called if using one of these modes and there is associated data. - /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. - /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. + /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload_blocking`. + /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. /// When supplying the last block of AAD, `last_aad_block` must be `true`. pub fn aad_blocking< 'c, @@ -1299,10 +1292,9 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { #[cfg(any(cryp_v2, cryp_v3))] /// Controls the header phase of cipher processing. - /// This function is only valid for GCM, CCM, and GMAC modes. - /// It only needs to be called if using one of these modes and there is associated data. - /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. - /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// This function is only valid for authenticated ciphers including GCM, CCM, and GMAC. + /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload`. + /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block. 
/// When supplying the last block of AAD, `last_aad_block` must be `true`. pub async fn aad< 'c, @@ -1402,7 +1394,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { /// The context determines algorithm, mode, and state of the crypto accelerator. /// When the last piece of data is supplied, `last_block` should be `true`. /// This function panics under various mismatches of parameters. - /// Input and output buffer lengths must match. + /// Output buffer must be at least as long as the input buffer. /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. /// Padding or ciphertext stealing must be managed by the application for these modes. /// Data must also be a multiple of block size unless `last_block` is `true`. @@ -1455,9 +1447,9 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { for block in 0..num_full_blocks { let index = block * C::BLOCK_SIZE; // Write block in - self.write_bytes_blocking(C::BLOCK_SIZE, &input[index..index + 4]); + self.write_bytes_blocking(C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]); // Read block out - self.read_bytes_blocking(C::BLOCK_SIZE, &mut output[index..index + 4]); + self.read_bytes_blocking(C::BLOCK_SIZE, &mut output[index..index + C::BLOCK_SIZE]); } // Handle the final block, which is incomplete. @@ -1491,7 +1483,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { /// The context determines algorithm, mode, and state of the crypto accelerator. /// When the last piece of data is supplied, `last_block` should be `true`. /// This function panics under various mismatches of parameters. - /// Input and output buffer lengths must match. + /// Output buffer must be at least as long as the input buffer. /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. /// Padding or ciphertext stealing must be managed by the application for these modes. /// Data must also be a multiple of block size unless `last_block` is `true`. @@ -1548,9 +1540,9 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { for block in 0..num_full_blocks { let index = block * C::BLOCK_SIZE; // Read block out - let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut output[index..index + 4]); + let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut output[index..index + C::BLOCK_SIZE]); // Write block in - let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + 4]); + let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]); embassy_futures::join::join(read, write).await; } @@ -1583,8 +1575,8 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { } #[cfg(any(cryp_v2, cryp_v3))] - /// This function only needs to be called for GCM, CCM, and GMAC modes to - /// generate an authentication tag. + /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. + /// Called after the all data has been encrypted/decrypted by `payload`. pub fn finish_blocking< 'c, const TAG_SIZE: usize, @@ -1629,8 +1621,8 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> { } #[cfg(any(cryp_v2, cryp_v3))] - /// This function only needs to be called for GCM, CCM, and GMAC modes to - /// generate an authentication tag. + // Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC. + /// Called after the all data has been encrypted/decrypted by `payload`. 
pub async fn finish<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated>(&mut self, mut ctx: Context<'c, C>) -> [u8; TAG_SIZE] where DmaIn: crate::cryp::DmaIn, diff --git a/tests/stm32/src/bin/cryp.rs b/tests/stm32/src/bin/cryp.rs index f105abf26..6bca55f55 100644 --- a/tests/stm32/src/bin/cryp.rs +++ b/tests/stm32/src/bin/cryp.rs @@ -10,9 +10,17 @@ use aes_gcm::aead::{AeadInPlace, KeyInit}; use aes_gcm::Aes128Gcm; use common::*; use embassy_executor::Spawner; -use embassy_stm32::cryp::*; +use embassy_stm32::{ + bind_interrupts, + cryp::{self, *}, + peripherals +}; use {defmt_rtt as _, panic_probe as _}; +bind_interrupts!(struct Irqs { + CRYP => cryp::InterruptHandler; +}); + #[embassy_executor::main] async fn main(_spawner: Spawner) { let p: embassy_stm32::Peripherals = embassy_stm32::init(config()); @@ -22,27 +30,30 @@ async fn main(_spawner: Spawner) { const AAD1: &[u8] = b"additional data 1 stdargadrhaethaethjatjatjaetjartjstrjsfkk;'jopofyuisrteytweTASTUIKFUKIXTRDTEREharhaeryhaterjartjarthaethjrtjarthaetrhartjatejatrjsrtjartjyt1"; const AAD2: &[u8] = b"additional data 2 stdhthsthsthsrthsrthsrtjdykjdukdyuldadfhsdghsdghsdghsadghjk'hioethjrtjarthaetrhartjatecfgjhzdfhgzdfhzdfghzdfhzdfhzfhjatrjsrtjartjytjfytjfyg"; - let hw_cryp = Cryp::new(p.CRYP); + let in_dma = peri!(p, CRYP_IN_DMA); + let out_dma = peri!(p, CRYP_OUT_DMA); + + let mut hw_cryp = Cryp::new(p.CRYP, in_dma, out_dma, Irqs); let key: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let mut ciphertext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; let mut plaintext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; let iv: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - // Encrypt in hardware using AES-GCM 128-bit + // Encrypt in hardware using AES-GCM 128-bit in blocking mode. let aes_gcm = AesGcm::new(&key, &iv); - let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); + let mut gcm_encrypt = hw_cryp.start_blocking(&aes_gcm, Direction::Encrypt); hw_cryp.aad_blocking(&mut gcm_encrypt, AAD1, false); hw_cryp.aad_blocking(&mut gcm_encrypt, AAD2, true); hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD1, &mut ciphertext[..PAYLOAD1.len()], false); hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD2, &mut ciphertext[PAYLOAD1.len()..], true); let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); - // Decrypt in hardware using AES-GCM 128-bit - let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); - hw_cryp.aad_blocking(&mut gcm_decrypt, AAD1, false); - hw_cryp.aad_blocking(&mut gcm_decrypt, AAD2, true); - hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); - let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + // Decrypt in hardware using AES-GCM 128-bit in async (DMA) mode. 
diff --git a/tests/stm32/src/common.rs b/tests/stm32/src/common.rs
index 3297ea7e2..c379863a8 100644
--- a/tests/stm32/src/common.rs
+++ b/tests/stm32/src/common.rs
@@ -140,6 +140,7 @@ define_peris!(
 );
 #[cfg(any(feature = "stm32h755zi", feature = "stm32h753zi"))]
 define_peris!(
+    CRYP_IN_DMA = DMA1_CH0, CRYP_OUT_DMA = DMA1_CH1,
     UART = USART1, UART_TX = PB6, UART_RX = PB7, UART_TX_DMA = DMA1_CH0, UART_RX_DMA = DMA1_CH1,
     SPI = SPI1, SPI_SCK = PA5, SPI_MOSI = PB5, SPI_MISO = PA6, SPI_TX_DMA = DMA1_CH0, SPI_RX_DMA = DMA1_CH1,
     ADC = ADC1, DAC = DAC1, DAC_PIN = PA4,

From 2634a57098ebee5fb2ea3efe7cfb5629817a5b43 Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 12 Mar 2024 15:05:22 -0400
Subject: [PATCH 4/5] Correct cryp CI build issues.

---
 embassy-stm32/src/cryp/mod.rs    | 98 +++++++++++++++++++-------------
 examples/stm32f7/src/bin/cryp.rs |  4 +-
 tests/stm32/src/bin/cryp.rs      |  6 +-
 3 files changed, 66 insertions(+), 42 deletions(-)

diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index aa4c2a024..74b095b6f 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -60,11 +60,12 @@ pub trait Cipher<'c> {
     fn init_phase_blocking<T: Instance, DmaIn, DmaOut>(&self, _p: &pac::cryp::Cryp, _cryp: &Cryp<T, DmaIn, DmaOut>) {}
 
     /// Performs any cipher-specific initialization.
-    async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, _p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>)
+    async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, _p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>)
     where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
-    {}
+    {
+    }
 
     /// Called prior to processing the last data block for cipher-specific operations.
     fn pre_final(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] {
@@ -92,11 +93,11 @@ pub trait Cipher<'c> {
         _int_data: &mut [u8; AES_BLOCK_SIZE],
         _temp1: [u32; 4],
         _padding_mask: [u8; 16],
-    )
-    where
+    ) where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
-    {}
+    {
+    }
 
     /// Returns the AAD header block as required by the cipher.
     fn get_header_block(&self) -> &[u8] {
@@ -479,7 +480,11 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
         while p.cr().read().crypen() {}
     }
 
-    async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) {
+    async fn init_phase<T: Instance, DmaIn, DmaOut>(
+        &self,
+        p: &pac::cryp::Cryp,
+        _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>,
+    ) {
         p.cr().modify(|w| w.set_gcm_ccmph(0));
         p.cr().modify(|w| w.set_crypen(true));
         while p.cr().read().crypen() {}
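
For readers new to this peripheral: the `gcm_ccmph` field written in `init_phase` selects which of the four GCM/CCM processing phases the CRYP block runs; per the STM32 reference manual the encoding is 0 = init, 1 = header (AAD), 2 = payload, and 3 = final (tag). A commented restatement of the init sequence from the hunk above:

    p.cr().modify(|w| w.set_gcm_ccmph(0)); // select the GCM init phase
    p.cr().modify(|w| w.set_crypen(true)); // start it; hardware derives the hash subkey
    while p.cr().read().crypen() {}        // CRYPEN self-clears when the init phase completes
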
@@ -541,12 +546,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
         int_data: &mut [u8; AES_BLOCK_SIZE],
         _temp1: [u32; 4],
         padding_mask: [u8; AES_BLOCK_SIZE],
-    )
-    where
-        DmaIn: crate::cryp::DmaIn,
+    ) where
+        DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
-
         if dir == Direction::Encrypt {
             // Handle special GCM partial block process.
             p.cr().modify(|w| w.set_crypen(false));
@@ -562,7 +565,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
 
             let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data);
             let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data);
-
+
             embassy_futures::join::join(read, write).await;
 
             int_data.copy_from_slice(&out_data);
@@ -622,7 +625,11 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
         while p.cr().read().crypen() {}
     }
 
-    async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>) {
+    async fn init_phase<T: Instance, DmaIn, DmaOut>(
+        &self,
+        p: &pac::cryp::Cryp,
+        _cryp: &mut Cryp<'_, T, DmaIn, DmaOut>,
+    ) {
         p.cr().modify(|w| w.set_gcm_ccmph(0));
         p.cr().modify(|w| w.set_crypen(true));
         while p.cr().read().crypen() {}
@@ -684,12 +691,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
         int_data: &mut [u8; AES_BLOCK_SIZE],
         _temp1: [u32; 4],
         padding_mask: [u8; AES_BLOCK_SIZE],
-    )
-    where
-        DmaIn: crate::cryp::DmaIn,
+    ) where
+        DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
-
         if dir == Direction::Encrypt {
             // Handle special GCM partial block process.
             p.cr().modify(|w| w.set_crypen(false));
@@ -705,7 +710,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
 
             let read = Cryp::<T, DmaIn, DmaOut>::read_bytes(&mut cryp.outdma, Self::BLOCK_SIZE, &mut out_data);
             let write = Cryp::<T, DmaIn, DmaOut>::write_bytes(&mut cryp.indma, Self::BLOCK_SIZE, int_data);
-
+
             embassy_futures::join::join(read, write).await;
         }
     }
@@ -826,7 +831,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip
 
     async fn init_phase<T: Instance, DmaIn, DmaOut>(&self, p: &pac::cryp::Cryp, cryp: &mut Cryp<'_, T, DmaIn, DmaOut>)
     where
-        DmaIn: crate::cryp::DmaIn,
+        DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
         p.cr().modify(|w| w.set_gcm_ccmph(0));
@@ -913,8 +918,7 @@ impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cip
         int_data: &mut [u8; AES_BLOCK_SIZE],
         temp1: [u32; 4],
         padding_mask: [u8; 16],
-    )
-    where
+    ) where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
@@ -1040,7 +1044,11 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
     }
 
     /// Start a new encrypt or decrypt operation for the given cipher.
-    pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> {
+    pub fn start_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>(
+        &self,
+        cipher: &'c C,
+        dir: Direction,
+    ) -> Context<'c, C> {
         let mut ctx: Context<'c, C> = Context {
             dir,
             last_block_processed: false,
@@ -1115,7 +1123,11 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
     }
 
     /// Start a new encrypt or decrypt operation for the given cipher.
-    pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&mut self, cipher: &'c C, dir: Direction) -> Context<'c, C>
+    pub async fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(
+        &mut self,
+        cipher: &'c C,
+        dir: Direction,
+    ) -> Context<'c, C>
     where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
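
The next hunk only reformats `aad`'s signature, but the AAD rules carried in its doc-comment context lines are easiest to see with a concrete split. A usage sketch in the blocking flavor, assuming a driver `hw_cryp`, an open context `ctx`, and a 45-byte `aad` buffer (all placeholders from the earlier test):

    // Feed 45 bytes of AAD in two calls: a 32-byte chunk (two full AES
    // blocks) first, then the 13-byte remainder flagged as the last AAD
    // block. Only the final chunk may be shorter than a block multiple.
    hw_cryp.aad_blocking(&mut ctx, &aad[..32], false);
    hw_cryp.aad_blocking(&mut ctx, &aad[32..], true);
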
@@ -1296,17 +1308,12 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
     /// All additional associated data (AAD) must be supplied to this function prior to starting the payload phase with `payload`.
     /// The AAD must be supplied in multiples of the block size (128-bits for AES, 64-bits for DES), except when supplying the last block.
     /// When supplying the last block of AAD, `last_aad_block` must be `true`.
-    pub async fn aad<
-        'c,
-        const TAG_SIZE: usize,
-        C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>,
-    >(
+    pub async fn aad<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>>(
         &mut self,
         ctx: &mut Context<'c, C>,
         aad: &[u8],
         last_aad_block: bool,
-    )
-    where
+    ) where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
@@ -1493,8 +1500,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
         input: &[u8],
         output: &mut [u8],
         last_block: bool,
-    )
-    where
+    ) where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
     {
@@ -1540,7 +1546,11 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
         for block in 0..num_full_blocks {
             let index = block * C::BLOCK_SIZE;
             // Read block out
-            let read = Self::read_bytes(&mut self.outdma, C::BLOCK_SIZE, &mut output[index..index + C::BLOCK_SIZE]);
+            let read = Self::read_bytes(
+                &mut self.outdma,
+                C::BLOCK_SIZE,
+                &mut output[index..index + C::BLOCK_SIZE],
+            );
             // Write block in
             let write = Self::write_bytes(&mut self.indma, C::BLOCK_SIZE, &input[index..index + C::BLOCK_SIZE]);
             embassy_futures::join::join(read, write).await;
@@ -1566,7 +1576,8 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
             let mut mask: [u8; 16] = [0; 16];
             mask[..last_block_remainder].fill(0xFF);
             ctx.cipher
-                .post_final(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask).await;
+                .post_final(&T::regs(), self, ctx.dir, &mut intermediate_data, temp1, mask)
+                .await;
         }
 
         ctx.payload_len += input.len() as u64;
@@ -1623,7 +1634,14 @@
     #[cfg(any(cryp_v2, cryp_v3))]
     /// Generates an authentication tag for authenticated ciphers including GCM, CCM, and GMAC.
     /// Called after all the data has been encrypted/decrypted by `payload`.
-    pub async fn finish<'c, const TAG_SIZE: usize, C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>>(&mut self, mut ctx: Context<'c, C>) -> [u8; TAG_SIZE]
+    pub async fn finish<
+        'c,
+        const TAG_SIZE: usize,
+        C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>,
+    >(
+        &mut self,
+        mut ctx: Context<'c, C>,
+    ) -> [u8; TAG_SIZE]
     where
         DmaIn: crate::cryp::DmaIn,
         DmaOut: crate::cryp::DmaOut,
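
One consequence of `finish` and `finish_blocking` taking `ctx` by value is that a context cannot be touched again once the tag is read; the tag length is pinned by the `TAG_SIZE` const parameter (16 bytes for AES-GCM). A decrypt-side sketch, where `expected_tag` is a hypothetical tag received alongside the ciphertext:

    // Consumes the context; gcm_decrypt cannot be reused afterwards.
    let computed_tag: [u8; 16] = hw_cryp.finish(gcm_decrypt).await;
    // Verify against the tag that accompanied the ciphertext. A
    // constant-time comparison is preferable in production code.
    let tag_ok = computed_tag == expected_tag;
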
@@ -1663,7 +1681,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
 
         tag
     }
-
+
     fn load_key(&self, key: &[u8]) {
         // Load the key into the registers.
         let mut keyidx = 0;
@@ -1766,7 +1784,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
 
     async fn write_bytes(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u8])
     where
-        DmaIn: crate::cryp::DmaIn,
+        DmaIn: crate::cryp::DmaIn,
     {
         if blocks.len() == 0 {
             return;
@@ -1788,6 +1806,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
         dma_transfer.await;
     }
 
+    #[cfg(any(cryp_v2, cryp_v3))]
     fn write_words_blocking(&self, block_size: usize, blocks: &[u32]) {
         assert_eq!((blocks.len() * 4) % block_size, 0);
         let mut byte_counter: usize = 0;
@@ -1801,10 +1820,11 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
         }
     }
 
+    #[cfg(any(cryp_v2, cryp_v3))]
     async fn write_words(dma: &mut PeripheralRef<'_, DmaIn>, block_size: usize, blocks: &[u32])
     where
         DmaIn: crate::cryp::DmaIn,
-    {
+    {
         if blocks.len() == 0 {
             return;
         }
@@ -1840,7 +1860,7 @@ impl<'d, T: Instance, DmaIn, DmaOut> Cryp<'d, T, DmaIn, DmaOut> {
         }
     }
 
-    async fn read_bytes(dma: &mut PeripheralRef<'_, DmaOut>, block_size: usize, blocks: &mut [u8])
+    async fn read_bytes(dma: &mut PeripheralRef<'_, DmaOut>, block_size: usize, blocks: &mut [u8])
     where
         DmaOut: crate::cryp::DmaOut,
     {
@@ -1894,4 +1914,4 @@ foreach_interrupt!(
 );
 
 dma_trait!(DmaIn, Instance);
-dma_trait!(DmaOut, Instance);
\ No newline at end of file
+dma_trait!(DmaOut, Instance);
diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs
index a5418765b..ce2cf0489 100644
--- a/examples/stm32f7/src/bin/cryp.rs
+++ b/examples/stm32f7/src/bin/cryp.rs
@@ -44,7 +44,9 @@ async fn main(_spawner: Spawner) -> ! {
     // Decrypt in hardware using AES-GCM 128-bit
     let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt).await;
     hw_cryp.aad(&mut gcm_decrypt, aad, true).await;
-    hw_cryp.payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true).await;
+    hw_cryp
+        .payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true)
+        .await;
     let decrypt_tag = hw_cryp.finish(gcm_decrypt).await;
     let hw_end_time = Instant::now();
diff --git a/tests/stm32/src/bin/cryp.rs b/tests/stm32/src/bin/cryp.rs
index 6bca55f55..cc317f625 100644
--- a/tests/stm32/src/bin/cryp.rs
+++ b/tests/stm32/src/bin/cryp.rs
@@ -13,7 +13,7 @@ use embassy_executor::Spawner;
 use embassy_stm32::{
     bind_interrupts,
     cryp::{self, *},
-    peripherals
+    peripherals,
 };
 use {defmt_rtt as _, panic_probe as _};
@@ -52,7 +52,9 @@ async fn main(_spawner: Spawner) {
     let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt).await;
     hw_cryp.aad(&mut gcm_decrypt, AAD1, false).await;
     hw_cryp.aad(&mut gcm_decrypt, AAD2, true).await;
-    hw_cryp.payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true).await;
+    hw_cryp
+        .payload(&mut gcm_decrypt, &ciphertext, &mut plaintext, true)
+        .await;
     let decrypt_tag = hw_cryp.finish(gcm_decrypt).await;
 
     info!("AES-GCM Ciphertext: {:?}", ciphertext);

From b1ba2729878a1553145f215dff40281c65b75983 Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 12 Mar 2024 15:13:06 -0400
Subject: [PATCH 5/5] rustfmt

---
 examples/stm32f7/src/bin/cryp.rs | 7 ++-----
 tests/stm32/src/bin/cryp.rs      | 7 ++-----
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs
index ce2cf0489..235853cb9 100644
--- a/examples/stm32f7/src/bin/cryp.rs
+++ b/examples/stm32f7/src/bin/cryp.rs
@@ -6,11 +6,8 @@ use aes_gcm::aead::{AeadInPlace, KeyInit};
 use aes_gcm::Aes128Gcm;
 use defmt::info;
 use embassy_executor::Spawner;
-use embassy_stm32::{
-    bind_interrupts,
-    cryp::{self, *},
-};
-use embassy_stm32::{peripherals, Config};
+use embassy_stm32::cryp::{self, *};
+use embassy_stm32::{bind_interrupts, peripherals, Config};
 use embassy_time::Instant;
 use {defmt_rtt as _, panic_probe as _};
 
diff --git a/tests/stm32/src/bin/cryp.rs b/tests/stm32/src/bin/cryp.rs
index cc317f625..60778bdaa 100644
--- a/tests/stm32/src/bin/cryp.rs
+++ b/tests/stm32/src/bin/cryp.rs
@@ -10,11 +10,8 @@ use aes_gcm::aead::{AeadInPlace, KeyInit};
 use aes_gcm::Aes128Gcm;
 use common::*;
 use embassy_executor::Spawner;
-use embassy_stm32::{
-    bind_interrupts,
-    cryp::{self, *},
-    peripherals,
-};
+use embassy_stm32::cryp::{self, *};
+use embassy_stm32::{bind_interrupts, peripherals};
 use {defmt_rtt as _, panic_probe as _};
 
 bind_interrupts!(struct Irqs {