diff --git a/embassy-stm32/Cargo.toml b/embassy-stm32/Cargo.toml
index 08ccd35ae..460184920 100644
--- a/embassy-stm32/Cargo.toml
+++ b/embassy-stm32/Cargo.toml
@@ -70,7 +70,7 @@ rand_core = "0.6.3"
 sdio-host = "0.5.0"
 critical-section = "1.1"
 #stm32-metapac = { version = "15" }
-stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-4a0bcec33362449fb733c066936d25cbabab396a" }
+stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-d7462d805ef05892531a83cd9ad60c9cba568d54" }
 vcell = "0.1.3"
 bxcan = "0.7.0"
 nb = "1.0.0"
@@ -94,7 +94,7 @@ critical-section = { version = "1.1", features = ["std"] }
 proc-macro2 = "1.0.36"
 quote = "1.0.15"
 #stm32-metapac = { version = "15", default-features = false, features = ["metadata"]}
-stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-4a0bcec33362449fb733c066936d25cbabab396a", default-features = false, features = ["metadata"]}
+stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-d7462d805ef05892531a83cd9ad60c9cba568d54", default-features = false, features = ["metadata"]}
 
 [features]
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
new file mode 100644
index 000000000..8f259520a
--- /dev/null
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -0,0 +1,1356 @@
+//! Crypto Accelerator (CRYP)
+#[cfg(any(cryp_v2, cryp_v3))]
+use core::cmp::min;
+use core::marker::PhantomData;
+
+use embassy_hal_internal::{into_ref, PeripheralRef};
+
+use crate::{interrupt, pac, peripherals, Peripheral};
+
+const DES_BLOCK_SIZE: usize = 8; // 64 bits
+const AES_BLOCK_SIZE: usize = 16; // 128 bits
+
+/// This trait encapsulates all cipher-specific behavior.
+pub trait Cipher<'c> {
+    /// Processing block size. Determined by the processor and the algorithm.
+    const BLOCK_SIZE: usize;
+
+    /// Indicates whether the cipher requires the application to provide padding.
+    /// If `true`, no partial blocks will be accepted (a panic will occur).
+    const REQUIRES_PADDING: bool = false;
+
+    /// Returns the symmetric key.
+    fn key(&self) -> &[u8];
+
+    /// Returns the initialization vector.
+    fn iv(&self) -> &[u8];
+
+    /// Sets the processor algorithm mode according to the associated cipher.
+    fn set_algomode(&self, p: &pac::cryp::Cryp);
+
+    /// Performs any key preparation within the processor, if necessary.
+    fn prepare_key(&self, _p: &pac::cryp::Cryp) {}
+
+    /// Performs any cipher-specific initialization.
+    fn init_phase(&self, _p: &pac::cryp::Cryp) {}
+
+    /// Called prior to processing the last data block for cipher-specific operations.
+    fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _padding_len: usize) -> [u32; 4] {
+        return [0; 4];
+    }
+
+    /// Called after processing the last data block for cipher-specific operations.
+    fn post_final_block(
+        &self,
+        _p: &pac::cryp::Cryp,
+        _dir: Direction,
+        _int_data: &mut [u8; AES_BLOCK_SIZE],
+        _temp1: [u32; 4],
+        _padding_mask: [u8; 16],
+    ) {
+    }
+
+    /// Called prior to processing the first associated data block for cipher-specific operations.
+    fn get_header_block(&self) -> &[u8] {
+        return [0; 0].as_slice();
+    }
+}
+
+/// This trait enables restriction of ciphers to specific key sizes.
+pub trait CipherSized {}
+
+/// This trait enables restriction of initialization vectors to sizes compatible with a cipher mode.
+pub trait IVSized {}
+
+/// This trait enables restriction of a header phase to authenticated ciphers only.
+pub trait CipherAuthenticated<const TAG_SIZE: usize> {
+    /// Defines the authentication tag size.
+    const TAG_SIZE: usize = TAG_SIZE;
+}
+
+/// TDES-ECB Cipher Mode
+pub struct TdesEcb<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 0],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> TdesEcb<'c, KEY_SIZE> {
+    /// Constructs a new TDES-ECB cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE]) -> Self {
+        return Self { key: key, iv: &[0; 0] };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for TdesEcb<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = DES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(0));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(0));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for TdesEcb<'c, { 112 / 8 }> {}
+impl<'c> CipherSized for TdesEcb<'c, { 168 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for TdesEcb<'c, KEY_SIZE> {}
+
+/// TDES-CBC Cipher Mode
+pub struct TdesCbc<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 8],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> TdesCbc<'c, KEY_SIZE> {
+    /// Constructs a new TDES-CBC cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 8]) -> Self {
+        return Self { key: key, iv: iv };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for TdesCbc<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = DES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(1));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(1));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for TdesCbc<'c, { 112 / 8 }> {}
+impl<'c> CipherSized for TdesCbc<'c, { 168 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for TdesCbc<'c, KEY_SIZE> {}
+
+/// DES-ECB Cipher Mode
+pub struct DesEcb<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 0],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> DesEcb<'c, KEY_SIZE> {
+    /// Constructs a new DES-ECB cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE]) -> Self {
+        return Self { key: key, iv: &[0; 0] };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for DesEcb<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = DES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(2));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(2));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for DesEcb<'c, { 56 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for DesEcb<'c, KEY_SIZE> {}
+
+/// DES-CBC Cipher Mode
+pub struct DesCbc<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 8],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> DesCbc<'c, KEY_SIZE> {
+    /// Constructs a new DES-CBC cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 8]) -> Self {
+        return Self { key: key, iv: iv };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for DesCbc<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = DES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(3));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(3));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for DesCbc<'c, { 56 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for DesCbc<'c, KEY_SIZE> {}
+
+/// AES-ECB Cipher Mode
+pub struct AesEcb<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 0],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> AesEcb<'c, KEY_SIZE> {
+    /// Constructs a new AES-ECB cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE]) -> Self {
+        return Self { key: key, iv: &[0; 0] };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesEcb<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn prepare_key(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(7));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(7));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+        p.cr().modify(|w| w.set_crypen(true));
+        while p.sr().read().busy() {}
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(4));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(4));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for AesEcb<'c, { 128 / 8 }> {}
+impl<'c> CipherSized for AesEcb<'c, { 192 / 8 }> {}
+impl<'c> CipherSized for AesEcb<'c, { 256 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for AesEcb<'c, KEY_SIZE> {}
+
+/// AES-CBC Cipher Mode
+pub struct AesCbc<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 16],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> AesCbc<'c, KEY_SIZE> {
+    /// Constructs a new AES-CBC cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self {
+        return Self { key: key, iv: iv };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCbc<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
+    const REQUIRES_PADDING: bool = true;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn prepare_key(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(7));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(7));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+        p.cr().modify(|w| w.set_crypen(true));
+        while p.sr().read().busy() {}
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(5));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(5));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for AesCbc<'c, { 128 / 8 }> {}
+impl<'c> CipherSized for AesCbc<'c, { 192 / 8 }> {}
+impl<'c> CipherSized for AesCbc<'c, { 256 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for AesCbc<'c, KEY_SIZE> {}
+
+/// AES-CTR Cipher Mode
+pub struct AesCtr<'c, const KEY_SIZE: usize> {
+    iv: &'c [u8; 16],
+    key: &'c [u8; KEY_SIZE],
+}
+
+impl<'c, const KEY_SIZE: usize> AesCtr<'c, KEY_SIZE> {
+    /// Constructs a new AES-CTR cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self {
+        return Self { key: key, iv: iv };
+    }
+}
+
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCtr<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &'c [u8] {
+        self.iv
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        #[cfg(cryp_v1)]
+        {
+            p.cr().modify(|w| w.set_algomode(6));
+        }
+        #[cfg(any(cryp_v2, cryp_v3))]
+        {
+            p.cr().modify(|w| w.set_algomode0(6));
+            p.cr().modify(|w| w.set_algomode3(false));
+        }
+    }
+}
+
+impl<'c> CipherSized for AesCtr<'c, { 128 / 8 }> {}
+impl<'c> CipherSized for AesCtr<'c, { 192 / 8 }> {}
+impl<'c> CipherSized for AesCtr<'c, { 256 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> IVSized for AesCtr<'c, KEY_SIZE> {}
+
+#[cfg(any(cryp_v2, cryp_v3))]
+/// AES-GCM Cipher Mode
+pub struct AesGcm<'c, const KEY_SIZE: usize> {
+    iv: [u8; 16],
+    key: &'c [u8; KEY_SIZE],
+}
+
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize> AesGcm<'c, KEY_SIZE> {
+    /// Constructs a new AES-GCM cipher for a cryptographic operation.
+    pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self {
+        let mut new_gcm = Self { key: key, iv: [0; 16] };
+        new_gcm.iv[..12].copy_from_slice(iv);
+        new_gcm.iv[15] = 2;
+        new_gcm
+    }
+}
+
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
+    const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
+
+    fn key(&self) -> &'c [u8] {
+        self.key
+    }
+
+    fn iv(&self) -> &[u8] {
+        self.iv.as_slice()
+    }
+
+    fn set_algomode(&self, p: &pac::cryp::Cryp) {
+        p.cr().modify(|w| w.set_algomode0(0));
+        p.cr().modify(|w| w.set_algomode3(true));
+    }
+
+    fn init_phase(&self, p: &pac::cryp::Cryp) {
+        p.cr().modify(|w| w.set_gcm_ccmph(0));
+        p.cr().modify(|w| w.set_crypen(true));
+        while p.cr().read().crypen() {}
+    }
+
+    #[cfg(cryp_v2)]
+    fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] {
+        //Handle special GCM partial block process.
+ if dir == Direction::Encrypt { + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_algomode0(6)); + let iv1r = p.csgcmccmr(7).read() - 1; + p.init(1).ivrr().write_value(iv1r); + p.cr().modify(|w| w.set_crypen(true)); + } + [0; 4] + } + + #[cfg(cryp_v3)] + fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_npblb(padding_len as u8)); + [0; 4] + } + + #[cfg(cryp_v2)] + fn post_final_block( + &self, + p: &pac::cryp::Cryp, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) { + if dir == Direction::Encrypt { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + let mut index = 0; + let end_index = Self::BLOCK_SIZE; + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&int_data[index..index + 4]); + p.din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + for _ in 0..4 { + p.dout().read(); + } + } + } +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGcm<'c, { 128 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGcm<'c, { 192 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGcm<'c, { 256 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> CipherAuthenticated<16> for AesGcm<'c, KEY_SIZE> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> IVSized for AesGcm<'c, KEY_SIZE> {} + +#[cfg(any(cryp_v2, cryp_v3))] +/// AES-GMAC Cipher Mode +pub struct AesGmac<'c, const KEY_SIZE: usize> { + iv: [u8; 16], + key: &'c [u8; KEY_SIZE], +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> AesGmac<'c, KEY_SIZE> { + /// Constructs a new AES-GMAC cipher for a cryptographic operation. + pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self { + let mut new_gmac = Self { key: key, iv: [0; 16] }; + new_gmac.iv[..12].copy_from_slice(iv); + new_gmac.iv[15] = 2; + new_gmac + } +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &[u8] { + self.iv.as_slice() + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(0)); + p.cr().modify(|w| w.set_algomode3(true)); + } + + fn init_phase(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + #[cfg(cryp_v2)] + fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + //Handle special GCM partial block process. 
+ if dir == Direction::Encrypt { + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_algomode0(6)); + let iv1r = p.csgcmccmr(7).read() - 1; + p.init(1).ivrr().write_value(iv1r); + p.cr().modify(|w| w.set_crypen(true)); + } + [0; 4] + } + + #[cfg(cryp_v3)] + fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_npblb(padding_len as u8)); + [0; 4] + } + + #[cfg(cryp_v2)] + fn post_final_block( + &self, + p: &pac::cryp::Cryp, + dir: Direction, + int_data: &mut [u8; AES_BLOCK_SIZE], + _temp1: [u32; 4], + padding_mask: [u8; AES_BLOCK_SIZE], + ) { + if dir == Direction::Encrypt { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(true)); + p.cr().modify(|w| w.set_algomode0(0)); + for i in 0..AES_BLOCK_SIZE { + int_data[i] = int_data[i] & padding_mask[i]; + } + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + let mut index = 0; + let end_index = Self::BLOCK_SIZE; + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&int_data[index..index + 4]); + p.din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + for _ in 0..4 { + p.dout().read(); + } + } + } +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGmac<'c, { 128 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGmac<'c, { 192 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c> CipherSized for AesGmac<'c, { 256 / 8 }> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> CipherAuthenticated<16> for AesGmac<'c, KEY_SIZE> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize> IVSized for AesGmac<'c, KEY_SIZE> {} + +#[cfg(any(cryp_v2, cryp_v3))] +/// AES-CCM Cipher Mode +pub struct AesCcm<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> { + key: &'c [u8; KEY_SIZE], + aad_header: [u8; 6], + aad_header_len: usize, + block0: [u8; 16], + ctr: [u8; 16], +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> AesCcm<'c, KEY_SIZE, TAG_SIZE, IV_SIZE> { + /// Constructs a new AES-CCM cipher for a cryptographic operation. 
+ pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; IV_SIZE], aad_len: usize, payload_len: usize) -> Self { + let mut aad_header: [u8; 6] = [0; 6]; + let mut aad_header_len = 0; + let mut block0: [u8; 16] = [0; 16]; + if aad_len != 0 { + if aad_len < 65280 { + aad_header[0] = (aad_len >> 8) as u8 & 0xFF; + aad_header[1] = aad_len as u8 & 0xFF; + aad_header_len = 2; + } else { + aad_header[0] = 0xFF; + aad_header[1] = 0xFE; + let aad_len_bytes: [u8; 4] = aad_len.to_be_bytes(); + aad_header[2] = aad_len_bytes[0]; + aad_header[3] = aad_len_bytes[1]; + aad_header[4] = aad_len_bytes[2]; + aad_header[5] = aad_len_bytes[3]; + aad_header_len = 6; + } + } + let total_aad_len = aad_header_len + aad_len; + let mut aad_padding_len = 16 - (total_aad_len % 16); + if aad_padding_len == 16 { + aad_padding_len = 0; + } + aad_header_len += aad_padding_len; + let total_aad_len_padded = aad_header_len + aad_len; + if total_aad_len_padded > 0 { + block0[0] = 0x40; + } + block0[0] |= ((((TAG_SIZE as u8) - 2) >> 1) & 0x07) << 3; + block0[0] |= ((15 - (iv.len() as u8)) - 1) & 0x07; + block0[1..1 + iv.len()].copy_from_slice(iv); + let payload_len_bytes: [u8; 4] = payload_len.to_be_bytes(); + if iv.len() <= 11 { + block0[12] = payload_len_bytes[0]; + } else if payload_len_bytes[0] > 0 { + panic!("Message is too large for given IV size."); + } + if iv.len() <= 12 { + block0[13] = payload_len_bytes[1]; + } else if payload_len_bytes[1] > 0 { + panic!("Message is too large for given IV size."); + } + block0[14] = payload_len_bytes[2]; + block0[15] = payload_len_bytes[3]; + let mut ctr: [u8; 16] = [0; 16]; + ctr[0] = block0[0] & 0x07; + ctr[1..1 + iv.len()].copy_from_slice(&block0[1..1 + iv.len()]); + ctr[15] = 0x01; + + return Self { + key: key, + aad_header: aad_header, + aad_header_len: aad_header_len, + block0: block0, + ctr: ctr, + }; + } +} + +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize, const IV_SIZE: usize> Cipher<'c> + for AesCcm<'c, KEY_SIZE, TAG_SIZE, IV_SIZE> +{ + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &[u8] { + self.ctr.as_slice() + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(1)); + p.cr().modify(|w| w.set_algomode3(true)); + } + + fn init_phase(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + + let mut index = 0; + let end_index = index + Self::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&self.block0[index..index + 4]); + p.din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + fn get_header_block(&self) -> &[u8] { + return &self.aad_header[0..self.aad_header_len]; + } + + #[cfg(cryp_v2)] + fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, _padding_len: usize) -> [u32; 4] { + //Handle special CCM partial block process. 
+        let mut temp1 = [0; 4];
+        if dir == Direction::Decrypt {
+            p.cr().modify(|w| w.set_crypen(false));
+            let iv1temp = p.init(1).ivrr().read();
+            temp1[0] = p.csgcmccmr(0).read().swap_bytes();
+            temp1[1] = p.csgcmccmr(1).read().swap_bytes();
+            temp1[2] = p.csgcmccmr(2).read().swap_bytes();
+            temp1[3] = p.csgcmccmr(3).read().swap_bytes();
+            p.init(1).ivrr().write_value(iv1temp);
+            p.cr().modify(|w| w.set_algomode3(false));
+            p.cr().modify(|w| w.set_algomode0(6));
+            p.cr().modify(|w| w.set_crypen(true));
+        }
+        return temp1;
+    }
+
+    #[cfg(cryp_v3)]
+    fn pre_final_block(&self, p: &pac::cryp::Cryp, _dir: Direction, padding_len: usize) -> [u32; 4] {
+        //Handle special CCM partial block process.
+        p.cr().modify(|w| w.set_npblb(padding_len as u8));
+        [0; 4]
+    }
+
+    #[cfg(cryp_v2)]
+    fn post_final_block(
+        &self,
+        p: &pac::cryp::Cryp,
+        dir: Direction,
+        int_data: &mut [u8; AES_BLOCK_SIZE],
+        temp1: [u32; 4],
+        padding_mask: [u8; 16],
+    ) {
+        if dir == Direction::Decrypt {
+            //Handle special CCM partial block process.
+            let mut temp2 = [0; 4];
+            temp2[0] = p.csgcmccmr(0).read().swap_bytes();
+            temp2[1] = p.csgcmccmr(1).read().swap_bytes();
+            temp2[2] = p.csgcmccmr(2).read().swap_bytes();
+            temp2[3] = p.csgcmccmr(3).read().swap_bytes();
+            p.cr().modify(|w| w.set_algomode3(true));
+            p.cr().modify(|w| w.set_algomode0(1));
+            p.cr().modify(|w| w.set_gcm_ccmph(3));
+            // Header phase
+            p.cr().modify(|w| w.set_gcm_ccmph(1));
+            for i in 0..AES_BLOCK_SIZE {
+                int_data[i] = int_data[i] & padding_mask[i];
+            }
+            let mut in_data: [u32; 4] = [0; 4];
+            for i in 0..in_data.len() {
+                let mut int_bytes: [u8; 4] = [0; 4];
+                int_bytes.copy_from_slice(&int_data[(i * 4)..(i * 4) + 4]);
+                let int_word = u32::from_le_bytes(int_bytes);
+                in_data[i] = int_word;
+                in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i];
+                p.din().write_value(in_data[i]);
+            }
+        }
+    }
+}
+
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const TAG_SIZE: usize, const IV_SIZE: usize> CipherSized for AesCcm<'c, { 128 / 8 }, TAG_SIZE, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const TAG_SIZE: usize, const IV_SIZE: usize> CipherSized for AesCcm<'c, { 192 / 8 }, TAG_SIZE, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const TAG_SIZE: usize, const IV_SIZE: usize> CipherSized for AesCcm<'c, { 256 / 8 }, TAG_SIZE, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<4> for AesCcm<'c, KEY_SIZE, 4, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<6> for AesCcm<'c, KEY_SIZE, 6, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<8> for AesCcm<'c, KEY_SIZE, 8, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<10> for AesCcm<'c, KEY_SIZE, 10, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<12> for AesCcm<'c, KEY_SIZE, 12, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<14> for AesCcm<'c, KEY_SIZE, 14, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const IV_SIZE: usize> CipherAuthenticated<16> for AesCcm<'c, KEY_SIZE, 16, IV_SIZE> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 7> {}
+#[cfg(any(cryp_v2, cryp_v3))]
+impl<'c, const KEY_SIZE: usize,
const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 8> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 9> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 10> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 11> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 12> {} +#[cfg(any(cryp_v2, cryp_v3))] +impl<'c, const KEY_SIZE: usize, const TAG_SIZE: usize> IVSized for AesCcm<'c, KEY_SIZE, TAG_SIZE, 13> {} + +#[allow(dead_code)] +/// Holds the state information for a cipher operation. +/// Allows suspending/resuming of cipher operations. +pub struct Context<'c, C: Cipher<'c> + CipherSized> { + phantom_data: PhantomData<&'c C>, + cipher: &'c C, + dir: Direction, + last_block_processed: bool, + header_processed: bool, + aad_complete: bool, + cr: u32, + iv: [u32; 4], + csgcmccm: [u32; 8], + csgcm: [u32; 8], + header_len: u64, + payload_len: u64, + aad_buffer: [u8; 16], + aad_buffer_len: usize, +} + +/// Selects whether the crypto processor operates in encryption or decryption mode. +#[derive(PartialEq, Clone, Copy)] +pub enum Direction { + /// Encryption mode + Encrypt, + /// Decryption mode + Decrypt, +} + +/// Crypto Accelerator Driver +pub struct Cryp<'d, T: Instance> { + _peripheral: PeripheralRef<'d, T>, +} + +impl<'d, T: Instance> Cryp<'d, T> { + /// Create a new CRYP driver. + pub fn new(peri: impl Peripheral<P = T> + 'd) -> Self { + T::enable_and_reset(); + into_ref!(peri); + let instance = Self { _peripheral: peri }; + instance + } + + /// Start a new cipher operation. + /// Key size must be 128, 192, or 256 bits. + /// Initialization vector must only be supplied if necessary. + /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. + pub fn start<'c, C: Cipher<'c> + CipherSized + IVSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> { + let mut ctx: Context<'c, C> = Context { + dir, + last_block_processed: false, + cr: 0, + iv: [0; 4], + csgcmccm: [0; 8], + csgcm: [0; 8], + aad_complete: false, + header_len: 0, + payload_len: 0, + cipher: cipher, + phantom_data: PhantomData, + header_processed: false, + aad_buffer: [0; 16], + aad_buffer_len: 0, + }; + + T::regs().cr().modify(|w| w.set_crypen(false)); + + let key = ctx.cipher.key(); + + if key.len() == (128 / 8) { + T::regs().cr().modify(|w| w.set_keysize(0)); + } else if key.len() == (192 / 8) { + T::regs().cr().modify(|w| w.set_keysize(1)); + } else if key.len() == (256 / 8) { + T::regs().cr().modify(|w| w.set_keysize(2)); + } + + self.load_key(key); + + // Set data type to 8-bit. This will match software implementations. + T::regs().cr().modify(|w| w.set_datatype(2)); + + ctx.cipher.prepare_key(&T::regs()); + + ctx.cipher.set_algomode(&T::regs()); + + // Set encrypt/decrypt + if dir == Direction::Encrypt { + T::regs().cr().modify(|w| w.set_algodir(false)); + } else { + T::regs().cr().modify(|w| w.set_algodir(true)); + } + + // Load the IV into the registers. 
+ let iv = ctx.cipher.iv(); + let mut full_iv: [u8; 16] = [0; 16]; + full_iv[0..iv.len()].copy_from_slice(iv); + let mut iv_idx = 0; + let mut iv_word: [u8; 4] = [0; 4]; + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); + + // Flush in/out FIFOs + T::regs().cr().modify(|w| w.fflush()); + + ctx.cipher.init_phase(&T::regs()); + + self.store_context(&mut ctx); + + ctx + } + + #[cfg(any(cryp_v2, cryp_v3))] + /// Controls the header phase of cipher processing. + /// This function is only valid for GCM, CCM, and GMAC modes. + /// It only needs to be called if using one of these modes and there is associated data. + /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. + /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// When supplying the last block of AAD, `last_aad_block` must be `true`. + pub fn aad_blocking< + 'c, + const TAG_SIZE: usize, + C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>, + >( + &self, + ctx: &mut Context<'c, C>, + aad: &[u8], + last_aad_block: bool, + ) { + self.load_context(ctx); + + // Perform checks for correctness. + if ctx.aad_complete { + panic!("Cannot update AAD after starting payload!") + } + + ctx.header_len += aad.len() as u64; + + // Header phase + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + // First write the header B1 block if not yet written. + if !ctx.header_processed { + ctx.header_processed = true; + let header = ctx.cipher.get_header_block(); + ctx.aad_buffer[0..header.len()].copy_from_slice(header); + ctx.aad_buffer_len += header.len(); + } + + // Fill the header block to make a full block. + let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len); + ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]); + ctx.aad_buffer_len += len_to_copy; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + let mut aad_len_remaining = aad.len() - len_to_copy; + + if ctx.aad_buffer_len < C::BLOCK_SIZE { + // The buffer isn't full and this is the last buffer, so process it as is (already padded). + if last_aad_block { + let mut index = 0; + let end_index = C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } else { + // Just return because we don't yet have a full block to process. + return; + } + } else { + // Load the full block from the buffer. 
+ let mut index = 0; + let end_index = C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + + // Handle a partial block that is passed in. + ctx.aad_buffer_len = 0; + let leftovers = aad_len_remaining % C::BLOCK_SIZE; + ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]); + ctx.aad_buffer_len += leftovers; + ctx.aad_buffer[ctx.aad_buffer_len..].fill(0); + aad_len_remaining -= leftovers; + assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0); + + // Load full data blocks into core. + let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE; + for block in 0..num_full_blocks { + let mut index = len_to_copy + (block * C::BLOCK_SIZE); + let end_index = index + C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&aad[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + + if last_aad_block { + if leftovers > 0 { + let mut index = 0; + let end_index = C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } + + self.store_context(ctx); + } + + /// Performs encryption/decryption on the provided context. + /// The context determines algorithm, mode, and state of the crypto accelerator. + /// When the last piece of data is supplied, `last_block` should be `true`. + /// This function panics under various mismatches of parameters. + /// Input and output buffer lengths must match. + /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. + /// Padding or ciphertext stealing must be managed by the application for these modes. + /// Data must also be a multiple of block size unless `last_block` is `true`. + pub fn payload_blocking<'c, C: Cipher<'c> + CipherSized + IVSized>( + &self, + ctx: &mut Context<'c, C>, + input: &[u8], + output: &mut [u8], + last_block: bool, + ) { + self.load_context(ctx); + + let last_block_remainder = input.len() % C::BLOCK_SIZE; + + // Perform checks for correctness. 
+ if !ctx.aad_complete && ctx.header_len > 0 { + panic!("Additional associated data must be processed first!"); + } else if !ctx.aad_complete { + #[cfg(any(cryp_v2, cryp_v3))] + { + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + T::regs().cr().modify(|w| w.set_crypen(true)); + } + } + if ctx.last_block_processed { + panic!("The last block has already been processed!"); + } + if input.len() > output.len() { + panic!("Output buffer length must match input length."); + } + if !last_block { + if last_block_remainder != 0 { + panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE); + } + } + if C::REQUIRES_PADDING { + if last_block_remainder != 0 { + panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE); + } + } + if last_block { + ctx.last_block_processed = true; + } + + // Load data into core, block by block. + let num_full_blocks = input.len() / C::BLOCK_SIZE; + for block in 0..num_full_blocks { + let mut index = block * C::BLOCK_SIZE; + let end_index = index + C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&input[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + let mut index = block * C::BLOCK_SIZE; + let end_index = index + C::BLOCK_SIZE; + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Read block out + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + output[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + } + + // Handle the final block, which is incomplete. + if last_block_remainder > 0 { + let padding_len = C::BLOCK_SIZE - last_block_remainder; + let temp1 = ctx.cipher.pre_final_block(&T::regs(), ctx.dir, padding_len); + + let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); + let mut index = 0; + let end_index = C::BLOCK_SIZE; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&last_block[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + let mut index = 0; + let end_index = C::BLOCK_SIZE; + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Read block out + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + intermediate_data[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + + // Handle the last block depending on mode. + let output_len = output.len(); + output[output_len - last_block_remainder..output_len] + .copy_from_slice(&intermediate_data[0..last_block_remainder]); + + let mut mask: [u8; 16] = [0; 16]; + mask[..last_block_remainder].fill(0xFF); + ctx.cipher + .post_final_block(&T::regs(), ctx.dir, &mut intermediate_data, temp1, mask); + } + + ctx.payload_len += input.len() as u64; + + self.store_context(ctx); + } + + #[cfg(any(cryp_v2, cryp_v3))] + /// This function only needs to be called for GCM, CCM, and GMAC modes to + /// generate an authentication tag. 
+ pub fn finish_blocking< + 'c, + const TAG_SIZE: usize, + C: Cipher<'c> + CipherSized + IVSized + CipherAuthenticated<TAG_SIZE>, + >( + &self, + mut ctx: Context<'c, C>, + ) -> [u8; TAG_SIZE] { + self.load_context(&mut ctx); + + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + let headerlen1: u32 = ((ctx.header_len * 8) >> 32) as u32; + let headerlen2: u32 = (ctx.header_len * 8) as u32; + let payloadlen1: u32 = ((ctx.payload_len * 8) >> 32) as u32; + let payloadlen2: u32 = (ctx.payload_len * 8) as u32; + + #[cfg(cryp_v2)] + { + T::regs().din().write_value(headerlen1.swap_bytes()); + T::regs().din().write_value(headerlen2.swap_bytes()); + T::regs().din().write_value(payloadlen1.swap_bytes()); + T::regs().din().write_value(payloadlen2.swap_bytes()); + } + + #[cfg(cryp_v3)] + { + T::regs().din().write_value(headerlen1); + T::regs().din().write_value(headerlen2); + T::regs().din().write_value(payloadlen1); + T::regs().din().write_value(payloadlen2); + } + + while !T::regs().sr().read().ofne() {} + + let mut full_tag: [u8; 16] = [0; 16]; + full_tag[0..4].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + full_tag[4..8].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + full_tag[8..12].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + full_tag[12..16].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + let mut tag: [u8; TAG_SIZE] = [0; TAG_SIZE]; + tag.copy_from_slice(&full_tag[0..TAG_SIZE]); + + T::regs().cr().modify(|w| w.set_crypen(false)); + + tag + } + + fn load_key(&self, key: &[u8]) { + // Load the key into the registers. + let mut keyidx = 0; + let mut keyword: [u8; 4] = [0; 4]; + let keylen = key.len() * 8; + if keylen > 192 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 128 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 64 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).krr().write_value(u32::from_be_bytes(keyword)); + } + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(3).klr().write_value(u32::from_be_bytes(keyword)); + keyword = [0; 4]; + keyword[0..key.len() - keyidx].copy_from_slice(&key[keyidx..key.len()]); + T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword)); + } + + fn store_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &mut Context<'c, C>) { + // Wait for data block processing to finish. + while !T::regs().sr().read().ifem() {} + while T::regs().sr().read().ofne() {} + while T::regs().sr().read().busy() {} + + // Disable crypto processor. + T::regs().cr().modify(|w| w.set_crypen(false)); + + // Save the peripheral state. 
+ ctx.cr = T::regs().cr().read().0; + ctx.iv[0] = T::regs().init(0).ivlr().read(); + ctx.iv[1] = T::regs().init(0).ivrr().read(); + ctx.iv[2] = T::regs().init(1).ivlr().read(); + ctx.iv[3] = T::regs().init(1).ivrr().read(); + + #[cfg(any(cryp_v2, cryp_v3))] + for i in 0..8 { + ctx.csgcmccm[i] = T::regs().csgcmccmr(i).read(); + ctx.csgcm[i] = T::regs().csgcmr(i).read(); + } + } + + fn load_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &Context<'c, C>) { + // Reload state registers. + T::regs().cr().write(|w| w.0 = ctx.cr); + T::regs().init(0).ivlr().write_value(ctx.iv[0]); + T::regs().init(0).ivrr().write_value(ctx.iv[1]); + T::regs().init(1).ivlr().write_value(ctx.iv[2]); + T::regs().init(1).ivrr().write_value(ctx.iv[3]); + + #[cfg(any(cryp_v2, cryp_v3))] + for i in 0..8 { + T::regs().csgcmccmr(i).write_value(ctx.csgcmccm[i]); + T::regs().csgcmr(i).write_value(ctx.csgcm[i]); + } + self.load_key(ctx.cipher.key()); + + // Prepare key if applicable. + ctx.cipher.prepare_key(&T::regs()); + T::regs().cr().write(|w| w.0 = ctx.cr); + + // Enable crypto processor. + T::regs().cr().modify(|w| w.set_crypen(true)); + } +} + +pub(crate) mod sealed { + use super::*; + + pub trait Instance { + fn regs() -> pac::cryp::Cryp; + } +} + +/// CRYP instance trait. +pub trait Instance: sealed::Instance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send { + /// Interrupt for this CRYP instance. + type Interrupt: interrupt::typelevel::Interrupt; +} + +foreach_interrupt!( + ($inst:ident, cryp, CRYP, GLOBAL, $irq:ident) => { + impl Instance for peripherals::$inst { + type Interrupt = crate::interrupt::typelevel::$irq; + } + + impl sealed::Instance for peripherals::$inst { + fn regs() -> crate::pac::cryp::Cryp { + crate::pac::$inst + } + } + }; +); diff --git a/embassy-stm32/src/lib.rs b/embassy-stm32/src/lib.rs index 15798e115..ff77399b2 100644 --- a/embassy-stm32/src/lib.rs +++ b/embassy-stm32/src/lib.rs @@ -34,6 +34,8 @@ pub mod adc; pub mod can; #[cfg(crc)] pub mod crc; +#[cfg(cryp)] +pub mod cryp; #[cfg(dac)] pub mod dac; #[cfg(dcmi)] diff --git a/examples/stm32f7/Cargo.toml b/examples/stm32f7/Cargo.toml index 736e81723..305816a2b 100644 --- a/examples/stm32f7/Cargo.toml +++ b/examples/stm32f7/Cargo.toml @@ -30,6 +30,7 @@ embedded-storage = "0.3.1" static_cell = "2" sha2 = { version = "0.10.8", default-features = false } hmac = "0.12.1" +aes-gcm = {version = "0.10.3", default-features = false, features = ["aes", "heapless"] } [profile.release] debug = 2 diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs new file mode 100644 index 000000000..04927841a --- /dev/null +++ b/examples/stm32f7/src/bin/cryp.rs @@ -0,0 +1,74 @@ +#![no_std] +#![no_main] + +use aes_gcm::aead::heapless::Vec; +use aes_gcm::aead::{AeadInPlace, KeyInit}; +use aes_gcm::Aes128Gcm; +use defmt::info; +use embassy_executor::Spawner; +use embassy_stm32::cryp::*; +use embassy_stm32::Config; +use embassy_time::Instant; +use {defmt_rtt as _, panic_probe as _}; + +#[embassy_executor::main] +async fn main(_spawner: Spawner) -> ! 
{ + let config = Config::default(); + let p = embassy_stm32::init(config); + + let payload: &[u8] = b"hello world"; + let aad: &[u8] = b"additional data"; + + let hw_cryp = Cryp::new(p.CRYP); + let key: [u8; 16] = [0; 16]; + let mut ciphertext: [u8; 11] = [0; 11]; + let mut plaintext: [u8; 11] = [0; 11]; + let iv: [u8; 12] = [0; 12]; + + let hw_start_time = Instant::now(); + + // Encrypt in hardware using AES-GCM 128-bit + let aes_gcm = AesGcm::new(&key, &iv); + let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); + hw_cryp.aad_blocking(&mut gcm_encrypt, aad, true); + hw_cryp.payload_blocking(&mut gcm_encrypt, payload, &mut ciphertext, true); + let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); + + // Decrypt in hardware using AES-GCM 128-bit + let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); + hw_cryp.aad_blocking(&mut gcm_decrypt, aad, true); + hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); + let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + + let hw_end_time = Instant::now(); + let hw_execution_time = hw_end_time - hw_start_time; + + info!("AES-GCM Ciphertext: {:?}", ciphertext); + info!("AES-GCM Plaintext: {:?}", plaintext); + assert_eq!(payload, plaintext); + assert_eq!(encrypt_tag, decrypt_tag); + + let sw_start_time = Instant::now(); + + // Encrypt in software using AES-GCM 128-bit + let mut payload_vec: Vec<u8, 32> = Vec::from_slice(&payload).unwrap(); + let cipher = Aes128Gcm::new(&key.into()); + let _ = cipher.encrypt_in_place(&iv.into(), aad.into(), &mut payload_vec); + + assert_eq!(ciphertext, payload_vec[0..ciphertext.len()]); + assert_eq!( + encrypt_tag, + payload_vec[ciphertext.len()..ciphertext.len() + encrypt_tag.len()] + ); + + // Decrypt in software using AES-GCM 128-bit + let _ = cipher.decrypt_in_place(&iv.into(), aad.into(), &mut payload_vec); + + let sw_end_time = Instant::now(); + let sw_execution_time = sw_end_time - sw_start_time; + + info!("Hardware Execution Time: {:?}", hw_execution_time); + info!("Software Execution Time: {:?}", sw_execution_time); + + loop {} +} diff --git a/tests/stm32/Cargo.toml b/tests/stm32/Cargo.toml index 828a28e2c..bfe003a11 100644 --- a/tests/stm32/Cargo.toml +++ b/tests/stm32/Cargo.toml @@ -16,8 +16,8 @@ stm32f767zi = ["embassy-stm32/stm32f767zi", "chrono", "not-gpdma", "eth", "rng"] stm32g071rb = ["embassy-stm32/stm32g071rb", "cm0", "not-gpdma", "dac"] stm32g491re = ["embassy-stm32/stm32g491re", "chrono", "stop", "not-gpdma", "rng", "fdcan"] stm32h563zi = ["embassy-stm32/stm32h563zi", "chrono", "eth", "rng", "hash"] -stm32h753zi = ["embassy-stm32/stm32h753zi", "chrono", "not-gpdma", "eth", "rng", "fdcan", "hash"] -stm32h755zi = ["embassy-stm32/stm32h755zi-cm7", "chrono", "not-gpdma", "eth", "dac", "rng", "fdcan", "hash"] +stm32h753zi = ["embassy-stm32/stm32h753zi", "chrono", "not-gpdma", "eth", "rng", "fdcan", "hash", "cryp"] +stm32h755zi = ["embassy-stm32/stm32h755zi-cm7", "chrono", "not-gpdma", "eth", "dac", "rng", "fdcan", "hash", "cryp"] stm32h7a3zi = ["embassy-stm32/stm32h7a3zi", "not-gpdma", "rng", "fdcan"] stm32l073rz = ["embassy-stm32/stm32l073rz", "cm0", "not-gpdma", "rng"] stm32l152re = ["embassy-stm32/stm32l152re", "chrono", "not-gpdma"] @@ -33,6 +33,7 @@ stm32wl55jc = ["embassy-stm32/stm32wl55jc-cm4", "not-gpdma", "rng", "chrono"] stm32f091rc = ["embassy-stm32/stm32f091rc", "cm0", "not-gpdma", "chrono"] stm32h503rb = ["embassy-stm32/stm32h503rb", "rng"] +cryp = [] hash = [] eth = ["embassy-executor/task-arena-size-16384"] rng = [] @@ -80,6 +81,7 
@@ portable-atomic = { version = "1.5", features = [] } chrono = { version = "^0.4", default-features = false, optional = true} sha2 = { version = "0.10.8", default-features = false } hmac = "0.12.1" +aes-gcm = {version = "0.10.3", default-features = false, features = ["aes", "heapless"] } # BEGIN TESTS # Generated by gen_test.py. DO NOT EDIT. @@ -88,6 +90,11 @@ name = "can" path = "src/bin/can.rs" required-features = [ "can",] +[[bin]] +name = "cryp" +path = "src/bin/cryp.rs" +required-features = [ "cryp",] + [[bin]] name = "dac" path = "src/bin/dac.rs" diff --git a/tests/stm32/src/bin/cryp.rs b/tests/stm32/src/bin/cryp.rs new file mode 100644 index 000000000..f105abf26 --- /dev/null +++ b/tests/stm32/src/bin/cryp.rs @@ -0,0 +1,69 @@ +// required-features: cryp +#![no_std] +#![no_main] + +#[path = "../common.rs"] +mod common; + +use aes_gcm::aead::heapless::Vec; +use aes_gcm::aead::{AeadInPlace, KeyInit}; +use aes_gcm::Aes128Gcm; +use common::*; +use embassy_executor::Spawner; +use embassy_stm32::cryp::*; +use {defmt_rtt as _, panic_probe as _}; + +#[embassy_executor::main] +async fn main(_spawner: Spawner) { + let p: embassy_stm32::Peripherals = embassy_stm32::init(config()); + + const PAYLOAD1: &[u8] = b"payload data 1 ;zdfhzdfhS;GKJASBDG;ASKDJBAL,zdfhzdfhzdfhzdfhvljhb,jhbjhb,sdhsdghsdhsfhsghzdfhzdfhzdfhzdfdhsdthsthsdhsgaadfhhgkdgfuoyguoft6783567"; + const PAYLOAD2: &[u8] = b"payload data 2 ;SKEzdfhzdfhzbhgvljhb,jhbjhb,sdhsdghsdhsfhsghshsfhshstsdthadfhsdfjhsfgjsfgjxfgjzdhgDFghSDGHjtfjtjszftjzsdtjhstdsdhsdhsdhsdhsdthsthsdhsgfh"; + const AAD1: &[u8] = b"additional data 1 stdargadrhaethaethjatjatjaetjartjstrjsfkk;'jopofyuisrteytweTASTUIKFUKIXTRDTEREharhaeryhaterjartjarthaethjrtjarthaetrhartjatejatrjsrtjartjyt1"; + const AAD2: &[u8] = b"additional data 2 stdhthsthsthsrthsrthsrtjdykjdukdyuldadfhsdghsdghsdghsadghjk'hioethjrtjarthaetrhartjatecfgjhzdfhgzdfhzdfghzdfhzdfhzfhjatrjsrtjartjytjfytjfyg"; + + let hw_cryp = Cryp::new(p.CRYP); + let key: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let mut ciphertext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; + let mut plaintext: [u8; PAYLOAD1.len() + PAYLOAD2.len()] = [0; PAYLOAD1.len() + PAYLOAD2.len()]; + let iv: [u8; 12] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + + // Encrypt in hardware using AES-GCM 128-bit + let aes_gcm = AesGcm::new(&key, &iv); + let mut gcm_encrypt = hw_cryp.start(&aes_gcm, Direction::Encrypt); + hw_cryp.aad_blocking(&mut gcm_encrypt, AAD1, false); + hw_cryp.aad_blocking(&mut gcm_encrypt, AAD2, true); + hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD1, &mut ciphertext[..PAYLOAD1.len()], false); + hw_cryp.payload_blocking(&mut gcm_encrypt, PAYLOAD2, &mut ciphertext[PAYLOAD1.len()..], true); + let encrypt_tag = hw_cryp.finish_blocking(gcm_encrypt); + + // Decrypt in hardware using AES-GCM 128-bit + let mut gcm_decrypt = hw_cryp.start(&aes_gcm, Direction::Decrypt); + hw_cryp.aad_blocking(&mut gcm_decrypt, AAD1, false); + hw_cryp.aad_blocking(&mut gcm_decrypt, AAD2, true); + hw_cryp.payload_blocking(&mut gcm_decrypt, &ciphertext, &mut plaintext, true); + let decrypt_tag = hw_cryp.finish_blocking(gcm_decrypt); + + info!("AES-GCM Ciphertext: {:?}", ciphertext); + info!("AES-GCM Plaintext: {:?}", plaintext); + defmt::assert!(PAYLOAD1 == &plaintext[..PAYLOAD1.len()]); + defmt::assert!(PAYLOAD2 == &plaintext[PAYLOAD1.len()..]); + defmt::assert!(encrypt_tag == decrypt_tag); + + // Encrypt in software using AES-GCM 128-bit + let mut payload_vec: Vec<u8, { PAYLOAD1.len() + 
PAYLOAD2.len() + 16 }> = Vec::from_slice(&PAYLOAD1).unwrap(); + payload_vec.extend_from_slice(&PAYLOAD2).unwrap(); + let cipher = Aes128Gcm::new(&key.into()); + let mut aad: Vec<u8, { AAD1.len() + AAD2.len() }> = Vec::from_slice(&AAD1).unwrap(); + aad.extend_from_slice(&AAD2).unwrap(); + let _ = cipher.encrypt_in_place(&iv.into(), &aad, &mut payload_vec); + + defmt::assert!(ciphertext == payload_vec[0..ciphertext.len()]); + defmt::assert!(encrypt_tag == payload_vec[ciphertext.len()..ciphertext.len() + encrypt_tag.len()]); + + // Decrypt in software using AES-GCM 128-bit + let _ = cipher.decrypt_in_place(&iv.into(), &aad, &mut payload_vec); + + info!("Test OK"); + cortex_m::asm::bkpt(); +}
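
The example and HIL test above only exercise AES-GCM. A minimal sketch of how the new API composes for AES-CCM, under the same assumptions as the examples (a target whose CRYP is `cryp_v2`/`cryp_v3`); the key, nonce, and buffer values are placeholders. Note that `AesCcm::new` takes the AAD and payload lengths up front because CCM encodes both into block0:

```rust
use embassy_stm32::cryp::{AesCcm, Cryp, Direction};
use embassy_stm32::Peripherals;

fn ccm_roundtrip_sketch(p: Peripherals) {
    let cryp = Cryp::new(p.CRYP);

    let key: [u8; 16] = [0; 16]; // 128-bit key
    let iv: [u8; 13] = [0; 13]; // CCM nonces of 7..=13 bytes have `IVSized` impls
    let aad: &[u8] = b"associated data";
    let payload: &[u8] = b"hello ccm";
    let mut ciphertext = [0u8; 9];

    // TAG_SIZE is a const generic on the cipher type (8 bytes here), so it is
    // chosen via the type annotation rather than a constructor argument.
    let ccm: AesCcm<'_, 16, 8, 13> = AesCcm::new(&key, &iv, aad.len(), payload.len());

    let mut ctx = cryp.start(&ccm, Direction::Encrypt);
    cryp.aad_blocking(&mut ctx, aad, true);
    cryp.payload_blocking(&mut ctx, payload, &mut ciphertext, true);
    let _tag: [u8; 8] = cryp.finish_blocking(ctx);
}
```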
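The DES/TDES paths are likewise not exercised. A hedged sketch of TDES-CBC illustrating the `REQUIRES_PADDING` contract: `payload_blocking` panics unless the input is a whole number of 8-byte DES blocks, so padding (or ciphertext stealing) is the application's responsibility. Values are placeholders:

```rust
use embassy_stm32::cryp::{Cryp, Direction, TdesCbc};
use embassy_stm32::Peripherals;

fn tdes_cbc_sketch(p: Peripherals) {
    let cryp = Cryp::new(p.CRYP);

    let key: [u8; 21] = [0; 21]; // 168-bit TDES key ({ 168 / 8 } bytes)
    let iv: [u8; 8] = [0; 8]; // one 64-bit DES block

    let tdes = TdesCbc::new(&key, &iv);
    let mut ctx = cryp.start(&tdes, Direction::Encrypt);

    // Two full 8-byte blocks; a partial final block would panic in this mode.
    let plaintext: [u8; 16] = *b"0123456789abcdef";
    let mut ciphertext = [0u8; 16];
    cryp.payload_blocking(&mut ctx, &plaintext, &mut ciphertext, true);
}
```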