Skip to main content

AirLibrary/Security/
mod.rs

1//! # Security Module
2//!
3//! Comprehensive security features for Air including:
4//! - Rate limiting with token bucket algorithm (per-IP and per-client)
5//! - Checksum verification for file integrity
6//! - Secure credential storage with encryption
7//! - Timing attack protection for sensitive operations
8//! - Secure memory handling with zeroization
9//! - Key rotation and management
10//! - Security event auditing and logging
11//!
12//! ## VSCode Security References
13//!
14//! This security module aligns with VSCode's security patterns:
15//! - Rate limiting similar to VSCode's API rate limiting
16//! - Secure credential storage matching VSCode's secret storage
17//! - File integrity verification similar to VSCode's extension verification
18//! - Security audit logging inspired by VSCode's telemetry security events
19//!
20//! ## Security Model for External Connections
21//!
22//! The security module implements a defense-in-depth approach for external
23//! connections:
24//!
25//! ### Network Security
26//! - Rate limiting prevents abuse and DoS attacks
27//! - IP-based rate limiting limits impact per client
28//! - Client-based rate limiting limits impact per authenticated client
29//! - Connection pooling limits total concurrent connections
30//!
31//! ### Authentication Security
32//! - Secure credential storage with AES-GCM encryption
33//! - PBKDF2 key derivation with high iteration count
34//! - Timing attack protection for password comparisons
35//! - Secure token generation and validation
36//!
37//! ### Data Security
38//! - SHA-256 checksum verification for file integrity
39//! - AES-GCM encryption for credential storage
40//! - Key wrapping for master key protection
41//! - Secure memory handling with zeroization
42//!
43//! ### Audit and Monitoring
44//! - Comprehensive security event logging
45//! - Failed authentication attempts tracking
46//! - Rate limit violation logging
47//! - Security metric collection for Mountain integration
48//!
49//! ## Mountain Settings Integration
50//!
51//! Security policies are integrated with Mountain settings:
52//! - Rate limit thresholds configurable via Mountain settings
53//! - Security event thresholds configurable via Mountain settings
54//! - Alert notification channels configured via Mountain
55//! - Security metric retention configured via Mountain
56//!
57//! ## FUTURE Enhancements
58//!
59//! - Implement HSM (Hardware Security Module) integration for key storage
60//! - Add support for hardware-backed key generation and storage
61//! - Implement certificate pinning for external API connections
62//! - Add support for TLS 1.3 with perfect forward secrecy
63//! - Implement security policy enforcement and validation
64//! - Add support for multi-factor authentication
65//! - Implement security compliance reporting (SOC2, PCI-DSS, etc.)
66//! - Add real-time security threat detection and response
67//! - Implement secure communication channels with VSCode extensions
//! - Add support for encrypted data at rest with multiple keys
//!
//! ## Timing Attack Protection
70//!
71//! The module implements constant-time operations for sensitive comparisons:
72//! - Password comparisons use constant-time algorithms
73//! - Token comparisons are timing-attack resistant
74//! - Hash comparisons use fixed-time comparison functions
75//! - Authentication response timing is normalized
76//!
77//! ## Secure Memory Handling
78//!
79//! Sensitive data in memory is protected through:
80//! - Zeroization on drop for secure data structures
81//! - Memory encryption for sensitive buffers
82//! - Stack canaries for overflow detection
83//! - Memory locking to prevent swapping
84//!
85//! ## Key Rotation
86//!
87//! Key rotation is supported through:
88//! - Automatic key rotation hooks for periodic key updates
89//! - Key versioning for backward compatibility
90//! - Secure key storage with key wrapping
91//! - Key rotation event logging and auditing
92//!
93//! ## Security Event Auditing
94//!
95//! All security events are logged for auditing:
96//! - Authentication attempts (success and failure)
97//! - Rate limit violations
98//! - Key rotations
99//! - Security configuration changes
100//! - Access control violations
101//!
102//! Security events are forwarded to Mountain for correlation and alerting.
103
use std::{collections::HashMap, sync::Arc};

use base64::{Engine, engine::general_purpose::STANDARD};
// `RngCore` is required in scope for `fill_bytes` on the thread-local RNG;
// importing only `Rng` does not bring supertrait methods into scope.
use rand::{Rng, RngCore, rng};
use ring::pbkdf2;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use subtle::ConstantTimeEq;
use tokio::sync::RwLock;
use zeroize::Zeroize;

use crate::{AirError, Result, dev_log};
116
/// Secure byte array that zeroizes its memory on drop.
///
/// NOTE(review): this type derives `Serialize`, so the secret bytes can be
/// written out by any serializer — confirm that is intentional.
#[derive(Clone, Deserialize, Serialize)]
pub struct SecureBytes {
	/// The underlying secret bytes; wiped by the `Drop` impl.
	Data:Vec<u8>,
}
123
124impl SecureBytes {
125	/// Create a new secure byte array
126	pub fn new(Data:Vec<u8>) -> Self { Self { Data } }
127
128	/// Create from a string
129	pub fn from_str(S:&str) -> Self { Self { Data:S.as_bytes().to_vec() } }
130
131	/// Get the data as a slice (constant-time)
132	pub fn as_slice(&self) -> &[u8] { &self.Data }
133
134	/// Get the length
135	pub fn len(&self) -> usize { self.Data.len() }
136
137	/// Check if empty
138	pub fn is_empty(&self) -> bool { self.Data.is_empty() }
139
140	/// Constant-time comparison
141	pub fn ct_eq(&self, Other:&Self) -> bool { self.Data.ct_eq(&Other.Data).into() }
142}
143
// Wipe the buffer when the value is dropped so secret material does not
// linger in freed memory; the `zeroize` crate guards against the compiler
// optimizing the dead stores away.
impl Drop for SecureBytes {
	fn drop(&mut self) { self.Data.zeroize(); }
}
147
/// A single security audit log entry, serializable for forwarding to
/// external monitoring.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecurityEvent {
	/// Event timestamp from `crate::Utility::CurrentTimestamp()`
	/// (presumably Unix seconds — confirm against that helper).
	pub Timestamp:u64,
	/// Classification of what happened.
	pub EventType:SecurityEventType,
	/// Event severity.
	pub Severity:SecuritySeverity,
	/// Source IP address (if applicable).
	pub SourceIp:Option<String>,
	/// Client ID (if applicable).
	pub ClientId:Option<String>,
	/// Human-readable event description.
	pub Details:String,
	/// Additional free-form metadata key/value pairs.
	pub Metadata:HashMap<String, String>,
}
166
/// Security event types recorded by the auditor.
///
/// `PartialEq` is derived so histories can be filtered by type
/// (see `SecurityAuditor::GetEvents`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SecurityEventType {
	/// Authentication attempt succeeded
	AuthSuccess,
	/// Authentication attempt failed
	AuthFailure,
	/// Rate limit violation
	RateLimitViolation,
	/// Key rotation performed
	KeyRotation,
	/// Configuration changed
	ConfigChange,
	/// Access denied
	AccessDenied,
	/// Encryption key generated
	KeyGenerated,
	/// Decryption failure
	DecryptionFailure,
	/// File integrity check failed
	IntegrityCheckFailed,
	/// Security policy violation
	PolicyViolation,
}
191
/// Security severity levels, listed from least to most severe.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SecuritySeverity {
	/// Routine events recorded for completeness.
	Informational,
	/// Unusual but non-failing conditions.
	Warning,
	/// Failures affecting a single operation.
	Error,
	/// Events requiring immediate attention.
	Critical,
}
200
/// Security auditor that records `SecurityEvent`s in a bounded in-memory log.
pub struct SecurityAuditor {
	/// Event history, oldest first; shared between clones via `Arc`.
	events:Arc<RwLock<Vec<SecurityEvent>>>,
	/// Maximum number of events retained before the oldest are dropped.
	retention:usize,
}
208
209impl SecurityAuditor {
210	/// Create a new security auditor
211	pub fn new(retention:usize) -> Self { Self { events:Arc::new(RwLock::new(Vec::new())), retention } }
212
213	/// Log a security event
214	pub async fn LogEvent(&self, event:SecurityEvent) {
215		let mut events = self.events.write().await;
216		events.push(event.clone());
217
218		// Trim to retention limit
219		if events.len() > self.retention {
220			events.remove(0);
221		}
222
223		// Log to system logger
224		dev_log!(
225			"security",
226			"{:?}: {} - {}",
227			event.EventType,
228			event.Details,
229			event.SourceIp.as_deref().unwrap_or("N/A")
230		);
231
232		// In production, forward to Mountain monitoring
233	}
234
235	/// Get event history
236	pub async fn GetEvents(&self, event_type:Option<SecurityEventType>, limit:Option<usize>) -> Vec<SecurityEvent> {
237		let events = self.events.read().await;
238
239		let mut filtered:Vec<SecurityEvent> = if let Some(evt_type) = event_type {
240			events.iter().filter(|e| e.EventType == evt_type).cloned().collect()
241		} else {
242			events.clone()
243		};
244
245		// Reverse to get most recent first
246		filtered.reverse();
247
248		// Apply limit
249		if let Some(limit) = limit {
250			filtered.truncate(limit);
251		}
252
253		filtered
254	}
255
256	/// Get recent critical events
257	pub async fn GetCriticalEvents(&self, limit:usize) -> Vec<SecurityEvent> {
258		self.GetEvents(None, Some(limit))
259			.await
260			.into_iter()
261			.filter(|e| e.Severity == SecuritySeverity::Critical)
262			.collect()
263	}
264}
265
266impl Clone for SecurityAuditor {
267	fn clone(&self) -> Self { Self { events:self.events.clone(), retention:self.retention } }
268}
269
/// Rate limiting configuration.
///
/// Plain serializable data so thresholds can be loaded from settings;
/// see the `Default` impl for the built-in values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitConfig {
	/// Sustained requests per second allowed per source IP.
	pub requests_per_second_ip:u32,

	/// Sustained requests per second allowed per authenticated client.
	pub requests_per_second_client:u32,

	/// Token bucket capacity (maximum burst size) for both limiters.
	pub burst_capacity:u32,

	/// Token refill interval in milliseconds.
	/// NOTE(review): not currently read by `RateLimiter` — refill is computed
	/// continuously from elapsed time; confirm before relying on this field.
	pub refill_interval_ms:u64,
}
285
286impl Default for RateLimitConfig {
287	fn default() -> Self {
288		Self {
289			requests_per_second_ip:100,
290			requests_per_second_client:50,
291			burst_capacity:200,
292			refill_interval_ms:100,
293		}
294	}
295}
296
/// Token bucket used by the rate limiter.
///
/// Tokens accumulate continuously at `refill_rate` per second up to
/// `capacity`; each request withdraws tokens if enough are available.
#[derive(Debug, Clone)]
struct TokenBucket {
	tokens:f64,
	capacity:f64,
	refill_rate:f64,
	last_refill:std::time::Instant,
}

impl TokenBucket {
	/// Create a bucket that starts full.
	fn new(capacity:f64, refill_rate:f64) -> Self {
		Self { tokens:capacity, capacity, refill_rate, last_refill:std::time::Instant::now() }
	}

	/// Credit tokens earned since the last refill, capped at capacity.
	fn refill(&mut self) {
		let now = std::time::Instant::now();
		let earned = now.duration_since(self.last_refill).as_secs_f64() * self.refill_rate;
		self.last_refill = now;
		self.tokens = self.capacity.min(self.tokens + earned);
	}

	/// Attempt to withdraw `tokens`; returns whether the withdrawal succeeded.
	fn try_consume(&mut self, tokens:f64) -> bool {
		self.refill();
		let allowed = self.tokens >= tokens;
		if allowed {
			self.tokens -= tokens;
		}
		allowed
	}
}
328
/// Rate limiter with independent per-IP and per-client token buckets.
pub struct RateLimiter {
	/// Limits and bucket parameters.
	config:RateLimitConfig,
	/// One token bucket per source IP, created lazily on first request.
	ip_buckets:Arc<RwLock<HashMap<String, TokenBucket>>>,
	/// One token bucket per client id, created lazily on first request.
	client_buckets:Arc<RwLock<HashMap<String, TokenBucket>>>,
	/// Buckets idle longer than this are evicted by the cleanup passes.
	cleanup_interval:std::time::Duration,
}
336
337impl RateLimiter {
338	/// Create a new rate limiter
339	pub fn New(config:RateLimitConfig) -> Self {
340		let cleanup_interval = std::time::Duration::from_secs(300); // 5 minutes
341
342		Self {
343			config,
344			ip_buckets:Arc::new(RwLock::new(HashMap::new())),
345			client_buckets:Arc::new(RwLock::new(HashMap::new())),
346			cleanup_interval,
347		}
348	}
349
350	/// Check if request from IP is allowed
351	pub async fn CheckIpRateLimit(&self, ip:&str) -> Result<bool> {
352		let mut buckets = self.ip_buckets.write().await;
353
354		let refill_rate = self.config.requests_per_second_ip as f64;
355		let bucket = buckets
356			.entry(ip.to_string())
357			.or_insert_with(|| TokenBucket::new(self.config.burst_capacity as f64, refill_rate));
358
359		Ok(bucket.try_consume(1.0))
360	}
361
362	/// Check if request from client is allowed
363	pub async fn CheckClientRateLimit(&self, client_id:&str) -> Result<bool> {
364		let mut buckets = self.client_buckets.write().await;
365
366		let refill_rate = self.config.requests_per_second_client as f64;
367		let bucket = buckets
368			.entry(client_id.to_string())
369			.or_insert_with(|| TokenBucket::new(self.config.burst_capacity as f64, refill_rate));
370
371		Ok(bucket.try_consume(1.0))
372	}
373
374	/// Check both IP and client rate limits
375	pub async fn CheckRateLimit(&self, ip:&str, client_id:&str) -> Result<bool> {
376		let ip_allowed = self.CheckIpRateLimit(ip).await?;
377		let client_allowed = self.CheckClientRateLimit(client_id).await?;
378
379		Ok(ip_allowed && client_allowed)
380	}
381
382	/// Get current rate limit status for IP
383	pub async fn GetIpStatus(&self, ip:&str) -> RateLimitStatus {
384		let buckets = self.ip_buckets.read().await;
385
386		if let Some(bucket) = buckets.get(ip) {
387			RateLimitStatus {
388				remaining_tokens:bucket.tokens as u32,
389				capacity:bucket.capacity as u32,
390				refill_rate:bucket.refill_rate as u32,
391			}
392		} else {
393			RateLimitStatus {
394				remaining_tokens:self.config.burst_capacity,
395				capacity:self.config.burst_capacity,
396				refill_rate:self.config.requests_per_second_ip,
397			}
398		}
399	}
400
401	/// Get current rate limit status for client
402	pub async fn GetClientStatus(&self, client_id:&str) -> RateLimitStatus {
403		let buckets = self.client_buckets.read().await;
404
405		if let Some(bucket) = buckets.get(client_id) {
406			RateLimitStatus {
407				remaining_tokens:bucket.tokens as u32,
408				capacity:bucket.capacity as u32,
409				refill_rate:bucket.refill_rate as u32,
410			}
411		} else {
412			RateLimitStatus {
413				remaining_tokens:self.config.burst_capacity,
414				capacity:self.config.burst_capacity,
415				refill_rate:self.config.requests_per_second_client,
416			}
417		}
418	}
419
420	/// Clean up old buckets
421	pub async fn CleanupStaleBuckets(&self) {
422		let now = std::time::Instant::now();
423
424		let mut ip_buckets = self.ip_buckets.write().await;
425		ip_buckets.retain(|_, bucket| now.duration_since(bucket.last_refill) < self.cleanup_interval);
426
427		let mut client_buckets = self.client_buckets.write().await;
428		client_buckets.retain(|_, bucket| now.duration_since(bucket.last_refill) < self.cleanup_interval);
429
430		// Cleanup completed - stale buckets removed
431	}
432
433	/// Start background cleanup task
434	pub fn StartCleanupTask(&self) -> tokio::task::JoinHandle<()> {
435		let ip_buckets = self.ip_buckets.clone();
436		let client_buckets = self.client_buckets.clone();
437		let cleanup_interval = self.cleanup_interval;
438
439		tokio::spawn(async move {
440			let mut interval = tokio::time::interval(cleanup_interval);
441
442			loop {
443				interval.tick().await;
444
445				let now = std::time::Instant::now();
446
447				let mut buckets = ip_buckets.write().await;
448				buckets.retain(|_, bucket| now.duration_since(bucket.last_refill) < cleanup_interval);
449
450				let mut buckets = client_buckets.write().await;
451				buckets.retain(|_, bucket| now.duration_since(bucket.last_refill) < cleanup_interval);
452			}
453		})
454	}
455}
456
457impl Clone for RateLimiter {
458	fn clone(&self) -> Self {
459		Self {
460			config:self.config.clone(),
461			ip_buckets:self.ip_buckets.clone(),
462			client_buckets:self.client_buckets.clone(),
463			cleanup_interval:self.cleanup_interval,
464		}
465	}
466}
467
/// Snapshot of a rate limit bucket's state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitStatus {
	/// Tokens currently available (fractional tokens truncated).
	pub remaining_tokens:u32,
	/// Maximum burst capacity of the bucket.
	pub capacity:u32,
	/// Refill rate in tokens (requests) per second.
	pub refill_rate:u32,
}
475
476/// Checksum verification for file integrity
477pub struct ChecksumVerifier;
478
479impl ChecksumVerifier {
480	/// Create a new ChecksumVerifier
481	pub fn New() -> Self { Self }
482	/// Calculate SHA-256 checksum of a file
483	pub async fn CalculateSha256(&self, file_path:&std::path::Path) -> Result<String> {
484		let content = tokio::fs::read(file_path)
485			.await
486			.map_err(|e| AirError::FileSystem(format!("Failed to read file: {}", e)))?;
487
488		let mut hasher = Sha256::new();
489		hasher.update(&content);
490		// sha2 0.11: see note in Indexing/Scan/ScanFile.rs - `hex::encode`
491		// replaces the removed `LowerHex` impl on the digest output.
492		let checksum = hex::encode(hasher.finalize());
493
494		Ok(checksum)
495	}
496
497	/// Verify file checksum with constant-time comparison
498	pub async fn VerifySha256(&self, file_path:&std::path::Path, expected_checksum:&str) -> Result<bool> {
499		let actual = self.CalculateSha256(file_path).await?;
500
501		// Use constant-time comparison
502		let actual_bytes = actual.as_bytes();
503		let expected_bytes = expected_checksum.as_bytes();
504
505		let result = actual_bytes.ct_eq(expected_bytes);
506
507		Ok(result.into())
508	}
509
510	/// Calculate checksum from bytes
511	pub fn CalculateSha256Bytes(&self, data:&[u8]) -> String {
512		let mut hasher = Sha256::new();
513		hasher.update(data);
514		hex::encode(hasher.finalize())
515	}
516
517	/// Calculate MD5 checksum (legacy support)
518	pub async fn CalculateMd5(&self, file_path:&std::path::Path) -> Result<String> {
519		let content = tokio::fs::read(file_path)
520			.await
521			.map_err(|e| AirError::FileSystem(format!("Failed to read file: {}", e)))?;
522
523		let digest = md5::compute(&content);
524		Ok(format!("{:x}", digest))
525	}
526
527	/// Constant-time compare two checksum strings
528	pub fn ConstantTimeCompare(&self, a:&str, b:&str) -> bool {
529		if a.len() != b.len() {
530			return false;
531		}
532		a.as_bytes().ct_eq(b.as_bytes()).into()
533	}
534}
535
/// Secure credential storage.
///
/// NOTE(review): advertised as AES-GCM, but the current cipher is a keyed
/// XOR stream (see `EncryptCredential`) — confirm before treating this as
/// production-grade encryption.
pub struct SecureStorage {
	/// Encrypted credentials keyed by caller-supplied name; shared via `Arc`.
	credentials:Arc<RwLock<HashMap<String, EncryptedCredential>>>,

	/// Master key material; zeroized on drop via `SecureBytes`.
	master_key:SecureBytes,

	/// Key version recorded on stored credentials (rotation support).
	key_version:u32,

	/// Auditor that receives storage-related security events.
	auditor:SecurityAuditor,
}
550
/// Encrypted credential record.
///
/// NOTE(review): despite the AES-GCM naming, the cipher text is currently
/// produced by a keyed XOR (see `SecureStorage::EncryptCredential`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptedCredential {
	/// Base64-encoded cipher text.
	pub cipher_text:String,
	/// Base64-encoded per-credential salt used for subkey derivation.
	pub salt:String,
	/// Base64-encoded 12-byte nonce.
	pub nonce:String,
	/// Version of the master key this credential was encrypted under.
	pub key_version:u32,
	/// Creation timestamp.
	pub created_at:u64,
}
560
/// Summary returned by `SecureStorage::RotateMasterKey`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KeyRotationResult {
	/// Key version before the rotation.
	pub old_key_version:u32,
	/// Key version after the rotation.
	pub new_key_version:u32,
	/// Number of credentials re-encrypted (currently always 0 — see
	/// `RotateMasterKey`).
	pub credentials_rotated:usize,
	/// Timestamp of the rotation.
	pub timestamp:u64,
}
569
570impl SecureStorage {
571	/// Create a new secure storage with a master key
572	pub fn New(master_key:Vec<u8>, auditor:SecurityAuditor) -> Self {
573		let key = SecureBytes::new(master_key);
574
575		// Log key generation event
576		let event = SecurityEvent {
577			Timestamp:crate::Utility::CurrentTimestamp(),
578			EventType:SecurityEventType::KeyGenerated,
579			Severity:SecuritySeverity::Warning,
580			SourceIp:None,
581			ClientId:None,
582			Details:"Master key generated for secure storage".to_string(),
583			Metadata:{
584				let mut meta = HashMap::new();
585				meta.insert("key_version".to_string(), "1".to_string());
586				meta
587			},
588		};
589
590		let auditor_clone = auditor.clone();
591		tokio::spawn(async move {
592			auditor_clone.LogEvent(event).await;
593		});
594
595		Self {
596			credentials:Arc::new(RwLock::new(HashMap::new())),
597			master_key:key,
598			key_version:1,
599			auditor,
600		}
601	}
602
603	/// Generate a secure master key from password using PBKDF2
604	pub fn DeriveKeyFromPassword(password:&str, salt:Option<&[u8]>) -> (Vec<u8>, [u8; 16]) {
605		const N_ITERATIONS:u32 = 100_000;
606		const CREDENTIAL_LEN:usize = 32;
607
608		let mut key_salt = [0u8; 16];
609
610		if let Some(provided_salt) = salt {
611			if provided_salt.len() >= 16 {
612				key_salt.copy_from_slice(&provided_salt[..16]);
613			} else {
614				key_salt[..provided_salt.len()].copy_from_slice(provided_salt);
615			}
616		} else {
617			let mut rng = rng();
618			rng.fill_bytes(&mut key_salt);
619		}
620
621		let mut key = vec![0u8; CREDENTIAL_LEN];
622		pbkdf2::derive(
623			pbkdf2::PBKDF2_HMAC_SHA256,
624			std::num::NonZeroU32::new(N_ITERATIONS).unwrap(),
625			&key_salt,
626			password.as_bytes(),
627			&mut key,
628		);
629
630		(key, key_salt)
631	}
632
633	/// Store a credential encrypted with AES-GCM
634	pub async fn Store(&self, key:&str, credential:&str) -> Result<()> {
635		let mut rng = rng();
636		let mut nonce = [0u8; 12];
637		rng.fill_bytes(&mut nonce);
638
639		// Generate a random salt for this credential
640		let mut salt = [0u8; 16];
641		rng.fill_bytes(&mut salt);
642
643		// Encrypt using AES-GCM
644		let cipher_text = self.EncryptCredential(credential, &nonce, &salt)?;
645
646		let salt_b64 = STANDARD.encode(&salt);
647		let nonce_b64 = STANDARD.encode(&nonce);
648
649		let encrypted = EncryptedCredential {
650			cipher_text,
651			salt:salt_b64,
652			nonce:nonce_b64,
653			key_version:self.key_version,
654			created_at:crate::Utility::CurrentTimestamp(),
655		};
656
657		let mut storage = self.credentials.write().await;
658		storage.insert(key.to_string(), encrypted);
659
660		// Log credential storage event
661		let event = SecurityEvent {
662			Timestamp:crate::Utility::CurrentTimestamp(),
663			EventType:SecurityEventType::ConfigChange,
664			Severity:SecuritySeverity::Informational,
665			SourceIp:None,
666			ClientId:None,
667			Details:format!("Credential stored for key: {}", key),
668			Metadata:HashMap::new(),
669		};
670
671		self.auditor.LogEvent(event).await;
672
673		Ok(())
674	}
675
676	/// Retrieve and decrypt a credential
677	pub async fn Retrieve(&self, key:&str) -> Result<Option<String>> {
678		let storage = self.credentials.read().await;
679
680		match storage.get(key) {
681			Some(encrypted) => {
682				let nonce = STANDARD
683					.decode(&encrypted.nonce)
684					.map_err(|e| AirError::Internal(format!("Failed to decode nonce: {}", e)))?;
685
686				let salt = STANDARD
687					.decode(&encrypted.salt)
688					.map_err(|e| AirError::Internal(format!("Failed to decode salt: {}", e)))?;
689
690				let credential = self.DecryptCredential(&encrypted.cipher_text, &nonce, &salt)?;
691
692				// Log credential retrieval event (without exposing the credential)
693				let event = SecurityEvent {
694					Timestamp:crate::Utility::CurrentTimestamp(),
695					EventType:SecurityEventType::AuthSuccess,
696					Severity:SecuritySeverity::Informational,
697					SourceIp:None,
698					ClientId:None,
699					Details:format!("Credential retrieved for key: {}", key),
700					Metadata:HashMap::new(),
701				};
702
703				// Drop read lock before logging
704				drop(storage);
705				self.auditor.LogEvent(event).await;
706
707				Ok(Some(credential))
708			},
709			None => Ok(None),
710		}
711	}
712
713	/// Encrypt credential data using AES-GCM
714	fn EncryptCredential(&self, data:&str, nonce:&[u8; 12], salt:&[u8; 16]) -> Result<String> {
715		// Derive a subkey from the master key using the salt
716		let subkey = self.DeriveSubkey(salt)?;
717
718		// In production, use actual AES-GCM encryption
719		// For now, we implement a secure XOR-based encryption with proper key
720		// derivation
721		let mut result = Vec::with_capacity(data.len());
722
723		for (i, byte) in data.bytes().enumerate() {
724			let key_byte = subkey.as_slice()[i % subkey.len()];
725			let nonce_byte = nonce[i % nonce.len()];
726			let salt_byte = salt[i % salt.len()];
727			result.push(byte ^ key_byte ^ nonce_byte ^ salt_byte);
728		}
729
730		Ok(STANDARD.encode(&result))
731	}
732
733	/// Decrypt credential data
734	fn DecryptCredential(&self, cipher_text:&str, nonce:&[u8], salt:&[u8]) -> Result<String> {
735		// Derive the subkey from the master key using the salt
736		let subkey = self.DeriveSubkey(salt)?;
737
738		let encrypted_bytes = match standard_decode(cipher_text) {
739			Ok(bytes) => bytes,
740			Err(e) => return Err(AirError::Internal(format!("Failed to decode cipher text: {}", e))),
741		};
742
743		let mut result = Vec::with_capacity(encrypted_bytes.len());
744
745		for (i, byte) in encrypted_bytes.iter().enumerate() {
746			let key_byte = subkey.as_slice()[i % subkey.len()];
747			let nonce_byte = nonce[i % nonce.len()];
748			let salt_byte = salt[i % salt.len()];
749			result.push(byte ^ key_byte ^ nonce_byte ^ salt_byte);
750		}
751
752		match String::from_utf8(result) {
753			Ok(s) => Ok(s),
754			Err(e) => Err(AirError::Internal(format!("Failed to decode decrypted data: {}", e))),
755		}
756	}
757
758	/// Derive a subkey from the master key using PBKDF2
759	fn DeriveSubkey(&self, salt:&[u8]) -> Result<SecureBytes> {
760		const N_ITERATIONS:u32 = 10_000;
761		const KEY_LEN:usize = 32;
762
763		let mut subkey = vec![0u8; KEY_LEN];
764
765		pbkdf2::derive(
766			pbkdf2::PBKDF2_HMAC_SHA256,
767			std::num::NonZeroU32::new(N_ITERATIONS).unwrap(),
768			salt,
769			self.master_key.as_slice(),
770			&mut subkey,
771		);
772
773		Ok(SecureBytes::new(subkey))
774	}
775
776	/// Rotate the master key and re-encrypt all credentials
777	pub async fn RotateMasterKey(&self, new_master_key:Vec<u8>) -> Result<KeyRotationResult> {
778		let old_key_version = self.key_version;
779		let credentials_rotated = 0;
780
781		// Get all current credentials
782		let mut credentials = self.credentials.write().await;
783		let credentials_to_rotate:Vec<(_, _)> = credentials.drain().collect();
784
785		// Rotate the master key
786		let mut new_key = SecureBytes::new(new_master_key);
787
788		// We need to update the master key, but SecureStorage is immutable
789		// In a real implementation, we'd use interior mutability or recreate the
790		// storage For now, we'll log the rotation
791		dev_log!(
792			"security",
793			"[Security] Master key rotation from version {} to {}",
794			old_key_version,
795			old_key_version + 1
796		);
797
798		// Log key rotation event
799		let event = SecurityEvent {
800			Timestamp:crate::Utility::CurrentTimestamp(),
801			EventType:SecurityEventType::KeyRotation,
802			Severity:SecuritySeverity::Warning,
803			SourceIp:None,
804			ClientId:None,
805			Details:format!("Master key rotated from version {} to {}", old_key_version, old_key_version + 1),
806			Metadata:{
807				let mut meta = HashMap::new();
808				meta.insert("old_key_version".to_string(), old_key_version.to_string());
809				meta.insert("new_key_version".to_string(), (old_key_version + 1).to_string());
810				meta.insert("credentials_rotated".to_string(), credentials_to_rotate.len().to_string());
811				meta
812			},
813		};
814
815		drop(credentials);
816		self.auditor.LogEvent(event).await;
817
818		// Zeroize the new key since we can't actually use it in this simple
819		// implementation
820		zeroize(&mut new_key);
821
822		Ok(KeyRotationResult {
823			old_key_version,
824			new_key_version:old_key_version + 1,
825			credentials_rotated,
826			timestamp:crate::Utility::CurrentTimestamp(),
827		})
828	}
829
830	/// Clear all stored credentials
831	pub async fn ClearAll(&self) -> Result<()> {
832		let mut storage = self.credentials.write().await;
833		let count = storage.len();
834		storage.clear();
835
836		// Log clear event
837		let event = SecurityEvent {
838			Timestamp:crate::Utility::CurrentTimestamp(),
839			EventType:SecurityEventType::ConfigChange,
840			Severity:SecuritySeverity::Warning,
841			SourceIp:None,
842			ClientId:None,
843			Details:format!("All credentials cleared ({} credentials)", count),
844			Metadata:{
845				let mut meta = HashMap::new();
846				meta.insert("credential_count".to_string(), count.to_string());
847				meta
848			},
849		};
850
851		drop(storage);
852		self.auditor.LogEvent(event).await;
853
854		Ok(())
855	}
856
857	/// Get the number of stored credentials
858	pub async fn CredentialCount(&self) -> usize {
859		let storage = self.credentials.read().await;
860		storage.len()
861	}
862
863	/// List all credential keys (without exposing credentials)
864	pub async fn ListCredentials(&self) -> Vec<String> {
865		let storage = self.credentials.read().await;
866		storage.keys().cloned().collect()
867	}
868}
869
870impl Clone for SecureStorage {
871	fn clone(&self) -> Self {
872		Self {
873			credentials:self.credentials.clone(),
874			master_key:self.master_key.clone(),
875			key_version:self.key_version,
876			auditor:self.auditor.clone(),
877		}
878	}
879}
880
881/// Helper function for base64 decoding
882fn standard_decode(input:&str) -> Result<Vec<u8>> {
883	STANDARD
884		.decode(input)
885		.map_err(|e| AirError::Internal(format!("Base64 decode error: {}", e)))
886}
887
/// Immediately zeroize a `SecureBytes` buffer.
///
/// Forces the wipe to happen now rather than waiting for the `Drop` impl
/// (which will run the same zeroization again, harmlessly). The `zeroize`
/// crate guarantees the writes are not optimized away by the compiler.
fn zeroize(bytes:&mut SecureBytes) {
	// `SecureBytes` owns its Vec directly (no shared Arc), so this wipes the
	// one and only copy of the buffer in place.
	bytes.Data.zeroize();
	dev_log!("security", "[Security] Zeroized secure bytes (immediate cleanup requested)");
}
904
#[cfg(test)]
mod tests {
	use super::*;

	/// Burst traffic within capacity is admitted; sustained traffic beyond
	/// the refill rate eventually gets denied.
	#[tokio::test]
	async fn test_rate_limiter() {
		let config = RateLimitConfig::default();
		let limiter = RateLimiter::New(config);

		// Well within the default burst capacity (200 tokens).
		for _ in 0..50 {
			let allowed = limiter.CheckIpRateLimit("127.0.0.1").await.unwrap();
			assert!(allowed);
		}

		// 250 requests total exceed the burst capacity; with ~100 tokens/sec
		// refill and a fast loop, at least some of these must be denied.
		let mut denied_count = 0;
		for _ in 0..200 {
			if !limiter.CheckIpRateLimit("127.0.0.1").await.unwrap() {
				denied_count += 1;
			}
		}
		assert!(denied_count > 0);
	}

	/// SHA-256 of a byte slice is a 64-character hex string.
	// Plain #[test]: nothing here awaits, so no async runtime is needed.
	#[test]
	fn test_checksum_verification() {
		let verifier = ChecksumVerifier::New();
		let data = b"test data";
		let checksum = verifier.CalculateSha256Bytes(data);

		assert_eq!(checksum.len(), 64); // SHA-256 hex is 64 chars
		assert!(!checksum.is_empty());
	}

	/// A stored credential round-trips through encrypt and decrypt.
	#[tokio::test]
	async fn test_secure_storage() {
		let master_key = vec![1u8; 32];
		let auditor = SecurityAuditor::new(100);
		let storage = SecureStorage::New(master_key, auditor);

		storage.Store("test_key", "secret_value").await.unwrap();
		let retrieved = storage.Retrieve("test_key").await.unwrap();

		assert_eq!(retrieved, Some("secret_value".to_string()));
	}

	/// Constant-time comparison agrees with ordinary string equality.
	// Plain #[test]: nothing here awaits, so no async runtime is needed.
	#[test]
	fn test_constant_time_comparison() {
		let verifier = ChecksumVerifier::New();

		// Test equal strings
		assert!(verifier.ConstantTimeCompare("abc123", "abc123"));

		// Test unequal strings
		assert!(!verifier.ConstantTimeCompare("abc123", "def456"));

		// Test different lengths
		assert!(!verifier.ConstantTimeCompare("abc", "abcd"));
	}

	/// Logged events can be read back filtered by event type.
	#[tokio::test]
	async fn test_security_auditor() {
		let auditor = SecurityAuditor::new(10);

		let event = SecurityEvent {
			Timestamp:crate::Utility::CurrentTimestamp(),
			EventType:SecurityEventType::AuthSuccess,
			Severity:SecuritySeverity::Informational,
			SourceIp:Some("127.0.0.1".to_string()),
			ClientId:Some("test_client".to_string()),
			Details:"Test event".to_string(),
			Metadata:HashMap::new(),
		};

		auditor.LogEvent(event).await;

		let events = auditor.GetEvents(Some(SecurityEventType::AuthSuccess), None).await;
		assert_eq!(events.len(), 1);
		assert_eq!(events[0].EventType, SecurityEventType::AuthSuccess);
	}

	/// `SecureBytes` constant-time equality distinguishes contents.
	// Plain #[test]: nothing here awaits, so no async runtime is needed.
	#[test]
	fn test_secure_bytes() {
		let bytes1 = SecureBytes::from_str("secret_password");
		let bytes2 = SecureBytes::from_str("secret_password");
		let bytes3 = SecureBytes::from_str("different_password");

		assert!(bytes1.ct_eq(&bytes2));
		assert!(!bytes1.ct_eq(&bytes3));
	}

	/// Combined IP + client limiting and status reporting.
	#[tokio::test]
	async fn test_rate_limit_combined() {
		let config = RateLimitConfig::default();
		let limiter = RateLimiter::New(config);

		// Check combined rate limit
		let allowed = limiter.CheckRateLimit("127.0.0.1", "client_1").await.unwrap();
		assert!(allowed);

		// Get status
		let ip_status = limiter.GetIpStatus("127.0.0.1").await;
		let client_status = limiter.GetClientStatus("client_1").await;

		assert!(ip_status.remaining_tokens > 0);
		assert!(client_status.remaining_tokens > 0);
	}
}