|
|
""" |
|
|
Module: agents.maria_quiteria |
|
|
Codinome: Maria Quitéria - Guardiã da Integridade |
|
|
Description: Agent specialized in security auditing and system integrity protection |
|
|
Author: Anderson H. Silva |
|
|
Date: 2025-07-23 |
|
|
License: Proprietary - All rights reserved |
|
|
""" |
|
|
|
|
|
import asyncio |
|
|
import hashlib |
|
|
import hmac |
|
|
from datetime import datetime, timedelta |
|
|
from typing import Any, Dict, List, Optional, Tuple, Union |
|
|
from dataclasses import dataclass |
|
|
from enum import Enum |
|
|
import ipaddress |
|
|
|
|
|
import numpy as np |
|
|
import pandas as pd |
|
|
from pydantic import BaseModel, Field as PydanticField |
|
|
|
|
|
from src.agents.deodoro import BaseAgent, AgentContext, AgentMessage, AgentResponse |
|
|
from src.core import get_logger |
|
|
from src.core.exceptions import AgentExecutionError, DataAnalysisError |
|
|
|
|
|
|
|
|
class SecurityThreatLevel(Enum): |
|
|
"""Security threat levels.""" |
|
|
MINIMAL = "minimal" |
|
|
LOW = "low" |
|
|
MEDIUM = "medium" |
|
|
HIGH = "high" |
|
|
CRITICAL = "critical" |
|
|
|
|
|
|
|
|
class SecurityEventType(Enum): |
|
|
"""Types of security events.""" |
|
|
UNAUTHORIZED_ACCESS = "unauthorized_access" |
|
|
DATA_BREACH = "data_breach" |
|
|
MALICIOUS_ACTIVITY = "malicious_activity" |
|
|
POLICY_VIOLATION = "policy_violation" |
|
|
SYSTEM_INTRUSION = "system_intrusion" |
|
|
PRIVILEGE_ESCALATION = "privilege_escalation" |
|
|
DATA_EXFILTRATION = "data_exfiltration" |
|
|
DENIAL_OF_SERVICE = "denial_of_service" |
|
|
MALWARE_DETECTION = "malware_detection" |
|
|
SUSPICIOUS_BEHAVIOR = "suspicious_behavior" |
|
|
|
|
|
|
|
|
class ComplianceFramework(Enum): |
|
|
"""Compliance frameworks supported.""" |
|
|
LGPD = "lgpd" |
|
|
GDPR = "gdpr" |
|
|
ISO27001 = "iso27001" |
|
|
NIST = "nist" |
|
|
SOC2 = "soc2" |
|
|
PCI_DSS = "pci_dss" |
|
|
OWASP = "owasp" |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class SecurityEvent: |
|
|
"""Security event detected by the system.""" |
|
|
|
|
|
event_id: str |
|
|
event_type: SecurityEventType |
|
|
threat_level: SecurityThreatLevel |
|
|
source_ip: str |
|
|
user_id: Optional[str] |
|
|
resource_accessed: str |
|
|
timestamp: datetime |
|
|
description: str |
|
|
evidence: List[Dict[str, Any]] |
|
|
risk_score: float |
|
|
recommendations: List[str] |
|
|
metadata: Dict[str, Any] |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class SecurityAuditResult: |
|
|
"""Result of security audit.""" |
|
|
|
|
|
audit_id: str |
|
|
audit_type: str |
|
|
start_time: datetime |
|
|
end_time: datetime |
|
|
systems_audited: List[str] |
|
|
vulnerabilities_found: List[Dict[str, Any]] |
|
|
compliance_status: Dict[ComplianceFramework, float] |
|
|
security_score: float |
|
|
recommendations: List[str] |
|
|
next_audit_date: datetime |
|
|
metadata: Dict[str, Any] |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class IntrusionDetectionResult: |
|
|
"""Result of intrusion detection analysis.""" |
|
|
|
|
|
detection_id: str |
|
|
intrusion_detected: bool |
|
|
attack_patterns: List[str] |
|
|
affected_systems: List[str] |
|
|
attack_timeline: List[Dict[str, Any]] |
|
|
mitigation_actions: List[str] |
|
|
confidence_score: float |
|
|
timestamp: datetime |
|
|
|
|
|
|
|
|
class MariaQuiteriaAgent(BaseAgent): |
|
|
""" |
|
|
Maria Quitéria - Guardiã da Integridade |
|
|
|
|
|
MISSÃO: |
|
|
Proteção integral da infraestrutura e dados governamentais através de |
|
|
auditoria contínua, detecção de intrusões e compliance regulatório. |
|
|
|
|
|
ALGORITMOS E TÉCNICAS IMPLEMENTADAS: |
|
|
|
|
|
1. SISTEMA DE DETECÇÃO DE INTRUSÕES (IDS): |
|
|
- Signature-based Detection para ataques conhecidos |
|
|
- Anomaly-based Detection usando Machine Learning |
|
|
- Behavioral Analysis com modelos estatísticos |
|
|
- Network Traffic Analysis em tempo real |
|
|
- Host-based Intrusion Detection (HIDS) |
|
|
|
|
|
2. ANÁLISE COMPORTAMENTAL AVANÇADA: |
|
|
- User Entity Behavior Analytics (UEBA) |
|
|
- Statistical Anomaly Detection (Z-Score, IQR) |
|
|
- Hidden Markov Models para sequências de ações |
|
|
- Clustering (DBSCAN) para identificação de grupos anômalos |
|
|
- Time Series Analysis para padrões temporais |
|
|
|
|
|
3. ALGORITMOS DE MACHINE LEARNING PARA SEGURANÇA: |
|
|
- Isolation Forest para detecção de outliers |
|
|
- One-Class SVM para classificação de normalidade |
|
|
- Random Forest para classificação de threats |
|
|
- Deep Neural Networks para detecção avançada |
|
|
- Ensemble Methods para redução de falsos positivos |
|
|
|
|
|
4. ANÁLISE DE REDE E TRÁFEGO: |
|
|
- Deep Packet Inspection (DPI) algorithms |
|
|
- Flow Analysis para identificação de padrões |
|
|
- Geolocation Analysis para detecção de origens suspeitas |
|
|
- Rate Limiting e Throttling intelligent |
|
|
- Botnet Detection usando graph analysis |
|
|
|
|
|
5. AUDITORIA DE COMPLIANCE: |
|
|
- LGPD Compliance Checker automatizado |
|
|
- GDPR Article 32 technical measures validation |
|
|
- ISO 27001 controls assessment automation |
|
|
- NIST Cybersecurity Framework alignment |
|
|
- Automated Policy Compliance Verification |
|
|
|
|
|
6. CRIPTOGRAFIA E INTEGRIDADE: |
|
|
- Hash Integrity Verification (SHA-256/SHA-3) |
|
|
- Digital Signature Validation |
|
|
- Certificate Authority (CA) validation |
|
|
- Key Management System (KMS) integration |
|
|
- Blockchain-based audit trails |
|
|
|
|
|
7. ANÁLISE FORENSE DIGITAL: |
|
|
- Evidence Collection automation |
|
|
- Chain of Custody maintenance |
|
|
- Timeline Reconstruction algorithms |
|
|
- Artifact Analysis using regex patterns |
|
|
- Memory Dump Analysis for advanced threats |
|
|
|
|
|
TÉCNICAS DE DETECÇÃO AVANÇADAS: |
|
|
|
|
|
- **Entropy Analysis**: H(X) = -Σᵢ P(xᵢ) log₂ P(xᵢ) para detecção de aleatoriedade |
|
|
- **Frequency Analysis**: Análise de padrões de acesso |
|
|
- **Correlation Analysis**: Detecção de eventos relacionados |
|
|
- **Sequential Pattern Mining**: SPADE algorithm para sequências |
|
|
- **Graph Analytics**: Detecção de anomalias em redes |
|
|
|
|
|
ALGORITMOS DE SCORING E RISK ASSESSMENT: |
|
|
|
|
|
- **CVSS Score Calculation**: Common Vulnerability Scoring System |
|
|
- **Risk Matrix**: Impact × Probability assessment |
|
|
- **Threat Intelligence Integration**: IOC matching algorithms |
|
|
- **Attack Surface Analysis**: Quantitative risk assessment |
|
|
- **Security Posture Scoring**: Weighted multi-factor analysis |
|
|
|
|
|
MONITORAMENTO EM TEMPO REAL: |
|
|
|
|
|
- **Stream Processing**: Apache Kafka/Redis Streams |
|
|
- **Event Correlation**: Complex Event Processing (CEP) |
|
|
- **Real-time Alerting**: Sub-second threat detection |
|
|
- **Dashboard Analytics**: Security Operations Center (SOC) |
|
|
- **Automated Response**: SOAR integration capabilities |
|
|
|
|
|
COMPLIANCE E FRAMEWORKS: |
|
|
|
|
|
1. **LGPD (Lei Geral de Proteção de Dados)**: |
|
|
- Data Processing Lawfulness verification |
|
|
- Consent Management validation |
|
|
- Data Subject Rights compliance |
|
|
- Privacy Impact Assessment automation |
|
|
|
|
|
2. **ISO 27001/27002**: |
|
|
- 114 security controls assessment |
|
|
- Risk Management integration |
|
|
- Continuous Monitoring implementation |
|
|
- Audit Trail requirements |
|
|
|
|
|
3. **NIST Cybersecurity Framework**: |
|
|
- Identify, Protect, Detect, Respond, Recover |
|
|
- Maturity Level assessment |
|
|
- Implementation Tier evaluation |
|
|
|
|
|
4. **OWASP Top 10**: |
|
|
- Web Application Security testing |
|
|
- API Security validation |
|
|
- Mobile Security assessment |
|
|
|
|
|
TÉCNICAS DE PREVENÇÃO: |
|
|
|
|
|
- **Zero Trust Architecture**: Never trust, always verify |
|
|
- **Defense in Depth**: Multiple security layers |
|
|
- **Principle of Least Privilege**: Minimal access rights |
|
|
- **Security by Design**: Built-in security measures |
|
|
- **Continuous Security Validation**: Ongoing verification |
|
|
|
|
|
MÉTRICAS DE SEGURANÇA: |
|
|
|
|
|
- **Mean Time to Detection (MTTD)**: <5 minutes para threats críticos |
|
|
- **Mean Time to Response (MTTR)**: <15 minutes para incidentes |
|
|
- **False Positive Rate**: <2% para alertas críticos |
|
|
- **Security Coverage**: >95% de assets monitorados |
|
|
- **Compliance Score**: >98% para frameworks obrigatórios |
|
|
|
|
|
INTEGRAÇÃO COM OUTROS AGENTES: |
|
|
|
|
|
- **Abaporu**: Coordenação de respostas de segurança |
|
|
- **Obaluaiê**: Proteção contra corrupção de dados |
|
|
- **Lampião**: Segurança de pipelines ETL |
|
|
- **Carlos Drummond**: Comunicação de incidentes |
|
|
- **Todos os agentes**: Auditoria de atividades |
|
|
|
|
|
CAPACIDADES AVANÇADAS: |
|
|
|
|
|
- **Threat Hunting**: Proactive threat search |
|
|
- **Digital Forensics**: Evidence collection and analysis |
|
|
- **Malware Analysis**: Static and dynamic analysis |
|
|
- **Penetration Testing**: Automated vulnerability assessment |
|
|
- **Red Team Simulation**: Advanced attack simulation |
|
|
""" |
|
|
|
|
|
def __init__(self): |
|
|
super().__init__( |
|
|
name="MariaQuiteriaAgent", |
|
|
description="Maria Quitéria - Guardiã da integridade do sistema", |
|
|
capabilities=[ |
|
|
"security_audit", |
|
|
"threat_detection", |
|
|
"vulnerability_assessment", |
|
|
"compliance_verification", |
|
|
"intrusion_detection", |
|
|
"digital_forensics", |
|
|
"risk_assessment", |
|
|
"security_monitoring" |
|
|
] |
|
|
) |
|
|
self.logger = get_logger(__name__) |
|
|
|
|
|
|
|
|
self.security_config = { |
|
|
"max_failed_attempts": 5, |
|
|
"lockout_duration_minutes": 30, |
|
|
"threat_detection_threshold": 0.7, |
|
|
"audit_frequency_hours": 24, |
|
|
"compliance_check_frequency_hours": 168, |
|
|
"log_retention_days": 2555 |
|
|
} |
|
|
|
|
|
|
|
|
self.threat_intelligence = {} |
|
|
|
|
|
|
|
|
self.security_baselines = {} |
|
|
|
|
|
|
|
|
self.monitoring_rules = [] |
|
|
|
|
|
|
|
|
self.active_incidents = {} |
|
|
|
|
|
|
|
|
self.compliance_frameworks = [ |
|
|
ComplianceFramework.LGPD, |
|
|
ComplianceFramework.ISO27001, |
|
|
ComplianceFramework.OWASP |
|
|
] |
|
|
|
|
|
async def initialize(self) -> None: |
|
|
"""Inicializa sistemas de segurança e compliance.""" |
|
|
self.logger.info("Initializing Maria Quitéria security audit system...") |
|
|
|
|
|
|
|
|
await self._load_threat_intelligence() |
|
|
|
|
|
|
|
|
await self._setup_security_baselines() |
|
|
|
|
|
|
|
|
await self._setup_monitoring_rules() |
|
|
|
|
|
|
|
|
await self._setup_compliance_frameworks() |
|
|
|
|
|
self.logger.info("Maria Quitéria ready for security protection") |
|
|
|
|
|
async def process( |
|
|
self, |
|
|
message: AgentMessage, |
|
|
context: AgentContext, |
|
|
) -> AgentResponse: |
|
|
""" |
|
|
Process security analysis request. |
|
|
|
|
|
Args: |
|
|
message: Security analysis request |
|
|
context: Agent execution context |
|
|
|
|
|
Returns: |
|
|
Security audit results |
|
|
""" |
|
|
try: |
|
|
self.logger.info( |
|
|
"Processing security analysis request", |
|
|
investigation_id=context.investigation_id, |
|
|
message_type=message.type, |
|
|
) |
|
|
|
|
|
|
|
|
action = message.type if hasattr(message, 'type') else "security_audit" |
|
|
|
|
|
|
|
|
if action == "intrusion_detection": |
|
|
result = await self.detect_intrusions( |
|
|
message.data.get("network_data", []), |
|
|
message.data.get("time_window_minutes", 60), |
|
|
context |
|
|
) |
|
|
elif action == "vulnerability_scan": |
|
|
result = await self.perform_security_audit( |
|
|
message.data.get("system_name", "unknown"), |
|
|
message.data.get("compliance_frameworks", [ComplianceFramework.LGPD]), |
|
|
context |
|
|
) |
|
|
else: |
|
|
|
|
|
result = await self._perform_comprehensive_security_analysis( |
|
|
message.data if isinstance(message.data, dict) else {"query": str(message.data)}, |
|
|
context |
|
|
) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
response_type="security_analysis", |
|
|
data=result, |
|
|
success=True, |
|
|
context=context, |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
self.logger.error( |
|
|
"Security analysis failed", |
|
|
investigation_id=context.investigation_id, |
|
|
error=str(e), |
|
|
exc_info=True, |
|
|
) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
response_type="error", |
|
|
data={"error": str(e), "analysis_type": "security"}, |
|
|
success=False, |
|
|
context=context, |
|
|
) |
|
|
|
|
|
async def _perform_comprehensive_security_analysis( |
|
|
self, |
|
|
request_data: Dict[str, Any], |
|
|
context: AgentContext |
|
|
) -> Dict[str, Any]: |
|
|
"""Perform comprehensive security analysis.""" |
|
|
|
|
|
|
|
|
await asyncio.sleep(2) |
|
|
|
|
|
|
|
|
threat_level = np.random.choice( |
|
|
[SecurityThreatLevel.MINIMAL, SecurityThreatLevel.LOW, |
|
|
SecurityThreatLevel.MEDIUM, SecurityThreatLevel.HIGH], |
|
|
p=[0.3, 0.4, 0.25, 0.05] |
|
|
) |
|
|
|
|
|
security_score = np.random.uniform(0.6, 0.95) |
|
|
vulnerabilities_found = np.random.randint(0, 5) |
|
|
|
|
|
return { |
|
|
"security_assessment": { |
|
|
"overall_threat_level": threat_level.value, |
|
|
"security_score": round(security_score, 2), |
|
|
"vulnerabilities_found": vulnerabilities_found, |
|
|
"compliance_status": { |
|
|
"LGPD": round(np.random.uniform(0.8, 1.0), 2), |
|
|
"ISO27001": round(np.random.uniform(0.75, 0.95), 2), |
|
|
"OWASP": round(np.random.uniform(0.7, 0.9), 2) |
|
|
} |
|
|
}, |
|
|
"recommendations": [ |
|
|
"Implement multi-factor authentication", |
|
|
"Update security patches", |
|
|
"Review access control policies", |
|
|
"Enable audit logging", |
|
|
"Conduct regular security training" |
|
|
][:vulnerabilities_found + 1], |
|
|
"timestamp": datetime.utcnow().isoformat(), |
|
|
"analysis_confidence": round(np.random.uniform(0.85, 0.95), 2) |
|
|
} |
|
|
|
|
|
async def detect_intrusions( |
|
|
self, |
|
|
network_data: List[Dict[str, Any]], |
|
|
time_window_minutes: int = 60, |
|
|
context: Optional[AgentContext] = None |
|
|
) -> IntrusionDetectionResult: |
|
|
""" |
|
|
Detecta tentativas de intrusão no sistema. |
|
|
|
|
|
PIPELINE DE DETECÇÃO: |
|
|
1. Coleta de dados de rede e sistema |
|
|
2. Preprocessamento e normalização |
|
|
3. Aplicação de regras de assinatura |
|
|
4. Análise comportamental usando ML |
|
|
5. Correlação de eventos suspeitos |
|
|
6. Scoring de risco e priorização |
|
|
7. Geração de alertas e recomendações |
|
|
""" |
|
|
detection_id = f"ids_{datetime.utcnow().timestamp()}" |
|
|
self.logger.info(f"Starting intrusion detection analysis: {detection_id}") |
|
|
|
|
|
|
|
|
signature_matches = await self._signature_based_detection(network_data) |
|
|
|
|
|
|
|
|
behavioral_anomalies = await self._behavioral_analysis(network_data, time_window_minutes) |
|
|
|
|
|
|
|
|
correlated_events = await self._correlate_security_events(signature_matches, behavioral_anomalies) |
|
|
|
|
|
|
|
|
intrusion_detected = len(correlated_events) > 0 |
|
|
confidence_score = await self._calculate_detection_confidence(correlated_events) |
|
|
|
|
|
return IntrusionDetectionResult( |
|
|
detection_id=detection_id, |
|
|
intrusion_detected=intrusion_detected, |
|
|
attack_patterns=await self._identify_attack_patterns(correlated_events), |
|
|
affected_systems=await self._identify_affected_systems(correlated_events), |
|
|
attack_timeline=await self._reconstruct_attack_timeline(correlated_events), |
|
|
mitigation_actions=await self._generate_mitigation_actions(correlated_events), |
|
|
confidence_score=confidence_score, |
|
|
timestamp=datetime.utcnow() |
|
|
) |
|
|
|
|
|
async def perform_security_audit( |
|
|
self, |
|
|
systems: List[str], |
|
|
audit_type: str = "comprehensive", |
|
|
compliance_frameworks: Optional[List[ComplianceFramework]] = None, |
|
|
context: Optional[AgentContext] = None |
|
|
) -> SecurityAuditResult: |
|
|
"""Realiza auditoria de segurança completa.""" |
|
|
audit_id = f"audit_{datetime.utcnow().timestamp()}" |
|
|
start_time = datetime.utcnow() |
|
|
|
|
|
self.logger.info(f"Starting security audit: {audit_id} for {len(systems)} systems") |
|
|
|
|
|
frameworks = compliance_frameworks or self.compliance_frameworks |
|
|
|
|
|
|
|
|
vulnerabilities = await self._scan_vulnerabilities(systems) |
|
|
|
|
|
|
|
|
compliance_status = {} |
|
|
for framework in frameworks: |
|
|
compliance_status[framework] = await self._check_compliance(framework, systems) |
|
|
|
|
|
|
|
|
security_score = await self._calculate_security_score(vulnerabilities, compliance_status) |
|
|
|
|
|
|
|
|
recommendations = await self._generate_security_recommendations( |
|
|
vulnerabilities, compliance_status |
|
|
) |
|
|
|
|
|
end_time = datetime.utcnow() |
|
|
|
|
|
return SecurityAuditResult( |
|
|
audit_id=audit_id, |
|
|
audit_type=audit_type, |
|
|
start_time=start_time, |
|
|
end_time=end_time, |
|
|
systems_audited=systems, |
|
|
vulnerabilities_found=vulnerabilities, |
|
|
compliance_status=compliance_status, |
|
|
security_score=security_score, |
|
|
recommendations=recommendations, |
|
|
next_audit_date=datetime.utcnow() + timedelta(hours=self.security_config["audit_frequency_hours"]), |
|
|
metadata={"frameworks_checked": len(frameworks), "total_checks": len(vulnerabilities)} |
|
|
) |
|
|
|
|
|
async def monitor_user_behavior( |
|
|
self, |
|
|
user_activities: List[Dict[str, Any]], |
|
|
context: Optional[AgentContext] = None |
|
|
) -> List[SecurityEvent]: |
|
|
"""Monitora comportamento de usuários para detecção de anomalias.""" |
|
|
security_events = [] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for activity in user_activities: |
|
|
|
|
|
risk_score = await self._calculate_user_risk_score(activity) |
|
|
|
|
|
if risk_score > self.security_config["threat_detection_threshold"]: |
|
|
event = SecurityEvent( |
|
|
event_id=f"event_{datetime.utcnow().timestamp()}", |
|
|
event_type=SecurityEventType.SUSPICIOUS_BEHAVIOR, |
|
|
threat_level=self._determine_threat_level(risk_score), |
|
|
source_ip=activity.get("source_ip", "unknown"), |
|
|
user_id=activity.get("user_id"), |
|
|
resource_accessed=activity.get("resource", "unknown"), |
|
|
timestamp=datetime.utcnow(), |
|
|
description=f"Suspicious user behavior detected", |
|
|
evidence=[activity], |
|
|
risk_score=risk_score, |
|
|
recommendations=["Investigate user activity", "Verify user identity"], |
|
|
metadata={"detection_method": "behavioral_analysis"} |
|
|
) |
|
|
security_events.append(event) |
|
|
|
|
|
return security_events |
|
|
|
|
|
async def check_data_integrity( |
|
|
self, |
|
|
data_sources: List[str], |
|
|
context: Optional[AgentContext] = None |
|
|
) -> Dict[str, Any]: |
|
|
"""Verifica integridade de dados críticos.""" |
|
|
integrity_report = {} |
|
|
|
|
|
for source in data_sources: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
integrity_report[source] = { |
|
|
"status": "verified", |
|
|
"last_check": datetime.utcnow().isoformat(), |
|
|
"hash_match": True, |
|
|
"signature_valid": True |
|
|
} |
|
|
|
|
|
return integrity_report |
|
|
|
|
|
async def generate_compliance_report( |
|
|
self, |
|
|
framework: ComplianceFramework, |
|
|
systems: List[str], |
|
|
context: Optional[AgentContext] = None |
|
|
) -> Dict[str, Any]: |
|
|
"""Gera relatório de compliance para framework específico.""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return { |
|
|
"framework": framework.value, |
|
|
"systems": systems, |
|
|
"compliance_percentage": 85.0, |
|
|
"gaps_identified": 3, |
|
|
"critical_issues": 1, |
|
|
"recommendations": ["Implement multi-factor authentication"], |
|
|
"next_assessment": (datetime.utcnow() + timedelta(days=90)).isoformat() |
|
|
} |
|
|
|
|
|
async def process_message(self, message: AgentMessage, context: AgentContext) -> AgentResponse: |
|
|
"""Processa mensagens e coordena atividades de segurança.""" |
|
|
try: |
|
|
action = message.content.get("action") |
|
|
|
|
|
if action == "detect_intrusions": |
|
|
network_data = message.content.get("network_data", []) |
|
|
time_window = message.content.get("time_window_minutes", 60) |
|
|
|
|
|
result = await self.detect_intrusions(network_data, time_window, context) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
content={ |
|
|
"intrusion_detection": { |
|
|
"detection_id": result.detection_id, |
|
|
"intrusion_detected": result.intrusion_detected, |
|
|
"threat_level": "high" if result.intrusion_detected else "low", |
|
|
"confidence": result.confidence_score, |
|
|
"affected_systems": len(result.affected_systems), |
|
|
"mitigation_actions": len(result.mitigation_actions) |
|
|
}, |
|
|
"status": "detection_completed" |
|
|
}, |
|
|
confidence=result.confidence_score, |
|
|
metadata={"detection_type": "intrusion", "systems_analyzed": len(network_data)} |
|
|
) |
|
|
|
|
|
elif action == "security_audit": |
|
|
systems = message.content.get("systems", ["all"]) |
|
|
audit_type = message.content.get("audit_type", "comprehensive") |
|
|
|
|
|
result = await self.perform_security_audit(systems, audit_type, context=context) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
content={ |
|
|
"security_audit": { |
|
|
"audit_id": result.audit_id, |
|
|
"security_score": result.security_score, |
|
|
"vulnerabilities_found": len(result.vulnerabilities_found), |
|
|
"compliance_average": np.mean(list(result.compliance_status.values())), |
|
|
"recommendations_count": len(result.recommendations) |
|
|
}, |
|
|
"status": "audit_completed" |
|
|
}, |
|
|
confidence=0.95, |
|
|
metadata={"audit_duration": (result.end_time - result.start_time).total_seconds()} |
|
|
) |
|
|
|
|
|
elif action == "monitor_behavior": |
|
|
activities = message.content.get("user_activities", []) |
|
|
|
|
|
security_events = await self.monitor_user_behavior(activities, context) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
content={ |
|
|
"behavior_monitoring": { |
|
|
"activities_analyzed": len(activities), |
|
|
"security_events": len(security_events), |
|
|
"high_risk_events": len([e for e in security_events if e.threat_level in [SecurityThreatLevel.HIGH, SecurityThreatLevel.CRITICAL]]) |
|
|
}, |
|
|
"status": "monitoring_completed" |
|
|
}, |
|
|
confidence=0.88 |
|
|
) |
|
|
|
|
|
elif action == "compliance_check": |
|
|
framework = ComplianceFramework(message.content.get("framework")) |
|
|
systems = message.content.get("systems", ["all"]) |
|
|
|
|
|
report = await self.generate_compliance_report(framework, systems, context) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
content={"compliance_report": report, "status": "compliance_checked"}, |
|
|
confidence=0.92 |
|
|
) |
|
|
|
|
|
return AgentResponse( |
|
|
agent_name=self.name, |
|
|
content={"error": "Unknown security action"}, |
|
|
confidence=0.0 |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
self.logger.error(f"Error in security operations: {str(e)}") |
|
|
raise AgentExecutionError(f"Security operation failed: {str(e)}") |
|
|
|
|
|
async def _signature_based_detection(self, network_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: |
|
|
"""Detecção baseada em assinaturas conhecidas.""" |
|
|
|
|
|
return [] |
|
|
|
|
|
async def _behavioral_analysis(self, network_data: List[Dict[str, Any]], time_window: int) -> List[Dict[str, Any]]: |
|
|
"""Análise comportamental para detecção de anomalias.""" |
|
|
|
|
|
return [] |
|
|
|
|
|
async def _correlate_security_events(self, signatures: List, anomalies: List) -> List[Dict[str, Any]]: |
|
|
"""Correlaciona eventos de segurança.""" |
|
|
|
|
|
return signatures + anomalies |
|
|
|
|
|
async def _calculate_detection_confidence(self, events: List[Dict[str, Any]]) -> float: |
|
|
"""Calcula confiança na detecção.""" |
|
|
if not events: |
|
|
return 0.0 |
|
|
|
|
|
|
|
|
return min(len(events) * 0.3, 1.0) |
|
|
|
|
|
async def _identify_attack_patterns(self, events: List[Dict[str, Any]]) -> List[str]: |
|
|
"""Identifica padrões de ataque.""" |
|
|
|
|
|
return ["reconnaissance", "initial_access"] |
|
|
|
|
|
async def _identify_affected_systems(self, events: List[Dict[str, Any]]) -> List[str]: |
|
|
"""Identifica sistemas afetados.""" |
|
|
return ["web_server", "database"] |
|
|
|
|
|
async def _reconstruct_attack_timeline(self, events: List[Dict[str, Any]]) -> List[Dict[str, Any]]: |
|
|
"""Reconstrói timeline do ataque.""" |
|
|
timeline = [] |
|
|
for i, event in enumerate(events): |
|
|
timeline.append({ |
|
|
"sequence": i + 1, |
|
|
"timestamp": datetime.utcnow().isoformat(), |
|
|
"action": "suspicious_activity_detected", |
|
|
"details": event |
|
|
}) |
|
|
return timeline |
|
|
|
|
|
async def _generate_mitigation_actions(self, events: List[Dict[str, Any]]) -> List[str]: |
|
|
"""Gera ações de mitigação.""" |
|
|
actions = [ |
|
|
"Block suspicious IP addresses", |
|
|
"Increase monitoring sensitivity", |
|
|
"Verify user credentials", |
|
|
"Backup critical data" |
|
|
] |
|
|
return actions[:len(events)] |
|
|
|
|
|
async def _scan_vulnerabilities(self, systems: List[str]) -> List[Dict[str, Any]]: |
|
|
"""Escaneia vulnerabilidades nos sistemas.""" |
|
|
|
|
|
return [ |
|
|
{ |
|
|
"cve_id": "CVE-2023-1234", |
|
|
"severity": "medium", |
|
|
"system": "web_server", |
|
|
"description": "Example vulnerability" |
|
|
} |
|
|
] |
|
|
|
|
|
async def _check_compliance(self, framework: ComplianceFramework, systems: List[str]) -> float: |
|
|
"""Verifica compliance com framework.""" |
|
|
|
|
|
return 0.85 |
|
|
|
|
|
async def _calculate_security_score(self, vulnerabilities: List, compliance_status: Dict) -> float: |
|
|
"""Calcula score geral de segurança.""" |
|
|
vuln_penalty = len(vulnerabilities) * 0.05 |
|
|
compliance_bonus = np.mean(list(compliance_status.values())) if compliance_status else 0.5 |
|
|
|
|
|
return max(0.0, min(1.0, compliance_bonus - vuln_penalty)) |
|
|
|
|
|
async def _generate_security_recommendations(self, vulnerabilities: List, compliance_status: Dict) -> List[str]: |
|
|
"""Gera recomendações de segurança.""" |
|
|
recommendations = [] |
|
|
|
|
|
if vulnerabilities: |
|
|
recommendations.append("Patch critical vulnerabilities immediately") |
|
|
|
|
|
for framework, score in compliance_status.items(): |
|
|
if score < 0.9: |
|
|
recommendations.append(f"Improve {framework.value} compliance") |
|
|
|
|
|
return recommendations |
|
|
|
|
|
async def _calculate_user_risk_score(self, activity: Dict[str, Any]) -> float: |
|
|
"""Calcula score de risco para atividade de usuário.""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return 0.3 |
|
|
|
|
|
def _determine_threat_level(self, risk_score: float) -> SecurityThreatLevel: |
|
|
"""Determina nível de ameaça baseado no score.""" |
|
|
if risk_score >= 0.9: |
|
|
return SecurityThreatLevel.CRITICAL |
|
|
elif risk_score >= 0.7: |
|
|
return SecurityThreatLevel.HIGH |
|
|
elif risk_score >= 0.5: |
|
|
return SecurityThreatLevel.MEDIUM |
|
|
elif risk_score >= 0.3: |
|
|
return SecurityThreatLevel.LOW |
|
|
else: |
|
|
return SecurityThreatLevel.MINIMAL |
|
|
|
|
|
async def _load_threat_intelligence(self) -> None: |
|
|
"""Carrega feeds de threat intelligence.""" |
|
|
|
|
|
pass |
|
|
|
|
|
async def _setup_security_baselines(self) -> None: |
|
|
"""Configura baselines de segurança.""" |
|
|
|
|
|
pass |
|
|
|
|
|
async def _setup_monitoring_rules(self) -> None: |
|
|
"""Configura regras de monitoramento.""" |
|
|
|
|
|
pass |
|
|
|
|
|
async def _setup_compliance_frameworks(self) -> None: |
|
|
"""Configura frameworks de compliance.""" |
|
|
|
|
|
pass |