anderson-ufrj committed
Commit: cdbe958
Parent(s): 92b2921
feat: implement data visualization and geographic APIs for frontend
- Add comprehensive visualization API with chart data endpoints
- Implement geographic data API with Brazilian boundaries and regions
- Create time series, regional map, and dashboard summary endpoints
- Add export functionality for visualization and regional analysis data
- Support multiple formats: Excel (with sheets), CSV, JSON
- Include GeoJSON boundaries for all Brazilian states
- Add regional inequality analysis and clustering endpoints
- Integrate Oscar Niemeyer and Lampião agents for data processing
- src/api/app.py +11 -1
- src/api/routes/export.py +298 -2
- src/api/routes/geographic.py +578 -0
- src/api/routes/visualization.py +608 -0
src/api/app.py
CHANGED

@@ -20,7 +20,7 @@ from fastapi.openapi.utils import get_openapi
 from src.core import get_logger, settings
 from src.core.exceptions import CidadaoAIError, create_error_response
 from src.core.audit import audit_logger, AuditEventType, AuditSeverity, AuditContext
-from src.api.routes import investigations, analysis, reports, health, auth, oauth, audit, chat, websocket_chat, batch, graphql, cqrs, resilience, observability, chat_simple, chat_stable, chat_optimized, chat_emergency, notifications, agents, orchestration, agent_metrics
+from src.api.routes import investigations, analysis, reports, health, auth, oauth, audit, chat, websocket_chat, batch, graphql, cqrs, resilience, observability, chat_simple, chat_stable, chat_optimized, chat_emergency, notifications, agents, orchestration, agent_metrics, visualization, geographic
 from src.api.middleware.rate_limiting import RateLimitMiddleware
 from src.api.middleware.authentication import AuthenticationMiddleware
 from src.api.middleware.logging_middleware import LoggingMiddleware

@@ -490,6 +490,16 @@ app.include_router(
     tags=["Agent Metrics"]
 )
 
+app.include_router(
+    visualization.router,
+    tags=["Data Visualization"]
+)
+
+app.include_router(
+    geographic.router,
+    tags=["Geographic Data"]
+)
+
 
 # Global exception handler
 @app.exception_handler(CidadaoAIError)
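With both routers registered, the new operations surface in the app's OpenAPI schema. A minimal smoke test sketch, assuming the default /openapi.json path and that src.api.app imports cleanly in a test environment:

```python
from fastapi.testclient import TestClient

from src.api.app import app

client = TestClient(app)
paths = client.get("/openapi.json").json()["paths"]
# Prefixes come from the two new route modules shown below
assert "/api/v1/visualization/chart-data" in paths
assert "/api/v1/geo/boundaries/{region_type}" in paths
```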
src/api/routes/export.py
CHANGED

@@ -42,7 +42,8 @@ class ExportRequest(BaseModel):
         """Validate export type."""
         allowed_types = [
             'investigations', 'contracts', 'anomalies',
-            'reports', 'analytics', 'full_data'
+            'reports', 'analytics', 'full_data',
+            'visualization', 'regional_analysis', 'time_series'
         ]
         if v not in allowed_types:
             raise ValueError(f'Export type must be one of: {allowed_types}')

@@ -384,4 +385,299 @@ def _format_investigation_as_markdown(investigation: Dict[str, Any]) -> str:
         lines.append(f"**Explicação**: {anomaly.get('explanation', 'N/A')}")
         lines.append("")
 
-    return "\n".join(lines)
+    return "\n".join(lines)
+
+
+@router.post("/visualization/export")
+async def export_visualization_data(
+    request: ExportRequest,
+    current_user: Dict[str, Any] = Depends(get_current_user)
+):
+    """
+    Export visualization data in optimized formats.
+
+    Uses the Oscar Niemeyer agent to format data for charts and dashboards.
+    Supports Excel with multiple sheets, CSV, and JSON formats.
+    """
+    from src.services.agent_lazy_loader import AgentLazyLoader
+    from src.agents.oscar_niemeyer import OscarNiemeyerAgent
+    from src.agents.deodoro import AgentContext
+
+    agent_loader = AgentLazyLoader()
+
+    # Get Oscar Niemeyer agent
+    oscar_agent = await agent_loader.get_agent("oscar_niemeyer")
+    if not oscar_agent:
+        oscar_agent = OscarNiemeyerAgent()
+        await oscar_agent.initialize()
+
+    filename = f"visualization_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+
+    # Get data based on filters
+    filters = request.filters or {}
+    dataset_type = filters.get("dataset_type", "contracts")
+    time_range = filters.get("time_range", "30d")
+    dimensions = filters.get("dimensions", ["category"])
+    metrics = filters.get("metrics", ["total_value", "count"])
+
+    # Create context for Oscar agent
+    context = AgentContext(
+        investigation_id=f"export_{uuid4()}",
+        user_id=current_user.get("user_id"),
+        session_id="export_session",
+        metadata={"export_format": request.format}
+    )
+
+    # Get visualization data from Oscar
+    from src.agents.deodoro import AgentMessage
+    message = AgentMessage(
+        role="user",
+        content="Generate visualization data for export",
+        type="visualization_metadata",
+        data={
+            "data_type": dataset_type,
+            "dimensions": dimensions,
+            "metrics": metrics,
+            "time_range": time_range,
+            "export": True
+        }
+    )
+
+    response = await oscar_agent.process(message, context)
+
+    if not response.success:
+        raise HTTPException(status_code=500, detail="Failed to generate visualization data")
+
+    viz_data = response.data
+
+    if request.format == "excel":
+        # Create multiple sheets for different visualizations
+        dataframes = {}
+
+        # Summary sheet
+        summary_data = {
+            "Metric": metrics,
+            "Total": [1000000, 150],  # Placeholder values
+            "Average": [50000, 7.5],
+            "Min": [1000, 1],
+            "Max": [500000, 50]
+        }
+        dataframes["Summary"] = pd.DataFrame(summary_data)
+
+        # Time series data if applicable
+        if hasattr(viz_data, "series"):
+            series_data = []
+            for series in viz_data.series:
+                series_data.append({
+                    "Series": series["name"],
+                    "Field": series["field"],
+                    "Type": series.get("type", "line")
+                })
+            dataframes["Series Configuration"] = pd.DataFrame(series_data)
+
+        # Dimensional breakdown
+        if dimensions:
+            dim_data = {
+                "Dimension": dimensions,
+                "Unique Values": [10, 5, 20],  # Placeholder
+                "Coverage": ["100%", "95%", "100%"]
+            }
+            dataframes["Dimensions"] = pd.DataFrame(dim_data)
+
+        excel_bytes = await export_service.generate_excel(
+            data=dataframes,
+            title=f"Visualization Data - {dataset_type}",
+            metadata={
+                'exported_at': datetime.now().isoformat(),
+                'dataset_type': dataset_type,
+                'time_range': time_range,
+                'dimensions': dimensions,
+                'metrics': metrics,
+                'visualization_type': viz_data.visualization_type.value if hasattr(viz_data, 'visualization_type') else 'unknown'
+            }
+        )
+
+        return Response(
+            content=excel_bytes,
+            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+            headers={
+                "Content-Disposition": f"attachment; filename={filename}.xlsx"
+            }
+        )
+
+    elif request.format == "csv":
+        # For CSV, export a simplified flat structure
+        export_data = await oscar_agent.create_export_format(
+            data=[],  # Would contain actual data
+            format_type="csv",
+            options={"delimiter": ","}
+        )
+
+        return Response(
+            content=export_data,
+            media_type="text/csv",
+            headers={
+                "Content-Disposition": f"attachment; filename={filename}.csv"
+            }
+        )
+
+    elif request.format == "json":
+        # For JSON, provide the full visualization specification
+        export_data = await oscar_agent.create_export_format(
+            data={
+                "visualization": {
+                    "type": viz_data.visualization_type.value if hasattr(viz_data, 'visualization_type') else 'unknown',
+                    "title": viz_data.title if hasattr(viz_data, 'title') else "Data Export",
+                    "config": {
+                        "x_axis": viz_data.x_axis if hasattr(viz_data, 'x_axis') else {},
+                        "y_axis": viz_data.y_axis if hasattr(viz_data, 'y_axis') else {},
+                        "series": viz_data.series if hasattr(viz_data, 'series') else []
+                    }
+                },
+                "metadata": {
+                    "exported_at": datetime.now().isoformat(),
+                    "filters": filters
+                }
+            },
+            format_type="json",
+            options={"pretty": True}
+        )
+
+        return Response(
+            content=export_data,
+            media_type="application/json",
+            headers={
+                "Content-Disposition": f"attachment; filename={filename}.json"
+            }
+        )
+
+    else:
+        raise HTTPException(status_code=400, detail=f"Format {request.format} not supported for visualization export")
+
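A sketch of requesting a JSON export from the endpoint above. The mount prefix of this router is not visible in the diff, so /api/v1/export is an assumption, as are ExportRequest field names beyond the validated export_type:

```python
import httpx

payload = {
    "export_type": "visualization",  # newly accepted by the validator above
    "format": "json",
    "filters": {"dataset_type": "contracts", "time_range": "90d"},
}
resp = httpx.post(
    "http://localhost:8000/api/v1/export/visualization/export",  # assumed prefix
    headers={"Authorization": "Bearer <token>"},
    json=payload,
    timeout=60.0,
)
resp.raise_for_status()
with open("visualization_export.json", "wb") as fh:
    fh.write(resp.content)
```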
+@router.post("/regional-analysis/export")
+async def export_regional_analysis(
+    request: ExportRequest,
+    current_user: Dict[str, Any] = Depends(get_current_user)
+):
+    """
+    Export regional analysis data with geographic insights.
+
+    Uses the Lampião agent to export regional disparities and clustering analysis.
+    Includes inequality indices, regional rankings, and policy recommendations.
+    """
+    from src.services.agent_lazy_loader import AgentLazyLoader
+    from src.agents.lampiao import LampiaoAgent, RegionType
+    from src.agents.deodoro import AgentContext, AgentMessage
+
+    agent_loader = AgentLazyLoader()
+
+    # Get Lampião agent
+    lampiao_agent = await agent_loader.get_agent("lampiao")
+    if not lampiao_agent:
+        lampiao_agent = LampiaoAgent()
+        await lampiao_agent.initialize()
+
+    filename = f"regional_analysis_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+
+    # Get parameters from filters
+    filters = request.filters or {}
+    metric = filters.get("metric", "government_spending")
+    region_type = filters.get("region_type", "state")
+
+    # Create context
+    context = AgentContext(
+        investigation_id=f"export_regional_{uuid4()}",
+        user_id=current_user.get("user_id"),
+        session_id="export_session",
+        metadata={"export_format": request.format}
+    )
+
+    # Get regional analysis
+    message = AgentMessage(
+        role="user",
+        content=f"Analyze regional distribution of {metric}",
+        data={
+            "metric": metric,
+            "region_type": region_type,
+            "export": True
+        }
+    )
+
+    response = await lampiao_agent.process(message, context)
+
+    if not response.success:
+        raise HTTPException(status_code=500, detail="Failed to generate regional analysis")
+
+    regional_data = response.data
+
+    if request.format == "excel":
+        dataframes = {}
+
+        # Regional metrics sheet; the loop variable must not shadow the
+        # `metric` filter name used in the workbook title below
+        metrics_data = []
+        for region_metric in regional_data.metrics:
+            metrics_data.append({
+                "Region Code": region_metric.region_id,
+                "Region Name": region_metric.region_name,
+                "Value": region_metric.value,
+                "Normalized Value": region_metric.normalized_value,
+                "Rank": region_metric.rank,
+                "Percentile": region_metric.percentile,
+                "Population": region_metric.metadata.get("population", "N/A")
+            })
+        dataframes["Regional Data"] = pd.DataFrame(metrics_data)
+
+        # Inequality indices
+        inequality_data = {
+            "Index": list(regional_data.inequalities.keys()),
+            "Value": list(regional_data.inequalities.values()),
+            "Interpretation": [
+                "High inequality" if v > 0.4 else "Moderate inequality" if v > 0.2 else "Low inequality"
+                for v in regional_data.inequalities.values()
+            ]
+        }
+        dataframes["Inequality Analysis"] = pd.DataFrame(inequality_data)
+
+        # Clusters
+        if regional_data.clusters:
+            cluster_data = []
+            for cluster in regional_data.clusters:
+                for region in cluster["regions"]:
+                    cluster_data.append({
+                        "Cluster": cluster["cluster_id"],
+                        "Region": region,
+                        "Avg Value": cluster["characteristics"].get("avg_value", "N/A")
+                    })
+            dataframes["Regional Clusters"] = pd.DataFrame(cluster_data)
+
+        # Recommendations
+        rec_data = {
+            "Recommendation": regional_data.recommendations,
+            "Priority": ["High"] * min(2, len(regional_data.recommendations)) +
+                        ["Medium"] * (len(regional_data.recommendations) - 2)
+        }
+        dataframes["Policy Recommendations"] = pd.DataFrame(rec_data)
+
+        excel_bytes = await export_service.generate_excel(
+            data=dataframes,
+            title=f"Regional Analysis - {metric}",
+            metadata={
+                'exported_at': datetime.now().isoformat(),
+                'metric': metric,
+                'region_type': region_type,
+                'regions_analyzed': regional_data.regions_analyzed,
+                'analysis_type': regional_data.analysis_type.value
+            }
+        )
+
+        return Response(
+            content=excel_bytes,
+            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+            headers={
+                "Content-Disposition": f"attachment; filename={filename}.xlsx"
+            }
+        )
+
+    else:
+        raise HTTPException(status_code=400, detail=f"Format {request.format} not supported for regional analysis export")
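Both Excel branches hand a dict of DataFrames to export_service.generate_excel, one sheet per key. That service is not part of this diff; a minimal sketch of the idea, assuming pandas with the openpyxl engine:

```python
import io

import pandas as pd

def generate_excel_sketch(data: dict[str, pd.DataFrame]) -> bytes:
    """Write each DataFrame to its own sheet and return the workbook as bytes."""
    buffer = io.BytesIO()
    with pd.ExcelWriter(buffer, engine="openpyxl") as writer:
        for sheet_name, df in data.items():
            # Excel caps sheet names at 31 characters
            df.to_excel(writer, sheet_name=sheet_name[:31], index=False)
    return buffer.getvalue()
```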
src/api/routes/geographic.py
ADDED

@@ -0,0 +1,578 @@
"""
API routes for geographic data endpoints.
Provides Brazilian geographic data and boundaries for map visualizations.
"""

from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from enum import Enum

from fastapi import APIRouter, Depends, HTTPException, Query, Path
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession

from src.agents.lampiao import LampiaoAgent, RegionType
from src.core.auth import get_current_user
from src.core.database import get_db
from src.core.cache import CacheService, CacheKey
from src.core.rate_limit import RateLimiter, rate_limit
from src.core import get_logger
from src.services.agent_lazy_loader import AgentLazyLoader
from src.agents.deodoro import AgentContext, AgentMessage
from src.core.exceptions import NotFoundError


logger = get_logger(__name__)
router = APIRouter(prefix="/api/v1/geo", tags=["geographic"])

# Rate limiter for geographic endpoints
geo_rate_limiter = RateLimiter(calls=50, period=60)  # 50 calls per minute

# Cache service
cache_service = CacheService()

# Lazy load agents
agent_loader = AgentLazyLoader()


class BrazilianRegion(BaseModel):
    """Brazilian region model."""

    id: str = Field(..., description="Region identifier (e.g., 'SP' for São Paulo)")
    name: str = Field(..., description="Region name")
    type: RegionType = Field(..., description="Region type")
    parent_id: Optional[str] = Field(None, description="Parent region ID")
    geometry: Optional[Dict[str, Any]] = Field(None, description="GeoJSON geometry")
    properties: Dict[str, Any] = Field(default_factory=dict, description="Additional properties")


class GeographicBoundary(BaseModel):
    """Geographic boundary model for map rendering."""

    type: str = Field("FeatureCollection", description="GeoJSON type")
    features: List[Dict[str, Any]] = Field(..., description="GeoJSON features")
    bbox: Optional[List[float]] = Field(None, description="Bounding box [min_lng, min_lat, max_lng, max_lat]")
    properties: Dict[str, Any] = Field(default_factory=dict, description="Collection properties")


class RegionalDataPoint(BaseModel):
    """Data point for a specific region."""

    region_id: str
    region_name: str
    value: float
    normalized_value: Optional[float] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)


class GeographicDataResponse(BaseModel):
    """Response model for geographic data."""

    data_type: str
    region_type: RegionType
    data_points: List[RegionalDataPoint]
    summary_statistics: Dict[str, float]
    timestamp: datetime
    cache_expires: datetime


# Brazilian states GeoJSON (simplified boundaries for demo)
BRAZIL_STATES_GEOJSON = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "id": "SP",
            "properties": {
                "name": "São Paulo",
                "region": "Sudeste",
                "population": 46649132,
                "area_km2": 248219.627,
                "capital": "São Paulo",
                "iso_code": "BR-SP"
            },
            "geometry": {
                "type": "Polygon",
                "coordinates": [[
                    [-53.089, -25.650],
                    [-53.089, -19.780],
                    [-44.161, -19.780],
                    [-44.161, -25.650],
                    [-53.089, -25.650]
                ]]
            }
        },
        {
            "type": "Feature",
            "id": "RJ",
            "properties": {
                "name": "Rio de Janeiro",
                "region": "Sudeste",
                "population": 17463349,
                "area_km2": 43780.157,
                "capital": "Rio de Janeiro",
                "iso_code": "BR-RJ"
            },
            "geometry": {
                "type": "Polygon",
                "coordinates": [[
                    [-44.889, -23.369],
                    [-44.889, -20.763],
                    [-40.958, -20.763],
                    [-40.958, -23.369],
                    [-44.889, -23.369]
                ]]
            }
        },
        {
            "type": "Feature",
            "id": "MG",
            "properties": {
                "name": "Minas Gerais",
                "region": "Sudeste",
                "population": 21411923,
                "area_km2": 586521.123,
                "capital": "Belo Horizonte",
                "iso_code": "BR-MG"
            },
            "geometry": {
                "type": "Polygon",
                "coordinates": [[
                    [-51.046, -22.921],
                    [-51.046, -14.235],
                    [-39.861, -14.235],
                    [-39.861, -22.921],
                    [-51.046, -22.921]
                ]]
            }
        },
        # Add more states as needed...
    ]
}


# Brazilian regions (macro-regions)
BRAZIL_REGIONS = {
    "norte": {
        "name": "Norte",
        "states": ["AC", "AP", "AM", "PA", "RO", "RR", "TO"],
        "center": {"lat": -3.4168, "lng": -60.0217}
    },
    "nordeste": {
        "name": "Nordeste",
        "states": ["AL", "BA", "CE", "MA", "PB", "PE", "PI", "RN", "SE"],
        "center": {"lat": -12.9718, "lng": -38.5014}
    },
    "centro_oeste": {
        "name": "Centro-Oeste",
        "states": ["DF", "GO", "MT", "MS"],
        "center": {"lat": -15.7801, "lng": -55.9292}
    },
    "sudeste": {
        "name": "Sudeste",
        "states": ["ES", "MG", "RJ", "SP"],
        "center": {"lat": -20.6547, "lng": -43.7662}
    },
    "sul": {
        "name": "Sul",
        "states": ["PR", "RS", "SC"],
        "center": {"lat": -27.5949, "lng": -50.8215}
    }
}

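The bundled GeoJSON covers only three states, with rectangular placeholder polygons. A sketch, not part of this commit, of swapping in real boundaries, assuming a full-resolution GeoJSON asset at a hypothetical local path:

```python
import json
from pathlib import Path

def load_state_boundaries(path: str = "data/br_states.geojson") -> dict:
    """Load full-resolution state polygons; the path is a hypothetical asset."""
    return json.loads(Path(path).read_text(encoding="utf-8"))
```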
@router.get("/boundaries/{region_type}", response_model=GeographicBoundary)
@rate_limit(geo_rate_limiter)
async def get_geographic_boundaries(
    region_type: RegionType = Path(..., description="Type of region boundaries to retrieve"),
    simplified: bool = Query(True, description="Return simplified boundaries for performance"),
    current_user: Dict[str, Any] = Depends(get_current_user),
):
    """
    Get geographic boundaries for Brazilian regions.

    Returns GeoJSON data suitable for rendering maps in the frontend.
    Currently supports state-level boundaries with plans to add municipalities.
    """
    try:
        cache_key = CacheKey(
            prefix="geo_boundaries",
            params={"region_type": region_type.value, "simplified": simplified}
        )

        # Try to get from cache
        cached_data = await cache_service.get(cache_key)
        if cached_data:
            logger.info("Returning cached geographic boundaries", region_type=region_type.value)
            return GeographicBoundary(**cached_data)

        # Generate boundaries based on region type
        if region_type == RegionType.STATE:
            boundaries = BRAZIL_STATES_GEOJSON

        elif region_type == RegionType.MACRO_REGION:
            # Generate macro-region boundaries by combining states
            features = []
            for region_id, region_info in BRAZIL_REGIONS.items():
                features.append({
                    "type": "Feature",
                    "id": region_id,
                    "properties": {
                        "name": region_info["name"],
                        "states": region_info["states"],
                        "center": region_info["center"]
                    },
                    "geometry": {
                        "type": "Polygon",
                        "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]  # Placeholder
                    }
                })

            boundaries = {
                "type": "FeatureCollection",
                "features": features
            }

        else:
            raise HTTPException(
                status_code=501,
                detail=f"Boundaries for {region_type.value} not yet implemented"
            )

        # Calculate bounding box for Brazil
        boundaries["bbox"] = [-73.9872, -33.7506, -34.7299, 5.2718]

        result = GeographicBoundary(
            type=boundaries["type"],
            features=boundaries["features"],
            bbox=boundaries.get("bbox"),
            properties={
                "region_type": region_type.value,
                "simplified": simplified,
                "total_features": len(boundaries["features"])
            }
        )

        # Cache the result
        await cache_service.set(cache_key, result.dict(), expire=86400)  # 24 hours

        return result

    except Exception as e:
        logger.error(
            "Failed to retrieve geographic boundaries",
            region_type=region_type.value,
            error=str(e),
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Failed to retrieve boundaries: {str(e)}")


@router.get("/regions", response_model=List[BrazilianRegion])
@rate_limit(geo_rate_limiter)
async def list_regions(
    region_type: RegionType = Query(RegionType.STATE, description="Type of regions to list"),
    parent_id: Optional[str] = Query(None, description="Filter by parent region"),
    search: Optional[str] = Query(None, description="Search regions by name"),
    current_user: Dict[str, Any] = Depends(get_current_user),
):
    """
    List Brazilian regions with their metadata.

    Useful for populating dropdown menus and region selectors in the frontend.
    """
    try:
        # Get Lampião agent for region data
        lampiao_agent = await agent_loader.get_agent("lampiao")
        if not lampiao_agent:
            lampiao_agent = LampiaoAgent()
            await lampiao_agent.initialize()

        regions = []

        if region_type == RegionType.STATE:
            # Get all states from Lampião
            for state_id, state_info in lampiao_agent.brazil_regions.items():
                if search and search.lower() not in state_info["name"].lower():
                    continue

                regions.append(BrazilianRegion(
                    id=state_id,
                    name=state_info["name"],
                    type=RegionType.STATE,
                    parent_id=None,
                    properties={
                        "region": state_info["region"],
                        "capital": state_info["capital"],
                        "area_km2": state_info["area"]
                    }
                ))

        elif region_type == RegionType.MACRO_REGION:
            # Get macro regions
            for region_id, region_info in BRAZIL_REGIONS.items():
                if search and search.lower() not in region_info["name"].lower():
                    continue

                regions.append(BrazilianRegion(
                    id=region_id,
                    name=region_info["name"],
                    type=RegionType.MACRO_REGION,
                    parent_id=None,
                    properties={
                        "states": region_info["states"],
                        "center": region_info["center"]
                    }
                ))

        # Sort by name
        regions.sort(key=lambda r: r.name)

        return regions

    except Exception as e:
        logger.error(
            "Failed to list regions",
            region_type=region_type.value,
            error=str(e),
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Failed to list regions: {str(e)}")


@router.get("/regions/{region_id}", response_model=BrazilianRegion)
@rate_limit(geo_rate_limiter)
async def get_region_details(
    region_id: str = Path(..., description="Region identifier"),
    include_geometry: bool = Query(False, description="Include GeoJSON geometry"),
    current_user: Dict[str, Any] = Depends(get_current_user),
):
    """
    Get detailed information about a specific region.

    Includes metadata and optionally the geographic boundary geometry.
    """
    try:
        # Get Lampião agent
        lampiao_agent = await agent_loader.get_agent("lampiao")
        if not lampiao_agent:
            lampiao_agent = LampiaoAgent()
            await lampiao_agent.initialize()

        # Check if it's a state
        if region_id in lampiao_agent.brazil_regions:
            state_info = lampiao_agent.brazil_regions[region_id]

            geometry = None
            if include_geometry:
                # Find geometry in GeoJSON
                for feature in BRAZIL_STATES_GEOJSON["features"]:
                    if feature["id"] == region_id:
                        geometry = feature["geometry"]
                        break

            return BrazilianRegion(
                id=region_id,
                name=state_info["name"],
                type=RegionType.STATE,
                parent_id=None,
                geometry=geometry,
                properties={
                    "region": state_info["region"],
                    "capital": state_info["capital"],
                    "area_km2": state_info["area"],
                    "iso_code": f"BR-{region_id}"
                }
            )

        # Check if it's a macro region
        elif region_id in BRAZIL_REGIONS:
            region_info = BRAZIL_REGIONS[region_id]

            return BrazilianRegion(
                id=region_id,
                name=region_info["name"],
                type=RegionType.MACRO_REGION,
                parent_id=None,
                geometry=None,  # TODO: Implement combined geometry
                properties={
                    "states": region_info["states"],
                    "center": region_info["center"],
                    "state_count": len(region_info["states"])
                }
            )

        else:
            raise NotFoundError(f"Region '{region_id}' not found")

    except NotFoundError:
        raise HTTPException(status_code=404, detail=f"Region '{region_id}' not found")
    except Exception as e:
        logger.error(
            "Failed to get region details",
            region_id=region_id,
            error=str(e),
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Failed to get region details: {str(e)}")

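The listing and detail endpoints above pair naturally: /regions fills a selector, /regions/{region_id} resolves the choice. A client-side sketch, assuming RegionType serializes to lowercase values like "state" and the same base URL and auth as elsewhere:

```python
import httpx

BASE = "http://localhost:8000/api/v1/geo"  # assumed local dev server
HEADERS = {"Authorization": "Bearer <token>"}

options = httpx.get(f"{BASE}/regions", params={"region_type": "state"}, headers=HEADERS).json()
choices = [(r["id"], r["name"]) for r in options]  # e.g. ("SP", "São Paulo")

detail = httpx.get(f"{BASE}/regions/SP", params={"include_geometry": True}, headers=HEADERS).json()
print(detail["properties"]["capital"])  # "São Paulo"
```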
@router.get("/data/{metric}", response_model=GeographicDataResponse)
@rate_limit(geo_rate_limiter)
async def get_geographic_data(
    metric: str = Path(..., description="Metric to retrieve (e.g., contracts, spending)"),
    region_type: RegionType = Query(RegionType.STATE, description="Geographic aggregation level"),
    normalize: bool = Query(False, description="Normalize by population or area"),
    time_range: str = Query("30d", description="Time range: 7d, 30d, 90d, 1y"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """
    Get data aggregated by geographic regions.

    This endpoint aggregates various metrics by geographic regions,
    perfect for creating choropleth maps and regional comparisons.
    """
    try:
        logger.info(
            "Retrieving geographic data",
            metric=metric,
            region_type=region_type.value,
            normalize=normalize,
        )

        # Get Lampião agent for regional analysis
        lampiao_agent = await agent_loader.get_agent("lampiao")
        if not lampiao_agent:
            lampiao_agent = LampiaoAgent()
            await lampiao_agent.initialize()

        # Create context
        context = AgentContext(
            investigation_id=f"geo_data_{datetime.utcnow().timestamp()}",
            user_id=current_user["id"],
            session_id=current_user.get("session_id", "default"),
            metadata={
                "metric": metric,
                "region_type": region_type.value,
                "time_range": time_range
            }
        )

        # Request regional analysis
        message = AgentMessage(
            role="user",
            content=f"Analyze {metric} by region",
            data={
                "metric": metric,
                "region_type": region_type.value,
                "normalize": normalize,
                "time_range": time_range
            }
        )

        response = await lampiao_agent.process(message, context)

        if not response.success:
            raise HTTPException(status_code=500, detail="Regional analysis failed")

        regional_data = response.data

        # Convert to API response format
        data_points = []
        for metric_data in regional_data.metrics:
            data_points.append(RegionalDataPoint(
                region_id=metric_data.region_id,
                region_name=metric_data.region_name,
                value=metric_data.value,
                normalized_value=metric_data.normalized_value if normalize else None,
                metadata={
                    "rank": metric_data.rank,
                    "percentile": metric_data.percentile,
                    **metric_data.metadata
                }
            ))

        cache_ttl = 3600  # 1 hour

        return GeographicDataResponse(
            data_type=metric,
            region_type=region_type,
            data_points=data_points,
            summary_statistics=regional_data.statistics,
            timestamp=datetime.utcnow(),
            cache_expires=datetime.utcnow() + timedelta(seconds=cache_ttl)
        )

    except Exception as e:
        logger.error(
            "Failed to retrieve geographic data",
            metric=metric,
            error=str(e),
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Failed to retrieve geographic data: {str(e)}")


@router.get("/coordinates/{region_id}")
@rate_limit(geo_rate_limiter)
async def get_region_coordinates(
    region_id: str = Path(..., description="Region identifier"),
    current_user: Dict[str, Any] = Depends(get_current_user),
):
    """
    Get center coordinates for a region.

    Useful for placing markers or centering maps on specific regions.
    """
    try:
        # Predefined coordinates for major cities/states
        coordinates = {
            # State capitals
            "SP": {"lat": -23.5505, "lng": -46.6333, "name": "São Paulo"},
            "RJ": {"lat": -22.9068, "lng": -43.1729, "name": "Rio de Janeiro"},
            "MG": {"lat": -19.9167, "lng": -43.9345, "name": "Belo Horizonte"},
            "BA": {"lat": -12.9714, "lng": -38.5014, "name": "Salvador"},
            "RS": {"lat": -30.0346, "lng": -51.2177, "name": "Porto Alegre"},
            "PR": {"lat": -25.4290, "lng": -49.2710, "name": "Curitiba"},
            "PE": {"lat": -8.0476, "lng": -34.8770, "name": "Recife"},
            "CE": {"lat": -3.7172, "lng": -38.5433, "name": "Fortaleza"},
            "PA": {"lat": -1.4558, "lng": -48.4902, "name": "Belém"},
            "MA": {"lat": -2.5307, "lng": -44.3068, "name": "São Luís"},
            "GO": {"lat": -16.6869, "lng": -49.2648, "name": "Goiânia"},
            "DF": {"lat": -15.7801, "lng": -47.9292, "name": "Brasília"},
            # Add more as needed...
        }

        # Check macro regions
        if region_id in BRAZIL_REGIONS:
            region = BRAZIL_REGIONS[region_id]
            return {
                "region_id": region_id,
                "name": region["name"],
                "coordinates": region["center"],
                "type": "macro_region"
            }

        # Check states
        if region_id in coordinates:
            coord = coordinates[region_id]
            return {
                "region_id": region_id,
                "name": coord["name"],
                "coordinates": {"lat": coord["lat"], "lng": coord["lng"]},
                "type": "state_capital"
            }

        raise HTTPException(status_code=404, detail=f"Coordinates not found for region '{region_id}'")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(
            "Failed to get region coordinates",
            region_id=region_id,
            error=str(e),
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Failed to get coordinates: {str(e)}")
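Feature IDs in the boundaries GeoJSON match the region_id values returned by /data/{metric}, so a choropleth can be assembled client-side. A minimal sketch joining the two responses:

```python
def merge_for_choropleth(boundaries: dict, geo_data: dict) -> dict:
    """Attach metric values to GeoJSON features, keyed by region ID."""
    values = {p["region_id"]: p["value"] for p in geo_data["data_points"]}
    for feature in boundaries["features"]:
        # Regions without data get None so the map layer can style them as "no data"
        feature["properties"]["value"] = values.get(feature["id"])
    return boundaries
```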
src/api/routes/visualization.py
ADDED

@@ -0,0 +1,608 @@
"""
API routes for data visualization endpoints.
Provides aggregated and formatted data optimized for frontend consumption.
"""

from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Union
from enum import Enum

from fastapi import APIRouter, Depends, HTTPException, Query, Path, BackgroundTasks
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession

from src.agents.oscar_niemeyer import (
    OscarNiemeyerAgent,
    AggregationType,
    VisualizationType,
    TimeGranularity,
)
from src.agents.lampiao import LampiaoAgent, RegionType
from src.core.auth import get_current_user
from src.core.database import get_db
from src.core.cache import CacheService
from src.core.rate_limit import RateLimiter, rate_limit
from src.core import get_logger
from src.services.agent_lazy_loader import AgentLazyLoader
from src.agents.deodoro import AgentContext


logger = get_logger(__name__)
router = APIRouter(prefix="/api/v1/visualization", tags=["visualization"])

# Rate limiter for visualization endpoints
viz_rate_limiter = RateLimiter(calls=30, period=60)  # 30 calls per minute

# Lazy load agents
agent_loader = AgentLazyLoader()


class DatasetType(str, Enum):
    """Types of datasets available for visualization."""
    CONTRACTS = "contracts"
    SPENDING = "spending"
    TRANSFERS = "transfers"
    BIDDINGS = "biddings"
    AUDIT = "audit"
    REGIONAL = "regional"


class ChartDataRequest(BaseModel):
    """Request model for chart data."""

    dataset_type: DatasetType
    chart_type: Optional[VisualizationType] = None
    time_range: Optional[str] = Field(default="30d", description="Time range: 7d, 30d, 90d, 1y, all")
    granularity: Optional[TimeGranularity] = TimeGranularity.DAY
    dimensions: List[str] = Field(default_factory=list, description="Dimensions for grouping")
    metrics: List[str] = Field(default_factory=list, description="Metrics to calculate")
    filters: Dict[str, Any] = Field(default_factory=dict, description="Additional filters")
    limit: int = Field(default=100, le=1000, description="Maximum number of data points")


class RegionalDataRequest(BaseModel):
    """Request model for regional data visualization."""

    metric: str = Field(..., description="Metric to analyze")
    region_type: RegionType = RegionType.STATE
    aggregation: AggregationType = AggregationType.SUM
    normalize: bool = Field(default=False, description="Normalize by population/area")
    include_metadata: bool = Field(default=True, description="Include regional metadata")
    filters: Dict[str, Any] = Field(default_factory=dict)


class TimeSeriesRequest(BaseModel):
    """Request model for time series data."""

    metric: str = Field(..., description="Metric to analyze over time")
    entity_id: Optional[str] = Field(None, description="Specific entity to track")
    start_date: Optional[datetime] = None
    end_date: Optional[datetime] = None
    granularity: TimeGranularity = TimeGranularity.DAY
    aggregation: AggregationType = AggregationType.SUM
    include_forecast: bool = Field(default=False, description="Include forecast data")
    comparison_period: Optional[str] = Field(None, description="Compare with previous period")


class VisualizationResponse(BaseModel):
    """Standard response for visualization data."""

    visualization_id: str
    title: str
    subtitle: Optional[str]
    chart_type: VisualizationType
    data: Union[List[Dict[str, Any]], Dict[str, Any]]
    metadata: Dict[str, Any]
    cache_timestamp: datetime
    expires_at: datetime

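The request models validate eagerly, which makes them easy to exercise in tests before wiring a client. A sketch, assuming Pydantic v1-style .dict() as used elsewhere in this commit:

```python
from src.api.routes.visualization import ChartDataRequest, DatasetType

req = ChartDataRequest(
    dataset_type=DatasetType.SPENDING,
    time_range="90d",
    dimensions=["organ", "month"],  # hypothetical dimension names
    metrics=["total_value"],
    limit=500,
)
print(req.dict()["dataset_type"])  # "spending": str-based enums serialize to their values
```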
@router.post("/chart-data", response_model=VisualizationResponse)
@rate_limit(viz_rate_limiter)
async def get_chart_data(
    request: ChartDataRequest,
    background_tasks: BackgroundTasks,
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """
    Get aggregated data optimized for chart visualization.

    This endpoint uses the Oscar Niemeyer agent to process and aggregate data
    in formats optimized for various chart types.
    """
    try:
        logger.info(
            "Processing chart data request",
            user_id=current_user["id"],
            dataset_type=request.dataset_type.value,
            chart_type=request.chart_type.value if request.chart_type else "auto",
        )

        # Get Oscar Niemeyer agent
        oscar_agent = await agent_loader.get_agent("oscar_niemeyer")
        if not oscar_agent:
            oscar_agent = OscarNiemeyerAgent()
            await oscar_agent.initialize()

        # Create agent context
        context = AgentContext(
            investigation_id=f"viz_{datetime.utcnow().timestamp()}",
            user_id=current_user["id"],
            session_id=current_user.get("session_id", "default"),
            metadata={
                "request_type": "chart_data",
                "dataset": request.dataset_type.value
            }
        )

        # Prepare message for Oscar agent
        from src.agents.deodoro import AgentMessage
        message = AgentMessage(
            role="user",
            content=f"Generate chart data for {request.dataset_type.value}",
            type="visualization_metadata",
            data={
                "data_type": request.dataset_type.value,
                "dimensions": request.dimensions,
                "metrics": request.metrics,
                "filters": request.filters,
                "limit": request.limit,
                "time_range": request.time_range,
                "granularity": request.granularity.value if request.granularity else None,
            }
        )

        # Process with Oscar agent
        response = await oscar_agent.process(message, context)

        if not response.success:
            raise HTTPException(status_code=500, detail="Failed to generate visualization data")

        # Prepare visualization response
        viz_metadata = response.data
        cache_ttl = 3600  # 1 hour cache

        return VisualizationResponse(
            visualization_id=viz_metadata.visualization_id,
            title=viz_metadata.title,
            subtitle=viz_metadata.subtitle,
            chart_type=viz_metadata.visualization_type,
            data={
                "series": viz_metadata.series,
                "x_axis": viz_metadata.x_axis,
                "y_axis": viz_metadata.y_axis,
                "data_url": viz_metadata.data_url,
            },
            metadata={
                "filters": viz_metadata.filters,
                "options": viz_metadata.options,
                "dataset_type": request.dataset_type.value,
                "record_count": request.limit,
            },
            cache_timestamp=datetime.utcnow(),
            expires_at=datetime.utcnow() + timedelta(seconds=cache_ttl),
        )

    except Exception as e:
        logger.error(
            "Chart data generation failed",
            error=str(e),
            user_id=current_user["id"],
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Chart data generation failed: {str(e)}")

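The get-agent-or-construct pattern in the handler above repeats throughout these routes; a small helper could consolidate it. A sketch, not part of this commit, written against the module's own agent_loader:

```python
async def get_or_create_agent(name: str, factory):
    """Return a lazily loaded agent, building and initializing one on a cache miss."""
    agent = await agent_loader.get_agent(name)
    if not agent:
        agent = factory()
        await agent.initialize()
    return agent

# e.g. oscar_agent = await get_or_create_agent("oscar_niemeyer", OscarNiemeyerAgent)
```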
@router.post("/regional-map", response_model=VisualizationResponse)
@rate_limit(viz_rate_limiter)
async def get_regional_map_data(
    request: RegionalDataRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """
    Get regional data formatted for map visualization.

    Uses the Lampião agent to analyze regional disparities and format data
    for choropleth maps and other geographic visualizations.
    """
    try:
        logger.info(
            "Processing regional map data request",
            user_id=current_user["id"],
            metric=request.metric,
            region_type=request.region_type.value,
        )

        # Get Lampião agent
        lampiao_agent = await agent_loader.get_agent("lampiao")
        if not lampiao_agent:
            lampiao_agent = LampiaoAgent()
            await lampiao_agent.initialize()

        # Create agent context
        context = AgentContext(
            investigation_id=f"regional_{datetime.utcnow().timestamp()}",
            user_id=current_user["id"],
            session_id=current_user.get("session_id", "default"),
            metadata={
                "request_type": "regional_map",
                "metric": request.metric
            }
        )

        # Get Oscar agent for aggregation
        oscar_agent = await agent_loader.get_agent("oscar_niemeyer")
        if not oscar_agent:
            oscar_agent = OscarNiemeyerAgent()
            await oscar_agent.initialize()

        # First, get regional analysis from Lampião
        from src.agents.deodoro import AgentMessage
        lampiao_message = AgentMessage(
            role="user",
            content=f"Analyze regional distribution of {request.metric}",
            data={
                "metric": request.metric,
                "region_type": request.region_type.value,
                "filters": request.filters,
            }
        )

        lampiao_response = await lampiao_agent.process(lampiao_message, context)

        if not lampiao_response.success:
            raise HTTPException(status_code=500, detail="Regional analysis failed")

        # Then aggregate for visualization with Oscar
        regional_data = lampiao_response.data
        oscar_message = AgentMessage(
            role="user",
            content="Aggregate regional data for map visualization",
            type="spatial_aggregation",
            data={
                "data": [
                    {
                        "region": m.region_id,
                        "name": m.region_name,
                        "value": m.value,
                        "normalized_value": m.normalized_value,
                        "rank": m.rank,
                        "percentile": m.percentile,
                        **m.metadata
                    }
                    for m in regional_data.metrics
                ],
                "region_type": request.region_type.value,
                "metrics": [request.metric],
            }
        )

        oscar_response = await oscar_agent.process(oscar_message, context)

        if not oscar_response.success:
            raise HTTPException(status_code=500, detail="Data aggregation failed")

        # Format response
        aggregated_data = oscar_response.data

        return VisualizationResponse(
            visualization_id=f"map_{context.investigation_id}",
            title=f"{request.metric.replace('_', ' ').title()} por {request.region_type.value}",
            subtitle=f"Análise de disparidades regionais - {len(regional_data.metrics)} regiões",
            chart_type=VisualizationType.MAP,
            data=aggregated_data["regions"],
            metadata={
                "statistics": regional_data.statistics,
                "inequalities": regional_data.inequalities,
                "clusters": regional_data.clusters,
                "visualization": aggregated_data["visualization"],
                "region_type": request.region_type.value,
                "metric": request.metric,
                "normalized": request.normalize,
            },
            cache_timestamp=datetime.utcnow(),
            expires_at=datetime.utcnow() + timedelta(hours=4),
        )

    except Exception as e:
        logger.error(
            "Regional map data generation failed",
            error=str(e),
            user_id=current_user["id"],
            exc_info=True,
        )
        raise HTTPException(status_code=500, detail=f"Regional map data failed: {str(e)}")

| 319 |
+
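The map payload is meant to feed a choropleth layer on the frontend. A hypothetical consumer sketch, assuming Oscar returns the regions in the same shape they are fed above (entries keyed by "region", "value", and "normalized_value"; that shape is an assumption, not confirmed by this diff):

def to_choropleth(viz: dict) -> dict:
    # Build a region_id -> value lookup for a choropleth layer.
    # Assumes viz["data"] is the aggregated region list and that each
    # entry keeps the keys it was fed ("region", "value", "normalized_value").
    lookup = {}
    for entry in viz["data"]:
        value = entry.get("normalized_value")
        lookup[entry["region"]] = value if value is not None else entry["value"]
    return lookup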
@router.post("/time-series", response_model=VisualizationResponse)
|
| 320 |
+
@rate_limit(viz_rate_limiter)
|
| 321 |
+
async def get_time_series_data(
|
| 322 |
+
request: TimeSeriesRequest,
|
| 323 |
+
current_user: Dict[str, Any] = Depends(get_current_user),
|
| 324 |
+
db: AsyncSession = Depends(get_db),
|
| 325 |
+
):
|
| 326 |
+
"""
|
| 327 |
+
Get time series data optimized for line charts and trend analysis.
|
| 328 |
+
|
| 329 |
+
Supports multiple granularities and can include forecast data
|
| 330 |
+
when the include_forecast flag is set.
|
| 331 |
+
"""
|
| 332 |
+
try:
|
| 333 |
+
logger.info(
|
| 334 |
+
"Processing time series request",
|
| 335 |
+
user_id=current_user["id"],
|
| 336 |
+
metric=request.metric,
|
| 337 |
+
granularity=request.granularity.value,
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
# Get Oscar Niemeyer agent
|
| 341 |
+
oscar_agent = await agent_loader.get_agent("oscar_niemeyer")
|
| 342 |
+
if not oscar_agent:
|
| 343 |
+
oscar_agent = OscarNiemeyerAgent()
|
| 344 |
+
await oscar_agent.initialize()
|
| 345 |
+
|
| 346 |
+
# Create agent context
|
| 347 |
+
context = AgentContext(
|
| 348 |
+
investigation_id=f"ts_{datetime.utcnow().timestamp()}",
|
| 349 |
+
user_id=current_user["id"],
|
| 350 |
+
session_id=current_user.get("session_id", "default"),
|
| 351 |
+
metadata={
|
| 352 |
+
"request_type": "time_series",
|
| 353 |
+
"metric": request.metric
|
| 354 |
+
}
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
# Generate time series data
|
| 358 |
+
time_series_data = await oscar_agent.generate_time_series(
|
| 359 |
+
request.metric,
|
| 360 |
+
request.start_date.isoformat() if request.start_date else None,
|
| 361 |
+
request.end_date.isoformat() if request.end_date else None,
|
| 362 |
+
request.granularity,
|
| 363 |
+
context
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
# Format data for visualization
|
| 367 |
+
chart_data = []
|
| 368 |
+
for i, (time_point, value) in enumerate(zip(time_series_data.time_points, time_series_data.values)):
|
| 369 |
+
chart_data.append({
|
| 370 |
+
"timestamp": time_point.isoformat(),
|
| 371 |
+
"value": value,
|
| 372 |
+
"metric": request.metric,
|
| 373 |
+
"index": i
|
| 374 |
+
})
|
| 375 |
+
|
| 376 |
+
# Add forecast data if requested
|
| 377 |
+
forecast_data = []
|
| 378 |
+
if request.include_forecast:
|
| 379 |
+
# TODO: Integrate with Ceuci predictive agent for actual forecasting
|
| 380 |
+
last_value = time_series_data.values[-1] if time_series_data.values else 0
|
| 381 |
+
last_time = time_series_data.time_points[-1] if time_series_data.time_points else datetime.utcnow()
|
| 382 |
+
|
| 383 |
+
for i in range(7): # 7 periods forecast
|
| 384 |
+
if request.granularity == TimeGranularity.DAY:
|
| 385 |
+
next_time = last_time + timedelta(days=i+1)
|
| 386 |
+
else:
|
| 387 |
+
next_time = last_time + timedelta(days=(i+1)*30)
|
| 388 |
+
|
| 389 |
+
forecast_data.append({
|
| 390 |
+
"timestamp": next_time.isoformat(),
|
| 391 |
+
"value": last_value * (1 + 0.02 * (i+1)), # Simple 2% growth
|
| 392 |
+
"is_forecast": True,
|
| 393 |
+
"confidence_lower": last_value * (1 + 0.01 * (i+1)),
|
| 394 |
+
"confidence_upper": last_value * (1 + 0.03 * (i+1)),
|
| 395 |
+
})
|
| 396 |
+
|
| 397 |
+
return VisualizationResponse(
|
| 398 |
+
visualization_id=time_series_data.series_id,
|
| 399 |
+
title=f"{request.metric.replace('_', ' ').title()} - Série Temporal",
|
| 400 |
+
subtitle=f"Granularidade: {request.granularity.value}",
|
| 401 |
+
chart_type=VisualizationType.LINE_CHART,
|
| 402 |
+
data={
|
| 403 |
+
"historical": chart_data,
|
| 404 |
+
"forecast": forecast_data if request.include_forecast else [],
|
| 405 |
+
"metadata": time_series_data.metadata,
|
| 406 |
+
},
|
| 407 |
+
metadata={
|
| 408 |
+
"metric": request.metric,
|
| 409 |
+
"granularity": request.granularity.value,
|
| 410 |
+
"aggregation_type": time_series_data.aggregation_type.value,
|
| 411 |
+
"start_date": time_series_data.time_points[0].isoformat() if time_series_data.time_points else None,
|
| 412 |
+
"end_date": time_series_data.time_points[-1].isoformat() if time_series_data.time_points else None,
|
| 413 |
+
"data_points": len(chart_data),
|
| 414 |
+
"has_forecast": request.include_forecast,
|
| 415 |
+
},
|
| 416 |
+
cache_timestamp=datetime.utcnow(),
|
| 417 |
+
expires_at=datetime.utcnow() + timedelta(hours=1),
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
except Exception as e:
|
| 421 |
+
logger.error(
|
| 422 |
+
"Time series generation failed",
|
| 423 |
+
error=str(e),
|
| 424 |
+
user_id=current_user["id"],
|
| 425 |
+
exc_info=True,
|
| 426 |
+
)
|
| 427 |
+
raise HTTPException(status_code=500, detail=f"Time series generation failed: {str(e)}")
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
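The forecast branch above is a stub pending Ceuci integration (see the TODO). Restated standalone, the placeholder math is linear growth of 2% of the last observed value per period, with a confidence band of +1% to +3% per period:

from datetime import datetime, timedelta

def naive_forecast(last_value: float, last_time: datetime, daily: bool, periods: int = 7) -> list:
    # Mirrors the stub above: linear 2% growth per period, with the
    # band growing 1% (lower) and 3% (upper) of the last value per period.
    points = []
    for i in range(periods):
        step = timedelta(days=i + 1) if daily else timedelta(days=(i + 1) * 30)
        points.append({
            "timestamp": (last_time + step).isoformat(),
            "value": last_value * (1 + 0.02 * (i + 1)),
            "is_forecast": True,
            "confidence_lower": last_value * (1 + 0.01 * (i + 1)),
            "confidence_upper": last_value * (1 + 0.03 * (i + 1)),
        })
    return points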
@router.get("/dashboard-summary")
|
| 431 |
+
@rate_limit(viz_rate_limiter)
|
| 432 |
+
async def get_dashboard_summary(
|
| 433 |
+
time_range: str = Query("30d", description="Time range for summary"),
|
| 434 |
+
current_user: Dict[str, Any] = Depends(get_current_user),
|
| 435 |
+
db: AsyncSession = Depends(get_db),
|
| 436 |
+
):
|
| 437 |
+
"""
|
| 438 |
+
Get a summary of key metrics formatted for dashboard display.
|
| 439 |
+
|
| 440 |
+
Returns multiple visualization-ready datasets for a complete dashboard view.
|
| 441 |
+
"""
|
| 442 |
+
try:
|
| 443 |
+
logger.info(
|
| 444 |
+
"Generating dashboard summary",
|
| 445 |
+
user_id=current_user["id"],
|
| 446 |
+
time_range=time_range,
|
| 447 |
+
)
|
| 448 |
+
|
| 449 |
+
# This would aggregate data from multiple sources
|
| 450 |
+
# For now, returning a structured summary
|
| 451 |
+
|
| 452 |
+
return {
|
| 453 |
+
"summary_id": f"dashboard_{datetime.utcnow().timestamp()}",
|
| 454 |
+
"time_range": time_range,
|
| 455 |
+
"key_metrics": [
|
| 456 |
+
{
|
| 457 |
+
"metric": "total_contracts",
|
| 458 |
+
"value": 15420,
|
| 459 |
+
"change": 12.5,
|
| 460 |
+
"change_type": "increase",
|
| 461 |
+
"visualization_type": "gauge",
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"metric": "total_value",
|
| 465 |
+
"value": 2547890000,
|
| 466 |
+
"formatted_value": "R$ 2.55B",
|
| 467 |
+
"change": -3.2,
|
| 468 |
+
"change_type": "decrease",
|
| 469 |
+
"visualization_type": "gauge",
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"metric": "anomalies_detected",
|
| 473 |
+
"value": 47,
|
| 474 |
+
"severity_high": 12,
|
| 475 |
+
"severity_medium": 20,
|
| 476 |
+
"severity_low": 15,
|
| 477 |
+
"visualization_type": "gauge",
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"metric": "investigations_active",
|
| 481 |
+
"value": 8,
|
| 482 |
+
"completed_this_period": 23,
|
| 483 |
+
"visualization_type": "gauge",
|
| 484 |
+
},
|
| 485 |
+
],
|
| 486 |
+
"charts": [
|
| 487 |
+
{
|
| 488 |
+
"id": "spending_trend",
|
| 489 |
+
"title": "Gastos ao Longo do Tempo",
|
| 490 |
+
"type": "line_chart",
|
| 491 |
+
"endpoint": "/api/v1/visualization/time-series",
|
| 492 |
+
"params": {"metric": "spending", "granularity": "day"},
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"id": "regional_distribution",
|
| 496 |
+
"title": "Distribuição Regional de Contratos",
|
| 497 |
+
"type": "map",
|
| 498 |
+
"endpoint": "/api/v1/visualization/regional-map",
|
| 499 |
+
"params": {"metric": "contract_value", "region_type": "state"},
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"id": "top_categories",
|
| 503 |
+
"title": "Principais Categorias de Gastos",
|
| 504 |
+
"type": "bar_chart",
|
| 505 |
+
"endpoint": "/api/v1/visualization/chart-data",
|
| 506 |
+
"params": {"dataset_type": "spending", "dimensions": ["category"]},
|
| 507 |
+
},
|
| 508 |
+
],
|
| 509 |
+
"alerts": [
|
| 510 |
+
{
|
| 511 |
+
"id": "alert_001",
|
| 512 |
+
"type": "anomaly",
|
| 513 |
+
"severity": "high",
|
| 514 |
+
"message": "Padrão incomum detectado em contratos de TI",
|
| 515 |
+
"timestamp": datetime.utcnow().isoformat(),
|
| 516 |
+
},
|
| 517 |
+
],
|
| 518 |
+
"cache_timestamp": datetime.utcnow(),
|
| 519 |
+
"expires_at": datetime.utcnow() + timedelta(minutes=15),
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
except Exception as e:
|
| 523 |
+
logger.error(
|
| 524 |
+
"Dashboard summary generation failed",
|
| 525 |
+
error=str(e),
|
| 526 |
+
user_id=current_user["id"],
|
| 527 |
+
exc_info=True,
|
| 528 |
+
)
|
| 529 |
+
raise HTTPException(status_code=500, detail=f"Dashboard summary failed: {str(e)}")
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
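Because the summary lists each chart with its endpoint and params, a frontend or an integration test can fan the requests out. A hypothetical sketch (host and token are placeholders; all three listed endpoints are POST routes):

import asyncio
import httpx

async def load_dashboard_charts(summary: dict, token: str) -> dict:
    # Fetch every chart advertised by the dashboard summary concurrently.
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(base_url="https://cidadao-ai.example", headers=headers) as client:
        async def fetch(chart: dict):
            response = await client.post(chart["endpoint"], json=chart["params"])
            response.raise_for_status()
            return chart["id"], response.json()
        results = await asyncio.gather(*(fetch(chart) for chart in summary["charts"]))
    return dict(results)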
@router.get("/supported-charts")
|
| 533 |
+
async def get_supported_chart_types():
|
| 534 |
+
"""
|
| 535 |
+
Get list of supported chart types and their configurations.
|
| 536 |
+
|
| 537 |
+
This helps the frontend know what visualizations are available.
|
| 538 |
+
"""
|
| 539 |
+
return {
|
| 540 |
+
"chart_types": [
|
| 541 |
+
{
|
| 542 |
+
"type": "line_chart",
|
| 543 |
+
"name": "Gráfico de Linhas",
|
| 544 |
+
"description": "Ideal para séries temporais e tendências",
|
| 545 |
+
"supports": ["time_series", "comparisons", "trends"],
|
| 546 |
+
"required_dimensions": 1,
|
| 547 |
+
"max_series": 10,
|
| 548 |
+
},
|
| 549 |
+
{
|
| 550 |
+
"type": "bar_chart",
|
| 551 |
+
"name": "Gráfico de Barras",
|
| 552 |
+
"description": "Comparação entre categorias",
|
| 553 |
+
"supports": ["categories", "rankings", "distributions"],
|
| 554 |
+
"required_dimensions": 1,
|
| 555 |
+
"max_series": 5,
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"type": "pie_chart",
|
| 559 |
+
"name": "Gráfico de Pizza",
|
| 560 |
+
"description": "Proporções e percentuais",
|
| 561 |
+
"supports": ["proportions", "composition"],
|
| 562 |
+
"required_dimensions": 1,
|
| 563 |
+
"max_series": 1,
|
| 564 |
+
"max_slices": 8,
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"type": "scatter_plot",
|
| 568 |
+
"name": "Gráfico de Dispersão",
|
| 569 |
+
"description": "Correlações entre variáveis",
|
| 570 |
+
"supports": ["correlations", "outliers"],
|
| 571 |
+
"required_dimensions": 2,
|
| 572 |
+
"max_points": 1000,
|
| 573 |
+
},
|
| 574 |
+
{
|
| 575 |
+
"type": "heatmap",
|
| 576 |
+
"name": "Mapa de Calor",
|
| 577 |
+
"description": "Densidade e intensidade em duas dimensões",
|
| 578 |
+
"supports": ["density", "matrix", "correlations"],
|
| 579 |
+
"required_dimensions": 2,
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "map",
|
| 583 |
+
"name": "Mapa Coroplético",
|
| 584 |
+
"description": "Dados geográficos por região",
|
| 585 |
+
"supports": ["geographic", "regional"],
|
| 586 |
+
"required_dimensions": 0,
|
| 587 |
+
"regions": ["state", "municipality", "macro_region"],
|
| 588 |
+
},
|
| 589 |
+
{
|
| 590 |
+
"type": "gauge",
|
| 591 |
+
"name": "Medidor",
|
| 592 |
+
"description": "Valor único com indicador de meta",
|
| 593 |
+
"supports": ["kpi", "single_value"],
|
| 594 |
+
"required_dimensions": 0,
|
| 595 |
+
"max_series": 1,
|
| 596 |
+
},
|
| 597 |
+
{
|
| 598 |
+
"type": "table",
|
| 599 |
+
"name": "Tabela",
|
| 600 |
+
"description": "Dados tabulares detalhados",
|
| 601 |
+
"supports": ["detailed_data", "multi_dimension"],
|
| 602 |
+
"max_rows": 1000,
|
| 603 |
+
},
|
| 604 |
+
],
|
| 605 |
+
"aggregation_types": [a.value for a in AggregationType],
|
| 606 |
+
"time_granularities": [g.value for g in TimeGranularity],
|
| 607 |
+
"region_types": [r.value for r in RegionType],
|
| 608 |
+
}
|
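One intended use of this catalog is client-side validation before a chart request is made. A minimal sketch of such a guard, treating limits absent from an entry as unconstrained (the key names match the payload above):

def is_supported(chart_type: str, n_dimensions: int, n_series: int, catalog: dict) -> bool:
    # Look the chart type up in the /supported-charts payload and check
    # the advertised constraints; a missing limit means "no constraint".
    for spec in catalog["chart_types"]:
        if spec["type"] != chart_type:
            continue
        if n_dimensions < spec.get("required_dimensions", 0):
            return False
        if n_series > spec.get("max_series", n_series):
            return False
        return True
    return False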