DavMelchi committed
Commit 24f8e9d · 1 Parent(s): 6b0959d

Add snapshot comparison, operations queue, geographic map visualization, and KPI correlation analysis features with delta tracking, alert pack export, and interactive map-based drill-down navigation

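The headline feature is the snapshot workflow: "Save snapshot" serializes the current run to JSON, and a later run is compared against it on the new Delta tab. As a rough orientation before the diff, this is the shape of the file that _build_snapshot_obj (added below) writes; the keys are taken from the diff, while the values here are empty placeholders:

import json

snapshot = {
    "snapshot_version": 1,
    "created_at": "2024-05-01T10:00:00+00:00Z",  # pd.Timestamp.utcnow().isoformat() + "Z"
    "profile_config": {},     # widget/profile settings from _current_profile_config()
    "rules_df": [],           # rules_table rows, records orientation
    "multirat_df": [],        # current_multirat_raw rows, records orientation
    "top_anomalies_df": [],   # current_top_anomalies_raw rows, records orientation
}
print(json.dumps(snapshot, indent=2))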
panel_app/kpi_health_check_panel.py CHANGED
Changed hunks (lines removed by this commit are marked "-"; the updated content of each hunk, with added lines marked "+", is listed in full after this index):

@@ -1,4 +1,5 @@
@@ -67,7 +68,12 @@ current_multirat_df: pd.DataFrame | None = None
@@ -98,6 +104,7 @@ def _set_widget_value(widget, value) -> None:
@@ -167,6 +174,7 @@ def _drilldown_cache_key(site_code: int, rat: str, kpi: str) -> tuple:
@@ -297,7 +305,7 @@ def _handle_double_click(table_key: str, table: pn.widgets.Tabulator, event) ->
-    if table_key == "multirat":
@@ -437,6 +445,30 @@ complaint_top_anomalies_table = pn.widgets.Tabulator(
@@ -453,6 +485,12 @@ kpi_compare_norm = pn.widgets.Select(
@@ -543,17 +581,399 @@ _set_tabulator_pagination(multirat_summary_table, page_size=50)
@@ -830,6 +1250,8 @@ def _update_site_view(event=None) -> None:
@@ -837,6 +1259,8 @@ def _update_site_view(event=None) -> None:
@@ -849,6 +1273,8 @@ def _update_site_view(event=None) -> None:
@@ -877,6 +1303,8 @@ def _update_site_view(event=None) -> None:
@@ -885,6 +1313,8 @@ def _update_site_view(event=None) -> None:
@@ -915,6 +1345,8 @@ def _update_site_view(event=None) -> None:
@@ -924,12 +1356,27 @@ def _update_site_view(event=None) -> None:
-        trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object = cached
@@ -1008,6 +1455,47 @@ def _update_site_view(event=None) -> None:
@@ -1017,7 +1505,15 @@ def _update_site_view(event=None) -> None:
-        (trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object),
@@ -1344,6 +1840,72 @@ def _build_baseline_recent_hist(
@@ -1425,7 +1987,8 @@ def _compute_site_traffic_gb(daily_by_rat: dict[str, pd.DataFrame]) -> pd.DataFr
-    global current_multirat_df, current_top_anomalies_df, current_export_bytes
@@ -1455,6 +2018,70 @@ def _refresh_filtered_results(event=None) -> None:
@@ -1531,6 +2158,12 @@ def _refresh_filtered_results(event=None) -> None:
@@ -1766,7 +2399,7 @@ def _delete_profile(event=None) -> None:
-    global current_export_bytes
@@ -1830,7 +2463,7 @@ def _save_current_rules_as_preset(event=None) -> None:
-    global current_export_bytes
@@ -1840,6 +2473,7 @@ def _delete_selected_preset(event=None) -> None:
@@ -1857,6 +2491,9 @@ def load_datasets(event=None) -> None:
@@ -1866,7 +2503,11 @@ def load_datasets(event=None) -> None:
@@ -1877,10 +2518,16 @@ def load_datasets(event=None) -> None:
@@ -1995,6 +2642,8 @@ def run_health_check(event=None) -> None:
@@ -2109,6 +2758,13 @@ def run_health_check(event=None) -> None:
@@ -2154,6 +2810,12 @@ def _build_export_bytes() -> bytes:
@@ -2167,6 +2829,157 @@ def _export_callback() -> io.BytesIO:
@@ -2180,11 +2993,21 @@ profile_apply_button.on_click(_apply_profile)
@@ -2217,6 +3040,7 @@ site_select.param.watch(_on_drilldown_change, "value")
@@ -2226,10 +3050,11 @@ min_consecutive_days.param.watch(_on_drilldown_params_change, "value")
-    global current_export_bytes
@@ -2263,6 +3088,11 @@ try:
@@ -2270,9 +3100,13 @@ only_complaint_sites.param.watch(_refresh_filtered_results, "value")
@@ -2462,6 +3296,7 @@ sidebar = pn.Column(
@@ -2487,6 +3322,37 @@ _tab_complaint = pn.Column(
@@ -2496,12 +3362,20 @@ _tab_drilldown = pn.Column(

Updated content of the changed hunks (additions marked "+"):
1
  import io
2
+ import json
3
  import os
4
  import sys
5
  import time
 
68
  current_multirat_raw: pd.DataFrame | None = None
69
  current_top_anomalies_df: pd.DataFrame | None = None
70
  current_top_anomalies_raw: pd.DataFrame | None = None
71
+ current_ops_queue_df: pd.DataFrame | None = None
72
  current_export_bytes: bytes | None = None
73
+ current_alert_pack_bytes: bytes | None = None
74
+
75
+ current_snapshot: dict | None = None
76
+ current_delta_df: pd.DataFrame | None = None
77
 
78
  complaint_sites: set[int] = set()
79
 
 
104
  return
105
  except Exception: # noqa: BLE001
106
  pass
107
+
108
  _updating_drilldown = True
109
  try:
110
  widget.value = value
 
174
  # New cache keys
175
  str(kpi_group_select.value),
176
  str(kpi_group_mode.value),
177
+ str(corr_window_select.value),
178
  )
179
 
180
 
 
305
  pass
306
  return
307
 
308
+ if table_key in {"multirat", "ops"}:
309
  site_code = data.get("site_code")
310
  best_rat = rat_select.value
311
  try:
 
445
  height=260, sizing_mode="stretch_width", layout="fit_data_table"
446
  )
447
 
448
+ ops_queue_table = pn.widgets.Tabulator(
449
+ height=520, sizing_mode="stretch_width", layout="fit_data_table"
450
+ )
451
+
452
+ snapshot_file = pn.widgets.FileInput(name="Load snapshot (JSON)", accept=".json")
453
+ snapshot_download = pn.widgets.FileDownload(
454
+ label="Save snapshot",
455
+ filename="KPI_Health_Check_Snapshot.json",
456
+ button_type="primary",
457
+ )
458
+ snapshot_rules_table = pn.widgets.Tabulator(
459
+ height=220, sizing_mode="stretch_width", layout="fit_data_table"
460
+ )
461
+ snapshot_multirat_table = pn.widgets.Tabulator(
462
+ height=260, sizing_mode="stretch_width", layout="fit_data_table"
463
+ )
464
+ snapshot_top_table = pn.widgets.Tabulator(
465
+ height=260, sizing_mode="stretch_width", layout="fit_data_table"
466
+ )
467
+
468
+ delta_table = pn.widgets.Tabulator(
469
+ height=320, sizing_mode="stretch_width", layout="fit_data_table"
470
+ )
471
+
472
  site_select = pn.widgets.AutocompleteInput(
473
  name="Select a site (Type to search)",
474
  options={},
 
485
  name="Normalization", options=["None", "Min-Max", "Z-score"], value="None"
486
  )
487
 
488
+ corr_window_select = pn.widgets.Select(
489
+ name="Correlation window",
490
+ options=["Full (filtered range)", "Recent", "Baseline"],
491
+ value="Full (filtered range)",
492
+ )
493
+
494
  kpi_group_select = pn.widgets.Select(
495
  name="KPI Group", options=["All (selected KPIs)"], value="All (selected KPIs)"
496
  )
 
581
  _set_tabulator_pagination(top_anomalies_table, page_size=50)
582
  _set_tabulator_pagination(complaint_multirat_summary_table, page_size=50)
583
  _set_tabulator_pagination(complaint_top_anomalies_table, page_size=50)
584
+ _set_tabulator_pagination(ops_queue_table, page_size=50)
585
+ _set_tabulator_pagination(snapshot_rules_table, page_size=50)
586
+ _set_tabulator_pagination(snapshot_multirat_table, page_size=50)
587
+ _set_tabulator_pagination(snapshot_top_table, page_size=50)
588
+ _set_tabulator_pagination(delta_table, page_size=50)
589
  _set_tabulator_pagination(site_kpi_table, page_size=50)
590
  trend_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
591
  heatmap_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
592
  hist_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
593
 
594
+ map_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
595
+ map_message = pn.pane.Alert("", alert_type="info", visible=False)
596
+
597
+ corr_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
598
+ corr_message = pn.pane.Alert("", alert_type="info", visible=False)
599
+
600
+
601
+ def _coords_by_site() -> pd.DataFrame:
602
+ rows = []
603
+ for _, df in (current_daily_by_rat or {}).items():
604
+ if not isinstance(df, pd.DataFrame) or df.empty:
605
+ continue
606
+ cols = [
607
+ c for c in ["site_code", "Latitude", "Longitude", "City"] if c in df.columns
608
+ ]
609
+ if "site_code" not in cols:
610
+ continue
611
+ tmp = df[cols].copy()
612
+ tmp["site_code"] = pd.to_numeric(tmp["site_code"], errors="coerce")
613
+ tmp = tmp.dropna(subset=["site_code"]).copy()
614
+ tmp["site_code"] = tmp["site_code"].astype(int)
615
+ tmp = tmp.drop_duplicates(subset=["site_code"]).copy()
616
+ rows.append(tmp)
617
+ if not rows:
618
+ return pd.DataFrame(columns=["site_code", "Latitude", "Longitude", "City"])
619
+ out = pd.concat(rows, ignore_index=True)
620
+ out = out.drop_duplicates(subset=["site_code"]).copy()
621
+ return out
622
+
623
+
624
+ def _map_df() -> pd.DataFrame:
625
+ base = (
626
+ current_multirat_df
627
+ if isinstance(current_multirat_df, pd.DataFrame)
628
+ else pd.DataFrame()
629
+ )
630
+ if base is None or base.empty:
631
+ base = (
632
+ current_multirat_raw
633
+ if isinstance(current_multirat_raw, pd.DataFrame)
634
+ else pd.DataFrame()
635
+ )
636
+ if base is None or base.empty or "site_code" not in base.columns:
637
+ return pd.DataFrame()
638
+
639
+ base = base.copy()
640
+ base["site_code"] = pd.to_numeric(base["site_code"], errors="coerce")
641
+ base = base.dropna(subset=["site_code"]).copy()
642
+ base["site_code"] = base["site_code"].astype(int)
643
+
644
+ coords = _coords_by_site()
645
+ if coords is None or coords.empty:
646
+ return pd.DataFrame()
647
+
648
+ out = pd.merge(base, coords, on="site_code", how="left", suffixes=("", "_coord"))
649
+ if "City" not in out.columns and "City_coord" in out.columns:
650
+ out["City"] = out["City_coord"]
651
+ if "City" in out.columns and "City_coord" in out.columns:
652
+ out["City"] = out["City"].where(out["City"].notna(), out["City_coord"])
653
+ return out
654
+
655
+
656
+ def _build_map_fig(df_map: pd.DataFrame) -> go.Figure | None:
657
+ if df_map is None or df_map.empty:
658
+ return None
659
+ if "Latitude" not in df_map.columns or "Longitude" not in df_map.columns:
660
+ return None
661
+
662
+ tmp = df_map.copy()
663
+ tmp["Latitude"] = pd.to_numeric(tmp["Latitude"], errors="coerce")
664
+ tmp["Longitude"] = pd.to_numeric(tmp["Longitude"], errors="coerce")
665
+ tmp = tmp.dropna(subset=["Latitude", "Longitude"]).copy()
666
+ if tmp.empty:
667
+ return None
668
+
669
+ score_col = (
670
+ "criticality_score_weighted"
671
+ if "criticality_score_weighted" in tmp.columns
672
+ else "criticality_score"
673
+ )
674
+ if score_col not in tmp.columns:
675
+ score_col = None
676
+
677
+ if score_col is not None:
678
+ tmp["_score"] = (
679
+ pd.to_numeric(tmp[score_col], errors="coerce").fillna(0).astype(float)
680
+ )
681
+ else:
682
+ tmp["_score"] = 0.0
683
+
684
+ size = (tmp["_score"].clip(lower=0) + 1.0).pow(0.5) * 6.0
685
+ tmp["_size"] = size.clip(lower=6, upper=28)
686
+
687
+ hover_cols = [
688
+ c
689
+ for c in [
690
+ "site_code",
691
+ "City",
692
+ score_col,
693
+ "impacted_rats",
694
+ "persistent_kpis_total",
695
+ "degraded_kpis_total",
696
+ ]
697
+ if c and c in tmp.columns
698
+ ]
699
+ fig = px.scatter_mapbox(
700
+ tmp,
701
+ lat="Latitude",
702
+ lon="Longitude",
703
+ color="_score" if score_col is not None else None,
704
+ size="_size",
705
+ size_max=28,
706
+ zoom=4,
707
+ hover_data=hover_cols,
708
+ custom_data=["site_code"],
709
+ )
710
+ fig.update_layout(
711
+ mapbox_style="open-street-map",
712
+ margin=dict(l=10, r=10, t=10, b=10),
713
+ height=700,
714
+ )
715
+ if score_col is not None:
716
+ fig.update_layout(coloraxis_colorbar=dict(title="Score"))
717
+ return fig
718
+
719
+
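The marker sizing in _build_map_fig above compresses the criticality score with a square root and clips it to a 6-28 px range, so a few extreme sites do not dwarf the rest of the map. A quick standalone check of that transform (the scores are made-up examples, not real data):

import pandas as pd

scores = pd.Series([0, 8, 24, 100])
size = (scores.clip(lower=0) + 1.0).pow(0.5) * 6.0
print(size.clip(lower=6, upper=28).tolist())  # [6.0, 18.0, 28.0, 28.0]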
720
+ def _refresh_map_view(event=None) -> None:
721
+ df_map = _map_df()
722
+
723
+ if df_map is None or df_map.empty:
724
+ map_pane.object = None
725
+ map_message.alert_type = "info"
726
+ map_message.object = "Run health check to display the map (needs Multi-RAT results + coordinates)."
727
+ map_message.visible = True
728
+ return
729
+
730
+ fig = _build_map_fig(df_map)
731
+ if fig is None:
732
+ map_pane.object = None
733
+ map_message.alert_type = "warning"
734
+ map_message.object = (
735
+ "No geo coordinates available (Latitude/Longitude) for current sites."
736
+ )
737
+ map_message.visible = True
738
+ return
739
+
740
+ map_message.visible = False
741
+ map_pane.object = fig
742
+
743
+
744
+ def _on_map_click(event) -> None:
745
+ try:
746
+ cd = event.new
747
+ if not isinstance(cd, dict):
748
+ return
749
+ pts = cd.get("points", [])
750
+ if not pts:
751
+ return
752
+ p0 = pts[0]
753
+ custom = p0.get("customdata", None)
754
+ if not custom:
755
+ return
756
+ site_code = custom[0] if isinstance(custom, (list, tuple)) else custom
757
+ site_code_int = _coerce_int(site_code)
758
+ if site_code_int is None:
759
+ return
760
+
761
+ best_rat = rat_select.value
762
+ try:
763
+ row = None
764
+ if (
765
+ isinstance(current_multirat_raw, pd.DataFrame)
766
+ and not current_multirat_raw.empty
767
+ ):
768
+ sel = current_multirat_raw[
769
+ current_multirat_raw["site_code"] == int(site_code_int)
770
+ ]
771
+ row = sel.iloc[0].to_dict() if not sel.empty else None
772
+ if row:
773
+ best_score = -1
774
+ for r in list(rat_select.options or []):
775
+ p = pd.to_numeric(row.get(f"persistent_{r}", 0), errors="coerce")
776
+ d = pd.to_numeric(row.get(f"degraded_{r}", 0), errors="coerce")
777
+ p = int(p) if pd.notna(p) else 0
778
+ d = int(d) if pd.notna(d) else 0
779
+ score = p * 2 + d
780
+ if score > best_score:
781
+ best_score = score
782
+ best_rat = r
783
+ except Exception: # noqa: BLE001
784
+ best_rat = rat_select.value
785
+
786
+ _apply_drilldown_selection(site_code=site_code_int, rat=best_rat)
787
+ try:
788
+ status_pane.alert_type = "primary"
789
+ status_pane.object = f"Drill-down: site {int(site_code_int)} | {best_rat}"
790
+ except Exception: # noqa: BLE001
791
+ pass
792
+ except Exception: # noqa: BLE001
793
+ return
794
+
795
+
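The map acts as a navigation surface by watching the Plotly pane's click_data parameter (the same hook _on_map_click is wired to further down via map_pane.param.watch). A minimal self-contained sketch of that pattern, with a dummy one-point figure standing in for the real map and an illustrative callback name:

import panel as pn
import plotly.express as px

pn.extension("plotly")

demo_fig = px.scatter_mapbox(
    lat=[30.05], lon=[31.23], custom_data=[["101"]],  # one dummy site
    zoom=4, mapbox_style="open-street-map",
)
demo_map = pn.pane.Plotly(demo_fig, height=400)

def _demo_click(event) -> None:
    points = (event.new or {}).get("points", [])
    if points:
        # customdata carries the site_code, which is what _on_map_click reads
        print("clicked site:", points[0].get("customdata"))

demo_map.param.watch(_demo_click, "click_data")
# pn.serve(demo_map)  # uncomment to try it interactively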
796
+ def _compute_delta_df() -> pd.DataFrame:
797
+ if not isinstance(current_snapshot, dict) or not current_snapshot:
798
+ return pd.DataFrame()
799
+
800
+ snap_rows = current_snapshot.get("multirat_df", [])
801
+ if not isinstance(snap_rows, list) or not snap_rows:
802
+ return pd.DataFrame()
803
+
804
+ snap = pd.DataFrame(snap_rows)
805
+ if snap.empty or "site_code" not in snap.columns:
806
+ return pd.DataFrame()
807
+
808
+ cur = (
809
+ current_multirat_raw
810
+ if isinstance(current_multirat_raw, pd.DataFrame)
811
+ else pd.DataFrame()
812
+ )
813
+ if cur.empty or "site_code" not in cur.columns:
814
+ return pd.DataFrame()
815
+
816
+ snap["site_code"] = pd.to_numeric(snap["site_code"], errors="coerce")
817
+ cur["site_code"] = pd.to_numeric(cur["site_code"], errors="coerce")
818
+
819
+ snap = snap.dropna(subset=["site_code"]).copy()
820
+ cur = cur.dropna(subset=["site_code"]).copy()
821
+
822
+ snap["site_code"] = snap["site_code"].astype(int)
823
+ cur["site_code"] = cur["site_code"].astype(int)
824
+
825
+ left = snap.set_index("site_code")
826
+ right = cur.set_index("site_code")
827
+
828
+ score_col = (
829
+ "criticality_score_weighted"
830
+ if "criticality_score_weighted" in right.columns
831
+ else "criticality_score"
832
+ )
833
+ if score_col not in right.columns:
834
+ score_col = (
835
+ "criticality_score" if "criticality_score" in right.columns else None
836
+ )
837
+
838
+ key_cols = [
839
+ "City",
840
+ "is_complaint_site",
841
+ "impacted_rats",
842
+ "persistent_kpis_total",
843
+ "degraded_kpis_total",
844
+ "resolved_kpis_total",
845
+ "criticality_score",
846
+ "criticality_score_weighted",
847
+ "traffic_gb_total",
848
+ ]
849
+
850
+ all_sites = sorted(set(left.index.tolist()) | set(right.index.tolist()))
851
+ rows = []
852
+ for sc in all_sites:
853
+ srow = left.loc[sc] if sc in left.index else None
854
+ crow = right.loc[sc] if sc in right.index else None
855
+
856
+ def _get(row, col):
857
+ try:
858
+ if row is None:
859
+ return None
860
+ if isinstance(row, pd.DataFrame):
861
+ row = row.iloc[0]
862
+ return row.get(col, None)
863
+ except Exception: # noqa: BLE001
864
+ return None
865
+
866
+ snap_score = _get(srow, score_col) if score_col else None
867
+ cur_score = _get(crow, score_col) if score_col else None
868
+ try:
869
+ snap_score_f = (
870
+ float(snap_score)
871
+ if snap_score is not None and pd.notna(snap_score)
872
+ else 0.0
873
+ )
874
+ except Exception: # noqa: BLE001
875
+ snap_score_f = 0.0
876
+ try:
877
+ cur_score_f = (
878
+ float(cur_score)
879
+ if cur_score is not None and pd.notna(cur_score)
880
+ else 0.0
881
+ )
882
+ except Exception: # noqa: BLE001
883
+ cur_score_f = 0.0
884
+
885
+ if sc not in left.index:
886
+ change_type = "NEW"
887
+ elif sc not in right.index:
888
+ change_type = "MISSING"
889
+ else:
890
+ if cur_score_f > snap_score_f:
891
+ change_type = "SEVERITY_UP"
892
+ elif cur_score_f < snap_score_f:
893
+ change_type = "SEVERITY_DOWN"
894
+ else:
895
+ change_type = "UNCHANGED"
896
+
897
+ row_out = {
898
+ "site_code": int(sc),
899
+ "change_type": change_type,
900
+ "score_snapshot": int(round(snap_score_f)),
901
+ "score_current": int(round(cur_score_f)),
902
+ "score_delta": int(round(cur_score_f - snap_score_f)),
903
+ }
904
+
905
+ for c in key_cols:
906
+ row_out[f"snapshot_{c}"] = _get(srow, c)
907
+ row_out[f"current_{c}"] = _get(crow, c)
908
+
909
+ rows.append(row_out)
910
+
911
+ out = pd.DataFrame(rows)
912
+ if out.empty:
913
+ return out
914
+
915
+ try:
916
+ q = (city_filter.value or "").strip()
917
+ if q:
918
+ city_series = out.get("current_City")
919
+ if city_series is None:
920
+ city_series = out.get("snapshot_City")
921
+ if city_series is not None:
922
+ out = out[
923
+ city_series.astype(str).str.contains(q, case=False, na=False)
924
+ ].copy()
925
+ except Exception: # noqa: BLE001
926
+ pass
927
+
928
+ try:
929
+ mc = int(min_criticality.value)
930
+ if mc > 0 and "score_current" in out.columns:
931
+ out = out[
932
+ pd.to_numeric(out["score_current"], errors="coerce").fillna(0) >= mc
933
+ ]
934
+ except Exception: # noqa: BLE001
935
+ pass
936
+
937
+ order = {
938
+ "SEVERITY_UP": 0,
939
+ "NEW": 1,
940
+ "SEVERITY_DOWN": 2,
941
+ "UNCHANGED": 3,
942
+ "MISSING": 4,
943
+ }
944
+ try:
945
+ out["_order"] = out["change_type"].map(order).fillna(99).astype(int)
946
+ out = out.sort_values(
947
+ by=["_order", "score_delta"], ascending=[True, False]
948
+ ).drop(columns=["_order"], errors="ignore")
949
+ except Exception: # noqa: BLE001
950
+ out = out.sort_values(
951
+ by=["change_type", "score_delta"], ascending=[True, False]
952
+ )
953
+ return out
954
+
955
+
956
+ def _refresh_delta_view(event=None) -> None:
957
+ global current_delta_df
958
+ try:
959
+ current_delta_df = _compute_delta_df()
960
+ except Exception: # noqa: BLE001
961
+ current_delta_df = pd.DataFrame()
962
+ delta_table.value = current_delta_df
963
+
964
+
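_compute_delta_df above keys the snapshot and the current Multi-RAT tables on site_code and classifies every site as NEW, MISSING, SEVERITY_UP, SEVERITY_DOWN or UNCHANGED from the score comparison. The same idea in a minimal pandas sketch (two columns and invented values; the real function carries many more columns):

import pandas as pd

snap = pd.DataFrame({"site_code": [101, 102, 103], "criticality_score": [5, 8, 3]}).set_index("site_code")
cur = pd.DataFrame({"site_code": [102, 103, 104], "criticality_score": [8, 7, 2]}).set_index("site_code")
merged = snap.join(cur, how="outer", lsuffix="_snapshot", rsuffix="_current")

def classify(row) -> str:
    if pd.isna(row["criticality_score_snapshot"]):
        return "NEW"            # only in the current run
    if pd.isna(row["criticality_score_current"]):
        return "MISSING"        # only in the snapshot
    if row["criticality_score_current"] > row["criticality_score_snapshot"]:
        return "SEVERITY_UP"
    if row["criticality_score_current"] < row["criticality_score_snapshot"]:
        return "SEVERITY_DOWN"
    return "UNCHANGED"

merged["change_type"] = merged.apply(classify, axis=1)
print(merged["change_type"].to_dict())
# {101: 'MISSING', 102: 'UNCHANGED', 103: 'SEVERITY_UP', 104: 'NEW'}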
965
  export_button = pn.widgets.FileDownload(
966
  label="Download KPI Health Check report",
967
  filename="KPI_Health_Check_Report.xlsx",
968
  button_type="primary",
969
  )
970
 
971
+ alert_pack_button = pn.widgets.FileDownload(
972
+ label="Download Alert Pack",
973
+ filename="KPI_Alert_Pack.xlsx",
974
+ button_type="primary",
975
+ )
976
+
977
 
978
  def _filtered_daily(df: pd.DataFrame) -> pd.DataFrame:
979
  if df is None or df.empty:
 
1250
  trend_plot_pane.object = None
1251
  heatmap_plot_pane.object = None
1252
  hist_plot_pane.object = None
1253
+ corr_plot_pane.object = None
1254
+ corr_message.visible = False
1255
  return
1256
 
1257
  daily = current_daily_by_rat.get(rat)
 
1259
  trend_plot_pane.object = None
1260
  heatmap_plot_pane.object = None
1261
  hist_plot_pane.object = None
1262
+ corr_plot_pane.object = None
1263
+ corr_message.visible = False
1264
  return
1265
 
1266
  d = _filtered_daily(daily)
 
1273
  trend_plot_pane.object = None
1274
  heatmap_plot_pane.object = None
1275
  hist_plot_pane.object = None
1276
+ corr_plot_pane.object = None
1277
+ corr_message.visible = False
1278
  return
1279
 
1280
  try:
 
1303
  trend_plot_pane.object = None
1304
  heatmap_plot_pane.object = None
1305
  hist_plot_pane.object = None
1306
+ corr_plot_pane.object = None
1307
+ corr_message.visible = False
1308
  _refresh_validation_state()
1309
  return
1310
 
 
1313
  trend_plot_pane.object = None
1314
  heatmap_plot_pane.object = None
1315
  hist_plot_pane.object = None
1316
+ corr_plot_pane.object = None
1317
+ corr_message.visible = False
1318
  return
1319
 
1320
  code = code_int
 
1345
  trend_plot_pane.object = None
1346
  heatmap_plot_pane.object = None
1347
  hist_plot_pane.object = None
1348
+ corr_plot_pane.object = None
1349
+ corr_message.visible = False
1350
  return
1351
  new_kpi = candidate_kpis[0]
1352
  _set_widget_value(kpi_select, new_kpi)
 
1356
  trend_plot_pane.object = None
1357
  heatmap_plot_pane.object = None
1358
  hist_plot_pane.object = None
1359
+ corr_plot_pane.object = None
1360
+ corr_message.visible = False
1361
  return
1362
 
1363
  cache_key = _drilldown_cache_key(int(code_int), str(rat), str(kpi))
1364
  cached = _drilldown_cache_get(cache_key)
1365
  if cached is not None:
1366
+ try:
1367
+ (
1368
+ trend_plot_pane.object,
1369
+ heatmap_plot_pane.object,
1370
+ hist_plot_pane.object,
1371
+ corr_plot_pane.object,
1372
+ corr_message.object,
1373
+ corr_message.alert_type,
1374
+ corr_message.visible,
1375
+ ) = cached
1376
+ except Exception: # noqa: BLE001
1377
+ trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object = (
1378
+ cached
1379
+ )
1380
  return
1381
 
1382
  kpis_to_plot = []
 
1455
  )
1456
  hist_plot_pane.object = _build_baseline_recent_hist(d, int(code_int), str(kpi))
1457
 
1458
+ try:
1459
+ corr_kpis = [str(x) for x in (kpi_compare_select.value or []) if str(x)]
1460
+ if len(corr_kpis) < 2:
1461
+ corr_kpis = [str(x) for x in (kpis_to_plot or []) if str(x)]
1462
+ corr_kpis = [c for c in corr_kpis if c in d.columns]
1463
+ corr_kpis = corr_kpis[:20]
1464
+
1465
+ df_corr = d
1466
+ try:
1467
+ windows = _compute_site_windows(d)
1468
+ if windows is not None:
1469
+ baseline_start, baseline_end, recent_start, recent_end = windows
1470
+ w = str(corr_window_select.value or "")
1471
+ if w.startswith("Recent"):
1472
+ df_corr = d[
1473
+ (d["date_only"] >= recent_start)
1474
+ & (d["date_only"] <= recent_end)
1475
+ ].copy()
1476
+ elif w.startswith("Baseline"):
1477
+ df_corr = d[
1478
+ (d["date_only"] >= baseline_start)
1479
+ & (d["date_only"] <= baseline_end)
1480
+ ].copy()
1481
+ except Exception: # noqa: BLE001
1482
+ df_corr = d
1483
+
1484
+ corr_fig = _build_corr_heatmap(df_corr, int(code_int), corr_kpis)
1485
+ if corr_fig is None:
1486
+ corr_plot_pane.object = None
1487
+ corr_message.alert_type = "info"
1488
+ corr_message.object = "Correlation needs at least 2 KPIs with enough samples for the selected site."
1489
+ corr_message.visible = True
1490
+ else:
1491
+ corr_message.visible = False
1492
+ corr_plot_pane.object = corr_fig
1493
+ except Exception: # noqa: BLE001
1494
+ corr_plot_pane.object = None
1495
+ corr_message.alert_type = "warning"
1496
+ corr_message.object = "Unable to compute correlation."
1497
+ corr_message.visible = True
1498
+
1499
  try:
1500
  drilldown_export_button.filename = (
1501
  f"KPI_Drilldown_{rat}_site_{int(code_int)}.xlsx"
 
1505
 
1506
  _drilldown_cache_set(
1507
  cache_key,
1508
+ (
1509
+ trend_plot_pane.object,
1510
+ heatmap_plot_pane.object,
1511
+ hist_plot_pane.object,
1512
+ corr_plot_pane.object,
1513
+ corr_message.object,
1514
+ corr_message.alert_type,
1515
+ corr_message.visible,
1516
+ ),
1517
  )
1518
 
1519
 
 
1840
  return fig
1841
 
1842
 
1843
+ def _build_corr_heatmap(
1844
+ daily_filtered: pd.DataFrame,
1845
+ site_code: int,
1846
+ kpis: list[str],
1847
+ ) -> go.Figure | None:
1848
+ if daily_filtered is None or daily_filtered.empty:
1849
+ return None
1850
+ if not kpis:
1851
+ return None
1852
+ if "site_code" not in daily_filtered.columns:
1853
+ return None
1854
+
1855
+ df_site = daily_filtered[daily_filtered["site_code"] == int(site_code)].copy()
1856
+ if df_site.empty:
1857
+ return None
1858
+
1859
+ cols = [str(c) for c in kpis if str(c) in df_site.columns]
1860
+ cols = [
1861
+ c
1862
+ for c in cols
1863
+ if c not in {"site_code", "date_only", "Longitude", "Latitude", "City", "RAT"}
1864
+ ]
1865
+ cols = list(dict.fromkeys(cols))
1866
+ if len(cols) < 2:
1867
+ return None
1868
+
1869
+ x = df_site[cols].copy()
1870
+ for c in cols:
1871
+ x[c] = pd.to_numeric(x[c], errors="coerce")
1872
+ x = x.dropna(how="all")
1873
+ if x.empty or x.shape[0] < 5:
1874
+ return None
1875
+
1876
+ x = x.dropna(axis=1, how="all")
1877
+ if x.shape[1] < 2:
1878
+ return None
1879
+
1880
+ corr = x.corr(method="pearson")
1881
+ if corr is None or corr.empty:
1882
+ return None
1883
+
1884
+ labels = [str(c) for c in corr.columns.tolist()]
1885
+ z = corr.values
1886
+ fig = go.Figure(
1887
+ data=[
1888
+ go.Heatmap(
1889
+ z=z,
1890
+ x=labels,
1891
+ y=labels,
1892
+ zmin=-1,
1893
+ zmax=1,
1894
+ colorscale="RdBu",
1895
+ reversescale=True,
1896
+ hovertemplate="%{y} vs %{x}<br>corr=%{z:.3f}<extra></extra>",
1897
+ )
1898
+ ]
1899
+ )
1900
+ fig.update_layout(
1901
+ template="plotly_white",
1902
+ title=f"Correlation heatmap (Pearson) - Site {int(site_code)}",
1903
+ height=520,
1904
+ margin=dict(l=60, r=20, t=60, b=60),
1905
+ )
1906
+ return fig
1907
+
1908
+
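_build_corr_heatmap above expects the site-filtered daily frame plus a list of KPI column names, and returns None when fewer than two usable KPIs or fewer than five samples remain. A hedged usage sketch with synthetic data (the KPI names and values are invented; inside the app the frame comes from _filtered_daily):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 30
demo_daily = pd.DataFrame(
    {
        "site_code": [101] * n,
        "date_only": pd.date_range("2024-01-01", periods=n).date,
        "CSSR": rng.normal(99.0, 0.5, n),        # invented KPI columns
        "Drop_Rate": rng.normal(0.8, 0.2, n),
        "Traffic_MB": rng.normal(5000.0, 400.0, n),
    }
)
fig = _build_corr_heatmap(demo_daily, 101, ["CSSR", "Drop_Rate", "Traffic_MB"])
# fig is a Plotly Figure with a symmetric Pearson matrix, or None if not enough data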
1909
  def _compute_site_traffic_gb(daily_by_rat: dict[str, pd.DataFrame]) -> pd.DataFrame:
1910
  MB_PER_GB = 1024.0
1911
  rows = []
 
1987
 
1988
 
1989
  def _refresh_filtered_results(event=None) -> None:
1990
+ global current_multirat_df, current_top_anomalies_df, current_ops_queue_df
1991
+ global current_export_bytes, current_alert_pack_bytes
1992
 
1993
  if _applying_profile or _loading_datasets:
1994
  return
 
2018
  current_multirat_df = pd.DataFrame()
2019
  multirat_summary_table.value = current_multirat_df
2020
 
2021
+ if current_multirat_raw is not None and not current_multirat_raw.empty:
2022
+ oq = _apply_city_filter(current_multirat_raw)
2023
+
2024
+ oq_score_col = (
2025
+ "criticality_score_weighted"
2026
+ if "criticality_score_weighted" in oq.columns
2027
+ else "criticality_score"
2028
+ )
2029
+ if oq_score_col in oq.columns:
2030
+ oq = oq[
2031
+ pd.to_numeric(oq[oq_score_col], errors="coerce").fillna(0)
2032
+ >= int(min_criticality.value)
2033
+ ]
2034
+
2035
+ oq = oq.copy()
2036
+ try:
2037
+ oq["priority_score"] = (
2038
+ pd.to_numeric(oq[oq_score_col], errors="coerce")
2039
+ .fillna(0)
2040
+ .round(0)
2041
+ .astype(int)
2042
+ if oq_score_col in oq.columns
2043
+ else 0
2044
+ )
2045
+ except Exception: # noqa: BLE001
2046
+ oq["priority_score"] = 0
2047
+
2048
+ cols = []
2049
+ for c in [
2050
+ "priority_score",
2051
+ "site_code",
2052
+ "City",
2053
+ "is_complaint_site",
2054
+ "impacted_rats",
2055
+ "persistent_kpis_total",
2056
+ "degraded_kpis_total",
2057
+ "resolved_kpis_total",
2058
+ "criticality_score",
2059
+ "criticality_score_weighted",
2060
+ "traffic_gb_total",
2061
+ "traffic_gb_2G",
2062
+ "traffic_gb_3G",
2063
+ "traffic_gb_LTE",
2064
+ ]:
2065
+ if c in oq.columns and c not in cols:
2066
+ cols.append(c)
2067
+
2068
+ for prefix in ["persistent", "degraded", "resolved"]:
2069
+ for r in ["2G", "3G", "LTE"]:
2070
+ c = f"{prefix}_{r}"
2071
+ if c in oq.columns and c not in cols:
2072
+ cols.append(c)
2073
+ if cols:
2074
+ oq = oq[cols].copy()
2075
+
2076
+ if not oq.empty and "priority_score" in oq.columns:
2077
+ oq = oq.sort_values(by=["priority_score"], ascending=False)
2078
+
2079
+ current_ops_queue_df = oq
2080
+ ops_queue_table.value = current_ops_queue_df
2081
+ else:
2082
+ current_ops_queue_df = pd.DataFrame()
2083
+ ops_queue_table.value = current_ops_queue_df
2084
+
2085
  if current_top_anomalies_raw is not None and not current_top_anomalies_raw.empty:
2086
  t = _apply_city_filter(current_top_anomalies_raw)
2087
  if (
 
2158
  complaint_top_anomalies_table.value = pd.DataFrame()
2159
 
2160
  current_export_bytes = None
2161
+ current_alert_pack_bytes = None
2162
+
2163
+ try:
2164
+ _refresh_map_view()
2165
+ except Exception: # noqa: BLE001
2166
+ pass
2167
 
2168
 
2169
  def _refresh_presets(event=None) -> None:
 
2399
 
2400
 
2401
  def _apply_preset(event=None) -> None:
2402
+ global current_export_bytes, current_alert_pack_bytes
2403
  try:
2404
  if not preset_select.value:
2405
  return
 
2463
 
2464
 
2465
  def _delete_selected_preset(event=None) -> None:
2466
+ global current_export_bytes, current_alert_pack_bytes
2467
  try:
2468
  name = str(preset_select.value or "").strip()
2469
  if not name:
 
2473
  status_pane.alert_type = "success"
2474
  status_pane.object = f"Preset deleted: {name}"
2475
  current_export_bytes = None
2476
+ current_alert_pack_bytes = None
2477
 
2478
  _invalidate_drilldown_cache(data_changed=True, rules_changed=True)
2479
  except Exception as exc: # noqa: BLE001
 
2491
  global current_daily_by_rat, current_rules_df
2492
  global current_status_df, current_summary_df, current_export_bytes
2493
  global current_multirat_df, current_multirat_raw, current_top_anomalies_df, current_top_anomalies_raw
2494
+ global current_ops_queue_df
2495
+ global current_alert_pack_bytes
2496
+ global current_snapshot, current_delta_df
2497
 
2498
  current_daily_by_rat = {}
2499
  current_rules_df = None
 
2503
  current_multirat_raw = None
2504
  current_top_anomalies_df = None
2505
  current_top_anomalies_raw = None
2506
+ current_ops_queue_df = None
2507
  current_export_bytes = None
2508
+ current_alert_pack_bytes = None
2509
+ current_snapshot = None
2510
+ current_delta_df = None
2511
 
2512
  _invalidate_drilldown_cache(
2513
  data_changed=True, rules_changed=True, healthcheck_changed=True
 
2518
  top_anomalies_table.value = pd.DataFrame()
2519
  complaint_multirat_summary_table.value = pd.DataFrame()
2520
  complaint_top_anomalies_table.value = pd.DataFrame()
2521
+ ops_queue_table.value = pd.DataFrame()
2522
+ delta_table.value = pd.DataFrame()
2523
+ map_pane.object = None
2524
+ map_message.visible = False
2525
  site_kpi_table.value = pd.DataFrame()
2526
  trend_plot_pane.object = None
2527
  heatmap_plot_pane.object = None
2528
  hist_plot_pane.object = None
2529
+ corr_plot_pane.object = None
2530
+ corr_message.visible = False
2531
 
2532
  inputs = {"2G": file_2g, "3G": file_3g, "LTE": file_lte}
2533
  rows = []
 
2642
  global current_status_df, current_summary_df, current_export_bytes
2643
  global current_multirat_df, current_multirat_raw
2644
  global current_top_anomalies_df, current_top_anomalies_raw
2645
+ global current_alert_pack_bytes
2646
+ global current_snapshot, current_delta_df
2647
 
2648
  rules_df = (
2649
  rules_table.value
 
2758
  _apply_complaint_flags()
2759
 
2760
  current_export_bytes = None
2761
+ current_alert_pack_bytes = None
2762
+
2763
+ try:
2764
+ current_delta_df = _compute_delta_df()
2765
+ except Exception: # noqa: BLE001
2766
+ current_delta_df = pd.DataFrame()
2767
+ delta_table.value = current_delta_df
2768
 
2769
  _invalidate_drilldown_cache(healthcheck_changed=True)
2770
 
 
2810
  if isinstance(complaint_top_anomalies_table.value, pd.DataFrame)
2811
  else None
2812
  ),
2813
+ (
2814
+ current_ops_queue_df
2815
+ if isinstance(current_ops_queue_df, pd.DataFrame)
2816
+ else None
2817
+ ),
2818
+ (current_delta_df if isinstance(current_delta_df, pd.DataFrame) else None),
2819
  )
2820
 
2821
 
 
2829
  return io.BytesIO(current_export_bytes or b"")
2830
 
2831
 
2832
+ def _build_alert_pack_bytes() -> bytes:
2833
+ params = {
2834
+ "baseline_days": baseline_days.value,
2835
+ "recent_days": recent_days.value,
2836
+ "rel_threshold_pct": rel_threshold_pct.value,
2837
+ "min_consecutive_days": min_consecutive_days.value,
2838
+ "min_criticality": min_criticality.value,
2839
+ "min_anomaly_score": min_anomaly_score.value,
2840
+ "city_filter": str(city_filter.value or ""),
2841
+ "only_complaint_sites": bool(only_complaint_sites.value),
2842
+ "top_rat_filter": ",".join(list(top_rat_filter.value or [])),
2843
+ "top_status_filter": ",".join(list(top_status_filter.value or [])),
2844
+ }
2845
+ params_df = pd.DataFrame(
2846
+ {"key": list(params.keys()), "value": [params[k] for k in params.keys()]}
2847
+ )
2848
+
2849
+ return write_dfs_to_excel(
2850
+ [
2851
+ params_df,
2852
+ (
2853
+ current_ops_queue_df
2854
+ if isinstance(current_ops_queue_df, pd.DataFrame)
2855
+ else pd.DataFrame()
2856
+ ),
2857
+ (
2858
+ current_top_anomalies_df
2859
+ if isinstance(current_top_anomalies_df, pd.DataFrame)
2860
+ else pd.DataFrame()
2861
+ ),
2862
+ (
2863
+ current_summary_df
2864
+ if isinstance(current_summary_df, pd.DataFrame)
2865
+ else pd.DataFrame()
2866
+ ),
2867
+ ],
2868
+ ["Run_Params", "Ops_Queue", "Top_Anomalies", "Site_Summary"],
2869
+ index=False,
2870
+ )
2871
+
2872
+
2873
+ def _alert_pack_callback() -> io.BytesIO:
2874
+ global current_alert_pack_bytes
2875
+ if current_alert_pack_bytes is None:
2876
+ try:
2877
+ current_alert_pack_bytes = _build_alert_pack_bytes()
2878
+ except Exception: # noqa: BLE001
2879
+ current_alert_pack_bytes = b""
2880
+ return io.BytesIO(current_alert_pack_bytes or b"")
2881
+
2882
+
2883
+ def _build_snapshot_obj() -> dict:
2884
+ cfg = _current_profile_config()
2885
+
2886
+ rules_df = (
2887
+ rules_table.value
2888
+ if isinstance(rules_table.value, pd.DataFrame)
2889
+ else pd.DataFrame()
2890
+ )
2891
+ multirat_df = (
2892
+ current_multirat_raw
2893
+ if isinstance(current_multirat_raw, pd.DataFrame)
2894
+ else pd.DataFrame()
2895
+ )
2896
+ top_df = (
2897
+ current_top_anomalies_raw
2898
+ if isinstance(current_top_anomalies_raw, pd.DataFrame)
2899
+ else pd.DataFrame()
2900
+ )
2901
+
2902
+ return {
2903
+ "snapshot_version": 1,
2904
+ "created_at": pd.Timestamp.utcnow().isoformat() + "Z",
2905
+ "profile_config": cfg,
2906
+ "rules_df": rules_df.to_dict(orient="records"),
2907
+ "multirat_df": multirat_df.to_dict(orient="records"),
2908
+ "top_anomalies_df": top_df.to_dict(orient="records"),
2909
+ }
2910
+
2911
+
2912
+ def _snapshot_download_callback() -> io.BytesIO:
2913
+ b = b""
2914
+ try:
2915
+ obj = _build_snapshot_obj()
2916
+ b = json.dumps(obj, ensure_ascii=False, indent=2).encode("utf-8")
2917
+ except Exception: # noqa: BLE001
2918
+ b = b""
2919
+ return io.BytesIO(b)
2920
+
2921
+
2922
+ def _snapshot_from_bytes(content: bytes) -> dict:
2923
+ try:
2924
+ txt = content.decode("utf-8", errors="ignore")
2925
+ obj = json.loads(txt)
2926
+ return obj if isinstance(obj, dict) else {}
2927
+ except Exception: # noqa: BLE001
2928
+ return {}
2929
+
2930
+
2931
+ def _apply_snapshot_to_ui(obj: dict) -> None:
2932
+ global current_snapshot, current_delta_df, current_export_bytes, current_alert_pack_bytes
2933
+ current_snapshot = obj if isinstance(obj, dict) else {}
2934
+
2935
+ cfg = (
2936
+ current_snapshot.get("profile_config", {})
2937
+ if isinstance(current_snapshot.get("profile_config", {}), dict)
2938
+ else {}
2939
+ )
2940
+ _apply_profile_config(cfg)
2941
+
2942
+ try:
2943
+ r = current_snapshot.get("rules_df", [])
2944
+ snapshot_rules_table.value = pd.DataFrame(r)
2945
+ except Exception: # noqa: BLE001
2946
+ snapshot_rules_table.value = pd.DataFrame()
2947
+
2948
+ try:
2949
+ m = current_snapshot.get("multirat_df", [])
2950
+ snapshot_multirat_table.value = pd.DataFrame(m)
2951
+ except Exception: # noqa: BLE001
2952
+ snapshot_multirat_table.value = pd.DataFrame()
2953
+
2954
+ try:
2955
+ t = current_snapshot.get("top_anomalies_df", [])
2956
+ snapshot_top_table.value = pd.DataFrame(t)
2957
+ except Exception: # noqa: BLE001
2958
+ snapshot_top_table.value = pd.DataFrame()
2959
+
2960
+ try:
2961
+ current_delta_df = _compute_delta_df()
2962
+ delta_table.value = current_delta_df
2963
+ except Exception: # noqa: BLE001
2964
+ current_delta_df = pd.DataFrame()
2965
+ delta_table.value = current_delta_df
2966
+
2967
+ current_export_bytes = None
2968
+ current_alert_pack_bytes = None
2969
+
2970
+
2971
+ def _on_snapshot_upload(event=None) -> None:
2972
+ if not snapshot_file.value:
2973
+ return
2974
+ obj = _snapshot_from_bytes(snapshot_file.value)
2975
+ _apply_snapshot_to_ui(obj)
2976
+ try:
2977
+ status_pane.alert_type = "success"
2978
+ status_pane.object = "Snapshot loaded."
2979
+ except Exception: # noqa: BLE001
2980
+ pass
2981
+
2982
+
2983
  load_button.on_click(load_datasets)
2984
  run_button.on_click(run_health_check)
2985
 
 
2993
  profile_save_button.on_click(_save_profile)
2994
  profile_delete_button.on_click(_delete_profile)
2995
 
2996
+ snapshot_file.param.watch(_on_snapshot_upload, "value")
2997
+ snapshot_download.callback = _snapshot_download_callback
2998
+
2999
+ map_pane.param.watch(_on_map_click, "click_data")
3000
+
3001
  _refresh_presets()
3002
  _refresh_profiles()
3003
  _refresh_complaint_sites()
3004
  _refresh_validation_state()
3005
 
3006
+ try:
3007
+ _refresh_map_view()
3008
+ except Exception: # noqa: BLE001
3009
+ pass
3010
+
3011
 
3012
  def _on_rat_change(event=None) -> None:
3013
  if _applying_profile or _loading_datasets or _updating_drilldown:
 
3040
  kpi_select.param.watch(_on_drilldown_change, "value")
3041
  kpi_compare_select.param.watch(_on_drilldown_change, "value")
3042
  kpi_compare_norm.param.watch(_on_drilldown_change, "value")
3043
+ corr_window_select.param.watch(_on_drilldown_change, "value")
3044
 
3045
  analysis_range.param.watch(_on_drilldown_params_change, "value")
3046
  baseline_days.param.watch(_on_drilldown_params_change, "value")
 
3050
 
3051
 
3052
  def _on_rules_table_change(event=None) -> None:
3053
+ global current_export_bytes, current_alert_pack_bytes
3054
  if _applying_profile or _loading_datasets:
3055
  return
3056
  current_export_bytes = None
3057
+ current_alert_pack_bytes = None
3058
  _invalidate_drilldown_cache(rules_changed=True)
3059
 
3060
 
 
3088
  except Exception: # noqa: BLE001
3089
  pass
3090
 
3091
+ try:
3092
+ ops_queue_table.on_click(lambda e: _handle_double_click("ops", ops_queue_table, e))
3093
+ except Exception: # noqa: BLE001
3094
+ pass
3095
+
3096
  min_criticality.param.watch(_refresh_filtered_results, "value")
3097
  min_anomaly_score.param.watch(_refresh_filtered_results, "value")
3098
  city_filter.param.watch(_refresh_filtered_results, "value")
 
3100
  top_rat_filter.param.watch(_refresh_filtered_results, "value")
3101
  top_status_filter.param.watch(_refresh_filtered_results, "value")
3102
 
3103
+ min_criticality.param.watch(_refresh_delta_view, "value")
3104
+ city_filter.param.watch(_refresh_delta_view, "value")
3105
+
3106
  complaint_sites_file.param.watch(_refresh_complaint_sites, "value")
3107
 
3108
  export_button.callback = _export_callback
3109
+ alert_pack_button.callback = _alert_pack_callback
3110
 
3111
 
3112
  def _build_drilldown_export_bytes() -> bytes:
 
3296
  run_button,
3297
  "---",
3298
  export_button,
3299
+ alert_pack_button,
3300
  )
3301
 
3302
  _tab_overview = pn.Column(
 
3322
  sizing_mode="stretch_width",
3323
  )
3324
 
3325
+ _tab_ops_queue = pn.Column(
3326
+ pn.pane.Markdown("## Ops Queue"),
3327
+ ops_queue_table,
3328
+ sizing_mode="stretch_width",
3329
+ )
3330
+
3331
+ _tab_snapshot = pn.Column(
3332
+ pn.pane.Markdown("## Snapshot"),
3333
+ pn.Row(snapshot_download, snapshot_file),
3334
+ pn.pane.Markdown("### Snapshot KPI rules"),
3335
+ snapshot_rules_table,
3336
+ pn.pane.Markdown("### Snapshot Multi-RAT"),
3337
+ snapshot_multirat_table,
3338
+ pn.pane.Markdown("### Snapshot Top anomalies"),
3339
+ snapshot_top_table,
3340
+ sizing_mode="stretch_width",
3341
+ )
3342
+
3343
+ _tab_delta = pn.Column(
3344
+ pn.pane.Markdown("## Delta"),
3345
+ delta_table,
3346
+ sizing_mode="stretch_width",
3347
+ )
3348
+
3349
+ _tab_map = pn.Column(
3350
+ pn.pane.Markdown("## Map"),
3351
+ map_message,
3352
+ pn.Column(map_pane, sizing_mode="stretch_both", min_height=700),
3353
+ sizing_mode="stretch_both",
3354
+ )
3355
+
3356
  _tab_drilldown = pn.Column(
3357
  pn.pane.Markdown("## Drill-down"),
3358
  pn.Row(site_select, rat_select),
 
3362
  pn.Column(trend_plot_pane, sizing_mode="stretch_both", min_height=500),
3363
  pn.Column(heatmap_plot_pane, sizing_mode="stretch_both", min_height=400),
3364
  pn.Column(hist_plot_pane, sizing_mode="stretch_both", min_height=400),
3365
+ pn.pane.Markdown("## Correlation"),
3366
+ pn.Row(corr_window_select),
3367
+ corr_message,
3368
+ pn.Column(corr_plot_pane, sizing_mode="stretch_both", min_height=520),
3369
  sizing_mode="stretch_both",
3370
  )
3371
 
3372
  _tabs_main = pn.Tabs(
3373
  ("Overview", _tab_overview),
3374
+ ("Ops Queue", _tab_ops_queue),
3375
  ("Complaint sites only", _tab_complaint),
3376
+ ("Snapshot", _tab_snapshot),
3377
+ ("Delta", _tab_delta),
3378
+ ("Map", _tab_map),
3379
  ("Drill-down", _tab_drilldown),
3380
  dynamic=True,
3381
  sizing_mode="stretch_both",
panel_app/panel_portal.py CHANGED
@@ -9,10 +9,8 @@ if ROOT_DIR not in sys.path:
 
 pn.extension("plotly", "tabulator")
 
-import kpi_health_check_panel
-
 # Import pages (kept as modules, not nested templates)
-import trafic_analysis_panel
+from panel_app import kpi_health_check_panel, trafic_analysis_panel
 
 PAGES = {
     "📊 Global Traffic Analysis": {
process_kpi/kpi_health_check/export.py CHANGED
@@ -12,6 +12,8 @@ def build_export_bytes(
     top_anomalies_df: pd.DataFrame | None = None,
     complaint_multirat_df: pd.DataFrame | None = None,
     complaint_top_anomalies_df: pd.DataFrame | None = None,
+    ops_queue_df: pd.DataFrame | None = None,
+    delta_df: pd.DataFrame | None = None,
 ) -> bytes:
     dfs = [
         datasets_df if isinstance(datasets_df, pd.DataFrame) else pd.DataFrame(),
@@ -38,6 +40,8 @@
             if isinstance(complaint_top_anomalies_df, pd.DataFrame)
             else pd.DataFrame()
         ),
+        ops_queue_df if isinstance(ops_queue_df, pd.DataFrame) else pd.DataFrame(),
+        delta_df if isinstance(delta_df, pd.DataFrame) else pd.DataFrame(),
     ]
     sheet_names = [
         "Datasets",
@@ -48,5 +52,7 @@
         "Top_Anomalies",
         "Complaint_MultiRAT",
         "Complaint_Top_Anomalies",
+        "Ops_Queue",
+        "Delta",
     ]
     return write_dfs_to_excel(dfs, sheet_names, index=False)
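With the two extra arguments, the main report gains Ops_Queue and Delta sheets alongside the existing ones. A hedged consumer-side sketch reading the exported workbook (the filename is the panel's FileDownload default; change_type is the classification column produced by _compute_delta_df):

import pandas as pd

report = pd.read_excel("KPI_Health_Check_Report.xlsx", sheet_name=None)
print(list(report))  # ..., 'Complaint_Top_Anomalies', 'Ops_Queue', 'Delta'
delta = report["Delta"]
print(delta[delta["change_type"] == "SEVERITY_UP"].head())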