feat: Add Streamlit dashboard with Blueprint compliance (v2.1.0)
Dashboard Features:
- 8 navigation sections: Overview, Outcomes, Poor CX, FCR, Churn, Agent, Call Explorer, Export
- Beyond Brand Identity styling (colors #6D84E3, Outfit font)
- RCA Sankey diagram (Driver → Outcome → Churn Risk flow)
- Correlation heatmaps (driver co-occurrence, driver-outcome)
- Outcome Deep Dive (root causes, correlation, duration analysis)
- Export functionality (Excel, HTML, JSON)

Blueprint Compliance:
- FCR: 4 categories (Primera Llamada/Rellamada × Sin/Con Riesgo de Fuga)
- Churn: Binary view (Sin Riesgo de Fuga / En Riesgo de Fuga)
- Agent: Talento Para Replicar / Oportunidades de Mejora
- Fixed FCR rate calculation (only FIRST_CALL counts as success)

Technical:
- Streamlit + Plotly for interactive visualizations
- Light theme configuration (.streamlit/config.toml)
- Fixed Plotly colorbar titlefont deprecation

Documentation:
- Updated PROJECT_CONTEXT.md, TODO.md, CHANGELOG.md
- Added 4 new technical decisions (TD-014 to TD-017)
- Created TROUBLESHOOTING.md with 10 common issues

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
538
dashboard/app.py
Normal file
538
dashboard/app.py
Normal file
@@ -0,0 +1,538 @@
|
||||
"""
|
||||
CXInsights Dashboard - Main Application
|
||||
Rich visualization dashboard for call analysis results.
|
||||
Following Beyond Brand Identity Guidelines v1.0
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
# Add parent directory to path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
import streamlit as st
|
||||
import pandas as pd
|
||||
|
||||
from config import COLORS, apply_custom_css
|
||||
from data_loader import (
|
||||
load_batch_data,
|
||||
get_available_batches,
|
||||
calculate_kpis,
|
||||
aggregate_drivers,
|
||||
)
|
||||
from components import (
|
||||
render_kpi_cards,
|
||||
render_outcome_chart,
|
||||
render_driver_analysis,
|
||||
render_driver_detail,
|
||||
render_call_explorer,
|
||||
render_agent_performance,
|
||||
render_fcr_analysis,
|
||||
render_churn_risk_analysis,
|
||||
render_driver_correlation_heatmap,
|
||||
render_driver_outcome_heatmap,
|
||||
render_rca_sankey,
|
||||
render_outcome_deep_dive,
|
||||
)
|
||||
from exports import render_export_section
|
||||
|
||||
# =============================================================================
# PAGE CONFIG
# =============================================================================

# Must run before any other Streamlit call in the session.
st.set_page_config(
    page_title="CXInsights Dashboard | Beyond",
    page_icon="📊",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Inject the Beyond brand CSS (fonts, colors, component overrides).
apply_custom_css()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# MAIN APP
|
||||
# =============================================================================
|
||||
|
||||
def main():
    """Main dashboard application: sidebar controls, data loading, page routing."""

    # -------------------------------------------------------------------------
    # SIDEBAR
    # -------------------------------------------------------------------------
    with st.sidebar:
        # Brand lockup ("beyond cx") rendered as raw HTML.
        st.markdown(
            f"""
            <div style="padding: 1rem 0; margin-bottom: 1rem;">
                <span style="font-size: 24px; font-weight: 700; color: {COLORS['black']};">
                    beyond
                </span>
                <sup style="font-size: 12px; color: {COLORS['blue']};">cx</sup>
                <div style="font-size: 12px; color: {COLORS['grey']}; margin-top: 4px;">
                    CXInsights Dashboard
                </div>
            </div>
            """,
            unsafe_allow_html=True,
        )

        st.markdown("---")

        # Batch selector — batches live under <repo>/data/output.
        data_dir = Path(__file__).parent.parent / "data" / "output"
        batches = get_available_batches(data_dir)

        if not batches:
            st.error("No batch data found.")
            st.markdown(
                "Run the pipeline first:\n"
                "```bash\n"
                "python cli.py run <batch_id> -i <audio_dir>\n"
                "```"
            )
            st.stop()

        selected_batch = st.selectbox(
            "Select Batch",
            batches,
            index=len(batches) - 1,  # Most recent
            help="Select a completed analysis batch to visualize",
        )

        st.markdown("---")

        # Navigation
        st.markdown("### Navigation")
        page = st.radio(
            "Section",
            [
                "📊 Overview",
                "📈 Outcomes",
                "😞 Poor CX Analysis",
                "🎯 FCR Analysis",
                "⚠️ Churn Risk",
                "👤 Agent Performance",
                "🔍 Call Explorer",
                "📥 Export Insights",
            ],
            label_visibility="collapsed",
        )

        st.markdown("---")

        # Footer metadata
        st.markdown(
            f"""
            <div style="font-size: 11px; color: {COLORS['grey']};">
                <strong>Last updated:</strong><br>
                {datetime.now().strftime('%Y-%m-%d %H:%M')}<br><br>
                <strong>Powered by:</strong><br>
                Beyond CXInsights v1.0
            </div>
            """,
            unsafe_allow_html=True,
        )

    # -------------------------------------------------------------------------
    # LOAD DATA
    # -------------------------------------------------------------------------
    batch_path = data_dir / selected_batch
    batch_data = load_batch_data(batch_path)

    if batch_data is None:
        st.error(f"Failed to load batch: {selected_batch}")
        st.stop()

    summary = batch_data["summary"]
    analyses = batch_data["analyses"]

    # -------------------------------------------------------------------------
    # HEADER
    # -------------------------------------------------------------------------
    st.markdown(
        f"""
        <h1 style="margin-bottom: 0.25rem;">📊 CXInsights Dashboard</h1>
        <p style="color: {COLORS['grey']}; margin-bottom: 2rem;">
            <strong>Batch:</strong> {selected_batch} |
            <strong>Calls:</strong> {summary['summary']['total_calls']} |
            <strong>Generated:</strong> {summary.get('generated_at', 'N/A')[:10]}
        </p>
        """,
        unsafe_allow_html=True,
    )

    # -------------------------------------------------------------------------
    # PAGE ROUTING — dispatch table keyed by the sidebar radio label.
    # -------------------------------------------------------------------------
    routes = {
        "📊 Overview": lambda: render_overview_page(summary, analyses),
        "📈 Outcomes": lambda: render_outcomes_page(summary, analyses),
        "😞 Poor CX Analysis": lambda: render_poor_cx_page(summary, analyses),
        "🎯 FCR Analysis": lambda: render_fcr_page(summary, analyses),
        "⚠️ Churn Risk": lambda: render_churn_page(summary, analyses),
        "👤 Agent Performance": lambda: render_agent_page(analyses),
        "🔍 Call Explorer": lambda: render_call_explorer(analyses),
        "📥 Export Insights": lambda: render_export_page(summary, analyses, selected_batch),
    }
    handler = routes.get(page)
    if handler is not None:
        handler()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# PAGE RENDERS
|
||||
# =============================================================================
|
||||
|
||||
def render_overview_page(summary: dict, analyses: list[dict]):
    """Render overview page with executive summary."""

    # Headline KPI cards across the top.
    render_kpi_cards(summary, analyses)

    st.markdown("---")

    # First row: outcomes chart next to top poor-CX drivers.
    left, right = st.columns(2)
    with left:
        st.markdown("### Call Outcomes Distribution")
        render_outcome_chart(summary, height=350)
    with right:
        st.markdown("### Top Poor CX Drivers")
        render_driver_analysis(summary, "poor_cx", limit=5)

    st.markdown("---")

    # Second row: FCR and churn-risk compact views.
    left, right = st.columns(2)
    with left:
        st.markdown("### First Call Resolution")
        render_fcr_analysis(analyses, compact=True)
    with right:
        st.markdown("### Churn Risk Distribution")
        render_churn_risk_analysis(analyses, compact=True)

    # Executive Summary box: rule-based insights from the computed KPIs.
    st.markdown("---")
    st.markdown("### Executive Summary")

    kpis = calculate_kpis(summary, analyses)

    findings: list[str] = []

    # Thresholds below are heuristic alert levels — presumably tuned for this
    # client; confirm against the Blueprint if they change.
    if kpis["poor_cx_rate"] > 30:
        findings.append(
            f"⚠️ **High Poor CX Rate:** {kpis['poor_cx_rate']:.1f}% of calls show "
            f"customer experience issues requiring attention."
        )

    if kpis["churn_risk_rate"] > 20:
        findings.append(
            f"⚠️ **Elevated Churn Risk:** {kpis['churn_risk_rate']:.1f}% of customers "
            f"show elevated churn risk signals."
        )

    if kpis["fcr_rate"] < 70:
        findings.append(
            f"📉 **FCR Below Target:** First call resolution at {kpis['fcr_rate']:.1f}% "
            f"suggests process improvement opportunities."
        )

    leading_drivers = summary.get("poor_cx", {}).get("top_drivers", [])
    if leading_drivers:
        worst = leading_drivers[0]
        findings.append(
            f"🔍 **Top Driver:** {worst['driver_code']} detected in "
            f"{worst['occurrences']} calls ({worst.get('call_rate', 0)*100:.0f}% of total)."
        )

    if findings:
        for message in findings:
            st.markdown(message)
    else:
        st.success("✅ No critical issues detected. Performance within expected parameters.")

    st.caption(
        f"Source: CXInsights Analysis | Generated: {summary.get('generated_at', 'N/A')}"
    )
|
||||
|
||||
|
||||
def render_outcomes_page(summary: dict, analyses: list[dict]):
    """Render detailed outcome analysis page."""

    st.markdown("## 📈 Outcome Analysis")
    st.markdown(
        "Understanding call outcomes helps identify resolution patterns and opportunities."
    )

    st.markdown("---")

    chart_col, breakdown_col = st.columns([2, 1])

    with chart_col:
        render_outcome_chart(summary, height=450)

    with breakdown_col:
        st.markdown("### Outcome Breakdown")
        outcomes = summary.get("outcomes", {})
        total = sum(outcomes.values())

        # One metric per outcome, most frequent first.
        for outcome, count in sorted(outcomes.items(), key=lambda item: item[1], reverse=True):
            share = (count / total * 100) if total > 0 else 0
            st.metric(
                label=outcome,
                value=f"{count}",
                delta=f"{share:.1f}%",
            )

    st.markdown("---")

    # Filterable table of calls, one row per call.
    st.markdown("### Calls by Outcome")

    all_outcomes = list(summary.get("outcomes", {}).keys())
    outcome_filter = st.multiselect(
        "Filter outcomes",
        all_outcomes,
        default=all_outcomes,
    )

    matching = [a for a in analyses if a.get("outcome") in outcome_filter]

    if matching:
        rows = []
        for call in matching:
            rows.append(
                {
                    "Call ID": call["call_id"],
                    "Outcome": call["outcome"],
                    "FCR Status": call.get("fcr_status", "N/A"),
                    "Churn Risk": call.get("churn_risk", "N/A"),
                    "Agent": call.get("agent_classification", "N/A"),
                    "CX Issues": len(call.get("poor_cx_drivers", [])),
                }
            )
        st.dataframe(pd.DataFrame(rows), use_container_width=True, hide_index=True)
    else:
        st.info("No calls match the selected filters.")

    # ---------------------------------------------------------------------
    # DEEP DIVE SECTION
    # ---------------------------------------------------------------------
    st.markdown("---")
    st.markdown("## Deep Dive: Outcome Analysis")

    outcomes_list = list(summary.get("outcomes", {}).keys())
    if outcomes_list:
        # Default to the most problematic outcome (not RESOLVED/POSITIVE).
        flagged = next(
            (o for o in outcomes_list if "UNRESOLVED" in o or "COMPLAINT" in o),
            None,
        )
        default_idx = outcomes_list.index(flagged) if flagged is not None else 0

        selected_outcome = st.selectbox(
            "Select an outcome to analyze in depth",
            outcomes_list,
            index=default_idx,
            help="Choose an outcome to see root causes, driver correlation, and duration analysis.",
        )

        render_outcome_deep_dive(analyses, selected_outcome)
|
||||
|
||||
|
||||
def render_poor_cx_page(summary: dict, analyses: list[dict]):
    """Render detailed Poor CX analysis page."""

    st.markdown("## 😞 Poor CX Driver Analysis")
    st.markdown(
        "Root cause analysis of customer experience issues detected across calls."
    )

    st.markdown("---")

    # Headline counts from the batch summary.
    cx_summary = summary.get("poor_cx", {})
    instance_count = cx_summary.get("total_drivers_found", 0)
    distinct_count = len(cx_summary.get("top_drivers", []))

    left, right = st.columns(2)
    with left:
        st.metric("Total Driver Instances", instance_count)
    with right:
        st.metric("Unique Driver Types", distinct_count)

    st.markdown("---")

    # RCA Sankey: Driver → Outcome → Churn Risk flow.
    st.markdown("### Root Cause Analysis Flow")
    st.markdown(
        "Visual flow showing how Poor CX drivers lead to outcomes and churn risk. "
        "Wider bands indicate more frequent paths."
    )
    render_rca_sankey(analyses)

    st.markdown("---")

    # Full frequency chart (no limit).
    st.markdown("### Driver Frequency")
    render_driver_analysis(summary, "poor_cx", limit=None)

    st.markdown("---")

    # Correlation heatmaps in two tabs.
    st.markdown("### Driver Correlation Analysis")
    st.markdown(
        "Identify patterns where certain drivers frequently appear together "
        "(e.g., 'LONG_WAIT' always with 'POOR_EMPATHY')."
    )

    cooccurrence_tab, outcome_tab = st.tabs(["Driver Co-occurrence", "Driver by Outcome"])

    with cooccurrence_tab:
        render_driver_correlation_heatmap(analyses, "poor_cx_drivers")

    with outcome_tab:
        render_driver_outcome_heatmap(analyses)

    st.markdown("---")

    # Drill-down into individual driver evidence.
    st.markdown("### Driver Evidence Explorer")
    render_driver_detail(analyses, "poor_cx_drivers")
|
||||
|
||||
|
||||
def render_fcr_page(summary: dict, analyses: list[dict]):
    """Render FCR analysis page."""

    st.markdown("## 🎯 First Call Resolution Analysis")
    st.markdown(
        "Analyzing resolution efficiency and identifying callbacks drivers."
    )

    st.markdown("---")

    render_fcr_analysis(analyses, compact=False)

    st.markdown("---")

    # Root causes of FCR failure, aggregated across calls.
    st.markdown("### FCR Failure Root Causes")

    fcr_drivers = aggregate_drivers(analyses, "fcr_failure_drivers")

    if fcr_drivers:
        rows = []
        ranked = sorted(fcr_drivers.items(), key=lambda item: item[1]["count"], reverse=True)
        for code, stats in ranked:
            rows.append(
                {
                    "Driver": code,
                    "Instances": stats["count"],
                    "Calls Affected": stats["call_count"],
                    "Avg Confidence": f"{stats['avg_confidence']:.0%}",
                }
            )
        st.dataframe(pd.DataFrame(rows), use_container_width=True, hide_index=True)

        st.markdown("---")

        # Per-driver evidence quotes and suggested actions.
        st.markdown("### Evidence & Recommendations")
        render_driver_detail(analyses, "fcr_failure_drivers")
    else:
        st.success("✅ No FCR failures detected. Excellent first-call resolution!")
|
||||
|
||||
|
||||
def render_churn_page(summary: dict, analyses: list[dict]):
    """Render churn risk analysis page."""

    st.markdown("## ⚠️ Churn Risk Analysis")
    st.markdown(
        "Identifying customers at risk of churning based on conversation signals."
    )

    st.markdown("---")

    render_churn_risk_analysis(analyses, compact=False)

    st.markdown("---")

    # One expander per call flagged as elevated risk.
    st.markdown("### High Risk Customer Calls")

    high_risk = [a for a in analyses if a.get("churn_risk") in ("HIGH", "AT_RISK")]

    if not high_risk:
        st.success("✅ No high churn risk calls detected.")
        return

    st.warning(
        f"⚠️ {len(high_risk)} calls show elevated churn risk requiring follow-up."
    )

    for analysis in high_risk:
        header = f"📞 {analysis['call_id']} — Risk: {analysis.get('churn_risk', 'N/A')}"
        with st.expander(header):
            st.markdown(f"**Outcome:** {analysis.get('outcome', 'N/A')}")

            risk_drivers = analysis.get("churn_risk_drivers", [])
            if risk_drivers:
                st.markdown("**Risk Drivers:**")
                for d in risk_drivers:
                    st.markdown(
                        f"- **{d.get('driver_code')}** "
                        f"({d.get('confidence', 0):.0%}): "
                        f"{d.get('reasoning', 'N/A')}"
                    )

                    # Surface the recommended follow-up, when present.
                    if d.get("corrective_action"):
                        st.success(f"Action: {d['corrective_action']}")
|
||||
|
||||
|
||||
def render_agent_page(analyses: list[dict]):
    """Render agent performance page."""

    st.markdown("## 👤 Agent Performance Analysis")
    st.markdown(
        "Evaluating agent skills and identifying coaching opportunities."
    )
    st.markdown("---")

    # The heavy lifting lives in the shared component.
    render_agent_performance(analyses)
|
||||
|
||||
|
||||
def render_export_page(summary: dict, analyses: list[dict], batch_id: str):
    """Render export insights page."""

    st.markdown("## 📥 Export Insights")
    st.markdown(
        "Download analysis results in multiple formats for reporting and integration."
    )
    st.markdown("---")

    # Delegates Excel/HTML/JSON generation to the exports module.
    render_export_section(summary, analyses, batch_id)


# =============================================================================
# RUN
# =============================================================================

if __name__ == "__main__":
    main()
|
||||
1636
dashboard/components.py
Normal file
1636
dashboard/components.py
Normal file
File diff suppressed because it is too large
Load Diff
411
dashboard/config.py
Normal file
411
dashboard/config.py
Normal file
@@ -0,0 +1,411 @@
|
||||
"""
|
||||
CXInsights Dashboard - Configuration & Branding
|
||||
Based on Beyond Brand Identity Guidelines v1.0
|
||||
"""
|
||||
|
||||
import streamlit as st
|
||||
|
||||
# =============================================================================
|
||||
# BEYOND BRAND COLORS
|
||||
# =============================================================================
|
||||
|
||||
# Beyond brand palette. "blue" is the single accent color per the brand
# guidelines; everything else is a neutral.
COLORS = {
    # Primary palette
    "black": "#000000",       # Beyond Black - Primary
    "blue": "#6D84E3",        # Beyond Blue - Accent (ONLY accent color)
    "grey": "#B1B1B0",        # Beyond Grey - Secondary
    "light_grey": "#E4E4E4",  # Beyond Light Grey - Backgrounds
    "white": "#FFFFFF",

    # Derived UI-state colors
    "blue_hover": "#5A6FD1",  # Blue darkened 10%
    "blue_light": "#DBE2FC",  # Light blue for subtle backgrounds

    # Chart series colors, ordered by importance (light theme)
    "chart_primary": "#6D84E3",    # Blue - main data
    "chart_secondary": "#B1B1B0",  # Grey - comparison/benchmark
    "chart_tertiary": "#7A7A7A",   # Dark grey - third series
    "chart_quaternary": "#E4E4E4", # Light grey - fourth series

    # Chart gradients (light theme)
    "gradient_blue": ["#E4E4E4", "#B1B1B0", "#6D84E3"],
    "gradient_grey": ["#FFFFFF", "#E4E4E4", "#B1B1B0", "#7A7A7A"],
    "gradient_red": ["#E4E4E4", "#B1B1B0", "#6D84E3", "#5A6FD1"],  # For severity
}

# Plotly default color sequence — primary first, then progressively lighter.
CHART_COLORS = [
    COLORS["blue"],            # Primary
    COLORS["grey"],            # Secondary
    COLORS["chart_tertiary"],  # Dark grey - Tertiary
    COLORS["light_grey"],      # Quaternary
]

# =============================================================================
# TYPOGRAPHY (Outfit font via Google Fonts)
# =============================================================================

FONTS = {
    "family": "'Outfit', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif",
    "sizes": {
        "h1": "40px",
        "h2": "35px",
        "h3": "21px",
        "body": "17px",
        "small": "12px",
        "caption": "10px",
    },
    "weights": {
        "black": 900,
        "bold": 700,
        "medium": 500,
        "regular": 400,
        "light": 300,
        "thin": 100,
    },
}

# =============================================================================
# THEME CONFIG FOR PLOTLY CHARTS
# =============================================================================

# Shared base layout for every Plotly figure. Treat as read-only: copy
# (deeply, for nested dicts) before customizing per chart.
THEME_CONFIG = {
    "layout": {
        "font": {
            "family": FONTS["family"],
            "color": COLORS["black"],
        },
        "paper_bgcolor": COLORS["white"],
        "plot_bgcolor": COLORS["white"],
        "title": {
            "font": {
                "size": 18,
                "family": FONTS["family"],
                "color": COLORS["black"],
            },
            "x": 0,
            "xanchor": "left",
        },
        "legend": {
            "font": {"size": 14},
            "bgcolor": "rgba(255,255,255,0)",
        },
        "xaxis": {
            "gridcolor": COLORS["light_grey"],
            "linecolor": COLORS["grey"],
            "tickfont": {"size": 12, "color": COLORS["grey"]},
            "title_font": {"size": 14, "color": COLORS["grey"]},
        },
        "yaxis": {
            "gridcolor": COLORS["light_grey"],
            "linecolor": COLORS["grey"],
            "tickfont": {"size": 12, "color": COLORS["grey"]},
            "title_font": {"size": 14, "color": COLORS["grey"]},
            "rangemode": "tozero",  # Always start at 0 (McKinsey standard)
        },
        "margin": {"l": 60, "r": 40, "t": 60, "b": 60},
    }
}
|
||||
|
||||
# =============================================================================
|
||||
# STREAMLIT CUSTOM CSS
|
||||
# =============================================================================
|
||||
|
||||
def apply_custom_css():
    """Apply Beyond brand CSS to Streamlit app.

    Injects a single <style> block covering typography (Outfit), the brand
    palette, and overrides for Streamlit's built-in widgets, plus custom
    classes (.kpi-card, .highlight-card, .evidence-quote, .footer).
    """
    css = """
    <style>
        /* Import Outfit font from Google Fonts */
        @import url('https://fonts.googleapis.com/css2?family=Outfit:wght@100;300;400;500;700;900&display=swap');

        /* Global font */
        html, body, [class*="css"] {
            font-family: 'Outfit', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
        }

        /* Headers */
        h1, h2, h3, h4, h5, h6 {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            color: #000000 !important;
        }

        h1 { font-size: 40px !important; }
        h2 { font-size: 35px !important; }
        h3 { font-size: 21px !important; }

        /* Body text */
        p, li, span, div {
            font-family: 'Outfit', sans-serif;
            font-weight: 400;
            color: #000000;
        }

        /* Sidebar styling */
        [data-testid="stSidebar"] {
            background-color: #FFFFFF;
            border-right: 1px solid #E4E4E4;
        }

        [data-testid="stSidebar"] h1,
        [data-testid="stSidebar"] h2,
        [data-testid="stSidebar"] h3 {
            color: #000000 !important;
        }

        /* Main content area */
        .main .block-container {
            padding-top: 2rem;
            max-width: 1200px;
        }

        /* Metric cards - Beyond style */
        [data-testid="stMetric"] {
            background-color: #FFFFFF;
            border: 1px solid #E4E4E4;
            border-radius: 8px;
            padding: 1rem;
        }

        [data-testid="stMetric"] label {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 300 !important;
            font-size: 14px !important;
            color: #B1B1B0 !important;
        }

        [data-testid="stMetric"] [data-testid="stMetricValue"] {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            font-size: 32px !important;
            color: #000000 !important;
        }

        [data-testid="stMetric"] [data-testid="stMetricDelta"] {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 400 !important;
        }

        /* Buttons - Beyond style (light theme) */
        .stButton > button {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            background-color: #6D84E3 !important;
            color: #FFFFFF !important;
            border: none !important;
            border-radius: 4px !important;
            padding: 0.5rem 1.5rem !important;
            transition: background-color 0.2s ease;
        }

        .stButton > button:hover {
            background-color: #5A6FD1 !important;
            color: #FFFFFF !important;
        }

        /* Secondary buttons */
        .stButton > button[kind="secondary"] {
            background-color: #FFFFFF !important;
            color: #6D84E3 !important;
            border: 2px solid #6D84E3 !important;
        }

        .stButton > button[kind="secondary"]:hover {
            background-color: #6D84E3 !important;
            color: #FFFFFF !important;
        }

        /* Selectbox styling */
        [data-testid="stSelectbox"] label {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            color: #000000 !important;
        }

        /* Radio buttons */
        [data-testid="stRadio"] label {
            font-family: 'Outfit', sans-serif !important;
        }

        /* Expander headers */
        .streamlit-expanderHeader {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            color: #000000 !important;
            background-color: #F8F8F8 !important;
        }

        /* Tables - Light theme */
        [data-testid="stTable"] th {
            background-color: #F8F8F8 !important;
            color: #000000 !important;
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            border-bottom: 2px solid #6D84E3 !important;
        }

        [data-testid="stTable"] tr:nth-child(even) {
            background-color: #FAFAFA;
        }

        /* Dataframe styling - Light theme */
        .dataframe th {
            background-color: #F8F8F8 !important;
            color: #000000 !important;
            font-family: 'Outfit', sans-serif !important;
            font-weight: 700 !important;
            text-align: left !important;
            border-bottom: 2px solid #6D84E3 !important;
        }

        .dataframe td {
            font-family: 'Outfit', sans-serif !important;
            text-align: left !important;
            color: #000000 !important;
        }

        .dataframe tr:nth-child(even) {
            background-color: #FAFAFA;
        }

        /* Info/Warning/Error boxes */
        .stAlert {
            font-family: 'Outfit', sans-serif !important;
            border-radius: 4px !important;
        }

        /* Links - Beyond Blue */
        a {
            color: #6D84E3 !important;
            text-decoration: none !important;
        }

        a:hover {
            color: #5A6FD1 !important;
            text-decoration: underline !important;
        }

        /* Caption/small text */
        .caption, small, .stCaption {
            font-family: 'Outfit', sans-serif !important;
            font-weight: 300 !important;
            color: #B1B1B0 !important;
            font-size: 12px !important;
        }

        /* Divider line */
        hr {
            border: none;
            border-top: 1px solid #E4E4E4;
            margin: 1.5rem 0;
        }

        /* Custom KPI card class */
        .kpi-card {
            background: #FFFFFF;
            border: 1px solid #E4E4E4;
            border-radius: 8px;
            padding: 1.5rem;
            text-align: center;
        }

        .kpi-card .kpi-value {
            font-size: 48px;
            font-weight: 700;
            color: #000000;
            line-height: 1.2;
        }

        .kpi-card .kpi-label {
            font-size: 14px;
            font-weight: 300;
            color: #B1B1B0;
            margin-top: 0.5rem;
        }

        .kpi-card .kpi-delta {
            font-size: 14px;
            font-weight: 400;
            color: #6D84E3;
            margin-top: 0.25rem;
        }

        /* Highlight card (with blue accent) */
        .highlight-card {
            background: #FFFFFF;
            border-left: 4px solid #6D84E3;
            border-radius: 4px;
            padding: 1rem 1.5rem;
            margin: 1rem 0;
        }

        /* Evidence quote styling */
        .evidence-quote {
            background: #F8F8F8;
            border-left: 3px solid #6D84E3;
            padding: 1rem;
            margin: 0.5rem 0;
            font-style: italic;
            color: #000000;
        }

        .evidence-speaker {
            font-weight: 700;
            color: #B1B1B0;
            font-size: 12px;
            margin-top: 0.5rem;
        }

        /* Footer styling */
        .footer {
            position: fixed;
            bottom: 0;
            left: 0;
            right: 0;
            background: #FFFFFF;
            border-top: 1px solid #E4E4E4;
            padding: 0.5rem 2rem;
            font-size: 12px;
            color: #B1B1B0;
            z-index: 1000;
        }

        /* Hide Streamlit branding */
        #MainMenu {visibility: hidden;}
        footer {visibility: hidden;}
    </style>
    """
    st.markdown(css, unsafe_allow_html=True)
|
||||
|
||||
|
||||
def get_plotly_layout(title: str = "", height: int = 400) -> dict:
    """Get standard Plotly layout with Beyond branding.

    Args:
        title: Optional chart title; when empty, no title text is set.
        height: Figure height in pixels.

    Returns:
        A layout dict safe to customize further by the caller.

    Note:
        The previous implementation did ``THEME_CONFIG["layout"].copy()``
        (a *shallow* copy) and then mutated ``layout["title"]["text"]``,
        which wrote the title into the shared nested dict inside
        THEME_CONFIG — so one chart's title leaked into every later chart.
        We now rebuild the nested "title" dict instead of mutating it.
    """
    layout = dict(THEME_CONFIG["layout"])
    layout["height"] = height
    if title:
        # Fresh nested dict: never mutate the shared THEME_CONFIG template.
        layout["title"] = {**THEME_CONFIG["layout"]["title"], "text": title}
    return layout
|
||||
|
||||
|
||||
def format_metric_card(value: str, label: str, delta: str = None) -> str:
    """Generate HTML for a branded KPI card.

    Args:
        value: Headline value text.
        label: Caption shown under the value.
        delta: Optional change indicator; omitted entirely when falsy.

    Returns:
        An HTML snippet using the .kpi-card CSS classes.
    """
    if delta:
        delta_html = f'<div class="kpi-delta">{delta}</div>'
    else:
        delta_html = ""
    return (
        "\n    "
        '<div class="kpi-card">\n'
        f'        <div class="kpi-value">{value}</div>\n'
        f'        <div class="kpi-label">{label}</div>\n'
        f"        {delta_html}\n"
        "    </div>\n    "
    )
|
||||
|
||||
|
||||
def format_evidence_quote(text: str, speaker: str = None) -> str:
    """Format evidence text with Beyond styling.

    Args:
        text: Verbatim quote to display (wrapped in literal double quotes).
        speaker: Optional attribution; rendered as an em-dash byline.

    Returns:
        An HTML snippet using the .evidence-quote CSS classes.
    """
    if speaker:
        speaker_html = f'<div class="evidence-speaker">— {speaker}</div>'
    else:
        speaker_html = ""
    return (
        "\n    "
        '<div class="evidence-quote">\n'
        f'        "{text}"\n'
        f"        {speaker_html}\n"
        "    </div>\n    "
    )
|
||||
235
dashboard/data_loader.py
Normal file
235
dashboard/data_loader.py
Normal file
@@ -0,0 +1,235 @@
|
||||
"""
|
||||
CXInsights Dashboard - Data Loader
|
||||
Handles loading and processing of batch analysis data.
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import streamlit as st
|
||||
|
||||
|
||||
@st.cache_data(ttl=60)
def get_available_batches(data_dir: Path) -> list[str]:
    """
    Get list of available batch IDs.

    A directory counts as a valid batch only when it contains an
    exports/summary.json file.

    Args:
        data_dir: Path to data/output directory

    Returns:
        List of batch IDs sorted by modification time (newest last)
    """
    if not data_dir.exists():
        return []

    batches = [
        entry.name
        for entry in data_dir.iterdir()
        if entry.is_dir()
        and not entry.name.startswith(".")
        and (entry / "exports" / "summary.json").exists()
    ]

    # Newest last so the selectbox can default to the most recent batch.
    batches.sort(key=lambda name: (data_dir / name).stat().st_mtime)
    return batches
|
||||
|
||||
|
||||
def _read_analysis_files(directory: Path) -> list[dict]:
    """Best-effort load of every *.json file directly inside *directory*.

    Unreadable or corrupt files are skipped silently: partial batch data is
    more useful to the dashboard than a hard failure.
    """
    results: list[dict] = []
    for json_file in directory.glob("*.json"):
        try:
            with open(json_file, "r", encoding="utf-8") as f:
                results.append(json.load(f))
        except Exception:
            continue
    return results


@st.cache_data(ttl=60)
def load_batch_data(batch_path: Path) -> Optional[dict]:
    """
    Load all data for a batch.

    Args:
        batch_path: Path to batch directory

    Returns:
        Dictionary with summary and analyses, or None if failed
    """
    try:
        # The summary file is mandatory; its absence means "not a batch".
        summary_path = batch_path / "exports" / "summary.json"
        if not summary_path.exists():
            return None

        with open(summary_path, "r", encoding="utf-8") as f:
            summary = json.load(f)

        analyses: list[dict] = []
        analyses_dir = batch_path / "analyses"

        # Nested layout first: analyses/<batch_id>/*.json
        if analyses_dir.exists():
            for subdir in analyses_dir.iterdir():
                if subdir.is_dir():
                    analyses.extend(_read_analysis_files(subdir))

            # Fallback to the flat layout: analyses/*.json
            if not analyses:
                analyses = _read_analysis_files(analyses_dir)

        return {
            "summary": summary,
            "analyses": analyses,
            "batch_id": summary.get("batch_id", batch_path.name),
        }

    except Exception as e:
        # Surface the failure in the UI and let the caller handle None.
        st.error(f"Error loading batch data: {e}")
        return None
|
||||
|
||||
|
||||
def load_transcript(batch_path: Path, call_id: str) -> Optional[dict]:
    """
    Load the transcript JSON for one call, if present.

    Args:
        batch_path: Path to the batch directory.
        call_id: Call identifier (file stem under transcripts/).

    Returns:
        Parsed transcript dict, or None when the file is missing or unreadable.
    """
    transcript_path = batch_path / "transcripts" / f"{call_id}.json"
    if not transcript_path.exists():
        return None
    try:
        with open(transcript_path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        return None
|
||||
|
||||
|
||||
def aggregate_drivers(analyses: list[dict], driver_type: str) -> dict:
    """
    Aggregate drivers of one type across all analyses.

    Args:
        analyses: List of per-call analysis dictionaries.
        driver_type: One of 'poor_cx_drivers', 'lost_sales_drivers',
            'fcr_failure_drivers', 'churn_risk_drivers'.

    Returns:
        Mapping driver_code -> {count, call_count, avg_confidence, instances},
        where count is total occurrences, call_count is the number of distinct
        calls the driver appeared in, and each instance carries its call_id.
    """
    accumulators: dict[str, dict] = {}

    for analysis in analyses:
        call_id = analysis.get("call_id", "unknown")
        for driver in analysis.get(driver_type, []):
            code = driver.get("driver_code", "UNKNOWN")
            # setdefault replaces the manual "if code not in drivers" init block.
            acc = accumulators.setdefault(code, {
                "count": 0,
                "calls": set(),
                "total_confidence": 0,
                "instances": [],
            })
            acc["count"] += 1
            acc["calls"].add(call_id)
            acc["total_confidence"] += driver.get("confidence", 0)
            acc["instances"].append({"call_id": call_id, **driver})

    # Finalize: distinct-call counts and mean confidence per driver code.
    return {
        code: {
            "count": acc["count"],
            "call_count": len(acc["calls"]),
            "avg_confidence": acc["total_confidence"] / acc["count"] if acc["count"] > 0 else 0,
            "instances": acc["instances"],
        }
        for code, acc in accumulators.items()
    }
|
||||
|
||||
|
||||
def get_fcr_distribution(analyses: list[dict]) -> dict:
    """Count calls per FCR status; calls without one count as "UNKNOWN"."""
    # Counter replaces the hand-rolled dict-increment loop.
    return dict(Counter(a.get("fcr_status", "UNKNOWN") for a in analyses))
|
||||
|
||||
|
||||
def get_churn_distribution(analyses: list[dict]) -> dict:
    """Count calls per churn-risk level; calls without one count as "UNKNOWN"."""
    # Counter replaces the hand-rolled dict-increment loop.
    return dict(Counter(a.get("churn_risk", "UNKNOWN") for a in analyses))
|
||||
|
||||
|
||||
def get_agent_classification_distribution(analyses: list[dict]) -> dict:
    """Count calls per agent classification; missing values count as "UNKNOWN"."""
    # Counter replaces the hand-rolled dict-increment loop.
    return dict(Counter(a.get("agent_classification", "UNKNOWN") for a in analyses))
|
||||
|
||||
|
||||
def calculate_kpis(summary: dict, analyses: list[dict]) -> dict:
    """
    Compute the dashboard's headline KPIs.

    Args:
        summary: Batch-level summary.json contents.
        analyses: Per-call analysis dictionaries.

    Returns:
        Dict with total_calls plus success/poor-CX/FCR/churn/improvement rates
        (percentages of total_calls, 0 when the batch is empty) and the
        batch-level driver totals.
    """
    batch_stats = summary.get("summary", {})
    total = batch_stats.get("total_calls", 0)
    successful = batch_stats.get("successful_analyses", 0)

    def pct(count):
        # Every rate is defined as 0 for an empty batch.
        return (count / total * 100) if total > 0 else 0

    # Calls flagged with at least one poor-CX driver.
    poor_cx_calls = sum(1 for a in analyses if a.get("poor_cx_drivers", []))

    # Per blueprint, only FIRST_CALL counts as an FCR success.
    fcr_success = sum(
        1 for a in analyses if a.get("fcr_status", "UNKNOWN") == "FIRST_CALL"
    )

    # HIGH and AT_RISK both count toward the churn-risk rate.
    at_risk = sum(
        1 for a in analyses if a.get("churn_risk", "UNKNOWN") in ("HIGH", "AT_RISK")
    )

    # Agents flagged for coaching.
    needs_coaching = sum(
        1 for a in analyses
        if a.get("agent_classification", "UNKNOWN") in ("NEEDS_IMPROVEMENT", "POOR")
    )

    return {
        "total_calls": total,
        "success_rate": pct(successful),
        "poor_cx_rate": pct(poor_cx_calls),
        "fcr_rate": pct(fcr_success),
        "churn_risk_rate": pct(at_risk),
        "improvement_rate": pct(needs_coaching),
        "total_poor_cx_drivers": summary.get("poor_cx", {}).get("total_drivers_found", 0),
        "total_lost_sales_drivers": summary.get("lost_sales", {}).get("total_drivers_found", 0),
    }
|
||||
466
dashboard/exports.py
Normal file
466
dashboard/exports.py
Normal file
@@ -0,0 +1,466 @@
|
||||
"""
|
||||
CXInsights Dashboard - Export Functions
|
||||
Export insights to Excel, PDF, and other formats.
|
||||
"""
|
||||
|
||||
import io
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
import streamlit as st
|
||||
|
||||
from config import COLORS
|
||||
|
||||
|
||||
def create_excel_export(summary: dict, analyses: list[dict], batch_id: str) -> io.BytesIO:
    """
    Create a comprehensive Excel export with multiple sheets.

    Args:
        summary: Batch-level summary.json contents.
        analyses: Per-call analysis dictionaries.
        batch_id: Identifier shown on the Executive Summary sheet.

    Returns:
        In-memory .xlsx workbook as a BytesIO rewound to position 0,
        ready to hand to st.download_button.

    Sheets written (conditional sheets only when they have rows):
    - Executive Summary
    - Outcomes
    - Call Details
    - Poor CX Drivers
    - FCR Failures
    - Churn Risk
    - Agent Performance
    - Top Drivers Summary
    """
    # NOTE(review): requires the optional 'openpyxl' engine to be installed
    # alongside pandas — confirm it is pinned in the project dependencies.
    output = io.BytesIO()

    with pd.ExcelWriter(output, engine='openpyxl') as writer:
        # Sheet 1: Executive Summary — one Metric/Value row per headline figure.
        summary_data = {
            "Metric": [
                "Batch ID",
                "Generated At",
                "Total Calls Analyzed",
                "Successful Analyses",
                "Failed Analyses",
                "Poor CX Drivers Found",
                "Lost Sales Drivers Found",
            ],
            "Value": [
                batch_id,
                summary.get("generated_at", "N/A"),
                summary.get("summary", {}).get("total_calls", 0),
                summary.get("summary", {}).get("successful_analyses", 0),
                summary.get("summary", {}).get("failed_analyses", 0),
                summary.get("poor_cx", {}).get("total_drivers_found", 0),
                summary.get("lost_sales", {}).get("total_drivers_found", 0),
            ]
        }
        df_summary = pd.DataFrame(summary_data)
        df_summary.to_excel(writer, sheet_name="Executive Summary", index=False)

        # Sheet 2: Outcomes distribution, sorted by count descending.
        outcomes = summary.get("outcomes", {})
        if outcomes:
            df_outcomes = pd.DataFrame([
                {"Outcome": k, "Count": v, "Percentage": f"{v/sum(outcomes.values())*100:.1f}%"}
                for k, v in sorted(outcomes.items(), key=lambda x: -x[1])
            ])
            df_outcomes.to_excel(writer, sheet_name="Outcomes", index=False)

        # Sheet 3: one row per call with per-category driver counts and
        # observed metrics (duration, turn count) when present.
        call_data = []
        for a in analyses:
            call_data.append({
                "Call ID": a.get("call_id", ""),
                "Outcome": a.get("outcome", ""),
                "FCR Status": a.get("fcr_status", ""),
                "Churn Risk": a.get("churn_risk", ""),
                "Agent Classification": a.get("agent_classification", ""),
                "Poor CX Drivers": len(a.get("poor_cx_drivers", [])),
                "FCR Failure Drivers": len(a.get("fcr_failure_drivers", [])),
                "Churn Risk Drivers": len(a.get("churn_risk_drivers", [])),
                "Duration (sec)": a.get("observed", {}).get("audio_duration_sec", ""),
                "Total Turns": a.get("observed", {}).get("turn_metrics", {}).get("total_turns", ""),
            })
        df_calls = pd.DataFrame(call_data)
        df_calls.to_excel(writer, sheet_name="Call Details", index=False)

        # Sheet 4: one row per poor-CX driver instance, with its evidence
        # spans joined into a single cell.
        poor_cx_data = []
        for a in analyses:
            for d in a.get("poor_cx_drivers", []):
                poor_cx_data.append({
                    "Call ID": a.get("call_id", ""),
                    "Driver Code": d.get("driver_code", ""),
                    "Confidence": f"{d.get('confidence', 0):.0%}",
                    "Origin": d.get("origin", ""),
                    "Reasoning": d.get("reasoning", ""),
                    "Corrective Action": d.get("corrective_action", ""),
                    "Evidence": "; ".join([e.get("text", "") for e in d.get("evidence_spans", [])]),
                })
        if poor_cx_data:
            df_poor_cx = pd.DataFrame(poor_cx_data)
            df_poor_cx.to_excel(writer, sheet_name="Poor CX Drivers", index=False)

        # Sheet 5: one row per FCR-failure driver instance.
        fcr_data = []
        for a in analyses:
            for d in a.get("fcr_failure_drivers", []):
                fcr_data.append({
                    "Call ID": a.get("call_id", ""),
                    "Driver Code": d.get("driver_code", ""),
                    "Confidence": f"{d.get('confidence', 0):.0%}",
                    "Origin": d.get("origin", ""),
                    "Reasoning": d.get("reasoning", ""),
                    "Corrective Action": d.get("corrective_action", ""),
                })
        if fcr_data:
            df_fcr = pd.DataFrame(fcr_data)
            df_fcr.to_excel(writer, sheet_name="FCR Failures", index=False)

        # Sheet 6: one row per churn-risk driver instance, tagged with the
        # call's overall risk level.
        churn_data = []
        for a in analyses:
            for d in a.get("churn_risk_drivers", []):
                churn_data.append({
                    "Call ID": a.get("call_id", ""),
                    "Risk Level": a.get("churn_risk", ""),
                    "Driver Code": d.get("driver_code", ""),
                    "Confidence": f"{d.get('confidence', 0):.0%}",
                    "Reasoning": d.get("reasoning", ""),
                    "Corrective Action": d.get("corrective_action", ""),
                })
        if churn_data:
            df_churn = pd.DataFrame(churn_data)
            df_churn.to_excel(writer, sheet_name="Churn Risk", index=False)

        # Sheet 7: per-call agent view — skill codes flattened to CSV strings.
        agent_data = []
        for a in analyses:
            positive = [s.get("skill_code", "") for s in a.get("agent_positive_skills", [])]
            improvement = [s.get("skill_code", "") for s in a.get("agent_improvement_areas", [])]
            agent_data.append({
                "Call ID": a.get("call_id", ""),
                "Classification": a.get("agent_classification", ""),
                "Positive Skills": ", ".join(positive),
                "Improvement Areas": ", ".join(improvement),
            })
        df_agent = pd.DataFrame(agent_data)
        df_agent.to_excel(writer, sheet_name="Agent Performance", index=False)

        # Sheet 8: batch-level top drivers for both Poor CX and Lost Sales,
        # taken from the precomputed summary (not re-aggregated here).
        top_drivers = []
        for d in summary.get("poor_cx", {}).get("top_drivers", []):
            top_drivers.append({
                "Type": "Poor CX",
                "Driver Code": d.get("driver_code", ""),
                "Occurrences": d.get("occurrences", 0),
                "Call Rate": f"{d.get('call_rate', 0)*100:.1f}%",
                "Avg Confidence": f"{d.get('avg_confidence', 0):.0%}",
            })
        for d in summary.get("lost_sales", {}).get("top_drivers", []):
            top_drivers.append({
                "Type": "Lost Sales",
                "Driver Code": d.get("driver_code", ""),
                "Occurrences": d.get("occurrences", 0),
                "Call Rate": f"{d.get('call_rate', 0)*100:.1f}%",
                "Avg Confidence": f"{d.get('avg_confidence', 0):.0%}",
            })
        if top_drivers:
            df_top = pd.DataFrame(top_drivers)
            df_top.to_excel(writer, sheet_name="Top Drivers Summary", index=False)

    # Rewind so the caller can stream the buffer from the start.
    output.seek(0)
    return output
|
||||
|
||||
|
||||
def create_executive_summary_html(summary: dict, analyses: list[dict], batch_id: str) -> str:
    """
    Create the HTML executive summary report (intended for print-to-PDF).

    Args:
        summary: Batch-level summary.json contents.
        analyses: Per-call analysis dictionaries.
        batch_id: Identifier shown in the report header.

    Returns:
        Self-contained HTML document string (inline CSS, Outfit web font,
        Beyond brand colors #6D84E3 etc.) with KPI cards, key insights,
        outcome distribution and top poor-CX driver tables.
    """
    total_calls = summary.get("summary", {}).get("total_calls", 0)

    # Rates are recomputed from the raw analyses here (not taken from the
    # summary) so the report matches the dashboard KPI definitions.
    poor_cx_calls = sum(1 for a in analyses if len(a.get("poor_cx_drivers", [])) > 0)
    poor_cx_rate = (poor_cx_calls / total_calls * 100) if total_calls > 0 else 0

    # HIGH and AT_RISK both count toward churn risk.
    high_churn = sum(1 for a in analyses if a.get("churn_risk") in ["HIGH", "AT_RISK"])
    churn_rate = (high_churn / total_calls * 100) if total_calls > 0 else 0

    # FCR rate - Per blueprint: Primera Llamada = FCR success (FIRST_CALL only).
    fcr_success = sum(1 for a in analyses if a.get("fcr_status") == "FIRST_CALL")
    fcr_rate = (fcr_success / total_calls * 100) if total_calls > 0 else 0

    # Top 5 poor-CX drivers from the precomputed batch summary.
    top_drivers = summary.get("poor_cx", {}).get("top_drivers", [])[:5]

    # Outcome counts from the precomputed batch summary.
    outcomes = summary.get("outcomes", {})

    # NOTE: literal CSS braces are doubled ({{ }}) because this is an f-string.
    html = f"""
    <!DOCTYPE html>
    <html>
    <head>
        <meta charset="UTF-8">
        <title>CXInsights Executive Report - {batch_id}</title>
        <link href="https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;700&display=swap" rel="stylesheet">
        <style>
            * {{ margin: 0; padding: 0; box-sizing: border-box; }}
            body {{
                font-family: 'Outfit', sans-serif;
                color: #000000;
                background: #FFFFFF;
                padding: 40px;
                max-width: 900px;
                margin: 0 auto;
            }}
            .header {{
                border-bottom: 3px solid #6D84E3;
                padding-bottom: 20px;
                margin-bottom: 30px;
            }}
            .header h1 {{
                font-size: 32px;
                font-weight: 700;
                color: #000000;
            }}
            .header .subtitle {{
                color: #B1B1B0;
                font-size: 14px;
                margin-top: 8px;
            }}
            .brand {{
                font-size: 18px;
                font-weight: 700;
                margin-bottom: 8px;
            }}
            .brand sup {{
                color: #6D84E3;
                font-size: 12px;
            }}
            .kpi-grid {{
                display: grid;
                grid-template-columns: repeat(4, 1fr);
                gap: 20px;
                margin-bottom: 40px;
            }}
            .kpi-card {{
                background: #F8F8F8;
                padding: 20px;
                border-radius: 8px;
                text-align: center;
            }}
            .kpi-value {{
                font-size: 36px;
                font-weight: 700;
                color: #000000;
            }}
            .kpi-label {{
                font-size: 12px;
                color: #B1B1B0;
                margin-top: 8px;
            }}
            .section {{ margin-bottom: 40px; }}
            .section h2 {{
                font-size: 21px;
                font-weight: 700;
                margin-bottom: 16px;
                padding-bottom: 8px;
                border-bottom: 2px solid #6D84E3;
            }}
            table {{
                width: 100%;
                border-collapse: collapse;
                margin-top: 16px;
            }}
            th {{
                background: #F8F8F8;
                padding: 12px;
                text-align: left;
                font-weight: 700;
                border-bottom: 2px solid #6D84E3;
            }}
            td {{
                padding: 12px;
                border-bottom: 1px solid #E4E4E4;
            }}
            tr:nth-child(even) {{ background: #FAFAFA; }}
            .insight {{
                background: #F8F8F8;
                border-left: 4px solid #6D84E3;
                padding: 16px;
                margin: 16px 0;
            }}
            .insight strong {{ color: #6D84E3; }}
            .footer {{
                margin-top: 40px;
                padding-top: 20px;
                border-top: 1px solid #E4E4E4;
                font-size: 12px;
                color: #B1B1B0;
            }}
            @media print {{
                body {{ padding: 20px; }}
                .kpi-grid {{ grid-template-columns: repeat(2, 1fr); }}
            }}
        </style>
    </head>
    <body>
        <div class="header">
            <div class="brand">beyond<sup>cx</sup></div>
            <h1>CXInsights Executive Report</h1>
            <div class="subtitle">
                Batch: {batch_id} |
                Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')} |
                Calls Analyzed: {total_calls}
            </div>
        </div>

        <div class="kpi-grid">
            <div class="kpi-card">
                <div class="kpi-value">{total_calls}</div>
                <div class="kpi-label">Total Calls</div>
            </div>
            <div class="kpi-card">
                <div class="kpi-value">{poor_cx_rate:.1f}%</div>
                <div class="kpi-label">Poor CX Rate</div>
            </div>
            <div class="kpi-card">
                <div class="kpi-value">{fcr_rate:.1f}%</div>
                <div class="kpi-label">FCR Rate</div>
            </div>
            <div class="kpi-card">
                <div class="kpi-value">{churn_rate:.1f}%</div>
                <div class="kpi-label">Churn Risk</div>
            </div>
        </div>

        <div class="section">
            <h2>Key Insights</h2>
            {"".join([f'<div class="insight"><strong>{d.get("driver_code", "")}</strong> detected in {d.get("occurrences", 0)} calls ({d.get("call_rate", 0)*100:.0f}% of total)</div>' for d in top_drivers[:3]]) if top_drivers else '<p>No critical drivers detected.</p>'}
        </div>

        <div class="section">
            <h2>Outcome Distribution</h2>
            <table>
                <thead>
                    <tr>
                        <th>Outcome</th>
                        <th>Count</th>
                        <th>Percentage</th>
                    </tr>
                </thead>
                <tbody>
                    {"".join([f'<tr><td>{k}</td><td>{v}</td><td>{v/sum(outcomes.values())*100:.1f}%</td></tr>' for k, v in sorted(outcomes.items(), key=lambda x: -x[1])]) if outcomes else '<tr><td colspan="3">No data</td></tr>'}
                </tbody>
            </table>
        </div>

        <div class="section">
            <h2>Top Poor CX Drivers</h2>
            <table>
                <thead>
                    <tr>
                        <th>Driver</th>
                        <th>Occurrences</th>
                        <th>Call Rate</th>
                        <th>Confidence</th>
                    </tr>
                </thead>
                <tbody>
                    {"".join([f'<tr><td>{d.get("driver_code", "")}</td><td>{d.get("occurrences", 0)}</td><td>{d.get("call_rate", 0)*100:.1f}%</td><td>{d.get("avg_confidence", 0):.0%}</td></tr>' for d in top_drivers]) if top_drivers else '<tr><td colspan="4">No drivers detected</td></tr>'}
                </tbody>
            </table>
        </div>

        <div class="footer">
            <p>Generated by Beyond CXInsights | {datetime.now().strftime('%Y-%m-%d %H:%M')}</p>
            <p>This report contains AI-generated insights. Please review with domain expertise.</p>
        </div>
    </body>
    </html>
    """

    return html
|
||||
|
||||
|
||||
def create_json_export(summary: dict, analyses: list[dict], batch_id: str) -> str:
    """Serialize the full batch payload (summary + analyses) to JSON.

    Returns:
        Pretty-printed JSON string (non-ASCII preserved) including an
        ISO-8601 export timestamp.
    """
    return json.dumps(
        {
            "batch_id": batch_id,
            "exported_at": datetime.now().isoformat(),
            "summary": summary,
            "analyses": analyses,
        },
        indent=2,
        ensure_ascii=False,
    )
|
||||
|
||||
|
||||
def render_export_section(summary: dict, analyses: list[dict], batch_id: str):
    """
    Render the Export tab: three download buttons (Excel / HTML / JSON)
    plus a preview of what each export contains.

    Args:
        summary: Batch-level summary.json contents.
        analyses: Per-call analysis dictionaries.
        batch_id: Used in the generated download file names.

    Side effects:
        Writes Streamlit widgets to the page; builds all three export
        payloads eagerly on every rerun (Excel, HTML and JSON), not lazily
        on click.
    """

    st.markdown("### Export Options")

    col1, col2, col3 = st.columns(3)

    # Column 1: full multi-sheet Excel workbook.
    with col1:
        st.markdown("#### Excel Report")
        st.caption("Complete analysis with multiple sheets")

        excel_data = create_excel_export(summary, analyses, batch_id)
        st.download_button(
            label="Download Excel",
            data=excel_data,
            file_name=f"cxinsights_{batch_id}_{datetime.now().strftime('%Y%m%d')}.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            use_container_width=True,
        )

    # Column 2: executive-summary HTML (user prints to PDF from the browser).
    with col2:
        st.markdown("#### Executive Summary")
        st.caption("HTML report (print to PDF)")

        html_data = create_executive_summary_html(summary, analyses, batch_id)
        st.download_button(
            label="Download HTML",
            data=html_data,
            file_name=f"cxinsights_{batch_id}_executive_{datetime.now().strftime('%Y%m%d')}.html",
            mime="text/html",
            use_container_width=True,
        )

    # Column 3: raw JSON payload for downstream integrations.
    with col3:
        st.markdown("#### Raw Data")
        st.caption("JSON format for integration")

        json_data = create_json_export(summary, analyses, batch_id)
        st.download_button(
            label="Download JSON",
            data=json_data,
            file_name=f"cxinsights_{batch_id}_{datetime.now().strftime('%Y%m%d')}.json",
            mime="application/json",
            use_container_width=True,
        )

    st.markdown("---")

    # Quick stats: static sheet list alongside counts derived from analyses.
    st.markdown("#### Export Preview")

    col1, col2 = st.columns(2)

    with col1:
        st.markdown("**Excel sheets included:**")
        st.markdown("""
        - Executive Summary
        - Outcomes Distribution
        - Call Details
        - Poor CX Drivers
        - FCR Failures
        - Churn Risk
        - Agent Performance
        - Top Drivers Summary
        """)

    with col2:
        st.markdown("**Data summary:**")
        st.markdown(f"""
        - **Calls:** {len(analyses)}
        - **Poor CX instances:** {sum(len(a.get('poor_cx_drivers', [])) for a in analyses)}
        - **FCR failures:** {sum(len(a.get('fcr_failure_drivers', [])) for a in analyses)}
        - **Churn risk drivers:** {sum(len(a.get('churn_risk_drivers', [])) for a in analyses)}
        """)
|
||||
Reference in New Issue
Block a user