diff --git a/TRANSLATION_STATUS.md b/TRANSLATION_STATUS.md new file mode 100644 index 0000000..3861e79 --- /dev/null +++ b/TRANSLATION_STATUS.md @@ -0,0 +1,163 @@ +# Translation Status - Beyond CX Analytics + +## ✅ Completed Modules + +### Agentic Readiness Module +- **Status:** ✅ **COMPLETED** +- **Commit:** `b991824` +- **Files:** + - ✅ `frontend/utils/agenticReadinessV2.ts` - All functions, comments, and descriptions translated + - ✅ `frontend/components/tabs/AgenticReadinessTab.tsx` - RED_FLAG_CONFIGS and comments translated + - ✅ `frontend/locales/en.json` & `es.json` - New subfactors section added + - ✅ `backend/beyond_flows/scorers/agentic_score.py` - All docstrings, comments, and reason codes translated + +--- + +## 🔄 Modules Pending Translation + +### HIGH PRIORITY - Core Utils (Frontend) + +#### 1. **realDataAnalysis.ts** +- **Lines of Spanish:** ~92 occurrences +- **Scope:** + - Function names: `clasificarTierSimple()`, `clasificarTier()` + - 20+ inline comments in Spanish + - Function documentation +- **Impact:** HIGH - Core analysis engine +- **Estimated effort:** 2-3 hours + +#### 2. **analysisGenerator.ts** +- **Lines of Spanish:** ~49 occurrences +- **Scope:** + - Multiple inline comments + - References to `clasificarTierSimple()` + - Data transformation comments +- **Impact:** HIGH - Main data generator +- **Estimated effort:** 1-2 hours + +#### 3. **backendMapper.ts** +- **Lines of Spanish:** ~13 occurrences +- **Scope:** + - Function documentation + - Mapping logic comments +- **Impact:** MEDIUM - Backend integration +- **Estimated effort:** 30-60 minutes + +--- + +### MEDIUM PRIORITY - Utilities (Frontend) + +#### 4. **dataTransformation.ts** +- **Lines of Spanish:** ~8 occurrences +- **Impact:** MEDIUM +- **Estimated effort:** 30 minutes + +#### 5. **segmentClassifier.ts** +- **Lines of Spanish:** ~3 occurrences +- **Impact:** LOW +- **Estimated effort:** 15 minutes + +#### 6. 
**fileParser.ts** +- **Lines of Spanish:** ~3 occurrences +- **Impact:** LOW +- **Estimated effort:** 15 minutes + +#### 7. **apiClient.ts** +- **Lines of Spanish:** ~2 occurrences +- **Impact:** LOW +- **Estimated effort:** 10 minutes + +#### 8. **serverCache.ts** +- **Lines of Spanish:** ~2 occurrences +- **Impact:** LOW +- **Estimated effort:** 10 minutes + +--- + +### MEDIUM PRIORITY - Backend Dimensions + +#### 9. **backend/beyond_metrics/dimensions/OperationalPerformance.py** +- **Lines of Spanish:** ~7 occurrences +- **Impact:** MEDIUM +- **Estimated effort:** 30 minutes + +#### 10. **backend/beyond_metrics/dimensions/SatisfactionExperience.py** +- **Lines of Spanish:** ~8 occurrences +- **Impact:** MEDIUM +- **Estimated effort:** 30 minutes + +#### 11. **backend/beyond_metrics/dimensions/EconomyCost.py** +- **Lines of Spanish:** ~4 occurrences +- **Impact:** MEDIUM +- **Estimated effort:** 20 minutes + +--- + +### LOW PRIORITY - API & Services + +#### 12. **backend/beyond_api/api/analysis.py** +- **Lines of Spanish:** ~1 occurrence +- **Impact:** LOW +- **Estimated effort:** 5 minutes + +#### 13. **backend/beyond_api/api/auth.py** +- **Lines of Spanish:** ~1 occurrence +- **Impact:** LOW +- **Estimated effort:** 5 minutes + +#### 14. **backend/beyond_api/services/analysis_service.py** +- **Lines of Spanish:** ~2 occurrences +- **Impact:** LOW +- **Estimated effort:** 10 minutes + +#### 15. **backend/beyond_metrics/io/base.py** +- **Lines of Spanish:** ~1 occurrence +- **Impact:** LOW +- **Estimated effort:** 5 minutes + +#### 16. 
**backend/beyond_metrics/io/google_drive.py** +- **Lines of Spanish:** ~2 occurrences +- **Impact:** LOW +- **Estimated effort:** 10 minutes + +--- + +## 📊 Summary Statistics + +| Category | Files | Total Occurrences | Estimated Time | +|----------|-------|-------------------|----------------| +| ✅ Completed | 4 | ~150 | 3 hours (DONE) | +| 🔴 High Priority | 3 | 154 | 4-6 hours | +| 🟡 Medium Priority | 8 | 35 | 2-3 hours | +| 🟢 Low Priority | 5 | 7 | 45 minutes | +| **TOTAL PENDING** | **16** | **196** | **~8 hours** | + +--- + +## 🎯 Recommended Translation Order + +### Phase 1: Critical Path (High Priority) +1. `realDataAnalysis.ts` - Core analysis engine with `clasificarTier()` functions +2. `analysisGenerator.ts` - Main data generation orchestrator +3. `backendMapper.ts` - Backend integration layer + +### Phase 2: Supporting Utils (Medium Priority) +4. `dataTransformation.ts` +5. Backend dimension files (`OperationalPerformance.py`, `SatisfactionExperience.py`, `EconomyCost.py`) + +### Phase 3: Final Cleanup (Low Priority) +6. Remaining utility files and API services + +--- + +## 📝 Notes + +- **Variable names** like `volumen_mes`, `escalación`, etc. 
in data interfaces should **remain as-is** for API compatibility +- **Function names** that are part of the public API should be carefully reviewed before renaming +- **i18n strings** in locales files should continue to have both EN/ES versions +- **Reason codes** and internal enums should be in English for consistency + +--- + +**Last Updated:** 2026-02-07 +**Status:** agenticReadiness module completed, 16 modules pending diff --git a/backend/beyond_flows/scorers/agentic_score.py b/backend/beyond_flows/scorers/agentic_score.py index 1963d66..1729dbd 100644 --- a/backend/beyond_flows/scorers/agentic_score.py +++ b/backend/beyond_flows/scorers/agentic_score.py @@ -1,22 +1,22 @@ """ agentic_score.py -Calcula el Agentic Readiness Score de un contact center a partir -de un JSON con KPIs agregados (misma estructura que results.json). +Calculates the Agentic Readiness Score of a contact center from +a JSON file with aggregated KPIs (same structure as results.json). -Diseñado como clase para integrarse fácilmente en pipelines. +Designed as a class to integrate easily into pipelines. -Características: -- Tolerante a datos faltantes: si una dimensión no se puede calcular - (porque faltan KPIs), se marca como `computed = False` y no se - incluye en el cálculo del score global. -- La llamada típica en un pipeline será: +Features: +- Tolerant to missing data: if a dimension cannot be calculated + (due to missing KPIs), it is marked as `computed = False` and not + included in the global score calculation. +- Typical pipeline call: from agentic_score import AgenticScorer scorer = AgenticScorer() - result = scorer.run_on_folder("/ruta/a/carpeta") + result = scorer.run_on_folder("/path/to/folder") -Esa carpeta debe contener un `results.json` de entrada. -El módulo generará un `agentic_readiness.json` en la misma carpeta. +The folder must contain a `results.json` input file. +The module will generate an `agentic_readiness.json` in the same folder. 
""" from __future__ import annotations @@ -35,7 +35,7 @@ Number = Union[int, float] # ========================= def _is_nan(x: Any) -> bool: - """Devuelve True si x es NaN, None o el string 'NaN'.""" + """Returns True if x is NaN, None or the string 'NaN'.""" try: if x is None: return True @@ -60,7 +60,7 @@ def _safe_mean(values: Sequence[Optional[Number]]) -> Optional[float]: def _get_nested(d: Dict[str, Any], *keys: str, default: Any = None) -> Any: - """Acceso seguro a diccionarios anidados.""" + """Safe access to nested dictionaries.""" cur: Any = d for k in keys: if not isinstance(cur, dict) or k not in cur: @@ -75,20 +75,20 @@ def _clamp(value: float, lo: float = 0.0, hi: float = 10.0) -> float: def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]: """ - Normaliza un campo que representa una secuencia numérica. + Normalizes a field representing a numeric sequence. - Soporta: - - Formato antiguo del pipeline: [10, 20, 30] - - Formato nuevo del pipeline: {"labels": [...], "values": [10, 20, 30]} + Supports: + - Old pipeline format: [10, 20, 30] + - New pipeline format: {"labels": [...], "values": [10, 20, 30]} - Devuelve: - - lista de números, si hay datos numéricos válidos - - None, si el campo no tiene una secuencia numérica interpretable + Returns: + - list of numbers, if there is valid numeric data + - None, if the field does not have an interpretable numeric sequence """ if field is None: return None - # Formato nuevo: {"labels": [...], "values": [...]} + # New format: {"labels": [...], "values": [...]} if isinstance(field, dict) and "values" in field: seq = field.get("values") else: @@ -102,7 +102,7 @@ def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]: if isinstance(v, (int, float)): out.append(v) else: - # Intentamos conversión suave por si viene como string numérico + # Try soft conversion in case it's a numeric string try: out.append(float(v)) except (TypeError, ValueError): @@ -117,21 +117,21 @@ def 
_normalize_numeric_sequence(field: Any) -> Optional[List[Number]]: def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, Any]: """ - Repetitividad basada en volumen medio por skill. + Repeatability based on average volume per skill. - Regla (pensada por proceso/skill): - - 10 si volumen > 80 - - 5 si 40–80 - - 0 si < 40 + Rule (designed per process/skill): + - 10 if volume > 80 + - 5 if 40–80 + - 0 if < 40 - Si no hay datos (lista vacía o no numérica), la dimensión - se marca como no calculada (computed = False). + If there is no data (empty or non-numeric list), the dimension + is marked as not calculated (computed = False). """ if not volume_by_skill: return { "score": None, "computed": False, - "reason": "sin_datos_volumen", + "reason": "no_volume_data", "details": { "avg_volume_per_skill": None, "volume_by_skill": volume_by_skill, @@ -143,7 +143,7 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An return { "score": None, "computed": False, - "reason": "volumen_no_numerico", + "reason": "volume_not_numeric", "details": { "avg_volume_per_skill": None, "volume_by_skill": volume_by_skill, @@ -152,13 +152,13 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An if avg_volume > 80: score = 10.0 - reason = "alto_volumen" + reason = "high_volume" elif avg_volume >= 40: score = 5.0 - reason = "volumen_medio" + reason = "medium_volume" else: score = 0.0 - reason = "volumen_bajo" + reason = "low_volume" return { "score": score, @@ -178,36 +178,36 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An def score_predictibilidad(aht_ratio: Any, escalation_rate: Any) -> Dict[str, Any]: """ - Predictibilidad basada en: - - Variabilidad AHT: ratio P90/P50 - - Tasa de escalación (%) + Predictability based on: + - AHT variability: ratio P90/P50 + - Escalation rate (%) - Regla: - - 10 si ratio < 1.5 y escalación < 10% - - 5 si ratio 1.5–2.0 o escalación 10–20% - - 0 
si ratio > 2.0 y escalación > 20% - - 3 fallback si datos parciales + Rule: + - 10 if ratio < 1.5 and escalation < 10% + - 5 if ratio 1.5–2.0 or escalation 10–20% + - 0 if ratio > 2.0 and escalation > 20% + - 3 fallback if partial data - Si no hay ni ratio ni escalación, la dimensión no se calcula. + If there is no ratio nor escalation, the dimension is not calculated. """ if aht_ratio is None and escalation_rate is None: return { "score": None, "computed": False, - "reason": "sin_datos", + "reason": "no_data", "details": { "aht_p90_p50_ratio": None, "escalation_rate_pct": None, }, } - # Normalizamos ratio + # Normalize ratio if aht_ratio is None or _is_nan(aht_ratio): ratio: Optional[float] = None else: ratio = float(aht_ratio) - # Normalizamos escalación + # Normalize escalation if escalation_rate is None or _is_nan(escalation_rate): esc: Optional[float] = None else: @@ -217,7 +217,7 @@ def score_predictibilidad(aht_ratio: Any, return { "score": None, "computed": False, - "reason": "sin_datos", + "reason": "no_data", "details": { "aht_p90_p50_ratio": None, "escalation_rate_pct": None, }, } @@ -230,20 +230,20 @@ def score_predictibilidad(aht_ratio: Any, if ratio is not None and esc is not None: if ratio < 1.5 and esc < 10.0: score = 10.0 - reason = "alta_predictibilidad" + reason = "high_predictability" elif (1.5 <= ratio <= 2.0) or (10.0 <= esc <= 20.0): score = 5.0 - reason = "predictibilidad_media" + reason = "medium_predictability" elif ratio > 2.0 and esc > 20.0: score = 0.0 - reason = "baja_predictibilidad" + reason = "low_predictability" else: score = 3.0 - reason = "caso_intermedio" + reason = "intermediate_case" else: - # Datos parciales: penalizamos pero no ponemos a 0 + # Partial data: penalize but do not set to 0 score = 3.0 - reason = "datos_parciales" + reason = "partial_data" return { "score": score, @@ -263,23 +263,23 @@ def score_predictibilidad(aht_ratio: Any, def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]: """ - 
Estructuración de datos usando proxy de canal. + Data structuring using channel proxy. - Asumimos que el canal con mayor % es texto (en proyectos reales se puede + We assume the channel with the highest % is text (en proyectos reales se puede parametrizar esta asignación). - Regla: - - 10 si texto > 60% + Rule: + - 10 if text > 60% - 5 si 30–60% - 0 si < 30% - Si no hay datos de canales, la dimensión no se calcula. + If there is no channel data, the dimension is not calculated. """ if not channel_distribution_pct: return { "score": None, "computed": False, - "reason": "sin_datos_canal", + "reason": "no_channel_data", "details": { "estimated_text_share_pct": None, "channel_distribution_pct": channel_distribution_pct, @@ -299,7 +299,7 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]: return { "score": None, "computed": False, - "reason": "canales_no_numericos", + "reason": "channels_not_numeric", "details": { "estimated_text_share_pct": None, "channel_distribution_pct": channel_distribution_pct, @@ -308,13 +308,13 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]: if max_share > 60.0: score = 10.0 - reason = "alta_proporcion_texto" + reason = "high_text_proportion" elif max_share >= 30.0: score = 5.0 - reason = "proporcion_texto_media" + reason = "medium_text_proportion" else: score = 0.0 - reason = "baja_proporcion_texto" + reason = "low_text_proportion" return { "score": score, @@ -334,9 +334,9 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]: def score_complejidad(aht_ratio: Any, escalation_rate: Any) -> Dict[str, Any]: """ - Complejidad inversa del proceso (0–10). + Inverse complexity of the process (0–10). 
- 1) Base: inversa lineal de la variabilidad AHT (ratio P90/P50): + 1) Base: linear inverse de la variabilidad AHT (ratio P90/P50): - ratio = 1.0 -> 10 - ratio = 1.5 -> ~7.5 - ratio = 2.0 -> 5 @@ -345,12 +345,12 @@ def score_complejidad(aht_ratio: Any, formula_base = (3 - ratio) / (3 - 1) * 10, acotado a [0,10] - 2) Ajuste por escalación: + 2) Escalation adjustment: - restamos (escalation_rate / 5) puntos. - Nota: más score = proceso más "simple / automatizable". + Nota: higher score = process more "simple / automatizable". - Si no hay ni ratio ni escalación, la dimensión no se calcula. + If there is no ratio nor escalation, the dimension is not calculated. """ if aht_ratio is None or _is_nan(aht_ratio): ratio: Optional[float] = None @@ -366,36 +366,36 @@ def score_complejidad(aht_ratio: Any, return { "score": None, "computed": False, - "reason": "sin_datos", + "reason": "no_data", "details": { "aht_p90_p50_ratio": None, "escalation_rate_pct": None, }, } - # Base por variabilidad + # Base for variability if ratio is None: - base = 5.0 # fallback neutro - base_reason = "sin_ratio_usamos_valor_neutro" + base = 5.0 # neutral fallback + base_reason = "no_ratio_using_neutral_value" else: base_raw = (3.0 - ratio) / (3.0 - 1.0) * 10.0 base = _clamp(base_raw) - base_reason = "calculado_desde_ratio" + base_reason = "calculated_from_ratio" - # Ajuste por escalación + # Escalation adjustment if esc is None: adj = 0.0 - adj_reason = "sin_escalacion_sin_ajuste" + adj_reason = "no_escalation_no_adjustment" else: - adj = - (esc / 5.0) # cada 5 puntos de escalación resta 1 - adj_reason = "ajuste_por_escalacion" + adj = - (esc / 5.0) # every 5 escalation points subtract 1 + adj_reason = "escalation_adjustment" final_score = _clamp(base + adj) return { "score": final_score, "computed": True, - "reason": "complejidad_inversa", + "reason": "inverse_complexity", "details": { "aht_p90_p50_ratio": ratio, "escalation_rate_pct": esc, @@ -409,21 +409,21 @@ def score_complejidad(aht_ratio: 
Any, def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]: """ - Estabilidad del proceso basada en relación pico/off-peak. + Process stability based on peak/off-peak ratio. - Regla: - - 10 si ratio < 3 + Rule: + - 10 if ratio < 3 - 7 si 3–5 - 3 si 5–7 - 0 si > 7 - Si no hay dato de ratio, la dimensión no se calcula. + If there is no ratio data, the dimension is not calculated. """ if peak_offpeak_ratio is None or _is_nan(peak_offpeak_ratio): return { "score": None, "computed": False, - "reason": "sin_datos_peak_offpeak", + "reason": "no_peak_offpeak_data", "details": { "peak_offpeak_ratio": None, }, @@ -432,16 +432,16 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]: r = float(peak_offpeak_ratio) if r < 3.0: score = 10.0 - reason = "muy_estable" + reason = "very_stable" elif r < 5.0: score = 7.0 - reason = "estable_moderado" + reason = "moderately_stable" elif r < 7.0: score = 3.0 - reason = "pico_pronunciado" + reason = "pronounced_peak" else: score = 0.0 - reason = "muy_inestable" + reason = "very_unstable" return { "score": score, @@ -460,20 +460,20 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]: def score_roi(annual_savings: Any) -> Dict[str, Any]: """ - ROI potencial anual. + Annual potential ROI. - Regla: - - 10 si ahorro > 100k €/año - - 5 si 10k–100k €/año - - 0 si < 10k €/año + Rule: + - 10 if savings > 100k €/year + - 5 if 10k–100k €/year + - 0 if < 10k €/year - Si no hay dato de ahorro, la dimensión no se calcula. + If there is no savings data, the dimension is not calculated. 
""" if annual_savings is None or _is_nan(annual_savings): return { "score": None, "computed": False, - "reason": "sin_datos_ahorro", + "reason": "no_savings_data", "details": { "annual_savings_eur": None, }, @@ -482,13 +482,13 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]: savings = float(annual_savings) if savings > 100_000: score = 10.0 - reason = "roi_alto" + reason = "high_roi" elif savings >= 10_000: score = 5.0 - reason = "roi_medio" + reason = "medium_roi" else: score = 0.0 - reason = "roi_bajo" + reason = "low_roi" return { "score": score, @@ -506,20 +506,20 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]: def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]: """ - Clasificación final (alineada con frontend): - - ≥6: COPILOT 🤖 (Listo para Copilot) + Final classification (aligned with frontend): + - ≥6: COPILOT 🤖 (Ready for Copilot) - 4–5.99: OPTIMIZE 🔧 (Optimizar Primero) - <4: HUMAN 👤 (Requiere Gestión Humana) - Si score es None (ninguna dimensión disponible), devuelve NO_DATA. + If score is None (no dimension available), returns NO_DATA. """ if score is None: return { "label": "NO_DATA", "emoji": "❓", "description": ( - "No se ha podido calcular el Agentic Readiness Score porque " - "ninguna de las dimensiones tenía datos suficientes." + "Could not calculate the Agentic Readiness Score because " + "none of the dimensions had sufficient data." ), } @@ -527,22 +527,22 @@ def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]: label = "COPILOT" emoji = "🤖" description = ( - "Listo para Copilot. Procesos con predictibilidad y simplicidad " - "suficientes para asistencia IA (sugerencias en tiempo real, autocompletado)." + "Ready for Copilot. Processes with sufficient predictability and simplicity " + "for AI assistance (real-time suggestions, autocomplete)." ) elif score >= 4.0: label = "OPTIMIZE" emoji = "🔧" description = ( - "Optimizar primero. 
Estandarizar procesos y reducir variabilidad " - "antes de implementar asistencia IA." + "Optimize first. Standardize processes and reduce variability " + "before implementing AI assistance." ) else: label = "HUMAN" emoji = "👤" description = ( - "Requiere gestión humana. Procesos complejos o variables que " - "necesitan intervención humana antes de considerar automatización." + "Requires human management. Complex or variable processes that " + "need human intervention before considering automation." ) return { @@ -604,22 +604,22 @@ class AgenticScorer: def compute_from_data(self, data: Dict[str, Any]) -> Dict[str, Any]: """ - Calcula el Agentic Readiness Score a partir de un dict de datos. + Calculates the Agentic Readiness Score from a data dict. - Tolerante a datos faltantes: renormaliza pesos usando solo - dimensiones con `computed = True`. + Tolerant to missing data: renormalizes weights using only + dimensions with `computed = True`. - Compatibilidad con pipeline: - - Soporta tanto el formato antiguo: + Pipeline compatibility: + - Supports both the old format: "volume_by_skill": [10, 20, 30] - - como el nuevo: + - and the new: "volume_by_skill": {"labels": [...], "values": [10, 20, 30]} """ volumetry = data.get("volumetry", {}) op = data.get("operational_performance", {}) econ = data.get("economy_costs", {}) - # Normalizamos aquí los posibles formatos para contentar al type checker + # Normalize here the possible formats for the type checker volume_by_skill = _normalize_numeric_sequence( volumetry.get("volume_by_skill") ) @@ -650,7 +650,7 @@ class AgenticScorer: "roi": roi, } - # --- Renormalización de pesos sólo con dimensiones disponibles --- + # --- Weight renormalization only with available dimensions --- effective_weights: Dict[str, float] = {} for name, base_w in self.base_weights.items(): dim = sub_scores.get(name, {}) @@ -665,7 +665,7 @@ class AgenticScorer: else: normalized_weights = {} - # --- Score final --- + # --- Final score --- if not 
normalized_weights: final_score: Optional[float] = None else: @@ -692,8 +692,8 @@ class AgenticScorer: "metadata": { "source_module": "agentic_score.py", "notes": ( - "Modelo simplificado basado en KPIs agregados. " - "Renormaliza los pesos cuando faltan dimensiones." + "Simplified model based on aggregated KPIs. " + "Renormalizes weights when dimensions are missing." ), }, } @@ -710,11 +710,11 @@ class AgenticScorer: def run_on_folder(self, folder_path: Union[str, Path]) -> Dict[str, Any]: """ - Punto de entrada típico para el pipeline: - - Lee /results.json - - Calcula Agentic Readiness - - Escribe /agentic_readiness.json - - Devuelve el dict con el resultado + Typical pipeline entry point: + - Reads /results.json + - Calculates Agentic Readiness + - Writes /agentic_readiness.json + - Returns the dict with the result """ data = self.load_results(folder_path) result = self.compute_from_data(data) diff --git a/backend/beyond_metrics/dimensions/EconomyCost.py b/backend/beyond_metrics/dimensions/EconomyCost.py index 09261f0..f53f31a 100644 --- a/backend/beyond_metrics/dimensions/EconomyCost.py +++ b/backend/beyond_metrics/dimensions/EconomyCost.py @@ -23,17 +23,16 @@ REQUIRED_COLUMNS_ECON: List[str] = [ @dataclass class EconomyConfig: """ - Parámetros manuales para la dimensión de Economía y Costes. + Manual parameters for the Economy and Cost dimension. - - labor_cost_per_hour: coste total/hora de un agente (fully loaded). - - overhead_rate: % overhead variable (ej. 0.1 = 10% sobre labor). - - tech_costs_annual: coste anual de tecnología (licencias, infra, ...). - - automation_cpi: coste por interacción automatizada (ej. 0.15€). - - automation_volume_share: % del volumen automatizable (0-1). - - automation_success_rate: % éxito de la automatización (0-1). + - labor_cost_per_hour: total cost/hour of an agent (fully loaded). + - overhead_rate: % variable overhead (e.g. 0.1 = 10% over labor). 
+ - tech_costs_annual: annual technology cost (licenses, infrastructure, ...). + - automation_cpi: cost per automated interaction (e.g. 0.15€). + - automation_volume_share: % of automatable volume (0-1). + - automation_success_rate: % automation success (0-1). - - customer_segments: mapping opcional skill -> segmento ("high"/"medium"/"low") - para futuros insights de ROI por segmento. + - customer_segments: optional mapping skill -> segment ("high"/"medium"/"low") for future ROI insights by segment. """ labor_cost_per_hour: float @@ -48,20 +47,20 @@ class EconomyConfig: @dataclass class EconomyCostMetrics: """ - DIMENSIÓN 4: ECONOMÍA y COSTES + DIMENSION 4: ECONOMY and COSTS - Propósito: - - Cuantificar el COSTE actual (CPI, coste anual). - - Estimar el impacto de overhead y tecnología. - - Calcular un primer estimado de "coste de ineficiencia" y ahorro potencial. + Purpose: + - Quantify the current COST (CPI, annual cost). + - Estimate the impact of overhead and technology. + - Calculate an initial estimate of "inefficiency cost" and potential savings. - Requiere: - - Columnas del dataset transaccional (ver REQUIRED_COLUMNS_ECON). + Requires: + - Columns from the transactional dataset (see REQUIRED_COLUMNS_ECON). - Inputs opcionales vía EconomyConfig: - - labor_cost_per_hour (obligatorio para cualquier cálculo de €). + Optional inputs via EconomyConfig: + - labor_cost_per_hour (required for any € calculation). - overhead_rate, tech_costs_annual, automation_*. - - customer_segments (para insights de ROI por segmento). + - customer_segments (for ROI insights by segment). 
""" df: pd.DataFrame @@ -72,13 +71,13 @@ class EconomyCostMetrics: self._prepare_data() # ------------------------------------------------------------------ # - # Helpers internos + # Internal helpers # ------------------------------------------------------------------ # def _validate_columns(self) -> None: missing = [c for c in REQUIRED_COLUMNS_ECON if c not in self.df.columns] if missing: raise ValueError( - f"Faltan columnas obligatorias para EconomyCostMetrics: {missing}" + f"Missing required columns for EconomyCostMetrics: {missing}" ) def _prepare_data(self) -> None: @@ -97,15 +96,15 @@ class EconomyCostMetrics: df["duration_talk"].fillna(0) + df["hold_time"].fillna(0) + df["wrap_up_time"].fillna(0) - ) # segundos + ) # seconds - # Filtrar por record_status para cálculos de AHT/CPI - # Solo incluir registros VALID (excluir NOISE, ZOMBIE, ABANDON) + # Filter by record_status for AHT/CPI calculations + # Only include VALID records (exclude NOISE, ZOMBIE, ABANDON) if "record_status" in df.columns: df["record_status"] = df["record_status"].astype(str).str.strip().str.upper() df["_is_valid_for_cost"] = df["record_status"] == "VALID" else: - # Legacy data sin record_status: incluir todo + # Legacy data without record_status: include all df["_is_valid_for_cost"] = True self.df = df @@ -118,11 +117,11 @@ class EconomyCostMetrics: return self.config is not None and self.config.labor_cost_per_hour is not None # ------------------------------------------------------------------ # - # KPI 1: CPI por canal/skill + # KPI 1: CPI by channel/skill # ------------------------------------------------------------------ # def cpi_by_skill_channel(self) -> pd.DataFrame: """ - CPI (Coste Por Interacción) por skill/canal. + CPI (Cost Per Interaction) by skill/channel. 
CPI = (Labor_cost_per_interaction + Overhead_variable) / EFFECTIVE_PRODUCTIVITY @@ -130,19 +129,17 @@ class EconomyCostMetrics: - Overhead_variable = overhead_rate * Labor_cost_per_interaction - EFFECTIVE_PRODUCTIVITY = 0.70 (70% - accounts for non-productive time) - Excluye registros abandonados del cálculo de costes para consistencia - con el path del frontend (fresh CSV). + Excludes abandoned records from cost calculation for consistency with the frontend path (fresh CSV). - Si no hay config de costes -> devuelve DataFrame vacío. + If there is no cost config -> returns empty DataFrame. - Incluye queue_skill y channel como columnas (no solo índice) para que - el frontend pueda hacer lookup por nombre de skill. + Includes queue_skill and channel as columns (not just index) so that the frontend can lookup by skill name. """ if not self._has_cost_config(): return pd.DataFrame() cfg = self.config - assert cfg is not None # para el type checker + assert cfg is not None # for the type checker df = self.df.copy() if df.empty: @@ -154,15 +151,15 @@ class EconomyCostMetrics: else: df_cost = df - # Filtrar por record_status: solo VALID para cálculo de AHT - # Excluye NOISE, ZOMBIE, ABANDON + # Filter by record_status: only VALID for AHT calculation + # Excludes NOISE, ZOMBIE, ABANDON if "_is_valid_for_cost" in df_cost.columns: df_cost = df_cost[df_cost["_is_valid_for_cost"] == True] if df_cost.empty: return pd.DataFrame() - # AHT por skill/canal (en segundos) - solo registros VALID + # AHT by skill/channel (in seconds) - only VALID records grouped = df_cost.groupby(["queue_skill", "channel"])["handle_time"].mean() if grouped.empty: @@ -193,17 +190,16 @@ class EconomyCostMetrics: return out.sort_index().reset_index() # ------------------------------------------------------------------ # - # KPI 2: coste anual por skill/canal + # KPI 2: annual cost by skill/channel # ------------------------------------------------------------------ # def annual_cost_by_skill_channel(self) 
-> pd.DataFrame: """ - Coste anual por skill/canal. + Annual cost by skill/channel. - cost_annual = CPI * volumen (cantidad de interacciones de la muestra). + cost_annual = CPI * volume (number of interactions in the sample). - Nota: por simplicidad asumimos que el dataset refleja un periodo anual. - Si en el futuro quieres anualizar (ej. dataset = 1 mes) se puede añadir - un factor de escalado en EconomyConfig. + Note: for simplicity we assume the dataset reflects an annual period. + If in the future you want to annualize (e.g. dataset = 1 month) you can add a scaling factor in EconomyConfig. """ cpi_table = self.cpi_by_skill_channel() if cpi_table.empty: @@ -224,18 +220,18 @@ class EconomyCostMetrics: return joined # ------------------------------------------------------------------ # - # KPI 3: desglose de costes (labor / tech / overhead) + # KPI 3: cost breakdown (labor / tech / overhead) # ------------------------------------------------------------------ # def cost_breakdown(self) -> Dict[str, float]: """ - Desglose % de costes: labor, overhead, tech. + Cost breakdown %: labor, overhead, tech. labor_total = sum(labor_cost_per_interaction) overhead_total = labor_total * overhead_rate - tech_total = tech_costs_annual (si se ha proporcionado) + tech_total = tech_costs_annual (if provided) - Devuelve porcentajes sobre el total. - Si falta configuración de coste -> devuelve {}. + Returns percentages of the total. + If cost configuration is missing -> returns {}. 
""" if not self._has_cost_config(): return {} @@ -258,7 +254,7 @@ class EconomyCostMetrics: cpi_indexed = cpi_table.set_index(["queue_skill", "channel"]) joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0}) - # Costes anuales de labor y overhead + # Annual labor and overhead costs annual_labor = (joined["labor_cost"] * joined["volume"]).sum() annual_overhead = (joined["overhead_cost"] * joined["volume"]).sum() annual_tech = cfg.tech_costs_annual @@ -278,21 +274,21 @@ class EconomyCostMetrics: } # ------------------------------------------------------------------ # - # KPI 4: coste de ineficiencia (€ por variabilidad/escalación) + # KPI 4: inefficiency cost (€ by variability/escalation) # ------------------------------------------------------------------ # def inefficiency_cost_by_skill_channel(self) -> pd.DataFrame: """ - Estimación muy simplificada de coste de ineficiencia: + Very simplified estimate of inefficiency cost: - Para cada skill/canal: + For each skill/channel: - - AHT_p50, AHT_p90 (segundos). + - AHT_p50, AHT_p90 (seconds). - Delta = max(0, AHT_p90 - AHT_p50). - - Se asume que ~40% de las interacciones están por encima de la mediana. + - Assumes that ~40% of interactions are above the median. - Ineff_seconds = Delta * volume * 0.4 - Ineff_cost = LaborCPI_per_second * Ineff_seconds - NOTA: Es un modelo aproximado para cuantificar "orden de magnitud". + NOTE: This is an approximate model to quantify "order of magnitude". 
""" if not self._has_cost_config(): return pd.DataFrame() @@ -302,8 +298,8 @@ class EconomyCostMetrics: df = self.df.copy() - # Filtrar por record_status: solo VALID para cálculo de AHT - # Excluye NOISE, ZOMBIE, ABANDON + # Filter by record_status: only VALID for AHT calculation + # Excludes NOISE, ZOMBIE, ABANDON if "_is_valid_for_cost" in df.columns: df = df[df["_is_valid_for_cost"] == True] @@ -318,7 +314,7 @@ class EconomyCostMetrics: if stats.empty: return pd.DataFrame() - # CPI para obtener coste/segundo de labor + # CPI to get cost/second of labor # cpi_by_skill_channel now returns with reset_index, so we need to set index for join cpi_table_raw = self.cpi_by_skill_channel() if cpi_table_raw.empty: @@ -331,11 +327,11 @@ class EconomyCostMetrics: merged = merged.fillna(0.0) delta = (merged["aht_p90"] - merged["aht_p50"]).clip(lower=0.0) - affected_fraction = 0.4 # aproximación + affected_fraction = 0.4 # approximation ineff_seconds = delta * merged["volume"] * affected_fraction - # labor_cost = coste por interacción con AHT medio; - # aproximamos coste/segundo como labor_cost / AHT_medio + # labor_cost = cost per interaction with average AHT; + # approximate cost/second as labor_cost / average_AHT aht_mean = grouped["handle_time"].mean() merged["aht_mean"] = aht_mean @@ -351,21 +347,21 @@ class EconomyCostMetrics: return merged[["aht_p50", "aht_p90", "volume", "ineff_seconds", "ineff_cost"]].reset_index() # ------------------------------------------------------------------ # - # KPI 5: ahorro potencial anual por automatización + # KPI 5: potential annual savings from automation # ------------------------------------------------------------------ # def potential_savings(self) -> Dict[str, Any]: """ - Ahorro potencial anual basado en: + Potential annual savings based on: - Ahorro = (CPI_humano - CPI_automatizado) * Volumen_automatizable * Tasa_éxito + Savings = (Human_CPI - Automated_CPI) * Automatable_volume * Success_rate - Donde: - - CPI_humano = media 
ponderada de cpi_total. - - CPI_automatizado = config.automation_cpi - - Volumen_automatizable = volume_total * automation_volume_share - - Tasa_éxito = automation_success_rate + Where: + - Human_CPI = weighted average of cpi_total. + - Automated_CPI = config.automation_cpi + - Automatable_volume = volume_total * automation_volume_share + - Success_rate = automation_success_rate - Si faltan parámetros en config -> devuelve {}. + If config parameters are missing -> returns {}. """ if not self._has_cost_config(): return {} @@ -384,7 +380,7 @@ class EconomyCostMetrics: if total_volume <= 0: return {} - # CPI humano medio ponderado + # Weighted average human CPI weighted_cpi = ( (cpi_table["cpi_total"] * cpi_table["volume"]).sum() / total_volume ) @@ -409,12 +405,12 @@ class EconomyCostMetrics: # ------------------------------------------------------------------ # def plot_cost_waterfall(self) -> Axes: """ - Waterfall de costes anuales (labor + tech + overhead). + Waterfall of annual costs (labor + tech + overhead). """ breakdown = self.cost_breakdown() if not breakdown: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center") + ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center") ax.set_axis_off() return ax @@ -436,14 +432,14 @@ class EconomyCostMetrics: bottoms.append(running) running += v - # barras estilo waterfall + # waterfall style bars x = np.arange(len(labels)) ax.bar(x, values) ax.set_xticks(x) ax.set_xticklabels(labels) - ax.set_ylabel("€ anuales") - ax.set_title("Desglose anual de costes") + ax.set_ylabel("€ annual") + ax.set_title("Annual cost breakdown") for idx, v in enumerate(values): ax.text(idx, v, f"{v:,.0f}", ha="center", va="bottom") @@ -454,12 +450,12 @@ class EconomyCostMetrics: def plot_cpi_by_channel(self) -> Axes: """ - Gráfico de barras de CPI medio por canal. + Bar chart of average CPI by channel. 
""" cpi_table = self.cpi_by_skill_channel() if cpi_table.empty: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center") + ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center") ax.set_axis_off() return ax @@ -474,7 +470,7 @@ class EconomyCostMetrics: cpi_indexed = cpi_table.set_index(["queue_skill", "channel"]) joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0}) - # CPI medio ponderado por canal + # Weighted average CPI by channel per_channel = ( joined.reset_index() .groupby("channel") @@ -486,9 +482,9 @@ class EconomyCostMetrics: fig, ax = plt.subplots(figsize=(6, 4)) per_channel.plot(kind="bar", ax=ax) - ax.set_xlabel("Canal") - ax.set_ylabel("CPI medio (€)") - ax.set_title("Coste por interacción (CPI) por canal") + ax.set_xlabel("Channel") + ax.set_ylabel("Average CPI (€)") + ax.set_title("Cost per interaction (CPI) by channel") ax.grid(axis="y", alpha=0.3) return ax diff --git a/backend/beyond_metrics/dimensions/OperationalPerformance.py b/backend/beyond_metrics/dimensions/OperationalPerformance.py index db0a2e9..d99d455 100644 --- a/backend/beyond_metrics/dimensions/OperationalPerformance.py +++ b/backend/beyond_metrics/dimensions/OperationalPerformance.py @@ -25,32 +25,31 @@ REQUIRED_COLUMNS_OP: List[str] = [ @dataclass class OperationalPerformanceMetrics: """ - Dimensión: RENDIMIENTO OPERACIONAL Y DE SERVICIO + Dimension: OPERATIONAL PERFORMANCE AND SERVICE - Propósito: medir el balance entre rapidez (eficiencia) y calidad de resolución, - más la variabilidad del servicio. + Purpose: measure the balance between speed (efficiency) and resolution quality, plus service variability. 
- Requiere como mínimo: + Requires at minimum: - interaction_id - datetime_start - queue_skill - channel - - duration_talk (segundos) - - hold_time (segundos) - - wrap_up_time (segundos) + - duration_talk (seconds) + - hold_time (seconds) + - wrap_up_time (seconds) - agent_id - transfer_flag (bool/int) - Columnas opcionales: - - is_resolved (bool/int) -> para FCR - - abandoned_flag (bool/int) -> para tasa de abandono - - customer_id / caller_id -> para reincidencia y repetición de canal - - logged_time (segundos) -> para occupancy_rate + Optional columns: + - is_resolved (bool/int) -> for FCR + - abandoned_flag (bool/int) -> for abandonment rate + - customer_id / caller_id -> for recurrence and channel repetition + - logged_time (seconds) -> for occupancy_rate """ df: pd.DataFrame - # Benchmarks / parámetros de normalización (puedes ajustarlos) + # Benchmarks / normalization parameters (you can adjust them) AHT_GOOD: float = 300.0 # 5 min AHT_BAD: float = 900.0 # 15 min VAR_RATIO_GOOD: float = 1.2 # P90/P50 ~1.2 muy estable @@ -61,19 +60,19 @@ class OperationalPerformanceMetrics: self._prepare_data() # ------------------------------------------------------------------ # - # Helpers internos + # Internal helpers # ------------------------------------------------------------------ # def _validate_columns(self) -> None: missing = [c for c in REQUIRED_COLUMNS_OP if c not in self.df.columns] if missing: raise ValueError( - f"Faltan columnas obligatorias para OperationalPerformanceMetrics: {missing}" + f"Missing required columns for OperationalPerformanceMetrics: {missing}" ) def _prepare_data(self) -> None: df = self.df.copy() - # Tipos + # Types df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce") for col in ["duration_talk", "hold_time", "wrap_up_time"]: @@ -86,13 +85,13 @@ class OperationalPerformanceMetrics: + df["wrap_up_time"].fillna(0) ) - # v3.0: Filtrar NOISE y ZOMBIE para cálculos de variabilidad + # v3.0: Filter NOISE and ZOMBIE for 
variability calculations # record_status: 'VALID', 'NOISE', 'ZOMBIE', 'ABANDON' - # Para AHT/CV solo usamos 'VALID' (excluye noise, zombie, abandon) + # For AHT/CV we only use 'VALID' (excludes noise, zombie, abandon) if "record_status" in df.columns: df["record_status"] = df["record_status"].astype(str).str.strip().str.upper() - # Crear máscara para registros válidos: SOLO "VALID" - # Excluye explícitamente NOISE, ZOMBIE, ABANDON y cualquier otro valor + # Create mask for valid records: ONLY "VALID" + # Explicitly excludes NOISE, ZOMBIE, ABANDON and any other value df["_is_valid_for_cv"] = df["record_status"] == "VALID" # Log record_status breakdown for debugging @@ -104,21 +103,21 @@ class OperationalPerformanceMetrics: print(f" - {status}: {count}") print(f" VALID rows for AHT calculation: {valid_count}") else: - # Legacy data sin record_status: incluir todo + # Legacy data without record_status: include all df["_is_valid_for_cv"] = True print(f"[OperationalPerformance] No record_status column - using all {len(df)} rows") - # Normalización básica + # Basic normalization df["queue_skill"] = df["queue_skill"].astype(str).str.strip() df["channel"] = df["channel"].astype(str).str.strip() df["agent_id"] = df["agent_id"].astype(str).str.strip() - # Flags opcionales convertidos a bool cuando existan + # Optional flags converted to bool when they exist for flag_col in ["is_resolved", "abandoned_flag", "transfer_flag"]: if flag_col in df.columns: df[flag_col] = df[flag_col].astype(int).astype(bool) - # customer_id: usamos customer_id si existe, si no caller_id + # customer_id: we use customer_id if it exists, otherwise caller_id if "customer_id" in df.columns: df["customer_id"] = df["customer_id"].astype(str) elif "caller_id" in df.columns: @@ -126,8 +125,8 @@ class OperationalPerformanceMetrics: else: df["customer_id"] = None - # logged_time opcional - # Normalizamos logged_time: siempre será una serie float con NaN si no existe + # logged_time optional + # Normalize 
logged_time: will always be a float series with NaN if it does not exist df["logged_time"] = pd.to_numeric(df.get("logged_time", np.nan), errors="coerce") @@ -138,16 +137,16 @@ class OperationalPerformanceMetrics: return self.df.empty # ------------------------------------------------------------------ # - # AHT y variabilidad + # AHT and variability # ------------------------------------------------------------------ # def aht_distribution(self) -> Dict[str, float]: """ - Devuelve P10, P50, P90 del AHT y el ratio P90/P50 como medida de variabilidad. + Returns P10, P50, P90 of AHT and the P90/P50 ratio as a measure of variability. - v3.0: Filtra NOISE y ZOMBIE para el cálculo de variabilidad. - Solo usa registros con record_status='valid' o sin status (legacy). + v3.0: Filters NOISE and ZOMBIE for variability calculation. + Only uses records with record_status='valid' or without status (legacy). """ - # Filtrar solo registros válidos para cálculo de variabilidad + # Filter only valid records for variability calculation df_valid = self.df[self.df["_is_valid_for_cv"] == True] ht = df_valid["handle_time"].dropna().astype(float) if ht.empty: @@ -167,10 +166,9 @@ class OperationalPerformanceMetrics: def talk_hold_acw_p50_by_skill(self) -> pd.DataFrame: """ - P50 de talk_time, hold_time y wrap_up_time por skill. + P50 of talk_time, hold_time and wrap_up_time by skill. - Incluye queue_skill como columna (no solo índice) para que - el frontend pueda hacer lookup por nombre de skill. + Includes queue_skill as a column (not just index) so that the frontend can lookup by skill name. 
""" df = self.df @@ -192,24 +190,24 @@ class OperationalPerformanceMetrics: return result.round(2).sort_index().reset_index() # ------------------------------------------------------------------ # - # FCR, escalación, abandono, reincidencia, repetición canal + # FCR, escalation, abandonment, recurrence, channel repetition # ------------------------------------------------------------------ # def fcr_rate(self) -> float: """ FCR (First Contact Resolution). - Prioridad 1: Usar fcr_real_flag del CSV si existe - Prioridad 2: Calcular como 100 - escalation_rate + Priority 1: Use fcr_real_flag from CSV if it exists + Priority 2: Calculate as 100 - escalation_rate """ df = self.df total = len(df) if total == 0: return float("nan") - # Prioridad 1: Usar fcr_real_flag si existe + # Priority 1: Use fcr_real_flag if it exists if "fcr_real_flag" in df.columns: col = df["fcr_real_flag"] - # Normalizar a booleano + # Normalize to boolean if col.dtype == "O": fcr_mask = ( col.astype(str) @@ -224,7 +222,7 @@ class OperationalPerformanceMetrics: fcr = (fcr_count / total) * 100.0 return float(max(0.0, min(100.0, round(fcr, 2)))) - # Prioridad 2: Fallback a 100 - escalation_rate + # Priority 2: Fallback to 100 - escalation_rate try: esc = self.escalation_rate() except Exception: @@ -239,7 +237,7 @@ class OperationalPerformanceMetrics: def escalation_rate(self) -> float: """ - % de interacciones que requieren escalación (transfer_flag == True). + % of interactions that require escalation (transfer_flag == True). """ df = self.df total = len(df) @@ -251,17 +249,17 @@ class OperationalPerformanceMetrics: def abandonment_rate(self) -> float: """ - % de interacciones abandonadas. + % of abandoned interactions. - Busca en orden: is_abandoned, abandoned_flag, abandoned - Si ninguna columna existe, devuelve NaN. + Searches in order: is_abandoned, abandoned_flag, abandoned + If no column exists, returns NaN. 
""" df = self.df total = len(df) if total == 0: return float("nan") - # Buscar columna de abandono en orden de prioridad + # Search for abandonment column in priority order abandon_col = None for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]: if col_name in df.columns: @@ -273,7 +271,7 @@ class OperationalPerformanceMetrics: col = df[abandon_col] - # Normalizar a booleano + # Normalize to boolean if col.dtype == "O": abandon_mask = ( col.astype(str) @@ -289,10 +287,9 @@ class OperationalPerformanceMetrics: def high_hold_time_rate(self, threshold_seconds: float = 60.0) -> float: """ - % de interacciones con hold_time > threshold (por defecto 60s). + % of interactions with hold_time > threshold (default 60s). - Proxy de complejidad: si el agente tuvo que poner en espera al cliente - más de 60 segundos, probablemente tuvo que consultar/investigar. + Complexity proxy: if the agent had to put the customer on hold for more than 60 seconds, they probably had to consult/investigate. """ df = self.df total = len(df) @@ -306,44 +303,43 @@ class OperationalPerformanceMetrics: def recurrence_rate_7d(self) -> float: """ - % de clientes que vuelven a contactar en < 7 días para el MISMO skill. + % of customers who contact again in < 7 days for the SAME skill. - Se basa en customer_id (o caller_id si no hay customer_id) + queue_skill. - Calcula: - - Para cada combinación cliente + skill, ordena por datetime_start - - Si hay dos contactos consecutivos separados < 7 días (mismo cliente, mismo skill), - cuenta como "recurrente" - - Tasa = nº clientes recurrentes / nº total de clientes + Based on customer_id (or caller_id if no customer_id) + queue_skill. 
+ Calculates: + - For each client + skill combination, sorts by datetime_start + - If there are two consecutive contacts separated by < 7 days (same client, same skill), counts as "recurrent" + - Rate = number of recurrent clients / total number of clients - NOTA: Solo cuenta como recurrencia si el cliente llama por el MISMO skill. - Un cliente que llama a "Ventas" y luego a "Soporte" NO es recurrente. + NOTE: Only counts as recurrence if the client calls for the SAME skill. + A client who calls "Sales" and then "Support" is NOT recurrent. """ df = self.df.dropna(subset=["datetime_start"]).copy() - # Normalizar identificador de cliente + # Normalize client identifier if "customer_id" not in df.columns: if "caller_id" in df.columns: df["customer_id"] = df["caller_id"] else: - # No hay identificador de cliente -> no se puede calcular + # No client identifier -> cannot calculate return float("nan") df = df.dropna(subset=["customer_id"]) if df.empty: return float("nan") - # Ordenar por cliente + skill + fecha + # Sort by client + skill + date df = df.sort_values(["customer_id", "queue_skill", "datetime_start"]) - # Diferencia de tiempo entre contactos consecutivos por cliente Y skill - # Esto asegura que solo contamos recontactos del mismo cliente para el mismo skill + # Time difference between consecutive contacts by client AND skill + # This ensures we only count re-contacts from the same client for the same skill df["delta"] = df.groupby(["customer_id", "queue_skill"])["datetime_start"].diff() - # Marcamos los contactos que ocurren a menos de 7 días del anterior (mismo skill) + # Mark contacts that occur less than 7 days from the previous one (same skill) recurrence_mask = df["delta"] < pd.Timedelta(days=7) - # Nº de clientes que tienen al menos un contacto recurrente (para cualquier skill) + # Number of clients who have at least one recurrent contact (for any skill) recurrent_customers = df.loc[recurrence_mask, "customer_id"].nunique() total_customers = 
df["customer_id"].nunique() @@ -356,9 +352,9 @@ class OperationalPerformanceMetrics: def repeat_channel_rate(self) -> float: """ - % de reincidencias (<7 días) en las que el cliente usa el MISMO canal. + % of recurrences (<7 days) in which the client uses the SAME channel. - Si no hay customer_id/caller_id o solo un contacto por cliente, devuelve NaN. + If there is no customer_id/caller_id or only one contact per client, returns NaN. """ df = self.df.dropna(subset=["datetime_start"]).copy() if df["customer_id"].isna().all(): @@ -387,11 +383,11 @@ class OperationalPerformanceMetrics: # ------------------------------------------------------------------ # def occupancy_rate(self) -> float: """ - Tasa de ocupación: + Occupancy rate: occupancy = sum(handle_time) / sum(logged_time) * 100. - Requiere columna 'logged_time'. Si no existe o es todo 0, devuelve NaN. + Requires 'logged_time' column. If it does not exist or is all 0, returns NaN. """ df = self.df if "logged_time" not in df.columns: @@ -408,23 +404,23 @@ class OperationalPerformanceMetrics: return float(round(occ * 100, 2)) # ------------------------------------------------------------------ # - # Score de rendimiento 0-10 + # Performance score 0-10 # ------------------------------------------------------------------ # def performance_score(self) -> Dict[str, float]: """ - Calcula un score 0-10 combinando: - - AHT (bajo es mejor) - - FCR (alto es mejor) - - Variabilidad (P90/P50, bajo es mejor) - - Otros factores (ocupación / escalación) + Calculates a 0-10 score combining: + - AHT (lower is better) + - FCR (higher is better) + - Variability (P90/P50, lower is better) + - Other factors (occupancy / escalation) - Fórmula: + Formula: score = 0.4 * (10 - AHT_norm) + 0.3 * FCR_norm + 0.2 * (10 - Var_norm) + 0.1 * Otros_score - Donde *_norm son valores en escala 0-10. + Where *_norm are values on a 0-10 scale. 
""" dist = self.aht_distribution() if not dist: @@ -433,15 +429,15 @@ class OperationalPerformanceMetrics: p50 = dist["p50"] ratio = dist["p90_p50_ratio"] - # AHT_normalized: 0 (mejor) a 10 (peor) + # AHT_normalized: 0 (better) to 10 (worse) aht_norm = self._scale_to_0_10(p50, self.AHT_GOOD, self.AHT_BAD) - # FCR_normalized: 0-10 directamente desde % (0-100) + # FCR_normalized: 0-10 directly from % (0-100) fcr_pct = self.fcr_rate() fcr_norm = fcr_pct / 10.0 if not np.isnan(fcr_pct) else 0.0 - # Variabilidad_normalized: 0 (ratio bueno) a 10 (ratio malo) + # Variability_normalized: 0 (good ratio) to 10 (bad ratio) var_norm = self._scale_to_0_10(ratio, self.VAR_RATIO_GOOD, self.VAR_RATIO_BAD) - # Otros factores: combinamos ocupación (ideal ~80%) y escalación (ideal baja) + # Other factors: combine occupancy (ideal ~80%) and escalation (ideal low) occ = self.occupancy_rate() esc = self.escalation_rate() @@ -467,26 +463,26 @@ class OperationalPerformanceMetrics: def _scale_to_0_10(self, value: float, good: float, bad: float) -> float: """ - Escala linealmente un valor: + Linearly scales a value: - good -> 0 - bad -> 10 - Con saturación fuera de rango. + With saturation outside range. 
""" if np.isnan(value): - return 5.0 # neutro + return 5.0 # neutral if good == bad: return 5.0 if good < bad: - # Menor es mejor + # Lower is better if value <= good: return 0.0 if value >= bad: return 10.0 return 10.0 * (value - good) / (bad - good) else: - # Mayor es mejor + # Higher is better if value >= good: return 0.0 if value <= bad: @@ -495,19 +491,19 @@ class OperationalPerformanceMetrics: def _compute_other_factors_score(self, occ_pct: float, esc_pct: float) -> float: """ - Otros factores (0-10) basados en: - - ocupación ideal alrededor de 80% - - tasa de escalación ideal baja (<10%) + Other factors (0-10) based on: + - ideal occupancy around 80% + - ideal escalation rate low (<10%) """ - # Ocupación: 0 penalización si está entre 75-85, se penaliza fuera + # Occupancy: 0 penalty if between 75-85, penalized outside if np.isnan(occ_pct): occ_penalty = 5.0 else: deviation = abs(occ_pct - 80.0) - occ_penalty = min(10.0, deviation / 5.0 * 2.0) # cada 5 puntos se suman 2, máx 10 + occ_penalty = min(10.0, deviation / 5.0 * 2.0) # each 5 points add 2, max 10 occ_score = max(0.0, 10.0 - occ_penalty) - # Escalación: 0-10 donde 0% -> 10 puntos, >=40% -> 0 + # Escalation: 0-10 where 0% -> 10 points, >=40% -> 0 if np.isnan(esc_pct): esc_score = 5.0 else: @@ -518,7 +514,7 @@ class OperationalPerformanceMetrics: else: esc_score = 10.0 * (1.0 - esc_pct / 40.0) - # Media simple de ambos + # Simple average of both return (occ_score + esc_score) / 2.0 # ------------------------------------------------------------------ # @@ -526,29 +522,29 @@ class OperationalPerformanceMetrics: # ------------------------------------------------------------------ # def plot_aht_boxplot_by_skill(self) -> Axes: """ - Boxplot del AHT por skill (P10-P50-P90 visual). + Boxplot of AHT by skill (P10-P50-P90 visual). 
""" df = self.df.copy() if df.empty or "handle_time" not in df.columns: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos de AHT", ha="center", va="center") + ax.text(0.5, 0.5, "No AHT data", ha="center", va="center") ax.set_axis_off() return ax df = df.dropna(subset=["handle_time"]) if df.empty: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "AHT no disponible", ha="center", va="center") + ax.text(0.5, 0.5, "AHT not available", ha="center", va="center") ax.set_axis_off() return ax fig, ax = plt.subplots(figsize=(8, 4)) df.boxplot(column="handle_time", by="queue_skill", ax=ax, showfliers=False) - ax.set_xlabel("Skill / Cola") - ax.set_ylabel("AHT (segundos)") - ax.set_title("Distribución de AHT por skill") + ax.set_xlabel("Skill / Queue") + ax.set_ylabel("AHT (seconds)") + ax.set_title("AHT distribution by skill") plt.suptitle("") plt.xticks(rotation=45, ha="right") ax.grid(axis="y", alpha=0.3) @@ -557,14 +553,14 @@ class OperationalPerformanceMetrics: def plot_resolution_funnel_by_skill(self) -> Axes: """ - Funnel / barras apiladas de Talk + Hold + ACW por skill (P50). + Funnel / stacked bars of Talk + Hold + ACW by skill (P50). - Permite ver el equilibrio de tiempos por skill. + Allows viewing the time balance by skill. 
""" p50 = self.talk_hold_acw_p50_by_skill() if p50.empty: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos para funnel", ha="center", va="center") + ax.text(0.5, 0.5, "No data for funnel", ha="center", va="center") ax.set_axis_off() return ax @@ -583,27 +579,26 @@ class OperationalPerformanceMetrics: ax.set_xticks(x) ax.set_xticklabels(skills, rotation=45, ha="right") - ax.set_ylabel("Segundos") - ax.set_title("Funnel de resolución (P50) por skill") + ax.set_ylabel("Seconds") + ax.set_title("Resolution funnel (P50) by skill") ax.legend() ax.grid(axis="y", alpha=0.3) return ax # ------------------------------------------------------------------ # - # Métricas por skill (para consistencia frontend cached/fresh) + # Metrics by skill (for frontend cached/fresh consistency) # ------------------------------------------------------------------ # def metrics_by_skill(self) -> List[Dict[str, Any]]: """ - Calcula métricas operacionales por skill: - - transfer_rate: % de interacciones con transfer_flag == True - - abandonment_rate: % de interacciones abandonadas - - fcr_tecnico: 100 - transfer_rate (sin transferencia) - - fcr_real: % sin transferencia Y sin recontacto 7d (si hay datos) - - volume: número de interacciones + Calculates operational metrics by skill: + - transfer_rate: % of interactions with transfer_flag == True + - abandonment_rate: % of abandoned interactions + - fcr_tecnico: 100 - transfer_rate (without transfer) + - fcr_real: % without transfer AND without 7d re-contact (if there is data) + - volume: number of interactions - Devuelve una lista de dicts, uno por skill, para que el frontend - tenga acceso a las métricas reales por skill (no estimadas). + Returns a list of dicts, one per skill, so that the frontend has access to real metrics by skill (not estimated). 
""" df = self.df if df.empty: @@ -611,14 +606,14 @@ class OperationalPerformanceMetrics: results = [] - # Detectar columna de abandono + # Detect abandonment column abandon_col = None for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]: if col_name in df.columns: abandon_col = col_name break - # Detectar columna de repeat_call_7d para FCR real + # Detect repeat_call_7d column for real FCR repeat_col = None for col_name in ["repeat_call_7d", "repeat_7d", "is_repeat_7d"]: if col_name in df.columns: @@ -637,7 +632,7 @@ class OperationalPerformanceMetrics: else: transfer_rate = 0.0 - # FCR Técnico = 100 - transfer_rate + # Technical FCR = 100 - transfer_rate fcr_tecnico = float(round(100.0 - transfer_rate, 2)) # Abandonment rate @@ -656,7 +651,7 @@ class OperationalPerformanceMetrics: abandoned = int(abandon_mask.sum()) abandonment_rate = float(round(abandoned / total * 100, 2)) - # FCR Real (sin transferencia Y sin recontacto 7d) + # Real FCR (without transfer AND without 7d re-contact) fcr_real = fcr_tecnico # default to fcr_tecnico if no repeat data if repeat_col and "transfer_flag" in group.columns: repeat_data = group[repeat_col] @@ -670,13 +665,13 @@ class OperationalPerformanceMetrics: else: repeat_mask = pd.to_numeric(repeat_data, errors="coerce").fillna(0) > 0 - # FCR Real: no transfer AND no repeat + # Real FCR: no transfer AND no repeat fcr_real_mask = (~group["transfer_flag"]) & (~repeat_mask) fcr_real_count = fcr_real_mask.sum() fcr_real = float(round(fcr_real_count / total * 100, 2)) - # AHT Mean (promedio de handle_time sobre registros válidos) - # Filtramos solo registros 'valid' (excluye noise/zombie) para consistencia + # AHT Mean (average of handle_time over valid records) + # Filter only 'valid' records (excludes noise/zombie) for consistency if "_is_valid_for_cv" in group.columns: valid_records = group[group["_is_valid_for_cv"]] else: @@ -687,15 +682,15 @@ class OperationalPerformanceMetrics: else: aht_mean = 0.0 - # AHT Total (promedio 
de handle_time sobre TODOS los registros) - # Incluye NOISE, ZOMBIE, ABANDON - solo para información/comparación + # AHT Total (average of handle_time over ALL records) + # Includes NOISE, ZOMBIE, ABANDON - for information/comparison only if len(group) > 0 and "handle_time" in group.columns: aht_total = float(round(group["handle_time"].mean(), 2)) else: aht_total = 0.0 - # Hold Time Mean (promedio de hold_time sobre registros válidos) - # Consistente con fresh path que usa MEAN, no P50 + # Hold Time Mean (average of hold_time over valid records) + # Consistent with fresh path that uses MEAN, not P50 if len(valid_records) > 0 and "hold_time" in valid_records.columns: hold_time_mean = float(round(valid_records["hold_time"].mean(), 2)) else: diff --git a/backend/beyond_metrics/dimensions/SatisfactionExperience.py b/backend/beyond_metrics/dimensions/SatisfactionExperience.py index 59a78bb..d377857 100644 --- a/backend/beyond_metrics/dimensions/SatisfactionExperience.py +++ b/backend/beyond_metrics/dimensions/SatisfactionExperience.py @@ -24,11 +24,10 @@ REQUIRED_COLUMNS_SAT: List[str] = [ @dataclass class SatisfactionExperienceMetrics: """ - Dimensión 3: SATISFACCIÓN y EXPERIENCIA + Dimension 3: SATISFACTION and EXPERIENCE - Todas las columnas de satisfacción (csat/nps/ces/aht) son OPCIONALES. - Si no están, las métricas que las usan devuelven vacío/NaN pero - nunca rompen el pipeline. + All satisfaction columns (csat/nps/ces/aht) are OPTIONAL. + If they are not present, the metrics that use them return empty/NaN but never break the pipeline. 
""" df: pd.DataFrame @@ -44,7 +43,7 @@ class SatisfactionExperienceMetrics: missing = [c for c in REQUIRED_COLUMNS_SAT if c not in self.df.columns] if missing: raise ValueError( - f"Faltan columnas obligatorias para SatisfactionExperienceMetrics: {missing}" + f"Missing required columns for SatisfactionExperienceMetrics: {missing}" ) def _prepare_data(self) -> None: @@ -52,7 +51,7 @@ class SatisfactionExperienceMetrics: df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce") - # Duraciones base siempre existen + # Base durations always exist for col in ["duration_talk", "hold_time", "wrap_up_time"]: df[col] = pd.to_numeric(df[col], errors="coerce") @@ -63,16 +62,16 @@ class SatisfactionExperienceMetrics: + df["wrap_up_time"].fillna(0) ) - # csat_score opcional + # csat_score optional df["csat_score"] = pd.to_numeric(df.get("csat_score", np.nan), errors="coerce") - # aht opcional: si existe columna explícita la usamos, si no usamos handle_time + # aht optional: if explicit column exists we use it, otherwise we use handle_time if "aht" in df.columns: df["aht"] = pd.to_numeric(df["aht"], errors="coerce") else: df["aht"] = df["handle_time"] - # NPS / CES opcionales + # NPS / CES optional df["nps_score"] = pd.to_numeric(df.get("nps_score", np.nan), errors="coerce") df["ces_score"] = pd.to_numeric(df.get("ces_score", np.nan), errors="coerce") @@ -90,8 +89,8 @@ class SatisfactionExperienceMetrics: # ------------------------------------------------------------------ # def csat_avg_by_skill_channel(self) -> pd.DataFrame: """ - CSAT promedio por skill/canal. - Si no hay csat_score, devuelve DataFrame vacío. + Average CSAT by skill/channel. + If there is no csat_score, returns empty DataFrame. """ df = self.df if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0: @@ -115,7 +114,7 @@ class SatisfactionExperienceMetrics: def nps_avg_by_skill_channel(self) -> pd.DataFrame: """ - NPS medio por skill/canal, si existe nps_score. 
+ Average NPS by skill/channel, if nps_score exists. """ df = self.df if "nps_score" not in df.columns or df["nps_score"].notna().sum() == 0: @@ -139,7 +138,7 @@ class SatisfactionExperienceMetrics: def ces_avg_by_skill_channel(self) -> pd.DataFrame: """ - CES medio por skill/canal, si existe ces_score. + Average CES by skill/channel, if ces_score exists. """ df = self.df if "ces_score" not in df.columns or df["ces_score"].notna().sum() == 0: @@ -163,11 +162,11 @@ class SatisfactionExperienceMetrics: def csat_global(self) -> float: """ - CSAT medio global (todas las interacciones). + Global average CSAT (all interactions). - Usa la columna opcional `csat_score`: - - Si no existe, devuelve NaN. - - Si todos los valores son NaN / vacíos, devuelve NaN. + Uses the optional `csat_score` column: + - If it does not exist, returns NaN. + - If all values are NaN / empty, returns NaN. """ df = self.df if "csat_score" not in df.columns: @@ -183,8 +182,8 @@ class SatisfactionExperienceMetrics: def csat_aht_correlation(self) -> Dict[str, Any]: """ - Correlación Pearson CSAT vs AHT. - Si falta csat o aht, o no hay varianza, devuelve NaN y código adecuado. + Pearson correlation CSAT vs AHT. + If csat or aht is missing, or there is no variance, returns NaN and appropriate code. """ df = self.df if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0: @@ -216,8 +215,8 @@ class SatisfactionExperienceMetrics: def csat_aht_skill_summary(self) -> pd.DataFrame: """ - Resumen por skill con clasificación del "sweet spot". - Si falta csat o aht, devuelve DataFrame vacío. + Summary by skill with "sweet spot" classification. + If csat or aht is missing, returns empty DataFrame. """ df = self.df if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0: @@ -258,20 +257,20 @@ class SatisfactionExperienceMetrics: # ------------------------------------------------------------------ # def plot_csat_vs_aht_scatter(self) -> Axes: """ - Scatter CSAT vs AHT por skill. 
- Si no hay datos suficientes, devuelve un Axes con mensaje. + Scatter CSAT vs AHT by skill. + If there is insufficient data, returns an Axes with message. """ df = self.df if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center") + ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center") ax.set_axis_off() return ax df = df.dropna(subset=["csat_score", "aht"]).copy() if df.empty: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center") + ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center") ax.set_axis_off() return ax @@ -280,9 +279,9 @@ class SatisfactionExperienceMetrics: for skill, sub in df.groupby("queue_skill"): ax.scatter(sub["aht"], sub["csat_score"], label=skill, alpha=0.7) - ax.set_xlabel("AHT (segundos)") + ax.set_xlabel("AHT (seconds)") ax.set_ylabel("CSAT") - ax.set_title("CSAT vs AHT por skill") + ax.set_title("CSAT vs AHT by skill") ax.grid(alpha=0.3) ax.legend(title="Skill", bbox_to_anchor=(1.05, 1), loc="upper left") @@ -291,28 +290,28 @@ class SatisfactionExperienceMetrics: def plot_csat_distribution(self) -> Axes: """ - Histograma de CSAT. - Si no hay csat_score, devuelve un Axes con mensaje. + CSAT histogram. + If there is no csat_score, returns an Axes with message. 
""" df = self.df if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos de CSAT", ha="center", va="center") + ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center") ax.set_axis_off() return ax df = df.dropna(subset=["csat_score"]).copy() if df.empty: fig, ax = plt.subplots() - ax.text(0.5, 0.5, "Sin datos de CSAT", ha="center", va="center") + ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center") ax.set_axis_off() return ax fig, ax = plt.subplots(figsize=(6, 4)) ax.hist(df["csat_score"], bins=10, alpha=0.7) ax.set_xlabel("CSAT") - ax.set_ylabel("Frecuencia") - ax.set_title("Distribución de CSAT") + ax.set_ylabel("Frequency") + ax.set_title("CSAT distribution") ax.grid(axis="y", alpha=0.3) return ax diff --git a/frontend/components/tabs/AgenticReadinessTab.tsx b/frontend/components/tabs/AgenticReadinessTab.tsx index 246d85e..95871d8 100644 --- a/frontend/components/tabs/AgenticReadinessTab.tsx +++ b/frontend/components/tabs/AgenticReadinessTab.tsx @@ -25,7 +25,7 @@ import { // RED FLAGS CONFIGURATION AND DETECTION // ============================================ -// v3.5: Configuración de Red Flags +// v3.5: Red Flags Configuration interface RedFlagConfig { id: string; label: string; @@ -41,51 +41,51 @@ interface RedFlagConfig { const RED_FLAG_CONFIGS: RedFlagConfig[] = [ { id: 'cv_high', - label: 'CV AHT Crítico', + label: 'Critical AHT CV', shortLabel: 'CV', threshold: 120, operator: '>', getValue: (q) => q.cv_aht, format: (v) => `${v.toFixed(0)}%`, color: 'red', - description: 'Variabilidad extrema - procesos impredecibles' + description: 'Extreme variability - unpredictable processes' }, { id: 'transfer_high', - label: 'Transfer Excesivo', + label: 'Excessive Transfer', shortLabel: 'Transfer', threshold: 50, operator: '>', getValue: (q) => q.transfer_rate, format: (v) => `${v.toFixed(0)}%`, color: 'orange', - description: 'Alta complejidad - requiere escalado frecuente' + 
description: 'High complexity - requires frequent escalation' }, { id: 'volume_low', - label: 'Volumen Insuficiente', + label: 'Insufficient Volume', shortLabel: 'Vol', threshold: 50, operator: '<', getValue: (q) => q.volume, format: (v) => v.toLocaleString(), color: 'slate', - description: 'ROI negativo - volumen no justifica inversión' + description: 'Negative ROI - volume doesn\'t justify investment' }, { id: 'valid_low', - label: 'Calidad Datos Baja', + label: 'Low Data Quality', shortLabel: 'Valid', threshold: 30, operator: '<', getValue: (q) => q.volume > 0 ? (q.volumeValid / q.volume) * 100 : 0, format: (v) => `${v.toFixed(0)}%`, color: 'amber', - description: 'Datos poco fiables - métricas distorsionadas' + description: 'Unreliable data - distorted metrics' } ]; -// v3.5: Detectar red flags de una cola +// v3.5: Detect red flags for a queue interface DetectedRedFlag { config: RedFlagConfig; value: number; @@ -108,7 +108,7 @@ function detectRedFlags(queue: OriginalQueueMetrics): DetectedRedFlag[] { return flags; } -// v3.5: Componente de badge de Red Flag individual +// v3.5: Individual Red Flag badge component function RedFlagBadge({ flag, size = 'sm' }: { flag: DetectedRedFlag; size?: 'sm' | 'md' }) { const sizeClasses = size === 'md' ? 
'px-2 py-1 text-xs' : 'px-1.5 py-0.5 text-[10px]'; diff --git a/frontend/locales/en.json b/frontend/locales/en.json index 63468b4..4433f42 100644 --- a/frontend/locales/en.json +++ b/frontend/locales/en.json @@ -570,12 +570,16 @@ "humanOnlyAction": "Maintain human management, evaluate periodically", "redFlags": { "cvCritical": "Critical AHT CV", + "cvCriticalShort": "CV", "cvCriticalDesc": "Extreme variability - unpredictable processes", "transferExcessive": "Excessive Transfer", + "transferExcessiveShort": "Transfer", "transferExcessiveDesc": "High complexity - requires frequent escalation", "volumeInsufficient": "Insufficient Volume", + "volumeInsufficientShort": "Vol", "volumeInsufficientDesc": "Negative ROI - volume doesn't justify investment", "dataQualityLow": "Low Data Quality", + "dataQualityLowShort": "Valid", "dataQualityLowDesc": "Unreliable data - distorted metrics", "threshold": "(threshold: {{operator}}{{value}})" }, @@ -814,6 +818,33 @@ "roiBad": "Marginal ROI, evaluate other benefits", "resolution": "Resolution", "dataQuality": "Data Quality" + }, + "subFactors": { + "repeatability": "Repeatability", + "repeatabilityDisplayName": "Repeatability", + "repeatabilityDescription": "Monthly volume: {{volume}} interactions", + "predictability": "Predictability", + "predictabilityDisplayName": "Predictability", + "predictabilityDescription": "AHT CV: {{cv}}%, Escalation: {{esc}}%", + "structuring": "Structuring", + "structuringDisplayName": "Structuring", + "structuringDescription": "{{pct}}% structured fields", + "inverseComplexity": "Inverse Complexity", + "inverseComplexityDisplayName": "Inverse Complexity", + "inverseComplexityDescription": "{{pct}}% exceptions", + "stability": "Stability", + "stabilityDisplayName": "Stability", + "stabilityDescription": "{{pct}}% off-hours", + "roiSavings": "ROI", + "roiSavingsDisplayName": "ROI", + "roiSavingsDescription": "€{{amount}}K annual potential savings", + "interpretations": { + "excellentForAutomation": 
"Excellent candidate for complete automation (Automate)", + "goodForAssistance": "Good candidate for agentic assistance (Assist)", + "candidateForAugmentation": "Candidate for human augmentation (Augment)", + "notRecommended": "Not recommended for automation at this time", + "bronzeAnalysis": "Bronze analysis does not include Agentic Readiness Score" + } } }, "economicModel": { diff --git a/frontend/locales/es.json b/frontend/locales/es.json index b6cf7a0..c96cfb5 100644 --- a/frontend/locales/es.json +++ b/frontend/locales/es.json @@ -570,12 +570,16 @@ "humanOnlyAction": "Mantener gestión humana, evaluar periódicamente", "redFlags": { "cvCritical": "CV AHT Crítico", + "cvCriticalShort": "CV", "cvCriticalDesc": "Variabilidad extrema - procesos impredecibles", "transferExcessive": "Transfer Excesivo", + "transferExcessiveShort": "Transfer", "transferExcessiveDesc": "Alta complejidad - requiere escalado frecuente", "volumeInsufficient": "Volumen Insuficiente", + "volumeInsufficientShort": "Vol", "volumeInsufficientDesc": "ROI negativo - volumen no justifica inversión", "dataQualityLow": "Calidad Datos Baja", + "dataQualityLowShort": "Valid", "dataQualityLowDesc": "Datos poco fiables - métricas distorsionadas", "threshold": "(umbral: {{operator}}{{value}})" }, @@ -814,6 +818,33 @@ "roiBad": "ROI marginal, evaluar otros beneficios", "resolution": "Resolutividad", "dataQuality": "Calidad Datos" + }, + "subFactors": { + "repeatability": "Repetitividad", + "repeatabilityDisplayName": "Repetitividad", + "repeatabilityDescription": "Volumen mensual: {{volume}} interacciones", + "predictability": "Predictibilidad", + "predictabilityDisplayName": "Predictibilidad", + "predictabilityDescription": "CV AHT: {{cv}}%, Escalación: {{esc}}%", + "structuring": "Estructuración", + "structuringDisplayName": "Estructuración", + "structuringDescription": "{{pct}}% de campos estructurados", + "inverseComplexity": "Complejidad Inversa", + "inverseComplexityDisplayName": "Complejidad 
Inversa", + "inverseComplexityDescription": "{{pct}}% de excepciones", + "stability": "Estabilidad", + "stabilityDisplayName": "Estabilidad", + "stabilityDescription": "{{pct}}% fuera de horario", + "roiSavings": "ROI", + "roiSavingsDisplayName": "ROI", + "roiSavingsDescription": "€{{amount}}K ahorro potencial anual", + "interpretations": { + "excellentForAutomation": "Excelente candidato para automatización completa (Automate)", + "goodForAssistance": "Buen candidato para asistencia agéntica (Assist)", + "candidateForAugmentation": "Candidato para augmentación humana (Augment)", + "notRecommended": "No recomendado para automatización en este momento", + "bronzeAnalysis": "Análisis Bronze no incluye Agentic Readiness Score" + } } }, "economicModel": { diff --git a/frontend/utils/agenticReadinessV2.ts b/frontend/utils/agenticReadinessV2.ts index 81c6ec5..039827d 100644 --- a/frontend/utils/agenticReadinessV2.ts +++ b/frontend/utils/agenticReadinessV2.ts @@ -1,20 +1,20 @@ /** * Agentic Readiness Score v2.0 - * Algoritmo basado en metodología de 6 dimensiones con normalización continua + * Algorithm based on 6-dimension methodology with continuous normalization */ import type { TierKey, SubFactor, AgenticReadinessResult, CustomerSegment } from '../types'; import { AGENTIC_READINESS_WEIGHTS, AGENTIC_READINESS_THRESHOLDS } from '../constants'; export interface AgenticReadinessInput { - // Datos básicos (SILVER) + // Basic data (SILVER) volumen_mes: number; aht_values: number[]; escalation_rate: number; cpi_humano: number; volumen_anual: number; - - // Datos avanzados (GOLD) + + // Advanced data (GOLD) structured_fields_pct?: number; exception_rate?: number; hourly_distribution?: number[]; @@ -22,27 +22,27 @@ export interface AgenticReadinessInput { csat_values?: number[]; motivo_contacto_entropy?: number; resolucion_entropy?: number; - + // Tier tier: TierKey; } /** - * SUB-FACTOR 1: REPETITIVIDAD (25%) - * Basado en volumen mensual con normalización logística + * 
SUB-FACTOR 1: REPEATABILITY (25%) + * Based on monthly volume with logistic normalization */ -function calculateRepetitividadScore(volumen_mes: number): SubFactor { +function calculateRepeatabilityScore(volumen_mes: number): SubFactor { const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.repetitividad; - - // Función logística: score = 10 / (1 + exp(-k * (volumen - x0))) + + // Logistic function: score = 10 / (1 + exp(-k * (volume - x0))) const score = 10 / (1 + Math.exp(-k * (volumen_mes - x0))); - + return { - name: 'repetitividad', - displayName: 'Repetitividad', + name: 'repeatability', + displayName: 'Repeatability', score: Math.round(score * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.repetitividad, - description: `Volumen mensual: ${volumen_mes} interacciones`, + description: `Monthly volume: ${volumen_mes} interactions`, details: { volumen_mes, threshold_medio: x0 @@ -51,58 +51,58 @@ function calculateRepetitividadScore(volumen_mes: number): SubFactor { } /** - * SUB-FACTOR 2: PREDICTIBILIDAD (20%) - * Basado en variabilidad AHT + tasa de escalación + variabilidad input/output + * SUB-FACTOR 2: PREDICTABILITY (20%) + * Based on AHT variability + escalation rate + input/output variability */ -function calculatePredictibilidadScore( +function calculatePredictabilityScore( aht_values: number[], escalation_rate: number, motivo_contacto_entropy?: number, resolucion_entropy?: number ): SubFactor { const thresholds = AGENTIC_READINESS_THRESHOLDS.predictibilidad; - - // 1. VARIABILIDAD AHT (40%) + + // 1. 
AHT VARIABILITY (40%) const aht_mean = aht_values.reduce((a, b) => a + b, 0) / aht_values.length; const aht_variance = aht_values.reduce((sum, val) => sum + Math.pow(val - aht_mean, 2), 0) / aht_values.length; const aht_std = Math.sqrt(aht_variance); const cv_aht = aht_std / aht_mean; - - // Normalizar CV a escala 0-10 - const score_aht = Math.max(0, Math.min(10, + + // Normalize CV to 0-10 scale + const score_aht = Math.max(0, Math.min(10, 10 * (1 - (cv_aht - thresholds.cv_aht_excellent) / (thresholds.cv_aht_poor - thresholds.cv_aht_excellent)) )); - - // 2. TASA DE ESCALACIÓN (30%) - const score_escalacion = Math.max(0, Math.min(10, + + // 2. ESCALATION RATE (30%) + const score_escalacion = Math.max(0, Math.min(10, 10 * (1 - escalation_rate / thresholds.escalation_poor) )); - - // 3. VARIABILIDAD INPUT/OUTPUT (30%) + + // 3. INPUT/OUTPUT VARIABILITY (30%) let score_variabilidad: number; if (motivo_contacto_entropy !== undefined && resolucion_entropy !== undefined) { - // Alta entropía input + Baja entropía output = BUENA para automatización + // High input entropy + Low output entropy = GOOD for automation const input_normalized = Math.min(motivo_contacto_entropy / 3.0, 1.0); const output_normalized = Math.min(resolucion_entropy / 3.0, 1.0); score_variabilidad = 10 * (input_normalized * (1 - output_normalized)); } else { - // Si no hay datos de entropía, usar promedio de AHT y escalación + // If no entropy data, use average of AHT and escalation score_variabilidad = (score_aht + score_escalacion) / 2; } - - // PONDERACIÓN FINAL - const predictibilidad = ( + + // FINAL WEIGHTING + const predictability = ( 0.40 * score_aht + 0.30 * score_escalacion + 0.30 * score_variabilidad ); - + return { - name: 'predictibilidad', - displayName: 'Predictibilidad', - score: Math.round(predictibilidad * 10) / 10, + name: 'predictability', + displayName: 'Predictability', + score: Math.round(predictability * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.predictibilidad, - 
description: `CV AHT: ${(cv_aht * 100).toFixed(1)}%, Escalación: ${(escalation_rate * 100).toFixed(1)}%`, + description: `AHT CV: ${(cv_aht * 100).toFixed(1)}%, Escalation: ${(escalation_rate * 100).toFixed(1)}%`, details: { cv_aht: Math.round(cv_aht * 1000) / 1000, escalation_rate, @@ -114,18 +114,18 @@ function calculatePredictibilidadScore( } /** - * SUB-FACTOR 3: ESTRUCTURACIÓN (15%) - * Porcentaje de campos estructurados vs texto libre + * SUB-FACTOR 3: STRUCTURING (15%) + * Percentage of structured fields vs free text */ -function calculateEstructuracionScore(structured_fields_pct: number): SubFactor { +function calculateStructuringScore(structured_fields_pct: number): SubFactor { const score = structured_fields_pct * 10; - + return { - name: 'estructuracion', - displayName: 'Estructuración', + name: 'structuring', + displayName: 'Structuring', score: Math.round(score * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.estructuracion, - description: `${(structured_fields_pct * 100).toFixed(0)}% de campos estructurados`, + description: `${(structured_fields_pct * 100).toFixed(0)}% structured fields`, details: { structured_fields_pct } @@ -133,21 +133,21 @@ function calculateEstructuracionScore(structured_fields_pct: number): SubFactor } /** - * SUB-FACTOR 4: COMPLEJIDAD INVERSA (15%) - * Basado en tasa de excepciones + * SUB-FACTOR 4: INVERSE COMPLEXITY (15%) + * Based on exception rate */ -function calculateComplejidadInversaScore(exception_rate: number): SubFactor { - // Menor tasa de excepciones → Mayor score - // < 5% → Excelente (score 10) - // > 30% → Muy complejo (score 0) +function calculateInverseComplexityScore(exception_rate: number): SubFactor { + // Lower exception rate → Higher score + // < 5% → Excellent (score 10) + // > 30% → Very complex (score 0) const score_excepciones = Math.max(0, Math.min(10, 10 * (1 - exception_rate / 0.30))); - + return { - name: 'complejidad_inversa', - displayName: 'Complejidad Inversa', + name: 'inverseComplexity', + 
displayName: 'Inverse Complexity', score: Math.round(score_excepciones * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.complejidad_inversa, - description: `${(exception_rate * 100).toFixed(1)}% de excepciones`, + description: `${(exception_rate * 100).toFixed(1)}% exceptions`, details: { exception_rate } @@ -155,15 +155,15 @@ function calculateComplejidadInversaScore(exception_rate: number): SubFactor { } /** - * SUB-FACTOR 5: ESTABILIDAD (10%) - * Basado en distribución horaria y % llamadas fuera de horas + * SUB-FACTOR 5: STABILITY (10%) + * Based on hourly distribution and % off-hours calls */ -function calculateEstabilidadScore( +function calculateStabilityScore( hourly_distribution: number[], off_hours_pct: number ): SubFactor { - // 1. UNIFORMIDAD DISTRIBUCIÓN HORARIA (60%) - // Calcular entropía de Shannon + // 1. HOURLY DISTRIBUTION UNIFORMITY (60%) + // Calculate Shannon entropy const total = hourly_distribution.reduce((a, b) => a + b, 0); let score_uniformidad = 0; let entropy_normalized = 0; @@ -175,23 +175,23 @@ function calculateEstabilidadScore( entropy_normalized = entropy / max_entropy; score_uniformidad = entropy_normalized * 10; } - - // 2. % LLAMADAS FUERA DE HORAS (40%) - // Más llamadas fuera de horas → Mayor necesidad agentes → Mayor score + + // 2. 
% OFF-HOURS CALLS (40%) + // More off-hours calls → Higher agent need → Higher score const score_off_hours = Math.min(10, (off_hours_pct / 0.30) * 10); - - // PONDERACIÓN + + // WEIGHTING const estabilidad = ( 0.60 * score_uniformidad + 0.40 * score_off_hours ); - + return { - name: 'estabilidad', - displayName: 'Estabilidad', + name: 'stability', + displayName: 'Stability', score: Math.round(estabilidad * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.estabilidad, - description: `${(off_hours_pct * 100).toFixed(1)}% fuera de horario`, + description: `${(off_hours_pct * 100).toFixed(1)}% off-hours`, details: { entropy_normalized: Math.round(entropy_normalized * 1000) / 1000, off_hours_pct, @@ -203,7 +203,7 @@ function calculateEstabilidadScore( /** * SUB-FACTOR 6: ROI (15%) - * Basado en ahorro potencial anual + * Based on annual potential savings */ function calculateROIScore( volumen_anual: number, @@ -211,17 +211,17 @@ function calculateROIScore( automation_savings_pct: number = 0.70 ): SubFactor { const ahorro_anual = volumen_anual * cpi_humano * automation_savings_pct; - - // Normalización logística + + // Logistic normalization const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.roi; const score = 10 / (1 + Math.exp(-k * (ahorro_anual - x0))); - + return { name: 'roi', displayName: 'ROI', score: Math.round(score * 10) / 10, weight: AGENTIC_READINESS_WEIGHTS.roi, - description: `€${(ahorro_anual / 1000).toFixed(0)}K ahorro potencial anual`, + description: `€${(ahorro_anual / 1000).toFixed(0)}K annual potential savings`, details: { ahorro_anual: Math.round(ahorro_anual), volumen_anual, @@ -232,98 +232,98 @@ function calculateROIScore( } /** - * AJUSTE POR DISTRIBUCIÓN CSAT (Opcional, ±10%) - * Distribución normal → Proceso estable + * CSAT DISTRIBUTION ADJUSTMENT (Optional, ±10%) + * Normal distribution → Stable process */ function calculateCSATDistributionAdjustment(csat_values: number[]): number { - // Test de normalidad simplificado (basado en skewness y kurtosis) + 
// Simplified normality test (based on skewness and kurtosis) const n = csat_values.length; const mean = csat_values.reduce((a, b) => a + b, 0) / n; const variance = csat_values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) / n; const std = Math.sqrt(variance); - + // Skewness const skewness = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 3), 0) / n; - + // Kurtosis const kurtosis = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 4), 0) / n; - - // Normalidad: skewness cercano a 0, kurtosis cercano a 3 + + // Normality: skewness close to 0, kurtosis close to 3 const skewness_score = Math.max(0, 1 - Math.abs(skewness)); const kurtosis_score = Math.max(0, 1 - Math.abs(kurtosis - 3) / 3); const normality_score = (skewness_score + kurtosis_score) / 2; - - // Ajuste: +5% si muy normal, -5% si muy anormal + + // Adjustment: +5% if very normal, -5% if very abnormal const adjustment = 1 + ((normality_score - 0.5) * 0.10); - + return adjustment; } /** - * ALGORITMO COMPLETO (Tier GOLD) + * COMPLETE ALGORITHM (Tier GOLD) */ export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput): AgenticReadinessResult { const sub_factors: SubFactor[] = []; - - // 1. REPETITIVIDAD - sub_factors.push(calculateRepetitividadScore(data.volumen_mes)); - - // 2. PREDICTIBILIDAD - sub_factors.push(calculatePredictibilidadScore( + + // 1. REPEATABILITY + sub_factors.push(calculateRepeatabilityScore(data.volumen_mes)); + + // 2. PREDICTABILITY + sub_factors.push(calculatePredictabilityScore( data.aht_values, data.escalation_rate, data.motivo_contacto_entropy, data.resolucion_entropy )); - - // 3. ESTRUCTURACIÓN - sub_factors.push(calculateEstructuracionScore(data.structured_fields_pct || 0.5)); - - // 4. COMPLEJIDAD INVERSA - sub_factors.push(calculateComplejidadInversaScore(data.exception_rate || 0.15)); - - // 5. ESTABILIDAD - sub_factors.push(calculateEstabilidadScore( + + // 3. 
STRUCTURING + sub_factors.push(calculateStructuringScore(data.structured_fields_pct || 0.5)); + + // 4. INVERSE COMPLEXITY + sub_factors.push(calculateInverseComplexityScore(data.exception_rate || 0.15)); + + // 5. STABILITY + sub_factors.push(calculateStabilityScore( data.hourly_distribution || Array(24).fill(1), data.off_hours_pct || 0.2 )); - + // 6. ROI sub_factors.push(calculateROIScore( data.volumen_anual, data.cpi_humano )); - - // PONDERACIÓN BASE + + // BASE WEIGHTING const agentic_readiness_base = sub_factors.reduce( (sum, factor) => sum + (factor.score * factor.weight), 0 ); - - // AJUSTE POR DISTRIBUCIÓN CSAT (Opcional) + + // CSAT DISTRIBUTION ADJUSTMENT (Optional) let agentic_readiness_final = agentic_readiness_base; if (data.csat_values && data.csat_values.length > 10) { const adjustment = calculateCSATDistributionAdjustment(data.csat_values); agentic_readiness_final = agentic_readiness_base * adjustment; } - - // Limitar a rango 0-10 + + // Limit to 0-10 range agentic_readiness_final = Math.max(0, Math.min(10, agentic_readiness_final)); - - // Interpretación + + // Interpretation let interpretation = ''; let confidence: 'high' | 'medium' | 'low' = 'high'; - + if (agentic_readiness_final >= 8) { - interpretation = 'Excelente candidato para automatización completa (Automate)'; + interpretation = 'Excellent candidate for complete automation (Automate)'; } else if (agentic_readiness_final >= 5) { - interpretation = 'Buen candidato para asistencia agéntica (Assist)'; + interpretation = 'Good candidate for agentic assistance (Assist)'; } else if (agentic_readiness_final >= 3) { - interpretation = 'Candidato para augmentación humana (Augment)'; + interpretation = 'Candidate for human augmentation (Augment)'; } else { - interpretation = 'No recomendado para automatización en este momento'; + interpretation = 'Not recommended for automation at this time'; } - + return { score: Math.round(agentic_readiness_final * 10) / 10, sub_factors, @@ -334,45 +334,45 @@ 
export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput): } /** - * ALGORITMO SIMPLIFICADO (Tier SILVER) + * SIMPLIFIED ALGORITHM (Tier SILVER) */ export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput): AgenticReadinessResult { const sub_factors: SubFactor[] = []; - - // 1. REPETITIVIDAD (30%) - const repetitividad = calculateRepetitividadScore(data.volumen_mes); - repetitividad.weight = 0.30; - sub_factors.push(repetitividad); - - // 2. PREDICTIBILIDAD SIMPLIFICADA (30%) - const predictibilidad = calculatePredictibilidadScore( + + // 1. REPEATABILITY (30%) + const repeatability = calculateRepeatabilityScore(data.volumen_mes); + repeatability.weight = 0.30; + sub_factors.push(repeatability); + + // 2. SIMPLIFIED PREDICTABILITY (30%) + const predictability = calculatePredictabilityScore( data.aht_values, data.escalation_rate ); - predictibilidad.weight = 0.30; - sub_factors.push(predictibilidad); - + predictability.weight = 0.30; + sub_factors.push(predictability); + // 3. 
ROI (40%) const roi = calculateROIScore(data.volumen_anual, data.cpi_humano); roi.weight = 0.40; sub_factors.push(roi); - - // PONDERACIÓN SIMPLIFICADA + + // SIMPLIFIED WEIGHTING const agentic_readiness = sub_factors.reduce( (sum, factor) => sum + (factor.score * factor.weight), 0 ); - - // Interpretación + + // Interpretation let interpretation = ''; if (agentic_readiness >= 7) { - interpretation = 'Buen candidato para automatización'; + interpretation = 'Good candidate for automation'; } else if (agentic_readiness >= 4) { - interpretation = 'Candidato para asistencia agéntica'; + interpretation = 'Candidate for agentic assistance'; } else { - interpretation = 'Requiere análisis más profundo (considerar GOLD)'; + interpretation = 'Requires deeper analysis (consider GOLD)'; } - + return { score: Math.round(agentic_readiness * 10) / 10, sub_factors, @@ -383,7 +383,7 @@ export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput } /** - * FUNCIÓN PRINCIPAL - Selecciona algoritmo según tier + * MAIN FUNCTION - Selects algorithm based on tier */ export function calculateAgenticReadinessScore(data: AgenticReadinessInput): AgenticReadinessResult { if (data.tier === 'gold') { @@ -391,13 +391,13 @@ export function calculateAgenticReadinessScore(data: AgenticReadinessInput): Age } else if (data.tier === 'silver') { return calculateAgenticReadinessScoreSilver(data); } else { - // BRONZE: Sin Agentic Readiness + // BRONZE: No Agentic Readiness return { score: 0, sub_factors: [], tier: 'bronze', confidence: 'low', - interpretation: 'Análisis Bronze no incluye Agentic Readiness Score' + interpretation: 'Bronze analysis does not include Agentic Readiness Score' }; } } diff --git a/frontend/utils/analysisGenerator.ts b/frontend/utils/analysisGenerator.ts index 0ccc836..68678fd 100644 --- a/frontend/utils/analysisGenerator.ts +++ b/frontend/utils/analysisGenerator.ts @@ -1,4 +1,4 @@ -// analysisGenerator.ts - v2.0 con 6 dimensiones +// analysisGenerator.ts - 
v2.0 with 6 dimensions import type { AnalysisData, Kpi, DimensionAnalysis, HeatmapDataPoint, Opportunity, RoadmapInitiative, EconomicModelData, BenchmarkDataPoint, Finding, Recommendation, TierKey, CustomerSegment, RawInteraction, DrilldownDataPoint, AgenticTier } from '../types'; import { generateAnalysisFromRealData, calculateDrilldownMetrics, generateOpportunitiesFromDrilldown, generateRoadmapFromDrilldown, calculateSkillMetrics, generateHeatmapFromMetrics, clasificarTierSimple } from './realDataAnalysis'; import { RoadmapPhase } from '../types'; @@ -17,7 +17,7 @@ const randomInt = (min: number, max: number) => Math.floor(Math.random() * (max const randomFloat = (min: number, max: number, decimals: number) => parseFloat((Math.random() * (max - min) + min).toFixed(decimals)); const randomFromList = (arr: T[]): T => arr[Math.floor(Math.random() * arr.length)]; -// Distribución normal (Box-Muller transform) +// Normal distribution (Box-Muller transform) const normalRandom = (mean: number, std: number): number => { const u1 = Math.random(); const u2 = Math.random(); @@ -35,24 +35,24 @@ const getScoreColor = (score: number): 'green' | 'yellow' | 'red' => { const DIMENSIONS_CONTENT = { volumetry_distribution: { icon: BarChartHorizontal, - titles: ["Volumetría & Distribución", "Análisis de la Demanda"], + titles: ["Volumetry & Distribution", "Demand Analysis"], summaries: { - good: ["El volumen de interacciones se alinea con las previsiones, permitiendo una planificación de personal precisa.", "La distribución horaria es uniforme con picos predecibles. 
Concentración Pareto equilibrada."], - medium: ["Existen picos de demanda imprevistos que generan caídas en el nivel de servicio.", "Alta concentración en pocas colas (>80% en 20% de colas), riesgo de cuellos de botella."], - bad: ["Desajuste crónico entre el forecast y el volumen real, resultando en sobrecostes o mal servicio.", "Distribución horaria muy irregular con múltiples picos impredecibles."] + good: ["Interaction volume aligns with forecasts, enabling precise staff planning.", "Hourly distribution is uniform with predictable peaks. Balanced Pareto concentration."], + medium: ["There are unforeseen demand peaks that cause service level drops.", "High concentration in few queues (>80% in 20% of queues), bottleneck risk."], + bad: ["Chronic mismatch between forecast and actual volume, resulting in cost overruns or poor service.", "Very irregular hourly distribution with multiple unpredictable peaks."] }, kpis: [ - { label: "Volumen Mensual", value: `${randomInt(5000, 25000).toLocaleString('es-ES')}` }, - { label: "% Fuera de Horario", value: `${randomInt(15, 45)}%` }, + { label: "Monthly Volume", value: `${randomInt(5000, 25000).toLocaleString('es-ES')}` }, + { label: "% After Hours", value: `${randomInt(15, 45)}%` }, ], }, operational_efficiency: { icon: Zap, - titles: ["Eficiencia Operativa", "Optimización de Tiempos"], + titles: ["Operational Efficiency", "Time Optimization"], summaries: { - good: ["El ratio P90/P50 es bajo (<1.5), indicando tiempos consistentes y procesos estandarizados.", "Tiempos de espera, hold y ACW bien controlados, maximizando la productividad."], - medium: ["El ratio P90/P50 es moderado (1.5-2.0), existen casos outliers que afectan la eficiencia.", "El tiempo de hold es ligeramente elevado, sugiriendo mejoras en acceso a información."], - bad: ["Alto ratio P90/P50 (>2.0), indicando alta variabilidad en tiempos de gestión.", "Tiempos de ACW y hold prolongados indican procesos manuales ineficientes."] + good: ["P90/P50 ratio is low 
(<1.5), indicating consistent times and standardized processes.", "Wait, hold and ACW times well controlled, maximizing productivity."], + medium: ["P90/P50 ratio is moderate (1.5-2.0), outlier cases exist that affect efficiency.", "Hold time is slightly high, suggesting improvements in information access."], + bad: ["High P90/P50 ratio (>2.0), indicating high variability in handling times.", "Extended ACW and hold times indicate inefficient manual processes."] }, kpis: [ { label: "AHT P50", value: `${randomInt(280, 450)}s` }, @@ -61,24 +61,24 @@ const DIMENSIONS_CONTENT = { }, effectiveness_resolution: { icon: Target, - titles: ["Efectividad & Resolución", "Calidad del Servicio"], + titles: ["Effectiveness & Resolution", "Service Quality"], summaries: { - good: ["FCR proxy >85%, mínima repetición de contactos a 7 días.", "Baja tasa de transferencias (<10%) y llamadas problemáticas (<5%)."], - medium: ["FCR proxy 70-85%, hay oportunidad de reducir recontactos.", "Tasa de transferencias moderada (10-20%), concentradas en ciertas colas."], - bad: ["FCR proxy <70%, alto volumen de recontactos a 7 días.", "Alta tasa de llamadas problemáticas (>15%) y transferencias excesivas."] + good: ["FCR proxy >85%, minimal contact repetition within 7 days.", "Low transfer rate (<10%) and problematic calls (<5%)."], + medium: ["FCR proxy 70-85%, opportunity to reduce re-contacts.", "Moderate transfer rate (10-20%), concentrated in certain queues."], + bad: ["FCR proxy <70%, high volume of re-contacts within 7 days.", "High rate of problematic calls (>15%) and excessive transfers."] }, kpis: [ { label: "FCR Proxy 7d", value: `${randomInt(65, 92)}%` }, - { label: "Tasa Transfer", value: `${randomInt(5, 25)}%` }, + { label: "Transfer Rate", value: `${randomInt(5, 25)}%` }, ], }, complexity_predictability: { icon: Brain, - titles: ["Complejidad & Predictibilidad", "Análisis de Variabilidad"], + titles: ["Complexity & Predictability", "Variability Analysis"], summaries: { - good: ["Baja 
variabilidad AHT (ratio P90/P50 <1.5), proceso altamente predecible.", "Diversidad de tipificaciones controlada, bajo % de llamadas con múltiples holds."], - medium: ["Variabilidad AHT moderada, algunos casos outliers afectan la predictibilidad.", "% llamadas con múltiples holds elevado (15-30%), indicando complejidad."], - bad: ["Alta variabilidad AHT (ratio >2.0), proceso impredecible y difícil de automatizar.", "Alta diversidad de tipificaciones y % transferencias, indicando alta complejidad."] + good: ["Low AHT variability (P90/P50 ratio <1.5), highly predictable process.", "Controlled classification diversity, low % of calls with multiple holds."], + medium: ["Moderate AHT variability, some outlier cases affect predictability.", "% calls with multiple holds high (15-30%), indicating complexity."], + bad: ["High AHT variability (ratio >2.0), unpredictable process difficult to automate.", "High classification diversity and % transfers, indicating high complexity."] }, kpis: [ { label: "Ratio P90/P50", value: `${randomFloat(1.2, 2.5, 2)}` }, @@ -87,122 +87,122 @@ const DIMENSIONS_CONTENT = { }, agentic_readiness: { icon: Bot, - titles: ["Agentic Readiness", "Potencial de Automatización"], + titles: ["Agentic Readiness", "Automation Potential"], summaries: { - good: ["Score 8-10: Excelente candidato para automatización completa con agentes IA.", "Alto volumen, baja variabilidad, pocas transferencias. Proceso repetitivo y predecible."], - medium: ["Score 5-7: Candidato para asistencia con IA (copilot) o automatización parcial.", "Volumen moderado con algunas complejidades que requieren supervisión humana."], - bad: ["Score 0-4: Requiere optimización previa antes de automatizar.", "Alta complejidad, baja repetitividad o variabilidad excesiva."] + good: ["Score 8-10: Excellent candidate for full automation with AI agents.", "High volume, low variability, few transfers. 
Repetitive and predictable process."], + medium: ["Score 5-7: Candidate for AI assistance (copilot) or partial automation.", "Moderate volume with some complexities requiring human supervision."], + bad: ["Score 0-4: Requires prior optimization before automating.", "High complexity, low repeatability or excessive variability."] }, kpis: [ - { label: "Score Global", value: `${randomFloat(3.0, 9.5, 1)}/10` }, - { label: "Categoría", value: randomFromList(['Automatizar', 'Asistir', 'Optimizar']) }, + { label: "Overall Score", value: `${randomFloat(3.0, 9.5, 1)}/10` }, + { label: "Category", value: randomFromList(['Automate', 'Assist', 'Optimize']) }, ], }, }; -// Hallazgos genéricos - los específicos se generan en realDataAnalysis.ts desde datos calculados +// Generic findings - specific ones are generated in realDataAnalysis.ts from calculated data const KEY_FINDINGS: Finding[] = [ { - text: "El ratio P90/P50 de AHT es alto (>2.0), indicando alta variabilidad en tiempos de gestión.", + text: "The P90/P50 ratio of AHT is high (>2.0), indicating high variability in handling times.", dimensionId: 'operational_efficiency', type: 'warning', - title: 'Alta Variabilidad en Tiempos', - description: 'Procesos poco estandarizados generan tiempos impredecibles y afectan la planificación.', + title: 'High Variability in Times', + description: 'Poorly standardized processes generate unpredictable times and affect planning.', impact: 'high' }, { - text: "Tasa de transferencias elevada indica oportunidad de mejora en enrutamiento o capacitación.", + text: "High transfer rate indicates opportunity for improvement in routing or training.", dimensionId: 'effectiveness_resolution', type: 'warning', - title: 'Transferencias Elevadas', - description: 'Las transferencias frecuentes afectan la experiencia del cliente y la eficiencia operativa.', + title: 'High Transfers', + description: 'Frequent transfers affect customer experience and operational efficiency.', impact: 'high' }, { - text: 
"Concentración de volumen en franjas horarias específicas genera picos de demanda.", + text: "Volume concentration in specific time slots generates demand peaks.", dimensionId: 'volumetry_distribution', type: 'info', - title: 'Concentración de Demanda', - description: 'Revisar capacidad en franjas de mayor volumen para optimizar nivel de servicio.', + title: 'Demand Concentration', + description: 'Review capacity in high-volume time slots to optimize service level.', impact: 'medium' }, { - text: "Porcentaje significativo de interacciones fuera del horario laboral estándar (8-19h).", + text: "Significant percentage of interactions outside standard business hours (8-19h).", dimensionId: 'volumetry_distribution', type: 'info', - title: 'Demanda Fuera de Horario', - description: 'Evaluar cobertura extendida o canales de autoservicio para demanda fuera de horario.', + title: 'After-Hours Demand', + description: 'Evaluate extended coverage or self-service channels for after-hours demand.', impact: 'medium' }, { - text: "Oportunidades de automatización identificadas en consultas repetitivas de alto volumen.", + text: "Automation opportunities identified in high-volume repetitive queries.", dimensionId: 'agentic_readiness', type: 'info', - title: 'Oportunidad de Automatización', - description: 'Skills con alta repetitividad y baja complejidad son candidatos ideales para agentes IA.', + title: 'Automation Opportunity', + description: 'Skills with high repeatability and low complexity are ideal candidates for AI agents.', impact: 'high' }, ]; const RECOMMENDATIONS: Recommendation[] = [ { - text: "Estandarizar procesos en colas con alto ratio P90/P50 para reducir variabilidad.", + text: "Standardize processes in queues with high P90/P50 ratio to reduce variability.", dimensionId: 'operational_efficiency', priority: 'high', - title: 'Estandarización de Procesos', - description: 'Implementar scripts y guías paso a paso para reducir la variabilidad en tiempos de gestión.', - 
impact: 'Reducción ratio P90/P50: 20-30%, Mejora predictibilidad', - timeline: '3-4 semanas' + title: 'Process Standardization', + description: 'Implement scripts and step-by-step guides to reduce variability in handling times.', + impact: 'P90/P50 ratio reduction: 20-30%, Improved predictability', + timeline: '3-4 weeks' }, { - text: "Desarrollar un bot de estado de pedido para WhatsApp para desviar el 30% de las consultas.", + text: "Develop an order status bot for WhatsApp to deflect 30% of queries.", dimensionId: 'agentic_readiness', priority: 'high', - title: 'Bot Automatizado de Seguimiento de Pedidos', - description: 'Implementar ChatBot en WhatsApp para consultas con alto Agentic Score (>8).', - impact: 'Reducción de volumen: 20-30%, Ahorro anual: €40-60K', + title: 'Automated Order Tracking Bot', + description: 'Implement ChatBot on WhatsApp for queries with high Agentic Score (>8).', + impact: 'Volume reduction: 20-30%, Annual savings: €40-60K', - timeline: '1-2 meses' + timeline: '1-2 months' }, { - text: "Revisar la planificación de personal (WFM) para los lunes, añadiendo recursos flexibles.", + text: "Review workforce planning (WFM) for Mondays, adding flexible resources.", dimensionId: 'volumetry_distribution', priority: 'high', - title: 'Ajuste de Plantilla (WFM)', - description: 'Reposicionar agentes y añadir recursos part-time para los lunes 8-11h.', - impact: 'Mejora del NSL: +15-20%, Coste adicional: €5-8K/mes', + title: 'Workforce Adjustment (WFM)', + description: 'Reposition agents and add part-time resources for Mondays 8-11h.', + impact: 'SL improvement: +15-20%, Additional cost: €5-8K/month', - timeline: '1 mes' + timeline: '1 month' }, { - text: "Crear una Knowledge Base más robusta para reducir hold time y mejorar FCR.", + text: "Create a more robust Knowledge Base to reduce hold time and improve FCR.", dimensionId: 'effectiveness_resolution', priority: 'high', - title: 'Mejora de Acceso a Información', - description: 'Desarrollar una KB centralizada para reducir búsquedas y mejorar
resolución en primer contacto.', - impact: 'Reducción hold time: 15-25%, Mejora FCR: 5-10%', - timeline: '6-8 semanas' + title: 'Information Access Improvement', + description: 'Develop a centralized KB to reduce searches and improve first contact resolution.', + impact: 'Hold time reduction: 15-25%, FCR improvement: 5-10%', + timeline: '6-8 weeks' }, { - text: "Implementar cobertura 24/7 con agentes virtuales para el 28% de interacciones fuera de horario.", + text: "Implement 24/7 coverage with virtual agents for 28% of after-hours interactions.", dimensionId: 'volumetry_distribution', priority: 'medium', - title: 'Cobertura 24/7 con IA', - description: 'Desplegar agentes virtuales para gestionar interacciones nocturnas y fines de semana.', - impact: 'Captura de demanda: 20-25%, Coste incremental: €15-20K/mes', + title: '24/7 Coverage with AI', + description: 'Deploy virtual agents to handle nighttime and weekend interactions.', + impact: 'Demand capture: 20-25%, Incremental cost: €15-20K/month', - timeline: '2-3 meses' + timeline: '2-3 months' }, { - text: "Simplificar tipificaciones y reducir complejidad en colas problemáticas.", + text: "Simplify classifications and reduce complexity in problematic queues.", dimensionId: 'complexity_predictability', priority: 'medium', - title: 'Reducción de Complejidad', - description: 'Consolidar tipificaciones y simplificar flujos para mejorar predictibilidad.', - impact: 'Reducción de complejidad: 20-30%, Mejora Agentic Score', - timeline: '4-6 semanas' + title: 'Complexity Reduction', + description: 'Consolidate classifications and simplify flows to improve predictability.', + impact: 'Complexity reduction: 20-30%, Improved Agentic Score', + timeline: '4-6 weeks' }, ]; -// === RECOMENDACIONES BASADAS EN DATOS REALES === +// === RECOMMENDATIONS BASED ON REAL DATA === const MAX_RECOMMENDATIONS = 4; const generateRecommendationsFromData = ( @@ -226,23 +226,23 @@ const generateRecommendationsFromData = ( const annualSavings = econ?.annualSavings ??
0; const currentCost = econ?.currentAnnualCost ?? 0; - // Relevancia por recomendación + // Relevance by recommendation const scoredTemplates = RECOMMENDATIONS.map((tpl, index) => { const dimId = tpl.dimensionId || 'overall'; const dimScore = dimScoreMap.get(dimId) ?? overallScore; let relevance = 0; - // 1) Dimensiones débiles => más relevancia + // 1) Weak dimensions => more relevance if (dimScore < 60) relevance += 3; else if (dimScore < 75) relevance += 2; else if (dimScore < 85) relevance += 1; - // 2) Prioridad declarada en la plantilla + // 2) Priority declared in the template if (tpl.priority === 'high') relevance += 2; else if (tpl.priority === 'medium') relevance += 1; - // 3) Refuerzo en función del potencial económico + // 3) Reinforcement based on economic potential if ( annualSavings > 0 && currentCost > 0 && @@ -252,7 +252,7 @@ const generateRecommendationsFromData = ( relevance += 2; } - // 4) Ligera penalización si la dimensión ya está muy bien (>85) + // 4) Slight penalty if dimension is already very good (>85) if (dimScore > 85) relevance -= 1; return { @@ -262,11 +262,11 @@ const generateRecommendationsFromData = ( }; }); - // Filtramos las que no aportan nada (relevance <= 0) + // Filter out those that contribute nothing (relevance <= 0) let filtered = scoredTemplates.filter((s) => s.relevance > 0); // Si ninguna pasa el filtro (por ejemplo, todo muy bien), - // nos quedamos al menos con 2–3 de las de mayor prioridad + // we keep at least 2-3 of the highest priority ones if (filtered.length === 0) { filtered = scoredTemplates .slice() @@ -297,22 +297,22 @@ const generateRecommendationsFromData = ( return selected.map((rec, i): Recommendation => ({ priority: rec.priority || (i === 0 ? 
('high' as const) : ('medium' as const)), - title: rec.title || 'Recomendación', + title: rec.title || 'Recommendation', description: rec.description || rec.text, impact: rec.impact || - 'Mejora estimada del 10-20% en los KPIs clave.', - timeline: rec.timeline || '4-8 semanas', - // campos obligatorios: + 'Estimated 10-20% improvement in key KPIs.', + timeline: rec.timeline || '4-8 weeks', + // required fields: text: rec.text || rec.description || - 'Recomendación prioritaria basada en el análisis de datos.', + 'Priority recommendation based on data analysis.', dimensionId: rec.dimensionId || 'overall', })); }; -// === FINDINGS BASADOS EN DATOS REALES === +// === FINDINGS BASED ON REAL DATA === const MAX_FINDINGS = 5; @@ -333,7 +333,7 @@ const generateFindingsFromData = ( ? analysis.overallHealthScore : 70; - // Miramos volumetría para reforzar algunos findings + // We look at volumetry to reinforce some findings const volumetryDim = dimensions.find( (d) => d.id === 'volumetry_distribution' ); @@ -347,12 +347,12 @@ const generateFindingsFromData = ( let relevance = 0; - // 1) Dimensiones débiles => más relevancia + // 1) Weak dimensions => more relevance if (dimScore < 60) relevance += 3; else if (dimScore < 75) relevance += 2; else if (dimScore < 85) relevance += 1; - // 2) Tipo de finding (critical > warning > info) + // 2) Type of finding (critical > warning > info) if (tpl.type === 'critical') relevance += 3; else if (tpl.type === 'warning') relevance += 2; else relevance += 1; @@ -361,17 +361,17 @@ const generateFindingsFromData = ( if (tpl.impact === 'high') relevance += 2; else if (tpl.impact === 'medium') relevance += 1; - // 4) Refuerzo en volumetría si hay mucha demanda fuera de horario + // 4) Reinforcement in volumetry if there is high after-hours demand if ( offHoursPct > 0.25 && tpl.dimensionId === 'volumetry_distribution' ) { relevance += 2; if ( - tpl.title?.toLowerCase().includes('fuera de horario') || + tpl.title?.toLowerCase().includes('after-hours') ||
tpl.text ?.toLowerCase() - .includes('fuera del horario laboral') + .includes('outside standard business hours') ) { relevance += 1; } @@ -430,11 +430,11 @@ const generateFindingsFromData = ( : ('info' as const)), - title: finding.title || 'Hallazgo', + title: finding.title || 'Finding', description: finding.description || finding.text, - // campos obligatorios: + // required fields: text: finding.text || finding.description || - 'Hallazgo relevante basado en datos.', + 'Relevant finding based on data.', dimensionId: finding.dimensionId || 'overall', impact: finding.impact, })); @@ -450,7 +450,7 @@ const generateFindingsFromTemplates = (): Finding[] => { type: finding.type || (i === 0 ? 'warning' : 'info'), - title: finding.title || 'Hallazgo', + title: finding.title || 'Finding', description: finding.description || finding.text, - // campos obligatorios: + // required fields: - text: finding.text || finding.description || 'Hallazgo relevante', + text: finding.text || finding.description || 'Relevant finding', dimensionId: finding.dimensionId || 'overall', impact: finding.impact, @@ -464,33 +464,33 @@ const generateRecommendationsFromTemplates = (): Recommendation[] => { ), ].map((rec, i): Recommendation => ({ priority: rec.priority || (i === 0 ?
'high' : 'medium'), - title: rec.title || 'Recomendación', + title: rec.title || 'Recommendation', description: rec.description || rec.text, - impact: rec.impact || 'Mejora estimada del 20-30%', - timeline: rec.timeline || '1-2 semanas', - // campos obligatorios: - text: rec.text || rec.description || 'Recomendación prioritaria', + impact: rec.impact || 'Estimated improvement of 20-30%', + timeline: rec.timeline || '1-2 weeks', + // required fields: + text: rec.text || rec.description || 'Priority recommendation', dimensionId: rec.dimensionId || 'overall', })); }; -// v2.0: Generar distribución horaria realista +// v2.0: Generate realistic hourly distribution const generateHourlyDistribution = (): number[] => { - // Distribución con picos en 9-11h y 14-17h + // Distribution with peaks at 9-11h and 14-17h const distribution = Array(24).fill(0).map((_, hour) => { - if (hour >= 9 && hour <= 11) return randomInt(800, 1200); // Pico mañana - if (hour >= 14 && hour <= 17) return randomInt(700, 1000); // Pico tarde - if (hour >= 8 && hour <= 18) return randomInt(300, 600); // Horario laboral - return randomInt(50, 200); // Fuera de horario + if (hour >= 9 && hour <= 11) return randomInt(800, 1200); // Morning peak + if (hour >= 14 && hour <= 17) return randomInt(700, 1000); // Afternoon peak + if (hour >= 8 && hour <= 18) return randomInt(300, 600); // Business hours + return randomInt(50, 200); // After hours }); return distribution; }; -// v2.0: Calcular % fuera de horario +// v2.0: Calculate % after hours const calculateOffHoursPct = (hourly_distribution: number[]): number => { const total = hourly_distribution.reduce((a, b) => a + b, 0); - if (total === 0) return 0; // Evitar división por cero + if (total === 0) return 0; // Avoid division by zero const off_hours = hourly_distribution.slice(0, 8).reduce((a, b) => a + b, 0) + hourly_distribution.slice(19, 24).reduce((a, b) => a + b, 0); return off_hours / total; @@ -500,13 +500,13 @@ const calculateOffHoursPct = 
(hourly_distribution: number[]): number => { const identifyPeakHours = (hourly_distribution: number[]): number[] => { if (!hourly_distribution || hourly_distribution.length === 0) return []; const sorted = [...hourly_distribution].sort((a, b) => b - a); - const threshold = sorted[Math.min(2, sorted.length - 1)] || 0; // Top 3 o máximo disponible + const threshold = sorted[Math.min(2, sorted.length - 1)] || 0; // Top 3 or maximum available return hourly_distribution .map((val, idx) => val >= threshold ? idx : -1) .filter(idx => idx !== -1); }; -// v2.1: Generar heatmap con nueva lógica de transformación (3 dimensiones) +// v2.1: Generate heatmap with new transformation logic (3 dimensions) const generateHeatmapData = ( costPerHour: number = 20, avgCsat: number = 85, @@ -516,45 +516,45 @@ const generateHeatmapData = ( const COST_PER_SECOND = costPerHour / 3600; return skills.map(skill => { - const volume = randomInt(800, 5500); // Volumen mensual (ampliado para cubrir rango de repetitividad) + const volume = randomInt(800, 5500); // Monthly volume (expanded to cover repeatability range) // Simular raw data: duration_talk, hold_time, wrap_up_time const avg_talk_time = randomInt(240, 450); // segundos const avg_hold_time = randomInt(15, 80); // segundos const avg_wrap_up = randomInt(10, 50); // segundos - const aht_mean = avg_talk_time + avg_hold_time + avg_wrap_up; // AHT promedio + const aht_mean = avg_talk_time + avg_hold_time + avg_wrap_up; // Average AHT // Simular desviación estándar del AHT (para CV) const aht_std = randomInt(Math.round(aht_mean * 0.15), Math.round(aht_mean * 0.60)); // 15-60% del AHT const cv_aht = aht_std / aht_mean; // Coeficiente de Variación - // Transfer rate (para complejidad inversa) + // Transfer rate (for inverse complexity) const transfer_rate = randomInt(5, 35); // % const fcr_approx = 100 - transfer_rate; // FCR aproximado - // Coste del período (mensual) - con factor de productividad 70% + // Period cost (monthly) - with 70% 
productivity factor const effectiveProductivity = 0.70; const period_cost = Math.round((aht_mean / 3600) * costPerHour * volume / effectiveProductivity); - const annual_cost = period_cost; // Renombrado por compatibilidad, pero es coste mensual - // CPI = coste por interacción + const annual_cost = period_cost; // Renamed for compatibility, but it is monthly cost + // CPI = cost per interaction const cpi = volume > 0 ? period_cost / volume : 0; // === NUEVA LÓGICA: 3 DIMENSIONES === - // Dimensión 1: Predictibilidad (Proxy: CV del AHT) + // Dimension 1: Predictability (Proxy: AHT CV) // Fórmula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10))) const predictability_score = Math.max(0, Math.min(10, 10 - ((cv_aht - 0.3) / 1.2 * 10) )); - // Dimensión 2: Complejidad Inversa (Proxy: Tasa de Transferencia) + // Dimension 2: Inverse Complexity (Proxy: Transfer Rate) // Fórmula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10))) const complexity_inverse_score = Math.max(0, Math.min(10, 10 - ((transfer_rate / 100 - 0.05) / 0.25 * 10) )); - // Dimensión 3: Repetitividad/Impacto (Proxy: Volumen) - // > 5,000 = 10, < 100 = 0, interpolación lineal entre 100-5000 + // Dimension 3: Repeatability/Impact (Proxy: Volume) + // > 5,000 = 10, < 100 = 0, linear interpolation between 100-5000 let repetitivity_score: number; if (volume >= 5000) { repetitivity_score = 10; @@ -564,8 +564,8 @@ const generateHeatmapData = ( repetitivity_score = ((volume - 100) / (5000 - 100)) * 10; } - // Agentic Readiness Score (Promedio ponderado) - // Pesos: Predictibilidad 40%, Complejidad 35%, Repetitividad 25% + // Agentic Readiness Score (Weighted average) + // Weights: Predictability 40%, Complexity 35%, Repeatability 25% const agentic_readiness_score = predictability_score * 0.40 + complexity_inverse_score * 0.35 + @@ -600,7 +600,7 @@ const generateHeatmapData = ( skill, segment, volume, - cost_volume: volume, // En datos sintéticos, asumimos que todos son non-abandon + cost_volume: volume, // In synthetic 
data, we assume all are non-abandon - aht_seconds: aht_mean, // Renombrado para compatibilidad + aht_seconds: aht_mean, // Renamed for compatibility metrics: { fcr: isNaN(fcr_approx) ? 0 : Math.max(0, Math.min(100, Math.round(fcr_approx))), @@ -618,7 +618,7 @@ transfer_rate }, automation_readiness, - // Nuevas dimensiones (v2.1) + // New dimensions (v2.1) dimensions: { predictability: Math.round(predictability_score * 10) / 10, complexity_inverse: Math.round(complexity_inverse_score * 10) / 10, @@ -629,7 +629,7 @@ }); }; -// v2.0: Añadir NPV y costBreakdown +// v2.0: Add NPV and costBreakdown const generateEconomicModelData = (): EconomicModelData => { const currentAnnualCost = randomInt(800000, 2500000); const annualSavings = randomInt(150000, 500000); @@ -647,9 +647,9 @@ const generateEconomicModelData = (): EconomicModelData => { const savingsBreakdown = [ - { category: 'Automatización de tareas', amount: annualSavings * 0.45, percentage: 45 }, + { category: 'Task automation', amount: annualSavings * 0.45, percentage: 45 }, - { category: 'Eficiencia operativa', amount: annualSavings * 0.30, percentage: 30 }, - { category: 'Mejora FCR', amount: annualSavings * 0.15, percentage: 15 }, - { category: 'Reducción attrition', amount: annualSavings * 0.075, percentage: 7.5 }, + { category: 'Operational efficiency', amount: annualSavings * 0.30, percentage: 30 }, + { category: 'FCR Improvement', amount: annualSavings * 0.15, percentage: 15 }, + { category: 'Attrition reduction', amount: annualSavings * 0.075, percentage: 7.5 }, - { category: 'Otros', amount: annualSavings * 0.025, percentage: 2.5 }, + { category: 'Other', amount: annualSavings * 0.025, percentage: 2.5 }, ]; @@ -673,7 +673,7 @@ }; }; -// v2.0: Añadir percentiles múltiples +// v2.0: Add multiple percentiles const generateBenchmarkData = (): BenchmarkDataPoint[] => { const userAHT = randomInt(380, 450); const industryAHT = 420; @@ -686,7 +686,7 @@ const generateBenchmarkData = (): BenchmarkDataPoint[] => { return [ { - kpi: 'AHT Promedio', + kpi: 'Average AHT', userValue: userAHT, userDisplay:
`${userAHT}s`, industryValue: industryAHT, @@ -746,12 +746,12 @@ export const generateAnalysis = async ( useSynthetic?: boolean, authHeaderOverride?: string ): Promise => { - // Si hay archivo, procesarlo - // Si hay archivo, primero intentamos usar el backend + // If there is a file, process it + // If there is a file, first try to use the backend if (file && !useSynthetic) { console.log('📡 Processing file (API first):', file.name); - // Pre-parsear archivo para obtener dateRange y interacciones (se usa en ambas rutas) + // Pre-parse file to get dateRange and interactions (used in both routes) let dateRange: { min: string; max: string } | undefined; let parsedInteractions: RawInteraction[] | undefined; try { @@ -759,20 +759,20 @@ export const generateAnalysis = async ( const interactions = await parseFile(file); const validation = validateInteractions(interactions); dateRange = validation.stats.dateRange || undefined; - parsedInteractions = interactions; // Guardar para usar en drilldownData + parsedInteractions = interactions; // Save to use in drilldownData console.log(`📅 Date range extracted: ${dateRange?.min} to ${dateRange?.max}`); console.log(`📊 Parsed ${interactions.length} interactions for drilldown`); - // Cachear el archivo CSV en el servidor para uso futuro + // Cache the CSV file on the server for future use try { if (authHeaderOverride && file) { await saveFileToServerCache(authHeaderOverride, file, costPerHour); - console.log(`💾 Archivo CSV cacheado en el servidor para uso futuro`); + console.log(`💾 CSV file cached on server for future use`); } else { console.warn('⚠️ No se pudo cachear: falta authHeader o file'); } } catch (cacheError) { - console.warn('⚠️ No se pudo cachear archivo:', cacheError); + console.warn('⚠️ Could not cache file:', cacheError); } } catch (e) { console.warn('⚠️ Could not extract dateRange from file:', e); @@ -791,30 +791,30 @@ export const generateAnalysis = async ( const mapped = mapBackendResultsToAnalysisData(raw, tier); 
- // Añadir dateRange extraído del archivo + // Add dateRange extracted from file mapped.dateRange = dateRange; - // Heatmap: usar cálculos del frontend (parsedInteractions) para consistencia - // Esto asegura que dashboard muestre los mismos valores que los logs de realDataAnalysis + // Heatmap: use frontend calculations (parsedInteractions) for consistency + // This ensures dashboard shows the same values as realDataAnalysis logs if (parsedInteractions && parsedInteractions.length > 0) { const skillMetrics = calculateSkillMetrics(parsedInteractions, costPerHour); mapped.heatmapData = generateHeatmapFromMetrics(skillMetrics, avgCsat, segmentMapping); - console.log('📊 Heatmap generado desde frontend (parsedInteractions) - métricas consistentes'); + console.log('📊 Heatmap generated from frontend (parsedInteractions) - consistent metrics'); } else { - // Fallback: usar backend si no hay parsedInteractions + // Fallback: use backend if there are no parsedInteractions mapped.heatmapData = buildHeatmapFromBackend( raw, costPerHour, avgCsat, segmentMapping ); - console.log('📊 Heatmap generado desde backend (fallback - sin parsedInteractions)'); + console.log('📊 Heatmap generated from backend (fallback - without parsedInteractions)'); } - // v4.5: SINCRONIZAR CPI de dimensión economía con heatmapData para consistencia entre tabs - // El heatmapData contiene el CPI calculado correctamente (con cost_volume ponderado) - // La dimensión economía fue calculada en mapBackendResultsToAnalysisData con otra fórmula - // Actualizamos la dimensión para que muestre el mismo valor que Executive Summary + // v4.5: SYNCHRONIZE CPI from economy dimension with heatmapData for consistency between tabs + // The heatmapData contains the correctly calculated CPI (with weighted cost_volume) + // The economy dimension was calculated in mapBackendResultsToAnalysisData with another formula + // We update the dimension to show the same value as Executive Summary if (mapped.heatmapData && 
mapped.heatmapData.length > 0) { const heatmapData = mapped.heatmapData; const totalCostVolume = heatmapData.reduce((sum, h) => sum + (h.cost_volume || h.volume), 0); @@ -822,7 +822,7 @@ export const generateAnalysis = async ( let globalCPI: number; if (hasCpiField) { - // CPI real disponible: promedio ponderado por cost_volume + // Real CPI available: weighted average by cost_volume globalCPI = totalCostVolume > 0 ? heatmapData.reduce((sum, h) => sum + (h.cpi || 0) * (h.cost_volume || h.volume), 0) / totalCostVolume : 0; @@ -832,21 +832,21 @@ export const generateAnalysis = async ( globalCPI = totalCostVolume > 0 ? totalAnnualCost / totalCostVolume : 0; } - // Actualizar la dimensión de economía con el CPI calculado desde heatmap - // Buscar tanto economy_costs (backend) como economy_cpi (frontend fallback) + // Update economy dimension with CPI calculated from heatmap + // Search for both economy_costs (backend) and economy_cpi (frontend fallback) const economyDimIdx = mapped.dimensions.findIndex(d => d.id === 'economy_costs' || d.name === 'economy_costs' || d.id === 'economy_cpi' || d.name === 'economy_cpi' ); if (economyDimIdx >= 0 && globalCPI > 0) { - // Usar benchmark de aerolíneas (€3.50) para consistencia con ExecutiveSummaryTab + // Use airline benchmark (€3.50) for consistency with ExecutiveSummaryTab // Percentiles: p25=2.20, p50=3.50, p75=4.50, p90=5.50 const CPI_BENCHMARK = 3.50; const cpiDiff = globalCPI - CPI_BENCHMARK; - // Para CPI invertido: menor es mejor + // For inverted CPI: lower is better const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 
'neutral' : 'negative'; - // Calcular score basado en percentiles aerolíneas + // Calculate score based on airline percentiles let newScore: number; if (globalCPI <= 2.20) newScore = 100; else if (globalCPI <= 3.50) newScore = 80; @@ -865,33 +865,33 @@ export const generateAnalysis = async ( } } - // v3.5: Calcular drilldownData PRIMERO (necesario para opportunities y roadmap) + // v3.5: Calculate drilldownData FIRST (necessary for opportunities and roadmap) if (parsedInteractions && parsedInteractions.length > 0) { mapped.drilldownData = calculateDrilldownMetrics(parsedInteractions, costPerHour); - console.log(`📊 Drill-down calculado: ${mapped.drilldownData.length} skills, ${mapped.drilldownData.filter(d => d.isPriorityCandidate).length} candidatos prioritarios`); + console.log(`📊 Drill-down calculated: ${mapped.drilldownData.length} skills, ${mapped.drilldownData.filter(d => d.isPriorityCandidate).length} priority candidates`); - // v4.4: Cachear drilldownData en el servidor ANTES de retornar (fix: era fire-and-forget) - // Esto asegura que el cache esté disponible cuando el usuario haga "Usar Cache" + // v4.4: Cache drilldownData on server BEFORE returning (fix: was fire-and-forget) + // This ensures cache is available when user clicks "Use Cache" if (authHeaderOverride && mapped.drilldownData.length > 0) { try { const cacheSuccess = await saveDrilldownToServerCache(authHeaderOverride, mapped.drilldownData); if (cacheSuccess) { - console.log('💾 DrilldownData cacheado en servidor correctamente'); + console.log('💾 DrilldownData cached on server successfully'); } else { - console.warn('⚠️ No se pudo cachear drilldownData - fallback a heatmap en próximo uso'); + console.warn('⚠️ Could not cache drilldownData - fallback to heatmap on next use'); } } catch (cacheErr) { console.warn('⚠️ Error cacheando drilldownData:', cacheErr); } } - // Usar oportunidades y roadmap basados en drilldownData (datos reales) + // Use opportunities and roadmap based on drilldownData (real 
data) mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour); mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour); console.log(`📊 Opportunities: ${mapped.opportunities.length}, Roadmap: ${mapped.roadmap.length}`); } else { console.warn('⚠️ No hay interacciones parseadas, usando heatmap para drilldown'); - // v4.3: Generar drilldownData desde heatmap para usar mismas funciones + // v4.3: Generate drilldownData from heatmap to use same functions mapped.drilldownData = generateDrilldownFromHeatmap(mapped.heatmapData, costPerHour); mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour); mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour); @@ -901,7 +901,7 @@ export const generateAnalysis = async ( mapped.findings = generateFindingsFromData(mapped); mapped.recommendations = generateRecommendationsFromData(mapped); - // Benchmark: de momento no tenemos datos reales + // Benchmark: for now we do not have real data mapped.benchmarkData = []; console.log( @@ -914,10 +914,10 @@ export const generateAnalysis = async ( const status = apiError?.status; const msg = (apiError as Error).message || ''; - // 🔐 Si es un error de autenticación (401), NO hacemos fallback + // 🔐 If it is an authentication error (401), we do NOT fallback if (status === 401 || msg.includes('401')) { console.error( - '❌ Error de autenticación en backend, abortando análisis (sin fallback).' + '❌ Authentication error in backend, aborting analysis (no fallback).' 
); throw apiError; } @@ -956,28 +956,28 @@ } catch (error) { console.error('❌ Error processing file:', error); throw new Error( - `Error procesando archivo: ${(error as Error).message}` + `Error processing file: ${(error as Error).message}` ); } } - // Si hay URL de Google Sheets, procesarla (TODO: implementar) + // If there is a Google Sheets URL, process it (TODO: implement) if (sheetUrl && !useSynthetic) { console.warn('🔗 Google Sheets URL processing not implemented yet, using synthetic data'); } - // Generar datos sintéticos (fallback) + // Generate synthetic data (fallback) console.log('✨ Generating synthetic data'); return generateSyntheticAnalysis(tier, costPerHour, avgCsat, segmentMapping); }; /** - * Genera análisis usando el archivo CSV cacheado en el servidor - * Permite re-analizar sin necesidad de subir el archivo de nuevo + * Generates analysis using the CSV file cached on the server + * Allows re-analysis without needing to upload the file again - * Funciona entre diferentes navegadores y dispositivos + * Works across different browsers and devices * - * v3.5: Descarga el CSV cacheado para parsear localmente y obtener - * todas las colas originales (original_queue_id) en lugar de solo + * v3.5: Downloads the cached CSV to parse locally and obtain + * all original queues (original_queue_id) instead of only - * las 9 categorías agregadas (queue_skill) + * the 9 aggregated categories (queue_skill) */ export const generateAnalysisFromCache = async ( @@ -989,14 +989,14 @@ export const generateAnalysisFromCache = async ( ): Promise => { console.log('💾 Analyzing from server-cached file...'); - // Verificar que tenemos authHeader + // Verify that we have authHeader if (!authHeaderOverride) { - throw new Error('Se requiere autenticación para acceder a la caché del servidor.'); + throw new Error('Authentication required to access server cache.'); } const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || 'http://localhost:8000'; - // Preparar datos de economía + // Prepare economic data const economyData = {
costPerHour, avgCsat, @@ -1019,7 +1019,7 @@ export const generateAnalysisFromCache = async ( body: formData, }); - // Obtener drilldownData cacheado (pequeño JSON, muy rápido) + // Get cached drilldownData (small JSON, very fast) const drilldownPromise = getCachedDrilldown(authHeaderOverride); // Esperar ambas operaciones en paralelo @@ -1033,13 +1033,13 @@ export const generateAnalysisFromCache = async ( try { if (response.status === 404) { - throw new Error('No hay archivo cacheado en el servidor. Por favor, sube un archivo CSV primero.'); + throw new Error('No file cached on server. Please upload a CSV file first.'); } if (!response.ok) { const errorText = await response.text(); console.error('❌ Backend error:', response.status, errorText); - throw new Error(`Error del servidor (${response.status}): ${errorText}`); + throw new Error(`Server error (${response.status}): ${errorText}`); } const rawResponse = await response.json(); @@ -1060,12 +1060,12 @@ export const generateAnalysisFromCache = async ( console.log('📊 Mapped data summaryKpis:', mapped.summaryKpis?.length || 0); console.log('📊 Mapped data dimensions:', mapped.dimensions?.length || 0); - // Añadir dateRange desde el backend + // Add dateRange from backend if (dateRangeFromBackend && dateRangeFromBackend.min && dateRangeFromBackend.max) { mapped.dateRange = dateRangeFromBackend; } - // Heatmap: construir a partir de datos reales del backend + // Heatmap: build from real backend data mapped.heatmapData = buildHeatmapFromBackend( raw, costPerHour, @@ -1074,7 +1074,7 @@ export const generateAnalysisFromCache = async ( ); console.log('📊 Heatmap data points:', mapped.heatmapData?.length || 0); - // v4.6: SINCRONIZAR CPI de dimensión economía con heatmapData para consistencia entre tabs + // v4.6: SYNCHRONIZE CPI from economy dimension with heatmapData for consistency between tabs // (Mismo fix que en generateAnalysis - necesario para path de cache) if (mapped.heatmapData && mapped.heatmapData.length > 0) { 
const heatmapData = mapped.heatmapData; @@ -1102,7 +1102,7 @@ export const generateAnalysisFromCache = async ( } console.log(' - globalCPI calculated:', globalCPI.toFixed(4)); - // Buscar tanto economy_costs (backend) como economy_cpi (frontend fallback) + // Search for both economy_costs (backend) and economy_cpi (frontend fallback) const dimensionIds = mapped.dimensions.map(d => ({ id: d.id, name: d.name })); console.log(' - Available dimensions:', dimensionIds); @@ -1116,14 +1116,14 @@ export const generateAnalysisFromCache = async ( const oldKpi = mapped.dimensions[economyDimIdx].kpi; console.log(' - OLD KPI value:', oldKpi?.value); - // Usar benchmark de aerolíneas (€3.50) para consistencia con ExecutiveSummaryTab + // Use airline benchmark (€3.50) for consistency with ExecutiveSummaryTab // Percentiles: p25=2.20, p50=3.50, p75=4.50, p90=5.50 const CPI_BENCHMARK = 3.50; const cpiDiff = globalCPI - CPI_BENCHMARK; - // Para CPI invertido: menor es mejor + // For inverted CPI: lower is better const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 
'neutral' : 'negative'; - // Calcular score basado en percentiles aerolíneas + // Calculate score based on airline percentiles let newScore: number; if (globalCPI <= 2.20) newScore = 100; else if (globalCPI <= 3.50) newScore = 80; @@ -1146,13 +1146,13 @@ export const generateAnalysisFromCache = async ( } } - // === DrilldownData: usar cacheado (rápido) o fallback a heatmap === + // === DrilldownData: use cached (fast) or fallback to heatmap === if (cachedDrilldownData && cachedDrilldownData.length > 0) { - // Usar drilldownData cacheado directamente (ya calculado al subir archivo) + // Use cached drilldownData directly (already calculated when uploading file) mapped.drilldownData = cachedDrilldownData; - console.log(`📊 Usando drilldownData cacheado: ${mapped.drilldownData.length} skills`); + console.log(`📊 Using cached drilldownData: ${mapped.drilldownData.length} skills`); - // Contar colas originales para log + // Count original queues for log const uniqueOriginalQueues = new Set( mapped.drilldownData.flatMap((d: any) => (d.originalQueues || []).map((q: any) => q.original_queue_id) @@ -1165,13 +1165,13 @@ export const generateAnalysisFromCache = async ( mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour); console.log(`📊 Opportunities: ${mapped.opportunities.length}, Roadmap: ${mapped.roadmap.length}`); } else if (mapped.heatmapData && mapped.heatmapData.length > 0) { - // v4.5: No hay drilldownData cacheado - intentar calcularlo desde el CSV cacheado + // v4.5: No cached drilldownData - try to calculate it from cached CSV console.log('⚠️ No cached drilldownData found, attempting to calculate from cached CSV...'); let calculatedDrilldown = false; try { - // Descargar y parsear el CSV cacheado para calcular drilldown real + // Download and parse cached CSV to calculate real drilldown const cachedFile = await downloadCachedFile(authHeaderOverride); if (cachedFile) { console.log(`📥 Downloaded cached CSV: ${(cachedFile.size / 1024 / 
1024).toFixed(2)} MB`); @@ -1182,11 +1182,11 @@ export const generateAnalysisFromCache = async ( if (parsedInteractions && parsedInteractions.length > 0) { console.log(`📊 Parsed ${parsedInteractions.length} interactions from cached CSV`); - // Calcular drilldown real desde interacciones + // Calculate real drilldown from interactions mapped.drilldownData = calculateDrilldownMetrics(parsedInteractions, costPerHour); console.log(`📊 Calculated drilldown: ${mapped.drilldownData.length} skills`); - // Guardar drilldown en cache para próximo uso + // Save drilldown in cache for next use try { const saveSuccess = await saveDrilldownToServerCache(authHeaderOverride, mapped.drilldownData); if (saveSuccess) { @@ -1206,19 +1206,19 @@ export const generateAnalysisFromCache = async ( } if (!calculatedDrilldown) { - // Fallback final: usar heatmap (datos aproximados) + // Final fallback: use heatmap (approximate data) console.warn('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'); - console.warn('⚠️ FALLBACK ACTIVO: No hay drilldownData cacheado'); - console.warn(' Causa probable: El CSV no se subió correctamente o la caché expiró'); - console.warn(' Consecuencia: Usando datos agregados del heatmap (menos precisos)'); - console.warn(' Solución: Vuelva a subir el archivo CSV para obtener datos completos'); + console.warn('⚠️ FALLBACK ACTIVE: No cached drilldownData'); + console.warn(' Probable cause: CSV was not uploaded correctly or cache expired'); + console.warn(' Consequence: Using aggregated heatmap data (less precise)'); + console.warn(' Solution: Re-upload the CSV file to obtain complete data'); console.warn('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'); mapped.drilldownData = generateDrilldownFromHeatmap(mapped.heatmapData, costPerHour); - console.log(`📊 Drill-down desde heatmap (fallback): ${mapped.drilldownData.length} skills agregados`); + console.log(`📊 Drill-down from heatmap (fallback): ${mapped.drilldownData.length} aggregated 
skills`); } - // Usar mismas funciones que ruta fresh para consistencia + // Use same functions as fresh route for consistency mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour); mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour); } @@ -1227,7 +1227,7 @@ export const generateAnalysisFromCache = async ( mapped.findings = generateFindingsFromData(mapped); mapped.recommendations = generateRecommendationsFromData(mapped); - // Benchmark: vacío por ahora + // Benchmark: empty for now mapped.benchmarkData = []; // Marcar que viene del backend/caché @@ -1241,7 +1241,7 @@ export const generateAnalysisFromCache = async ( } }; -// Función auxiliar para generar drilldownData desde heatmapData cuando no tenemos parsedInteractions +// Helper function to generate drilldownData from heatmapData when we do not have parsedInteractions function generateDrilldownFromHeatmap( heatmapData: HeatmapDataPoint[], costPerHour: number @@ -1256,7 +1256,7 @@ function generateDrilldownFromHeatmap( ? 
(hp.dimensions.predictability * 0.4 + hp.dimensions.complexity_inverse * 0.35 + hp.dimensions.repetitivity * 0.25) : (hp.automation_readiness || 0) / 10; - // v4.4: Usar clasificarTierSimple con TODOS los datos disponibles del heatmap + // v4.4: Use clasificarTierSimple with ALL available heatmap data // cvAht, transferRate y fcrRate están en % (ej: 75), clasificarTierSimple espera decimal (ej: 0.75) const tier = clasificarTierSimple( agenticScore, @@ -1294,7 +1294,7 @@ function generateDrilldownFromHeatmap( }); } -// Función auxiliar para generar análisis con datos sintéticos +// Helper function to generate analysis with synthetic data const generateSyntheticAnalysis = ( tier: TierKey, costPerHour: number = 20, @@ -1310,7 +1310,7 @@ const generateSyntheticAnalysis = ( { label: "CSAT", value: `${randomFloat(4.1, 4.8, 1)}/5`, change: `-${randomFloat(0.1, 0.3, 1)}`, changeType: 'negative' }, ]; - // v3.0: 5 dimensiones viables + // v3.0: 5 viable dimensions const dimensionKeys = ['volumetry_distribution', 'operational_efficiency', 'effectiveness_resolution', 'complexity_predictability', 'agentic_readiness']; const dimensions: DimensionAnalysis[] = dimensionKeys.map(key => { @@ -1329,7 +1329,7 @@ const generateSyntheticAnalysis = ( icon: content.icon, }; - // Añadir distribution_data para volumetry_distribution + // Add distribution_data for volumetry_distribution if (key === 'volumetry_distribution') { const hourly = generateHourlyDistribution(); dimension.distribution_data = { @@ -1345,7 +1345,7 @@ const generateSyntheticAnalysis = ( // v2.0: Calcular Agentic Readiness Score let agenticReadiness = undefined; if (tier === 'gold' || tier === 'silver') { - // Generar datos sintéticos para el algoritmo + // Generate synthetic data for the algorithm const volumen_mes = randomInt(5000, 25000); const aht_values = Array.from({ length: 100 }, () => Math.max(180, normalRandom(420, 120)) // Media 420s, std 120s @@ -1363,7 +1363,7 @@ const generateSyntheticAnalysis = ( tier }; 
- // Datos adicionales para GOLD + // Additional data for GOLD if (tier === 'gold') { const hourly_distribution = dimensions.find(d => d.name === 'volumetry_distribution')?.distribution_data?.hourly; const off_hours_pct = dimensions.find(d => d.name === 'volumetry_distribution')?.distribution_data?.off_hours_pct; @@ -1392,7 +1392,7 @@ const generateSyntheticAnalysis = ( ) }); - // v4.3: Generar drilldownData desde heatmap para usar mismas funciones + // v4.3: Generate drilldownData from heatmap to use same functions const drilldownData = generateDrilldownFromHeatmap(heatmapData, costPerHour); return { diff --git a/frontend/utils/backendMapper.ts b/frontend/utils/backendMapper.ts index 837ece9..fc240ac 100644 --- a/frontend/utils/backendMapper.ts +++ b/frontend/utils/backendMapper.ts @@ -23,13 +23,13 @@ function safeNumber(value: any, fallback = 0): number { function normalizeAhtMetric(ahtSeconds: number): number { if (!Number.isFinite(ahtSeconds) || ahtSeconds <= 0) return 0; - // Ajusta estos números si ves que tus AHTs reales son muy distintos - const MIN_AHT = 300; // AHT muy bueno - const MAX_AHT = 1000; // AHT muy malo + // Adjust these numbers if your actual AHTs are very different + const MIN_AHT = 300; // Very good AHT + const MAX_AHT = 1000; // Very bad AHT const clamped = Math.max(MIN_AHT, Math.min(MAX_AHT, ahtSeconds)); - const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (mejor) -> 1 (peor) - const score = 100 - ratio * 100; // 100 (mejor) -> 0 (peor) + const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (better) -> 1 (worse) + const score = 100 - ratio * 100; // 100 (better) -> 0 (worse) return Math.round(score); } @@ -74,7 +74,7 @@ function getTopLabel( return String(labels[maxIdx]); } -// ==== Helpers para distribución horaria (desde heatmap_24x7) ==== +// ==== Helpers for hourly distribution (from heatmap_24x7) ==== function computeHourlyFromHeatmap(heatmap24x7: any): number[] { if (!Array.isArray(heatmap24x7) || 
!heatmap24x7.length) { @@ -146,7 +146,7 @@ function mapAgenticReadiness( description: value?.reason || value?.details?.description || - 'Sub-factor calculado a partir de KPIs agregados.', + 'Sub-factor calculated from aggregated KPIs.', details: value?.details || {}, }; } @@ -156,7 +156,7 @@ function mapAgenticReadiness( const interpretation = classification?.description || - `Puntuación de preparación agentic: ${score.toFixed(1)}/10`; + `Agentic readiness score: ${score.toFixed(1)}/10`; const computedCount = Object.values(sub_scores).filter( (s: any) => s?.computed @@ -176,7 +176,7 @@ function mapAgenticReadiness( }; } -// ==== Volumetría (dimensión + KPIs) ==== +// ==== Volumetry (dimension + KPIs) ==== function buildVolumetryDimension( raw: BackendRawResults @@ -216,13 +216,13 @@ function buildVolumetryDimension( const topChannel = getTopLabel(volumeByChannel?.labels, channelValues); const topSkill = getTopLabel(skillLabels, skillValues); - // Heatmap 24x7 -> distribución horaria + // Heatmap 24x7 -> hourly distribution const heatmap24x7 = volumetry?.heatmap_24x7; const hourly = computeHourlyFromHeatmap(heatmap24x7); const offHoursPct = hourly.length ? calcOffHoursPct(hourly) : 0; const peakHours = hourly.length ? 
findPeakHours(hourly) : []; - console.log('📊 Volumetría backend (mapper):', { + console.log('📊 Backend volumetry (mapper):', { volumetry, volumeByChannel, volumeBySkill, @@ -240,21 +240,21 @@ function buildVolumetryDimension( if (totalVolume > 0) { extraKpis.push({ - label: 'Volumen total (backend)', + label: 'Total volume (backend)', value: totalVolume.toLocaleString('es-ES'), }); } if (numChannels > 0) { extraKpis.push({ - label: 'Canales analizados', + label: 'Channels analyzed', value: String(numChannels), }); } if (numSkills > 0) { extraKpis.push({ - label: 'Skills analizadas', + label: 'Skills analyzed', value: String(numSkills), }); @@ -271,14 +271,14 @@ function buildVolumetryDimension( if (topChannel) { extraKpis.push({ - label: 'Canal principal', + label: 'Main channel', value: topChannel, }); } if (topSkill) { extraKpis.push({ - label: 'Skill principal', + label: 'Main skill', value: topSkill, }); } @@ -287,28 +287,28 @@ function buildVolumetryDimension( return { dimension: undefined, extraKpis }; } - // Calcular ratio pico/valle para evaluar concentración de demanda + // Calculate peak/valley ratio to evaluate demand concentration const validHourly = hourly.filter(v => v > 0); const maxHourly = validHourly.length > 0 ? Math.max(...validHourly) : 0; const minHourly = validHourly.length > 0 ? Math.min(...validHourly) : 1; const peakValleyRatio = minHourly > 0 ? 
maxHourly / minHourly : 1; console.log(`⏰ Hourly distribution (backend path): total=${totalVolume}, peak=${maxHourly}, valley=${minHourly}, ratio=${peakValleyRatio.toFixed(2)}`); - // Score basado en: - // - % fuera de horario (>30% penaliza) - // - Ratio pico/valle (>3x penaliza) - // NO penalizar por tener volumen alto + // Score based on: + // - % off-hours (>30% penalty) + // - Peak/valley ratio (>3x penalty) + // DO NOT penalize for having high volume let score = 100; - // Penalización por fuera de horario + // Penalty for off-hours const offHoursPctValue = offHoursPct * 100; if (offHoursPctValue > 30) { - score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts por cada % sobre 30% + score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts per % over 30% } else if (offHoursPctValue > 20) { - score -= (offHoursPctValue - 20); // -1 pt por cada % entre 20-30% + score -= (offHoursPctValue - 20); // -1 pt per % between 20-30% } - // Penalización por ratio pico/valle alto + // Penalty for high peak/valley ratio if (peakValleyRatio > 5) { score -= 30; } else if (peakValleyRatio > 3) { @@ -321,32 +321,32 @@ const summaryParts: string[] = []; summaryParts.push( - `${totalVolume.toLocaleString('es-ES')} interacciones analizadas.` + `${totalVolume.toLocaleString('es-ES')} interactions analyzed.` ); summaryParts.push( - `${(offHoursPct * 100).toFixed(0)}% fuera de horario laboral (8-19h).` + `${(offHoursPct * 100).toFixed(0)}% outside business hours (8-19h).` ); if (peakValleyRatio > 2) { summaryParts.push( - `Ratio pico/valle: ${peakValleyRatio.toFixed(1)}x - alta concentración de demanda.` + `Peak/valley ratio: ${peakValleyRatio.toFixed(1)}x - high demand concentration.` ); } if (topSkill) { - summaryParts.push(`Skill principal: ${topSkill}.`); + summaryParts.push(`Main skill: ${topSkill}.`); } - // Métrica principal accionable: % fuera de horario + // Main actionable metric: % off-hours const dimension: DimensionAnalysis = {
id: 'volumetry_distribution', name: 'volumetry_distribution', - title: 'Volumetría y distribución de demanda', + title: 'Volumetry and demand distribution', score, percentile: undefined, summary: summaryParts.join(' '), kpi: { - label: 'Fuera de horario', + label: 'Off-hours', value: `${(offHoursPct * 100).toFixed(0)}%`, - change: peakValleyRatio > 2 ? `Pico/valle: ${peakValleyRatio.toFixed(1)}x` : undefined, + change: peakValleyRatio > 2 ? `Peak/valley: ${peakValleyRatio.toFixed(1)}x` : undefined, changeType: offHoursPct > 0.3 ? 'negative' : offHoursPct > 0.2 ? 'neutral' : 'positive' }, icon: BarChartHorizontal, @@ -362,7 +362,7 @@ function buildVolumetryDimension( return { dimension, extraKpis }; } -// ==== Eficiencia Operativa (v3.2 - con segmentación horaria) ==== +// ==== Operational Efficiency (v3.2 - with hourly segmentation) ==== function buildOperationalEfficiencyDimension( raw: BackendRawResults, @@ -371,25 +371,25 @@ function buildOperationalEfficiencyDimension( const op = raw?.operational_performance; if (!op) return undefined; - // AHT Global + // Global AHT const ahtP50 = safeNumber(op.aht_distribution?.p50, 0); const ahtP90 = safeNumber(op.aht_distribution?.p90, 0); const ratioGlobal = ahtP90 > 0 && ahtP50 > 0 ? 
ahtP90 / ahtP50 : safeNumber(op.aht_distribution?.p90_p50_ratio, 1.5); - // AHT Horario Laboral (8-19h) - estimación basada en distribución - // Asumimos que el AHT en horario laboral es ligeramente menor (más eficiente) - const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% más eficiente en horario laboral - const ratioBusinessHours = ratioGlobal * 0.85; // Menor variabilidad en horario laboral + // Business Hours AHT (8-19h) - estimation based on distribution + // We assume that AHT during business hours is slightly lower (more efficient) + const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% more efficient during business hours + const ratioBusinessHours = ratioGlobal * 0.85; // Lower variability during business hours - // Determinar si la variabilidad se reduce fuera de horario + // Determine if variability reduces outside hours const variabilityReduction = ratioGlobal - ratioBusinessHours; const variabilityInsight = variabilityReduction > 0.3 - ? 'La variabilidad se reduce significativamente en horario laboral.' + ? 'Variability significantly reduces during business hours.' : variabilityReduction > 0.1 - ? 'La variabilidad se mantiene similar en ambos horarios.' - : 'La variabilidad es consistente independientemente del horario.'; + ? 'Variability remains similar in both schedules.' + : 'Variability is consistent regardless of schedule.'; - // Score basado en escala definida: + // Score based on defined scale: // <1.5 = 100pts, 1.5-2.0 = 70pts, 2.0-2.5 = 50pts, 2.5-3.0 = 30pts, >3.0 = 20pts let score: number; if (ratioGlobal < 1.5) { @@ -404,9 +404,9 @@ function buildOperationalEfficiencyDimension( score = 20; } - // Summary con segmentación - let summary = `AHT Global: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `; - summary += `AHT Horario Laboral (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. 
`; + // Summary with segmentation + let summary = `Global AHT: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `; + summary += `Business Hours AHT (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. `; summary += variabilityInsight; // KPI principal: AHT P50 (industry standard for operational efficiency) @@ -420,7 +420,7 @@ const dimension: DimensionAnalysis = { id: 'operational_efficiency', name: 'operational_efficiency', - title: 'Eficiencia Operativa', + title: 'Operational Efficiency', score, percentile: undefined, summary, @@ -431,7 +431,7 @@ return dimension; } -// ==== Efectividad & Resolución (v3.2 - enfocada en FCR Técnico) ==== +// ==== Effectiveness & Resolution (v3.2 - focused on Technical FCR) ==== function buildEffectivenessResolutionDimension( raw: BackendRawResults @@ -439,20 +439,20 @@ const op = raw?.operational_performance; if (!op) return undefined; - // FCR Técnico = 100 - transfer_rate (comparable con benchmarks de industria) - // Usamos escalation_rate que es la tasa de transferencias + // Technical FCR = 100 - transfer_rate (comparable with industry benchmarks) + // We use escalation_rate which is the transfer rate const escalationRate = safeNumber(op.escalation_rate, NaN); const abandonmentRate = safeNumber(op.abandonment_rate, 0); - // FCR Técnico: 100 - tasa de transferencia + // Technical FCR: 100 - transfer rate const fcrRate = Number.isFinite(escalationRate) && escalationRate >= 0 ? Math.max(0, Math.min(100, 100 - escalationRate)) - : 70; // valor por defecto benchmark aéreo + : 70; // default airline benchmark value - // Tasa de transferencia (complemento del FCR Técnico) + // Transfer rate (complement of Technical FCR) const transferRate = Number.isFinite(escalationRate) ?
escalationRate : 100 - fcrRate; - // Score basado en FCR Técnico (benchmark sector aéreo: 85-90%) + // Score based on Technical FCR (benchmark airline sector: 85-90%) // FCR >= 90% = 100pts, 85-90% = 80pts, 80-85% = 60pts, 75-80% = 40pts, <75% = 20pts let score: number; if (fcrRate >= 90) { @@ -467,25 +467,25 @@ function buildEffectivenessResolutionDimension( score = 20; } - // Penalización adicional por abandono alto (>8%) + // Additional penalty for high abandonment (>8%) if (abandonmentRate > 8) { score = Math.max(0, score - Math.round((abandonmentRate - 8) * 2)); } - // Summary enfocado en FCR Técnico - let summary = `FCR Técnico: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `; - summary += `Tasa de transferencia: ${transferRate.toFixed(1)}%. `; + // Summary focused on Technical FCR + let summary = `Technical FCR: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `; + summary += `Transfer rate: ${transferRate.toFixed(1)}%. `; if (fcrRate >= 90) { - summary += 'Excelente resolución en primer contacto.'; + summary += 'Excellent first contact resolution.'; } else if (fcrRate >= 85) { - summary += 'Resolución dentro del benchmark del sector.'; + summary += 'Resolution within sector benchmark.'; } else { - summary += 'Oportunidad de mejora reduciendo transferencias.'; + summary += 'Opportunity to improve by reducing transfers.'; } const kpi: Kpi = { - label: 'FCR Técnico', + label: 'Technical FCR', value: `${fcrRate.toFixed(0)}%`, change: `Transfer: ${transferRate.toFixed(0)}%`, changeType: fcrRate >= 85 ? 'positive' : fcrRate >= 80 ? 
'neutral' : 'negative' @@ -494,7 +494,7 @@ function buildEffectivenessResolutionDimension( const dimension: DimensionAnalysis = { id: 'effectiveness_resolution', name: 'effectiveness_resolution', - title: 'Efectividad & Resolución', + title: 'Effectiveness & Resolution', score, percentile: undefined, summary, @@ -505,7 +505,7 @@ function buildEffectivenessResolutionDimension( return dimension; } -// ==== Complejidad & Predictibilidad (v3.4 - basada en CV AHT per industry standards) ==== +// ==== Complexity & Predictability (v3.4 - based on CV AHT per industry standards) ==== function buildComplexityPredictabilityDimension( raw: BackendRawResults @@ -535,9 +535,9 @@ function buildComplexityPredictabilityDimension( } } - // Score basado en CV AHT (benchmark: <75% = excelente, <100% = aceptable) + // Score based on CV AHT (benchmark: <75% = excellent, <100% = acceptable) // CV <= 75% = 100pts (alta predictibilidad) - // CV 75-100% = 80pts (predictibilidad aceptable) + // CV 75-100% = 80pts (acceptable predictability) // CV 100-125% = 60pts (variabilidad moderada) // CV 125-150% = 40pts (alta variabilidad) // CV > 150% = 20pts (muy alta variabilidad) @@ -558,16 +558,16 @@ function buildComplexityPredictabilityDimension( let summary = `CV AHT: ${cvAhtPercent}% (benchmark: <75%). `; if (cvAhtPercent <= 75) { - summary += 'Alta predictibilidad: tiempos de atención consistentes. Excelente para planificación WFM.'; + summary += 'High predictability: consistent handling times. Excellent for WFM planning.'; } else if (cvAhtPercent <= 100) { - summary += 'Predictibilidad aceptable: variabilidad moderada en tiempos de atención.'; + summary += 'Acceptable predictability: moderate variability in handling times.'; } else if (cvAhtPercent <= 125) { - summary += 'Variabilidad notable: dificulta la planificación de recursos. Considerar estandarización.'; + summary += 'Notable variability: complicates resource planning. 
Consider standardization.'; } else { - summary += 'Alta variabilidad: tiempos muy dispersos. Priorizar scripts guiados y estandarización.'; + summary += 'High variability: very scattered times. Prioritize guided scripts and standardization.'; } - // Añadir info de Hold P50 promedio si está disponible (proxy de complejidad) + // Add Hold P50 average info if available (complexity proxy) if (avgHoldP50 > 0) { summary += ` Hold Time P50: ${Math.round(avgHoldP50)}s.`; } @@ -583,7 +583,7 @@ const dimension: DimensionAnalysis = { id: 'complexity_predictability', name: 'complexity_predictability', - title: 'Complejidad & Predictibilidad', + title: 'Complexity & Predictability', score, percentile: undefined, summary, @@ -594,7 +594,7 @@ return dimension; } -// ==== Satisfacción del Cliente (v3.1) ==== +// ==== Customer Satisfaction (v3.1) ==== function buildSatisfactionDimension( raw: BackendRawResults @@ -604,19 +604,19 @@ const csatGlobalRaw = safeNumber(cs?.csat_global, NaN); const hasCSATData = Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0; - // Si no hay CSAT, mostrar dimensión con "No disponible" + // If there is no CSAT, show dimension with "Not available" const dimension: DimensionAnalysis = { id: 'customer_satisfaction', name: 'customer_satisfaction', - title: 'Satisfacción del Cliente', - score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indica N/A + title: 'Customer Satisfaction', + score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indicates N/A percentile: undefined, summary: hasCSATData - ? `CSAT global: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Nivel de satisfacción óptimo.' : csatGlobalRaw >= 3.5 ? 'Satisfacción aceptable, margen de mejora.' : 'Satisfacción baja, requiere atención urgente.'}` - : 'CSAT no disponible en el dataset. Para incluir esta dimensión, añadir datos de encuestas de satisfacción.', + ?
`Global CSAT: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Optimal satisfaction level.' : csatGlobalRaw >= 3.5 ? 'Acceptable satisfaction, room for improvement.' : 'Low satisfaction, requires urgent attention.'}` + : 'CSAT not available in dataset. To include this dimension, add satisfaction survey data.', kpi: { label: 'CSAT', - value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'No disponible', + value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'Not available', changeType: hasCSATData ? (csatGlobalRaw >= 4.0 ? 'positive' : csatGlobalRaw >= 3.5 ? 'neutral' : 'negative') : 'neutral' @@ -627,7 +627,7 @@ function buildSatisfactionDimension( return dimension; } -// ==== Economía - Coste por Interacción (v3.1) ==== +// ==== Economy - Cost per Interaction (v3.1) ==== function buildEconomyDimension( raw: BackendRawResults, @@ -637,9 +637,9 @@ function buildEconomyDimension( const op = raw?.operational_performance; const totalAnnual = safeNumber(econ?.cost_breakdown?.total_annual, 0); - // Benchmark CPI aerolíneas (consistente con ExecutiveSummaryTab) + // Airline CPI benchmark (consistent with ExecutiveSummaryTab) // p25: 2.20, p50: 3.50, p75: 4.50, p90: 5.50 - const CPI_BENCHMARK = 3.50; // p50 aerolíneas + const CPI_BENCHMARK = 3.50; // airline p50 if (totalAnnual <= 0 || totalInteractions <= 0) { return undefined; @@ -652,12 +652,12 @@ function buildEconomyDimension( // Calcular CPI usando cost_volume (non-abandoned) como denominador const cpi = costVolume > 0 ? 
totalAnnual / costVolume : totalAnnual / totalInteractions; - // Score basado en percentiles de aerolíneas (CPI invertido: menor = mejor) - // CPI <= 2.20 (p25) = 100pts (excelente, top 25%) + // Score based on airline percentiles (inverse CPI: lower = better) + // CPI <= 2.20 (p25) = 100pts (excellent, top 25%) // CPI 2.20-3.50 (p25-p50) = 80pts (bueno, top 50%) - // CPI 3.50-4.50 (p50-p75) = 60pts (promedio) + // CPI 3.50-4.50 (p50-p75) = 60pts (average) // CPI 4.50-5.50 (p75-p90) = 40pts (por debajo) - // CPI > 5.50 (>p90) = 20pts (crítico) + // CPI > 5.50 (>p90) = 20pts (critical) let score: number; if (cpi <= 2.20) { score = 100; @@ -674,24 +674,24 @@ function buildEconomyDimension( const cpiDiff = cpi - CPI_BENCHMARK; const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative'; - let summary = `Coste por interacción: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `; + let summary = `Cost per interaction: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `; if (cpi <= CPI_BENCHMARK) { - summary += 'Eficiencia de costes óptima, por debajo del benchmark del sector.'; + summary += 'Optimal cost efficiency, below sector benchmark.'; } else if (cpi <= 4.50) { - summary += 'Coste ligeramente por encima del benchmark, oportunidad de optimización.'; + summary += 'Cost slightly above benchmark, optimization opportunity.'; } else { - summary += 'Coste elevado respecto al sector. Priorizar iniciativas de eficiencia.'; + summary += 'High cost relative to sector. 
Prioritize efficiency initiatives.'; } const dimension: DimensionAnalysis = { id: 'economy_costs', name: 'economy_costs', - title: 'Economía & Costes', + title: 'Economy & Costs', score, percentile: undefined, summary, kpi: { - label: 'Coste por Interacción', + label: 'Cost per Interaction', value: `€${cpi.toFixed(2)}`, change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`, changeType: cpiStatus as 'positive' | 'neutral' | 'negative' @@ -779,7 +779,7 @@ function buildAgenticReadinessDimension( } -// ==== Economía y costes (economy_costs) ==== +// ==== Economy and costs (economy_costs) ==== function buildEconomicModel(raw: BackendRawResults): EconomicModelData { const econ = raw?.economy_costs; @@ -814,17 +814,17 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData { const savingsBreakdown = annualSavings ? [ { - category: 'Ineficiencias operativas (AHT, escalaciones)', + category: 'Operational inefficiencies (AHT, escalations)', amount: Math.round(annualSavings * 0.5), percentage: 50, }, { - category: 'Automatización de volumen repetitivo', + category: 'Automation of repetitive volume', amount: Math.round(annualSavings * 0.3), percentage: 30, }, { - category: 'Otros beneficios (calidad, CX)', + category: 'Other benefits (quality, CX)', amount: Math.round(annualSavings * 0.2), percentage: 20, }, @@ -834,7 +834,7 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData { const costBreakdown = currentAnnualCost ? 
[ { - category: 'Coste laboral', + category: 'Labor cost', amount: laborAnnual, percentage: Math.round( (laborAnnual / currentAnnualCost) * 100 @@ -848,7 +848,7 @@ ), }, { - category: 'Tecnología', + category: 'Technology', amount: techAnnual, percentage: Math.round( (techAnnual / currentAnnualCost) * 100 @@ -914,7 +914,7 @@ Math.min(100, Math.round(arScore * 10)) ); - // v3.3: 7 dimensiones (Complejidad recuperada con métrica Hold Time >60s) + // v3.3: 7 dimensions (Complexity recovered with Hold Time metric >60s) const { dimension: volumetryDimension, extraKpis } = buildVolumetryDimension(raw); const operationalEfficiencyDimension = buildOperationalEfficiencyDimension(raw); @@ -946,7 +946,7 @@ const csatAvg = computeCsatAverage(cs); - // CSAT global (opcional) + // Global CSAT (optional) const csatGlobalRaw = safeNumber(cs?.csat_global, NaN); const csatGlobal = Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0 @@ -954,7 +954,7 @@ : undefined; - // KPIs de resumen (los 4 primeros son los que se ven en "Métricas de Contacto") + // Summary KPIs (the first 4 are shown in "Contact Metrics") const summaryKpis: Kpi[] = []; // 1) Interacciones Totales (volumen backend) @@ -975,9 +975,9 @@ : 'N/D', }); - // 3) Tasa FCR + // 3) FCR Rate summaryKpis.push({ - label: 'Tasa FCR', + label: 'FCR Rate', value: fcrPct !== undefined ?
`${Math.round(fcrPct)}%` @@ -993,18 +993,18 @@ : 'N/D', }); - // --- KPIs adicionales, usados en otras secciones --- + // --- Additional KPIs, used in other sections --- if (numChannels > 0) { summaryKpis.push({ - label: 'Canales analizados', + label: 'Channels analyzed', value: String(numChannels), }); } if (numSkills > 0) { summaryKpis.push({ - label: 'Skills analizadas', + label: 'Skills analyzed', value: String(numSkills), }); } @@ -1027,13 +1027,13 @@ if (totalAnnual) { summaryKpis.push({ - label: 'Coste anual actual (backend)', + label: 'Current annual cost (backend)', value: `€${totalAnnual.toFixed(0)}`, }); } if (annualSavings) { summaryKpis.push({ - label: 'Ahorro potencial anual (backend)', + label: 'Annual potential savings (backend)', value: `€${annualSavings.toFixed(0)}`, }); } @@ -1043,22 +1043,22 @@ const economicModel = buildEconomicModel(raw); const benchmarkData = buildBenchmarkData(raw); - // Generar findings y recommendations basados en volumetría + // Generate findings and recommendations based on volumetry const findings: Finding[] = []; const recommendations: Recommendation[] = []; // Extraer offHoursPct de la dimensión de volumetría const offHoursPct = volumetryDimension?.distribution_data?.off_hours_pct ?? 0; - const offHoursPctValue = offHoursPct * 100; // Convertir de 0-1 a 0-100 + const offHoursPctValue = offHoursPct * 100; // Convert from 0-1 to 0-100 if (offHoursPctValue > 20) { const offHoursVolume = Math.round(totalVolume * offHoursPctValue / 100); findings.push({ type: offHoursPctValue > 30 ?
'critical' : 'warning', - title: 'Alto Volumen Fuera de Horario', + title: 'High Off-Hours Volume', - text: `${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario (8-19h)`, + text: `${offHoursPctValue.toFixed(0)}% of off-hours interactions (8-19h)`, dimensionId: 'volumetry_distribution', - description: `${offHoursVolume.toLocaleString()} interacciones (${offHoursPctValue.toFixed(1)}%) ocurren fuera de horario laboral. Oportunidad ideal para implementar agentes virtuales 24/7.`, + description: `${offHoursVolume.toLocaleString()} interactions (${offHoursPctValue.toFixed(1)}%) occur outside business hours. Ideal opportunity to implement 24/7 virtual agents.`, impact: offHoursPctValue > 30 ? 'high' : 'medium' }); @@ -1066,12 +1066,12 @@ export function mapBackendResultsToAnalysisData( const estimatedSavings = Math.round(offHoursVolume * estimatedContainment / 100); recommendations.push({ priority: 'high', - title: 'Implementar Agente Virtual 24/7', - text: `Desplegar agente virtual para atender ${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario`, - description: `${offHoursVolume.toLocaleString()} interacciones ocurren fuera de horario laboral (19:00-08:00). Un agente virtual puede resolver ~${estimatedContainment}% de estas consultas automáticamente.`, + title: 'Implement 24/7 Virtual Agent', + text: `Deploy virtual agent to handle ${offHoursPctValue.toFixed(0)}% of off-hours interactions`, + description: `${offHoursVolume.toLocaleString()} interactions occur outside business hours (19:00-08:00). 
A virtual agent can resolve ~${estimatedContainment}% of these queries automatically.`, dimensionId: 'volumetry_distribution', - impact: `Potencial de contención: ${estimatedSavings.toLocaleString()} interacciones/período`, - timeline: '1-3 meses' + impact: `Containment potential: ${estimatedSavings.toLocaleString()} interactions/period`, + timeline: '1-3 months' }); } @@ -1080,7 +1080,7 @@ export function mapBackendResultsToAnalysisData( overallHealthScore, summaryKpis: mergedKpis, dimensions, - heatmapData: [], // el heatmap por skill lo seguimos generando en el front + heatmapData: [], // skill heatmap still generated on frontend findings, recommendations, opportunities: [], @@ -1166,9 +1166,9 @@ export function buildHeatmapFromBackend( abandonment_rate: number; fcr_tecnico: number; fcr_real: number; - aht_mean: number; // AHT promedio del backend (solo VALID - consistente con fresh path) - aht_total: number; // AHT total (ALL rows incluyendo NOISE/ZOMBIE/ABANDON) - solo informativo - hold_time_mean: number; // Hold time promedio (consistente con fresh path - MEAN, no P50) + aht_mean: number; // Average AHT from backend (VALID only - consistent with fresh path) + aht_total: number; // Total AHT (ALL rows including NOISE/ZOMBIE/ABANDON) - informational only + hold_time_mean: number; // Average Hold time (consistent with fresh path - MEAN, not P50) }>(); for (const m of metricsBySkillRaw) { @@ -1178,9 +1178,9 @@ export function buildHeatmapFromBackend( abandonment_rate: safeNumber(m.abandonment_rate, NaN), fcr_tecnico: safeNumber(m.fcr_tecnico, NaN), fcr_real: safeNumber(m.fcr_real, NaN), - aht_mean: safeNumber(m.aht_mean, NaN), // AHT promedio (solo VALID) - aht_total: safeNumber(m.aht_total, NaN), // AHT total (ALL rows) - hold_time_mean: safeNumber(m.hold_time_mean, NaN), // Hold time promedio (MEAN) + aht_mean: safeNumber(m.aht_mean, NaN), // Average AHT (VALID only) + aht_total: safeNumber(m.aht_total, NaN), // Total AHT (ALL rows) + hold_time_mean: 
safeNumber(m.hold_time_mean, NaN), // Average Hold time (MEAN) }); } } @@ -1314,7 +1314,7 @@ export function buildHeatmapFromBackend( // Dimensiones agentic similares a las que tenías en generateHeatmapData, // pero usando valores reales en lugar de aleatorios. - // 1) Predictibilidad (menor CV => mayor puntuación) + // 1) Predictability (lower CV => higher score) const predictability_score = Math.max( 0, Math.min( @@ -1347,14 +1347,14 @@ } else { // NO usar estimación - usar valores globales del backend directamente // Esto asegura consistencia con el fresh path que usa valores directos del CSV - skillTransferRate = globalEscalation; // Usar tasa global, sin estimación + skillTransferRate = globalEscalation; // Use global rate, no estimation skillAbandonmentRate = abandonmentRateBackend; skillFcrTecnico = 100 - skillTransferRate; skillFcrReal = globalFcrPct; console.warn(`⚠️ No metrics_by_skill for skill ${skill} - using global rates`); } - // Complejidad inversa basada en transfer rate del skill + // Inverse complexity based on skill transfer rate const complexity_inverse_score = Math.max( 0, Math.min( @@ -1446,10 +1446,10 @@ volume, cost_volume: costVolume, aht_seconds: aht_mean, - aht_total: aht_total, // AHT con TODAS las filas (solo informativo) + aht_total: aht_total, // AHT with ALL rows (informational only) metrics: { fcr: Math.round(skillFcrReal), // FCR Real (sin transfer Y sin recontacto 7d) - fcr_tecnico: Math.round(skillFcrTecnico), // FCR Técnico (comparable con benchmarks) + fcr_tecnico: Math.round(skillFcrTecnico), // Technical FCR (comparable with benchmarks) aht: ahtMetric, csat: csatMetric0_100, hold_time: holdMetric, @@ -1457,12 +1457,12 @@ abandonment_rate: Math.round(skillAbandonmentRate), }, annual_cost, - cpi: skillCpi, // CPI real del backend (si disponible) + cpi: skillCpi, // Real CPI from backend (if available) 
variability: { cv_aht: Math.round(cv_aht * 100), // % cv_talk_time: 0, cv_hold_time: 0, - transfer_rate: skillTransferRate, // Transfer rate REAL o estimado + transfer_rate: skillTransferRate, // REAL or estimated transfer rate }, automation_readiness, dimensions: { @@ -1491,19 +1491,19 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData const benchmarkData: AnalysisData['benchmarkData'] = []; - // Benchmarks hardcoded para sector aéreo + // Hardcoded benchmarks for airline sector const AIRLINE_BENCHMARKS = { - aht_p50: 380, // segundos + aht_p50: 380, // seconds fcr: 70, // % (rango 68-72%) abandonment: 5, // % (rango 5-8%) ratio_p90_p50: 2.0, // ratio saludable cpi: 5.25 // € (rango €4.50-€6.00) }; - // 1. AHT Promedio (benchmark sector aéreo: 380s) + // 1. Average AHT (benchmark airline sector: 380s) const ahtP50 = safeNumber(op?.aht_distribution?.p50, 0); if (ahtP50 > 0) { - // Percentil: menor AHT = mejor. Si AHT <= benchmark = P75+ + // Percentile: lower AHT = better. If AHT <= benchmark = P75+ const ahtPercentile = ahtP50 <= AIRLINE_BENCHMARKS.aht_p50 ? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.aht_p50 - ahtP50) / 10)) : Math.max(10, 75 - Math.round((ahtP50 - AIRLINE_BENCHMARKS.aht_p50) / 5)); @@ -1521,15 +1521,15 @@ }); } - // 2. Tasa FCR (benchmark sector aéreo: 70%) + // 2. FCR Rate (benchmark airline sector: 70%) const fcrRate = safeNumber(op?.fcr_rate, NaN); if (Number.isFinite(fcrRate) && fcrRate >= 0) { - // Percentil: mayor FCR = mejor + // Percentile: higher FCR = better const fcrPercentile = fcrRate >= AIRLINE_BENCHMARKS.fcr ? 
Math.min(90, 50 + Math.round((fcrRate - AIRLINE_BENCHMARKS.fcr) * 2)) : Math.max(10, 50 - Math.round((AIRLINE_BENCHMARKS.fcr - fcrRate) * 2)); benchmarkData.push({ - kpi: 'Tasa FCR', + kpi: 'FCR Rate', userValue: fcrRate / 100, userDisplay: `${Math.round(fcrRate)}%`, industryValue: AIRLINE_BENCHMARKS.fcr / 100, @@ -1560,15 +1560,15 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData }); } - // 4. Tasa de Abandono (benchmark sector aéreo: 5%) + // 4. Abandonment Rate (benchmark airline sector: 5%) const abandonRate = safeNumber(op?.abandonment_rate, NaN); if (Number.isFinite(abandonRate) && abandonRate >= 0) { - // Percentil: menor abandono = mejor + // Percentile: lower abandonment = better const abandonPercentile = abandonRate <= AIRLINE_BENCHMARKS.abandonment ? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.abandonment - abandonRate) * 5)) : Math.max(10, 75 - Math.round((abandonRate - AIRLINE_BENCHMARKS.abandonment) * 5)); benchmarkData.push({ - kpi: 'Tasa de Abandono', + kpi: 'Abandonment Rate', userValue: abandonRate / 100, userDisplay: `${abandonRate.toFixed(1)}%`, industryValue: AIRLINE_BENCHMARKS.abandonment / 100, @@ -1581,11 +1581,11 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData }); } - // 5. Ratio P90/P50 (benchmark sector aéreo: <2.0) + // 5. Ratio P90/P50 (benchmark airline sector: <2.0) const ahtP90 = safeNumber(op?.aht_distribution?.p90, 0); const ratio = ahtP50 > 0 && ahtP90 > 0 ? ahtP90 / ahtP50 : 0; if (ratio > 0) { - // Percentil: menor ratio = mejor + // Percentile: lower ratio = better const ratioPercentile = ratio <= AIRLINE_BENCHMARKS.ratio_p90_p50 ? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.ratio_p90_p50 - ratio) * 30)) : Math.max(10, 75 - Math.round((ratio - AIRLINE_BENCHMARKS.ratio_p90_p50) * 30)); @@ -1603,13 +1603,13 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData }); } - // 6. Tasa de Transferencia/Escalación + // 6. 
Transfer/Escalation Rate const escalationRate = safeNumber(op?.escalation_rate, NaN); if (Number.isFinite(escalationRate) && escalationRate >= 0) { - // Menor escalación = mejor percentil + // Lower escalation = better percentile const escalationPercentile = Math.max(10, Math.min(90, Math.round(100 - escalationRate * 5))); benchmarkData.push({ - kpi: 'Tasa de Transferencia', + kpi: 'Transfer Rate', userValue: escalationRate / 100, userDisplay: `${escalationRate.toFixed(1)}%`, industryValue: 0.15, @@ -1622,7 +1622,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData }); } - // 7. CPI - Coste por Interacción (benchmark sector aéreo: €4.50-€6.00) + // 7. CPI - Cost per Interaction (benchmark airline sector: €4.50-€6.00) const econ = raw?.economy_costs; const totalAnnualCost = safeNumber(econ?.cost_breakdown?.total_annual, 0); const volumetry = raw?.volumetry; @@ -1634,7 +1634,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData if (totalAnnualCost > 0 && totalInteractions > 0) { const cpi = totalAnnualCost / totalInteractions; - // Menor CPI = mejor. Si CPI <= 4.50 = excelente (P90+), si CPI >= 6.00 = malo (P25-) + // Lower CPI = better. 
If CPI <= 4.50 = excellent (P90+), if CPI >= 6.00 = poor (P25-) let cpiPercentile: number; if (cpi <= 4.50) { cpiPercentile = Math.min(95, 90 + Math.round((4.50 - cpi) * 10)); @@ -1647,7 +1647,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData } benchmarkData.push({ - kpi: 'Coste por Interacción (CPI)', + kpi: 'Cost per Interaction (CPI)', userValue: cpi, userDisplay: `€${cpi.toFixed(2)}`, industryValue: AIRLINE_BENCHMARKS.cpi, diff --git a/frontend/utils/dataTransformation.ts b/frontend/utils/dataTransformation.ts index bccf476..dba39a1 100644 --- a/frontend/utils/dataTransformation.ts +++ b/frontend/utils/dataTransformation.ts @@ -1,11 +1,11 @@ // utils/dataTransformation.ts -// Pipeline de transformación de datos raw a métricas procesadas +// Raw data to processed metrics transformation pipeline import type { RawInteraction } from '../types'; /** - * Paso 1: Limpieza de Ruido - * Elimina interacciones con duration < 10 segundos (falsos contactos o errores de sistema) + * Step 1: Noise Cleanup + * Removes interactions with duration < 10 seconds (false contacts or system errors) */ export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteraction[] { const MIN_DURATION_SECONDS = 10; @@ -22,30 +22,30 @@ export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteracti const removedCount = interactions.length - cleaned.length; const removedPercentage = ((removedCount / interactions.length) * 100).toFixed(1); - console.log(`🧹 Limpieza de Ruido: ${removedCount} interacciones eliminadas (${removedPercentage}% del total)`); - console.log(`✅ Interacciones limpias: ${cleaned.length}`); + console.log(`🧹 Noise Cleanup: ${removedCount} interactions removed (${removedPercentage}% of total)`); + console.log(`✅ Clean interactions: ${cleaned.length}`); return cleaned; } /** - * Métricas base calculadas por skill + * Base metrics calculated by skill */ export interface SkillBaseMetrics { skill: string; - volume: 
number; // Número de interacciones - aht_mean: number; // AHT promedio (segundos) - aht_std: number; // Desviación estándar del AHT - transfer_rate: number; // Tasa de transferencia (0-100) - total_cost: number; // Coste total (€) + volume: number; // Number of interactions + aht_mean: number; // Average AHT (seconds) + aht_std: number; // AHT standard deviation + transfer_rate: number; // Transfer rate (0-100) + total_cost: number; // Total cost (€) - // Datos auxiliares para cálculos posteriores - aht_values: number[]; // Array de todos los AHT para percentiles + // Auxiliary data for subsequent calculations + aht_values: number[]; // Array of all AHT values for percentiles } /** - * Paso 2: Calcular Métricas Base por Skill - * Agrupa por skill y calcula volumen, AHT promedio, desviación estándar, tasa de transferencia y coste + * Step 2: Calculate Base Metrics by Skill + * Groups by skill and calculates volume, average AHT, standard deviation, transfer rate and cost */ export function calculateSkillBaseMetrics( interactions: RawInteraction[], @@ -53,7 +53,7 @@ export function calculateSkillBaseMetrics( ): SkillBaseMetrics[] { const COST_PER_SECOND = costPerHour / 3600; - // Agrupar por skill + // Group by skill const skillGroups = new Map(); interactions.forEach(interaction => { @@ -64,31 +64,31 @@ export function calculateSkillBaseMetrics( skillGroups.get(skill)!.push(interaction); }); - // Calcular métricas por skill + // Calculate metrics per skill const metrics: SkillBaseMetrics[] = []; skillGroups.forEach((skillInteractions, skill) => { const volume = skillInteractions.length; - // Calcular AHT para cada interacción + // Calculate AHT for each interaction const ahtValues = skillInteractions.map(i => i.duration_talk + i.hold_time + i.wrap_up_time ); - // AHT promedio + // Average AHT const ahtMean = ahtValues.reduce((sum, val) => sum + val, 0) / volume; - // Desviación estándar del AHT + // AHT standard deviation const variance = ahtValues.reduce((sum, val) 
=> sum + Math.pow(val - ahtMean, 2), 0 ) / volume; const ahtStd = Math.sqrt(variance); - // Tasa de transferencia + // Transfer rate const transferCount = skillInteractions.filter(i => i.transfer_flag).length; const transferRate = (transferCount / volume) * 100; - // Coste total + // Total cost const totalCost = ahtValues.reduce((sum, aht) => sum + (aht * COST_PER_SECOND), 0 ); @@ -104,82 +104,82 @@ export function calculateSkillBaseMetrics( }); }); - // Ordenar por volumen descendente + // Sort by descending volume metrics.sort((a, b) => b.volume - a.volume); - console.log(`📊 Métricas Base calculadas para ${metrics.length} skills`); + console.log(`📊 Base Metrics calculated for ${metrics.length} skills`); return metrics; } /** - * Dimensiones transformadas para Agentic Readiness Score + * Transformed dimensions for Agentic Readiness Score */ export interface SkillDimensions { skill: string; volume: number; - // Dimensión 1: Predictibilidad (0-10) + // Dimension 1: Predictability (0-10) predictability_score: number; - predictability_cv: number; // Coeficiente de Variación (para referencia) + predictability_cv: number; // Coefficient of Variation (for reference) - // Dimensión 2: Complejidad Inversa (0-10) + // Dimension 2: Inverse Complexity (0-10) complexity_inverse_score: number; - complexity_transfer_rate: number; // Tasa de transferencia (para referencia) + complexity_transfer_rate: number; // Transfer rate (for reference) - // Dimensión 3: Repetitividad/Impacto (0-10) + // Dimension 3: Repetitiveness/Impact (0-10) repetitivity_score: number; - // Datos auxiliares + // Auxiliary data aht_mean: number; total_cost: number; } /** - * Paso 3: Transformar Métricas Base a Dimensiones - * Aplica las fórmulas de normalización para obtener scores 0-10 + * Step 3: Transform Base Metrics to Dimensions + * Applies normalization formulas to obtain 0-10 scores */ export function transformToDimensions( baseMetrics: SkillBaseMetrics[] ): SkillDimensions[] { return 
baseMetrics.map(metric => { - // Dimensión 1: Predictibilidad (Proxy: Variabilidad del AHT) - // CV = desviación estándar / media + // Dimension 1: Predictability (Proxy: AHT Variability) + // CV = standard deviation / mean const cv = metric.aht_std / metric.aht_mean; - // Normalización: CV <= 0.3 → 10, CV >= 1.5 → 0 - // Fórmula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10))) + // Normalization: CV <= 0.3 → 10, CV >= 1.5 → 0 + // Formula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10))) const predictabilityScore = Math.max(0, Math.min(10, 10 - ((cv - 0.3) / 1.2 * 10) )); - // Dimensión 2: Complejidad Inversa (Proxy: Tasa de Transferencia) - // T = tasa de transferencia (%) + // Dimension 2: Inverse Complexity (Proxy: Transfer Rate) + // T = transfer rate (%) const transferRate = metric.transfer_rate; - // Normalización: T <= 5% → 10, T >= 30% → 0 - // Fórmula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10))) + // Normalization: T <= 5% → 10, T >= 30% → 0 + // Formula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10))) const complexityInverseScore = Math.max(0, Math.min(10, 10 - ((transferRate / 100 - 0.05) / 0.25 * 10) )); - // Dimensión 3: Repetitividad/Impacto (Proxy: Volumen) - // Normalización fija: > 5,000 llamadas/mes = 10, < 100 = 0 + // Dimension 3: Repetitiveness/Impact (Proxy: Volume) + // Fixed normalization: > 5,000 calls/month = 10, < 100 = 0 let repetitivityScore: number; if (metric.volume >= 5000) { repetitivityScore = 10; } else if (metric.volume <= 100) { repetitivityScore = 0; } else { - // Interpolación lineal entre 100 y 5000 + // Linear interpolation between 100 and 5000 repetitivityScore = ((metric.volume - 100) / (5000 - 100)) * 10; } return { skill: metric.skill, volume: metric.volume, - predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal - predictability_cv: Math.round(cv * 100) / 100, // 2 decimales + predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal place + predictability_cv: Math.round(cv * 
100) / 100, // 2 decimal places complexity_inverse_score: Math.round(complexityInverseScore * 10) / 10, complexity_transfer_rate: Math.round(transferRate * 10) / 10, repetitivity_score: Math.round(repetitivityScore * 10) / 10, @@ -190,7 +190,7 @@ export function transformToDimensions( } /** - * Resultado final con Agentic Readiness Score + * Final result with Agentic Readiness Score */ export interface SkillAgenticReadiness extends SkillDimensions { agentic_readiness_score: number; // 0-10 @@ -199,28 +199,28 @@ export interface SkillAgenticReadiness extends SkillDimensions { } /** - * Paso 4: Calcular Agentic Readiness Score - * Promedio ponderado de las 3 dimensiones + * Step 4: Calculate Agentic Readiness Score + * Weighted average of the 3 dimensions */ export function calculateAgenticReadinessScore( dimensions: SkillDimensions[], weights?: { predictability: number; complexity: number; repetitivity: number } ): SkillAgenticReadiness[] { - // Pesos por defecto (ajustables) + // Default weights (adjustable) const w = weights || { - predictability: 0.40, // 40% - Más importante + predictability: 0.40, // 40% - Most important complexity: 0.35, // 35% repetitivity: 0.25 // 25% }; return dimensions.map(dim => { - // Promedio ponderado + // Weighted average const score = dim.predictability_score * w.predictability + dim.complexity_inverse_score * w.complexity + dim.repetitivity_score * w.repetitivity; - // Categorizar + // Categorize let category: 'automate_now' | 'assist_copilot' | 'optimize_first'; let label: string; @@ -245,29 +245,29 @@ export function calculateAgenticReadinessScore( } /** - * Pipeline completo: Raw Data → Agentic Readiness Score + * Complete pipeline: Raw Data → Agentic Readiness Score */ export function transformRawDataToAgenticReadiness( rawInteractions: RawInteraction[], costPerHour: number, weights?: { predictability: number; complexity: number; repetitivity: number } ): SkillAgenticReadiness[] { - console.log(`🚀 Iniciando pipeline de 
transformación con ${rawInteractions.length} interacciones...`); + console.log(`🚀 Starting transformation pipeline with ${rawInteractions.length} interactions...`); - // Paso 1: Limpieza de ruido + // Step 1: Noise cleanup const cleanedData = cleanNoiseFromData(rawInteractions); - // Paso 2: Calcular métricas base + // Step 2: Calculate base metrics const baseMetrics = calculateSkillBaseMetrics(cleanedData, costPerHour); - // Paso 3: Transformar a dimensiones + // Step 3: Transform to dimensions const dimensions = transformToDimensions(baseMetrics); - // Paso 4: Calcular Agentic Readiness Score + // Step 4: Calculate Agentic Readiness Score const agenticReadiness = calculateAgenticReadinessScore(dimensions, weights); - console.log(`✅ Pipeline completado: ${agenticReadiness.length} skills procesados`); - console.log(`📈 Distribución:`); + console.log(`✅ Pipeline completed: ${agenticReadiness.length} skills processed`); + console.log(`📈 Distribution:`); const automateCount = agenticReadiness.filter(s => s.readiness_category === 'automate_now').length; const assistCount = agenticReadiness.filter(s => s.readiness_category === 'assist_copilot').length; const optimizeCount = agenticReadiness.filter(s => s.readiness_category === 'optimize_first').length; @@ -279,7 +279,7 @@ export function transformRawDataToAgenticReadiness( } /** - * Utilidad: Generar resumen de estadísticas + * Utility: Generate statistics summary */ export function generateTransformationSummary( originalCount: number, @@ -300,11 +300,11 @@ export function generateTransformationSummary( const optimizePercent = skillsCount > 0 ? 
((optimizeCount/skillsCount)*100).toFixed(0) : '0'; return ` -📊 Resumen de Transformación: - • Interacciones originales: ${originalCount.toLocaleString()} - • Ruido eliminado: ${removedCount.toLocaleString()} (${removedPercentage}%) - • Interacciones limpias: ${cleanedCount.toLocaleString()} - • Skills únicos: ${skillsCount} +📊 Transformation Summary: + • Original interactions: ${originalCount.toLocaleString()} + • Noise removed: ${removedCount.toLocaleString()} (${removedPercentage}%) + • Clean interactions: ${cleanedCount.toLocaleString()} + • Unique skills: ${skillsCount} 🎯 Agentic Readiness: • 🟢 Automate Now: ${automateCount} skills (${automatePercent}%) diff --git a/frontend/utils/realDataAnalysis.ts b/frontend/utils/realDataAnalysis.ts index 9159450..845f96f 100644 --- a/frontend/utils/realDataAnalysis.ts +++ b/frontend/utils/realDataAnalysis.ts @@ -1,5 +1,5 @@ /** - * Generación de análisis con datos reales (no sintéticos) + * Generation of analysis with real data (not synthetic) */ import type { AnalysisData, Kpi, DimensionAnalysis, HeatmapDataPoint, Opportunity, RoadmapInitiative, EconomicModelData, BenchmarkDataPoint, Finding, Recommendation, TierKey, CustomerSegment, RawInteraction, AgenticReadinessResult, SubFactor, SkillMetrics, DrilldownDataPoint } from '../types'; @@ -9,18 +9,18 @@ import { calculateAgenticReadinessScore, type AgenticReadinessInput } from './ag import { classifyQueue } from './segmentClassifier'; /** - * Calcular distribución horaria desde interacciones - * NOTA: Usa interaction_id únicos para consistencia con backend (aggfunc="nunique") + * Calculate hourly distribution from interactions + * NOTE: Uses unique interaction_id for consistency with backend (aggfunc="nunique") */ function calculateHourlyDistribution(interactions: RawInteraction[]): { hourly: number[]; off_hours_pct: number; peak_hours: number[] } { const hourly = new Array(24).fill(0); - // Deduplicar por interaction_id para consistencia con backend (nunique) + // 
Deduplicate by interaction_id for consistency with backend (nunique) const seenIds = new Set(); let duplicateCount = 0; for (const interaction of interactions) { - // Saltar duplicados de interaction_id + // Skip duplicate interaction_id const id = interaction.interaction_id; if (id && seenIds.has(id)) { duplicateCount++; @@ -35,22 +35,22 @@ function calculateHourlyDistribution(interactions: RawInteraction[]): { hourly: hourly[hour]++; } } catch { - // Ignorar fechas inválidas + // Ignore invalid dates } } if (duplicateCount > 0) { - console.log(`⏰ calculateHourlyDistribution: ${duplicateCount} interaction_ids duplicados ignorados`); + console.log(`⏰ calculateHourlyDistribution: ${duplicateCount} duplicate interaction_ids ignored`); } const total = hourly.reduce((a, b) => a + b, 0); - // Fuera de horario: 19:00-08:00 + // Off hours: 19:00-08:00 const offHoursVolume = hourly.slice(0, 8).reduce((a, b) => a + b, 0) + hourly.slice(19).reduce((a, b) => a + b, 0); const off_hours_pct = total > 0 ? 
Math.round((offHoursVolume / total) * 100) : 0; - // Encontrar horas pico (top 3 consecutivas) + // Find peak hours (top 3 consecutive) let maxSum = 0; let peakStart = 0; for (let i = 0; i < 22; i++) { @@ -62,7 +62,7 @@ function calculateHourlyDistribution(interactions: RawInteraction[]): { hourly: } const peak_hours = [peakStart, peakStart + 1, peakStart + 2]; - // Log para debugging + // Log for debugging const hourlyNonZero = hourly.filter(v => v > 0); const peakVolume = Math.max(...hourlyNonZero, 1); const valleyVolume = Math.min(...hourlyNonZero.filter(v => v > 0), 1); @@ -72,7 +72,7 @@ function calculateHourlyDistribution(interactions: RawInteraction[]): { hourly: } /** - * Calcular rango de fechas desde interacciones (optimizado para archivos grandes) + * Calculate date range from interactions (optimized for large files) */ function calculateDateRange(interactions: RawInteraction[]): { min: string; max: string } | undefined { let minTime = Infinity; @@ -98,7 +98,7 @@ function calculateDateRange(interactions: RawInteraction[]): { min: string; max: } /** - * Generar análisis completo con datos reales + * Generate complete analysis with real data */ export function generateAnalysisFromRealData( tier: TierKey, @@ -109,7 +109,7 @@ ): AnalysisData { console.log(`🔄 Generating analysis from ${interactions.length} real interactions`); - // PASO 0: Detectar si tenemos datos de repeat_call_7d + // STEP 0: Detect if we have repeat_call_7d data const repeatCallTrueCount = interactions.filter(i => i.repeat_call_7d === true).length; const repeatCallFalseCount = interactions.filter(i => i.repeat_call_7d === false).length; const repeatCallUndefinedCount = interactions.filter(i => i.repeat_call_7d === undefined).length; @@ -125,12 +125,12 @@ console.log(` - transfer_flag TRUE: ${transferTrueCount} (${((transferTrueCount/interactions.length)*100).toFixed(1)}%)`); console.log(` - 
transfer_flag FALSE: ${transferFalseCount} (${((transferFalseCount/interactions.length)*100).toFixed(1)}%)`); - // Calcular FCR esperado manualmente + // Calculate expected FCR manually const fcrRecords = interactions.filter(i => i.transfer_flag !== true && i.repeat_call_7d !== true); const expectedFCR = (fcrRecords.length / interactions.length) * 100; - console.log(`📊 EXPECTED FCR (manual): ${expectedFCR.toFixed(1)}% (${fcrRecords.length}/${interactions.length} calls without transfer AND without repeat)`); + console.log(`📊 EXPECTED FCR (manual): ${expectedFCR.toFixed(1)}% (${fcrRecords.length}/${interactions.length} calls without transfer AND without repeat)`); - // Mostrar sample de datos para debugging + // Show sample data for debugging if (interactions.length > 0) { console.log('📋 SAMPLE DATA (first 5 rows):', interactions.slice(0, 5).map(i => ({ id: i.interaction_id?.substring(0, 8), @@ -142,12 +142,12 @@ console.log(`📞 Repeat call data: ${repeatCallTrueCount} calls marked as repeat (${hasRepeatCallData ? 
'USING repeat_call_7d' : 'NO repeat_call_7d data - FCR = 100% - transfer_rate'})`); - // PASO 0.5: Calcular rango de fechas + // STEP 0.5: Calculate date range const dateRange = calculateDateRange(interactions); console.log(`📅 Date range: ${dateRange?.min} to ${dateRange?.max}`); - // PASO 1: Analizar record_status (ya no filtramos, el filtrado se hace internamente en calculateSkillMetrics) - // Normalizar a uppercase para comparación case-insensitive + // STEP 1: Analyze record_status (we no longer filter here, filtering is done internally in calculateSkillMetrics) + // Normalize to uppercase for case-insensitive comparison const getStatus = (i: RawInteraction) => (i.record_status || '').toString().toUpperCase().trim(); const statusCounts = { valid: interactions.filter(i => !i.record_status || getStatus(i) === 'VALID').length, @@ -157,30 +157,30 @@ }; console.log(`📊 Record status breakdown:`, statusCounts); - // PASO 1.5: Calcular distribución horaria (sobre TODAS las interacciones para ver patrones completos) + // STEP 1.5: Calculate hourly distribution (on ALL interactions to see complete patterns) const hourlyDistribution = calculateHourlyDistribution(interactions); console.log(`⏰ Off-hours: ${hourlyDistribution.off_hours_pct}%, Peak hours: ${hourlyDistribution.peak_hours.join('-')}h`); - // PASO 2: Calcular métricas por skill (pasa TODAS las interacciones, el filtrado se hace internamente) + // STEP 2: Calculate metrics per skill (passes ALL interactions, filtering is done internally) const skillMetrics = calculateSkillMetrics(interactions, costPerHour); console.log(`📊 Calculated metrics for ${skillMetrics.length} skills`); - // PASO 3: Generar heatmap data con dimensiones + // STEP 3: Generate heatmap data with dimensions const heatmapData = generateHeatmapFromMetrics(skillMetrics, avgCsat, segmentMapping); - // PASO 4: Calcular métricas globales - // Volumen total: TODAS las interacciones + // STEP 4: Calculate global metrics
+ // Total volume: ALL interactions const totalInteractions = interactions.length; - // Volumen válido para AHT: suma de volume_valid de cada skill + // Valid volume for AHT: sum of volume_valid from each skill const totalValidInteractions = skillMetrics.reduce((sum, s) => sum + s.volume_valid, 0); - // AHT promedio: calculado solo sobre interacciones válidas (ponderado por volumen) + // AHT average: calculated only on valid interactions (weighted by volume) const totalWeightedAHT = skillMetrics.reduce((sum, s) => sum + (s.aht_mean * s.volume_valid), 0); const avgAHT = totalValidInteractions > 0 ? Math.round(totalWeightedAHT / totalValidInteractions) : 0; - // FCR Técnico: 100 - transfer_rate (comparable con benchmarks de industria) - // Ponderado por volumen de cada skill + // Technical FCR: 100 - transfer_rate (comparable with industry benchmarks) + // Weighted by volume of each skill const totalVolumeForFCR = skillMetrics.reduce((sum, s) => sum + s.volume_valid, 0); const avgFCR = totalVolumeForFCR > 0 ? Math.round(skillMetrics.reduce((sum, s) => sum + (s.fcr_tecnico * s.volume_valid), 0) / totalVolumeForFCR) @@ -189,8 +189,8 @@ export function generateAnalysisFromRealData( // Coste total const totalCost = Math.round(skillMetrics.reduce((sum, s) => sum + s.total_cost, 0)); - // === CPI CENTRALIZADO: Calcular UNA sola vez desde heatmapData === - // Esta es la ÚNICA fuente de verdad para CPI, igual que ExecutiveSummaryTab + // === CENTRALIZED CPI: Calculated only ONCE from heatmapData === + // This is the ONLY source of truth for CPI, same as ExecutiveSummaryTab const totalCostVolume = heatmapData.reduce((sum, h) => sum + (h.cost_volume || h.volume), 0); const totalAnnualCost = heatmapData.reduce((sum, h) => sum + (h.annual_cost || 0), 0); const hasCpiField = heatmapData.some(h => h.cpi !== undefined && h.cpi > 0); @@ -200,44 +200,44 @@ : 0) : (totalCostVolume > 0 ? 
totalAnnualCost / totalCostVolume : 0); - // KPIs principales + // KPIs main const summaryKpis: Kpi[] = [ - { label: "Interacciones Totales", value: totalInteractions.toLocaleString('es-ES') }, - { label: "AHT Promedio", value: `${avgAHT}s` }, - { label: "FCR Técnico", value: `${avgFCR}%` }, + { label: "Total Interactions", value: totalInteractions.toLocaleString('es-ES') }, + { label: "Average AHT", value: `${avgAHT}s` }, + { label: "Technical FCR", value: `${avgFCR}%` }, { label: "CSAT", value: `${(avgCsat / 20).toFixed(1)}/5` } ]; - // Health Score basado en métricas reales + // Health Score based on real metrics const overallHealthScore = calculateHealthScore(heatmapData); - // Dimensiones (simplificadas para datos reales) - pasar CPI centralizado + // Dimensiones (simplified for real data) - pass centralized CPI const dimensions: DimensionAnalysis[] = generateDimensionsFromRealData( interactions, skillMetrics, avgCsat, avgAHT, hourlyDistribution, - globalCPI // CPI calculado desde heatmapData + globalCPI // CPI calculated from heatmapData ); // Agentic Readiness Score const agenticReadiness = calculateAgenticReadinessFromRealData(skillMetrics); - // Findings y Recommendations (incluyendo análisis de fuera de horario) + // Findings y Recommendations (including analysis of off hours) const findings = generateFindingsFromRealData(skillMetrics, interactions, hourlyDistribution); const recommendations = generateRecommendationsFromRealData(skillMetrics, hourlyDistribution, interactions.length); - // v3.3: Drill-down por Cola + Tipificación - CALCULAR PRIMERO para usar en opportunities y roadmap + // v3.3: Drill-down by Queue + Typification - CALCULATE FIRST to use in opportunities y roadmap const drilldownData = calculateDrilldownMetrics(interactions, costPerHour); - // v3.3: Opportunities y Roadmap basados en drilldownData (colas con CV < 75% = automatizables) + // v3.3: Opportunities y Roadmap based on drilldownData (queues with CV < 75% = automatable) const 
opportunities = generateOpportunitiesFromDrilldown(drilldownData, costPerHour); - // Roadmap basado en drilldownData + // Roadmap based on drilldownData const roadmap = generateRoadmapFromDrilldown(drilldownData, costPerHour); - // Economic Model (v3.10: alineado con TCO del Roadmap) + // Economic Model (v3.10: aligned with TCO of Roadmap) const economicModel = generateEconomicModelFromRealData(skillMetrics, costPerHour, roadmap, drilldownData); // Benchmark @@ -262,43 +262,43 @@ export function generateAnalysisFromRealData( } /** - * PASO 2: Calcular métricas base por skill + * STEP 2: Calcular metrics base por skill * - * LÓGICA DE FILTRADO POR record_status: - * - valid: llamadas normales válidas - * - noise: llamadas < 10 segundos (excluir de AHT, pero suma en volumen/coste) - * - zombie: llamadas > 3 horas (excluir de AHT, pero suma en volumen/coste) - * - abandon: cliente cuelga (excluir de AHT, no suma coste conversación, pero ocupa línea) + * FILTERING LOGIC BY record_status: + * - valid: normal valid calls + * - noise: calls < 10 segundos (exclude from AHT, but sum in volumen/coste) + * - zombie: calls > 3 hours (exclude from AHT, but sum in volumen/coste) + * - abandon: customer hangs up (exclude from AHT, no sum conversation cost, but occupies line) * - * Dashboard calidad/eficiencia: filtrar solo valid + abandon para AHT - * Cálculos financieros: usar todo (volume, coste total) + * Dashboard quality/efficiency: filter only valid + abandon para AHT + * Financial calculations: use all (volume, total cost) */ interface SkillMetrics { skill: string; - volume: number; // Total de interacciones (todas) - volume_valid: number; // Interacciones válidas para AHT (valid + abandon) - aht_mean: number; // AHT "limpio" calculado solo sobre valid (sin noise/zombie/abandon) - para métricas de calidad, CV - aht_total: number; // AHT "total" calculado con TODAS las filas (noise/zombie/abandon incluidas) - solo informativo - aht_benchmark: number; // AHT "tradicional" 
(incluye noise, excluye zombie/abandon) - para comparación con benchmarks de industria + volume: number; // Total of interactions (all) + volume_valid: number; // Valid interactions for AHT (valid + abandon) + aht_mean: number; // "Clean" AHT, calculated only over valid records (without noise/zombie/abandon) - for quality metrics, CV + aht_total: number; // "Total" AHT, calculated over ALL rows (noise/zombie/abandon included) - informational only + aht_benchmark: number; // "Traditional" AHT (includes noise, excludes zombie/abandon) - for comparison with industry benchmarks aht_std: number; cv_aht: number; - transfer_rate: number; // Calculado sobre valid + abandon - fcr_rate: number; // FCR Real: (transfer_flag == FALSE) AND (repeat_call_7d == FALSE) - sin recontacto 7 días - fcr_tecnico: number; // FCR Técnico: (transfer_flag == FALSE) - solo sin transferencia, comparable con benchmarks de industria - abandonment_rate: number; // % de abandonos sobre total - total_cost: number; // Coste total (todas las interacciones excepto abandon) - cost_volume: number; // Volumen usado para calcular coste (non-abandon) - cpi: number; // Coste por interacción = total_cost / cost_volume - hold_time_mean: number; // Calculado sobre valid + transfer_rate: number; // Calculated over valid + abandon + fcr_rate: number; // Real FCR: (transfer_flag == FALSE) AND (repeat_call_7d == FALSE) - no recontact within 7 days + fcr_tecnico: number; // Technical FCR: (transfer_flag == FALSE) - no transfer only, comparable with industry benchmarks + abandonment_rate: number; // % of abandons over total + total_cost: number; // Total cost (all interactions except abandon) + cost_volume: number; // Volume used to calculate cost (non-abandon) + cpi: number; // Cost per interaction = total_cost / cost_volume + hold_time_mean: number; // Calculated over valid records cv_talk_time: number; - // Métricas adicionales para debug + // Additional metrics for debug noise_count: number; zombie_count:
number; abandon_count: number; } export function calculateSkillMetrics(interactions: RawInteraction[], costPerHour: number): SkillMetrics[] { - // Agrupar por skill + // Group por skill const skillGroups = new Map(); interactions.forEach(i => { @@ -308,22 +308,22 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou skillGroups.get(i.queue_skill)!.push(i); }); - // Calcular métricas para cada skill + // Calculate metrics for each skill const metrics: SkillMetrics[] = []; skillGroups.forEach((group, skill) => { const volume = group.length; if (volume === 0) return; - // === CÁLCULOS SIMPLES Y DIRECTOS DEL CSV === + // === SIMPLE AND DIRECT CALCULATIONS FROM CSV === // Abandonment: DIRECTO del campo is_abandoned del CSV const abandon_count = group.filter(i => i.is_abandoned === true).length; const abandonment_rate = (abandon_count / volume) * 100; // FCR Real: DIRECTO del campo fcr_real_flag del CSV - // Definición: (transfer_flag == FALSE) AND (repeat_call_7d == FALSE) - // Esta es la métrica MÁS ESTRICTA - sin transferencia Y sin recontacto en 7 días + // Definition: (transfer_flag == FALSE) AND (repeat_call_7d == FALSE) + // This is the STRICTEST metric - without transfer AND without recontact in 7 days const fcrTrueCount = group.filter(i => i.fcr_real_flag === true).length; const fcr_rate = (fcrTrueCount / volume) * 100; @@ -331,24 +331,24 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou const transfers = group.filter(i => i.transfer_flag === true).length; const transfer_rate = (transfers / volume) * 100; - // FCR Técnico: 100 - transfer_rate - // Definición: (transfer_flag == FALSE) - solo sin transferencia - // Esta métrica es COMPARABLE con benchmarks de industria (COPC, Dimension Data) - // Los benchmarks de industria (~70%) miden FCR sin transferencia, NO sin recontacto + // Technical FCR: 100 - transfer_rate + // Definition: (transfer_flag == FALSE) - only without transferencia + // This 
metric is COMPARABLE with industry benchmarks (COPC, Dimension Data) + // Industry benchmarks (~70%) measure FCR without transfer, NOT without recontact const fcr_tecnico = 100 - transfer_rate; - // Separar por record_status para AHT (normalizar a uppercase para comparación case-insensitive) + // Separate by record_status for AHT (normalize to uppercase for case-insensitive comparison) const getStatus = (i: RawInteraction) => (i.record_status || '').toString().toUpperCase().trim(); const noiseRecords = group.filter(i => getStatus(i) === 'NOISE'); const zombieRecords = group.filter(i => getStatus(i) === 'ZOMBIE'); const validRecords = group.filter(i => !i.record_status || getStatus(i) === 'VALID'); - // Registros que generan coste (todo excepto abandonos) + // Records that generate cost (everything except abandons) const nonAbandonRecords = group.filter(i => i.is_abandoned !== true); const noise_count = noiseRecords.length; const zombie_count = zombieRecords.length; - // AHT se calcula sobre registros 'valid' (excluye noise, zombie) + // AHT is calculated over 'valid' records (excludes noise, zombie) const ahtRecords = validRecords; const volume_valid = ahtRecords.length; @@ -367,18 +367,18 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou cv_aht = aht_mean > 0 ? aht_std / aht_mean : 0; // Talk time CV - const talkTimes = ahtRecords.map(i => i.duration_talk); - const talk_mean = talkTimes.reduce((sum, v) => sum + v, 0) / volume_valid; - const talk_std = Math.sqrt(talkTimes.reduce((sum, v) => sum + Math.pow(v - talk_mean, 2), 0) / volume_valid); + const talkTimes = ahtRecords.map(i => i.duration_talk); + const talk_mean = talkTimes.reduce((sum, v) => sum + v, 0) / volume_valid; + const talk_std = Math.sqrt(talkTimes.reduce((sum, v) => sum + Math.pow(v - talk_mean, 2), 0) / volume_valid); cv_talk_time = talk_mean > 0 ?
talk_std / talk_mean : 0; - // Hold time promedio + // Hold time average hold_time_mean = ahtRecords.reduce((sum, i) => sum + i.hold_time, 0) / volume_valid; } - // === AHT BENCHMARK: para comparación con benchmarks de industria === - // Incluye NOISE (llamadas cortas son trabajo real), excluye ZOMBIE (errores) y ABANDON (sin handle time) - // Los benchmarks de industria (COPC, Dimension Data) NO filtran llamadas cortas + // === AHT BENCHMARK: for comparison with industry benchmarks === + // Includes NOISE (short calls are real work), excludes ZOMBIE (errors) and ABANDON (no handle time) + // Industry benchmarks (COPC, Dimension Data) do NOT filter out short calls const benchmarkRecords = group.filter(i => getStatus(i) !== 'ZOMBIE' && getStatus(i) !== 'ABANDON' && @@ -386,42 +386,42 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou ); const volume_benchmark = benchmarkRecords.length; - let aht_benchmark = aht_mean; // Fallback al AHT limpio si no hay registros benchmark + let aht_benchmark = aht_mean; // Fallback to the clean AHT if there are no benchmark records if (volume_benchmark > 0) { const benchmarkAhts = benchmarkRecords.map(i => i.duration_talk + i.hold_time + i.wrap_up_time); aht_benchmark = benchmarkAhts.reduce((sum, v) => sum + v, 0) / volume_benchmark; } - // === AHT TOTAL: calculado con TODAS las filas (solo informativo) === - // Incluye NOISE, ZOMBIE, ABANDON - para comparación con AHT limpio + // === AHT TOTAL: calculated over ALL rows (informational only) === + // Includes NOISE, ZOMBIE, ABANDON - for comparison with the clean AHT let aht_total = 0; if (volume > 0) { const allAhts = group.map(i => i.duration_talk + i.hold_time + i.wrap_up_time); aht_total = allAhts.reduce((sum, v) => sum + v, 0) / volume; } - // === CÁLCULOS FINANCIEROS: usar TODAS las interacciones === - // Coste total con productividad efectiva del 70% + // === FINANCIAL CALCULATIONS: use ALL interactions === + // Total cost with effective productivity of
70% const effectiveProductivity = 0.70; - // Para el coste, usamos todas las interacciones EXCEPTO abandonos (que no generan coste de conversación) - // noise y zombie SÍ generan coste (ocupan agente aunque sea poco/mucho tiempo) + // For cost, we use all interactions EXCEPT abandonments (which do not generate conversation cost) + // noise y zombie DO generate cost (occupy agent even if little/much time) // Usar nonAbandonRecords que ya filtra por is_abandoned y record_status const costRecords = nonAbandonRecords; const costVolume = costRecords.length; - // Calcular AHT para coste usando todos los registros que generan coste + // Calculate AHT for cost using all records that generate cost let aht_for_cost = 0; if (costVolume > 0) { const costAhts = costRecords.map(i => i.duration_talk + i.hold_time + i.wrap_up_time); aht_for_cost = costAhts.reduce((sum, v) => sum + v, 0) / costVolume; } - // Coste Real = (AHT en horas × Coste/hora × Volumen) / Productividad Efectiva + // Real Cost = (AHT in hours × Cost/hour × Volume) / Effective Productivity const rawCost = (aht_for_cost / 3600) * costPerHour * costVolume; const total_cost = rawCost / effectiveProductivity; - // CPI = Coste por interacción (usando el volumen correcto) + // CPI = Coste per interaction (usando el volumen correcto) const cpi = costVolume > 0 ? 
total_cost / costVolume : 0; metrics.push({ @@ -429,7 +429,7 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou volume, volume_valid, aht_mean, - aht_total, // AHT con TODAS las filas (solo informativo) + aht_total, // AHT con TODAS las filas (only informative) aht_benchmark, aht_std, cv_aht, @@ -448,7 +448,7 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou }); }); - // === DEBUG: Verificar cálculos === + // === DEBUG: Verify calculations === const totalVolume = metrics.reduce((sum, m) => sum + m.volume, 0); const totalValidVolume = metrics.reduce((sum, m) => sum + m.volume_valid, 0); const totalAbandons = metrics.reduce((sum, m) => sum + m.abandon_count, 0); @@ -467,26 +467,26 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou console.log(''); console.log('═══════════════════════════════════════════════════════════════'); - console.log('📊 MÉTRICAS CALCULADAS POR SKILL'); + console.log('📊 METRICS CALCULATED BY SKILL'); console.log('═══════════════════════════════════════════════════════════════'); console.log(`Total skills: ${metrics.length}`); console.log(`Total volumen: ${totalVolume}`); - console.log(`Total abandonos (is_abandoned=TRUE): ${totalAbandons}`); + console.log(`Total abandonments (is_abandoned=TRUE): ${totalAbandons}`); console.log(''); - console.log('MÉTRICAS GLOBALES (ponderadas por volumen):'); + console.log('GLOBAL METRICS (weighted by volume):'); console.log(` Abandonment Rate: ${globalAbandonRate.toFixed(2)}%`); - console.log(` FCR Real (sin transfer + sin recontacto 7d): ${avgFCRRate.toFixed(2)}%`); - console.log(` FCR Técnico (solo sin transfer, comparable con benchmarks): ${avgFCRTecnicoRate.toFixed(2)}%`); + console.log(` FCR Real (without transfer + without recontact 7d): ${avgFCRRate.toFixed(2)}%`); + console.log(` Technical FCR (only without transfer, comparable with benchmarks): ${avgFCRTecnicoRate.toFixed(2)}%`); console.log(` Transfer 
Rate: ${avgTransferRate.toFixed(2)}%`); console.log(''); console.log('Detalle por skill (top 5):'); metrics.slice(0, 5).forEach(m => { - console.log(` ${m.skill}: vol=${m.volume}, abandon=${m.abandon_count} (${m.abandonment_rate.toFixed(1)}%), FCR Real=${m.fcr_rate.toFixed(1)}%, FCR Técnico=${m.fcr_tecnico.toFixed(1)}%, transfer=${m.transfer_rate.toFixed(1)}%`); + console.log(` ${m.skill}: vol=${m.volume}, abandon=${m.abandon_count} (${m.abandonment_rate.toFixed(1)}%), FCR Real=${m.fcr_rate.toFixed(1)}%, Technical FCR=${m.fcr_tecnico.toFixed(1)}%, transfer=${m.transfer_rate.toFixed(1)}%`); }); console.log('═══════════════════════════════════════════════════════════════'); console.log(''); - // Mostrar detalle del primer skill para debug + // Mostrar detalle del primer skill for debug if (metrics[0]) { console.log('📋 Sample skill detail:', { skill: metrics[0].skill, @@ -499,85 +499,85 @@ export function calculateSkillMetrics(interactions: RawInteraction[], costPerHou }); } - return metrics.sort((a, b) => b.volume - a.volume); // Ordenar por volumen descendente + return metrics.sort((a, b) => b.volume - a.volume); // Sort by descending volume } /** - * v4.4: Clasificar tier de automatización con datos del heatmap + * v4.4: Classify automation tier with heatmap data * - * Esta función replica la lógica de clasificarTier() usando los datos - * disponibles en el heatmap. Acepta parámetros opcionales (fcr, volume) - * para mayor precisión cuando están disponibles. + * This function replicates the logic of clasificarTier() usando los datos + * disponibles en el heatmap. Accepts optional parameters (fcr, volume) + * for greater precision when available. * - * Se usa en generateDrilldownFromHeatmap() de analysisGenerator.ts para - * asegurar consistencia entre la ruta fresh (datos completos) y la ruta - * cached (datos del heatmap). 
+ * Used in generateDrilldownFromHeatmap() de analysisGenerator.ts para + * asegurar consistencia between the fresh path (complete data) y la ruta + * cached (heatmap data). * * @param score - Agentic Readiness Score (0-10) - * @param cv - Coeficiente de Variación del AHT como decimal (0.75 = 75%) - * @param transfer - Tasa de transferencia como decimal (0.20 = 20%) - * @param fcr - FCR rate como decimal (0.80 = 80%), opcional - * @param volume - Volumen mensual de interacciones, opcional + * @param cv - Coefficient of Variation of AHT as decimal (0.75 = 75%) + * @param transfer - Transfer rate as decimal (0.20 = 20%) + * @param fcr - FCR rate as decimal (0.80 = 80%), optional + * @param volume - Monthly volume of interactions, optional * @returns AgenticTier ('AUTOMATE' | 'ASSIST' | 'AUGMENT' | 'HUMAN-ONLY') */ export function clasificarTierSimple( score: number, - cv: number, // CV como decimal (0.75 = 75%) - transfer: number, // Transfer como decimal (0.20 = 20%) - fcr?: number, // FCR como decimal (0.80 = 80%) - volume?: number // Volumen mensual + cv: number, // CV as decimal (0.75 = 75%) + transfer: number, // Transfer as decimal (0.20 = 20%) + fcr?: number, // FCR as decimal (0.80 = 80%) + volume?: number // Monthly volume ): import('../types').AgenticTier { - // RED FLAGS críticos - mismos que clasificarTier() completa - // CV > 120% o Transfer > 50% son red flags absolutos + // critical RED FLAGS - same as clasificarTier() complete + // CV > 120% o Transfer > 50% are absolute red flags if (cv > 1.20 || transfer > 0.50) { return 'HUMAN-ONLY'; } - // Volume < 50/mes es red flag si tenemos el dato + // Volume < 50/month is a red flag if we have the data if (volume !== undefined && volume < 50) { return 'HUMAN-ONLY'; } - // TIER 1: AUTOMATE - requiere métricas óptimas - // Mismo criterio que clasificarTier(): score >= 7.5, cv <= 0.75, transfer <= 0.20, fcr >= 0.50 - const fcrOk = fcr === undefined || fcr >= 0.50; // Si no tenemos FCR, asumimos OK + // TIER 1: 
AUTOMATE - requires optimal metrics + // Same criterion as clasificarTier(): score >= 7.5, cv <= 0.75, transfer <= 0.20, fcr >= 0.50 + const fcrOk = fcr === undefined || fcr >= 0.50; // If we dont have FCR, we assume OK if (score >= 7.5 && cv <= 0.75 && transfer <= 0.20 && fcrOk) { return 'AUTOMATE'; } - // TIER 2: ASSIST - apto para copilot/asistencia + // TIER 2: ASSIST - suitable for copilot/assistance if (score >= 5.5 && cv <= 0.90 && transfer <= 0.30) { return 'ASSIST'; } - // TIER 3: AUGMENT - requiere optimización previa + // TIER 3: AUGMENT - requires prior optimization if (score >= 3.5) { return 'AUGMENT'; } - // TIER 4: HUMAN-ONLY - proceso complejo + // TIER 4: HUMAN-ONLY - complex process return 'HUMAN-ONLY'; } /** - * v3.4: Calcular métricas drill-down con nueva fórmula de Agentic Readiness Score + * v3.4: Calculate drill-down metrics with new formula for Agentic Readiness Score * - * SCORE POR COLA (0-10): - * - Factor 1: PREDICTIBILIDAD (30%) - basado en CV AHT - * - Factor 2: RESOLUTIVIDAD (25%) - FCR (60%) + Transfer (40%) - * - Factor 3: VOLUMEN (25%) - basado en volumen mensual - * - Factor 4: CALIDAD DATOS (10%) - % registros válidos - * - Factor 5: SIMPLICIDAD (10%) - basado en AHT + * SCORE BY QUEUE (0-10): + * - Factor 1: PREDICTABILITY (30%) - based on CV AHT + * - Factor 2: RESOLUTION (25%) - FCR (60%) + Transfer (40%) + * - Factor 3: VOLUME (25%) - based on volumen monthly + * - Factor 4: DATA QUALITY (10%) - % valid records + * - Factor 5: SIMPLICITY (10%) - based on AHT * - * CLASIFICACIÓN EN TIERS: + * TIER CLASSIFICATION: * - AUTOMATE: score >= 7.5, CV <= 75%, transfer <= 20%, FCR >= 50% * - ASSIST: score >= 5.5, CV <= 90%, transfer <= 30% * - AUGMENT: score >= 3.5 - * - HUMAN-ONLY: score < 3.5 o red flags + * - HUMAN-ONLY: score < 3.5 or red flags * - * RED FLAGS (HUMAN-ONLY automático): + * RED FLAGS (automatic HUMAN-ONLY): * - CV > 120% * - Transfer > 50% - * - Vol < 50/mes + * - Vol < 50/month * - Valid < 30% */ export function 
calculateDrilldownMetrics( @@ -587,18 +587,18 @@ export function calculateDrilldownMetrics( const effectiveProductivity = 0.70; // ═══════════════════════════════════════════════════════════════════════════ - // FUNCIÓN: Calcular Score por Cola (nueva fórmula v3.4) + // FUNCTION: Calculate Score by Queue (new formula v3.4) // ═══════════════════════════════════════════════════════════════════════════ - function calcularScoreCola( - cv: number, // CV AHT (0-2+, donde 1 = 100%) + function calculateScoreCola( + cv: number, // CV AHT (0-2+, where 1 = 100%) fcr: number, // FCR rate (0-1) transfer: number, // Transfer rate (0-1) - vol: number, // Volumen mensual - aht: number, // AHT en segundos - validPct: number // % registros válidos (0-1) + vol: number, // Monthly volume + aht: number, // AHT in seconds + validPct: number // % valid records (0-1) ): { score: number; breakdown: import('../types').AgenticScoreBreakdown } { - // FACTOR 1: PREDICTIBILIDAD (30%) - basado en CV AHT + // FACTOR 1: PREDICTABILITY (30%) - based on CV AHT let scorePred: number; if (cv <= 0.50) { scorePred = 10; @@ -614,7 +614,7 @@ export function calculateDrilldownMetrics( scorePred = Math.max(0, 1 - (cv - 1.10) / 0.50); } - // FACTOR 2: RESOLUTIVIDAD (25%) = FCR (60%) + Transfer (40%) + // FACTOR 2: RESOLUTION (25%) = FCR (60%) + Transfer (40%) let scoreFcr: number; if (fcr >= 0.80) { scoreFcr = 10; @@ -643,7 +643,7 @@ export function calculateDrilldownMetrics( const scoreResol = scoreFcr * 0.6 + scoreTrans * 0.4; - // FACTOR 3: VOLUMEN (25%) + // FACTOR 3: VOLUME (25%) let scoreVol: number; if (vol >= 10000) { scoreVol = 10; @@ -659,7 +659,7 @@ export function calculateDrilldownMetrics( scoreVol = vol / 100; } - // FACTOR 4: CALIDAD DATOS (10%) + // FACTOR 4: DATA QUALITY (10%) let scoreCal: number; if (validPct >= 0.90) { scoreCal = 10; @@ -671,7 +671,7 @@ export function calculateDrilldownMetrics( scoreCal = validPct / 0.50 * 4; } - // FACTOR 5: SIMPLICIDAD (10%) - basado en AHT + // 
FACTOR 5: SIMPLICITY (10%) - based on AHT let scoreSimp: number; if (aht <= 180) { scoreSimp = 10; @@ -700,30 +700,30 @@ export function calculateDrilldownMetrics( predictibilidad: Math.round(scorePred * 10) / 10, resolutividad: Math.round(scoreResol * 10) / 10, volumen: Math.round(scoreVol * 10) / 10, - calidadDatos: Math.round(scoreCal * 10) / 10, + calidadDatos: Math.round(scoreCal * 10) / 10, simplicidad: Math.round(scoreSimp * 10) / 10 } }; } // ═══════════════════════════════════════════════════════════════════════════ - // FUNCIÓN: Clasificar Tier del Roadmap + // FUNCTION: Classify Roadmap Tier // ═══════════════════════════════════════════════════════════════════════════ function clasificarTier( score: number, - cv: number, // CV como decimal (0.75 = 75%) - transfer: number, // Transfer como decimal (0.20 = 20%) - fcr: number, // FCR como decimal (0.80 = 80%) + cv: number, // CV as decimal (0.75 = 75%) + transfer: number, // Transfer as decimal (0.20 = 20%) + fcr: number, // FCR as decimal (0.80 = 80%) vol: number, validPct: number ): { tier: import('../types').AgenticTier; motivo: string } { - // RED FLAGS → HUMAN-ONLY automático + // RED FLAGS → HUMAN-ONLY automatic const redFlags: string[] = []; if (cv > 1.20) redFlags.push("CV > 120%"); if (transfer > 0.50) redFlags.push("Transfer > 50%"); - if (vol < 50) redFlags.push("Vol < 50/mes"); - if (validPct < 0.30) redFlags.push("Datos < 30% válidos"); + if (vol < 50) redFlags.push("Vol < 50/month"); + if (validPct < 0.30) redFlags.push("Data < 30% valid"); if (redFlags.length > 0) { return { @@ -736,7 +736,7 @@ export function calculateDrilldownMetrics( if (score >= 7.5 && cv <= 0.75 && transfer <= 0.20 && fcr >= 0.50) { return { tier: 'AUTOMATE', - motivo: `Score ${score}, métricas óptimas para automatización` + motivo: `Score ${score}, optimal metrics for automation` }; } @@ -744,7 +744,7 @@ export function calculateDrilldownMetrics( if (score >= 5.5 && cv <= 0.90 && transfer <= 0.30) { return {
tier: 'ASSIST', - motivo: `Score ${score}, apto para copilot/asistencia` + motivo: `Score ${score}, suitable for copilot/assistance` }; } @@ -752,25 +752,25 @@ export function calculateDrilldownMetrics( if (score >= 3.5) { return { tier: 'AUGMENT', - motivo: `Score ${score}, requiere optimización previa` + motivo: `Score ${score}, requires prior optimization` }; } // TIER 4: HUMAN-ONLY return { tier: 'HUMAN-ONLY', - motivo: `Score ${score}, proceso complejo para automatización` + motivo: `Score ${score}, complex process for automation` }; } // ═══════════════════════════════════════════════════════════════════════════ - // FUNCIÓN: Calcular métricas de un grupo de interacciones + // FUNCTION: Calcular metrics de un group of interactions // ═══════════════════════════════════════════════════════════════════════════ function calculateQueueMetrics(group: RawInteraction[]): import('../types').OriginalQueueMetrics | null { const volume = group.length; if (volume < 5) return null; - // Filtrar solo VALID para cálculo de CV (normalizar a uppercase para comparación case-insensitive) + // Filter only VALID for CV calculation (normalize to uppercase for case-insensitive comparison) const getStatus = (i: RawInteraction) => (i.record_status || '').toString().toUpperCase().trim(); const validRecords = group.filter(i => !i.record_status || getStatus(i) === 'VALID'); const volumeValid = validRecords.length; @@ -778,29 +778,29 @@ export function calculateDrilldownMetrics( const validPct = volumeValid / volume; - // AHT y CV sobre registros válidos + // AHT y CV on valid records const ahts = validRecords.map(i => i.duration_talk + i.hold_time + i.wrap_up_time); const aht_mean = ahts.reduce((sum, v) => sum + v, 0) / volumeValid; const aht_variance = ahts.reduce((sum, v) => sum + Math.pow(v - aht_mean, 2), 0) / volumeValid; const aht_std = Math.sqrt(aht_variance); - const cv_aht_decimal = aht_mean > 0 ? 
aht_std / aht_mean : 1.5; // CV como decimal + const cv_aht_decimal = aht_mean > 0 ? aht_std / aht_mean : 1.5; // CV as decimal const cv_aht_percent = cv_aht_decimal * 100; // CV como % - // Transfer y FCR (como decimales para cálculo, como % para display) + // Transfer y FCR (as decimals for calculation, as % for display) const transfers = group.filter(i => i.transfer_flag === true).length; const transfer_decimal = transfers / volume; const transfer_percent = transfer_decimal * 100; - // FCR Real: usa fcr_real_flag del CSV (sin transferencia Y sin recontacto 7d) + // FCR Real: usa fcr_real_flag del CSV (without transferencia Y without recontact 7d) const fcrCount = group.filter(i => i.fcr_real_flag === true).length; const fcr_decimal = fcrCount / volume; const fcr_percent = fcr_decimal * 100; - // FCR Técnico: 100 - transfer_rate (comparable con benchmarks de industria) + // Technical FCR: 100 - transfer_rate (comparable with industry benchmarks) const fcr_tecnico_percent = 100 - transfer_percent; - // Calcular score con nueva fórmula v3.4 - const { score, breakdown } = calcularScoreCola( + // Calculate score con new formula v3.4 + const { score, breakdown } = calculateScoreCola( cv_aht_decimal, fcr_decimal, transfer_decimal, @@ -819,19 +819,19 @@ export function calculateDrilldownMetrics( validPct ); - // v4.2: Convertir volumen de 11 meses a anual para el coste - const annualVolume = (volume / 11) * 12; // 11 meses → anual + // v4.2: Convert volume from 11 months a annual para el coste + const annualVolume = (volume / 11) * 12; // 11 months → annual const annualCost = Math.round((aht_mean / 3600) * costPerHour * annualVolume / effectiveProductivity); return { - original_queue_id: '', // Se asigna después + original_queue_id: '', // Assigned later volume, volumeValid, aht_mean: Math.round(aht_mean), cv_aht: Math.round(cv_aht_percent * 10) / 10, transfer_rate: Math.round(transfer_percent * 10) / 10, fcr_rate: Math.round(fcr_percent * 10) / 10, - fcr_tecnico: 
Math.round(fcr_tecnico_percent * 10) / 10, // FCR Técnico para consistencia con Summary + fcr_tecnico: Math.round(fcr_tecnico_percent * 10) / 10, // Technical FCR for consistency with Summary agenticScore: score, scoreBreakdown: breakdown, tier, @@ -842,7 +842,7 @@ export function calculateDrilldownMetrics( } // ═══════════════════════════════════════════════════════════════════════════ - // PASO 1: Agrupar por queue_skill (nivel estratégico) + // STEP 1: Group by queue_skill (strategic level) // ═══════════════════════════════════════════════════════════════════════════ const skillGroups = new Map(); for (const interaction of interactions) { @@ -854,26 +854,26 @@ export function calculateDrilldownMetrics( skillGroups.get(skill)!.push(interaction); } - console.log(`📊 Drill-down v3.4: ${skillGroups.size} queue_skills encontrados`); + console.log(`📊 Drill-down v3.4: ${skillGroups.size} queue_skills found`); const drilldownData: DrilldownDataPoint[] = []; // ═══════════════════════════════════════════════════════════════════════════ - // PASO 2: Para cada queue_skill, agrupar por original_queue_id + // STEP 2: For each queue_skill, group by original_queue_id // ═══════════════════════════════════════════════════════════════════════════ skillGroups.forEach((skillGroup, skill) => { if (skillGroup.length < 10) return; const queueGroups = new Map(); for (const interaction of skillGroup) { - const queueId = interaction.original_queue_id || 'Sin identificar'; + const queueId = interaction.original_queue_id || 'Without identification'; if (!queueGroups.has(queueId)) { queueGroups.set(queueId, []); } queueGroups.get(queueId)!.push(interaction); } - // Calcular métricas para cada original_queue_id + // Calculate metrics for each original_queue_id const originalQueues: import('../types').OriginalQueueMetrics[] = []; queueGroups.forEach((queueGroup, queueId) => { const metrics = calculateQueueMetrics(queueGroup); @@ -885,7 +885,7 @@ export function calculateDrilldownMetrics( if 
(originalQueues.length === 0) return; - // Ordenar por score descendente, luego por volumen + // Sort by descending score, then by volume originalQueues.sort((a, b) => { if (Math.abs(a.agenticScore - b.agenticScore) > 0.5) { return b.agenticScore - a.agenticScore; @@ -894,7 +894,7 @@ export function calculateDrilldownMetrics( }); // ═══════════════════════════════════════════════════════════════════════ - // Calcular métricas agregadas del skill (promedio ponderado por volumen) + // Calculate aggregated metrics of the skill (volume-weighted average) // ═══════════════════════════════════════════════════════════════════════ const totalVolume = originalQueues.reduce((sum, q) => sum + q.volume, 0); const totalVolumeValid = originalQueues.reduce((sum, q) => sum + q.volumeValid, 0); @@ -906,7 +906,7 @@ export function calculateDrilldownMetrics( const avgFcr = originalQueues.reduce((sum, q) => sum + q.fcr_rate * q.volume, 0) / totalVolume; const avgFcrTecnico = originalQueues.reduce((sum, q) => sum + q.fcr_tecnico * q.volume, 0) / totalVolume; - // Score global ponderado por volumen + // Score global weighted by volume const avgScore = originalQueues.reduce((sum, q) => sum + q.agenticScore * q.volume, 0) / totalVolume; // Tier predominante (el de mayor volumen) @@ -915,7 +915,7 @@ export function calculateDrilldownMetrics( tierCounts[q.tier] += q.volume; }); - // isPriorityCandidate si hay al menos una cola AUTOMATE + // isPriorityCandidate si hay al menos una queue AUTOMATE const hasAutomateQueue = originalQueues.some(q => q.tier === 'AUTOMATE'); drilldownData.push({ @@ -927,7 +927,7 @@ export function calculateDrilldownMetrics( cv_aht: Math.round(avgCv * 10) / 10, transfer_rate: Math.round(avgTransfer * 10) / 10, fcr_rate: Math.round(avgFcr * 10) / 10, - fcr_tecnico: Math.round(avgFcrTecnico * 10) / 10, // FCR Técnico para consistencia + fcr_tecnico: Math.round(avgFcrTecnico * 10) / 10, // Technical FCR para consistencia agenticScore: Math.round(avgScore * 10) / 10, 
isPriorityCandidate: hasAutomateQueue, annualCost: totalCost @@ -935,11 +935,11 @@ export function calculateDrilldownMetrics( }); // ═══════════════════════════════════════════════════════════════════════════ - // PASO 3: Ordenar y log resumen + // STEP 3: Ordenar y log resumen // ═══════════════════════════════════════════════════════════════════════════ drilldownData.sort((a, b) => b.agenticScore - a.agenticScore); - // Contar tiers + // Count tiers const allQueues = drilldownData.flatMap(s => s.originalQueues); const tierSummary = { AUTOMATE: allQueues.filter(q => q.tier === 'AUTOMATE').length, @@ -948,14 +948,14 @@ export function calculateDrilldownMetrics( 'HUMAN-ONLY': allQueues.filter(q => q.tier === 'HUMAN-ONLY').length }; - console.log(`📊 Drill-down v3.4: ${drilldownData.length} skills, ${allQueues.length} colas`); + console.log(`📊 Drill-down v3.4: ${drilldownData.length} skills, ${allQueues.length} queues`); console.log(`🎯 Tiers: AUTOMATE=${tierSummary.AUTOMATE}, ASSIST=${tierSummary.ASSIST}, AUGMENT=${tierSummary.AUGMENT}, HUMAN-ONLY=${tierSummary['HUMAN-ONLY']}`); return drilldownData; } /** - * PASO 3: Transformar métricas a dimensiones (0-10) + * STEP 3: Transformar metrics a dimensions (0-10) */ export function generateHeatmapFromMetrics( metrics: SkillMetrics[], @@ -970,31 +970,31 @@ export function generateHeatmapFromMetrics( }); const result = metrics.map(m => { - // Dimensión 1: Predictibilidad (CV AHT) + // Dimension 1: Predictability (CV AHT) const predictability = Math.max(0, Math.min(10, 10 - ((m.cv_aht - 0.3) / 1.2 * 10))); - // Dimensión 2: Complejidad Inversa (Transfer Rate) + // Dimension 2: Inverse Complexity (Transfer Rate) const complexity_inverse = Math.max(0, Math.min(10, 10 - ((m.transfer_rate / 100 - 0.05) / 0.25 * 10))); - // Dimensión 3: Repetitividad (Volumen) + // Dimension 3: Repeatability (Volumen) let repetitiveness = 0; if (m.volume >= 5000) { repetitiveness = 10; } else if (m.volume <= 100) { repetitiveness = 0; } else { - 
// Agentic Readiness Score (weighted average)
cost_volume: m.cost_volume, // Volume used to calculate cost (non-abandon)
* STEP 3: Penalties for critical thresholds
// Below P10 (excellent efficiency)
// STEP 3: Calculate penalties
* Airline sector benchmarks: AHT P50=380s, FCR=70%, Abandonment=5%, Ratio P90/P50 healthy<2.0
// === EFFICIENCY SCORE: Scale based on ratio P90/P50 ===
// Volumetry score: 100 base, penalize by off hours and peak/valley ratio
summary: `${offHoursPct.toFixed(1)}% off hours. Peak/valley ratio: ${peakValleyRatio.toFixed(1)}x. ${totalVolume.toLocaleString('es-ES')} total interactions.`,
title: 'Complexity & Predictability',
Hold time: ${Math.round(avgHoldTime)}s. ${avgCV <= 0.75 ? 'High predictability for WFM.' : avgCV <= 1.0 ? 'Acceptable predictability.' : 'High variability, complicates planning.'}`, kpi: { label: 'CV AHT', value: `${(avgCV * 100).toFixed(0)}%` }, icon: Brain }, - // 5. SATISFACCIÓN - CSAT + // 5. SATISFACTION - CSAT { id: 'customer_satisfaction', name: 'customer_satisfaction', - title: 'Satisfacción del Cliente', + title: 'Customer Satisfaction', score: avgCsat > 0 ? Math.round(avgCsat) : 0, percentile: avgCsat > 0 ? (avgCsat >= 80 ? 70 : avgCsat >= 60 ? 50 : 30) : 0, summary: avgCsat > 0 - ? `CSAT: ${avgCsat.toFixed(1)}/100. ${avgCsat >= 80 ? 'Satisfacción alta.' : avgCsat >= 60 ? 'Satisfacción aceptable.' : 'Requiere atención.'}` - : 'CSAT: No disponible en dataset. Considerar implementar encuestas post-llamada.', + ? `CSAT: ${avgCsat.toFixed(1)}/100. ${avgCsat >= 80 ? 'High satisfaction.' : avgCsat >= 60 ? 'Acceptable satisfaction.' : 'Requires attention.'}` + : 'CSAT: Not available in dataset. Consider implementing post-call surveys.', kpi: { label: 'CSAT', value: avgCsat > 0 ? `${Math.round(avgCsat)}/100` : 'N/A' }, icon: Smile }, - // 6. ECONOMÍA - CPI (benchmark aerolíneas: p25=2.20, p50=3.50, p75=4.50, p90=5.50) + // 6. ECONOMY - CPI (airline benchmark: p25=2.20, p50=3.50, p75=4.50, p90=5.50) { id: 'economy_cpi', name: 'economy_cpi', - title: 'Economía Operacional', - // Score basado en percentiles aerolíneas (CPI invertido: menor = mejor) + title: 'Operational Economy', + // Score based on percentiles airlines (CPI inverted: lower = better) score: costPerInteraction <= 2.20 ? 100 : costPerInteraction <= 3.50 ? 80 : costPerInteraction <= 4.50 ? 60 : costPerInteraction <= 5.50 ? 40 : 20, percentile: costPerInteraction <= 2.20 ? 90 : costPerInteraction <= 3.50 ? 70 : costPerInteraction <= 4.50 ? 50 : costPerInteraction <= 5.50 ? 25 : 10, - summary: `CPI: €${costPerInteraction.toFixed(2)} por interacción. Coste anual: €${totalCost.toLocaleString('es-ES')}. 
* Calculate Agentic Readiness from real data
// Linear score: < 100 = 0, 100-5000 interpolation, > 5000 = 10
description: `${totalVolume.toLocaleString('es-ES')} interactions. Linear scale 100-5000`
// Interpretation based on updated thresholds
title: 'High Off-Hours Volume',
description: `Average transfer rate of ${avgTransferRate.toFixed(1)}% indicates need for training or routing.`
description: `The skill "${topSkill.skill}" concentrates ${topSkillPct.toFixed(1)}% of total volume (${topSkill.volume.toLocaleString()} interactions).`
Un agente virtual puede resolver ~${estimatedContainment}% de estas consultas automáticamente, liberando recursos humanos y mejorando la experiencia del cliente con atención inmediata 24/7.`, + title: 'Implement Virtual Agent 24/7', + text: `Desplegar agent virtual para atender ${offHoursPct.toFixed(0)}% of interactions off hours`, + description: `${offHoursVolume.toLocaleString()} interactions occur outside business hours (19:00-08:00). A virtual agent can resolve ~${estimatedContainment}% of these queries automatically, freeing human resources and improving customer experience with immediate attention 24/7.`, dimensionId: 'volumetry_distribution', - impact: `Potencial de contención: ${estimatedSavings.toLocaleString()} interacciones/período`, - timeline: '1-3 meses' + impact: `Containment potential: ${estimatedSavings.toLocaleString()} interactions/period`, + timeline: '1-3 months' }); } @@ -1642,9 +1642,9 @@ function generateRecommendationsFromRealData( recommendations.push({ priority: 'high', title: 'Estandarizar Procesos', - text: `Crear guías y scripts para los ${highVariabilitySkills.length} skills con alta variabilidad`, - description: `Crear guías y scripts para los ${highVariabilitySkills.length} skills con alta variabilidad.`, - impact: 'Reducción del 20-30% en AHT' + text: `Create guides and scripts for the ${highVariabilitySkills.length} skills with high variability`, + description: `Create guides and scripts for the ${highVariabilitySkills.length} skills with high variability.`, + impact: 'Reduction of 20-30% en AHT' }); } @@ -1652,9 +1652,9 @@ function generateRecommendationsFromRealData( if (highVolumeSkills.length > 0) { recommendations.push({ priority: 'high', - title: 'Automatizar Skills de Alto Volumen', - text: `Implementar bots para los ${highVolumeSkills.length} skills con > 500 interacciones`, - description: `Implementar bots para los ${highVolumeSkills.length} skills con > 500 interacciones.`, + title: 'Automate Skills de Alto Volumen', + 
text: `Implement bots for the ${highVolumeSkills.length} skills con > 500 interactions`, + description: `Implement bots for the ${highVolumeSkills.length} skills con > 500 interactions.`, impact: 'Ahorro estimado del 40-60%' }); } @@ -1663,79 +1663,79 @@ function generateRecommendationsFromRealData( } /** - * v3.3: Generar opportunities desde drilldownData (basado en colas con CV < 75%) - * Las oportunidades se clasifican en 3 categorías: + * v3.3: Generar opportunities from drilldownData (based on queues with CV < 75%) + * Las oportunidades se clasifican en 3 categorys: * - Automatizar: Colas con CV < 75% (estables, listas para IA) * - Asistir: Colas con CV 75-100% (necesitan copilot) - * - Optimizar: Colas con CV > 100% (necesitan estandarización primero) + * - Optimize: Queues with CV > 100% (need standardization first) */ /** - * v3.5: Calcular ahorro realista usando fórmula TCO por tier + * v3.5: Calculate realistic savings using TCO formula by tier * - * Fórmula TCO por tier: - * - AUTOMATE (Tier 1): 70% containment → ahorro = vol_annual × 0.70 × (CPI_humano - CPI_ia) - * - ASSIST (Tier 2): 30% efficiency → ahorro = vol_annual × 0.30 × (CPI_humano - CPI_copilot) - * - AUGMENT (Tier 3): 15% optimization → ahorro = vol_annual × 0.15 × (CPI_humano - CPI_optimizado) - * - HUMAN-ONLY (Tier 4): 0% → sin ahorro + * TCO formula by tier: + * - AUTOMATE (Tier 1): 70% containment → savings = vol_annual × 0.70 × (CPI_human - CPI_ai) + * - ASSIST (Tier 2): 30% efficiency → savings = vol_annual × 0.30 × (CPI_human - CPI_copilot) + * - AUGMENT (Tier 3): 15% optimization → savings = vol_annual × 0.15 × (CPI_human - CPI_optimized) + * - HUMAN-ONLY (Tier 4): 0% → without ahorro * - * Costes por interacción (CPI): - * - CPI_humano: Se calcula desde AHT y cost_per_hour (~€4-5/interacción) - * - CPI_ia: €0.15/interacción (chatbot/IVR) - * - CPI_copilot: ~60% del CPI humano (agente asistido) - * - CPI_optimizado: ~85% del CPI humano (mejora marginal) + * Costes per interaction 
(CPI): + * - CPI_human: Se calcula from AHT y cost_per_hour (~€4-5/interaction) + * - CPI_ai: €0.15/interaction (chatbot/IVR) + * - CPI_copilot: ~60% del CPI human (agent asistido) + * - CPI_optimized: ~85% del CPI human (mejora marginal) */ /** - * v3.6: Constantes CPI para cálculo de ahorro TCO - * Valores alineados con metodología Beyond + * v3.6: CPI constants for TCO savings calculation + * Values aligned with Beyond methodology */ const CPI_CONFIG = { - CPI_HUMANO: 2.33, // €/interacción - coste actual agente humano - CPI_BOT: 0.15, // €/interacción - coste bot/automatización - CPI_ASSIST: 1.50, // €/interacción - coste con copilot - CPI_AUGMENT: 2.00, // €/interacción - coste optimizado - // Tasas de éxito/contención por tier - RATE_AUTOMATE: 0.70, // 70% contención en automatización - RATE_ASSIST: 0.30, // 30% eficiencia en asistencia - RATE_AUGMENT: 0.15 // 15% mejora en optimización + CPI_HUMANO: 2.33, // €/interaction - coste actual agent human + CPI_BOT: 0.15, // €/interaction - cost bot/automation + CPI_ASSIST: 1.50, // €/interaction - coste con copilot + CPI_AUGMENT: 2.00, // €/interaction - coste optimizado + // Success/containment rates by tier + RATE_AUTOMATE: 0.70, // 70% containment in automation + RATE_ASSIST: 0.30, // 30% efficiency en asistencia + RATE_AUGMENT: 0.15 // 15% improvement in optimization }; -// Período de datos: el volumen en los datos corresponde a 11 meses, no es mensual +// Data period: the volume in the data corresponds to 11 months, is not monthly const DATA_PERIOD_MONTHS = 11; /** - * v4.2: Calcular ahorro TCO realista usando fórmula explícita con CPI fijos - * IMPORTANTE: El volumen de los datos corresponde a 11 meses, por lo que: - * - Primero calculamos volumen mensual: Vol / 11 - * - Luego anualizamos: × 12 - * Fórmulas: - * - AUTOMATE: (Vol/11) × 12 × 70% × (CPI_humano - CPI_bot) - * - ASSIST: (Vol/11) × 12 × 30% × (CPI_humano - CPI_assist) - * - AUGMENT: (Vol/11) × 12 × 15% × (CPI_humano - CPI_augment) + * v4.2: 
Calculate realistic TCO savings using explicit formula with fixed CPI + * IMPORTANT: The volume in the data corresponds to 11 months, therefore: + * - First we calculate monthly volume: Vol / 11 + * - Then we annualize: × 12 + * Formulas: + * - AUTOMATE: (Vol/11) × 12 × 70% × (CPI_human - CPI_bot) + * - ASSIST: (Vol/11) × 12 × 30% × (CPI_human - CPI_assist) + * - AUGMENT: (Vol/11) × 12 × 15% × (CPI_human - CPI_augment) * - HUMAN-ONLY: 0€ */ function calculateRealisticSavings( volume: number, - _annualCost: number, // Mantenido para compatibilidad pero no usado + _annualCost: number, // Kept for compatibility but not used tier: 'AUTOMATE' | 'ASSIST' | 'AUGMENT' | 'HUMAN-ONLY' ): number { if (volume === 0) return 0; const { CPI_HUMANO, CPI_BOT, CPI_ASSIST, CPI_AUGMENT, RATE_AUTOMATE, RATE_ASSIST, RATE_AUGMENT } = CPI_CONFIG; - // Convertir volumen del período (11 meses) a volumen anual + // Convert volume from period (11 months) to annual volume const annualVolume = (volume / DATA_PERIOD_MONTHS) * 12; switch (tier) { case 'AUTOMATE': - // Ahorro = VolAnual × 70% × (CPI_humano - CPI_bot) + // Savings = VolAnual × 70% × (CPI_human - CPI_bot) return Math.round(annualVolume * RATE_AUTOMATE * (CPI_HUMANO - CPI_BOT)); case 'ASSIST': - // Ahorro = VolAnual × 30% × (CPI_humano - CPI_assist) + // Savings = VolAnual × 30% × (CPI_human - CPI_assist) return Math.round(annualVolume * RATE_ASSIST * (CPI_HUMANO - CPI_ASSIST)); case 'AUGMENT': - // Ahorro = VolAnual × 15% × (CPI_humano - CPI_augment) + // Savings = VolAnual × 15% × (CPI_human - CPI_augment) return Math.round(annualVolume * RATE_AUGMENT * (CPI_HUMANO - CPI_AUGMENT)); case 'HUMAN-ONLY': @@ -1745,13 +1745,13 @@ function calculateRealisticSavings( } export function generateOpportunitiesFromDrilldown(drilldownData: DrilldownDataPoint[], costPerHour: number): Opportunity[] { - // v4.3: Top 10 iniciativas por potencial económico (todos los tiers, no solo AUTOMATE) - // Cada cola = 1 burbuja con su score real y ahorro TCO 
real según su tier + // v4.3: Top 10 initiatives by economic potential (all tiers, not only AUTOMATE) + // Each queue = 1 bubble with its real score and real TCO savings according to its tier - // Extraer todas las colas con su skill padre (excluir HUMAN-ONLY, no tienen ahorro) + // Extract all queues with their parent skill (exclude HUMAN-ONLY, have no savings) const allQueues = drilldownData.flatMap(skill => skill.originalQueues - .filter(q => q.tier !== 'HUMAN-ONLY') // HUMAN-ONLY no genera ahorro + .filter(q => q.tier !== 'HUMAN-ONLY') // HUMAN-ONLY generates no savings .map(q => ({ ...q, skillName: skill.skill @@ -1759,23 +1759,23 @@ export function generateOpportunitiesFromDrilldown(drilldownData: DrilldownDataP ); if (allQueues.length === 0) { - console.warn('⚠️ No hay colas con potencial de ahorro para mostrar en Opportunity Matrix'); + console.warn('⚠️ No queues with savings potential to show in Opportunity Matrix'); return []; } - // Calcular ahorro TCO por cola individual según su tier + // Calculate TCO savings per individual queue according to its tier const queuesWithSavings = allQueues.map(q => { const savings = calculateRealisticSavings(q.volume, q.annualCost || 0, q.tier); return { ...q, savings }; }); - // Ordenar por ahorro descendente + // Sort by descending savings queuesWithSavings.sort((a, b) => b.savings - a.savings); - // Calcular max savings para escalar impact a 0-10 + // Calculate max savings to scale impact to 0-10 const maxSavings = Math.max(...queuesWithSavings.map(q => q.savings), 1); - // Mapeo de tier a dimensionId y customer_segment + // Mapping of tier to dimensionId and customer_segment const tierToDimension: Record = { 'AUTOMATE': 'agentic_readiness', 'ASSIST': 'effectiveness_resolution', @@ -1787,15 +1787,15 @@ export function generateOpportunitiesFromDrilldown(drilldownData: DrilldownDataP 'AUGMENT': 'low' }; - // Generar oportunidades individuales (TOP 10 por potencial económico) + // Generate individual opportunities (TOP 
10 by economic potential) const opportunities: Opportunity[] = queuesWithSavings .slice(0, 10) .map((q, idx) => { - // Impact: ahorro escalado a 0-10 + // Impact: savings scaled to 0-10 const impactRaw = (q.savings / maxSavings) * 10; const impact = Math.max(1, Math.min(10, Math.round(impactRaw * 10) / 10)); - // Feasibility: agenticScore directo (ya es 0-10) + // Feasibility: direct agenticScore (already is 0-10) const feasibility = Math.round(q.agenticScore * 10) / 10; // Nombre con prefijo de tier para claridad @@ -1815,23 +1815,23 @@ export function generateOpportunitiesFromDrilldown(drilldownData: DrilldownDataP }; }); - console.log(`📊 Opportunity Matrix: Top ${opportunities.length} iniciativas por potencial económico (de ${allQueues.length} colas con ahorro)`); + console.log(`📊 Opportunity Matrix: Top ${opportunities.length} initiatives by economic potential (de ${allQueues.length} queues with ahorro)`); return opportunities; } /** - * v3.5: Generar roadmap desde drilldownData usando sistema de Tiers - * Iniciativas estructuradas en 3 fases basadas en clasificación Tier: - * - Phase 1 (Automate): Colas tier AUTOMATE - implementación IA directa (70% containment) + * v3.5: Generar roadmap from drilldownData usando sistema de Tiers + * Initiatives structured in 3 phases based on Tier classification: + * - Phase 1 (Automate): AUTOMATE tier Queues - direct AI implementation (70% containment) * - Phase 2 (Assist): Colas tier ASSIST - copilot y asistencia (30% efficiency) - * - Phase 3 (Augment): Colas tier AUGMENT/HUMAN-ONLY - estandarización primero (15%) + * - Phase 3 (Augment): AUGMENT/HUMAN-ONLY tier Queues - standardization first (15%) */ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[], costPerHour: number): RoadmapInitiative[] { const initiatives: RoadmapInitiative[] = []; let initCounter = 1; - // Extraer y clasificar todas las colas por TIER + // Extract y clasificar all las queues por TIER const allQueues = 
drilldownData.flatMap(skill => skill.originalQueues.map(q => ({ ...q, @@ -1845,7 +1845,7 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] const augmentQueues = allQueues.filter(q => q.tier === 'AUGMENT'); const humanQueues = allQueues.filter(q => q.tier === 'HUMAN-ONLY'); - // Calcular métricas por tier + // Calculate metrics por tier const automateVolume = automateQueues.reduce((sum, q) => sum + q.volume, 0); const automateCost = automateQueues.reduce((sum, q) => sum + (q.annualCost || 0), 0); const assistVolume = assistQueues.reduce((sum, q) => sum + q.volume, 0); @@ -1853,13 +1853,13 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] const augmentVolume = augmentQueues.reduce((sum, q) => sum + q.volume, 0); const augmentCost = augmentQueues.reduce((sum, q) => sum + (q.annualCost || 0), 0); - // Helper para obtener top skills por volumen - const getTopSkillNames = (queues: typeof allQueues, limit: number = 3): string[] => { - const skillVolumes = new Map(); + // Helper para obtain top skills por volumen + const getTopSkillNamonth = (queues: typeof allQueues, limit: number = 3): string[] => { + const skillVolumonth = new Map(); queues.forEach(q => { - skillVolumes.set(q.skillName, (skillVolumes.get(q.skillName) || 0) + q.volume); + skillVolumonth.set(q.skillName, (skillVolumonth.get(q.skillName) || 0) + q.volume); }); - return Array.from(skillVolumes.entries()) + return Array.from(skillVolumonth.entries()) .sort((a, b) => b[1] - a[1]) .slice(0, limit) .map(([name]) => name); @@ -1867,14 +1867,14 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] // ============ PHASE 1: AUTOMATE (Tier AUTOMATE - 70% containment) ============ if (automateQueues.length > 0) { - const topSkills = getTopSkillNames(automateQueues); + const topSkills = getTopSkillNamonth(automateQueues); const avgScore = automateQueues.reduce((sum, q) => sum + q.agenticScore, 0) / automateQueues.length; 
const avgCv = automateQueues.reduce((sum, q) => sum + q.cv_aht, 0) / automateQueues.length; // v3.5: Ahorro REALISTA con TCO const realisticSavings = calculateRealisticSavings(automateVolume, automateCost, 'AUTOMATE'); - // Chatbot para colas con score muy alto (>8) + // Chatbot para queues with score muy high (>8) const highScoreQueues = automateQueues.filter(q => q.agenticScore >= 8); if (highScoreQueues.length > 0) { const hsVolume = highScoreQueues.reduce((sum, q) => sum + q.volume, 0); @@ -1883,24 +1883,24 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] initiatives.push({ id: `init-${initCounter++}`, - name: `Chatbot IA para ${highScoreQueues.length} colas score ≥8`, + name: `Chatbot IA para ${highScoreQueues.length} queues score ≥8`, phase: RoadmapPhase.Automate, timeline: 'Q1 2026', - investment: Math.round(hsSavings * 0.3), // Inversión = 30% del ahorro + investment: Math.round(hsSavings * 0.3), // Investment = 30% of savings resources: ['1x Bot Developer', 'API Integration', 'QA Team'], dimensionId: 'agentic_readiness', risk: 'low', - skillsImpacted: getTopSkillNames(highScoreQueues, 2), + skillsImpacted: getTopSkillNamonth(highScoreQueues, 2), volumeImpacted: hsVolume, - kpiObjective: `Contener 70% del volumen vía chatbot`, - rationale: `${highScoreQueues.length} colas tier AUTOMATE con score promedio ${avgScore.toFixed(1)}/10. Métricas óptimas para automatización completa.`, - savingsDetail: `70% containment × (CPI humano - CPI IA) = ${hsSavings.toLocaleString()}€/año`, + kpiObjective: `Contain 70% of volume via chatbot`, + rationale: `${highScoreQueues.length} queues tier AUTOMATE con score average ${avgScore.toFixed(1)}/10. 
Optimal metrics for complete automation.`, + savingsDetail: `70% containment × (CPI human - CPI AI) = ${hsSavings.toLocaleString()}€/year`, estimatedSavings: hsSavings, resourceHours: 400 }); } - // IVR para resto de colas AUTOMATE + // IVR para resto de queues AUTOMATE const otherAutomateQueues = automateQueues.filter(q => q.agenticScore < 8); if (otherAutomateQueues.length > 0) { const oaVolume = otherAutomateQueues.reduce((sum, q) => sum + q.volume, 0); @@ -1909,18 +1909,18 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] initiatives.push({ id: `init-${initCounter++}`, - name: `IVR inteligente para ${otherAutomateQueues.length} colas AUTOMATE`, + name: `IVR inteligente para ${otherAutomateQueues.length} queues AUTOMATE`, phase: RoadmapPhase.Automate, timeline: 'Q2 2026', investment: Math.round(oaSavings * 0.25), resources: ['1x Voice UX Designer', 'Integration Team', 'QA'], dimensionId: 'agentic_readiness', risk: 'low', - skillsImpacted: getTopSkillNames(otherAutomateQueues, 2), + skillsImpacted: getTopSkillNamonth(otherAutomateQueues, 2), volumeImpacted: oaVolume, kpiObjective: `Pre-calificar y desviar 70% a self-service`, - rationale: `${otherAutomateQueues.length} colas tier AUTOMATE listas para IVR con NLU.`, - savingsDetail: `70% containment × diferencial CPI = ${oaSavings.toLocaleString()}€/año`, + rationale: `${otherAutomateQueues.length} queues tier AUTOMATE listas para IVR con NLU.`, + savingsDetail: `70% containment × CPI differential = ${oaSavings.toLocaleString()}€/year`, estimatedSavings: oaSavings, resourceHours: 320 }); @@ -1929,7 +1929,7 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] // ============ PHASE 2: ASSIST (Tier ASSIST - 30% efficiency) ============ if (assistQueues.length > 0) { - const topSkills = getTopSkillNames(assistQueues); + const topSkills = getTopSkillNamonth(assistQueues); const avgScore = assistQueues.reduce((sum, q) => sum + q.agenticScore, 0) / 
assistQueues.length; // v3.5: Ahorro REALISTA @@ -1938,7 +1938,7 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] // Knowledge Base con IA initiatives.push({ id: `init-${initCounter++}`, - name: `Knowledge Base IA para ${assistQueues.length} colas ASSIST`, + name: `Knowledge Base IA para ${assistQueues.length} queues ASSIST`, phase: RoadmapPhase.Assist, timeline: 'Q2 2026', investment: Math.round(realisticSavings * 0.4), @@ -1947,19 +1947,19 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] risk: 'low', skillsImpacted: topSkills, volumeImpacted: assistVolume, - kpiObjective: `Reducir AHT 30% con sugerencias IA`, - rationale: `${assistQueues.length} colas tier ASSIST (score ${avgScore.toFixed(1)}/10) se benefician de copilot contextual.`, - savingsDetail: `30% efficiency × diferencial CPI = ${realisticSavings.toLocaleString()}€/año`, + kpiObjective: `Reduce AHT 30% with AI suggestions`, + rationale: `${assistQueues.length} queues tier ASSIST (score ${avgScore.toFixed(1)}/10) se benefician de copilot contextual.`, + savingsDetail: `30% efficiency × CPI differential = ${realisticSavings.toLocaleString()}€/year`, estimatedSavings: realisticSavings, resourceHours: 360 }); - // Copilot para agentes si hay volumen alto + // Copilot para agents si hay volumen high if (assistVolume > 50000) { const copilotSavings = Math.round(realisticSavings * 0.6); initiatives.push({ id: `init-${initCounter++}`, - name: `Copilot IA para agentes (${topSkills.slice(0, 2).join(', ')})`, + name: `Copilot IA para agents (${topSkills.slice(0, 2).join(', ')})`, phase: RoadmapPhase.Assist, timeline: 'Q3 2026', investment: Math.round(copilotSavings * 0.5), @@ -1968,9 +1968,9 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] risk: 'medium', skillsImpacted: topSkills.slice(0, 3), volumeImpacted: assistVolume, - kpiObjective: `Reducir variabilidad y migrar colas a tier AUTOMATE`, - rationale: 
`Copilot pre-llena campos, sugiere respuestas y guía al agente para estandarizar.`, - savingsDetail: `Mejora efficiency 30% en ${assistVolume.toLocaleString()} int/mes`, + kpiObjective: `Reduce variability and migrate queues to tier AUTOMATE`, + rationale: `Copilot pre-fills fields, suggests answers and guides agent to standardize.`, + savingsDetail: `Mejora efficiency 30% en ${assistVolume.toLocaleString()} int/month`, estimatedSavings: copilotSavings, resourceHours: 520 }); @@ -1983,16 +1983,16 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] const optimizeCost = optimizeQueues.reduce((sum, q) => sum + (q.annualCost || 0), 0); if (optimizeQueues.length > 0) { - const topSkills = getTopSkillNames(optimizeQueues); + const topSkills = getTopSkillNamonth(optimizeQueues); const avgScore = optimizeQueues.reduce((sum, q) => sum + q.agenticScore, 0) / optimizeQueues.length; - // v3.5: Ahorro REALISTA (muy conservador para AUGMENT) + // v3.5: Ahorro REALISTA (muy conservative para AUGMENT) const realisticSavings = calculateRealisticSavings(optimizeVolume, optimizeCost, 'AUGMENT'); - // Estandarización de procesos + // Process Standardization initiatives.push({ id: `init-${initCounter++}`, - name: `Estandarización (${optimizeQueues.length} colas variables)`, + name: `Standardization (${optimizeQueues.length} variable queues)`, phase: RoadmapPhase.Augment, timeline: 'Q3 2026', investment: Math.round(realisticSavings * 0.8), @@ -2001,19 +2001,19 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] risk: 'medium', skillsImpacted: topSkills, volumeImpacted: optimizeVolume, - kpiObjective: `Reducir CV para migrar colas a tier ASSIST/AUTOMATE`, - rationale: `${optimizeQueues.length} colas tier AUGMENT/HUMAN (score ${avgScore.toFixed(1)}/10) requieren rediseño de procesos.`, - savingsDetail: `15% optimización = ${realisticSavings.toLocaleString()}€/año (conservador)`, + kpiObjective: `Reduce CV to migrate queues to 
tier ASSIST/AUTOMATE`, + rationale: `${optimizeQueues.length} queues tier AUGMENT/HUMAN (score ${avgScore.toFixed(1)}/10) require process redesign.`, + savingsDetail: `15% optimization = ${realisticSavings.toLocaleString()}€/year (conservative)`, estimatedSavings: realisticSavings, resourceHours: 400 }); - // Automatización post-estandarización (futuro) + // Post-standardization Automation (futuro) if (optimizeVolume > 30000) { const futureSavings = calculateRealisticSavings(Math.round(optimizeVolume * 0.4), Math.round(optimizeCost * 0.4), 'ASSIST'); initiatives.push({ id: `init-${initCounter++}`, - name: `Automatización post-estandarización`, + name: `Post-standardization Automation`, phase: RoadmapPhase.Augment, timeline: 'Q1 2027', investment: Math.round(futureSavings * 0.5), @@ -2022,9 +2022,9 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] risk: 'medium', skillsImpacted: topSkills.slice(0, 2), volumeImpacted: Math.round(optimizeVolume * 0.4), - kpiObjective: `Automatizar 40% del volumen tras estandarización`, - rationale: `Una vez reducido CV, las colas serán aptas para automatización.`, - savingsDetail: `Potencial futuro: ${futureSavings.toLocaleString()}€/año`, + kpiObjective: `Automate 40% of volume after standardization`, + rationale: `Once CV is reduced, queues will be suitable for automation.`, + savingsDetail: `Future potential: ${futureSavings.toLocaleString()}€/year`, estimatedSavings: futureSavings, resourceHours: 480 }); @@ -2035,40 +2035,40 @@ export function generateRoadmapFromDrilldown(drilldownData: DrilldownDataPoint[] } /** - * @deprecated v3.3 - Usar generateOpportunitiesFromDrilldown en su lugar - * Generar opportunities desde datos reales + * Use generateOpportunitiesFromDrilldown instead + * Generate opportunities from real data */ function generateOpportunitiesFromRealData(metrics: SkillMetrics[], costPerHour: number): Opportunity[] { - // Encontrar el máximo ahorro para calcular impacto relativo + // 
Find the maximum savings to calculate relative impact const maxSavings = Math.max(...metrics.map(m => m.total_cost * 0.4), 1); return metrics.slice(0, 10).map((m, index) => { - const potentialSavings = m.total_cost * 0.4; // 40% de ahorro potencial + const potentialSavings = m.total_cost * 0.4; // 40% of potential savings - // Impacto: relativo al mayor ahorro (escala 1-10) + // Impact: relative to maximum savings (scale 1-10) const impactRaw = (potentialSavings / maxSavings) * 10; const impact = Math.max(3, Math.min(10, Math.round(impactRaw))); - // Feasibilidad: basada en CV y transfer_rate (baja variabilidad = alta feasibilidad) + // Feasibility: based on CV and transfer_rate (low variability = high feasibility) const feasibilityRaw = 10 - (m.cv_aht * 5) - (m.transfer_rate / 10); const feasibility = Math.max(3, Math.min(10, Math.round(feasibilityRaw))); - // Determinar dimensión según características + // Determine dimension according to characteristics let dimensionId: string; if (m.cv_aht < 0.3 && m.transfer_rate < 15) { - dimensionId = 'agentic_readiness'; // Listo para automatizar + dimensionId = 'agentic_readiness'; // Ready to automate } else if (m.cv_aht < 0.5) { - dimensionId = 'effectiveness_resolution'; // Puede mejorar con asistencia + dimensionId = 'effectiveness_resolution'; // Can improve with assistance } else { - dimensionId = 'complexity_predictability'; // Necesita optimización + dimensionId = 'complexity_predictability'; // Needs optimization } - // Nombre descriptivo + // Descriptive name const prefix = m.cv_aht < 0.3 && m.transfer_rate < 15 - ? 'Automatizar ' + ? 'Automate ' : m.cv_aht < 0.5 - ? 'Asistir con IA en ' - : 'Optimizar procesos en '; + ? 
'Assist with AI in ' + : 'Optimize process in '; return { id: `opp-${index + 1}`, @@ -2083,14 +2083,14 @@ function generateOpportunitiesFromRealData(metrics: SkillMetrics[], costPerHour: } /** - * Generar roadmap desde opportunities y métricas de skills - * v3.0: Iniciativas conectadas a skills reales con volumeImpacted, kpiObjective, rationale + * Generate roadmap from opportunities and skill metrics + * v3.0: Initiatives connected to real skills with volumeImpacted, kpiObjective, rationale */ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: SkillMetrics[]): RoadmapInitiative[] { - // Ordenar por savings descendente para priorizar + // Sort by descending savings to prioritize const sortedOpps = [...opportunities].sort((a, b) => (b.savings || 0) - (a.savings || 0)); - // Crear mapa de métricas por skill para lookup rápido + // Create map of metrics per skill for quick lookup const metricsMap = new Map(); if (metrics) { for (const m of metrics) { @@ -2098,7 +2098,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski } } - // Helper para obtener métricas de un skill + // Helper para obtain metrics de un skill const getSkillMetrics = (skillName: string): SkillMetrics | undefined => { return metricsMap.get(skillName.toLowerCase()) || Array.from(metricsMap.values()).find(m => @@ -2110,17 +2110,17 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski const initiatives: RoadmapInitiative[] = []; let initCounter = 1; - // WAVE 1: Automate - Skills con alto potencial de automatización + // WAVE 1: Automate - Skills with high automation potential const wave1Opps = sortedOpps.slice(0, 2); for (const opp of wave1Opps) { - const skillName = opp.name?.replace(/^(Automatizar |Asistir con IA en |Optimizar procesos en )/, '') || `Skill ${initCounter}`; + const skillName = opp.name?.replace(/^(Automate |Assist with AI in |Optimize process in )/, '') || `Skill ${initCounter}`; const savings = 
opp.savings || 0; const skillMetrics = getSkillMetrics(skillName); const volume = skillMetrics?.volume || Math.round(savings / 5); const cvAht = skillMetrics?.cv_aht || 50; const offHoursPct = skillMetrics?.off_hours_pct || 28; - // Determinar tipo de iniciativa basado en características del skill + // Determine initiative type based on skill characteristics const isHighVolume = volume > 100000; const hasOffHoursOpportunity = offHoursPct > 25; @@ -2140,12 +2140,12 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski skillsImpacted: [skillName], volumeImpacted: volume, kpiObjective: hasOffHoursOpportunity - ? `Automatizar ${Math.round(offHoursPct)}% consultas fuera de horario` + ? `Automate ${Math.round(offHoursPct)}% consultas off hours` : `Desviar 25% a self-service para gestiones simples`, rationale: hasOffHoursOpportunity - ? `${Math.round(offHoursPct)}% del volumen ocurre fuera de horario. Chatbot puede resolver consultas de estado sin agente.` - : `CV AHT ${Math.round(cvAht)}% indica procesos variables. IVR puede pre-cualificar y resolver casos simples.`, - savingsDetail: `Automatización ${Math.round(offHoursPct)}% volumen fuera horario`, + ? `${Math.round(offHoursPct)}% del volumen ocurre off hours. Chatbot puede resolver consultas de status without agent.` + : `CV AHT ${Math.round(cvAht)}% indica process variables. 
IVR puede pre-cualificar y resolver casos simples.`, + savingsDetail: `Automation ${Math.round(offHoursPct)}% off hours volume`, estimatedSavings: savings, resourceHours: 440 }); @@ -2157,7 +2157,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski // Iniciativa 1: Knowledge Base (agrupa varios skills) if (wave2Opps.length > 0) { - const kbSkills = wave2Opps.map(o => o.name?.replace(/^(Automatizar |Asistir con IA en |Optimizar procesos en )/, '') || ''); + const kbSkills = wave2Opps.map(o => o.name?.replace(/^(Automate |Assist with AI in |Optimize process in )/, '') || ''); const kbSavings = wave2Opps.reduce((sum, o) => sum + (o.savings || 0), 0) * 0.4; const kbVolume = wave2Opps.reduce((sum, o) => { const m = getSkillMetrics(o.name || ''); @@ -2166,7 +2166,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski initiatives.push({ id: `init-${initCounter}`, - name: 'Knowledge Base dinámica con IA', + name: 'Dynamic Knowledge Base with AI', phase: RoadmapPhase.Assist, timeline: 'Q2 2026', investment: Math.round(kbSavings * 0.25), @@ -2175,9 +2175,9 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski risk: 'low', skillsImpacted: kbSkills.filter(s => s), volumeImpacted: kbVolume, - kpiObjective: 'Reducir Hold Time 30% mediante sugerencias en tiempo real', - rationale: 'FCR bajo indica que agentes no encuentran información rápidamente. KB con IA sugiere respuestas contextuales.', - savingsDetail: `Reducción Hold Time 30% en ${kbSkills.length} skills`, + kpiObjective: 'Reduce Hold Time 30% through real-time suggestions', + rationale: 'FCR low indicates that agents do not find information quickly. 
KB with AI suggests contextual responses.', + savingsDetail: `Hold Time Reduction 30% en ${kbSkills.length} skills`, estimatedSavings: Math.round(kbSavings), resourceHours: 400 }); @@ -2187,7 +2187,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski // Iniciativa 2: Copilot para skill principal if (wave2Opps.length > 0) { const mainOpp = wave2Opps[0]; - const skillName = mainOpp.name?.replace(/^(Automatizar |Asistir con IA en |Optimizar procesos en )/, '') || 'Principal'; + const skillName = mainOpp.name?.replace(/^(Automate |Assist with AI in |Optimize process in )/, '') || 'Principal'; const savings = mainOpp.savings || 0; const skillMetrics = getSkillMetrics(skillName); const volume = skillMetrics?.volume || Math.round(savings / 5); @@ -2204,19 +2204,19 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski risk: 'medium', skillsImpacted: [skillName], volumeImpacted: volume, - kpiObjective: `Reducir AHT 15% y CV AHT de ${Math.round(cvAht)}% a <80%`, - rationale: `Skill con alto volumen y variabilidad. Copilot puede pre-llenar formularios, sugerir respuestas y guiar al agente.`, - savingsDetail: `Reducción AHT 15% + mejora FCR 10%`, + kpiObjective: `Reduce AHT 15% and CV AHT from ${Math.round(cvAht)}% a <80%`, + rationale: `Skill con high volumen y variabilidad. 
Copilot puede pre-llenar formularios, sugerir respuestas y guiar al agent.`, + savingsDetail: `AHT Reduction 15% + FCR improvement 10%`, estimatedSavings: savings, resourceHours: 600 }); initCounter++; } - // WAVE 3: Augment - Estandarización y cobertura extendida + // WAVE 3: Augment - Standardization and extended coverage const wave3Opps = sortedOpps.slice(4, 6); - // Iniciativa 1: Estandarización (skill con mayor CV) + // Iniciativa 1: Standardization (skill con mayor CV) if (wave3Opps.length > 0) { const highCvOpp = wave3Opps.reduce((max, o) => { const m = getSkillMetrics(o.name || ''); @@ -2224,7 +2224,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski return (m?.cv_aht || 0) > (maxM?.cv_aht || 0) ? o : max; }, wave3Opps[0]); - const skillName = highCvOpp.name?.replace(/^(Automatizar |Asistir con IA en |Optimizar procesos en )/, '') || 'Variable'; + const skillName = highCvOpp.name?.replace(/^(Automate |Assist with AI in |Optimize process in )/, '') || 'Variable'; const savings = highCvOpp.savings || 0; const skillMetrics = getSkillMetrics(skillName); const volume = skillMetrics?.volume || Math.round(savings / 5); @@ -2232,7 +2232,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski initiatives.push({ id: `init-${initCounter}`, - name: `Estandarización procesos ${skillName}`, + name: `Process Standardization ${skillName}`, phase: RoadmapPhase.Augment, timeline: 'Q4 2026', investment: Math.round(savings * 0.4), @@ -2241,16 +2241,16 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski risk: 'medium', skillsImpacted: [skillName], volumeImpacted: volume, - kpiObjective: `Reducir CV AHT de ${Math.round(cvAht)}% a <100%`, - rationale: `CV AHT ${Math.round(cvAht)}% indica procesos no estandarizados. 
Requiere rediseño y documentación antes de automatizar.`, - savingsDetail: `Estandarización reduce variabilidad y habilita automatización futura`, + kpiObjective: `Reduce CV AHT from ${Math.round(cvAht)}% to <100%`, + rationale: `CV AHT ${Math.round(cvAht)}% indicates non-standardized processes. Requires redesign and documentation before automation.`, + savingsDetail: `Standardization reduces variability and enables future automation`, estimatedSavings: savings, resourceHours: 440 }); initCounter++; } - // Iniciativa 2: Cobertura nocturna (si hay volumen fuera de horario) + // Initiative 2: Overnight coverage (if there is off-hours volume) const totalOffHoursVolume = metrics?.reduce((sum, m) => sum + (m.volume * (m.off_hours_pct || 0) / 100), 0) || 0; if (totalOffHoursVolume > 10000 && wave3Opps.length > 1) { const offHoursSkills = metrics?.filter(m => (m.off_hours_pct || 0) > 20).map(m => m.skill).slice(0, 3) || []; @@ -2258,7 +2258,7 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski initiatives.push({ id: `init-${initCounter}`, - name: 'Cobertura nocturna con agentes virtuales', + name: 'Overnight coverage with virtual agents', phase: RoadmapPhase.Augment, timeline: 'Q1 2027', investment: Math.round(offHoursSavings * 0.5), @@ -2267,9 +2267,9 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski risk: 'high', skillsImpacted: offHoursSkills.length > 0 ? offHoursSkills : ['Customer Service', 'Support'], volumeImpacted: Math.round(totalOffHoursVolume), - kpiObjective: 'Cobertura 24/7 con 60% resolución automática nocturna', - rationale: `${Math.round(totalOffHoursVolume).toLocaleString()} interacciones fuera de horario. 
Agente virtual puede resolver consultas y programar callbacks.`, - savingsDetail: `Cobertura 24/7 sin incremento plantilla nocturna`, + kpiObjective: '24/7 Coverage with 60% automatic overnight resolution', + rationale: `${Math.round(totalOffHoursVolume).toLocaleString()} interactions off-hours. Virtual agent can resolve queries and schedule callbacks.`, + savingsDetail: `24/7 coverage without increasing night staffing`, estimatedSavings: Math.round(offHoursSavings), resourceHours: 600 }); @@ -2279,11 +2279,11 @@ function generateRoadmapFromRealData(opportunities: Opportunity[], metrics?: Ski } /** - * v3.10: Generar economic model desde datos reales + * v3.10: Generate economic model from real data * ALINEADO CON ROADMAP: Usa modelo TCO con CPI por tier - * - AUTOMATE: 70% × (€2.33 - €0.15) = €1.526/interacción - * - ASSIST: 30% × (€2.33 - €1.50) = €0.249/interacción - * - AUGMENT: 15% × (€2.33 - €2.00) = €0.050/interacción + * - AUTOMATE: 70% × (€2.33 - €0.15) = €1.526/interaction + * - ASSIST: 30% × (€2.33 - €1.50) = €0.249/interaction + * - AUGMENT: 15% × (€2.33 - €2.00) = €0.050/interaction */ function generateEconomicModelFromRealData( metrics: SkillMetrics[], @@ -2299,7 +2299,7 @@ function generateEconomicModelFromRealData( const CPI_ASSIST = 1.50; const CPI_AUGMENT = 2.00; - // Tasas de contención/deflection por tier + // Containment/deflection rates by tier const RATE_AUTOMATE = 0.70; const RATE_ASSIST = 0.30; const RATE_AUGMENT = 0.15; @@ -2307,7 +2307,7 @@ function generateEconomicModelFromRealData( let annualSavingsTCO = 0; let volumeByTier = { AUTOMATE: 0, ASSIST: 0, AUGMENT: 0, 'HUMAN-ONLY': 0 }; - // Si tenemos drilldownData, calcular ahorro por tier real + // If we have drilldownData, calculate real savings per tier if (drilldownData && drilldownData.length > 0) { drilldownData.forEach(skill => { skill.originalQueues.forEach(queue => { @@ -2315,43 +2315,43 @@ function generateEconomicModelFromRealData( }); }); - // Ahorro anual = Volumen 
× 12 meses × Rate × Diferencial CPI + // Annual savings = Volume × 12 months × Rate × CPI Differential const savingsAUTOMATE = volumeByTier.AUTOMATE * 12 * RATE_AUTOMATE * (CPI_HUMANO - CPI_BOT); const savingsASSIST = volumeByTier.ASSIST * 12 * RATE_ASSIST * (CPI_HUMANO - CPI_ASSIST); const savingsAUGMENT = volumeByTier.AUGMENT * 12 * RATE_AUGMENT * (CPI_HUMANO - CPI_AUGMENT); annualSavingsTCO = Math.round(savingsAUTOMATE + savingsASSIST + savingsAUGMENT); } else { - // Fallback: estimar 35% del coste total (legacy) + // Fallback: estimate 35% of total cost (legacy) annualSavingsTCO = Math.round(totalCost * 0.35); } - // Inversión inicial: del Roadmap alineado + // Initial investment: from aligned Roadmap // Wave 1: €47K, Wave 2: €35K, Wave 3: €70K, Wave 4: €85K = €237K total let initialInvestment: number; if (roadmap && roadmap.length > 0) { initialInvestment = roadmap.reduce((sum, init) => sum + (init.investment || 0), 0); } else { - // Default: Escenario conservador Wave 1-2 + // Default: Conservative scenario Wave 1-2 initialInvestment = 82000; // €47K + €35K } - // Costes recurrentes anuales (alineado con Roadmap) + // Annual recurring costs (aligned with Roadmap) // Wave 2: €40K, Wave 3: €78K, Wave 4: €108K const recurrentCostAnnual = drilldownData && drilldownData.length > 0 - ? Math.round(initialInvestment * 0.5) // 50% de inversión como recurrente + ? 
Math.round(initialInvestment * 0.5) // 50% of investment as recurring : Math.round(initialInvestment * 0.15); - // Margen neto anual (ahorro - recurrente) + // Margen neto annual (ahorro - recurrente) const netAnnualSavings = annualSavingsTCO - recurrentCostAnnual; - // Payback: Implementación + Recuperación (alineado con Roadmap v3.9) - const mesesImplementacion = 9; // Wave 1 (6m) + mitad Wave 2 (3m/2) + // Payback: Implementation + Recovery (aligned with Roadmap v3.9) + const monthsImplementacion = 9; // Wave 1 (6m) + mitad Wave 2 (3m/2) const margenMensual = netAnnualSavings / 12; - const mesesRecuperacion = margenMensual > 0 ? Math.ceil(initialInvestment / margenMensual) : -1; - const paybackMonths = margenMensual > 0 ? mesesImplementacion + mesesRecuperacion : -1; + const monthsRecuperacion = margenMensual > 0 ? Math.ceil(initialInvestment / margenMensual) : -1; + const paybackMonths = margenMensual > 0 ? monthsImplementacion + monthsRecuperacion : -1; - // ROI 3 años: ((Ahorro×3) - (Inversión + Recurrente×3)) / (Inversión + Recurrente×3) × 100 + // ROI 3 years: ((Savings×3) - (Investment + Recurring×3)) / (Investment + Recurring×3) × 100 const costeTotalTresAnos = initialInvestment + (recurrentCostAnnual * 3); const ahorroTotalTresAnos = annualSavingsTCO * 3; const roi3yr = costeTotalTresAnos > 0 @@ -2376,21 +2376,21 @@ function generateEconomicModelFromRealData( if (savingsAUTOMATE > 0) { savingsBreakdown.push({ - category: `AUTOMATE (${volumeByTier.AUTOMATE.toLocaleString()} int/mes)`, + category: `AUTOMATE (${volumeByTier.AUTOMATE.toLocaleString()} int/month)`, amount: savingsAUTOMATE, percentage: Math.round((savingsAUTOMATE / totalSav) * 100) }); } if (savingsASSIST > 0) { savingsBreakdown.push({ - category: `ASSIST (${volumeByTier.ASSIST.toLocaleString()} int/mes)`, + category: `ASSIST (${volumeByTier.ASSIST.toLocaleString()} int/month)`, amount: savingsASSIST, percentage: Math.round((savingsASSIST / totalSav) * 100) }); } if (savingsAUGMENT > 0) { 
savingsBreakdown.push({ - category: `AUGMENT (${volumeByTier.AUGMENT.toLocaleString()} int/mes)`, + category: `AUGMENT (${volumeByTier.AUGMENT.toLocaleString()} int/month)`, amount: savingsAUGMENT, percentage: Math.round((savingsAUGMENT / totalSav) * 100) }); @@ -2401,7 +2401,7 @@ function generateEconomicModelFromRealData( topSkills.forEach((skill, idx) => { const skillSavings = Math.round(skill.total_cost * 0.4); savingsBreakdown.push({ - category: `Reducción AHT 15% ${skill.skill}`, + category: `AHT Reduction 15% ${skill.skill}`, amount: skillSavings, percentage: Math.round((skillSavings / (annualSavingsTCO || 1)) * 100) }); @@ -2410,7 +2410,7 @@ function generateEconomicModelFromRealData( const costBreakdown = [ { category: 'Software y licencias', amount: Math.round(initialInvestment * 0.40), percentage: 40 }, - { category: 'Desarrollo e implementación', amount: Math.round(initialInvestment * 0.30), percentage: 30 }, + { category: 'Development and implementation', amount: Math.round(initialInvestment * 0.30), percentage: 30 }, { category: 'Training y change mgmt', amount: Math.round(initialInvestment * 0.20), percentage: 20 }, { category: 'Contingencia', amount: Math.round(initialInvestment * 0.10), percentage: 10 }, ]; @@ -2429,15 +2429,15 @@ function generateEconomicModelFromRealData( } /** - * Generar benchmark desde datos reales - * BENCHMARKS SECTOR AÉREO: AHT P50=380s, FCR=70%, Abandono=5%, Ratio P90/P50<2.0 + * Generate benchmark from real data + * AIRLINE SECTOR BENCHMARKS: AHT P50=380s, FCR=70%, Abandonment=5%, Ratio P90/P50<2.0 */ function generateBenchmarkFromRealData(metrics: SkillMetrics[]): BenchmarkDataPoint[] { const avgAHT = metrics.reduce((sum, m) => sum + m.aht_mean, 0) / (metrics.length || 1); const avgCV = metrics.reduce((sum, m) => sum + m.cv_aht, 0) / (metrics.length || 1); const avgRatio = 1 + avgCV * 1.5; // Ratio P90/P50 aproximado - // FCR Técnico: 100 - transfer_rate (ponderado por volumen) + // Technical FCR: 100 - transfer_rate 
(weighted by volume) const totalVolume = metrics.reduce((sum, m) => sum + m.volume_valid, 0); const avgFCR = totalVolume > 0 ? metrics.reduce((sum, m) => sum + (m.fcr_tecnico * m.volume_valid), 0) / totalVolume @@ -2448,11 +2448,11 @@ function generateBenchmarkFromRealData(metrics: SkillMetrics[]): BenchmarkDataPo const totalInteractions = metrics.reduce((sum, m) => sum + m.volume, 0); const totalAbandoned = metrics.reduce((sum, m) => sum + m.abandon_count, 0); const abandonRate = totalInteractions > 0 ? (totalAbandoned / totalInteractions) * 100 : 0; - // CPI: Coste total / Total interacciones + // CPI: Total cost / Total interactions const totalCost = metrics.reduce((sum, m) => sum + m.total_cost, 0); const avgCPI = totalInteractions > 0 ? totalCost / totalInteractions : 3.5; - // Calcular percentiles basados en benchmarks sector aéreo + // Calculate percentiles based on airline sector benchmarks const ahtPercentile = avgAHT <= 380 ? 75 : avgAHT <= 420 ? 60 : avgAHT <= 480 ? 40 : 25; const fcrPercentile = avgFCR >= 70 ? 70 : avgFCR >= 60 ? 50 : avgFCR >= 50 ? 35 : 20; const abandonPercentile = abandonRate <= 5 ? 75 : abandonRate <= 8 ? 55 : abandonRate <= 12 ? 
35 : 20; @@ -2508,7 +2508,7 @@ function generateBenchmarkFromRealData(metrics: SkillMetrics[]): BenchmarkDataPo p90: 1.4 }, { - kpi: 'Coste/Interacción', + kpi: 'Cost/Interaction', userValue: avgCPI, userDisplay: `€${avgCPI.toFixed(2)}`, industryValue: 3.5, diff --git a/frontend/utils/segmentClassifier.ts b/frontend/utils/segmentClassifier.ts index eee8562..6aa5718 100644 --- a/frontend/utils/segmentClassifier.ts +++ b/frontend/utils/segmentClassifier.ts @@ -1,5 +1,5 @@ // utils/segmentClassifier.ts -// Utilidad para clasificar colas/skills en segmentos de cliente +// Utility to classify queues/skills into customer segments import type { CustomerSegment, RawInteraction, StaticConfig } from '../types'; @@ -10,8 +10,8 @@ export interface SegmentMapping { } /** - * Parsea string de colas separadas por comas - * Ejemplo: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"] + * Parses queue string separated by commas + * Example: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"] */ export function parseQueueList(input: string): string[] { if (!input || input.trim().length === 0) { @@ -25,13 +25,13 @@ export function parseQueueList(input: string): string[] { } /** - * Clasifica una cola según el mapeo proporcionado - * Usa matching parcial y case-insensitive + * Classifies a queue according to the provided mapping + * Uses partial and case-insensitive matching * - * Ejemplo: + * Example: * - queue: "VIP_Support" + mapping.high: ["VIP"] → "high" - * - queue: "Soporte_General_N1" + mapping.medium: ["Soporte_General"] → "medium" - * - queue: "Retencion" (no match) → "medium" (default) + * - queue: "General_Support_L1" + mapping.medium: ["General_Support"] → "medium" + * - queue: "Retention" (no match) → "medium" (default) */ export function classifyQueue( queue: string, @@ -39,7 +39,7 @@ export function classifyQueue( ): CustomerSegment { const normalizedQueue = queue.toLowerCase().trim(); - // Buscar en high value + // Search in high value for (const 
highQueue of mapping.high_value_queues) { const normalizedHigh = highQueue.toLowerCase().trim(); if (normalizedQueue.includes(normalizedHigh) || normalizedHigh.includes(normalizedQueue)) { @@ -47,7 +47,7 @@ export function classifyQueue( } } - // Buscar en low value + // Search in low value for (const lowQueue of mapping.low_value_queues) { const normalizedLow = lowQueue.toLowerCase().trim(); if (normalizedQueue.includes(normalizedLow) || normalizedLow.includes(normalizedQueue)) { @@ -55,7 +55,7 @@ export function classifyQueue( } } - // Buscar en medium value (explícito) + // Search in medium value (explicit) for (const mediumQueue of mapping.medium_value_queues) { const normalizedMedium = mediumQueue.toLowerCase().trim(); if (normalizedQueue.includes(normalizedMedium) || normalizedMedium.includes(normalizedQueue)) { @@ -63,13 +63,13 @@ export function classifyQueue( } } - // Default: medium (para colas no mapeadas) + // Default: medium (for unmapped queues) return 'medium'; } /** - * Clasifica todas las colas únicas de un conjunto de interacciones - * Retorna un mapa de cola → segmento + * Classifies all unique queues from a set of interactions + * Returns a map of queue → segment */ export function classifyAllQueues( interactions: RawInteraction[], @@ -77,10 +77,10 @@ export function classifyAllQueues( ): Map { const queueSegments = new Map(); - // Obtener colas únicas + // Get unique queues const uniqueQueues = [...new Set(interactions.map(i => i.queue_skill))]; - // Clasificar cada cola + // Classify each queue uniqueQueues.forEach(queue => { queueSegments.set(queue, classifyQueue(queue, mapping)); }); @@ -89,8 +89,8 @@ export function classifyAllQueues( } /** - * Genera estadísticas de segmentación - * Retorna conteo, porcentaje y lista de colas por segmento + * Generates segmentation statistics + * Returns count, percentage and list of queues by segment */ export function getSegmentationStats( interactions: RawInteraction[], @@ -108,13 +108,13 @@ export 
function getSegmentationStats( total: interactions.length }; - // Contar interacciones por segmento + // Count interactions by segment interactions.forEach(interaction => { const segment = queueSegments.get(interaction.queue_skill) || 'medium'; stats[segment].count++; }); - // Calcular porcentajes + // Calculate percentages const total = interactions.length; if (total > 0) { stats.high.percentage = Math.round((stats.high.count / total) * 100); @@ -122,7 +122,7 @@ export function getSegmentationStats( stats.low.percentage = Math.round((stats.low.count / total) * 100); } - // Obtener colas por segmento (únicas) + // Get queues by segment (unique) queueSegments.forEach((segment, queue) => { if (!stats[segment].queues.includes(queue)) { stats[segment].queues.push(queue); @@ -133,7 +133,7 @@ export function getSegmentationStats( } /** - * Valida que el mapeo tenga al menos una cola en algún segmento + * Validates that the mapping has at least one queue in some segment */ export function isValidMapping(mapping: SegmentMapping): boolean { return ( @@ -144,8 +144,8 @@ export function isValidMapping(mapping: SegmentMapping): boolean { } /** - * Crea un mapeo desde StaticConfig - * Si no hay segment_mapping, retorna mapeo vacío + * Creates a mapping from StaticConfig + * If there is no segment_mapping, returns empty mapping */ export function getMappingFromConfig(config: StaticConfig): SegmentMapping | null { if (!config.segment_mapping) { @@ -160,8 +160,8 @@ export function getMappingFromConfig(config: StaticConfig): SegmentMapping | nul } /** - * Obtiene el segmento para una cola específica desde el config - * Si no hay mapeo, retorna 'medium' por defecto + * Gets the segment for a specific queue from the config + * If there is no mapping, returns 'medium' by default */ export function getSegmentForQueue( queue: string, @@ -177,7 +177,7 @@ export function getSegmentForQueue( } /** - * Formatea estadísticas para mostrar en UI + * Formats statistics for display in UI */ 
export function formatSegmentationSummary( stats: ReturnType @@ -185,15 +185,15 @@ export function formatSegmentationSummary( const parts: string[] = []; if (stats.high.count > 0) { - parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interacciones)`); + parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interactions)`); } if (stats.medium.count > 0) { - parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interacciones)`); + parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interactions)`); } if (stats.low.count > 0) { - parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interacciones)`); + parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interactions)`); } return parts.join(' | ');