Merge pull request #4 from sujucu70/claude/check-agent-readiness-status-Exnpc

Claude/check agent readiness status exnpc
This commit is contained in:
sujucu70
2026-02-07 12:05:50 +01:00
committed by GitHub
14 changed files with 1692 additions and 1477 deletions

163
TRANSLATION_STATUS.md Normal file
View File

@@ -0,0 +1,163 @@
# Translation Status - Beyond CX Analytics
## ✅ Completed Modules
### Agentic Readiness Module
- **Status:** ✅ **COMPLETED**
- **Commit:** `b991824`
- **Files:**
- `frontend/utils/agenticReadinessV2.ts` - All functions, comments, and descriptions translated
- `frontend/components/tabs/AgenticReadinessTab.tsx` - RED_FLAG_CONFIGS and comments translated
- `frontend/locales/en.json` & `es.json` - New subfactors section added
- `backend/beyond_flows/scorers/agentic_score.py` - All docstrings, comments, and reason codes translated
---
## 🔄 Modules Pending Translation
### HIGH PRIORITY - Core Utils (Frontend)
#### 1. **realDataAnalysis.ts**
- **Lines of Spanish:** ~92 occurrences
- **Scope:**
- Function names: `clasificarTierSimple()`, `clasificarTier()`
- 20+ inline comments in Spanish
- Function documentation
- **Impact:** HIGH - Core analysis engine
- **Estimated effort:** 2-3 hours
#### 2. **analysisGenerator.ts**
- **Lines of Spanish:** ~49 occurrences
- **Scope:**
- Multiple inline comments
- References to `clasificarTierSimple()`
- Data transformation comments
- **Impact:** HIGH - Main data generator
- **Estimated effort:** 1-2 hours
#### 3. **backendMapper.ts**
- **Lines of Spanish:** ~13 occurrences
- **Scope:**
- Function documentation
- Mapping logic comments
- **Impact:** MEDIUM - Backend integration
- **Estimated effort:** 30-60 minutes
---
### MEDIUM PRIORITY - Utilities (Frontend)
#### 4. **dataTransformation.ts**
- **Lines of Spanish:** ~8 occurrences
- **Impact:** MEDIUM
- **Estimated effort:** 30 minutes
#### 5. **segmentClassifier.ts**
- **Lines of Spanish:** ~3 occurrences
- **Impact:** LOW
- **Estimated effort:** 15 minutes
#### 6. **fileParser.ts**
- **Lines of Spanish:** ~3 occurrences
- **Impact:** LOW
- **Estimated effort:** 15 minutes
#### 7. **apiClient.ts**
- **Lines of Spanish:** ~2 occurrences
- **Impact:** LOW
- **Estimated effort:** 10 minutes
#### 8. **serverCache.ts**
- **Lines of Spanish:** ~2 occurrences
- **Impact:** LOW
- **Estimated effort:** 10 minutes
---
### MEDIUM PRIORITY - Backend Dimensions
#### 9. **backend/beyond_metrics/dimensions/OperationalPerformance.py**
- **Lines of Spanish:** ~7 occurrences
- **Impact:** MEDIUM
- **Estimated effort:** 30 minutes
#### 10. **backend/beyond_metrics/dimensions/SatisfactionExperience.py**
- **Lines of Spanish:** ~8 occurrences
- **Impact:** MEDIUM
- **Estimated effort:** 30 minutes
#### 11. **backend/beyond_metrics/dimensions/EconomyCost.py**
- **Lines of Spanish:** ~4 occurrences
- **Impact:** MEDIUM
- **Estimated effort:** 20 minutes
---
### LOW PRIORITY - API & Services
#### 12. **backend/beyond_api/api/analysis.py**
- **Lines of Spanish:** ~1 occurrence
- **Impact:** LOW
- **Estimated effort:** 5 minutes
#### 13. **backend/beyond_api/api/auth.py**
- **Lines of Spanish:** ~1 occurrence
- **Impact:** LOW
- **Estimated effort:** 5 minutes
#### 14. **backend/beyond_api/services/analysis_service.py**
- **Lines of Spanish:** ~2 occurrences
- **Impact:** LOW
- **Estimated effort:** 10 minutes
#### 15. **backend/beyond_metrics/io/base.py**
- **Lines of Spanish:** ~1 occurrence
- **Impact:** LOW
- **Estimated effort:** 5 minutes
#### 16. **backend/beyond_metrics/io/google_drive.py**
- **Lines of Spanish:** ~2 occurrences
- **Impact:** LOW
- **Estimated effort:** 10 minutes
---
## 📊 Summary Statistics
| Category | Files | Total Occurrences | Estimated Time |
|----------|-------|-------------------|----------------|
| ✅ Completed | 4 | ~150 | 3 hours (DONE) |
| 🔴 High Priority | 3 | 154 | 4-6 hours |
| 🟡 Medium Priority | 8 | 35 | 2-3 hours |
| 🟢 Low Priority | 5 | 7 | 45 minutes |
| **TOTAL PENDING** | **16** | **196** | **~8 hours** |
---
## 🎯 Recommended Translation Order
### Phase 1: Critical Path (High Priority)
1. `realDataAnalysis.ts` - Core analysis engine with `clasificarTier()` functions
2. `analysisGenerator.ts` - Main data generation orchestrator
3. `backendMapper.ts` - Backend integration layer
### Phase 2: Supporting Utils (Medium Priority)
4. `dataTransformation.ts`
5. Backend dimension files (`OperationalPerformance.py`, `SatisfactionExperience.py`, `EconomyCost.py`)
### Phase 3: Final Cleanup (Low Priority)
6. Remaining utility files and API services
---
## 📝 Notes
- **Variable names** like `volumen_mes`, `escalación`, etc. in data interfaces should **remain as-is** for API compatibility
- **Function names** that are part of the public API should be carefully reviewed before renaming
- **i18n strings** in locales files should continue to have both EN/ES versions
- **Reason codes** and internal enums should be in English for consistency
---
**Last Updated:** 2026-02-07
**Status:** agenticReadiness module completed, 16 modules pending

View File

@@ -1,22 +1,22 @@
"""
agentic_score.py
Calcula el Agentic Readiness Score de un contact center a partir
de un JSON con KPIs agregados (misma estructura que results.json).
Calculates the Agentic Readiness Score of a contact center from
a JSON file with aggregated KPIs (same structure as results.json).
Diseñado como clase para integrarse fácilmente en pipelines.
Designed as a class to integrate easily into pipelines.
Características:
- Tolerante a datos faltantes: si una dimensión no se puede calcular
(porque faltan KPIs), se marca como `computed = False` y no se
incluye en el cálculo del score global.
- La llamada típica en un pipeline será:
Features:
- Tolerant to missing data: if a dimension cannot be calculated
(due to missing KPIs), it is marked as `computed = False` and not
included in the global score calculation.
- Typical pipeline call:
from agentic_score import AgenticScorer
scorer = AgenticScorer()
result = scorer.run_on_folder("/ruta/a/carpeta")
result = scorer.run_on_folder("/path/to/folder")
Esa carpeta debe contener un `results.json` de entrada.
El módulo generará un `agentic_readiness.json` en la misma carpeta.
The folder must contain a `results.json` input file.
The module will generate an `agentic_readiness.json` in the same folder.
"""
from __future__ import annotations
@@ -35,7 +35,7 @@ Number = Union[int, float]
# =========================
def _is_nan(x: Any) -> bool:
"""Devuelve True si x es NaN, None o el string 'NaN'."""
"""Returns True if x is NaN, None or the string 'NaN'."""
try:
if x is None:
return True
@@ -60,7 +60,7 @@ def _safe_mean(values: Sequence[Optional[Number]]) -> Optional[float]:
def _get_nested(d: Dict[str, Any], *keys: str, default: Any = None) -> Any:
"""Acceso seguro a diccionarios anidados."""
"""Safe access to nested dictionaries."""
cur: Any = d
for k in keys:
if not isinstance(cur, dict) or k not in cur:
@@ -75,20 +75,20 @@ def _clamp(value: float, lo: float = 0.0, hi: float = 10.0) -> float:
def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
"""
Normaliza un campo que representa una secuencia numérica.
Normalizes a field representing a numeric sequence.
Soporta:
- Formato antiguo del pipeline: [10, 20, 30]
- Formato nuevo del pipeline: {"labels": [...], "values": [10, 20, 30]}
Supports:
- Old pipeline format: [10, 20, 30]
- New pipeline format: {"labels": [...], "values": [10, 20, 30]}
Devuelve:
- lista de números, si hay datos numéricos válidos
- None, si el campo no tiene una secuencia numérica interpretable
Returns:
- list of numbers, if there is valid numeric data
- None, if the field does not have an interpretable numeric sequence
"""
if field is None:
return None
# Formato nuevo: {"labels": [...], "values": [...]}
# New format: {"labels": [...], "values": [...]}
if isinstance(field, dict) and "values" in field:
seq = field.get("values")
else:
@@ -102,7 +102,7 @@ def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
if isinstance(v, (int, float)):
out.append(v)
else:
# Intentamos conversión suave por si viene como string numérico
# Try soft conversion in case it's a numeric string
try:
out.append(float(v))
except (TypeError, ValueError):
@@ -117,21 +117,21 @@ def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, Any]:
"""
Repetitividad basada en volumen medio por skill.
Repeatability based on average volume per skill.
Regla (pensada por proceso/skill):
- 10 si volumen > 80
- 5 si 4080
- 0 si < 40
Rule (designed per process/skill):
- 10 if volume > 80
- 5 if 4080
- 0 if < 40
Si no hay datos (lista vacía o no numérica), la dimensión
se marca como no calculada (computed = False).
If there is no data (empty or non-numeric list), the dimension
is marked as not calculated (computed = False).
"""
if not volume_by_skill:
return {
"score": None,
"computed": False,
"reason": "sin_datos_volumen",
"reason": "no_volume_data",
"details": {
"avg_volume_per_skill": None,
"volume_by_skill": volume_by_skill,
@@ -143,7 +143,7 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
return {
"score": None,
"computed": False,
"reason": "volumen_no_numerico",
"reason": "volume_not_numeric",
"details": {
"avg_volume_per_skill": None,
"volume_by_skill": volume_by_skill,
@@ -152,13 +152,13 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
if avg_volume > 80:
score = 10.0
reason = "alto_volumen"
reason = "high_volume"
elif avg_volume >= 40:
score = 5.0
reason = "volumen_medio"
reason = "medium_volume"
else:
score = 0.0
reason = "volumen_bajo"
reason = "low_volume"
return {
"score": score,
@@ -178,36 +178,36 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
def score_predictibilidad(aht_ratio: Any,
escalation_rate: Any) -> Dict[str, Any]:
"""
Predictibilidad basada en:
- Variabilidad AHT: ratio P90/P50
- Tasa de escalación (%)
Predictability based on:
- AHT variability: ratio P90/P50
- Escalation rate (%)
Regla:
- 10 si ratio < 1.5 y escalación < 10%
- 5 si ratio 1.52.0 o escalación 1020%
- 0 si ratio > 2.0 y escalación > 20%
- 3 fallback si datos parciales
Rule:
- 10 if ratio < 1.5 and escalation < 10%
- 5 if ratio 1.52.0 or escalation 1020%
- 0 if ratio > 2.0 and escalation > 20%
- 3 fallback if data is partial
Si no hay ni ratio ni escalación, la dimensión no se calcula.
If there is neither ratio nor escalation data, the dimension is not calculated.
"""
if aht_ratio is None and escalation_rate is None:
return {
"score": None,
"computed": False,
"reason": "sin_datos",
"reason": "no_data",
"details": {
"aht_p90_p50_ratio": None,
"escalation_rate_pct": None,
},
}
# Normalizamos ratio
# Normalize ratio
if aht_ratio is None or _is_nan(aht_ratio):
ratio: Optional[float] = None
else:
ratio = float(aht_ratio)
# Normalizamos escalación
# Normalize escalation
if escalation_rate is None or _is_nan(escalation_rate):
esc: Optional[float] = None
else:
@@ -217,7 +217,7 @@ def score_predictibilidad(aht_ratio: Any,
return {
"score": None,
"computed": False,
"reason": "sin_datos",
"reason": "no_data",
"details": {
"aht_p90_p50_ratio": None,
"escalation_rate_pct": None,
@@ -230,20 +230,20 @@ def score_predictibilidad(aht_ratio: Any,
if ratio is not None and esc is not None:
if ratio < 1.5 and esc < 10.0:
score = 10.0
reason = "alta_predictibilidad"
reason = "high_predictability"
elif (1.5 <= ratio <= 2.0) or (10.0 <= esc <= 20.0):
score = 5.0
reason = "predictibilidad_media"
reason = "medium_predictability"
elif ratio > 2.0 and esc > 20.0:
score = 0.0
reason = "baja_predictibilidad"
reason = "low_predictability"
else:
score = 3.0
reason = "caso_intermedio"
reason = "intermediate_case"
else:
# Datos parciales: penalizamos pero no ponemos a 0
# Partial data: penalize but do not set to 0
score = 3.0
reason = "datos_parciales"
reason = "partial_data"
return {
"score": score,
@@ -263,23 +263,23 @@ def score_predictibilidad(aht_ratio: Any,
def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
"""
Estructuración de datos usando proxy de canal.
Data structuring using channel proxy.
Asumimos que el canal con mayor % es texto (en proyectos reales se puede
We assume the channel with the highest % is text (in real projects this
assignment can be parameterized).
Regla:
- 10 si texto > 60%
Rule:
- 10 if text > 60%
- 5 if 30–60%
- 0 if < 30%
Si no hay datos de canales, la dimensión no se calcula.
If there is no channel data, the dimension is not calculated.
"""
if not channel_distribution_pct:
return {
"score": None,
"computed": False,
"reason": "sin_datos_canal",
"reason": "no_channel_data",
"details": {
"estimated_text_share_pct": None,
"channel_distribution_pct": channel_distribution_pct,
@@ -299,7 +299,7 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
return {
"score": None,
"computed": False,
"reason": "canales_no_numericos",
"reason": "channels_not_numeric",
"details": {
"estimated_text_share_pct": None,
"channel_distribution_pct": channel_distribution_pct,
@@ -308,13 +308,13 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
if max_share > 60.0:
score = 10.0
reason = "alta_proporcion_texto"
reason = "high_text_proportion"
elif max_share >= 30.0:
score = 5.0
reason = "proporcion_texto_media"
reason = "medium_text_proportion"
else:
score = 0.0
reason = "baja_proporcion_texto"
reason = "low_text_proportion"
return {
"score": score,
@@ -334,9 +334,9 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
def score_complejidad(aht_ratio: Any,
escalation_rate: Any) -> Dict[str, Any]:
"""
Complejidad inversa del proceso (010).
Inverse complexity of the process (010).
1) Base: inversa lineal de la variabilidad AHT (ratio P90/P50):
1) Base: linear inverse of the AHT variability (P90/P50 ratio):
- ratio = 1.0 -> 10
- ratio = 1.5 -> ~7.5
- ratio = 2.0 -> 5
@@ -345,12 +345,12 @@ def score_complejidad(aht_ratio: Any,
formula_base = (3 - ratio) / (3 - 1) * 10, acotado a [0,10]
2) Ajuste por escalación:
2) Escalation adjustment:
- subtract (escalation_rate / 5) points.
Nota: más score = proceso más "simple / automatizable".
Note: higher score = a "simpler / more automatable" process.
Si no hay ni ratio ni escalación, la dimensión no se calcula.
If there is neither ratio nor escalation data, the dimension is not calculated.
"""
if aht_ratio is None or _is_nan(aht_ratio):
ratio: Optional[float] = None
@@ -366,36 +366,36 @@ def score_complejidad(aht_ratio: Any,
return {
"score": None,
"computed": False,
"reason": "sin_datos",
"reason": "no_data",
"details": {
"aht_p90_p50_ratio": None,
"escalation_rate_pct": None,
},
}
# Base por variabilidad
# Base for variability
if ratio is None:
base = 5.0 # fallback neutro
base_reason = "sin_ratio_usamos_valor_neutro"
base = 5.0 # neutral fallback
base_reason = "no_ratio_using_neutral_value"
else:
base_raw = (3.0 - ratio) / (3.0 - 1.0) * 10.0
base = _clamp(base_raw)
base_reason = "calculado_desde_ratio"
base_reason = "calculated_from_ratio"
# Ajuste por escalación
# Escalation adjustment
if esc is None:
adj = 0.0
adj_reason = "sin_escalacion_sin_ajuste"
adj_reason = "no_escalation_no_adjustment"
else:
adj = - (esc / 5.0) # cada 5 puntos de escalación resta 1
adj_reason = "ajuste_por_escalacion"
adj = - (esc / 5.0) # every 5 escalation points subtract 1
adj_reason = "escalation_adjustment"
final_score = _clamp(base + adj)
return {
"score": final_score,
"computed": True,
"reason": "complejidad_inversa",
"reason": "inverse_complexity",
"details": {
"aht_p90_p50_ratio": ratio,
"escalation_rate_pct": esc,
@@ -409,21 +409,21 @@ def score_complejidad(aht_ratio: Any,
def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
"""
Estabilidad del proceso basada en relación pico/off-peak.
Process stability based on peak/off-peak ratio.
Regla:
- 10 si ratio < 3
Rule:
- 10 if ratio < 3
- 7 if 3–5
- 3 if 5–7
- 0 if > 7
Si no hay dato de ratio, la dimensión no se calcula.
If there is no ratio data, the dimension is not calculated.
"""
if peak_offpeak_ratio is None or _is_nan(peak_offpeak_ratio):
return {
"score": None,
"computed": False,
"reason": "sin_datos_peak_offpeak",
"reason": "no_peak_offpeak_data",
"details": {
"peak_offpeak_ratio": None,
},
@@ -432,16 +432,16 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
r = float(peak_offpeak_ratio)
if r < 3.0:
score = 10.0
reason = "muy_estable"
reason = "very_stable"
elif r < 5.0:
score = 7.0
reason = "estable_moderado"
reason = "moderately_stable"
elif r < 7.0:
score = 3.0
reason = "pico_pronunciado"
reason = "pronounced_peak"
else:
score = 0.0
reason = "muy_inestable"
reason = "very_unstable"
return {
"score": score,
@@ -460,20 +460,20 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
def score_roi(annual_savings: Any) -> Dict[str, Any]:
"""
ROI potencial anual.
Annual potential ROI.
Regla:
- 10 si ahorro > 100k €/año
- 5 si 10k100k €/año
- 0 si < 10k €/año
Rule:
- 10 if savings > 100k €/year
- 5 if 10k–100k €/year
- 0 if < 10k €/year
Si no hay dato de ahorro, la dimensión no se calcula.
If there is no savings data, the dimension is not calculated.
"""
if annual_savings is None or _is_nan(annual_savings):
return {
"score": None,
"computed": False,
"reason": "sin_datos_ahorro",
"reason": "no_savings_data",
"details": {
"annual_savings_eur": None,
},
@@ -482,13 +482,13 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]:
savings = float(annual_savings)
if savings > 100_000:
score = 10.0
reason = "roi_alto"
reason = "high_roi"
elif savings >= 10_000:
score = 5.0
reason = "roi_medio"
reason = "medium_roi"
else:
score = 0.0
reason = "roi_bajo"
reason = "low_roi"
return {
"score": score,
@@ -506,20 +506,20 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]:
def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]:
"""
Clasificación final (alineada con frontend):
- ≥6: COPILOT 🤖 (Listo para Copilot)
Final classification (aligned with frontend):
- ≥6: COPILOT 🤖 (Ready for Copilot)
- 4–5.99: OPTIMIZE 🔧 (Optimize First)
- <4: HUMAN 👤 (Requires Human Management)
Si score es None (ninguna dimensión disponible), devuelve NO_DATA.
If score is None (no dimension available), returns NO_DATA.
"""
if score is None:
return {
"label": "NO_DATA",
"emoji": "",
"description": (
"No se ha podido calcular el Agentic Readiness Score porque "
"ninguna de las dimensiones tenía datos suficientes."
"Could not calculate the Agentic Readiness Score because "
"none of the dimensions had sufficient data."
),
}
@@ -527,22 +527,22 @@ def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]:
label = "COPILOT"
emoji = "🤖"
description = (
"Listo para Copilot. Procesos con predictibilidad y simplicidad "
"suficientes para asistencia IA (sugerencias en tiempo real, autocompletado)."
"Ready for Copilot. Processes with sufficient predictability and simplicity "
"for AI assistance (real-time suggestions, autocomplete)."
)
elif score >= 4.0:
label = "OPTIMIZE"
emoji = "🔧"
description = (
"Optimizar primero. Estandarizar procesos y reducir variabilidad "
"antes de implementar asistencia IA."
"Optimize first. Standardize processes and reduce variability "
"before implementing AI assistance."
)
else:
label = "HUMAN"
emoji = "👤"
description = (
"Requiere gestión humana. Procesos complejos o variables que "
"necesitan intervención humana antes de considerar automatización."
"Requires human management. Complex or variable processes that "
"need human intervention before considering automation."
)
return {
@@ -604,22 +604,22 @@ class AgenticScorer:
def compute_from_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""
Calcula el Agentic Readiness Score a partir de un dict de datos.
Calculates the Agentic Readiness Score from a data dict.
Tolerante a datos faltantes: renormaliza pesos usando solo
dimensiones con `computed = True`.
Tolerant to missing data: renormalizes weights using only
dimensions with `computed = True`.
Compatibilidad con pipeline:
- Soporta tanto el formato antiguo:
Pipeline compatibility:
- Supports both the old format:
"volume_by_skill": [10, 20, 30]
- como el nuevo:
- and the new:
"volume_by_skill": {"labels": [...], "values": [10, 20, 30]}
"""
volumetry = data.get("volumetry", {})
op = data.get("operational_performance", {})
econ = data.get("economy_costs", {})
# Normalizamos aquí los posibles formatos para contentar al type checker
# Normalize here the possible formats for the type checker
volume_by_skill = _normalize_numeric_sequence(
volumetry.get("volume_by_skill")
)
@@ -650,7 +650,7 @@ class AgenticScorer:
"roi": roi,
}
# --- Renormalización de pesos sólo con dimensiones disponibles ---
# --- Weight renormalization only with available dimensions ---
effective_weights: Dict[str, float] = {}
for name, base_w in self.base_weights.items():
dim = sub_scores.get(name, {})
@@ -665,7 +665,7 @@ class AgenticScorer:
else:
normalized_weights = {}
# --- Score final ---
# --- Final score ---
if not normalized_weights:
final_score: Optional[float] = None
else:
@@ -692,8 +692,8 @@ class AgenticScorer:
"metadata": {
"source_module": "agentic_score.py",
"notes": (
"Modelo simplificado basado en KPIs agregados. "
"Renormaliza los pesos cuando faltan dimensiones."
"Simplified model based on aggregated KPIs. "
"Renormalizes weights when dimensions are missing."
),
},
}
@@ -710,11 +710,11 @@ class AgenticScorer:
def run_on_folder(self, folder_path: Union[str, Path]) -> Dict[str, Any]:
"""
Punto de entrada típico para el pipeline:
- Lee <folder>/results.json
- Calcula Agentic Readiness
- Escribe <folder>/agentic_readiness.json
- Devuelve el dict con el resultado
Typical pipeline entry point:
- Reads <folder>/results.json
- Calculates Agentic Readiness
- Writes <folder>/agentic_readiness.json
- Returns the dict with the result
"""
data = self.load_results(folder_path)
result = self.compute_from_data(data)

View File

@@ -23,17 +23,16 @@ REQUIRED_COLUMNS_ECON: List[str] = [
@dataclass
class EconomyConfig:
"""
Parámetros manuales para la dimensión de Economía y Costes.
Manual parameters for the Economy and Cost dimension.
- labor_cost_per_hour: coste total/hora de un agente (fully loaded).
- overhead_rate: % overhead variable (ej. 0.1 = 10% sobre labor).
- tech_costs_annual: coste anual de tecnología (licencias, infra, ...).
- automation_cpi: coste por interacción automatizada (ej. 0.15€).
- automation_volume_share: % del volumen automatizable (0-1).
- automation_success_rate: % éxito de la automatización (0-1).
- labor_cost_per_hour: total cost/hour of an agent (fully loaded).
- overhead_rate: % variable overhead (e.g. 0.1 = 10% over labor).
- tech_costs_annual: annual technology cost (licenses, infrastructure, ...).
- automation_cpi: cost per automated interaction (e.g. 0.15€).
- automation_volume_share: % of automatable volume (0-1).
- automation_success_rate: % automation success (0-1).
- customer_segments: mapping opcional skill -> segmento ("high"/"medium"/"low")
para futuros insights de ROI por segmento.
- customer_segments: optional mapping skill -> segment ("high"/"medium"/"low") for future ROI insights by segment.
"""
labor_cost_per_hour: float
@@ -48,20 +47,20 @@ class EconomyConfig:
@dataclass
class EconomyCostMetrics:
"""
DIMENSIÓN 4: ECONOMÍA y COSTES
DIMENSION 4: ECONOMY and COSTS
Propósito:
- Cuantificar el COSTE actual (CPI, coste anual).
- Estimar el impacto de overhead y tecnología.
- Calcular un primer estimado de "coste de ineficiencia" y ahorro potencial.
Purpose:
- Quantify the current COST (CPI, annual cost).
- Estimate the impact of overhead and technology.
- Calculate an initial estimate of "inefficiency cost" and potential savings.
Requiere:
- Columnas del dataset transaccional (ver REQUIRED_COLUMNS_ECON).
Requires:
- Columns from the transactional dataset (see REQUIRED_COLUMNS_ECON).
Inputs opcionales vía EconomyConfig:
- labor_cost_per_hour (obligatorio para cualquier cálculo de €).
Optional inputs via EconomyConfig:
- labor_cost_per_hour (required for any € calculation).
- overhead_rate, tech_costs_annual, automation_*.
- customer_segments (para insights de ROI por segmento).
- customer_segments (for ROI insights by segment).
"""
df: pd.DataFrame
@@ -72,13 +71,13 @@ class EconomyCostMetrics:
self._prepare_data()
# ------------------------------------------------------------------ #
# Helpers internos
# Internal helpers
# ------------------------------------------------------------------ #
def _validate_columns(self) -> None:
missing = [c for c in REQUIRED_COLUMNS_ECON if c not in self.df.columns]
if missing:
raise ValueError(
f"Faltan columnas obligatorias para EconomyCostMetrics: {missing}"
f"Missing required columns for EconomyCostMetrics: {missing}"
)
def _prepare_data(self) -> None:
@@ -97,15 +96,15 @@ class EconomyCostMetrics:
df["duration_talk"].fillna(0)
+ df["hold_time"].fillna(0)
+ df["wrap_up_time"].fillna(0)
) # segundos
) # seconds
# Filtrar por record_status para cálculos de AHT/CPI
# Solo incluir registros VALID (excluir NOISE, ZOMBIE, ABANDON)
# Filter by record_status for AHT/CPI calculations
# Only include VALID records (exclude NOISE, ZOMBIE, ABANDON)
if "record_status" in df.columns:
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
df["_is_valid_for_cost"] = df["record_status"] == "VALID"
else:
# Legacy data sin record_status: incluir todo
# Legacy data without record_status: include all
df["_is_valid_for_cost"] = True
self.df = df
@@ -118,11 +117,11 @@ class EconomyCostMetrics:
return self.config is not None and self.config.labor_cost_per_hour is not None
# ------------------------------------------------------------------ #
# KPI 1: CPI por canal/skill
# KPI 1: CPI by channel/skill
# ------------------------------------------------------------------ #
def cpi_by_skill_channel(self) -> pd.DataFrame:
"""
CPI (Coste Por Interacción) por skill/canal.
CPI (Cost Per Interaction) by skill/channel.
CPI = (Labor_cost_per_interaction + Overhead_variable) / EFFECTIVE_PRODUCTIVITY
@@ -130,19 +129,17 @@ class EconomyCostMetrics:
- Overhead_variable = overhead_rate * Labor_cost_per_interaction
- EFFECTIVE_PRODUCTIVITY = 0.70 (70% - accounts for non-productive time)
Excluye registros abandonados del cálculo de costes para consistencia
con el path del frontend (fresh CSV).
Excludes abandoned records from cost calculation for consistency with the frontend path (fresh CSV).
Si no hay config de costes -> devuelve DataFrame vacío.
If there is no cost config -> returns empty DataFrame.
Incluye queue_skill y channel como columnas (no solo índice) para que
el frontend pueda hacer lookup por nombre de skill.
Includes queue_skill and channel as columns (not just index) so that the frontend can lookup by skill name.
"""
if not self._has_cost_config():
return pd.DataFrame()
cfg = self.config
assert cfg is not None # para el type checker
assert cfg is not None # for the type checker
df = self.df.copy()
if df.empty:
@@ -154,15 +151,15 @@ class EconomyCostMetrics:
else:
df_cost = df
# Filtrar por record_status: solo VALID para cálculo de AHT
# Excluye NOISE, ZOMBIE, ABANDON
# Filter by record_status: only VALID for AHT calculation
# Excludes NOISE, ZOMBIE, ABANDON
if "_is_valid_for_cost" in df_cost.columns:
df_cost = df_cost[df_cost["_is_valid_for_cost"] == True]
if df_cost.empty:
return pd.DataFrame()
# AHT por skill/canal (en segundos) - solo registros VALID
# AHT by skill/channel (in seconds) - only VALID records
grouped = df_cost.groupby(["queue_skill", "channel"])["handle_time"].mean()
if grouped.empty:
@@ -193,17 +190,16 @@ class EconomyCostMetrics:
return out.sort_index().reset_index()
# ------------------------------------------------------------------ #
# KPI 2: coste anual por skill/canal
# KPI 2: annual cost by skill/channel
# ------------------------------------------------------------------ #
def annual_cost_by_skill_channel(self) -> pd.DataFrame:
"""
Coste anual por skill/canal.
Annual cost by skill/channel.
cost_annual = CPI * volumen (cantidad de interacciones de la muestra).
cost_annual = CPI * volume (number of interactions in the sample).
Nota: por simplicidad asumimos que el dataset refleja un periodo anual.
Si en el futuro quieres anualizar (ej. dataset = 1 mes) se puede añadir
un factor de escalado en EconomyConfig.
Note: for simplicity we assume the dataset reflects an annual period.
If in the future you want to annualize (e.g. dataset = 1 month) you can add a scaling factor in EconomyConfig.
"""
cpi_table = self.cpi_by_skill_channel()
if cpi_table.empty:
@@ -224,18 +220,18 @@ class EconomyCostMetrics:
return joined
# ------------------------------------------------------------------ #
# KPI 3: desglose de costes (labor / tech / overhead)
# KPI 3: cost breakdown (labor / tech / overhead)
# ------------------------------------------------------------------ #
def cost_breakdown(self) -> Dict[str, float]:
"""
Desglose % de costes: labor, overhead, tech.
Cost breakdown %: labor, overhead, tech.
labor_total = sum(labor_cost_per_interaction)
overhead_total = labor_total * overhead_rate
tech_total = tech_costs_annual (si se ha proporcionado)
tech_total = tech_costs_annual (if provided)
Devuelve porcentajes sobre el total.
Si falta configuración de coste -> devuelve {}.
Returns percentages of the total.
If cost configuration is missing -> returns {}.
"""
if not self._has_cost_config():
return {}
@@ -258,7 +254,7 @@ class EconomyCostMetrics:
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
# Costes anuales de labor y overhead
# Annual labor and overhead costs
annual_labor = (joined["labor_cost"] * joined["volume"]).sum()
annual_overhead = (joined["overhead_cost"] * joined["volume"]).sum()
annual_tech = cfg.tech_costs_annual
@@ -278,21 +274,21 @@ class EconomyCostMetrics:
}
# ------------------------------------------------------------------ #
# KPI 4: coste de ineficiencia (€ por variabilidad/escalación)
# KPI 4: inefficiency cost (€ by variability/escalation)
# ------------------------------------------------------------------ #
def inefficiency_cost_by_skill_channel(self) -> pd.DataFrame:
"""
Estimación muy simplificada de coste de ineficiencia:
Very simplified estimate of inefficiency cost:
Para cada skill/canal:
For each skill/channel:
- AHT_p50, AHT_p90 (segundos).
- AHT_p50, AHT_p90 (seconds).
- Delta = max(0, AHT_p90 - AHT_p50).
- Se asume que ~40% de las interacciones están por encima de la mediana.
- Assumes that ~40% of interactions are above the median.
- Ineff_seconds = Delta * volume * 0.4
- Ineff_cost = LaborCPI_per_second * Ineff_seconds
NOTA: Es un modelo aproximado para cuantificar "orden de magnitud".
NOTE: This is an approximate model to quantify "order of magnitude".
"""
if not self._has_cost_config():
return pd.DataFrame()
@@ -302,8 +298,8 @@ class EconomyCostMetrics:
df = self.df.copy()
# Filtrar por record_status: solo VALID para cálculo de AHT
# Excluye NOISE, ZOMBIE, ABANDON
# Filter by record_status: only VALID for AHT calculation
# Excludes NOISE, ZOMBIE, ABANDON
if "_is_valid_for_cost" in df.columns:
df = df[df["_is_valid_for_cost"] == True]
@@ -318,7 +314,7 @@ class EconomyCostMetrics:
if stats.empty:
return pd.DataFrame()
# CPI para obtener coste/segundo de labor
# CPI to get cost/second of labor
# cpi_by_skill_channel now returns with reset_index, so we need to set index for join
cpi_table_raw = self.cpi_by_skill_channel()
if cpi_table_raw.empty:
@@ -331,11 +327,11 @@ class EconomyCostMetrics:
merged = merged.fillna(0.0)
delta = (merged["aht_p90"] - merged["aht_p50"]).clip(lower=0.0)
affected_fraction = 0.4 # aproximación
affected_fraction = 0.4 # approximation
ineff_seconds = delta * merged["volume"] * affected_fraction
# labor_cost = coste por interacción con AHT medio;
# aproximamos coste/segundo como labor_cost / AHT_medio
# labor_cost = cost per interaction with average AHT;
# approximate cost/second as labor_cost / average_AHT
aht_mean = grouped["handle_time"].mean()
merged["aht_mean"] = aht_mean
@@ -351,21 +347,21 @@ class EconomyCostMetrics:
return merged[["aht_p50", "aht_p90", "volume", "ineff_seconds", "ineff_cost"]].reset_index()
# ------------------------------------------------------------------ #
# KPI 5: ahorro potencial anual por automatización
# KPI 5: potential annual savings from automation
# ------------------------------------------------------------------ #
def potential_savings(self) -> Dict[str, Any]:
"""
Ahorro potencial anual basado en:
Potential annual savings based on:
Ahorro = (CPI_humano - CPI_automatizado) * Volumen_automatizable * Tasa_éxito
Savings = (Human_CPI - Automated_CPI) * Automatable_volume * Success_rate
Donde:
- CPI_humano = media ponderada de cpi_total.
- CPI_automatizado = config.automation_cpi
- Volumen_automatizable = volume_total * automation_volume_share
- Tasa_éxito = automation_success_rate
Where:
- Human_CPI = weighted average of cpi_total.
- Automated_CPI = config.automation_cpi
- Automatable_volume = volume_total * automation_volume_share
- Success_rate = automation_success_rate
Si faltan parámetros en config -> devuelve {}.
If config parameters are missing -> returns {}.
"""
if not self._has_cost_config():
return {}
@@ -384,7 +380,7 @@ class EconomyCostMetrics:
if total_volume <= 0:
return {}
# CPI humano medio ponderado
# Weighted average human CPI
weighted_cpi = (
(cpi_table["cpi_total"] * cpi_table["volume"]).sum() / total_volume
)
@@ -409,12 +405,12 @@ class EconomyCostMetrics:
# ------------------------------------------------------------------ #
def plot_cost_waterfall(self) -> Axes:
"""
Waterfall de costes anuales (labor + tech + overhead).
Waterfall of annual costs (labor + tech + overhead).
"""
breakdown = self.cost_breakdown()
if not breakdown:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center")
ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center")
ax.set_axis_off()
return ax
@@ -436,14 +432,14 @@ class EconomyCostMetrics:
bottoms.append(running)
running += v
# barras estilo waterfall
# waterfall style bars
x = np.arange(len(labels))
ax.bar(x, values)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylabel("€ anuales")
ax.set_title("Desglose anual de costes")
ax.set_ylabel("€ annual")
ax.set_title("Annual cost breakdown")
for idx, v in enumerate(values):
ax.text(idx, v, f"{v:,.0f}", ha="center", va="bottom")
@@ -454,12 +450,12 @@ class EconomyCostMetrics:
def plot_cpi_by_channel(self) -> Axes:
"""
Gráfico de barras de CPI medio por canal.
Bar chart of average CPI by channel.
"""
cpi_table = self.cpi_by_skill_channel()
if cpi_table.empty:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center")
ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center")
ax.set_axis_off()
return ax
@@ -474,7 +470,7 @@ class EconomyCostMetrics:
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
# CPI medio ponderado por canal
# Weighted average CPI by channel
per_channel = (
joined.reset_index()
.groupby("channel")
@@ -486,9 +482,9 @@ class EconomyCostMetrics:
fig, ax = plt.subplots(figsize=(6, 4))
per_channel.plot(kind="bar", ax=ax)
ax.set_xlabel("Canal")
ax.set_ylabel("CPI medio (€)")
ax.set_title("Coste por interacción (CPI) por canal")
ax.set_xlabel("Channel")
ax.set_ylabel("Average CPI (€)")
ax.set_title("Cost per interaction (CPI) by channel")
ax.grid(axis="y", alpha=0.3)
return ax

View File

@@ -25,32 +25,31 @@ REQUIRED_COLUMNS_OP: List[str] = [
@dataclass
class OperationalPerformanceMetrics:
"""
Dimensión: RENDIMIENTO OPERACIONAL Y DE SERVICIO
Dimension: OPERATIONAL PERFORMANCE AND SERVICE
Propósito: medir el balance entre rapidez (eficiencia) y calidad de resolución,
más la variabilidad del servicio.
Purpose: measure the balance between speed (efficiency) and resolution quality, plus service variability.
Requiere como mínimo:
Requires at minimum:
- interaction_id
- datetime_start
- queue_skill
- channel
- duration_talk (segundos)
- hold_time (segundos)
- wrap_up_time (segundos)
- duration_talk (seconds)
- hold_time (seconds)
- wrap_up_time (seconds)
- agent_id
- transfer_flag (bool/int)
Columnas opcionales:
- is_resolved (bool/int) -> para FCR
- abandoned_flag (bool/int) -> para tasa de abandono
- customer_id / caller_id -> para reincidencia y repetición de canal
- logged_time (segundos) -> para occupancy_rate
Optional columns:
- is_resolved (bool/int) -> for FCR
- abandoned_flag (bool/int) -> for abandonment rate
- customer_id / caller_id -> for recurrence and channel repetition
- logged_time (seconds) -> for occupancy_rate
"""
df: pd.DataFrame
# Benchmarks / parámetros de normalización (puedes ajustarlos)
# Benchmarks / normalization parameters (you can adjust them)
AHT_GOOD: float = 300.0 # 5 min
AHT_BAD: float = 900.0 # 15 min
VAR_RATIO_GOOD: float = 1.2 # P90/P50 ~1.2 muy estable
@@ -61,19 +60,19 @@ class OperationalPerformanceMetrics:
self._prepare_data()
# ------------------------------------------------------------------ #
# Helpers internos
# Internal helpers
# ------------------------------------------------------------------ #
def _validate_columns(self) -> None:
    """
    Ensure every required column is present in ``self.df``.

    Raises:
        ValueError: listing the missing columns from REQUIRED_COLUMNS_OP,
            so the caller sees exactly which inputs the dataset lacks.
    """
    missing = [c for c in REQUIRED_COLUMNS_OP if c not in self.df.columns]
    if missing:
        # Single English message only — the merge left both the Spanish
        # pre-image and English post-image f-strings adjacent, which Python
        # concatenates into one doubled bilingual message.
        raise ValueError(
            f"Missing required columns for OperationalPerformanceMetrics: {missing}"
        )
def _prepare_data(self) -> None:
df = self.df.copy()
# Tipos
# Types
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
@@ -86,13 +85,13 @@ class OperationalPerformanceMetrics:
+ df["wrap_up_time"].fillna(0)
)
# v3.0: Filtrar NOISE y ZOMBIE para cálculos de variabilidad
# v3.0: Filter NOISE and ZOMBIE for variability calculations
# record_status: 'VALID', 'NOISE', 'ZOMBIE', 'ABANDON'
# Para AHT/CV solo usamos 'VALID' (excluye noise, zombie, abandon)
# For AHT/CV we only use 'VALID' (excludes noise, zombie, abandon)
if "record_status" in df.columns:
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
# Crear máscara para registros válidos: SOLO "VALID"
# Excluye explícitamente NOISE, ZOMBIE, ABANDON y cualquier otro valor
# Create mask for valid records: ONLY "VALID"
# Explicitly excludes NOISE, ZOMBIE, ABANDON and any other value
df["_is_valid_for_cv"] = df["record_status"] == "VALID"
# Log record_status breakdown for debugging
@@ -104,21 +103,21 @@ class OperationalPerformanceMetrics:
print(f" - {status}: {count}")
print(f" VALID rows for AHT calculation: {valid_count}")
else:
# Legacy data sin record_status: incluir todo
# Legacy data without record_status: include all
df["_is_valid_for_cv"] = True
print(f"[OperationalPerformance] No record_status column - using all {len(df)} rows")
# Normalización básica
# Basic normalization
df["queue_skill"] = df["queue_skill"].astype(str).str.strip()
df["channel"] = df["channel"].astype(str).str.strip()
df["agent_id"] = df["agent_id"].astype(str).str.strip()
# Flags opcionales convertidos a bool cuando existan
# Optional flags converted to bool when they exist
for flag_col in ["is_resolved", "abandoned_flag", "transfer_flag"]:
if flag_col in df.columns:
df[flag_col] = df[flag_col].astype(int).astype(bool)
# customer_id: usamos customer_id si existe, si no caller_id
# customer_id: we use customer_id if it exists, otherwise caller_id
if "customer_id" in df.columns:
df["customer_id"] = df["customer_id"].astype(str)
elif "caller_id" in df.columns:
@@ -126,8 +125,8 @@ class OperationalPerformanceMetrics:
else:
df["customer_id"] = None
# logged_time opcional
# Normalizamos logged_time: siempre será una serie float con NaN si no existe
# logged_time optional
# Normalize logged_time: will always be a float series with NaN if it does not exist
df["logged_time"] = pd.to_numeric(df.get("logged_time", np.nan), errors="coerce")
@@ -138,16 +137,16 @@ class OperationalPerformanceMetrics:
return self.df.empty
# ------------------------------------------------------------------ #
# AHT y variabilidad
# AHT and variability
# ------------------------------------------------------------------ #
def aht_distribution(self) -> Dict[str, float]:
"""
Devuelve P10, P50, P90 del AHT y el ratio P90/P50 como medida de variabilidad.
Returns P10, P50, P90 of AHT and the P90/P50 ratio as a measure of variability.
v3.0: Filtra NOISE y ZOMBIE para el cálculo de variabilidad.
Solo usa registros con record_status='valid' o sin status (legacy).
v3.0: Filters NOISE and ZOMBIE for variability calculation.
Only uses records with record_status='valid' or without status (legacy).
"""
# Filtrar solo registros válidos para cálculo de variabilidad
# Filter only valid records for variability calculation
df_valid = self.df[self.df["_is_valid_for_cv"] == True]
ht = df_valid["handle_time"].dropna().astype(float)
if ht.empty:
@@ -167,10 +166,9 @@ class OperationalPerformanceMetrics:
def talk_hold_acw_p50_by_skill(self) -> pd.DataFrame:
"""
P50 de talk_time, hold_time y wrap_up_time por skill.
P50 of talk_time, hold_time and wrap_up_time by skill.
Incluye queue_skill como columna (no solo índice) para que
el frontend pueda hacer lookup por nombre de skill.
Includes queue_skill as a column (not just index) so that the frontend can lookup by skill name.
"""
df = self.df
@@ -192,24 +190,24 @@ class OperationalPerformanceMetrics:
return result.round(2).sort_index().reset_index()
# ------------------------------------------------------------------ #
# FCR, escalación, abandono, reincidencia, repetición canal
# FCR, escalation, abandonment, recurrence, channel repetition
# ------------------------------------------------------------------ #
def fcr_rate(self) -> float:
"""
FCR (First Contact Resolution).
Prioridad 1: Usar fcr_real_flag del CSV si existe
Prioridad 2: Calcular como 100 - escalation_rate
Priority 1: Use fcr_real_flag from CSV if it exists
Priority 2: Calculate as 100 - escalation_rate
"""
df = self.df
total = len(df)
if total == 0:
return float("nan")
# Prioridad 1: Usar fcr_real_flag si existe
# Priority 1: Use fcr_real_flag if it exists
if "fcr_real_flag" in df.columns:
col = df["fcr_real_flag"]
# Normalizar a booleano
# Normalize to boolean
if col.dtype == "O":
fcr_mask = (
col.astype(str)
@@ -224,7 +222,7 @@ class OperationalPerformanceMetrics:
fcr = (fcr_count / total) * 100.0
return float(max(0.0, min(100.0, round(fcr, 2))))
# Prioridad 2: Fallback a 100 - escalation_rate
# Priority 2: Fallback to 100 - escalation_rate
try:
esc = self.escalation_rate()
except Exception:
@@ -239,7 +237,7 @@ class OperationalPerformanceMetrics:
def escalation_rate(self) -> float:
"""
% de interacciones que requieren escalación (transfer_flag == True).
% of interactions that require escalation (transfer_flag == True).
"""
df = self.df
total = len(df)
@@ -251,17 +249,17 @@ class OperationalPerformanceMetrics:
def abandonment_rate(self) -> float:
"""
% de interacciones abandonadas.
% of abandoned interactions.
Busca en orden: is_abandoned, abandoned_flag, abandoned
Si ninguna columna existe, devuelve NaN.
Searches in order: is_abandoned, abandoned_flag, abandoned
If no column exists, returns NaN.
"""
df = self.df
total = len(df)
if total == 0:
return float("nan")
# Buscar columna de abandono en orden de prioridad
# Search for abandonment column in priority order
abandon_col = None
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
if col_name in df.columns:
@@ -273,7 +271,7 @@ class OperationalPerformanceMetrics:
col = df[abandon_col]
# Normalizar a booleano
# Normalize to boolean
if col.dtype == "O":
abandon_mask = (
col.astype(str)
@@ -289,10 +287,9 @@ class OperationalPerformanceMetrics:
def high_hold_time_rate(self, threshold_seconds: float = 60.0) -> float:
"""
% de interacciones con hold_time > threshold (por defecto 60s).
% of interactions with hold_time > threshold (default 60s).
Proxy de complejidad: si el agente tuvo que poner en espera al cliente
más de 60 segundos, probablemente tuvo que consultar/investigar.
Complexity proxy: if the agent had to put the customer on hold for more than 60 seconds, they probably had to consult/investigate.
"""
df = self.df
total = len(df)
@@ -306,44 +303,43 @@ class OperationalPerformanceMetrics:
def recurrence_rate_7d(self) -> float:
"""
% de clientes que vuelven a contactar en < 7 días para el MISMO skill.
% of customers who contact again in < 7 days for the SAME skill.
Se basa en customer_id (o caller_id si no hay customer_id) + queue_skill.
Calcula:
- Para cada combinación cliente + skill, ordena por datetime_start
- Si hay dos contactos consecutivos separados < 7 días (mismo cliente, mismo skill),
cuenta como "recurrente"
- Tasa = nº clientes recurrentes / nº total de clientes
Based on customer_id (or caller_id if no customer_id) + queue_skill.
Calculates:
- For each client + skill combination, sorts by datetime_start
- If there are two consecutive contacts separated by < 7 days (same client, same skill), counts as "recurrent"
- Rate = number of recurrent clients / total number of clients
NOTA: Solo cuenta como recurrencia si el cliente llama por el MISMO skill.
Un cliente que llama a "Ventas" y luego a "Soporte" NO es recurrente.
NOTE: Only counts as recurrence if the client calls for the SAME skill.
A client who calls "Sales" and then "Support" is NOT recurrent.
"""
df = self.df.dropna(subset=["datetime_start"]).copy()
# Normalizar identificador de cliente
# Normalize client identifier
if "customer_id" not in df.columns:
if "caller_id" in df.columns:
df["customer_id"] = df["caller_id"]
else:
# No hay identificador de cliente -> no se puede calcular
# No client identifier -> cannot calculate
return float("nan")
df = df.dropna(subset=["customer_id"])
if df.empty:
return float("nan")
# Ordenar por cliente + skill + fecha
# Sort by client + skill + date
df = df.sort_values(["customer_id", "queue_skill", "datetime_start"])
# Diferencia de tiempo entre contactos consecutivos por cliente Y skill
# Esto asegura que solo contamos recontactos del mismo cliente para el mismo skill
# Time difference between consecutive contacts by client AND skill
# This ensures we only count re-contacts from the same client for the same skill
df["delta"] = df.groupby(["customer_id", "queue_skill"])["datetime_start"].diff()
# Marcamos los contactos que ocurren a menos de 7 días del anterior (mismo skill)
# Mark contacts that occur less than 7 days from the previous one (same skill)
recurrence_mask = df["delta"] < pd.Timedelta(days=7)
# Nº de clientes que tienen al menos un contacto recurrente (para cualquier skill)
# Number of clients who have at least one recurrent contact (for any skill)
recurrent_customers = df.loc[recurrence_mask, "customer_id"].nunique()
total_customers = df["customer_id"].nunique()
@@ -356,9 +352,9 @@ class OperationalPerformanceMetrics:
def repeat_channel_rate(self) -> float:
"""
% de reincidencias (<7 días) en las que el cliente usa el MISMO canal.
% of recurrences (<7 days) in which the client uses the SAME channel.
Si no hay customer_id/caller_id o solo un contacto por cliente, devuelve NaN.
If there is no customer_id/caller_id or only one contact per client, returns NaN.
"""
df = self.df.dropna(subset=["datetime_start"]).copy()
if df["customer_id"].isna().all():
@@ -387,11 +383,11 @@ class OperationalPerformanceMetrics:
# ------------------------------------------------------------------ #
def occupancy_rate(self) -> float:
"""
Tasa de ocupación:
Occupancy rate:
occupancy = sum(handle_time) / sum(logged_time) * 100.
Requiere columna 'logged_time'. Si no existe o es todo 0, devuelve NaN.
Requires 'logged_time' column. If it does not exist or is all 0, returns NaN.
"""
df = self.df
if "logged_time" not in df.columns:
@@ -408,23 +404,23 @@ class OperationalPerformanceMetrics:
return float(round(occ * 100, 2))
# ------------------------------------------------------------------ #
# Score de rendimiento 0-10
# Performance score 0-10
# ------------------------------------------------------------------ #
def performance_score(self) -> Dict[str, float]:
"""
Calcula un score 0-10 combinando:
- AHT (bajo es mejor)
- FCR (alto es mejor)
- Variabilidad (P90/P50, bajo es mejor)
- Otros factores (ocupación / escalación)
Calculates a 0-10 score combining:
- AHT (lower is better)
- FCR (higher is better)
- Variability (P90/P50, lower is better)
- Other factors (occupancy / escalation)
Fórmula:
Formula:
score = 0.4 * (10 - AHT_norm) +
0.3 * FCR_norm +
0.2 * (10 - Var_norm) +
0.1 * Otros_score
Donde *_norm son valores en escala 0-10.
Where *_norm are values on a 0-10 scale.
"""
dist = self.aht_distribution()
if not dist:
@@ -433,15 +429,15 @@ class OperationalPerformanceMetrics:
p50 = dist["p50"]
ratio = dist["p90_p50_ratio"]
# AHT_normalized: 0 (mejor) a 10 (peor)
# AHT_normalized: 0 (better) to 10 (worse)
aht_norm = self._scale_to_0_10(p50, self.AHT_GOOD, self.AHT_BAD)
# FCR_normalized: 0-10 directamente desde % (0-100)
# FCR_normalized: 0-10 directly from % (0-100)
fcr_pct = self.fcr_rate()
fcr_norm = fcr_pct / 10.0 if not np.isnan(fcr_pct) else 0.0
# Variabilidad_normalized: 0 (ratio bueno) a 10 (ratio malo)
# Variability_normalized: 0 (good ratio) to 10 (bad ratio)
var_norm = self._scale_to_0_10(ratio, self.VAR_RATIO_GOOD, self.VAR_RATIO_BAD)
# Otros factores: combinamos ocupación (ideal ~80%) y escalación (ideal baja)
# Other factors: combine occupancy (ideal ~80%) and escalation (ideal low)
occ = self.occupancy_rate()
esc = self.escalation_rate()
@@ -467,26 +463,26 @@ class OperationalPerformanceMetrics:
def _scale_to_0_10(self, value: float, good: float, bad: float) -> float:
"""
Escala linealmente un valor:
Linearly scales a value:
- good -> 0
- bad -> 10
Con saturación fuera de rango.
With saturation outside range.
"""
if np.isnan(value):
return 5.0 # neutro
return 5.0 # neutral
if good == bad:
return 5.0
if good < bad:
# Menor es mejor
# Lower is better
if value <= good:
return 0.0
if value >= bad:
return 10.0
return 10.0 * (value - good) / (bad - good)
else:
# Mayor es mejor
# Higher is better
if value >= good:
return 0.0
if value <= bad:
@@ -495,19 +491,19 @@ class OperationalPerformanceMetrics:
def _compute_other_factors_score(self, occ_pct: float, esc_pct: float) -> float:
"""
Otros factores (0-10) basados en:
- ocupación ideal alrededor de 80%
- tasa de escalación ideal baja (<10%)
Other factors (0-10) based on:
- ideal occupancy around 80%
- ideal escalation rate low (<10%)
"""
# Ocupación: 0 penalización si está entre 75-85, se penaliza fuera
# Occupancy: 0 penalty if between 75-85, penalized outside
if np.isnan(occ_pct):
occ_penalty = 5.0
else:
deviation = abs(occ_pct - 80.0)
occ_penalty = min(10.0, deviation / 5.0 * 2.0) # cada 5 puntos se suman 2, máx 10
occ_penalty = min(10.0, deviation / 5.0 * 2.0) # each 5 points add 2, max 10
occ_score = max(0.0, 10.0 - occ_penalty)
# Escalación: 0-10 donde 0% -> 10 puntos, >=40% -> 0
# Escalation: 0-10 where 0% -> 10 points, >=40% -> 0
if np.isnan(esc_pct):
esc_score = 5.0
else:
@@ -518,7 +514,7 @@ class OperationalPerformanceMetrics:
else:
esc_score = 10.0 * (1.0 - esc_pct / 40.0)
# Media simple de ambos
# Simple average of both
return (occ_score + esc_score) / 2.0
# ------------------------------------------------------------------ #
@@ -526,29 +522,29 @@ class OperationalPerformanceMetrics:
# ------------------------------------------------------------------ #
def plot_aht_boxplot_by_skill(self) -> Axes:
"""
Boxplot del AHT por skill (P10-P50-P90 visual).
Boxplot of AHT by skill (P10-P50-P90 visual).
"""
df = self.df.copy()
if df.empty or "handle_time" not in df.columns:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin datos de AHT", ha="center", va="center")
ax.text(0.5, 0.5, "No AHT data", ha="center", va="center")
ax.set_axis_off()
return ax
df = df.dropna(subset=["handle_time"])
if df.empty:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "AHT no disponible", ha="center", va="center")
ax.text(0.5, 0.5, "AHT not available", ha="center", va="center")
ax.set_axis_off()
return ax
fig, ax = plt.subplots(figsize=(8, 4))
df.boxplot(column="handle_time", by="queue_skill", ax=ax, showfliers=False)
ax.set_xlabel("Skill / Cola")
ax.set_ylabel("AHT (segundos)")
ax.set_title("Distribución de AHT por skill")
ax.set_xlabel("Skill / Queue")
ax.set_ylabel("AHT (seconds)")
ax.set_title("AHT distribution by skill")
plt.suptitle("")
plt.xticks(rotation=45, ha="right")
ax.grid(axis="y", alpha=0.3)
@@ -557,14 +553,14 @@ class OperationalPerformanceMetrics:
def plot_resolution_funnel_by_skill(self) -> Axes:
"""
Funnel / barras apiladas de Talk + Hold + ACW por skill (P50).
Funnel / stacked bars of Talk + Hold + ACW by skill (P50).
Permite ver el equilibrio de tiempos por skill.
Allows viewing the time balance by skill.
"""
p50 = self.talk_hold_acw_p50_by_skill()
if p50.empty:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin datos para funnel", ha="center", va="center")
ax.text(0.5, 0.5, "No data for funnel", ha="center", va="center")
ax.set_axis_off()
return ax
@@ -583,27 +579,26 @@ class OperationalPerformanceMetrics:
ax.set_xticks(x)
ax.set_xticklabels(skills, rotation=45, ha="right")
ax.set_ylabel("Segundos")
ax.set_title("Funnel de resolución (P50) por skill")
ax.set_ylabel("Seconds")
ax.set_title("Resolution funnel (P50) by skill")
ax.legend()
ax.grid(axis="y", alpha=0.3)
return ax
# ------------------------------------------------------------------ #
# Métricas por skill (para consistencia frontend cached/fresh)
# Metrics by skill (for frontend cached/fresh consistency)
# ------------------------------------------------------------------ #
def metrics_by_skill(self) -> List[Dict[str, Any]]:
"""
Calcula métricas operacionales por skill:
- transfer_rate: % de interacciones con transfer_flag == True
- abandonment_rate: % de interacciones abandonadas
- fcr_tecnico: 100 - transfer_rate (sin transferencia)
- fcr_real: % sin transferencia Y sin recontacto 7d (si hay datos)
- volume: número de interacciones
Calculates operational metrics by skill:
- transfer_rate: % of interactions with transfer_flag == True
- abandonment_rate: % of abandoned interactions
- fcr_tecnico: 100 - transfer_rate (without transfer)
- fcr_real: % without transfer AND without 7d re-contact (if there is data)
- volume: number of interactions
Devuelve una lista de dicts, uno por skill, para que el frontend
tenga acceso a las métricas reales por skill (no estimadas).
Returns a list of dicts, one per skill, so that the frontend has access to real metrics by skill (not estimated).
"""
df = self.df
if df.empty:
@@ -611,14 +606,14 @@ class OperationalPerformanceMetrics:
results = []
# Detectar columna de abandono
# Detect abandonment column
abandon_col = None
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
if col_name in df.columns:
abandon_col = col_name
break
# Detectar columna de repeat_call_7d para FCR real
# Detect repeat_call_7d column for real FCR
repeat_col = None
for col_name in ["repeat_call_7d", "repeat_7d", "is_repeat_7d"]:
if col_name in df.columns:
@@ -637,7 +632,7 @@ class OperationalPerformanceMetrics:
else:
transfer_rate = 0.0
# FCR Técnico = 100 - transfer_rate
# Technical FCR = 100 - transfer_rate
fcr_tecnico = float(round(100.0 - transfer_rate, 2))
# Abandonment rate
@@ -656,7 +651,7 @@ class OperationalPerformanceMetrics:
abandoned = int(abandon_mask.sum())
abandonment_rate = float(round(abandoned / total * 100, 2))
# FCR Real (sin transferencia Y sin recontacto 7d)
# Real FCR (without transfer AND without 7d re-contact)
fcr_real = fcr_tecnico # default to fcr_tecnico if no repeat data
if repeat_col and "transfer_flag" in group.columns:
repeat_data = group[repeat_col]
@@ -670,13 +665,13 @@ class OperationalPerformanceMetrics:
else:
repeat_mask = pd.to_numeric(repeat_data, errors="coerce").fillna(0) > 0
# FCR Real: no transfer AND no repeat
# Real FCR: no transfer AND no repeat
fcr_real_mask = (~group["transfer_flag"]) & (~repeat_mask)
fcr_real_count = fcr_real_mask.sum()
fcr_real = float(round(fcr_real_count / total * 100, 2))
# AHT Mean (promedio de handle_time sobre registros válidos)
# Filtramos solo registros 'valid' (excluye noise/zombie) para consistencia
# AHT Mean (average of handle_time over valid records)
# Filter only 'valid' records (excludes noise/zombie) for consistency
if "_is_valid_for_cv" in group.columns:
valid_records = group[group["_is_valid_for_cv"]]
else:
@@ -687,15 +682,15 @@ class OperationalPerformanceMetrics:
else:
aht_mean = 0.0
# AHT Total (promedio de handle_time sobre TODOS los registros)
# Incluye NOISE, ZOMBIE, ABANDON - solo para información/comparación
# AHT Total (average of handle_time over ALL records)
# Includes NOISE, ZOMBIE, ABANDON - for information/comparison only
if len(group) > 0 and "handle_time" in group.columns:
aht_total = float(round(group["handle_time"].mean(), 2))
else:
aht_total = 0.0
# Hold Time Mean (promedio de hold_time sobre registros válidos)
# Consistente con fresh path que usa MEAN, no P50
# Hold Time Mean (average of hold_time over valid records)
# Consistent with fresh path that uses MEAN, not P50
if len(valid_records) > 0 and "hold_time" in valid_records.columns:
hold_time_mean = float(round(valid_records["hold_time"].mean(), 2))
else:

View File

@@ -24,11 +24,10 @@ REQUIRED_COLUMNS_SAT: List[str] = [
@dataclass
class SatisfactionExperienceMetrics:
"""
Dimensión 3: SATISFACCIÓN y EXPERIENCIA
Dimension 3: SATISFACTION and EXPERIENCE
Todas las columnas de satisfacción (csat/nps/ces/aht) son OPCIONALES.
Si no están, las métricas que las usan devuelven vacío/NaN pero
nunca rompen el pipeline.
All satisfaction columns (csat/nps/ces/aht) are OPTIONAL.
If they are not present, the metrics that use them return empty/NaN but never break the pipeline.
"""
df: pd.DataFrame
@@ -44,7 +43,7 @@ class SatisfactionExperienceMetrics:
missing = [c for c in REQUIRED_COLUMNS_SAT if c not in self.df.columns]
if missing:
raise ValueError(
f"Faltan columnas obligatorias para SatisfactionExperienceMetrics: {missing}"
f"Missing required columns for SatisfactionExperienceMetrics: {missing}"
)
def _prepare_data(self) -> None:
@@ -52,7 +51,7 @@ class SatisfactionExperienceMetrics:
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
# Duraciones base siempre existen
# Base durations always exist
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
df[col] = pd.to_numeric(df[col], errors="coerce")
@@ -63,16 +62,16 @@ class SatisfactionExperienceMetrics:
+ df["wrap_up_time"].fillna(0)
)
# csat_score opcional
# csat_score optional
df["csat_score"] = pd.to_numeric(df.get("csat_score", np.nan), errors="coerce")
# aht opcional: si existe columna explícita la usamos, si no usamos handle_time
# aht optional: if explicit column exists we use it, otherwise we use handle_time
if "aht" in df.columns:
df["aht"] = pd.to_numeric(df["aht"], errors="coerce")
else:
df["aht"] = df["handle_time"]
# NPS / CES opcionales
# NPS / CES optional
df["nps_score"] = pd.to_numeric(df.get("nps_score", np.nan), errors="coerce")
df["ces_score"] = pd.to_numeric(df.get("ces_score", np.nan), errors="coerce")
@@ -90,8 +89,8 @@ class SatisfactionExperienceMetrics:
# ------------------------------------------------------------------ #
def csat_avg_by_skill_channel(self) -> pd.DataFrame:
"""
CSAT promedio por skill/canal.
Si no hay csat_score, devuelve DataFrame vacío.
Average CSAT by skill/channel.
If there is no csat_score, returns empty DataFrame.
"""
df = self.df
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
@@ -115,7 +114,7 @@ class SatisfactionExperienceMetrics:
def nps_avg_by_skill_channel(self) -> pd.DataFrame:
"""
NPS medio por skill/canal, si existe nps_score.
Average NPS by skill/channel, if nps_score exists.
"""
df = self.df
if "nps_score" not in df.columns or df["nps_score"].notna().sum() == 0:
@@ -139,7 +138,7 @@ class SatisfactionExperienceMetrics:
def ces_avg_by_skill_channel(self) -> pd.DataFrame:
"""
CES medio por skill/canal, si existe ces_score.
Average CES by skill/channel, if ces_score exists.
"""
df = self.df
if "ces_score" not in df.columns or df["ces_score"].notna().sum() == 0:
@@ -163,11 +162,11 @@ class SatisfactionExperienceMetrics:
def csat_global(self) -> float:
"""
CSAT medio global (todas las interacciones).
Global average CSAT (all interactions).
Usa la columna opcional `csat_score`:
- Si no existe, devuelve NaN.
- Si todos los valores son NaN / vacíos, devuelve NaN.
Uses the optional `csat_score` column:
- If it does not exist, returns NaN.
- If all values are NaN / empty, returns NaN.
"""
df = self.df
if "csat_score" not in df.columns:
@@ -183,8 +182,8 @@ class SatisfactionExperienceMetrics:
def csat_aht_correlation(self) -> Dict[str, Any]:
"""
Correlación Pearson CSAT vs AHT.
Si falta csat o aht, o no hay varianza, devuelve NaN y código adecuado.
Pearson correlation CSAT vs AHT.
If csat or aht is missing, or there is no variance, returns NaN and appropriate code.
"""
df = self.df
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
@@ -216,8 +215,8 @@ class SatisfactionExperienceMetrics:
def csat_aht_skill_summary(self) -> pd.DataFrame:
"""
Resumen por skill con clasificación del "sweet spot".
Si falta csat o aht, devuelve DataFrame vacío.
Summary by skill with "sweet spot" classification.
If csat or aht is missing, returns empty DataFrame.
"""
df = self.df
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
@@ -258,20 +257,20 @@ class SatisfactionExperienceMetrics:
# ------------------------------------------------------------------ #
def plot_csat_vs_aht_scatter(self) -> Axes:
"""
Scatter CSAT vs AHT por skill.
Si no hay datos suficientes, devuelve un Axes con mensaje.
Scatter CSAT vs AHT by skill.
If there is insufficient data, returns an Axes with message.
"""
df = self.df
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center")
ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center")
ax.set_axis_off()
return ax
df = df.dropna(subset=["csat_score", "aht"]).copy()
if df.empty:
fig, ax = plt.subplots()
ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center")
ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center")
ax.set_axis_off()
return ax
@@ -280,9 +279,9 @@ class SatisfactionExperienceMetrics:
for skill, sub in df.groupby("queue_skill"):
ax.scatter(sub["aht"], sub["csat_score"], label=skill, alpha=0.7)
ax.set_xlabel("AHT (segundos)")
ax.set_xlabel("AHT (seconds)")
ax.set_ylabel("CSAT")
ax.set_title("CSAT vs AHT por skill")
ax.set_title("CSAT vs AHT by skill")
ax.grid(alpha=0.3)
ax.legend(title="Skill", bbox_to_anchor=(1.05, 1), loc="upper left")
@@ -291,28 +290,28 @@ class SatisfactionExperienceMetrics:
def plot_csat_distribution(self) -> Axes:
    """
    CSAT histogram.

    Returns:
        Axes: a histogram of ``csat_score`` (10 bins). If the column is
        absent or has no non-null values, returns a blank Axes carrying
        only a "No CSAT data" message, so callers can always render it.
    """
    df = self.df
    # No usable CSAT at all -> placeholder Axes instead of raising.
    if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
        fig, ax = plt.subplots()
        # Diff residue previously duplicated this call in Spanish and
        # English, drawing two overlapping texts; keep one message.
        ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center")
        ax.set_axis_off()
        return ax

    df = df.dropna(subset=["csat_score"]).copy()
    if df.empty:
        fig, ax = plt.subplots()
        ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center")
        ax.set_axis_off()
        return ax

    fig, ax = plt.subplots(figsize=(6, 4))
    ax.hist(df["csat_score"], bins=10, alpha=0.7)
    ax.set_xlabel("CSAT")
    # Single set of English labels — the merge left both the Spanish and
    # English set_ylabel/set_title calls, executing each twice.
    ax.set_ylabel("Frequency")
    ax.set_title("CSAT distribution")
    ax.grid(axis="y", alpha=0.3)
    return ax

View File

@@ -25,7 +25,7 @@ import {
// RED FLAGS CONFIGURATION AND DETECTION
// ============================================
// v3.5: Configuración de Red Flags
// v3.5: Red Flags Configuration
interface RedFlagConfig {
id: string;
label: string;
@@ -41,51 +41,51 @@ interface RedFlagConfig {
const RED_FLAG_CONFIGS: RedFlagConfig[] = [
{
id: 'cv_high',
label: 'CV AHT Crítico',
label: 'Critical AHT CV',
shortLabel: 'CV',
threshold: 120,
operator: '>',
getValue: (q) => q.cv_aht,
format: (v) => `${v.toFixed(0)}%`,
color: 'red',
description: 'Variabilidad extrema - procesos impredecibles'
description: 'Extreme variability - unpredictable processes'
},
{
id: 'transfer_high',
label: 'Transfer Excesivo',
label: 'Excessive Transfer',
shortLabel: 'Transfer',
threshold: 50,
operator: '>',
getValue: (q) => q.transfer_rate,
format: (v) => `${v.toFixed(0)}%`,
color: 'orange',
description: 'Alta complejidad - requiere escalado frecuente'
description: 'High complexity - requires frequent escalation'
},
{
id: 'volume_low',
label: 'Volumen Insuficiente',
label: 'Insufficient Volume',
shortLabel: 'Vol',
threshold: 50,
operator: '<',
getValue: (q) => q.volume,
format: (v) => v.toLocaleString(),
color: 'slate',
description: 'ROI negativo - volumen no justifica inversión'
description: 'Negative ROI - volume doesn\'t justify investment'
},
{
id: 'valid_low',
label: 'Calidad Datos Baja',
label: 'Low Data Quality',
shortLabel: 'Valid',
threshold: 30,
operator: '<',
getValue: (q) => q.volume > 0 ? (q.volumeValid / q.volume) * 100 : 0,
format: (v) => `${v.toFixed(0)}%`,
color: 'amber',
description: 'Datos poco fiables - métricas distorsionadas'
description: 'Unreliable data - distorted metrics'
}
];
// v3.5: Detectar red flags de una cola
// v3.5: Detect red flags for a queue
interface DetectedRedFlag {
config: RedFlagConfig;
value: number;
@@ -108,7 +108,7 @@ function detectRedFlags(queue: OriginalQueueMetrics): DetectedRedFlag[] {
return flags;
}
// v3.5: Componente de badge de Red Flag individual
// v3.5: Individual Red Flag badge component
function RedFlagBadge({ flag, size = 'sm' }: { flag: DetectedRedFlag; size?: 'sm' | 'md' }) {
const sizeClasses = size === 'md' ? 'px-2 py-1 text-xs' : 'px-1.5 py-0.5 text-[10px]';

View File

@@ -570,12 +570,16 @@
"humanOnlyAction": "Maintain human management, evaluate periodically",
"redFlags": {
"cvCritical": "Critical AHT CV",
"cvCriticalShort": "CV",
"cvCriticalDesc": "Extreme variability - unpredictable processes",
"transferExcessive": "Excessive Transfer",
"transferExcessiveShort": "Transfer",
"transferExcessiveDesc": "High complexity - requires frequent escalation",
"volumeInsufficient": "Insufficient Volume",
"volumeInsufficientShort": "Vol",
"volumeInsufficientDesc": "Negative ROI - volume doesn't justify investment",
"dataQualityLow": "Low Data Quality",
"dataQualityLowShort": "Valid",
"dataQualityLowDesc": "Unreliable data - distorted metrics",
"threshold": "(threshold: {{operator}}{{value}})"
},
@@ -814,6 +818,33 @@
"roiBad": "Marginal ROI, evaluate other benefits",
"resolution": "Resolution",
"dataQuality": "Data Quality"
},
"subFactors": {
"repeatability": "Repeatability",
"repeatabilityDisplayName": "Repeatability",
"repeatabilityDescription": "Monthly volume: {{volume}} interactions",
"predictability": "Predictability",
"predictabilityDisplayName": "Predictability",
"predictabilityDescription": "AHT CV: {{cv}}%, Escalation: {{esc}}%",
"structuring": "Structuring",
"structuringDisplayName": "Structuring",
"structuringDescription": "{{pct}}% structured fields",
"inverseComplexity": "Inverse Complexity",
"inverseComplexityDisplayName": "Inverse Complexity",
"inverseComplexityDescription": "{{pct}}% exceptions",
"stability": "Stability",
"stabilityDisplayName": "Stability",
"stabilityDescription": "{{pct}}% off-hours",
"roiSavings": "ROI",
"roiSavingsDisplayName": "ROI",
"roiSavingsDescription": "€{{amount}}K annual potential savings",
"interpretations": {
"excellentForAutomation": "Excellent candidate for complete automation (Automate)",
"goodForAssistance": "Good candidate for agentic assistance (Assist)",
"candidateForAugmentation": "Candidate for human augmentation (Augment)",
"notRecommended": "Not recommended for automation at this time",
"bronzeAnalysis": "Bronze analysis does not include Agentic Readiness Score"
}
}
},
"economicModel": {

View File

@@ -570,12 +570,16 @@
"humanOnlyAction": "Mantener gestión humana, evaluar periódicamente",
"redFlags": {
"cvCritical": "CV AHT Crítico",
"cvCriticalShort": "CV",
"cvCriticalDesc": "Variabilidad extrema - procesos impredecibles",
"transferExcessive": "Transfer Excesivo",
"transferExcessiveShort": "Transfer",
"transferExcessiveDesc": "Alta complejidad - requiere escalado frecuente",
"volumeInsufficient": "Volumen Insuficiente",
"volumeInsufficientShort": "Vol",
"volumeInsufficientDesc": "ROI negativo - volumen no justifica inversión",
"dataQualityLow": "Calidad Datos Baja",
"dataQualityLowShort": "Valid",
"dataQualityLowDesc": "Datos poco fiables - métricas distorsionadas",
"threshold": "(umbral: {{operator}}{{value}})"
},
@@ -814,6 +818,33 @@
"roiBad": "ROI marginal, evaluar otros beneficios",
"resolution": "Resolutividad",
"dataQuality": "Calidad Datos"
},
"subFactors": {
"repeatability": "Repetitividad",
"repeatabilityDisplayName": "Repetitividad",
"repeatabilityDescription": "Volumen mensual: {{volume}} interacciones",
"predictability": "Predictibilidad",
"predictabilityDisplayName": "Predictibilidad",
"predictabilityDescription": "CV AHT: {{cv}}%, Escalación: {{esc}}%",
"structuring": "Estructuración",
"structuringDisplayName": "Estructuración",
"structuringDescription": "{{pct}}% de campos estructurados",
"inverseComplexity": "Complejidad Inversa",
"inverseComplexityDisplayName": "Complejidad Inversa",
"inverseComplexityDescription": "{{pct}}% de excepciones",
"stability": "Estabilidad",
"stabilityDisplayName": "Estabilidad",
"stabilityDescription": "{{pct}}% fuera de horario",
"roiSavings": "ROI",
"roiSavingsDisplayName": "ROI",
"roiSavingsDescription": "€{{amount}}K ahorro potencial anual",
"interpretations": {
"excellentForAutomation": "Excelente candidato para automatización completa (Automate)",
"goodForAssistance": "Buen candidato para asistencia agéntica (Assist)",
"candidateForAugmentation": "Candidato para augmentación humana (Augment)",
"notRecommended": "No recomendado para automatización en este momento",
"bronzeAnalysis": "Análisis Bronze no incluye Agentic Readiness Score"
}
}
},
"economicModel": {

View File

@@ -1,20 +1,20 @@
/**
* Agentic Readiness Score v2.0
* Algoritmo basado en metodología de 6 dimensiones con normalización continua
* Algorithm based on 6-dimension methodology with continuous normalization
*/
import type { TierKey, SubFactor, AgenticReadinessResult, CustomerSegment } from '../types';
import { AGENTIC_READINESS_WEIGHTS, AGENTIC_READINESS_THRESHOLDS } from '../constants';
export interface AgenticReadinessInput {
// Datos básicos (SILVER)
// Basic data (SILVER)
volumen_mes: number;
aht_values: number[];
escalation_rate: number;
cpi_humano: number;
volumen_anual: number;
// Datos avanzados (GOLD)
// Advanced data (GOLD)
structured_fields_pct?: number;
exception_rate?: number;
hourly_distribution?: number[];
@@ -28,21 +28,21 @@ export interface AgenticReadinessInput {
}
/**
* SUB-FACTOR 1: REPETITIVIDAD (25%)
* Basado en volumen mensual con normalización logística
* SUB-FACTOR 1: REPEATABILITY (25%)
* Based on monthly volume with logistic normalization
*/
function calculateRepetitividadScore(volumen_mes: number): SubFactor {
function calculateRepeatabilityScore(volumen_mes: number): SubFactor {
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.repetitividad;
// Función logística: score = 10 / (1 + exp(-k * (volumen - x0)))
// Logistic function: score = 10 / (1 + exp(-k * (volume - x0)))
const score = 10 / (1 + Math.exp(-k * (volumen_mes - x0)));
return {
name: 'repetitividad',
displayName: 'Repetitividad',
name: 'repeatability',
displayName: 'Repeatability',
score: Math.round(score * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.repetitividad,
description: `Volumen mensual: ${volumen_mes} interacciones`,
description: `Monthly volume: ${volumen_mes} interactions`,
details: {
volumen_mes,
threshold_medio: x0
@@ -51,10 +51,10 @@ function calculateRepetitividadScore(volumen_mes: number): SubFactor {
}
/**
* SUB-FACTOR 2: PREDICTIBILIDAD (20%)
* Basado en variabilidad AHT + tasa de escalación + variabilidad input/output
* SUB-FACTOR 2: PREDICTABILITY (20%)
* Based on AHT variability + escalation rate + input/output variability
*/
function calculatePredictibilidadScore(
function calculatePredictabilityScore(
aht_values: number[],
escalation_rate: number,
motivo_contacto_entropy?: number,
@@ -62,47 +62,47 @@ function calculatePredictibilidadScore(
): SubFactor {
const thresholds = AGENTIC_READINESS_THRESHOLDS.predictibilidad;
// 1. VARIABILIDAD AHT (40%)
// 1. AHT VARIABILITY (40%)
const aht_mean = aht_values.reduce((a, b) => a + b, 0) / aht_values.length;
const aht_variance = aht_values.reduce((sum, val) => sum + Math.pow(val - aht_mean, 2), 0) / aht_values.length;
const aht_std = Math.sqrt(aht_variance);
const cv_aht = aht_std / aht_mean;
// Normalizar CV a escala 0-10
// Normalize CV to 0-10 scale
const score_aht = Math.max(0, Math.min(10,
10 * (1 - (cv_aht - thresholds.cv_aht_excellent) / (thresholds.cv_aht_poor - thresholds.cv_aht_excellent))
));
// 2. TASA DE ESCALACIÓN (30%)
// 2. ESCALATION RATE (30%)
const score_escalacion = Math.max(0, Math.min(10,
10 * (1 - escalation_rate / thresholds.escalation_poor)
));
// 3. VARIABILIDAD INPUT/OUTPUT (30%)
// 3. INPUT/OUTPUT VARIABILITY (30%)
let score_variabilidad: number;
if (motivo_contacto_entropy !== undefined && resolucion_entropy !== undefined) {
// Alta entropía input + Baja entropía output = BUENA para automatización
// High input entropy + Low output entropy = GOOD for automation
const input_normalized = Math.min(motivo_contacto_entropy / 3.0, 1.0);
const output_normalized = Math.min(resolucion_entropy / 3.0, 1.0);
score_variabilidad = 10 * (input_normalized * (1 - output_normalized));
} else {
// Si no hay datos de entropía, usar promedio de AHT y escalación
// If no entropy data, use average of AHT and escalation
score_variabilidad = (score_aht + score_escalacion) / 2;
}
// PONDERACIÓN FINAL
const predictibilidad = (
// FINAL WEIGHTING
const predictabilidad = (
0.40 * score_aht +
0.30 * score_escalacion +
0.30 * score_variabilidad
);
return {
name: 'predictibilidad',
displayName: 'Predictibilidad',
score: Math.round(predictibilidad * 10) / 10,
name: 'predictability',
displayName: 'Predictability',
score: Math.round(predictabilidad * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.predictibilidad,
description: `CV AHT: ${(cv_aht * 100).toFixed(1)}%, Escalación: ${(escalation_rate * 100).toFixed(1)}%`,
description: `AHT CV: ${(cv_aht * 100).toFixed(1)}%, Escalation: ${(escalation_rate * 100).toFixed(1)}%`,
details: {
cv_aht: Math.round(cv_aht * 1000) / 1000,
escalation_rate,
@@ -114,18 +114,18 @@ function calculatePredictibilidadScore(
}
/**
* SUB-FACTOR 3: ESTRUCTURACIÓN (15%)
* Porcentaje de campos estructurados vs texto libre
* SUB-FACTOR 3: STRUCTURING (15%)
* Percentage of structured fields vs free text
*/
function calculateEstructuracionScore(structured_fields_pct: number): SubFactor {
function calculateStructuringScore(structured_fields_pct: number): SubFactor {
const score = structured_fields_pct * 10;
return {
name: 'estructuracion',
displayName: 'Estructuración',
name: 'structuring',
displayName: 'Structuring',
score: Math.round(score * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.estructuracion,
description: `${(structured_fields_pct * 100).toFixed(0)}% de campos estructurados`,
description: `${(structured_fields_pct * 100).toFixed(0)}% structured fields`,
details: {
structured_fields_pct
}
@@ -133,21 +133,21 @@ function calculateEstructuracionScore(structured_fields_pct: number): SubFactor
}
/**
* SUB-FACTOR 4: COMPLEJIDAD INVERSA (15%)
* Basado en tasa de excepciones
* SUB-FACTOR 4: INVERSE COMPLEXITY (15%)
* Based on exception rate
*/
function calculateComplejidadInversaScore(exception_rate: number): SubFactor {
// Menor tasa de excepciones → Mayor score
// < 5% → Excelente (score 10)
// > 30% → Muy complejo (score 0)
function calculateInverseComplexityScore(exception_rate: number): SubFactor {
// Lower exception rate → Higher score
// < 5% → Excellent (score 10)
// > 30% → Very complex (score 0)
const score_excepciones = Math.max(0, Math.min(10, 10 * (1 - exception_rate / 0.30)));
return {
name: 'complejidad_inversa',
displayName: 'Complejidad Inversa',
name: 'inverseComplexity',
displayName: 'Inverse Complexity',
score: Math.round(score_excepciones * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.complejidad_inversa,
description: `${(exception_rate * 100).toFixed(1)}% de excepciones`,
description: `${(exception_rate * 100).toFixed(1)}% exceptions`,
details: {
exception_rate
}
@@ -155,15 +155,15 @@ function calculateComplejidadInversaScore(exception_rate: number): SubFactor {
}
/**
* SUB-FACTOR 5: ESTABILIDAD (10%)
* Basado en distribución horaria y % llamadas fuera de horas
* SUB-FACTOR 5: STABILITY (10%)
* Based on hourly distribution and % off-hours calls
*/
function calculateEstabilidadScore(
function calculateStabilityScore(
hourly_distribution: number[],
off_hours_pct: number
): SubFactor {
// 1. UNIFORMIDAD DISTRIBUCIÓN HORARIA (60%)
// Calcular entropía de Shannon
// 1. HOURLY DISTRIBUTION UNIFORMITY (60%)
// Calculate Shannon entropy
const total = hourly_distribution.reduce((a, b) => a + b, 0);
let score_uniformidad = 0;
let entropy_normalized = 0;
@@ -176,22 +176,22 @@ function calculateEstabilidadScore(
score_uniformidad = entropy_normalized * 10;
}
// 2. % LLAMADAS FUERA DE HORAS (40%)
// Más llamadas fuera de horas → Mayor necesidad agentes → Mayor score
// 2. % OFF-HOURS CALLS (40%)
// More off-hours calls → Higher agent need → Higher score
const score_off_hours = Math.min(10, (off_hours_pct / 0.30) * 10);
// PONDERACIÓN
// WEIGHTING
const estabilidad = (
0.60 * score_uniformidad +
0.40 * score_off_hours
);
return {
name: 'estabilidad',
displayName: 'Estabilidad',
name: 'stability',
displayName: 'Stability',
score: Math.round(estabilidad * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.estabilidad,
description: `${(off_hours_pct * 100).toFixed(1)}% fuera de horario`,
description: `${(off_hours_pct * 100).toFixed(1)}% off-hours`,
details: {
entropy_normalized: Math.round(entropy_normalized * 1000) / 1000,
off_hours_pct,
@@ -203,7 +203,7 @@ function calculateEstabilidadScore(
/**
* SUB-FACTOR 6: ROI (15%)
* Basado en ahorro potencial anual
* Based on annual potential savings
*/
function calculateROIScore(
volumen_anual: number,
@@ -212,7 +212,7 @@ function calculateROIScore(
): SubFactor {
const ahorro_anual = volumen_anual * cpi_humano * automation_savings_pct;
// Normalización logística
// Logistic normalization
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.roi;
const score = 10 / (1 + Math.exp(-k * (ahorro_anual - x0)));
@@ -221,7 +221,7 @@ function calculateROIScore(
displayName: 'ROI',
score: Math.round(score * 10) / 10,
weight: AGENTIC_READINESS_WEIGHTS.roi,
description: `${(ahorro_anual / 1000).toFixed(0)}K ahorro potencial anual`,
description: `${(ahorro_anual / 1000).toFixed(0)}K annual potential savings`,
details: {
ahorro_anual: Math.round(ahorro_anual),
volumen_anual,
@@ -232,11 +232,11 @@ function calculateROIScore(
}
/**
* AJUSTE POR DISTRIBUCIÓN CSAT (Opcional, ±10%)
* Distribución normal → Proceso estable
* CSAT DISTRIBUTION ADJUSTMENT (Optional, ±10%)
* Normal distribution → Stable process
*/
function calculateCSATDistributionAdjustment(csat_values: number[]): number {
// Test de normalidad simplificado (basado en skewness y kurtosis)
// Simplified normality test (based on skewness and kurtosis)
const n = csat_values.length;
const mean = csat_values.reduce((a, b) => a + b, 0) / n;
const variance = csat_values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) / n;
@@ -248,42 +248,42 @@ function calculateCSATDistributionAdjustment(csat_values: number[]): number {
// Kurtosis
const kurtosis = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 4), 0) / n;
// Normalidad: skewness cercano a 0, kurtosis cercano a 3
// Normality: skewness close to 0, kurtosis close to 3
const skewness_score = Math.max(0, 1 - Math.abs(skewness));
const kurtosis_score = Math.max(0, 1 - Math.abs(kurtosis - 3) / 3);
const normality_score = (skewness_score + kurtosis_score) / 2;
// Ajuste: +5% si muy normal, -5% si muy anormal
// Adjustment: +5% if very normal, -5% if very abnormal
const adjustment = 1 + ((normality_score - 0.5) * 0.10);
return adjustment;
}
/**
* ALGORITMO COMPLETO (Tier GOLD)
* COMPLETE ALGORITHM (Tier GOLD)
*/
export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput): AgenticReadinessResult {
const sub_factors: SubFactor[] = [];
// 1. REPETITIVIDAD
sub_factors.push(calculateRepetitividadScore(data.volumen_mes));
// 1. REPEATABILITY
sub_factors.push(calculateRepeatabilityScore(data.volumen_mes));
// 2. PREDICTIBILIDAD
sub_factors.push(calculatePredictibilidadScore(
// 2. PREDICTABILITY
sub_factors.push(calculatePredictabilityScore(
data.aht_values,
data.escalation_rate,
data.motivo_contacto_entropy,
data.resolucion_entropy
));
// 3. ESTRUCTURACIÓN
sub_factors.push(calculateEstructuracionScore(data.structured_fields_pct || 0.5));
// 3. STRUCTURING
sub_factors.push(calculateStructuringScore(data.structured_fields_pct || 0.5));
// 4. COMPLEJIDAD INVERSA
sub_factors.push(calculateComplejidadInversaScore(data.exception_rate || 0.15));
// 4. INVERSE COMPLEXITY
sub_factors.push(calculateInverseComplexityScore(data.exception_rate || 0.15));
// 5. ESTABILIDAD
sub_factors.push(calculateEstabilidadScore(
// 5. STABILITY
sub_factors.push(calculateStabilityScore(
data.hourly_distribution || Array(24).fill(1),
data.off_hours_pct || 0.2
));
@@ -294,34 +294,34 @@ export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput):
data.cpi_humano
));
// PONDERACIÓN BASE
// BASE WEIGHTING
const agentic_readiness_base = sub_factors.reduce(
(sum, factor) => sum + (factor.score * factor.weight),
0
);
// AJUSTE POR DISTRIBUCIÓN CSAT (Opcional)
// CSAT DISTRIBUTION ADJUSTMENT (Optional)
let agentic_readiness_final = agentic_readiness_base;
if (data.csat_values && data.csat_values.length > 10) {
const adjustment = calculateCSATDistributionAdjustment(data.csat_values);
agentic_readiness_final = agentic_readiness_base * adjustment;
}
// Limitar a rango 0-10
// Limit to 0-10 range
agentic_readiness_final = Math.max(0, Math.min(10, agentic_readiness_final));
// Interpretación
// Interpretation
let interpretation = '';
let confidence: 'high' | 'medium' | 'low' = 'high';
if (agentic_readiness_final >= 8) {
interpretation = 'Excelente candidato para automatización completa (Automate)';
interpretation = 'Excellent candidate for complete automation (Automate)';
} else if (agentic_readiness_final >= 5) {
interpretation = 'Buen candidato para asistencia agéntica (Assist)';
interpretation = 'Good candidate for agentic assistance (Assist)';
} else if (agentic_readiness_final >= 3) {
interpretation = 'Candidato para augmentación humana (Augment)';
interpretation = 'Candidate for human augmentation (Augment)';
} else {
interpretation = 'No recomendado para automatización en este momento';
interpretation = 'Not recommended for automation at this time';
}
return {
@@ -334,43 +334,43 @@ export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput):
}
/**
* ALGORITMO SIMPLIFICADO (Tier SILVER)
* SIMPLIFIED ALGORITHM (Tier SILVER)
*/
export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput): AgenticReadinessResult {
const sub_factors: SubFactor[] = [];
// 1. REPETITIVIDAD (30%)
const repetitividad = calculateRepetitividadScore(data.volumen_mes);
repetitividad.weight = 0.30;
sub_factors.push(repetitividad);
// 1. REPEATABILITY (30%)
const repeatability = calculateRepeatabilityScore(data.volumen_mes);
repeatability.weight = 0.30;
sub_factors.push(repeatability);
// 2. PREDICTIBILIDAD SIMPLIFICADA (30%)
const predictibilidad = calculatePredictibilidadScore(
// 2. SIMPLIFIED PREDICTABILITY (30%)
const predictability = calculatePredictabilityScore(
data.aht_values,
data.escalation_rate
);
predictibilidad.weight = 0.30;
sub_factors.push(predictibilidad);
predictability.weight = 0.30;
sub_factors.push(predictability);
// 3. ROI (40%)
const roi = calculateROIScore(data.volumen_anual, data.cpi_humano);
roi.weight = 0.40;
sub_factors.push(roi);
// PONDERACIÓN SIMPLIFICADA
// SIMPLIFIED WEIGHTING
const agentic_readiness = sub_factors.reduce(
(sum, factor) => sum + (factor.score * factor.weight),
0
);
// Interpretación
// Interpretation
let interpretation = '';
if (agentic_readiness >= 7) {
interpretation = 'Buen candidato para automatización';
interpretation = 'Good candidate for automation';
} else if (agentic_readiness >= 4) {
interpretation = 'Candidato para asistencia agéntica';
interpretation = 'Candidate for agentic assistance';
} else {
interpretation = 'Requiere análisis más profundo (considerar GOLD)';
interpretation = 'Requires deeper analysis (consider GOLD)';
}
return {
@@ -383,7 +383,7 @@ export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput
}
/**
* FUNCIÓN PRINCIPAL - Selecciona algoritmo según tier
* MAIN FUNCTION - Selects algorithm based on tier
*/
export function calculateAgenticReadinessScore(data: AgenticReadinessInput): AgenticReadinessResult {
if (data.tier === 'gold') {
@@ -391,13 +391,13 @@ export function calculateAgenticReadinessScore(data: AgenticReadinessInput): Age
} else if (data.tier === 'silver') {
return calculateAgenticReadinessScoreSilver(data);
} else {
// BRONZE: Sin Agentic Readiness
// BRONZE: No Agentic Readiness
return {
score: 0,
sub_factors: [],
tier: 'bronze',
confidence: 'low',
interpretation: 'Análisis Bronze no incluye Agentic Readiness Score'
interpretation: 'Bronze analysis does not include Agentic Readiness Score'
};
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -23,13 +23,13 @@ function safeNumber(value: any, fallback = 0): number {
function normalizeAhtMetric(ahtSeconds: number): number {
if (!Number.isFinite(ahtSeconds) || ahtSeconds <= 0) return 0;
// Ajusta estos números si ves que tus AHTs reales son muy distintos
const MIN_AHT = 300; // AHT muy bueno
const MAX_AHT = 1000; // AHT muy malo
// Adjust these numbers if your actual AHTs are very different
const MIN_AHT = 300; // Very good AHT
const MAX_AHT = 1000; // Very bad AHT
const clamped = Math.max(MIN_AHT, Math.min(MAX_AHT, ahtSeconds));
const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (mejor) -> 1 (peor)
const score = 100 - ratio * 100; // 100 (mejor) -> 0 (peor)
const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (better) -> 1 (worse)
const score = 100 - ratio * 100; // 100 (better) -> 0 (worse)
return Math.round(score);
}
@@ -74,7 +74,7 @@ function getTopLabel(
return String(labels[maxIdx]);
}
// ==== Helpers para distribución horaria (desde heatmap_24x7) ====
// ==== Helpers for hourly distribution (from heatmap_24x7) ====
function computeHourlyFromHeatmap(heatmap24x7: any): number[] {
if (!Array.isArray(heatmap24x7) || !heatmap24x7.length) {
@@ -146,7 +146,7 @@ function mapAgenticReadiness(
description:
value?.reason ||
value?.details?.description ||
'Sub-factor calculado a partir de KPIs agregados.',
'Sub-factor calculated from aggregated KPIs.',
details: value?.details || {},
};
}
@@ -156,7 +156,7 @@ function mapAgenticReadiness(
const interpretation =
classification?.description ||
`Puntuación de preparación agentic: ${score.toFixed(1)}/10`;
`Agentic readiness score: ${score.toFixed(1)}/10`;
const computedCount = Object.values(sub_scores).filter(
(s: any) => s?.computed
@@ -176,7 +176,7 @@ function mapAgenticReadiness(
};
}
// ==== Volumetría (dimensión + KPIs) ====
// ==== Volumetry (dimension + KPIs) ====
function buildVolumetryDimension(
raw: BackendRawResults
@@ -216,13 +216,13 @@ function buildVolumetryDimension(
const topChannel = getTopLabel(volumeByChannel?.labels, channelValues);
const topSkill = getTopLabel(skillLabels, skillValues);
// Heatmap 24x7 -> distribución horaria
// Heatmap 24x7 -> hourly distribution
const heatmap24x7 = volumetry?.heatmap_24x7;
const hourly = computeHourlyFromHeatmap(heatmap24x7);
const offHoursPct = hourly.length ? calcOffHoursPct(hourly) : 0;
const peakHours = hourly.length ? findPeakHours(hourly) : [];
console.log('📊 Volumetría backend (mapper):', {
console.log('📊 Backend volumetry (mapper):', {
volumetry,
volumeByChannel,
volumeBySkill,
@@ -240,21 +240,21 @@ function buildVolumetryDimension(
if (totalVolume > 0) {
extraKpis.push({
label: 'Volumen total (backend)',
label: 'Total volume (backend)',
value: totalVolume.toLocaleString('es-ES'),
});
}
if (numChannels > 0) {
extraKpis.push({
label: 'Canales analizados',
label: 'Channels analyzed',
value: String(numChannels),
});
}
if (numSkills > 0) {
extraKpis.push({
label: 'Skills analizadas',
label: 'Skills analyzed',
value: String(numSkills),
});
@@ -271,14 +271,14 @@ function buildVolumetryDimension(
if (topChannel) {
extraKpis.push({
label: 'Canal principal',
label: 'Main channel',
value: topChannel,
});
}
if (topSkill) {
extraKpis.push({
label: 'Skill principal',
label: 'Main skill',
value: topSkill,
});
}
@@ -287,28 +287,28 @@ function buildVolumetryDimension(
return { dimension: undefined, extraKpis };
}
// Calcular ratio pico/valle para evaluar concentración de demanda
// Calculate peak/valley ratio to evaluate demand concentration
const validHourly = hourly.filter(v => v > 0);
const maxHourly = validHourly.length > 0 ? Math.max(...validHourly) : 0;
const minHourly = validHourly.length > 0 ? Math.min(...validHourly) : 1;
const peakValleyRatio = minHourly > 0 ? maxHourly / minHourly : 1;
console.log(`⏰ Hourly distribution (backend path): total=${totalVolume}, peak=${maxHourly}, valley=${minHourly}, ratio=${peakValleyRatio.toFixed(2)}`);
// Score basado en:
// - % fuera de horario (>30% penaliza)
// - Ratio pico/valle (>3x penaliza)
// NO penalizar por tener volumen alto
// Score based on:
// - % off-hours (>30% penalty)
// - Peak/valley ratio (>3x penalty)
// DO NOT penalize for having high volume
let score = 100;
// Penalización por fuera de horario
// Penalty for off-hours
const offHoursPctValue = offHoursPct * 100;
if (offHoursPctValue > 30) {
score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts por cada % sobre 30%
        score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts per % over 30%
} else if (offHoursPctValue > 20) {
score -= (offHoursPctValue - 20); // -1 pt por cada % entre 20-30%
score -= (offHoursPctValue - 20); // -1 pt per % between 20-30%
}
// Penalización por ratio pico/valle alto
// Penalty for high peak/valley ratio
if (peakValleyRatio > 5) {
score -= 30;
} else if (peakValleyRatio > 3) {
@@ -321,32 +321,32 @@ function buildVolumetryDimension(
const summaryParts: string[] = [];
summaryParts.push(
`${totalVolume.toLocaleString('es-ES')} interacciones analizadas.`
`${totalVolume.toLocaleString('es-ES')} interactions analyzed.`
);
summaryParts.push(
`${(offHoursPct * 100).toFixed(0)}% fuera de horario laboral (8-19h).`
`${(offHoursPct * 100).toFixed(0)}% outside business hours (8-19h).`
);
if (peakValleyRatio > 2) {
summaryParts.push(
`Ratio pico/valle: ${peakValleyRatio.toFixed(1)}x - alta concentración de demanda.`
`Peak/valley ratio: ${peakValleyRatio.toFixed(1)}x - high demand concentration.`
);
}
if (topSkill) {
summaryParts.push(`Skill principal: ${topSkill}.`);
summaryParts.push(`Main skill: ${topSkill}.`);
}
// Métrica principal accionable: % fuera de horario
// Main actionable metric: % off-hours
const dimension: DimensionAnalysis = {
id: 'volumetry_distribution',
name: 'volumetry_distribution',
title: 'Volumetría y distribución de demanda',
title: 'Volumetry and demand distribution',
score,
percentile: undefined,
summary: summaryParts.join(' '),
kpi: {
label: 'Fuera de horario',
label: 'Off-hours',
value: `${(offHoursPct * 100).toFixed(0)}%`,
change: peakValleyRatio > 2 ? `Pico/valle: ${peakValleyRatio.toFixed(1)}x` : undefined,
change: peakValleyRatio > 2 ? `Peak/valley: ${peakValleyRatio.toFixed(1)}x` : undefined,
changeType: offHoursPct > 0.3 ? 'negative' : offHoursPct > 0.2 ? 'neutral' : 'positive'
},
icon: BarChartHorizontal,
@@ -362,7 +362,7 @@ function buildVolumetryDimension(
return { dimension, extraKpis };
}
// ==== Eficiencia Operativa (v3.2 - con segmentación horaria) ====
// ==== Operational Efficiency (v3.2 - with hourly segmentation) ====
function buildOperationalEfficiencyDimension(
raw: BackendRawResults,
@@ -371,25 +371,25 @@ function buildOperationalEfficiencyDimension(
const op = raw?.operational_performance;
if (!op) return undefined;
// AHT Global
// Global AHT
const ahtP50 = safeNumber(op.aht_distribution?.p50, 0);
const ahtP90 = safeNumber(op.aht_distribution?.p90, 0);
const ratioGlobal = ahtP90 > 0 && ahtP50 > 0 ? ahtP90 / ahtP50 : safeNumber(op.aht_distribution?.p90_p50_ratio, 1.5);
// AHT Horario Laboral (8-19h) - estimación basada en distribución
// Asumimos que el AHT en horario laboral es ligeramente menor (más eficiente)
const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% más eficiente en horario laboral
const ratioBusinessHours = ratioGlobal * 0.85; // Menor variabilidad en horario laboral
// Business Hours AHT (8-19h) - estimation based on distribution
// We assume that AHT during business hours is slightly lower (more efficient)
const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% more efficient during business hours
const ratioBusinessHours = ratioGlobal * 0.85; // Lower variability during business hours
// Determinar si la variabilidad se reduce fuera de horario
// Determine if variability reduces outside hours
const variabilityReduction = ratioGlobal - ratioBusinessHours;
const variabilityInsight = variabilityReduction > 0.3
? 'La variabilidad se reduce significativamente en horario laboral.'
? 'Variability significantly reduces during business hours.'
: variabilityReduction > 0.1
? 'La variabilidad se mantiene similar en ambos horarios.'
: 'La variabilidad es consistente independientemente del horario.';
? 'Variability remains similar in both schedules.'
: 'Variability is consistent regardless of schedule.';
// Score basado en escala definida:
// Score based on defined scale:
// <1.5 = 100pts, 1.5-2.0 = 70pts, 2.0-2.5 = 50pts, 2.5-3.0 = 30pts, >3.0 = 20pts
let score: number;
if (ratioGlobal < 1.5) {
@@ -404,9 +404,9 @@ function buildOperationalEfficiencyDimension(
score = 20;
}
// Summary con segmentación
let summary = `AHT Global: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `;
summary += `AHT Horario Laboral (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. `;
// Summary with segmentation
let summary = `Global AHT: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `;
summary += `Business Hours AHT (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. `;
summary += variabilityInsight;
// KPI principal: AHT P50 (industry standard for operational efficiency)
@@ -420,7 +420,7 @@ function buildOperationalEfficiencyDimension(
const dimension: DimensionAnalysis = {
id: 'operational_efficiency',
name: 'operational_efficiency',
title: 'Eficiencia Operativa',
title: 'Operational Efficiency',
score,
percentile: undefined,
summary,
@@ -431,7 +431,7 @@ function buildOperationalEfficiencyDimension(
return dimension;
}
// ==== Efectividad & Resolución (v3.2 - enfocada en FCR Técnico) ====
// ==== Effectiveness & Resolution (v3.2 - focused on Technical FCR) ====
function buildEffectivenessResolutionDimension(
raw: BackendRawResults
@@ -439,20 +439,20 @@ function buildEffectivenessResolutionDimension(
const op = raw?.operational_performance;
if (!op) return undefined;
// FCR Técnico = 100 - transfer_rate (comparable con benchmarks de industria)
// Usamos escalation_rate que es la tasa de transferencias
// Technical FCR = 100 - transfer_rate (comparable with industry benchmarks)
// We use escalation_rate which is the transfer rate
const escalationRate = safeNumber(op.escalation_rate, NaN);
const abandonmentRate = safeNumber(op.abandonment_rate, 0);
// FCR Técnico: 100 - tasa de transferencia
// Technical FCR: 100 - transfer rate
const fcrRate = Number.isFinite(escalationRate) && escalationRate >= 0
? Math.max(0, Math.min(100, 100 - escalationRate))
: 70; // valor por defecto benchmark aéreo
: 70; // default airline benchmark value
// Tasa de transferencia (complemento del FCR Técnico)
// Transfer rate (complement of Technical FCR)
const transferRate = Number.isFinite(escalationRate) ? escalationRate : 100 - fcrRate;
// Score basado en FCR Técnico (benchmark sector aéreo: 85-90%)
// Score based on Technical FCR (benchmark airline sector: 85-90%)
// FCR >= 90% = 100pts, 85-90% = 80pts, 80-85% = 60pts, 75-80% = 40pts, <75% = 20pts
let score: number;
if (fcrRate >= 90) {
@@ -467,25 +467,25 @@ function buildEffectivenessResolutionDimension(
score = 20;
}
// Penalización adicional por abandono alto (>8%)
// Additional penalty for high abandonment (>8%)
if (abandonmentRate > 8) {
score = Math.max(0, score - Math.round((abandonmentRate - 8) * 2));
}
// Summary enfocado en FCR Técnico
let summary = `FCR Técnico: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `;
summary += `Tasa de transferencia: ${transferRate.toFixed(1)}%. `;
// Summary focused on Technical FCR
let summary = `Technical FCR: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `;
summary += `Transfer rate: ${transferRate.toFixed(1)}%. `;
if (fcrRate >= 90) {
summary += 'Excelente resolución en primer contacto.';
summary += 'Excellent first contact resolution.';
} else if (fcrRate >= 85) {
summary += 'Resolución dentro del benchmark del sector.';
summary += 'Resolution within sector benchmark.';
} else {
summary += 'Oportunidad de mejora reduciendo transferencias.';
summary += 'Opportunity to improve by reducing transfers.';
}
const kpi: Kpi = {
label: 'FCR Técnico',
label: 'Technical FCR',
value: `${fcrRate.toFixed(0)}%`,
change: `Transfer: ${transferRate.toFixed(0)}%`,
changeType: fcrRate >= 85 ? 'positive' : fcrRate >= 80 ? 'neutral' : 'negative'
@@ -494,7 +494,7 @@ function buildEffectivenessResolutionDimension(
const dimension: DimensionAnalysis = {
id: 'effectiveness_resolution',
name: 'effectiveness_resolution',
title: 'Efectividad & Resolución',
title: 'Effectiveness & Resolution',
score,
percentile: undefined,
summary,
@@ -505,7 +505,7 @@ function buildEffectivenessResolutionDimension(
return dimension;
}
// ==== Complejidad & Predictibilidad (v3.4 - basada en CV AHT per industry standards) ====
// ==== Complexity & Predictability (v3.4 - based on CV AHT per industry standards) ====
function buildComplexityPredictabilityDimension(
raw: BackendRawResults
@@ -535,9 +535,9 @@ function buildComplexityPredictabilityDimension(
}
}
// Score basado en CV AHT (benchmark: <75% = excelente, <100% = aceptable)
// Score based on CV AHT (benchmark: <75% = excellent, <100% = acceptable)
// CV <= 75% = 100pts (alta predictibilidad)
// CV 75-100% = 80pts (predictibilidad aceptable)
// CV 75-100% = 80pts (acceptable predictability)
// CV 100-125% = 60pts (variabilidad moderada)
// CV 125-150% = 40pts (alta variabilidad)
// CV > 150% = 20pts (muy alta variabilidad)
@@ -558,16 +558,16 @@ function buildComplexityPredictabilityDimension(
let summary = `CV AHT: ${cvAhtPercent}% (benchmark: <75%). `;
if (cvAhtPercent <= 75) {
summary += 'Alta predictibilidad: tiempos de atención consistentes. Excelente para planificación WFM.';
summary += 'High predictability: consistent handling times. Excellent for WFM planning.';
} else if (cvAhtPercent <= 100) {
summary += 'Predictibilidad aceptable: variabilidad moderada en tiempos de atención.';
summary += 'Acceptable predictability: moderate variability in handling times.';
} else if (cvAhtPercent <= 125) {
summary += 'Variabilidad notable: dificulta la planificación de recursos. Considerar estandarización.';
summary += 'Notable variability: complicates resource planning. Consider standardization.';
} else {
summary += 'Alta variabilidad: tiempos muy dispersos. Priorizar scripts guiados y estandarización.';
summary += 'High variability: very scattered times. Prioritize guided scripts and standardization.';
}
// Añadir info de Hold P50 promedio si está disponible (proxy de complejidad)
// Add Hold P50 average info if available (complexity proxy)
if (avgHoldP50 > 0) {
summary += ` Hold Time P50: ${Math.round(avgHoldP50)}s.`;
}
@@ -583,7 +583,7 @@ function buildComplexityPredictabilityDimension(
const dimension: DimensionAnalysis = {
id: 'complexity_predictability',
name: 'complexity_predictability',
title: 'Complejidad & Predictibilidad',
title: 'Complexity & Predictability',
score,
percentile: undefined,
summary,
@@ -594,7 +594,7 @@ function buildComplexityPredictabilityDimension(
return dimension;
}
// ==== Satisfacción del Cliente (v3.1) ====
// ==== Customer Satisfaction (v3.1) ====
function buildSatisfactionDimension(
raw: BackendRawResults
@@ -604,19 +604,19 @@ function buildSatisfactionDimension(
const hasCSATData = Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0;
// Si no hay CSAT, mostrar dimensión con "No disponible"
// If no CSAT, show dimension with "Not available"
const dimension: DimensionAnalysis = {
id: 'customer_satisfaction',
name: 'customer_satisfaction',
title: 'Satisfacción del Cliente',
score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indica N/A
title: 'Customer Satisfaction',
score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indicates N/A
percentile: undefined,
summary: hasCSATData
? `CSAT global: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Nivel de satisfacción óptimo.' : csatGlobalRaw >= 3.5 ? 'Satisfacción aceptable, margen de mejora.' : 'Satisfacción baja, requiere atención urgente.'}`
: 'CSAT no disponible en el dataset. Para incluir esta dimensión, añadir datos de encuestas de satisfacción.',
? `Global CSAT: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Optimal satisfaction level.' : csatGlobalRaw >= 3.5 ? 'Acceptable satisfaction, room for improvement.' : 'Low satisfaction, requires urgent attention.'}`
: 'CSAT not available in dataset. To include this dimension, add satisfaction survey data.',
kpi: {
label: 'CSAT',
value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'No disponible',
value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'Not available',
changeType: hasCSATData
? (csatGlobalRaw >= 4.0 ? 'positive' : csatGlobalRaw >= 3.5 ? 'neutral' : 'negative')
: 'neutral'
@@ -627,7 +627,7 @@ function buildSatisfactionDimension(
return dimension;
}
// ==== Economía - Coste por Interacción (v3.1) ====
// ==== Economy - Cost per Interaction (v3.1) ====
function buildEconomyDimension(
raw: BackendRawResults,
@@ -637,9 +637,9 @@ function buildEconomyDimension(
const op = raw?.operational_performance;
const totalAnnual = safeNumber(econ?.cost_breakdown?.total_annual, 0);
// Benchmark CPI aerolíneas (consistente con ExecutiveSummaryTab)
// Airline CPI benchmark (consistent with ExecutiveSummaryTab)
// p25: 2.20, p50: 3.50, p75: 4.50, p90: 5.50
const CPI_BENCHMARK = 3.50; // p50 aerolíneas
const CPI_BENCHMARK = 3.50; // airline p50
if (totalAnnual <= 0 || totalInteractions <= 0) {
return undefined;
@@ -652,12 +652,12 @@ function buildEconomyDimension(
// Calcular CPI usando cost_volume (non-abandoned) como denominador
const cpi = costVolume > 0 ? totalAnnual / costVolume : totalAnnual / totalInteractions;
// Score basado en percentiles de aerolíneas (CPI invertido: menor = mejor)
// CPI <= 2.20 (p25) = 100pts (excelente, top 25%)
// Score based on airline percentiles (inverse CPI: lower = better)
// CPI <= 2.20 (p25) = 100pts (excellent, top 25%)
// CPI 2.20-3.50 (p25-p50) = 80pts (bueno, top 50%)
// CPI 3.50-4.50 (p50-p75) = 60pts (promedio)
// CPI 3.50-4.50 (p50-p75) = 60pts (average)
// CPI 4.50-5.50 (p75-p90) = 40pts (por debajo)
// CPI > 5.50 (>p90) = 20pts (crítico)
// CPI > 5.50 (>p90) = 20pts (critical)
let score: number;
if (cpi <= 2.20) {
score = 100;
@@ -674,24 +674,24 @@ function buildEconomyDimension(
const cpiDiff = cpi - CPI_BENCHMARK;
const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative';
let summary = `Coste por interacción: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `;
let summary = `Cost per interaction: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `;
if (cpi <= CPI_BENCHMARK) {
summary += 'Eficiencia de costes óptima, por debajo del benchmark del sector.';
summary += 'Optimal cost efficiency, below sector benchmark.';
} else if (cpi <= 4.50) {
summary += 'Coste ligeramente por encima del benchmark, oportunidad de optimización.';
summary += 'Cost slightly above benchmark, optimization opportunity.';
} else {
summary += 'Coste elevado respecto al sector. Priorizar iniciativas de eficiencia.';
summary += 'High cost relative to sector. Prioritize efficiency initiatives.';
}
const dimension: DimensionAnalysis = {
id: 'economy_costs',
name: 'economy_costs',
title: 'Economía & Costes',
title: 'Economy & Costs',
score,
percentile: undefined,
summary,
kpi: {
label: 'Coste por Interacción',
label: 'Cost per Interaction',
value: `${cpi.toFixed(2)}`,
change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`,
changeType: cpiStatus as 'positive' | 'neutral' | 'negative'
@@ -779,7 +779,7 @@ function buildAgenticReadinessDimension(
}
// ==== Economía y costes (economy_costs) ====
// ==== Economy and costs (economy_costs) ====
function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
const econ = raw?.economy_costs;
@@ -814,17 +814,17 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
const savingsBreakdown = annualSavings
? [
{
category: 'Ineficiencias operativas (AHT, escalaciones)',
category: 'Operational inefficiencies (AHT, escalations)',
amount: Math.round(annualSavings * 0.5),
percentage: 50,
},
{
category: 'Automatización de volumen repetitivo',
category: 'Automation of repetitive volume',
amount: Math.round(annualSavings * 0.3),
percentage: 30,
},
{
category: 'Otros beneficios (calidad, CX)',
category: 'Other benefits (quality, CX)',
amount: Math.round(annualSavings * 0.2),
percentage: 20,
},
@@ -834,7 +834,7 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
const costBreakdown = currentAnnualCost
? [
{
category: 'Coste laboral',
category: 'Labor cost',
amount: laborAnnual,
percentage: Math.round(
(laborAnnual / currentAnnualCost) * 100
@@ -848,7 +848,7 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
),
},
{
category: 'Tecnología',
category: 'Technology',
amount: techAnnual,
percentage: Math.round(
(techAnnual / currentAnnualCost) * 100
@@ -914,7 +914,7 @@ export function mapBackendResultsToAnalysisData(
Math.min(100, Math.round(arScore * 10))
);
// v3.3: 7 dimensiones (Complejidad recuperada con métrica Hold Time >60s)
// v3.3: 7 dimensions (Complexity recovered with Hold Time metric >60s)
const { dimension: volumetryDimension, extraKpis } =
buildVolumetryDimension(raw);
const operationalEfficiencyDimension = buildOperationalEfficiencyDimension(raw);
@@ -946,7 +946,7 @@ export function mapBackendResultsToAnalysisData(
const csatAvg = computeCsatAverage(cs);
// CSAT global (opcional)
// Global CSAT (optional)
const csatGlobalRaw = safeNumber(cs?.csat_global, NaN);
const csatGlobal =
Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0
@@ -954,7 +954,7 @@ export function mapBackendResultsToAnalysisData(
: undefined;
// KPIs de resumen (los 4 primeros son los que se ven en "Métricas de Contacto")
// Summary KPIs (the first 4 are shown in "Contact Metrics")
const summaryKpis: Kpi[] = [];
// 1) Interacciones Totales (volumen backend)
@@ -975,9 +975,9 @@ export function mapBackendResultsToAnalysisData(
: 'N/D',
});
// 3) Tasa FCR
// 3) FCR Rate
summaryKpis.push({
label: 'Tasa FCR',
label: 'FCR Rate',
value:
fcrPct !== undefined
? `${Math.round(fcrPct)}%`
@@ -993,18 +993,18 @@ export function mapBackendResultsToAnalysisData(
: 'N/D',
});
// --- KPIs adicionales, usados en otras secciones ---
// --- Additional KPIs, used in other sections ---
if (numChannels > 0) {
summaryKpis.push({
label: 'Canales analizados',
label: 'Channels analyzed',
value: String(numChannels),
});
}
if (numSkills > 0) {
summaryKpis.push({
label: 'Skills analizadas',
label: 'Skills analyzed',
value: String(numSkills),
});
}
@@ -1027,13 +1027,13 @@ export function mapBackendResultsToAnalysisData(
if (totalAnnual) {
summaryKpis.push({
label: 'Coste anual actual (backend)',
label: 'Current annual cost (backend)',
value: `${totalAnnual.toFixed(0)}`,
});
}
if (annualSavings) {
summaryKpis.push({
label: 'Ahorro potencial anual (backend)',
label: 'Annual potential savings (backend)',
value: `${annualSavings.toFixed(0)}`,
});
}
@@ -1043,22 +1043,22 @@ export function mapBackendResultsToAnalysisData(
const economicModel = buildEconomicModel(raw);
const benchmarkData = buildBenchmarkData(raw);
// Generar findings y recommendations basados en volumetría
// Generate findings and recommendations based on volumetry
const findings: Finding[] = [];
const recommendations: Recommendation[] = [];
// Extraer offHoursPct de la dimensión de volumetría
const offHoursPct = volumetryDimension?.distribution_data?.off_hours_pct ?? 0;
const offHoursPctValue = offHoursPct * 100; // Convertir de 0-1 a 0-100
const offHoursPctValue = offHoursPct * 100; // Convert from 0-1 to 0-100
if (offHoursPctValue > 20) {
const offHoursVolume = Math.round(totalVolume * offHoursPctValue / 100);
findings.push({
type: offHoursPctValue > 30 ? 'critical' : 'warning',
title: 'Alto Volumen Fuera de Horario',
text: `${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario (8-19h)`,
title: 'High Off-Hours Volume',
text: `${offHoursPctValue.toFixed(0)}% of interactions outside business hours (8-19h)`,
dimensionId: 'volumetry_distribution',
description: `${offHoursVolume.toLocaleString()} interacciones (${offHoursPctValue.toFixed(1)}%) ocurren fuera de horario laboral. Oportunidad ideal para implementar agentes virtuales 24/7.`,
description: `${offHoursVolume.toLocaleString()} interactions (${offHoursPctValue.toFixed(1)}%) occur outside business hours. Ideal opportunity to implement 24/7 virtual agents.`,
impact: offHoursPctValue > 30 ? 'high' : 'medium'
});
@@ -1066,12 +1066,12 @@ export function mapBackendResultsToAnalysisData(
const estimatedSavings = Math.round(offHoursVolume * estimatedContainment / 100);
recommendations.push({
priority: 'high',
title: 'Implementar Agente Virtual 24/7',
text: `Desplegar agente virtual para atender ${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario`,
description: `${offHoursVolume.toLocaleString()} interacciones ocurren fuera de horario laboral (19:00-08:00). Un agente virtual puede resolver ~${estimatedContainment}% de estas consultas automáticamente.`,
title: 'Implement 24/7 Virtual Agent',
text: `Deploy virtual agent to handle ${offHoursPctValue.toFixed(0)}% of off-hours interactions`,
description: `${offHoursVolume.toLocaleString()} interactions occur outside business hours (19:00-08:00). A virtual agent can resolve ~${estimatedContainment}% of these queries automatically.`,
dimensionId: 'volumetry_distribution',
impact: `Potencial de contención: ${estimatedSavings.toLocaleString()} interacciones/período`,
timeline: '1-3 meses'
impact: `Containment potential: ${estimatedSavings.toLocaleString()} interactions/period`,
timeline: '1-3 months'
});
}
@@ -1080,7 +1080,7 @@ export function mapBackendResultsToAnalysisData(
overallHealthScore,
summaryKpis: mergedKpis,
dimensions,
heatmapData: [], // el heatmap por skill lo seguimos generando en el front
heatmapData: [], // skill heatmap still generated on frontend
findings,
recommendations,
opportunities: [],
@@ -1166,9 +1166,9 @@ export function buildHeatmapFromBackend(
abandonment_rate: number;
fcr_tecnico: number;
fcr_real: number;
aht_mean: number; // AHT promedio del backend (solo VALID - consistente con fresh path)
aht_total: number; // AHT total (ALL rows incluyendo NOISE/ZOMBIE/ABANDON) - solo informativo
hold_time_mean: number; // Hold time promedio (consistente con fresh path - MEAN, no P50)
aht_mean: number; // Average AHT from backend (VALID only - consistent with fresh path)
aht_total: number; // Total AHT (ALL rows including NOISE/ZOMBIE/ABANDON) - informational only
hold_time_mean: number; // Average Hold time (consistent with fresh path - MEAN, not P50)
}>();
for (const m of metricsBySkillRaw) {
@@ -1178,9 +1178,9 @@ export function buildHeatmapFromBackend(
abandonment_rate: safeNumber(m.abandonment_rate, NaN),
fcr_tecnico: safeNumber(m.fcr_tecnico, NaN),
fcr_real: safeNumber(m.fcr_real, NaN),
aht_mean: safeNumber(m.aht_mean, NaN), // AHT promedio (solo VALID)
aht_total: safeNumber(m.aht_total, NaN), // AHT total (ALL rows)
hold_time_mean: safeNumber(m.hold_time_mean, NaN), // Hold time promedio (MEAN)
aht_mean: safeNumber(m.aht_mean, NaN), // Average AHT (VALID only)
aht_total: safeNumber(m.aht_total, NaN), // Total AHT (ALL rows)
hold_time_mean: safeNumber(m.hold_time_mean, NaN), // Average Hold time (MEAN)
});
}
}
@@ -1314,7 +1314,7 @@ export function buildHeatmapFromBackend(
// Dimensiones agentic similares a las que tenías en generateHeatmapData,
// pero usando valores reales en lugar de aleatorios.
// 1) Predictibilidad (menor CV => mayor puntuación)
// 1) Predictability (lower CV => higher score)
const predictability_score = Math.max(
0,
Math.min(
@@ -1347,14 +1347,14 @@ export function buildHeatmapFromBackend(
} else {
// NO usar estimación - usar valores globales del backend directamente
// Esto asegura consistencia con el fresh path que usa valores directos del CSV
skillTransferRate = globalEscalation; // Usar tasa global, sin estimación
skillTransferRate = globalEscalation; // Use global rate, no estimation
skillAbandonmentRate = abandonmentRateBackend;
skillFcrTecnico = 100 - skillTransferRate;
skillFcrReal = globalFcrPct;
console.warn(`⚠️ No metrics_by_skill for skill ${skill} - using global rates`);
}
// Complejidad inversa basada en transfer rate del skill
// Inverse complexity based on skill transfer rate
const complexity_inverse_score = Math.max(
0,
Math.min(
@@ -1446,10 +1446,10 @@ export function buildHeatmapFromBackend(
volume,
cost_volume: costVolume,
aht_seconds: aht_mean,
aht_total: aht_total, // AHT con TODAS las filas (solo informativo)
aht_total: aht_total, // AHT with ALL rows (informational only)
metrics: {
fcr: Math.round(skillFcrReal), // FCR Real (sin transfer Y sin recontacto 7d)
fcr_tecnico: Math.round(skillFcrTecnico), // FCR Técnico (comparable con benchmarks)
fcr_tecnico: Math.round(skillFcrTecnico), // Technical FCR (comparable with benchmarks)
aht: ahtMetric,
csat: csatMetric0_100,
hold_time: holdMetric,
@@ -1457,12 +1457,12 @@ export function buildHeatmapFromBackend(
abandonment_rate: Math.round(skillAbandonmentRate),
},
annual_cost,
cpi: skillCpi, // CPI real del backend (si disponible)
cpi: skillCpi, // Real CPI from backend (if available)
variability: {
cv_aht: Math.round(cv_aht * 100), // %
cv_talk_time: 0,
cv_hold_time: 0,
transfer_rate: skillTransferRate, // Transfer rate REAL o estimado
transfer_rate: skillTransferRate, // REAL or estimated transfer rate
},
automation_readiness,
dimensions: {
@@ -1491,19 +1491,19 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
const benchmarkData: AnalysisData['benchmarkData'] = [];
// Benchmarks hardcoded para sector aéreo
// Hardcoded benchmarks for airline sector
const AIRLINE_BENCHMARKS = {
aht_p50: 380, // segundos
aht_p50: 380, // seconds
fcr: 70, // % (rango 68-72%)
abandonment: 5, // % (rango 5-8%)
ratio_p90_p50: 2.0, // ratio saludable
cpi: 5.25 // € (rango €4.50-€6.00)
};
// 1. AHT Promedio (benchmark sector aéreo: 380s)
// 1. Average AHT (airline sector benchmark: 380s)
const ahtP50 = safeNumber(op?.aht_distribution?.p50, 0);
if (ahtP50 > 0) {
// Percentil: menor AHT = mejor. Si AHT <= benchmark = P75+
// Percentile: lower AHT = better. If AHT <= benchmark = P75+
const ahtPercentile = ahtP50 <= AIRLINE_BENCHMARKS.aht_p50
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.aht_p50 - ahtP50) / 10))
: Math.max(10, 75 - Math.round((ahtP50 - AIRLINE_BENCHMARKS.aht_p50) / 5));
@@ -1521,15 +1521,15 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
});
}
// 2. Tasa FCR (benchmark sector aéreo: 70%)
// 2. FCR Rate (benchmark airline sector: 70%)
const fcrRate = safeNumber(op?.fcr_rate, NaN);
if (Number.isFinite(fcrRate) && fcrRate >= 0) {
// Percentil: mayor FCR = mejor
// Percentile: higher FCR = better
const fcrPercentile = fcrRate >= AIRLINE_BENCHMARKS.fcr
? Math.min(90, 50 + Math.round((fcrRate - AIRLINE_BENCHMARKS.fcr) * 2))
: Math.max(10, 50 - Math.round((AIRLINE_BENCHMARKS.fcr - fcrRate) * 2));
benchmarkData.push({
kpi: 'Tasa FCR',
kpi: 'FCR Rate',
userValue: fcrRate / 100,
userDisplay: `${Math.round(fcrRate)}%`,
industryValue: AIRLINE_BENCHMARKS.fcr / 100,
@@ -1560,15 +1560,15 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
});
}
// 4. Tasa de Abandono (benchmark sector aéreo: 5%)
// 4. Abandonment Rate (benchmark airline sector: 5%)
const abandonRate = safeNumber(op?.abandonment_rate, NaN);
if (Number.isFinite(abandonRate) && abandonRate >= 0) {
// Percentil: menor abandono = mejor
// Percentile: lower abandonment = better
const abandonPercentile = abandonRate <= AIRLINE_BENCHMARKS.abandonment
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.abandonment - abandonRate) * 5))
: Math.max(10, 75 - Math.round((abandonRate - AIRLINE_BENCHMARKS.abandonment) * 5));
benchmarkData.push({
kpi: 'Tasa de Abandono',
kpi: 'Abandonment Rate',
userValue: abandonRate / 100,
userDisplay: `${abandonRate.toFixed(1)}%`,
industryValue: AIRLINE_BENCHMARKS.abandonment / 100,
@@ -1581,11 +1581,11 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
});
}
// 5. Ratio P90/P50 (benchmark sector aéreo: <2.0)
// 5. Ratio P90/P50 (benchmark airline sector: <2.0)
const ahtP90 = safeNumber(op?.aht_distribution?.p90, 0);
const ratio = ahtP50 > 0 && ahtP90 > 0 ? ahtP90 / ahtP50 : 0;
if (ratio > 0) {
// Percentil: menor ratio = mejor
// Percentile: lower ratio = better
const ratioPercentile = ratio <= AIRLINE_BENCHMARKS.ratio_p90_p50
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.ratio_p90_p50 - ratio) * 30))
: Math.max(10, 75 - Math.round((ratio - AIRLINE_BENCHMARKS.ratio_p90_p50) * 30));
@@ -1603,13 +1603,13 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
});
}
// 6. Tasa de Transferencia/Escalación
// 6. Transfer/Escalation Rate
const escalationRate = safeNumber(op?.escalation_rate, NaN);
if (Number.isFinite(escalationRate) && escalationRate >= 0) {
// Menor escalación = mejor percentil
// Lower escalation = better percentile
const escalationPercentile = Math.max(10, Math.min(90, Math.round(100 - escalationRate * 5)));
benchmarkData.push({
kpi: 'Tasa de Transferencia',
kpi: 'Transfer Rate',
userValue: escalationRate / 100,
userDisplay: `${escalationRate.toFixed(1)}%`,
industryValue: 0.15,
@@ -1622,7 +1622,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
});
}
// 7. CPI - Coste por Interacción (benchmark sector aéreo: €4.50-€6.00)
// 7. CPI - Cost per Interaction (benchmark airline sector: €4.50-€6.00)
const econ = raw?.economy_costs;
const totalAnnualCost = safeNumber(econ?.cost_breakdown?.total_annual, 0);
const volumetry = raw?.volumetry;
@@ -1634,7 +1634,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
if (totalAnnualCost > 0 && totalInteractions > 0) {
const cpi = totalAnnualCost / totalInteractions;
// Menor CPI = mejor. Si CPI <= 4.50 = excelente (P90+), si CPI >= 6.00 = malo (P25-)
// Lower CPI = better. If CPI <= 4.50 = excellent (P90+), if CPI >= 6.00 = poor (P25-)
let cpiPercentile: number;
if (cpi <= 4.50) {
cpiPercentile = Math.min(95, 90 + Math.round((4.50 - cpi) * 10));
@@ -1647,7 +1647,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
}
benchmarkData.push({
kpi: 'Coste por Interacción (CPI)',
kpi: 'Cost per Interaction (CPI)',
userValue: cpi,
userDisplay: `${cpi.toFixed(2)}`,
industryValue: AIRLINE_BENCHMARKS.cpi,

View File

@@ -1,11 +1,11 @@
// utils/dataTransformation.ts
// Pipeline de transformación de datos raw a métricas procesadas
// Raw data to processed metrics transformation pipeline
import type { RawInteraction } from '../types';
/**
* Paso 1: Limpieza de Ruido
* Elimina interacciones con duration < 10 segundos (falsos contactos o errores de sistema)
* Step 1: Noise Cleanup
* Removes interactions with duration < 10 seconds (false contacts or system errors)
*/
export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteraction[] {
const MIN_DURATION_SECONDS = 10;
@@ -22,30 +22,30 @@ export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteracti
const removedCount = interactions.length - cleaned.length;
const removedPercentage = ((removedCount / interactions.length) * 100).toFixed(1);
console.log(`🧹 Limpieza de Ruido: ${removedCount} interacciones eliminadas (${removedPercentage}% del total)`);
console.log(`Interacciones limpias: ${cleaned.length}`);
console.log(`🧹 Noise Cleanup: ${removedCount} interactions removed (${removedPercentage}% of total)`);
console.log(`Clean interactions: ${cleaned.length}`);
return cleaned;
}
/**
* Métricas base calculadas por skill
* Base metrics calculated by skill
*/
export interface SkillBaseMetrics {
skill: string;
volume: number; // Número de interacciones
aht_mean: number; // AHT promedio (segundos)
aht_std: number; // Desviación estándar del AHT
transfer_rate: number; // Tasa de transferencia (0-100)
total_cost: number; // Coste total (€)
volume: number; // Number of interactions
aht_mean: number; // Average AHT (seconds)
aht_std: number; // AHT standard deviation
transfer_rate: number; // Transfer rate (0-100)
total_cost: number; // Total cost (€)
// Datos auxiliares para cálculos posteriores
aht_values: number[]; // Array de todos los AHT para percentiles
// Auxiliary data for subsequent calculations
aht_values: number[]; // Array of all AHT values for percentiles
}
/**
* Paso 2: Calcular Métricas Base por Skill
* Agrupa por skill y calcula volumen, AHT promedio, desviación estándar, tasa de transferencia y coste
* Step 2: Calculate Base Metrics by Skill
* Groups by skill and calculates volume, average AHT, standard deviation, transfer rate and cost
*/
export function calculateSkillBaseMetrics(
interactions: RawInteraction[],
@@ -53,7 +53,7 @@ export function calculateSkillBaseMetrics(
): SkillBaseMetrics[] {
const COST_PER_SECOND = costPerHour / 3600;
// Agrupar por skill
// Group by skill
const skillGroups = new Map<string, RawInteraction[]>();
interactions.forEach(interaction => {
@@ -64,31 +64,31 @@ export function calculateSkillBaseMetrics(
skillGroups.get(skill)!.push(interaction);
});
// Calcular métricas por skill
// Calculate metrics per skill
const metrics: SkillBaseMetrics[] = [];
skillGroups.forEach((skillInteractions, skill) => {
const volume = skillInteractions.length;
// Calcular AHT para cada interacción
// Calculate AHT for each interaction
const ahtValues = skillInteractions.map(i =>
i.duration_talk + i.hold_time + i.wrap_up_time
);
// AHT promedio
// Average AHT
const ahtMean = ahtValues.reduce((sum, val) => sum + val, 0) / volume;
// Desviación estándar del AHT
// AHT standard deviation
const variance = ahtValues.reduce((sum, val) =>
sum + Math.pow(val - ahtMean, 2), 0
) / volume;
const ahtStd = Math.sqrt(variance);
// Tasa de transferencia
// Transfer rate
const transferCount = skillInteractions.filter(i => i.transfer_flag).length;
const transferRate = (transferCount / volume) * 100;
// Coste total
// Total cost
const totalCost = ahtValues.reduce((sum, aht) =>
sum + (aht * COST_PER_SECOND), 0
);
@@ -104,82 +104,82 @@ export function calculateSkillBaseMetrics(
});
});
// Ordenar por volumen descendente
// Sort by descending volume
metrics.sort((a, b) => b.volume - a.volume);
console.log(`📊 Métricas Base calculadas para ${metrics.length} skills`);
console.log(`📊 Base Metrics calculated for ${metrics.length} skills`);
return metrics;
}
/**
* Dimensiones transformadas para Agentic Readiness Score
* Transformed dimensions for Agentic Readiness Score
*/
export interface SkillDimensions {
skill: string;
volume: number;
// Dimensión 1: Predictibilidad (0-10)
// Dimension 1: Predictability (0-10)
predictability_score: number;
predictability_cv: number; // Coeficiente de Variación (para referencia)
predictability_cv: number; // Coefficient of Variation (for reference)
// Dimensión 2: Complejidad Inversa (0-10)
// Dimension 2: Inverse Complexity (0-10)
complexity_inverse_score: number;
complexity_transfer_rate: number; // Tasa de transferencia (para referencia)
complexity_transfer_rate: number; // Transfer rate (for reference)
// Dimensión 3: Repetitividad/Impacto (0-10)
// Dimension 3: Repetitiveness/Impact (0-10)
repetitivity_score: number;
// Datos auxiliares
// Auxiliary data
aht_mean: number;
total_cost: number;
}
/**
* Paso 3: Transformar Métricas Base a Dimensiones
* Aplica las fórmulas de normalización para obtener scores 0-10
* Step 3: Transform Base Metrics to Dimensions
* Applies normalization formulas to obtain 0-10 scores
*/
export function transformToDimensions(
baseMetrics: SkillBaseMetrics[]
): SkillDimensions[] {
return baseMetrics.map(metric => {
// Dimensión 1: Predictibilidad (Proxy: Variabilidad del AHT)
// CV = desviación estándar / media
// Dimension 1: Predictability (Proxy: AHT Variability)
// CV = standard deviation / mean
const cv = metric.aht_std / metric.aht_mean;
// Normalización: CV <= 0.3 → 10, CV >= 1.5 → 0
// Fórmula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10)))
// Normalization: CV <= 0.3 → 10, CV >= 1.5 → 0
// Formula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10)))
const predictabilityScore = Math.max(0, Math.min(10,
10 - ((cv - 0.3) / 1.2 * 10)
));
// Dimensión 2: Complejidad Inversa (Proxy: Tasa de Transferencia)
// T = tasa de transferencia (%)
// Dimension 2: Inverse Complexity (Proxy: Transfer Rate)
// T = transfer rate (%)
const transferRate = metric.transfer_rate;
// Normalización: T <= 5% → 10, T >= 30% → 0
// Fórmula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10)))
// Normalization: T <= 5% → 10, T >= 30% → 0
// Formula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10)))
const complexityInverseScore = Math.max(0, Math.min(10,
10 - ((transferRate / 100 - 0.05) / 0.25 * 10)
));
// Dimensión 3: Repetitividad/Impacto (Proxy: Volumen)
// Normalización fija: > 5,000 llamadas/mes = 10, < 100 = 0
// Dimension 3: Repetitiveness/Impact (Proxy: Volume)
// Fixed normalization: > 5,000 calls/month = 10, < 100 = 0
let repetitivityScore: number;
if (metric.volume >= 5000) {
repetitivityScore = 10;
} else if (metric.volume <= 100) {
repetitivityScore = 0;
} else {
// Interpolación lineal entre 100 y 5000
// Linear interpolation between 100 and 5000
repetitivityScore = ((metric.volume - 100) / (5000 - 100)) * 10;
}
return {
skill: metric.skill,
volume: metric.volume,
predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal
predictability_cv: Math.round(cv * 100) / 100, // 2 decimales
predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal place
predictability_cv: Math.round(cv * 100) / 100, // 2 decimal places
complexity_inverse_score: Math.round(complexityInverseScore * 10) / 10,
complexity_transfer_rate: Math.round(transferRate * 10) / 10,
repetitivity_score: Math.round(repetitivityScore * 10) / 10,
@@ -190,7 +190,7 @@ export function transformToDimensions(
}
/**
* Resultado final con Agentic Readiness Score
* Final result with Agentic Readiness Score
*/
export interface SkillAgenticReadiness extends SkillDimensions {
agentic_readiness_score: number; // 0-10
@@ -199,28 +199,28 @@ export interface SkillAgenticReadiness extends SkillDimensions {
}
/**
* Paso 4: Calcular Agentic Readiness Score
* Promedio ponderado de las 3 dimensiones
* Step 4: Calculate Agentic Readiness Score
* Weighted average of the 3 dimensions
*/
export function calculateAgenticReadinessScore(
dimensions: SkillDimensions[],
weights?: { predictability: number; complexity: number; repetitivity: number }
): SkillAgenticReadiness[] {
// Pesos por defecto (ajustables)
// Default weights (adjustable)
const w = weights || {
predictability: 0.40, // 40% - Más importante
predictability: 0.40, // 40% - Most important
complexity: 0.35, // 35%
repetitivity: 0.25 // 25%
};
return dimensions.map(dim => {
// Promedio ponderado
// Weighted average
const score =
dim.predictability_score * w.predictability +
dim.complexity_inverse_score * w.complexity +
dim.repetitivity_score * w.repetitivity;
// Categorizar
// Categorize
let category: 'automate_now' | 'assist_copilot' | 'optimize_first';
let label: string;
@@ -245,29 +245,29 @@ export function calculateAgenticReadinessScore(
}
/**
* Pipeline completo: Raw Data → Agentic Readiness Score
* Complete pipeline: Raw Data → Agentic Readiness Score
*/
export function transformRawDataToAgenticReadiness(
rawInteractions: RawInteraction[],
costPerHour: number,
weights?: { predictability: number; complexity: number; repetitivity: number }
): SkillAgenticReadiness[] {
console.log(`🚀 Iniciando pipeline de transformación con ${rawInteractions.length} interacciones...`);
console.log(`🚀 Starting transformation pipeline with ${rawInteractions.length} interactions...`);
// Paso 1: Limpieza de ruido
// Step 1: Noise cleanup
const cleanedData = cleanNoiseFromData(rawInteractions);
// Paso 2: Calcular métricas base
// Step 2: Calculate base metrics
const baseMetrics = calculateSkillBaseMetrics(cleanedData, costPerHour);
// Paso 3: Transformar a dimensiones
// Step 3: Transform to dimensions
const dimensions = transformToDimensions(baseMetrics);
// Paso 4: Calcular Agentic Readiness Score
// Step 4: Calculate Agentic Readiness Score
const agenticReadiness = calculateAgenticReadinessScore(dimensions, weights);
console.log(`✅ Pipeline completado: ${agenticReadiness.length} skills procesados`);
console.log(`📈 Distribución:`);
console.log(`✅ Pipeline completed: ${agenticReadiness.length} skills processed`);
console.log(`📈 Distribution:`);
const automateCount = agenticReadiness.filter(s => s.readiness_category === 'automate_now').length;
const assistCount = agenticReadiness.filter(s => s.readiness_category === 'assist_copilot').length;
const optimizeCount = agenticReadiness.filter(s => s.readiness_category === 'optimize_first').length;
@@ -279,7 +279,7 @@ export function transformRawDataToAgenticReadiness(
}
/**
* Utilidad: Generar resumen de estasticas
* Utility: Generate statistics summary
*/
export function generateTransformationSummary(
originalCount: number,
@@ -300,11 +300,11 @@ export function generateTransformationSummary(
const optimizePercent = skillsCount > 0 ? ((optimizeCount/skillsCount)*100).toFixed(0) : '0';
return `
📊 Resumen de Transformación:
Interacciones originales: ${originalCount.toLocaleString()}
Ruido eliminado: ${removedCount.toLocaleString()} (${removedPercentage}%)
Interacciones limpias: ${cleanedCount.toLocaleString()}
Skills únicos: ${skillsCount}
📊 Transformation Summary:
Original interactions: ${originalCount.toLocaleString()}
Noise removed: ${removedCount.toLocaleString()} (${removedPercentage}%)
Clean interactions: ${cleanedCount.toLocaleString()}
Unique skills: ${skillsCount}
🎯 Agentic Readiness:
• 🟢 Automate Now: ${automateCount} skills (${automatePercent}%)

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
// utils/segmentClassifier.ts
// Utilidad para clasificar colas/skills en segmentos de cliente
// Utility to classify queues/skills into customer segments
import type { CustomerSegment, RawInteraction, StaticConfig } from '../types';
@@ -10,8 +10,8 @@ export interface SegmentMapping {
}
/**
* Parsea string de colas separadas por comas
* Ejemplo: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"]
* Parses queue string separated by commas
* Example: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"]
*/
export function parseQueueList(input: string): string[] {
if (!input || input.trim().length === 0) {
@@ -25,13 +25,13 @@ export function parseQueueList(input: string): string[] {
}
/**
* Clasifica una cola según el mapeo proporcionado
* Usa matching parcial y case-insensitive
* Classifies a queue according to the provided mapping
* Uses partial and case-insensitive matching
*
* Ejemplo:
* Example:
* - queue: "VIP_Support" + mapping.high: ["VIP"] → "high"
* - queue: "Soporte_General_N1" + mapping.medium: ["Soporte_General"] → "medium"
* - queue: "Retencion" (no match) → "medium" (default)
* - queue: "General_Support_L1" + mapping.medium: ["General_Support"] → "medium"
* - queue: "Retention" (no match) → "medium" (default)
*/
export function classifyQueue(
queue: string,
@@ -39,7 +39,7 @@ export function classifyQueue(
): CustomerSegment {
const normalizedQueue = queue.toLowerCase().trim();
// Buscar en high value
// Search in high value
for (const highQueue of mapping.high_value_queues) {
const normalizedHigh = highQueue.toLowerCase().trim();
if (normalizedQueue.includes(normalizedHigh) || normalizedHigh.includes(normalizedQueue)) {
@@ -47,7 +47,7 @@ export function classifyQueue(
}
}
// Buscar en low value
// Search in low value
for (const lowQueue of mapping.low_value_queues) {
const normalizedLow = lowQueue.toLowerCase().trim();
if (normalizedQueue.includes(normalizedLow) || normalizedLow.includes(normalizedQueue)) {
@@ -55,7 +55,7 @@ export function classifyQueue(
}
}
// Buscar en medium value (explícito)
// Search in medium value (explicit)
for (const mediumQueue of mapping.medium_value_queues) {
const normalizedMedium = mediumQueue.toLowerCase().trim();
if (normalizedQueue.includes(normalizedMedium) || normalizedMedium.includes(normalizedQueue)) {
@@ -63,13 +63,13 @@ export function classifyQueue(
}
}
// Default: medium (para colas no mapeadas)
// Default: medium (for unmapped queues)
return 'medium';
}
/**
* Clasifica todas las colas únicas de un conjunto de interacciones
* Retorna un mapa de cola → segmento
* Classifies all unique queues from a set of interactions
* Returns a map of queue → segment
*/
export function classifyAllQueues(
interactions: RawInteraction[],
@@ -77,10 +77,10 @@ export function classifyAllQueues(
): Map<string, CustomerSegment> {
const queueSegments = new Map<string, CustomerSegment>();
// Obtener colas únicas
// Get unique queues
const uniqueQueues = [...new Set(interactions.map(i => i.queue_skill))];
// Clasificar cada cola
// Classify each queue
uniqueQueues.forEach(queue => {
queueSegments.set(queue, classifyQueue(queue, mapping));
});
@@ -89,8 +89,8 @@ export function classifyAllQueues(
}
/**
* Genera estadísticas de segmentación
* Retorna conteo, porcentaje y lista de colas por segmento
* Generates segmentation statistics
* Returns count, percentage and list of queues by segment
*/
export function getSegmentationStats(
interactions: RawInteraction[],
@@ -108,13 +108,13 @@ export function getSegmentationStats(
total: interactions.length
};
// Contar interacciones por segmento
// Count interactions by segment
interactions.forEach(interaction => {
const segment = queueSegments.get(interaction.queue_skill) || 'medium';
stats[segment].count++;
});
// Calcular porcentajes
// Calculate percentages
const total = interactions.length;
if (total > 0) {
stats.high.percentage = Math.round((stats.high.count / total) * 100);
@@ -122,7 +122,7 @@ export function getSegmentationStats(
stats.low.percentage = Math.round((stats.low.count / total) * 100);
}
// Obtener colas por segmento (únicas)
// Get queues by segment (unique)
queueSegments.forEach((segment, queue) => {
if (!stats[segment].queues.includes(queue)) {
stats[segment].queues.push(queue);
@@ -133,7 +133,7 @@ export function getSegmentationStats(
}
/**
* Valida que el mapeo tenga al menos una cola en algún segmento
* Validates that the mapping has at least one queue in some segment
*/
export function isValidMapping(mapping: SegmentMapping): boolean {
return (
@@ -144,8 +144,8 @@ export function isValidMapping(mapping: SegmentMapping): boolean {
}
/**
* Crea un mapeo desde StaticConfig
* Si no hay segment_mapping, retorna mapeo vacío
* Creates a mapping from StaticConfig
* If there is no segment_mapping, returns empty mapping
*/
export function getMappingFromConfig(config: StaticConfig): SegmentMapping | null {
if (!config.segment_mapping) {
@@ -160,8 +160,8 @@ export function getMappingFromConfig(config: StaticConfig): SegmentMapping | nul
}
/**
* Obtiene el segmento para una cola específica desde el config
* Si no hay mapeo, retorna 'medium' por defecto
* Gets the segment for a specific queue from the config
* If there is no mapping, returns 'medium' by default
*/
export function getSegmentForQueue(
queue: string,
@@ -177,7 +177,7 @@ export function getSegmentForQueue(
}
/**
* Formatea estasticas para mostrar en UI
* Formats statistics for display in UI
*/
export function formatSegmentationSummary(
stats: ReturnType<typeof getSegmentationStats>
@@ -185,15 +185,15 @@ export function formatSegmentationSummary(
const parts: string[] = [];
if (stats.high.count > 0) {
parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interacciones)`);
parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interactions)`);
}
if (stats.medium.count > 0) {
parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interacciones)`);
parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interactions)`);
}
if (stats.low.count > 0) {
parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interacciones)`);
parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interactions)`);
}
return parts.join(' | ');