Merge pull request #4 from sujucu70/claude/check-agent-readiness-status-Exnpc
Claude/check agent readiness status exnpc
This commit is contained in:
163
TRANSLATION_STATUS.md
Normal file
163
TRANSLATION_STATUS.md
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
# Translation Status - Beyond CX Analytics
|
||||||
|
|
||||||
|
## ✅ Completed Modules
|
||||||
|
|
||||||
|
### Agentic Readiness Module
|
||||||
|
- **Status:** ✅ **COMPLETED**
|
||||||
|
- **Commit:** `b991824`
|
||||||
|
- **Files:**
|
||||||
|
- ✅ `frontend/utils/agenticReadinessV2.ts` - All functions, comments, and descriptions translated
|
||||||
|
- ✅ `frontend/components/tabs/AgenticReadinessTab.tsx` - RED_FLAG_CONFIGS and comments translated
|
||||||
|
- ✅ `frontend/locales/en.json` & `es.json` - New subfactors section added
|
||||||
|
- ✅ `backend/beyond_flows/scorers/agentic_score.py` - All docstrings, comments, and reason codes translated
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔄 Modules Pending Translation
|
||||||
|
|
||||||
|
### HIGH PRIORITY - Core Utils (Frontend)
|
||||||
|
|
||||||
|
#### 1. **realDataAnalysis.ts**
|
||||||
|
- **Lines of Spanish:** ~92 occurrences
|
||||||
|
- **Scope:**
|
||||||
|
- Function names: `clasificarTierSimple()`, `clasificarTier()`
|
||||||
|
- 20+ inline comments in Spanish
|
||||||
|
- Function documentation
|
||||||
|
- **Impact:** HIGH - Core analysis engine
|
||||||
|
- **Estimated effort:** 2-3 hours
|
||||||
|
|
||||||
|
#### 2. **analysisGenerator.ts**
|
||||||
|
- **Lines of Spanish:** ~49 occurrences
|
||||||
|
- **Scope:**
|
||||||
|
- Multiple inline comments
|
||||||
|
- References to `clasificarTierSimple()`
|
||||||
|
- Data transformation comments
|
||||||
|
- **Impact:** HIGH - Main data generator
|
||||||
|
- **Estimated effort:** 1-2 hours
|
||||||
|
|
||||||
|
#### 3. **backendMapper.ts**
|
||||||
|
- **Lines of Spanish:** ~13 occurrences
|
||||||
|
- **Scope:**
|
||||||
|
- Function documentation
|
||||||
|
- Mapping logic comments
|
||||||
|
- **Impact:** MEDIUM - Backend integration
|
||||||
|
- **Estimated effort:** 30-60 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### MEDIUM PRIORITY - Utilities (Frontend)
|
||||||
|
|
||||||
|
#### 4. **dataTransformation.ts**
|
||||||
|
- **Lines of Spanish:** ~8 occurrences
|
||||||
|
- **Impact:** MEDIUM
|
||||||
|
- **Estimated effort:** 30 minutes
|
||||||
|
|
||||||
|
#### 5. **segmentClassifier.ts**
|
||||||
|
- **Lines of Spanish:** ~3 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 15 minutes
|
||||||
|
|
||||||
|
#### 6. **fileParser.ts**
|
||||||
|
- **Lines of Spanish:** ~3 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 15 minutes
|
||||||
|
|
||||||
|
#### 7. **apiClient.ts**
|
||||||
|
- **Lines of Spanish:** ~2 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 10 minutes
|
||||||
|
|
||||||
|
#### 8. **serverCache.ts**
|
||||||
|
- **Lines of Spanish:** ~2 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 10 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### MEDIUM PRIORITY - Backend Dimensions
|
||||||
|
|
||||||
|
#### 9. **backend/beyond_metrics/dimensions/OperationalPerformance.py**
|
||||||
|
- **Lines of Spanish:** ~7 occurrences
|
||||||
|
- **Impact:** MEDIUM
|
||||||
|
- **Estimated effort:** 30 minutes
|
||||||
|
|
||||||
|
#### 10. **backend/beyond_metrics/dimensions/SatisfactionExperience.py**
|
||||||
|
- **Lines of Spanish:** ~8 occurrences
|
||||||
|
- **Impact:** MEDIUM
|
||||||
|
- **Estimated effort:** 30 minutes
|
||||||
|
|
||||||
|
#### 11. **backend/beyond_metrics/dimensions/EconomyCost.py**
|
||||||
|
- **Lines of Spanish:** ~4 occurrences
|
||||||
|
- **Impact:** MEDIUM
|
||||||
|
- **Estimated effort:** 20 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### LOW PRIORITY - API & Services
|
||||||
|
|
||||||
|
#### 12. **backend/beyond_api/api/analysis.py**
|
||||||
|
- **Lines of Spanish:** ~1 occurrence
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 5 minutes
|
||||||
|
|
||||||
|
#### 13. **backend/beyond_api/api/auth.py**
|
||||||
|
- **Lines of Spanish:** ~1 occurrence
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 5 minutes
|
||||||
|
|
||||||
|
#### 14. **backend/beyond_api/services/analysis_service.py**
|
||||||
|
- **Lines of Spanish:** ~2 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 10 minutes
|
||||||
|
|
||||||
|
#### 15. **backend/beyond_metrics/io/base.py**
|
||||||
|
- **Lines of Spanish:** ~1 occurrence
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 5 minutes
|
||||||
|
|
||||||
|
#### 16. **backend/beyond_metrics/io/google_drive.py**
|
||||||
|
- **Lines of Spanish:** ~2 occurrences
|
||||||
|
- **Impact:** LOW
|
||||||
|
- **Estimated effort:** 10 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📊 Summary Statistics
|
||||||
|
|
||||||
|
| Category | Files | Total Occurrences | Estimated Time |
|
||||||
|
|----------|-------|-------------------|----------------|
|
||||||
|
| ✅ Completed | 4 | ~150 | 3 hours (DONE) |
|
||||||
|
| 🔴 High Priority | 3 | 154 | 4-6 hours |
|
||||||
|
| 🟡 Medium Priority | 8 | 35 | 2-3 hours |
|
||||||
|
| 🟢 Low Priority | 5 | 7 | 45 minutes |
|
||||||
|
| **TOTAL PENDING** | **16** | **196** | **~8 hours** |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🎯 Recommended Translation Order
|
||||||
|
|
||||||
|
### Phase 1: Critical Path (High Priority)
|
||||||
|
1. `realDataAnalysis.ts` - Core analysis engine with `clasificarTier()` functions
|
||||||
|
2. `analysisGenerator.ts` - Main data generation orchestrator
|
||||||
|
3. `backendMapper.ts` - Backend integration layer
|
||||||
|
|
||||||
|
### Phase 2: Supporting Utils (Medium Priority)
|
||||||
|
4. `dataTransformation.ts`
|
||||||
|
5. Backend dimension files (`OperationalPerformance.py`, `SatisfactionExperience.py`, `EconomyCost.py`)
|
||||||
|
|
||||||
|
### Phase 3: Final Cleanup (Low Priority)
|
||||||
|
6. Remaining utility files and API services
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📝 Notes
|
||||||
|
|
||||||
|
- **Variable names** like `volumen_mes`, `escalación`, etc. in data interfaces should **remain as-is** for API compatibility
|
||||||
|
- **Function names** that are part of the public API should be carefully reviewed before renaming
|
||||||
|
- **i18n strings** in locales files should continue to have both EN/ES versions
|
||||||
|
- **Reason codes** and internal enums should be in English for consistency
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Last Updated:** 2026-02-07
|
||||||
|
**Status:** agenticReadiness module completed, 16 modules pending
|
||||||
@@ -1,22 +1,22 @@
|
|||||||
"""
|
"""
|
||||||
agentic_score.py
|
agentic_score.py
|
||||||
|
|
||||||
Calcula el Agentic Readiness Score de un contact center a partir
|
Calculates the Agentic Readiness Score of a contact center from
|
||||||
de un JSON con KPIs agregados (misma estructura que results.json).
|
a JSON file with aggregated KPIs (same structure as results.json).
|
||||||
|
|
||||||
Diseñado como clase para integrarse fácilmente en pipelines.
|
Designed as a class to integrate easily into pipelines.
|
||||||
|
|
||||||
Características:
|
Features:
|
||||||
- Tolerante a datos faltantes: si una dimensión no se puede calcular
|
- Tolerant to missing data: if a dimension cannot be calculated
|
||||||
(porque faltan KPIs), se marca como `computed = False` y no se
|
(due to missing KPIs), it is marked as `computed = False` and not
|
||||||
incluye en el cálculo del score global.
|
included in the global score calculation.
|
||||||
- La llamada típica en un pipeline será:
|
- Typical pipeline call:
|
||||||
from agentic_score import AgenticScorer
|
from agentic_score import AgenticScorer
|
||||||
scorer = AgenticScorer()
|
scorer = AgenticScorer()
|
||||||
result = scorer.run_on_folder("/ruta/a/carpeta")
|
result = scorer.run_on_folder("/path/to/folder")
|
||||||
|
|
||||||
Esa carpeta debe contener un `results.json` de entrada.
|
The folder must contain a `results.json` input file.
|
||||||
El módulo generará un `agentic_readiness.json` en la misma carpeta.
|
The module will generate an `agentic_readiness.json` in the same folder.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
@@ -35,7 +35,7 @@ Number = Union[int, float]
|
|||||||
# =========================
|
# =========================
|
||||||
|
|
||||||
def _is_nan(x: Any) -> bool:
|
def _is_nan(x: Any) -> bool:
|
||||||
"""Devuelve True si x es NaN, None o el string 'NaN'."""
|
"""Returns True if x is NaN, None or the string 'NaN'."""
|
||||||
try:
|
try:
|
||||||
if x is None:
|
if x is None:
|
||||||
return True
|
return True
|
||||||
@@ -60,7 +60,7 @@ def _safe_mean(values: Sequence[Optional[Number]]) -> Optional[float]:
|
|||||||
|
|
||||||
|
|
||||||
def _get_nested(d: Dict[str, Any], *keys: str, default: Any = None) -> Any:
|
def _get_nested(d: Dict[str, Any], *keys: str, default: Any = None) -> Any:
|
||||||
"""Acceso seguro a diccionarios anidados."""
|
"""Safe access to nested dictionaries."""
|
||||||
cur: Any = d
|
cur: Any = d
|
||||||
for k in keys:
|
for k in keys:
|
||||||
if not isinstance(cur, dict) or k not in cur:
|
if not isinstance(cur, dict) or k not in cur:
|
||||||
@@ -75,20 +75,20 @@ def _clamp(value: float, lo: float = 0.0, hi: float = 10.0) -> float:
|
|||||||
|
|
||||||
def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
|
def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
|
||||||
"""
|
"""
|
||||||
Normaliza un campo que representa una secuencia numérica.
|
Normalizes a field representing a numeric sequence.
|
||||||
|
|
||||||
Soporta:
|
Supports:
|
||||||
- Formato antiguo del pipeline: [10, 20, 30]
|
- Old pipeline format: [10, 20, 30]
|
||||||
- Formato nuevo del pipeline: {"labels": [...], "values": [10, 20, 30]}
|
- New pipeline format: {"labels": [...], "values": [10, 20, 30]}
|
||||||
|
|
||||||
Devuelve:
|
Returns:
|
||||||
- lista de números, si hay datos numéricos válidos
|
- list of numbers, if there is valid numeric data
|
||||||
- None, si el campo no tiene una secuencia numérica interpretable
|
- None, if the field does not have an interpretable numeric sequence
|
||||||
"""
|
"""
|
||||||
if field is None:
|
if field is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Formato nuevo: {"labels": [...], "values": [...]}
|
# New format: {"labels": [...], "values": [...]}
|
||||||
if isinstance(field, dict) and "values" in field:
|
if isinstance(field, dict) and "values" in field:
|
||||||
seq = field.get("values")
|
seq = field.get("values")
|
||||||
else:
|
else:
|
||||||
@@ -102,7 +102,7 @@ def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
|
|||||||
if isinstance(v, (int, float)):
|
if isinstance(v, (int, float)):
|
||||||
out.append(v)
|
out.append(v)
|
||||||
else:
|
else:
|
||||||
# Intentamos conversión suave por si viene como string numérico
|
# Try soft conversion in case it's a numeric string
|
||||||
try:
|
try:
|
||||||
out.append(float(v))
|
out.append(float(v))
|
||||||
except (TypeError, ValueError):
|
except (TypeError, ValueError):
|
||||||
@@ -117,21 +117,21 @@ def _normalize_numeric_sequence(field: Any) -> Optional[List[Number]]:
|
|||||||
|
|
||||||
def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, Any]:
|
def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Repetitividad basada en volumen medio por skill.
|
Repeatability based on average volume per skill.
|
||||||
|
|
||||||
Regla (pensada por proceso/skill):
|
Rule (designed per process/skill):
|
||||||
- 10 si volumen > 80
|
- 10 if volume > 80
|
||||||
- 5 si 40–80
|
- 5 if 40–80
|
||||||
- 0 si < 40
|
- 0 if < 40
|
||||||
|
|
||||||
Si no hay datos (lista vacía o no numérica), la dimensión
|
If there is no data (empty or non-numeric list), the dimension
|
||||||
se marca como no calculada (computed = False).
|
is marked as not calculated (computed = False).
|
||||||
"""
|
"""
|
||||||
if not volume_by_skill:
|
if not volume_by_skill:
|
||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos_volumen",
|
"reason": "no_volume_data",
|
||||||
"details": {
|
"details": {
|
||||||
"avg_volume_per_skill": None,
|
"avg_volume_per_skill": None,
|
||||||
"volume_by_skill": volume_by_skill,
|
"volume_by_skill": volume_by_skill,
|
||||||
@@ -143,7 +143,7 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
|
|||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "volumen_no_numerico",
|
"reason": "volume_not_numeric",
|
||||||
"details": {
|
"details": {
|
||||||
"avg_volume_per_skill": None,
|
"avg_volume_per_skill": None,
|
||||||
"volume_by_skill": volume_by_skill,
|
"volume_by_skill": volume_by_skill,
|
||||||
@@ -152,13 +152,13 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
|
|||||||
|
|
||||||
if avg_volume > 80:
|
if avg_volume > 80:
|
||||||
score = 10.0
|
score = 10.0
|
||||||
reason = "alto_volumen"
|
reason = "high_volume"
|
||||||
elif avg_volume >= 40:
|
elif avg_volume >= 40:
|
||||||
score = 5.0
|
score = 5.0
|
||||||
reason = "volumen_medio"
|
reason = "medium_volume"
|
||||||
else:
|
else:
|
||||||
score = 0.0
|
score = 0.0
|
||||||
reason = "volumen_bajo"
|
reason = "low_volume"
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": score,
|
"score": score,
|
||||||
@@ -178,36 +178,36 @@ def score_repetitividad(volume_by_skill: Optional[List[Number]]) -> Dict[str, An
|
|||||||
def score_predictibilidad(aht_ratio: Any,
|
def score_predictibilidad(aht_ratio: Any,
|
||||||
escalation_rate: Any) -> Dict[str, Any]:
|
escalation_rate: Any) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Predictibilidad basada en:
|
Predictability based on:
|
||||||
- Variabilidad AHT: ratio P90/P50
|
- AHT variability: ratio P90/P50
|
||||||
- Tasa de escalación (%)
|
- Escalation rate (%)
|
||||||
|
|
||||||
Regla:
|
Rule:
|
||||||
- 10 si ratio < 1.5 y escalación < 10%
|
- 10 if ratio < 1.5 and escalation < 10%
|
||||||
- 5 si ratio 1.5–2.0 o escalación 10–20%
|
- 5 if ratio 1.5–2.0 or escalation 10–20%
|
||||||
- 0 si ratio > 2.0 y escalación > 20%
|
- 0 if ratio > 2.0 and escalation > 20%
|
||||||
- 3 fallback si datos parciales
|
- 3 fallback if partial data
|
||||||
|
|
||||||
Si no hay ni ratio ni escalación, la dimensión no se calcula.
|
If there is no ratio nor escalation, the dimension is not calculated.
|
||||||
"""
|
"""
|
||||||
if aht_ratio is None and escalation_rate is None:
|
if aht_ratio is None and escalation_rate is None:
|
||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos",
|
"reason": "no_data",
|
||||||
"details": {
|
"details": {
|
||||||
"aht_p90_p50_ratio": None,
|
"aht_p90_p50_ratio": None,
|
||||||
"escalation_rate_pct": None,
|
"escalation_rate_pct": None,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
# Normalizamos ratio
|
# Normalize ratio
|
||||||
if aht_ratio is None or _is_nan(aht_ratio):
|
if aht_ratio is None or _is_nan(aht_ratio):
|
||||||
ratio: Optional[float] = None
|
ratio: Optional[float] = None
|
||||||
else:
|
else:
|
||||||
ratio = float(aht_ratio)
|
ratio = float(aht_ratio)
|
||||||
|
|
||||||
# Normalizamos escalación
|
# Normalize escalation
|
||||||
if escalation_rate is None or _is_nan(escalation_rate):
|
if escalation_rate is None or _is_nan(escalation_rate):
|
||||||
esc: Optional[float] = None
|
esc: Optional[float] = None
|
||||||
else:
|
else:
|
||||||
@@ -217,7 +217,7 @@ def score_predictibilidad(aht_ratio: Any,
|
|||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos",
|
"reason": "no_data",
|
||||||
"details": {
|
"details": {
|
||||||
"aht_p90_p50_ratio": None,
|
"aht_p90_p50_ratio": None,
|
||||||
"escalation_rate_pct": None,
|
"escalation_rate_pct": None,
|
||||||
@@ -230,20 +230,20 @@ def score_predictibilidad(aht_ratio: Any,
|
|||||||
if ratio is not None and esc is not None:
|
if ratio is not None and esc is not None:
|
||||||
if ratio < 1.5 and esc < 10.0:
|
if ratio < 1.5 and esc < 10.0:
|
||||||
score = 10.0
|
score = 10.0
|
||||||
reason = "alta_predictibilidad"
|
reason = "high_predictability"
|
||||||
elif (1.5 <= ratio <= 2.0) or (10.0 <= esc <= 20.0):
|
elif (1.5 <= ratio <= 2.0) or (10.0 <= esc <= 20.0):
|
||||||
score = 5.0
|
score = 5.0
|
||||||
reason = "predictibilidad_media"
|
reason = "medium_predictability"
|
||||||
elif ratio > 2.0 and esc > 20.0:
|
elif ratio > 2.0 and esc > 20.0:
|
||||||
score = 0.0
|
score = 0.0
|
||||||
reason = "baja_predictibilidad"
|
reason = "low_predictability"
|
||||||
else:
|
else:
|
||||||
score = 3.0
|
score = 3.0
|
||||||
reason = "caso_intermedio"
|
reason = "intermediate_case"
|
||||||
else:
|
else:
|
||||||
# Datos parciales: penalizamos pero no ponemos a 0
|
# Partial data: penalize but do not set to 0
|
||||||
score = 3.0
|
score = 3.0
|
||||||
reason = "datos_parciales"
|
reason = "partial_data"
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": score,
|
"score": score,
|
||||||
@@ -263,23 +263,23 @@ def score_predictibilidad(aht_ratio: Any,
|
|||||||
|
|
||||||
def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
|
def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Estructuración de datos usando proxy de canal.
|
Data structuring using channel proxy.
|
||||||
|
|
||||||
Asumimos que el canal con mayor % es texto (en proyectos reales se puede
|
We assume the channel with the highest % is text (in real projects this assignment
|
||||||
parametrizar esta asignación).
|
can be parameterized).
|
||||||
|
|
||||||
Regla:
|
Rule:
|
||||||
- 10 si texto > 60%
|
- 10 if text > 60%
|
||||||
- 5 si 30–60%
|
- 5 if 30–60%
|
||||||
- 0 si < 30%
|
- 0 if < 30%
|
||||||
|
|
||||||
Si no hay datos de canales, la dimensión no se calcula.
|
If there is no channel data, the dimension is not calculated.
|
||||||
"""
|
"""
|
||||||
if not channel_distribution_pct:
|
if not channel_distribution_pct:
|
||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos_canal",
|
"reason": "no_channel_data",
|
||||||
"details": {
|
"details": {
|
||||||
"estimated_text_share_pct": None,
|
"estimated_text_share_pct": None,
|
||||||
"channel_distribution_pct": channel_distribution_pct,
|
"channel_distribution_pct": channel_distribution_pct,
|
||||||
@@ -299,7 +299,7 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
|
|||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "canales_no_numericos",
|
"reason": "channels_not_numeric",
|
||||||
"details": {
|
"details": {
|
||||||
"estimated_text_share_pct": None,
|
"estimated_text_share_pct": None,
|
||||||
"channel_distribution_pct": channel_distribution_pct,
|
"channel_distribution_pct": channel_distribution_pct,
|
||||||
@@ -308,13 +308,13 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
|
|||||||
|
|
||||||
if max_share > 60.0:
|
if max_share > 60.0:
|
||||||
score = 10.0
|
score = 10.0
|
||||||
reason = "alta_proporcion_texto"
|
reason = "high_text_proportion"
|
||||||
elif max_share >= 30.0:
|
elif max_share >= 30.0:
|
||||||
score = 5.0
|
score = 5.0
|
||||||
reason = "proporcion_texto_media"
|
reason = "medium_text_proportion"
|
||||||
else:
|
else:
|
||||||
score = 0.0
|
score = 0.0
|
||||||
reason = "baja_proporcion_texto"
|
reason = "low_text_proportion"
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": score,
|
"score": score,
|
||||||
@@ -334,9 +334,9 @@ def score_estructuracion(channel_distribution_pct: Any) -> Dict[str, Any]:
|
|||||||
def score_complejidad(aht_ratio: Any,
|
def score_complejidad(aht_ratio: Any,
|
||||||
escalation_rate: Any) -> Dict[str, Any]:
|
escalation_rate: Any) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Complejidad inversa del proceso (0–10).
|
Inverse complexity of the process (0–10).
|
||||||
|
|
||||||
1) Base: inversa lineal de la variabilidad AHT (ratio P90/P50):
|
1) Base: linear inverse of the AHT variability (P90/P50 ratio):
|
||||||
- ratio = 1.0 -> 10
|
- ratio = 1.0 -> 10
|
||||||
- ratio = 1.5 -> ~7.5
|
- ratio = 1.5 -> ~7.5
|
||||||
- ratio = 2.0 -> 5
|
- ratio = 2.0 -> 5
|
||||||
@@ -345,12 +345,12 @@ def score_complejidad(aht_ratio: Any,
|
|||||||
|
|
||||||
formula_base = (3 - ratio) / (3 - 1) * 10, acotado a [0,10]
|
formula_base = (3 - ratio) / (3 - 1) * 10, clamped to [0,10]
|
||||||
|
|
||||||
2) Ajuste por escalación:
|
2) Escalation adjustment:
|
||||||
- restamos (escalation_rate / 5) puntos.
|
- we subtract (escalation_rate / 5) points.
|
||||||
|
|
||||||
Nota: más score = proceso más "simple / automatizable".
|
Note: higher score = simpler / more automatable process.
|
||||||
|
|
||||||
Si no hay ni ratio ni escalación, la dimensión no se calcula.
|
If there is no ratio nor escalation, the dimension is not calculated.
|
||||||
"""
|
"""
|
||||||
if aht_ratio is None or _is_nan(aht_ratio):
|
if aht_ratio is None or _is_nan(aht_ratio):
|
||||||
ratio: Optional[float] = None
|
ratio: Optional[float] = None
|
||||||
@@ -366,36 +366,36 @@ def score_complejidad(aht_ratio: Any,
|
|||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos",
|
"reason": "no_data",
|
||||||
"details": {
|
"details": {
|
||||||
"aht_p90_p50_ratio": None,
|
"aht_p90_p50_ratio": None,
|
||||||
"escalation_rate_pct": None,
|
"escalation_rate_pct": None,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
# Base por variabilidad
|
# Base for variability
|
||||||
if ratio is None:
|
if ratio is None:
|
||||||
base = 5.0 # fallback neutro
|
base = 5.0 # neutral fallback
|
||||||
base_reason = "sin_ratio_usamos_valor_neutro"
|
base_reason = "no_ratio_using_neutral_value"
|
||||||
else:
|
else:
|
||||||
base_raw = (3.0 - ratio) / (3.0 - 1.0) * 10.0
|
base_raw = (3.0 - ratio) / (3.0 - 1.0) * 10.0
|
||||||
base = _clamp(base_raw)
|
base = _clamp(base_raw)
|
||||||
base_reason = "calculado_desde_ratio"
|
base_reason = "calculated_from_ratio"
|
||||||
|
|
||||||
# Ajuste por escalación
|
# Escalation adjustment
|
||||||
if esc is None:
|
if esc is None:
|
||||||
adj = 0.0
|
adj = 0.0
|
||||||
adj_reason = "sin_escalacion_sin_ajuste"
|
adj_reason = "no_escalation_no_adjustment"
|
||||||
else:
|
else:
|
||||||
adj = - (esc / 5.0) # cada 5 puntos de escalación resta 1
|
adj = - (esc / 5.0) # every 5 escalation points subtract 1
|
||||||
adj_reason = "ajuste_por_escalacion"
|
adj_reason = "escalation_adjustment"
|
||||||
|
|
||||||
final_score = _clamp(base + adj)
|
final_score = _clamp(base + adj)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": final_score,
|
"score": final_score,
|
||||||
"computed": True,
|
"computed": True,
|
||||||
"reason": "complejidad_inversa",
|
"reason": "inverse_complexity",
|
||||||
"details": {
|
"details": {
|
||||||
"aht_p90_p50_ratio": ratio,
|
"aht_p90_p50_ratio": ratio,
|
||||||
"escalation_rate_pct": esc,
|
"escalation_rate_pct": esc,
|
||||||
@@ -409,21 +409,21 @@ def score_complejidad(aht_ratio: Any,
|
|||||||
|
|
||||||
def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
|
def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Estabilidad del proceso basada en relación pico/off-peak.
|
Process stability based on peak/off-peak ratio.
|
||||||
|
|
||||||
Regla:
|
Rule:
|
||||||
- 10 si ratio < 3
|
- 10 if ratio < 3
|
||||||
- 7 si 3–5
|
- 7 if 3–5
|
||||||
- 3 si 5–7
|
- 3 if 5–7
|
||||||
- 0 si > 7
|
- 0 if > 7
|
||||||
|
|
||||||
Si no hay dato de ratio, la dimensión no se calcula.
|
If there is no ratio data, the dimension is not calculated.
|
||||||
"""
|
"""
|
||||||
if peak_offpeak_ratio is None or _is_nan(peak_offpeak_ratio):
|
if peak_offpeak_ratio is None or _is_nan(peak_offpeak_ratio):
|
||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos_peak_offpeak",
|
"reason": "no_peak_offpeak_data",
|
||||||
"details": {
|
"details": {
|
||||||
"peak_offpeak_ratio": None,
|
"peak_offpeak_ratio": None,
|
||||||
},
|
},
|
||||||
@@ -432,16 +432,16 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
|
|||||||
r = float(peak_offpeak_ratio)
|
r = float(peak_offpeak_ratio)
|
||||||
if r < 3.0:
|
if r < 3.0:
|
||||||
score = 10.0
|
score = 10.0
|
||||||
reason = "muy_estable"
|
reason = "very_stable"
|
||||||
elif r < 5.0:
|
elif r < 5.0:
|
||||||
score = 7.0
|
score = 7.0
|
||||||
reason = "estable_moderado"
|
reason = "moderately_stable"
|
||||||
elif r < 7.0:
|
elif r < 7.0:
|
||||||
score = 3.0
|
score = 3.0
|
||||||
reason = "pico_pronunciado"
|
reason = "pronounced_peak"
|
||||||
else:
|
else:
|
||||||
score = 0.0
|
score = 0.0
|
||||||
reason = "muy_inestable"
|
reason = "very_unstable"
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": score,
|
"score": score,
|
||||||
@@ -460,20 +460,20 @@ def score_estabilidad(peak_offpeak_ratio: Any) -> Dict[str, Any]:
|
|||||||
|
|
||||||
def score_roi(annual_savings: Any) -> Dict[str, Any]:
|
def score_roi(annual_savings: Any) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
ROI potencial anual.
|
Annual potential ROI.
|
||||||
|
|
||||||
Regla:
|
Rule:
|
||||||
- 10 si ahorro > 100k €/año
|
- 10 if savings > 100k €/year
|
||||||
- 5 si 10k–100k €/año
|
- 5 if 10k–100k €/year
|
||||||
- 0 si < 10k €/año
|
- 0 if < 10k €/year
|
||||||
|
|
||||||
Si no hay dato de ahorro, la dimensión no se calcula.
|
If there is no savings data, the dimension is not calculated.
|
||||||
"""
|
"""
|
||||||
if annual_savings is None or _is_nan(annual_savings):
|
if annual_savings is None or _is_nan(annual_savings):
|
||||||
return {
|
return {
|
||||||
"score": None,
|
"score": None,
|
||||||
"computed": False,
|
"computed": False,
|
||||||
"reason": "sin_datos_ahorro",
|
"reason": "no_savings_data",
|
||||||
"details": {
|
"details": {
|
||||||
"annual_savings_eur": None,
|
"annual_savings_eur": None,
|
||||||
},
|
},
|
||||||
@@ -482,13 +482,13 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]:
|
|||||||
savings = float(annual_savings)
|
savings = float(annual_savings)
|
||||||
if savings > 100_000:
|
if savings > 100_000:
|
||||||
score = 10.0
|
score = 10.0
|
||||||
reason = "roi_alto"
|
reason = "high_roi"
|
||||||
elif savings >= 10_000:
|
elif savings >= 10_000:
|
||||||
score = 5.0
|
score = 5.0
|
||||||
reason = "roi_medio"
|
reason = "medium_roi"
|
||||||
else:
|
else:
|
||||||
score = 0.0
|
score = 0.0
|
||||||
reason = "roi_bajo"
|
reason = "low_roi"
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"score": score,
|
"score": score,
|
||||||
@@ -506,20 +506,20 @@ def score_roi(annual_savings: Any) -> Dict[str, Any]:
|
|||||||
|
|
||||||
def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]:
|
def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Clasificación final (alineada con frontend):
|
Final classification (aligned with frontend):
|
||||||
- ≥6: COPILOT 🤖 (Listo para Copilot)
|
- ≥6: COPILOT 🤖 (Ready for Copilot)
|
||||||
- 4–5.99: OPTIMIZE 🔧 (Optimizar Primero)
|
- 4–5.99: OPTIMIZE 🔧 (Optimize First)
|
||||||
- <4: HUMAN 👤 (Requiere Gestión Humana)
|
- <4: HUMAN 👤 (Requires Human Management)
|
||||||
|
|
||||||
Si score es None (ninguna dimensión disponible), devuelve NO_DATA.
|
If score is None (no dimension available), returns NO_DATA.
|
||||||
"""
|
"""
|
||||||
if score is None:
|
if score is None:
|
||||||
return {
|
return {
|
||||||
"label": "NO_DATA",
|
"label": "NO_DATA",
|
||||||
"emoji": "❓",
|
"emoji": "❓",
|
||||||
"description": (
|
"description": (
|
||||||
"No se ha podido calcular el Agentic Readiness Score porque "
|
"Could not calculate the Agentic Readiness Score because "
|
||||||
"ninguna de las dimensiones tenía datos suficientes."
|
"none of the dimensions had sufficient data."
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -527,22 +527,22 @@ def classify_agentic_score(score: Optional[float]) -> Dict[str, Any]:
|
|||||||
label = "COPILOT"
|
label = "COPILOT"
|
||||||
emoji = "🤖"
|
emoji = "🤖"
|
||||||
description = (
|
description = (
|
||||||
"Listo para Copilot. Procesos con predictibilidad y simplicidad "
|
"Ready for Copilot. Processes with sufficient predictability and simplicity "
|
||||||
"suficientes para asistencia IA (sugerencias en tiempo real, autocompletado)."
|
"for AI assistance (real-time suggestions, autocomplete)."
|
||||||
)
|
)
|
||||||
elif score >= 4.0:
|
elif score >= 4.0:
|
||||||
label = "OPTIMIZE"
|
label = "OPTIMIZE"
|
||||||
emoji = "🔧"
|
emoji = "🔧"
|
||||||
description = (
|
description = (
|
||||||
"Optimizar primero. Estandarizar procesos y reducir variabilidad "
|
"Optimize first. Standardize processes and reduce variability "
|
||||||
"antes de implementar asistencia IA."
|
"before implementing AI assistance."
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
label = "HUMAN"
|
label = "HUMAN"
|
||||||
emoji = "👤"
|
emoji = "👤"
|
||||||
description = (
|
description = (
|
||||||
"Requiere gestión humana. Procesos complejos o variables que "
|
"Requires human management. Complex or variable processes that "
|
||||||
"necesitan intervención humana antes de considerar automatización."
|
"need human intervention before considering automation."
|
||||||
)
|
)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
@@ -604,22 +604,22 @@ class AgenticScorer:
|
|||||||
|
|
||||||
def compute_from_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
def compute_from_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Calcula el Agentic Readiness Score a partir de un dict de datos.
|
Calculates the Agentic Readiness Score from a data dict.
|
||||||
|
|
||||||
Tolerante a datos faltantes: renormaliza pesos usando solo
|
Tolerant to missing data: renormalizes weights using only
|
||||||
dimensiones con `computed = True`.
|
dimensions with `computed = True`.
|
||||||
|
|
||||||
Compatibilidad con pipeline:
|
Pipeline compatibility:
|
||||||
- Soporta tanto el formato antiguo:
|
- Supports both the old format:
|
||||||
"volume_by_skill": [10, 20, 30]
|
"volume_by_skill": [10, 20, 30]
|
||||||
- como el nuevo:
|
- and the new:
|
||||||
"volume_by_skill": {"labels": [...], "values": [10, 20, 30]}
|
"volume_by_skill": {"labels": [...], "values": [10, 20, 30]}
|
||||||
"""
|
"""
|
||||||
volumetry = data.get("volumetry", {})
|
volumetry = data.get("volumetry", {})
|
||||||
op = data.get("operational_performance", {})
|
op = data.get("operational_performance", {})
|
||||||
econ = data.get("economy_costs", {})
|
econ = data.get("economy_costs", {})
|
||||||
|
|
||||||
# Normalizamos aquí los posibles formatos para contentar al type checker
|
# Normalize here the possible formats for the type checker
|
||||||
volume_by_skill = _normalize_numeric_sequence(
|
volume_by_skill = _normalize_numeric_sequence(
|
||||||
volumetry.get("volume_by_skill")
|
volumetry.get("volume_by_skill")
|
||||||
)
|
)
|
||||||
@@ -650,7 +650,7 @@ class AgenticScorer:
|
|||||||
"roi": roi,
|
"roi": roi,
|
||||||
}
|
}
|
||||||
|
|
||||||
# --- Renormalización de pesos sólo con dimensiones disponibles ---
|
# --- Weight renormalization only with available dimensions ---
|
||||||
effective_weights: Dict[str, float] = {}
|
effective_weights: Dict[str, float] = {}
|
||||||
for name, base_w in self.base_weights.items():
|
for name, base_w in self.base_weights.items():
|
||||||
dim = sub_scores.get(name, {})
|
dim = sub_scores.get(name, {})
|
||||||
@@ -665,7 +665,7 @@ class AgenticScorer:
|
|||||||
else:
|
else:
|
||||||
normalized_weights = {}
|
normalized_weights = {}
|
||||||
|
|
||||||
# --- Score final ---
|
# --- Final score ---
|
||||||
if not normalized_weights:
|
if not normalized_weights:
|
||||||
final_score: Optional[float] = None
|
final_score: Optional[float] = None
|
||||||
else:
|
else:
|
||||||
@@ -692,8 +692,8 @@ class AgenticScorer:
|
|||||||
"metadata": {
|
"metadata": {
|
||||||
"source_module": "agentic_score.py",
|
"source_module": "agentic_score.py",
|
||||||
"notes": (
|
"notes": (
|
||||||
"Modelo simplificado basado en KPIs agregados. "
|
"Simplified model based on aggregated KPIs. "
|
||||||
"Renormaliza los pesos cuando faltan dimensiones."
|
"Renormalizes weights when dimensions are missing."
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -710,11 +710,11 @@ class AgenticScorer:
|
|||||||
|
|
||||||
def run_on_folder(self, folder_path: Union[str, Path]) -> Dict[str, Any]:
|
def run_on_folder(self, folder_path: Union[str, Path]) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Punto de entrada típico para el pipeline:
|
Typical pipeline entry point:
|
||||||
- Lee <folder>/results.json
|
- Reads <folder>/results.json
|
||||||
- Calcula Agentic Readiness
|
- Calculates Agentic Readiness
|
||||||
- Escribe <folder>/agentic_readiness.json
|
- Writes <folder>/agentic_readiness.json
|
||||||
- Devuelve el dict con el resultado
|
- Returns the dict with the result
|
||||||
"""
|
"""
|
||||||
data = self.load_results(folder_path)
|
data = self.load_results(folder_path)
|
||||||
result = self.compute_from_data(data)
|
result = self.compute_from_data(data)
|
||||||
|
|||||||
@@ -23,17 +23,16 @@ REQUIRED_COLUMNS_ECON: List[str] = [
|
|||||||
@dataclass
|
@dataclass
|
||||||
class EconomyConfig:
|
class EconomyConfig:
|
||||||
"""
|
"""
|
||||||
Parámetros manuales para la dimensión de Economía y Costes.
|
Manual parameters for the Economy and Cost dimension.
|
||||||
|
|
||||||
- labor_cost_per_hour: coste total/hora de un agente (fully loaded).
|
- labor_cost_per_hour: total cost/hour of an agent (fully loaded).
|
||||||
- overhead_rate: % overhead variable (ej. 0.1 = 10% sobre labor).
|
- overhead_rate: % variable overhead (e.g. 0.1 = 10% over labor).
|
||||||
- tech_costs_annual: coste anual de tecnología (licencias, infra, ...).
|
- tech_costs_annual: annual technology cost (licenses, infrastructure, ...).
|
||||||
- automation_cpi: coste por interacción automatizada (ej. 0.15€).
|
- automation_cpi: cost per automated interaction (e.g. 0.15€).
|
||||||
- automation_volume_share: % del volumen automatizable (0-1).
|
- automation_volume_share: % of automatable volume (0-1).
|
||||||
- automation_success_rate: % éxito de la automatización (0-1).
|
- automation_success_rate: % automation success (0-1).
|
||||||
|
|
||||||
- customer_segments: mapping opcional skill -> segmento ("high"/"medium"/"low")
|
- customer_segments: optional mapping skill -> segment ("high"/"medium"/"low") for future ROI insights by segment.
|
||||||
para futuros insights de ROI por segmento.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
labor_cost_per_hour: float
|
labor_cost_per_hour: float
|
||||||
@@ -48,20 +47,20 @@ class EconomyConfig:
|
|||||||
@dataclass
|
@dataclass
|
||||||
class EconomyCostMetrics:
|
class EconomyCostMetrics:
|
||||||
"""
|
"""
|
||||||
DIMENSIÓN 4: ECONOMÍA y COSTES
|
DIMENSION 4: ECONOMY and COSTS
|
||||||
|
|
||||||
Propósito:
|
Purpose:
|
||||||
- Cuantificar el COSTE actual (CPI, coste anual).
|
- Quantify the current COST (CPI, annual cost).
|
||||||
- Estimar el impacto de overhead y tecnología.
|
- Estimate the impact of overhead and technology.
|
||||||
- Calcular un primer estimado de "coste de ineficiencia" y ahorro potencial.
|
- Calculate an initial estimate of "inefficiency cost" and potential savings.
|
||||||
|
|
||||||
Requiere:
|
Requires:
|
||||||
- Columnas del dataset transaccional (ver REQUIRED_COLUMNS_ECON).
|
- Columns from the transactional dataset (see REQUIRED_COLUMNS_ECON).
|
||||||
|
|
||||||
Inputs opcionales vía EconomyConfig:
|
Optional inputs via EconomyConfig:
|
||||||
- labor_cost_per_hour (obligatorio para cualquier cálculo de €).
|
- labor_cost_per_hour (required for any € calculation).
|
||||||
- overhead_rate, tech_costs_annual, automation_*.
|
- overhead_rate, tech_costs_annual, automation_*.
|
||||||
- customer_segments (para insights de ROI por segmento).
|
- customer_segments (for ROI insights by segment).
|
||||||
"""
|
"""
|
||||||
|
|
||||||
df: pd.DataFrame
|
df: pd.DataFrame
|
||||||
@@ -72,13 +71,13 @@ class EconomyCostMetrics:
|
|||||||
self._prepare_data()
|
self._prepare_data()
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# Helpers internos
|
# Internal helpers
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def _validate_columns(self) -> None:
|
def _validate_columns(self) -> None:
|
||||||
missing = [c for c in REQUIRED_COLUMNS_ECON if c not in self.df.columns]
|
missing = [c for c in REQUIRED_COLUMNS_ECON if c not in self.df.columns]
|
||||||
if missing:
|
if missing:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"Faltan columnas obligatorias para EconomyCostMetrics: {missing}"
|
f"Missing required columns for EconomyCostMetrics: {missing}"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _prepare_data(self) -> None:
|
def _prepare_data(self) -> None:
|
||||||
@@ -97,15 +96,15 @@ class EconomyCostMetrics:
|
|||||||
df["duration_talk"].fillna(0)
|
df["duration_talk"].fillna(0)
|
||||||
+ df["hold_time"].fillna(0)
|
+ df["hold_time"].fillna(0)
|
||||||
+ df["wrap_up_time"].fillna(0)
|
+ df["wrap_up_time"].fillna(0)
|
||||||
) # segundos
|
) # seconds
|
||||||
|
|
||||||
# Filtrar por record_status para cálculos de AHT/CPI
|
# Filter by record_status for AHT/CPI calculations
|
||||||
# Solo incluir registros VALID (excluir NOISE, ZOMBIE, ABANDON)
|
# Only include VALID records (exclude NOISE, ZOMBIE, ABANDON)
|
||||||
if "record_status" in df.columns:
|
if "record_status" in df.columns:
|
||||||
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
|
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
|
||||||
df["_is_valid_for_cost"] = df["record_status"] == "VALID"
|
df["_is_valid_for_cost"] = df["record_status"] == "VALID"
|
||||||
else:
|
else:
|
||||||
# Legacy data sin record_status: incluir todo
|
# Legacy data without record_status: include all
|
||||||
df["_is_valid_for_cost"] = True
|
df["_is_valid_for_cost"] = True
|
||||||
|
|
||||||
self.df = df
|
self.df = df
|
||||||
@@ -118,11 +117,11 @@ class EconomyCostMetrics:
|
|||||||
return self.config is not None and self.config.labor_cost_per_hour is not None
|
return self.config is not None and self.config.labor_cost_per_hour is not None
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# KPI 1: CPI por canal/skill
|
# KPI 1: CPI by channel/skill
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def cpi_by_skill_channel(self) -> pd.DataFrame:
|
def cpi_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
CPI (Coste Por Interacción) por skill/canal.
|
CPI (Cost Per Interaction) by skill/channel.
|
||||||
|
|
||||||
CPI = (Labor_cost_per_interaction + Overhead_variable) / EFFECTIVE_PRODUCTIVITY
|
CPI = (Labor_cost_per_interaction + Overhead_variable) / EFFECTIVE_PRODUCTIVITY
|
||||||
|
|
||||||
@@ -130,19 +129,17 @@ class EconomyCostMetrics:
|
|||||||
- Overhead_variable = overhead_rate * Labor_cost_per_interaction
|
- Overhead_variable = overhead_rate * Labor_cost_per_interaction
|
||||||
- EFFECTIVE_PRODUCTIVITY = 0.70 (70% - accounts for non-productive time)
|
- EFFECTIVE_PRODUCTIVITY = 0.70 (70% - accounts for non-productive time)
|
||||||
|
|
||||||
Excluye registros abandonados del cálculo de costes para consistencia
|
Excludes abandoned records from cost calculation for consistency with the frontend path (fresh CSV).
|
||||||
con el path del frontend (fresh CSV).
|
|
||||||
|
|
||||||
Si no hay config de costes -> devuelve DataFrame vacío.
|
If there is no cost config -> returns empty DataFrame.
|
||||||
|
|
||||||
Incluye queue_skill y channel como columnas (no solo índice) para que
|
Includes queue_skill and channel as columns (not just index) so that the frontend can lookup by skill name.
|
||||||
el frontend pueda hacer lookup por nombre de skill.
|
|
||||||
"""
|
"""
|
||||||
if not self._has_cost_config():
|
if not self._has_cost_config():
|
||||||
return pd.DataFrame()
|
return pd.DataFrame()
|
||||||
|
|
||||||
cfg = self.config
|
cfg = self.config
|
||||||
assert cfg is not None # para el type checker
|
assert cfg is not None # for the type checker
|
||||||
|
|
||||||
df = self.df.copy()
|
df = self.df.copy()
|
||||||
if df.empty:
|
if df.empty:
|
||||||
@@ -154,15 +151,15 @@ class EconomyCostMetrics:
|
|||||||
else:
|
else:
|
||||||
df_cost = df
|
df_cost = df
|
||||||
|
|
||||||
# Filtrar por record_status: solo VALID para cálculo de AHT
|
# Filter by record_status: only VALID for AHT calculation
|
||||||
# Excluye NOISE, ZOMBIE, ABANDON
|
# Excludes NOISE, ZOMBIE, ABANDON
|
||||||
if "_is_valid_for_cost" in df_cost.columns:
|
if "_is_valid_for_cost" in df_cost.columns:
|
||||||
df_cost = df_cost[df_cost["_is_valid_for_cost"] == True]
|
df_cost = df_cost[df_cost["_is_valid_for_cost"] == True]
|
||||||
|
|
||||||
if df_cost.empty:
|
if df_cost.empty:
|
||||||
return pd.DataFrame()
|
return pd.DataFrame()
|
||||||
|
|
||||||
# AHT por skill/canal (en segundos) - solo registros VALID
|
# AHT by skill/channel (in seconds) - only VALID records
|
||||||
grouped = df_cost.groupby(["queue_skill", "channel"])["handle_time"].mean()
|
grouped = df_cost.groupby(["queue_skill", "channel"])["handle_time"].mean()
|
||||||
|
|
||||||
if grouped.empty:
|
if grouped.empty:
|
||||||
@@ -193,17 +190,16 @@ class EconomyCostMetrics:
|
|||||||
return out.sort_index().reset_index()
|
return out.sort_index().reset_index()
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# KPI 2: coste anual por skill/canal
|
# KPI 2: annual cost by skill/channel
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def annual_cost_by_skill_channel(self) -> pd.DataFrame:
|
def annual_cost_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
Coste anual por skill/canal.
|
Annual cost by skill/channel.
|
||||||
|
|
||||||
cost_annual = CPI * volumen (cantidad de interacciones de la muestra).
|
cost_annual = CPI * volume (number of interactions in the sample).
|
||||||
|
|
||||||
Nota: por simplicidad asumimos que el dataset refleja un periodo anual.
|
Note: for simplicity we assume the dataset reflects an annual period.
|
||||||
Si en el futuro quieres anualizar (ej. dataset = 1 mes) se puede añadir
|
If in the future you want to annualize (e.g. dataset = 1 month) you can add a scaling factor in EconomyConfig.
|
||||||
un factor de escalado en EconomyConfig.
|
|
||||||
"""
|
"""
|
||||||
cpi_table = self.cpi_by_skill_channel()
|
cpi_table = self.cpi_by_skill_channel()
|
||||||
if cpi_table.empty:
|
if cpi_table.empty:
|
||||||
@@ -224,18 +220,18 @@ class EconomyCostMetrics:
|
|||||||
return joined
|
return joined
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# KPI 3: desglose de costes (labor / tech / overhead)
|
# KPI 3: cost breakdown (labor / tech / overhead)
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def cost_breakdown(self) -> Dict[str, float]:
|
def cost_breakdown(self) -> Dict[str, float]:
|
||||||
"""
|
"""
|
||||||
Desglose % de costes: labor, overhead, tech.
|
Cost breakdown %: labor, overhead, tech.
|
||||||
|
|
||||||
labor_total = sum(labor_cost_per_interaction)
|
labor_total = sum(labor_cost_per_interaction)
|
||||||
overhead_total = labor_total * overhead_rate
|
overhead_total = labor_total * overhead_rate
|
||||||
tech_total = tech_costs_annual (si se ha proporcionado)
|
tech_total = tech_costs_annual (if provided)
|
||||||
|
|
||||||
Devuelve porcentajes sobre el total.
|
Returns percentages of the total.
|
||||||
Si falta configuración de coste -> devuelve {}.
|
If cost configuration is missing -> returns {}.
|
||||||
"""
|
"""
|
||||||
if not self._has_cost_config():
|
if not self._has_cost_config():
|
||||||
return {}
|
return {}
|
||||||
@@ -258,7 +254,7 @@ class EconomyCostMetrics:
|
|||||||
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
|
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
|
||||||
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
|
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
|
||||||
|
|
||||||
# Costes anuales de labor y overhead
|
# Annual labor and overhead costs
|
||||||
annual_labor = (joined["labor_cost"] * joined["volume"]).sum()
|
annual_labor = (joined["labor_cost"] * joined["volume"]).sum()
|
||||||
annual_overhead = (joined["overhead_cost"] * joined["volume"]).sum()
|
annual_overhead = (joined["overhead_cost"] * joined["volume"]).sum()
|
||||||
annual_tech = cfg.tech_costs_annual
|
annual_tech = cfg.tech_costs_annual
|
||||||
@@ -278,21 +274,21 @@ class EconomyCostMetrics:
|
|||||||
}
|
}
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# KPI 4: coste de ineficiencia (€ por variabilidad/escalación)
|
# KPI 4: inefficiency cost (€ by variability/escalation)
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def inefficiency_cost_by_skill_channel(self) -> pd.DataFrame:
|
def inefficiency_cost_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
Estimación muy simplificada de coste de ineficiencia:
|
Very simplified estimate of inefficiency cost:
|
||||||
|
|
||||||
Para cada skill/canal:
|
For each skill/channel:
|
||||||
|
|
||||||
- AHT_p50, AHT_p90 (segundos).
|
- AHT_p50, AHT_p90 (seconds).
|
||||||
- Delta = max(0, AHT_p90 - AHT_p50).
|
- Delta = max(0, AHT_p90 - AHT_p50).
|
||||||
- Se asume que ~40% de las interacciones están por encima de la mediana.
|
- Assumes that ~40% of interactions are above the median.
|
||||||
- Ineff_seconds = Delta * volume * 0.4
|
- Ineff_seconds = Delta * volume * 0.4
|
||||||
- Ineff_cost = LaborCPI_per_second * Ineff_seconds
|
- Ineff_cost = LaborCPI_per_second * Ineff_seconds
|
||||||
|
|
||||||
NOTA: Es un modelo aproximado para cuantificar "orden de magnitud".
|
NOTE: This is an approximate model to quantify "order of magnitude".
|
||||||
"""
|
"""
|
||||||
if not self._has_cost_config():
|
if not self._has_cost_config():
|
||||||
return pd.DataFrame()
|
return pd.DataFrame()
|
||||||
@@ -302,8 +298,8 @@ class EconomyCostMetrics:
|
|||||||
|
|
||||||
df = self.df.copy()
|
df = self.df.copy()
|
||||||
|
|
||||||
# Filtrar por record_status: solo VALID para cálculo de AHT
|
# Filter by record_status: only VALID for AHT calculation
|
||||||
# Excluye NOISE, ZOMBIE, ABANDON
|
# Excludes NOISE, ZOMBIE, ABANDON
|
||||||
if "_is_valid_for_cost" in df.columns:
|
if "_is_valid_for_cost" in df.columns:
|
||||||
df = df[df["_is_valid_for_cost"] == True]
|
df = df[df["_is_valid_for_cost"] == True]
|
||||||
|
|
||||||
@@ -318,7 +314,7 @@ class EconomyCostMetrics:
|
|||||||
if stats.empty:
|
if stats.empty:
|
||||||
return pd.DataFrame()
|
return pd.DataFrame()
|
||||||
|
|
||||||
# CPI para obtener coste/segundo de labor
|
# CPI to get cost/second of labor
|
||||||
# cpi_by_skill_channel now returns with reset_index, so we need to set index for join
|
# cpi_by_skill_channel now returns with reset_index, so we need to set index for join
|
||||||
cpi_table_raw = self.cpi_by_skill_channel()
|
cpi_table_raw = self.cpi_by_skill_channel()
|
||||||
if cpi_table_raw.empty:
|
if cpi_table_raw.empty:
|
||||||
@@ -331,11 +327,11 @@ class EconomyCostMetrics:
|
|||||||
merged = merged.fillna(0.0)
|
merged = merged.fillna(0.0)
|
||||||
|
|
||||||
delta = (merged["aht_p90"] - merged["aht_p50"]).clip(lower=0.0)
|
delta = (merged["aht_p90"] - merged["aht_p50"]).clip(lower=0.0)
|
||||||
affected_fraction = 0.4 # aproximación
|
affected_fraction = 0.4 # approximation
|
||||||
ineff_seconds = delta * merged["volume"] * affected_fraction
|
ineff_seconds = delta * merged["volume"] * affected_fraction
|
||||||
|
|
||||||
# labor_cost = coste por interacción con AHT medio;
|
# labor_cost = cost per interaction with average AHT;
|
||||||
# aproximamos coste/segundo como labor_cost / AHT_medio
|
# approximate cost/second as labor_cost / average_AHT
|
||||||
aht_mean = grouped["handle_time"].mean()
|
aht_mean = grouped["handle_time"].mean()
|
||||||
merged["aht_mean"] = aht_mean
|
merged["aht_mean"] = aht_mean
|
||||||
|
|
||||||
@@ -351,21 +347,21 @@ class EconomyCostMetrics:
|
|||||||
return merged[["aht_p50", "aht_p90", "volume", "ineff_seconds", "ineff_cost"]].reset_index()
|
return merged[["aht_p50", "aht_p90", "volume", "ineff_seconds", "ineff_cost"]].reset_index()
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# KPI 5: ahorro potencial anual por automatización
|
# KPI 5: potential annual savings from automation
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def potential_savings(self) -> Dict[str, Any]:
|
def potential_savings(self) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Ahorro potencial anual basado en:
|
Potential annual savings based on:
|
||||||
|
|
||||||
Ahorro = (CPI_humano - CPI_automatizado) * Volumen_automatizable * Tasa_éxito
|
Savings = (Human_CPI - Automated_CPI) * Automatable_volume * Success_rate
|
||||||
|
|
||||||
Donde:
|
Where:
|
||||||
- CPI_humano = media ponderada de cpi_total.
|
- Human_CPI = weighted average of cpi_total.
|
||||||
- CPI_automatizado = config.automation_cpi
|
- Automated_CPI = config.automation_cpi
|
||||||
- Volumen_automatizable = volume_total * automation_volume_share
|
- Automatable_volume = volume_total * automation_volume_share
|
||||||
- Tasa_éxito = automation_success_rate
|
- Success_rate = automation_success_rate
|
||||||
|
|
||||||
Si faltan parámetros en config -> devuelve {}.
|
If config parameters are missing -> returns {}.
|
||||||
"""
|
"""
|
||||||
if not self._has_cost_config():
|
if not self._has_cost_config():
|
||||||
return {}
|
return {}
|
||||||
@@ -384,7 +380,7 @@ class EconomyCostMetrics:
|
|||||||
if total_volume <= 0:
|
if total_volume <= 0:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
# CPI humano medio ponderado
|
# Weighted average human CPI
|
||||||
weighted_cpi = (
|
weighted_cpi = (
|
||||||
(cpi_table["cpi_total"] * cpi_table["volume"]).sum() / total_volume
|
(cpi_table["cpi_total"] * cpi_table["volume"]).sum() / total_volume
|
||||||
)
|
)
|
||||||
@@ -409,12 +405,12 @@ class EconomyCostMetrics:
|
|||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def plot_cost_waterfall(self) -> Axes:
|
def plot_cost_waterfall(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Waterfall de costes anuales (labor + tech + overhead).
|
Waterfall of annual costs (labor + tech + overhead).
|
||||||
"""
|
"""
|
||||||
breakdown = self.cost_breakdown()
|
breakdown = self.cost_breakdown()
|
||||||
if not breakdown:
|
if not breakdown:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center")
|
ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
@@ -436,14 +432,14 @@ class EconomyCostMetrics:
|
|||||||
bottoms.append(running)
|
bottoms.append(running)
|
||||||
running += v
|
running += v
|
||||||
|
|
||||||
# barras estilo waterfall
|
# waterfall style bars
|
||||||
x = np.arange(len(labels))
|
x = np.arange(len(labels))
|
||||||
ax.bar(x, values)
|
ax.bar(x, values)
|
||||||
|
|
||||||
ax.set_xticks(x)
|
ax.set_xticks(x)
|
||||||
ax.set_xticklabels(labels)
|
ax.set_xticklabels(labels)
|
||||||
ax.set_ylabel("€ anuales")
|
ax.set_ylabel("€ annual")
|
||||||
ax.set_title("Desglose anual de costes")
|
ax.set_title("Annual cost breakdown")
|
||||||
|
|
||||||
for idx, v in enumerate(values):
|
for idx, v in enumerate(values):
|
||||||
ax.text(idx, v, f"{v:,.0f}", ha="center", va="bottom")
|
ax.text(idx, v, f"{v:,.0f}", ha="center", va="bottom")
|
||||||
@@ -454,12 +450,12 @@ class EconomyCostMetrics:
|
|||||||
|
|
||||||
def plot_cpi_by_channel(self) -> Axes:
|
def plot_cpi_by_channel(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Gráfico de barras de CPI medio por canal.
|
Bar chart of average CPI by channel.
|
||||||
"""
|
"""
|
||||||
cpi_table = self.cpi_by_skill_channel()
|
cpi_table = self.cpi_by_skill_channel()
|
||||||
if cpi_table.empty:
|
if cpi_table.empty:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin configuración de costes", ha="center", va="center")
|
ax.text(0.5, 0.5, "No cost configuration", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
@@ -474,7 +470,7 @@ class EconomyCostMetrics:
|
|||||||
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
|
cpi_indexed = cpi_table.set_index(["queue_skill", "channel"])
|
||||||
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
|
joined = cpi_indexed.join(volume, how="left").fillna({"volume": 0})
|
||||||
|
|
||||||
# CPI medio ponderado por canal
|
# Weighted average CPI by channel
|
||||||
per_channel = (
|
per_channel = (
|
||||||
joined.reset_index()
|
joined.reset_index()
|
||||||
.groupby("channel")
|
.groupby("channel")
|
||||||
@@ -486,9 +482,9 @@ class EconomyCostMetrics:
|
|||||||
fig, ax = plt.subplots(figsize=(6, 4))
|
fig, ax = plt.subplots(figsize=(6, 4))
|
||||||
per_channel.plot(kind="bar", ax=ax)
|
per_channel.plot(kind="bar", ax=ax)
|
||||||
|
|
||||||
ax.set_xlabel("Canal")
|
ax.set_xlabel("Channel")
|
||||||
ax.set_ylabel("CPI medio (€)")
|
ax.set_ylabel("Average CPI (€)")
|
||||||
ax.set_title("Coste por interacción (CPI) por canal")
|
ax.set_title("Cost per interaction (CPI) by channel")
|
||||||
ax.grid(axis="y", alpha=0.3)
|
ax.grid(axis="y", alpha=0.3)
|
||||||
|
|
||||||
return ax
|
return ax
|
||||||
|
|||||||
@@ -25,32 +25,31 @@ REQUIRED_COLUMNS_OP: List[str] = [
|
|||||||
@dataclass
|
@dataclass
|
||||||
class OperationalPerformanceMetrics:
|
class OperationalPerformanceMetrics:
|
||||||
"""
|
"""
|
||||||
Dimensión: RENDIMIENTO OPERACIONAL Y DE SERVICIO
|
Dimension: OPERATIONAL PERFORMANCE AND SERVICE
|
||||||
|
|
||||||
Propósito: medir el balance entre rapidez (eficiencia) y calidad de resolución,
|
Purpose: measure the balance between speed (efficiency) and resolution quality, plus service variability.
|
||||||
más la variabilidad del servicio.
|
|
||||||
|
|
||||||
Requiere como mínimo:
|
Requires at minimum:
|
||||||
- interaction_id
|
- interaction_id
|
||||||
- datetime_start
|
- datetime_start
|
||||||
- queue_skill
|
- queue_skill
|
||||||
- channel
|
- channel
|
||||||
- duration_talk (segundos)
|
- duration_talk (seconds)
|
||||||
- hold_time (segundos)
|
- hold_time (seconds)
|
||||||
- wrap_up_time (segundos)
|
- wrap_up_time (seconds)
|
||||||
- agent_id
|
- agent_id
|
||||||
- transfer_flag (bool/int)
|
- transfer_flag (bool/int)
|
||||||
|
|
||||||
Columnas opcionales:
|
Optional columns:
|
||||||
- is_resolved (bool/int) -> para FCR
|
- is_resolved (bool/int) -> for FCR
|
||||||
- abandoned_flag (bool/int) -> para tasa de abandono
|
- abandoned_flag (bool/int) -> for abandonment rate
|
||||||
- customer_id / caller_id -> para reincidencia y repetición de canal
|
- customer_id / caller_id -> for recurrence and channel repetition
|
||||||
- logged_time (segundos) -> para occupancy_rate
|
- logged_time (seconds) -> for occupancy_rate
|
||||||
"""
|
"""
|
||||||
|
|
||||||
df: pd.DataFrame
|
df: pd.DataFrame
|
||||||
|
|
||||||
# Benchmarks / parámetros de normalización (puedes ajustarlos)
|
# Benchmarks / normalization parameters (you can adjust them)
|
||||||
AHT_GOOD: float = 300.0 # 5 min
|
AHT_GOOD: float = 300.0 # 5 min
|
||||||
AHT_BAD: float = 900.0 # 15 min
|
AHT_BAD: float = 900.0 # 15 min
|
||||||
VAR_RATIO_GOOD: float = 1.2 # P90/P50 ~1.2 muy estable
|
VAR_RATIO_GOOD: float = 1.2 # P90/P50 ~1.2 muy estable
|
||||||
@@ -61,19 +60,19 @@ class OperationalPerformanceMetrics:
|
|||||||
self._prepare_data()
|
self._prepare_data()
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# Helpers internos
|
# Internal helpers
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def _validate_columns(self) -> None:
|
def _validate_columns(self) -> None:
|
||||||
missing = [c for c in REQUIRED_COLUMNS_OP if c not in self.df.columns]
|
missing = [c for c in REQUIRED_COLUMNS_OP if c not in self.df.columns]
|
||||||
if missing:
|
if missing:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"Faltan columnas obligatorias para OperationalPerformanceMetrics: {missing}"
|
f"Missing required columns for OperationalPerformanceMetrics: {missing}"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _prepare_data(self) -> None:
|
def _prepare_data(self) -> None:
|
||||||
df = self.df.copy()
|
df = self.df.copy()
|
||||||
|
|
||||||
# Tipos
|
# Types
|
||||||
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
|
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
|
||||||
|
|
||||||
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
|
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
|
||||||
@@ -86,13 +85,13 @@ class OperationalPerformanceMetrics:
|
|||||||
+ df["wrap_up_time"].fillna(0)
|
+ df["wrap_up_time"].fillna(0)
|
||||||
)
|
)
|
||||||
|
|
||||||
# v3.0: Filtrar NOISE y ZOMBIE para cálculos de variabilidad
|
# v3.0: Filter NOISE and ZOMBIE for variability calculations
|
||||||
# record_status: 'VALID', 'NOISE', 'ZOMBIE', 'ABANDON'
|
# record_status: 'VALID', 'NOISE', 'ZOMBIE', 'ABANDON'
|
||||||
# Para AHT/CV solo usamos 'VALID' (excluye noise, zombie, abandon)
|
# For AHT/CV we only use 'VALID' (excludes noise, zombie, abandon)
|
||||||
if "record_status" in df.columns:
|
if "record_status" in df.columns:
|
||||||
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
|
df["record_status"] = df["record_status"].astype(str).str.strip().str.upper()
|
||||||
# Crear máscara para registros válidos: SOLO "VALID"
|
# Create mask for valid records: ONLY "VALID"
|
||||||
# Excluye explícitamente NOISE, ZOMBIE, ABANDON y cualquier otro valor
|
# Explicitly excludes NOISE, ZOMBIE, ABANDON and any other value
|
||||||
df["_is_valid_for_cv"] = df["record_status"] == "VALID"
|
df["_is_valid_for_cv"] = df["record_status"] == "VALID"
|
||||||
|
|
||||||
# Log record_status breakdown for debugging
|
# Log record_status breakdown for debugging
|
||||||
@@ -104,21 +103,21 @@ class OperationalPerformanceMetrics:
|
|||||||
print(f" - {status}: {count}")
|
print(f" - {status}: {count}")
|
||||||
print(f" VALID rows for AHT calculation: {valid_count}")
|
print(f" VALID rows for AHT calculation: {valid_count}")
|
||||||
else:
|
else:
|
||||||
# Legacy data sin record_status: incluir todo
|
# Legacy data without record_status: include all
|
||||||
df["_is_valid_for_cv"] = True
|
df["_is_valid_for_cv"] = True
|
||||||
print(f"[OperationalPerformance] No record_status column - using all {len(df)} rows")
|
print(f"[OperationalPerformance] No record_status column - using all {len(df)} rows")
|
||||||
|
|
||||||
# Normalización básica
|
# Basic normalization
|
||||||
df["queue_skill"] = df["queue_skill"].astype(str).str.strip()
|
df["queue_skill"] = df["queue_skill"].astype(str).str.strip()
|
||||||
df["channel"] = df["channel"].astype(str).str.strip()
|
df["channel"] = df["channel"].astype(str).str.strip()
|
||||||
df["agent_id"] = df["agent_id"].astype(str).str.strip()
|
df["agent_id"] = df["agent_id"].astype(str).str.strip()
|
||||||
|
|
||||||
# Flags opcionales convertidos a bool cuando existan
|
# Optional flags converted to bool when they exist
|
||||||
for flag_col in ["is_resolved", "abandoned_flag", "transfer_flag"]:
|
for flag_col in ["is_resolved", "abandoned_flag", "transfer_flag"]:
|
||||||
if flag_col in df.columns:
|
if flag_col in df.columns:
|
||||||
df[flag_col] = df[flag_col].astype(int).astype(bool)
|
df[flag_col] = df[flag_col].astype(int).astype(bool)
|
||||||
|
|
||||||
# customer_id: usamos customer_id si existe, si no caller_id
|
# customer_id: we use customer_id if it exists, otherwise caller_id
|
||||||
if "customer_id" in df.columns:
|
if "customer_id" in df.columns:
|
||||||
df["customer_id"] = df["customer_id"].astype(str)
|
df["customer_id"] = df["customer_id"].astype(str)
|
||||||
elif "caller_id" in df.columns:
|
elif "caller_id" in df.columns:
|
||||||
@@ -126,8 +125,8 @@ class OperationalPerformanceMetrics:
|
|||||||
else:
|
else:
|
||||||
df["customer_id"] = None
|
df["customer_id"] = None
|
||||||
|
|
||||||
# logged_time opcional
|
# logged_time optional
|
||||||
# Normalizamos logged_time: siempre será una serie float con NaN si no existe
|
# Normalize logged_time: will always be a float series with NaN if it does not exist
|
||||||
df["logged_time"] = pd.to_numeric(df.get("logged_time", np.nan), errors="coerce")
|
df["logged_time"] = pd.to_numeric(df.get("logged_time", np.nan), errors="coerce")
|
||||||
|
|
||||||
|
|
||||||
@@ -138,16 +137,16 @@ class OperationalPerformanceMetrics:
|
|||||||
return self.df.empty
|
return self.df.empty
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# AHT y variabilidad
|
# AHT and variability
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def aht_distribution(self) -> Dict[str, float]:
|
def aht_distribution(self) -> Dict[str, float]:
|
||||||
"""
|
"""
|
||||||
Devuelve P10, P50, P90 del AHT y el ratio P90/P50 como medida de variabilidad.
|
Returns P10, P50, P90 of AHT and the P90/P50 ratio as a measure of variability.
|
||||||
|
|
||||||
v3.0: Filtra NOISE y ZOMBIE para el cálculo de variabilidad.
|
v3.0: Filters NOISE and ZOMBIE for variability calculation.
|
||||||
Solo usa registros con record_status='valid' o sin status (legacy).
|
Only uses records with record_status='valid' or without status (legacy).
|
||||||
"""
|
"""
|
||||||
# Filtrar solo registros válidos para cálculo de variabilidad
|
# Filter only valid records for variability calculation
|
||||||
df_valid = self.df[self.df["_is_valid_for_cv"] == True]
|
df_valid = self.df[self.df["_is_valid_for_cv"] == True]
|
||||||
ht = df_valid["handle_time"].dropna().astype(float)
|
ht = df_valid["handle_time"].dropna().astype(float)
|
||||||
if ht.empty:
|
if ht.empty:
|
||||||
@@ -167,10 +166,9 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def talk_hold_acw_p50_by_skill(self) -> pd.DataFrame:
|
def talk_hold_acw_p50_by_skill(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
P50 de talk_time, hold_time y wrap_up_time por skill.
|
P50 of talk_time, hold_time and wrap_up_time by skill.
|
||||||
|
|
||||||
Incluye queue_skill como columna (no solo índice) para que
|
Includes queue_skill as a column (not just index) so that the frontend can lookup by skill name.
|
||||||
el frontend pueda hacer lookup por nombre de skill.
|
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
|
|
||||||
@@ -192,24 +190,24 @@ class OperationalPerformanceMetrics:
|
|||||||
return result.round(2).sort_index().reset_index()
|
return result.round(2).sort_index().reset_index()
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# FCR, escalación, abandono, reincidencia, repetición canal
|
# FCR, escalation, abandonment, recurrence, channel repetition
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def fcr_rate(self) -> float:
|
def fcr_rate(self) -> float:
|
||||||
"""
|
"""
|
||||||
FCR (First Contact Resolution).
|
FCR (First Contact Resolution).
|
||||||
|
|
||||||
Prioridad 1: Usar fcr_real_flag del CSV si existe
|
Priority 1: Use fcr_real_flag from CSV if it exists
|
||||||
Prioridad 2: Calcular como 100 - escalation_rate
|
Priority 2: Calculate as 100 - escalation_rate
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
total = len(df)
|
total = len(df)
|
||||||
if total == 0:
|
if total == 0:
|
||||||
return float("nan")
|
return float("nan")
|
||||||
|
|
||||||
# Prioridad 1: Usar fcr_real_flag si existe
|
# Priority 1: Use fcr_real_flag if it exists
|
||||||
if "fcr_real_flag" in df.columns:
|
if "fcr_real_flag" in df.columns:
|
||||||
col = df["fcr_real_flag"]
|
col = df["fcr_real_flag"]
|
||||||
# Normalizar a booleano
|
# Normalize to boolean
|
||||||
if col.dtype == "O":
|
if col.dtype == "O":
|
||||||
fcr_mask = (
|
fcr_mask = (
|
||||||
col.astype(str)
|
col.astype(str)
|
||||||
@@ -224,7 +222,7 @@ class OperationalPerformanceMetrics:
|
|||||||
fcr = (fcr_count / total) * 100.0
|
fcr = (fcr_count / total) * 100.0
|
||||||
return float(max(0.0, min(100.0, round(fcr, 2))))
|
return float(max(0.0, min(100.0, round(fcr, 2))))
|
||||||
|
|
||||||
# Prioridad 2: Fallback a 100 - escalation_rate
|
# Priority 2: Fallback to 100 - escalation_rate
|
||||||
try:
|
try:
|
||||||
esc = self.escalation_rate()
|
esc = self.escalation_rate()
|
||||||
except Exception:
|
except Exception:
|
||||||
@@ -239,7 +237,7 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def escalation_rate(self) -> float:
|
def escalation_rate(self) -> float:
|
||||||
"""
|
"""
|
||||||
% de interacciones que requieren escalación (transfer_flag == True).
|
% of interactions that require escalation (transfer_flag == True).
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
total = len(df)
|
total = len(df)
|
||||||
@@ -251,17 +249,17 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def abandonment_rate(self) -> float:
|
def abandonment_rate(self) -> float:
|
||||||
"""
|
"""
|
||||||
% de interacciones abandonadas.
|
% of abandoned interactions.
|
||||||
|
|
||||||
Busca en orden: is_abandoned, abandoned_flag, abandoned
|
Searches in order: is_abandoned, abandoned_flag, abandoned
|
||||||
Si ninguna columna existe, devuelve NaN.
|
If no column exists, returns NaN.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
total = len(df)
|
total = len(df)
|
||||||
if total == 0:
|
if total == 0:
|
||||||
return float("nan")
|
return float("nan")
|
||||||
|
|
||||||
# Buscar columna de abandono en orden de prioridad
|
# Search for abandonment column in priority order
|
||||||
abandon_col = None
|
abandon_col = None
|
||||||
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
|
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
|
||||||
if col_name in df.columns:
|
if col_name in df.columns:
|
||||||
@@ -273,7 +271,7 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
col = df[abandon_col]
|
col = df[abandon_col]
|
||||||
|
|
||||||
# Normalizar a booleano
|
# Normalize to boolean
|
||||||
if col.dtype == "O":
|
if col.dtype == "O":
|
||||||
abandon_mask = (
|
abandon_mask = (
|
||||||
col.astype(str)
|
col.astype(str)
|
||||||
@@ -289,10 +287,9 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def high_hold_time_rate(self, threshold_seconds: float = 60.0) -> float:
|
def high_hold_time_rate(self, threshold_seconds: float = 60.0) -> float:
|
||||||
"""
|
"""
|
||||||
% de interacciones con hold_time > threshold (por defecto 60s).
|
% of interactions with hold_time > threshold (default 60s).
|
||||||
|
|
||||||
Proxy de complejidad: si el agente tuvo que poner en espera al cliente
|
Complexity proxy: if the agent had to put the customer on hold for more than 60 seconds, they probably had to consult/investigate.
|
||||||
más de 60 segundos, probablemente tuvo que consultar/investigar.
|
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
total = len(df)
|
total = len(df)
|
||||||
@@ -306,44 +303,43 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def recurrence_rate_7d(self) -> float:
|
def recurrence_rate_7d(self) -> float:
|
||||||
"""
|
"""
|
||||||
% de clientes que vuelven a contactar en < 7 días para el MISMO skill.
|
% of customers who contact again in < 7 days for the SAME skill.
|
||||||
|
|
||||||
Se basa en customer_id (o caller_id si no hay customer_id) + queue_skill.
|
Based on customer_id (or caller_id if no customer_id) + queue_skill.
|
||||||
Calcula:
|
Calculates:
|
||||||
- Para cada combinación cliente + skill, ordena por datetime_start
|
- For each client + skill combination, sorts by datetime_start
|
||||||
- Si hay dos contactos consecutivos separados < 7 días (mismo cliente, mismo skill),
|
- If there are two consecutive contacts separated by < 7 days (same client, same skill), counts as "recurrent"
|
||||||
cuenta como "recurrente"
|
- Rate = number of recurrent clients / total number of clients
|
||||||
- Tasa = nº clientes recurrentes / nº total de clientes
|
|
||||||
|
|
||||||
NOTA: Solo cuenta como recurrencia si el cliente llama por el MISMO skill.
|
NOTE: Only counts as recurrence if the client calls for the SAME skill.
|
||||||
Un cliente que llama a "Ventas" y luego a "Soporte" NO es recurrente.
|
A client who calls "Sales" and then "Support" is NOT recurrent.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
df = self.df.dropna(subset=["datetime_start"]).copy()
|
df = self.df.dropna(subset=["datetime_start"]).copy()
|
||||||
|
|
||||||
# Normalizar identificador de cliente
|
# Normalize client identifier
|
||||||
if "customer_id" not in df.columns:
|
if "customer_id" not in df.columns:
|
||||||
if "caller_id" in df.columns:
|
if "caller_id" in df.columns:
|
||||||
df["customer_id"] = df["caller_id"]
|
df["customer_id"] = df["caller_id"]
|
||||||
else:
|
else:
|
||||||
# No hay identificador de cliente -> no se puede calcular
|
# No client identifier -> cannot calculate
|
||||||
return float("nan")
|
return float("nan")
|
||||||
|
|
||||||
df = df.dropna(subset=["customer_id"])
|
df = df.dropna(subset=["customer_id"])
|
||||||
if df.empty:
|
if df.empty:
|
||||||
return float("nan")
|
return float("nan")
|
||||||
|
|
||||||
# Ordenar por cliente + skill + fecha
|
# Sort by client + skill + date
|
||||||
df = df.sort_values(["customer_id", "queue_skill", "datetime_start"])
|
df = df.sort_values(["customer_id", "queue_skill", "datetime_start"])
|
||||||
|
|
||||||
# Diferencia de tiempo entre contactos consecutivos por cliente Y skill
|
# Time difference between consecutive contacts by client AND skill
|
||||||
# Esto asegura que solo contamos recontactos del mismo cliente para el mismo skill
|
# This ensures we only count re-contacts from the same client for the same skill
|
||||||
df["delta"] = df.groupby(["customer_id", "queue_skill"])["datetime_start"].diff()
|
df["delta"] = df.groupby(["customer_id", "queue_skill"])["datetime_start"].diff()
|
||||||
|
|
||||||
# Marcamos los contactos que ocurren a menos de 7 días del anterior (mismo skill)
|
# Mark contacts that occur less than 7 days from the previous one (same skill)
|
||||||
recurrence_mask = df["delta"] < pd.Timedelta(days=7)
|
recurrence_mask = df["delta"] < pd.Timedelta(days=7)
|
||||||
|
|
||||||
# Nº de clientes que tienen al menos un contacto recurrente (para cualquier skill)
|
# Number of clients who have at least one recurrent contact (for any skill)
|
||||||
recurrent_customers = df.loc[recurrence_mask, "customer_id"].nunique()
|
recurrent_customers = df.loc[recurrence_mask, "customer_id"].nunique()
|
||||||
total_customers = df["customer_id"].nunique()
|
total_customers = df["customer_id"].nunique()
|
||||||
|
|
||||||
@@ -356,9 +352,9 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def repeat_channel_rate(self) -> float:
|
def repeat_channel_rate(self) -> float:
|
||||||
"""
|
"""
|
||||||
% de reincidencias (<7 días) en las que el cliente usa el MISMO canal.
|
% of recurrences (<7 days) in which the client uses the SAME channel.
|
||||||
|
|
||||||
Si no hay customer_id/caller_id o solo un contacto por cliente, devuelve NaN.
|
If there is no customer_id/caller_id or only one contact per client, returns NaN.
|
||||||
"""
|
"""
|
||||||
df = self.df.dropna(subset=["datetime_start"]).copy()
|
df = self.df.dropna(subset=["datetime_start"]).copy()
|
||||||
if df["customer_id"].isna().all():
|
if df["customer_id"].isna().all():
|
||||||
@@ -387,11 +383,11 @@ class OperationalPerformanceMetrics:
|
|||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def occupancy_rate(self) -> float:
|
def occupancy_rate(self) -> float:
|
||||||
"""
|
"""
|
||||||
Tasa de ocupación:
|
Occupancy rate:
|
||||||
|
|
||||||
occupancy = sum(handle_time) / sum(logged_time) * 100.
|
occupancy = sum(handle_time) / sum(logged_time) * 100.
|
||||||
|
|
||||||
Requiere columna 'logged_time'. Si no existe o es todo 0, devuelve NaN.
|
Requires 'logged_time' column. If it does not exist or is all 0, returns NaN.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "logged_time" not in df.columns:
|
if "logged_time" not in df.columns:
|
||||||
@@ -408,23 +404,23 @@ class OperationalPerformanceMetrics:
|
|||||||
return float(round(occ * 100, 2))
|
return float(round(occ * 100, 2))
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# Score de rendimiento 0-10
|
# Performance score 0-10
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def performance_score(self) -> Dict[str, float]:
|
def performance_score(self) -> Dict[str, float]:
|
||||||
"""
|
"""
|
||||||
Calcula un score 0-10 combinando:
|
Calculates a 0-10 score combining:
|
||||||
- AHT (bajo es mejor)
|
- AHT (lower is better)
|
||||||
- FCR (alto es mejor)
|
- FCR (higher is better)
|
||||||
- Variabilidad (P90/P50, bajo es mejor)
|
- Variability (P90/P50, lower is better)
|
||||||
- Otros factores (ocupación / escalación)
|
- Other factors (occupancy / escalation)
|
||||||
|
|
||||||
Fórmula:
|
Formula:
|
||||||
score = 0.4 * (10 - AHT_norm) +
|
score = 0.4 * (10 - AHT_norm) +
|
||||||
0.3 * FCR_norm +
|
0.3 * FCR_norm +
|
||||||
0.2 * (10 - Var_norm) +
|
0.2 * (10 - Var_norm) +
|
||||||
0.1 * Otros_score
|
0.1 * Otros_score
|
||||||
|
|
||||||
Donde *_norm son valores en escala 0-10.
|
Where *_norm are values on a 0-10 scale.
|
||||||
"""
|
"""
|
||||||
dist = self.aht_distribution()
|
dist = self.aht_distribution()
|
||||||
if not dist:
|
if not dist:
|
||||||
@@ -433,15 +429,15 @@ class OperationalPerformanceMetrics:
|
|||||||
p50 = dist["p50"]
|
p50 = dist["p50"]
|
||||||
ratio = dist["p90_p50_ratio"]
|
ratio = dist["p90_p50_ratio"]
|
||||||
|
|
||||||
# AHT_normalized: 0 (mejor) a 10 (peor)
|
# AHT_normalized: 0 (better) to 10 (worse)
|
||||||
aht_norm = self._scale_to_0_10(p50, self.AHT_GOOD, self.AHT_BAD)
|
aht_norm = self._scale_to_0_10(p50, self.AHT_GOOD, self.AHT_BAD)
|
||||||
# FCR_normalized: 0-10 directamente desde % (0-100)
|
# FCR_normalized: 0-10 directly from % (0-100)
|
||||||
fcr_pct = self.fcr_rate()
|
fcr_pct = self.fcr_rate()
|
||||||
fcr_norm = fcr_pct / 10.0 if not np.isnan(fcr_pct) else 0.0
|
fcr_norm = fcr_pct / 10.0 if not np.isnan(fcr_pct) else 0.0
|
||||||
# Variabilidad_normalized: 0 (ratio bueno) a 10 (ratio malo)
|
# Variability_normalized: 0 (good ratio) to 10 (bad ratio)
|
||||||
var_norm = self._scale_to_0_10(ratio, self.VAR_RATIO_GOOD, self.VAR_RATIO_BAD)
|
var_norm = self._scale_to_0_10(ratio, self.VAR_RATIO_GOOD, self.VAR_RATIO_BAD)
|
||||||
|
|
||||||
# Otros factores: combinamos ocupación (ideal ~80%) y escalación (ideal baja)
|
# Other factors: combine occupancy (ideal ~80%) and escalation (ideal low)
|
||||||
occ = self.occupancy_rate()
|
occ = self.occupancy_rate()
|
||||||
esc = self.escalation_rate()
|
esc = self.escalation_rate()
|
||||||
|
|
||||||
@@ -467,26 +463,26 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def _scale_to_0_10(self, value: float, good: float, bad: float) -> float:
|
def _scale_to_0_10(self, value: float, good: float, bad: float) -> float:
|
||||||
"""
|
"""
|
||||||
Escala linealmente un valor:
|
Linearly scales a value:
|
||||||
- good -> 0
|
- good -> 0
|
||||||
- bad -> 10
|
- bad -> 10
|
||||||
Con saturación fuera de rango.
|
With saturation outside range.
|
||||||
"""
|
"""
|
||||||
if np.isnan(value):
|
if np.isnan(value):
|
||||||
return 5.0 # neutro
|
return 5.0 # neutral
|
||||||
|
|
||||||
if good == bad:
|
if good == bad:
|
||||||
return 5.0
|
return 5.0
|
||||||
|
|
||||||
if good < bad:
|
if good < bad:
|
||||||
# Menor es mejor
|
# Lower is better
|
||||||
if value <= good:
|
if value <= good:
|
||||||
return 0.0
|
return 0.0
|
||||||
if value >= bad:
|
if value >= bad:
|
||||||
return 10.0
|
return 10.0
|
||||||
return 10.0 * (value - good) / (bad - good)
|
return 10.0 * (value - good) / (bad - good)
|
||||||
else:
|
else:
|
||||||
# Mayor es mejor
|
# Higher is better
|
||||||
if value >= good:
|
if value >= good:
|
||||||
return 0.0
|
return 0.0
|
||||||
if value <= bad:
|
if value <= bad:
|
||||||
@@ -495,19 +491,19 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def _compute_other_factors_score(self, occ_pct: float, esc_pct: float) -> float:
|
def _compute_other_factors_score(self, occ_pct: float, esc_pct: float) -> float:
|
||||||
"""
|
"""
|
||||||
Otros factores (0-10) basados en:
|
Other factors (0-10) based on:
|
||||||
- ocupación ideal alrededor de 80%
|
- ideal occupancy around 80%
|
||||||
- tasa de escalación ideal baja (<10%)
|
- ideal escalation rate low (<10%)
|
||||||
"""
|
"""
|
||||||
# Ocupación: 0 penalización si está entre 75-85, se penaliza fuera
|
# Occupancy: 0 penalty if between 75-85, penalized outside
|
||||||
if np.isnan(occ_pct):
|
if np.isnan(occ_pct):
|
||||||
occ_penalty = 5.0
|
occ_penalty = 5.0
|
||||||
else:
|
else:
|
||||||
deviation = abs(occ_pct - 80.0)
|
deviation = abs(occ_pct - 80.0)
|
||||||
occ_penalty = min(10.0, deviation / 5.0 * 2.0) # cada 5 puntos se suman 2, máx 10
|
occ_penalty = min(10.0, deviation / 5.0 * 2.0) # each 5 points add 2, max 10
|
||||||
occ_score = max(0.0, 10.0 - occ_penalty)
|
occ_score = max(0.0, 10.0 - occ_penalty)
|
||||||
|
|
||||||
# Escalación: 0-10 donde 0% -> 10 puntos, >=40% -> 0
|
# Escalation: 0-10 where 0% -> 10 points, >=40% -> 0
|
||||||
if np.isnan(esc_pct):
|
if np.isnan(esc_pct):
|
||||||
esc_score = 5.0
|
esc_score = 5.0
|
||||||
else:
|
else:
|
||||||
@@ -518,7 +514,7 @@ class OperationalPerformanceMetrics:
|
|||||||
else:
|
else:
|
||||||
esc_score = 10.0 * (1.0 - esc_pct / 40.0)
|
esc_score = 10.0 * (1.0 - esc_pct / 40.0)
|
||||||
|
|
||||||
# Media simple de ambos
|
# Simple average of both
|
||||||
return (occ_score + esc_score) / 2.0
|
return (occ_score + esc_score) / 2.0
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
@@ -526,29 +522,29 @@ class OperationalPerformanceMetrics:
|
|||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def plot_aht_boxplot_by_skill(self) -> Axes:
|
def plot_aht_boxplot_by_skill(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Boxplot del AHT por skill (P10-P50-P90 visual).
|
Boxplot of AHT by skill (P10-P50-P90 visual).
|
||||||
"""
|
"""
|
||||||
df = self.df.copy()
|
df = self.df.copy()
|
||||||
|
|
||||||
if df.empty or "handle_time" not in df.columns:
|
if df.empty or "handle_time" not in df.columns:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos de AHT", ha="center", va="center")
|
ax.text(0.5, 0.5, "No AHT data", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
df = df.dropna(subset=["handle_time"])
|
df = df.dropna(subset=["handle_time"])
|
||||||
if df.empty:
|
if df.empty:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "AHT no disponible", ha="center", va="center")
|
ax.text(0.5, 0.5, "AHT not available", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
fig, ax = plt.subplots(figsize=(8, 4))
|
fig, ax = plt.subplots(figsize=(8, 4))
|
||||||
df.boxplot(column="handle_time", by="queue_skill", ax=ax, showfliers=False)
|
df.boxplot(column="handle_time", by="queue_skill", ax=ax, showfliers=False)
|
||||||
|
|
||||||
ax.set_xlabel("Skill / Cola")
|
ax.set_xlabel("Skill / Queue")
|
||||||
ax.set_ylabel("AHT (segundos)")
|
ax.set_ylabel("AHT (seconds)")
|
||||||
ax.set_title("Distribución de AHT por skill")
|
ax.set_title("AHT distribution by skill")
|
||||||
plt.suptitle("")
|
plt.suptitle("")
|
||||||
plt.xticks(rotation=45, ha="right")
|
plt.xticks(rotation=45, ha="right")
|
||||||
ax.grid(axis="y", alpha=0.3)
|
ax.grid(axis="y", alpha=0.3)
|
||||||
@@ -557,14 +553,14 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
def plot_resolution_funnel_by_skill(self) -> Axes:
|
def plot_resolution_funnel_by_skill(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Funnel / barras apiladas de Talk + Hold + ACW por skill (P50).
|
Funnel / stacked bars of Talk + Hold + ACW by skill (P50).
|
||||||
|
|
||||||
Permite ver el equilibrio de tiempos por skill.
|
Allows viewing the time balance by skill.
|
||||||
"""
|
"""
|
||||||
p50 = self.talk_hold_acw_p50_by_skill()
|
p50 = self.talk_hold_acw_p50_by_skill()
|
||||||
if p50.empty:
|
if p50.empty:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos para funnel", ha="center", va="center")
|
ax.text(0.5, 0.5, "No data for funnel", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
@@ -583,27 +579,26 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
ax.set_xticks(x)
|
ax.set_xticks(x)
|
||||||
ax.set_xticklabels(skills, rotation=45, ha="right")
|
ax.set_xticklabels(skills, rotation=45, ha="right")
|
||||||
ax.set_ylabel("Segundos")
|
ax.set_ylabel("Seconds")
|
||||||
ax.set_title("Funnel de resolución (P50) por skill")
|
ax.set_title("Resolution funnel (P50) by skill")
|
||||||
ax.legend()
|
ax.legend()
|
||||||
ax.grid(axis="y", alpha=0.3)
|
ax.grid(axis="y", alpha=0.3)
|
||||||
|
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
# Métricas por skill (para consistencia frontend cached/fresh)
|
# Metrics by skill (for frontend cached/fresh consistency)
|
||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def metrics_by_skill(self) -> List[Dict[str, Any]]:
|
def metrics_by_skill(self) -> List[Dict[str, Any]]:
|
||||||
"""
|
"""
|
||||||
Calcula métricas operacionales por skill:
|
Calculates operational metrics by skill:
|
||||||
- transfer_rate: % de interacciones con transfer_flag == True
|
- transfer_rate: % of interactions with transfer_flag == True
|
||||||
- abandonment_rate: % de interacciones abandonadas
|
- abandonment_rate: % of abandoned interactions
|
||||||
- fcr_tecnico: 100 - transfer_rate (sin transferencia)
|
- fcr_tecnico: 100 - transfer_rate (without transfer)
|
||||||
- fcr_real: % sin transferencia Y sin recontacto 7d (si hay datos)
|
- fcr_real: % without transfer AND without 7d re-contact (if there is data)
|
||||||
- volume: número de interacciones
|
- volume: number of interactions
|
||||||
|
|
||||||
Devuelve una lista de dicts, uno por skill, para que el frontend
|
Returns a list of dicts, one per skill, so that the frontend has access to real metrics by skill (not estimated).
|
||||||
tenga acceso a las métricas reales por skill (no estimadas).
|
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if df.empty:
|
if df.empty:
|
||||||
@@ -611,14 +606,14 @@ class OperationalPerformanceMetrics:
|
|||||||
|
|
||||||
results = []
|
results = []
|
||||||
|
|
||||||
# Detectar columna de abandono
|
# Detect abandonment column
|
||||||
abandon_col = None
|
abandon_col = None
|
||||||
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
|
for col_name in ["is_abandoned", "abandoned_flag", "abandoned"]:
|
||||||
if col_name in df.columns:
|
if col_name in df.columns:
|
||||||
abandon_col = col_name
|
abandon_col = col_name
|
||||||
break
|
break
|
||||||
|
|
||||||
# Detectar columna de repeat_call_7d para FCR real
|
# Detect repeat_call_7d column for real FCR
|
||||||
repeat_col = None
|
repeat_col = None
|
||||||
for col_name in ["repeat_call_7d", "repeat_7d", "is_repeat_7d"]:
|
for col_name in ["repeat_call_7d", "repeat_7d", "is_repeat_7d"]:
|
||||||
if col_name in df.columns:
|
if col_name in df.columns:
|
||||||
@@ -637,7 +632,7 @@ class OperationalPerformanceMetrics:
|
|||||||
else:
|
else:
|
||||||
transfer_rate = 0.0
|
transfer_rate = 0.0
|
||||||
|
|
||||||
# FCR Técnico = 100 - transfer_rate
|
# Technical FCR = 100 - transfer_rate
|
||||||
fcr_tecnico = float(round(100.0 - transfer_rate, 2))
|
fcr_tecnico = float(round(100.0 - transfer_rate, 2))
|
||||||
|
|
||||||
# Abandonment rate
|
# Abandonment rate
|
||||||
@@ -656,7 +651,7 @@ class OperationalPerformanceMetrics:
|
|||||||
abandoned = int(abandon_mask.sum())
|
abandoned = int(abandon_mask.sum())
|
||||||
abandonment_rate = float(round(abandoned / total * 100, 2))
|
abandonment_rate = float(round(abandoned / total * 100, 2))
|
||||||
|
|
||||||
# FCR Real (sin transferencia Y sin recontacto 7d)
|
# Real FCR (without transfer AND without 7d re-contact)
|
||||||
fcr_real = fcr_tecnico # default to fcr_tecnico if no repeat data
|
fcr_real = fcr_tecnico # default to fcr_tecnico if no repeat data
|
||||||
if repeat_col and "transfer_flag" in group.columns:
|
if repeat_col and "transfer_flag" in group.columns:
|
||||||
repeat_data = group[repeat_col]
|
repeat_data = group[repeat_col]
|
||||||
@@ -670,13 +665,13 @@ class OperationalPerformanceMetrics:
|
|||||||
else:
|
else:
|
||||||
repeat_mask = pd.to_numeric(repeat_data, errors="coerce").fillna(0) > 0
|
repeat_mask = pd.to_numeric(repeat_data, errors="coerce").fillna(0) > 0
|
||||||
|
|
||||||
# FCR Real: no transfer AND no repeat
|
# Real FCR: no transfer AND no repeat
|
||||||
fcr_real_mask = (~group["transfer_flag"]) & (~repeat_mask)
|
fcr_real_mask = (~group["transfer_flag"]) & (~repeat_mask)
|
||||||
fcr_real_count = fcr_real_mask.sum()
|
fcr_real_count = fcr_real_mask.sum()
|
||||||
fcr_real = float(round(fcr_real_count / total * 100, 2))
|
fcr_real = float(round(fcr_real_count / total * 100, 2))
|
||||||
|
|
||||||
# AHT Mean (promedio de handle_time sobre registros válidos)
|
# AHT Mean (average of handle_time over valid records)
|
||||||
# Filtramos solo registros 'valid' (excluye noise/zombie) para consistencia
|
# Filter only 'valid' records (excludes noise/zombie) for consistency
|
||||||
if "_is_valid_for_cv" in group.columns:
|
if "_is_valid_for_cv" in group.columns:
|
||||||
valid_records = group[group["_is_valid_for_cv"]]
|
valid_records = group[group["_is_valid_for_cv"]]
|
||||||
else:
|
else:
|
||||||
@@ -687,15 +682,15 @@ class OperationalPerformanceMetrics:
|
|||||||
else:
|
else:
|
||||||
aht_mean = 0.0
|
aht_mean = 0.0
|
||||||
|
|
||||||
# AHT Total (promedio de handle_time sobre TODOS los registros)
|
# AHT Total (average of handle_time over ALL records)
|
||||||
# Incluye NOISE, ZOMBIE, ABANDON - solo para información/comparación
|
# Includes NOISE, ZOMBIE, ABANDON - for information/comparison only
|
||||||
if len(group) > 0 and "handle_time" in group.columns:
|
if len(group) > 0 and "handle_time" in group.columns:
|
||||||
aht_total = float(round(group["handle_time"].mean(), 2))
|
aht_total = float(round(group["handle_time"].mean(), 2))
|
||||||
else:
|
else:
|
||||||
aht_total = 0.0
|
aht_total = 0.0
|
||||||
|
|
||||||
# Hold Time Mean (promedio de hold_time sobre registros válidos)
|
# Hold Time Mean (average of hold_time over valid records)
|
||||||
# Consistente con fresh path que usa MEAN, no P50
|
# Consistent with fresh path that uses MEAN, not P50
|
||||||
if len(valid_records) > 0 and "hold_time" in valid_records.columns:
|
if len(valid_records) > 0 and "hold_time" in valid_records.columns:
|
||||||
hold_time_mean = float(round(valid_records["hold_time"].mean(), 2))
|
hold_time_mean = float(round(valid_records["hold_time"].mean(), 2))
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -24,11 +24,10 @@ REQUIRED_COLUMNS_SAT: List[str] = [
|
|||||||
@dataclass
|
@dataclass
|
||||||
class SatisfactionExperienceMetrics:
|
class SatisfactionExperienceMetrics:
|
||||||
"""
|
"""
|
||||||
Dimensión 3: SATISFACCIÓN y EXPERIENCIA
|
Dimension 3: SATISFACTION and EXPERIENCE
|
||||||
|
|
||||||
Todas las columnas de satisfacción (csat/nps/ces/aht) son OPCIONALES.
|
All satisfaction columns (csat/nps/ces/aht) are OPTIONAL.
|
||||||
Si no están, las métricas que las usan devuelven vacío/NaN pero
|
If they are not present, the metrics that use them return empty/NaN but never break the pipeline.
|
||||||
nunca rompen el pipeline.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
df: pd.DataFrame
|
df: pd.DataFrame
|
||||||
@@ -44,7 +43,7 @@ class SatisfactionExperienceMetrics:
|
|||||||
missing = [c for c in REQUIRED_COLUMNS_SAT if c not in self.df.columns]
|
missing = [c for c in REQUIRED_COLUMNS_SAT if c not in self.df.columns]
|
||||||
if missing:
|
if missing:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"Faltan columnas obligatorias para SatisfactionExperienceMetrics: {missing}"
|
f"Missing required columns for SatisfactionExperienceMetrics: {missing}"
|
||||||
)
|
)
|
||||||
|
|
||||||
def _prepare_data(self) -> None:
|
def _prepare_data(self) -> None:
|
||||||
@@ -52,7 +51,7 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
|
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
|
||||||
|
|
||||||
# Duraciones base siempre existen
|
# Base durations always exist
|
||||||
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
|
for col in ["duration_talk", "hold_time", "wrap_up_time"]:
|
||||||
df[col] = pd.to_numeric(df[col], errors="coerce")
|
df[col] = pd.to_numeric(df[col], errors="coerce")
|
||||||
|
|
||||||
@@ -63,16 +62,16 @@ class SatisfactionExperienceMetrics:
|
|||||||
+ df["wrap_up_time"].fillna(0)
|
+ df["wrap_up_time"].fillna(0)
|
||||||
)
|
)
|
||||||
|
|
||||||
# csat_score opcional
|
# csat_score optional
|
||||||
df["csat_score"] = pd.to_numeric(df.get("csat_score", np.nan), errors="coerce")
|
df["csat_score"] = pd.to_numeric(df.get("csat_score", np.nan), errors="coerce")
|
||||||
|
|
||||||
# aht opcional: si existe columna explícita la usamos, si no usamos handle_time
|
# aht optional: if explicit column exists we use it, otherwise we use handle_time
|
||||||
if "aht" in df.columns:
|
if "aht" in df.columns:
|
||||||
df["aht"] = pd.to_numeric(df["aht"], errors="coerce")
|
df["aht"] = pd.to_numeric(df["aht"], errors="coerce")
|
||||||
else:
|
else:
|
||||||
df["aht"] = df["handle_time"]
|
df["aht"] = df["handle_time"]
|
||||||
|
|
||||||
# NPS / CES opcionales
|
# NPS / CES optional
|
||||||
df["nps_score"] = pd.to_numeric(df.get("nps_score", np.nan), errors="coerce")
|
df["nps_score"] = pd.to_numeric(df.get("nps_score", np.nan), errors="coerce")
|
||||||
df["ces_score"] = pd.to_numeric(df.get("ces_score", np.nan), errors="coerce")
|
df["ces_score"] = pd.to_numeric(df.get("ces_score", np.nan), errors="coerce")
|
||||||
|
|
||||||
@@ -90,8 +89,8 @@ class SatisfactionExperienceMetrics:
|
|||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def csat_avg_by_skill_channel(self) -> pd.DataFrame:
|
def csat_avg_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
CSAT promedio por skill/canal.
|
Average CSAT by skill/channel.
|
||||||
Si no hay csat_score, devuelve DataFrame vacío.
|
If there is no csat_score, returns empty DataFrame.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
||||||
@@ -115,7 +114,7 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def nps_avg_by_skill_channel(self) -> pd.DataFrame:
|
def nps_avg_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
NPS medio por skill/canal, si existe nps_score.
|
Average NPS by skill/channel, if nps_score exists.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "nps_score" not in df.columns or df["nps_score"].notna().sum() == 0:
|
if "nps_score" not in df.columns or df["nps_score"].notna().sum() == 0:
|
||||||
@@ -139,7 +138,7 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def ces_avg_by_skill_channel(self) -> pd.DataFrame:
|
def ces_avg_by_skill_channel(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
CES medio por skill/canal, si existe ces_score.
|
Average CES by skill/channel, if ces_score exists.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "ces_score" not in df.columns or df["ces_score"].notna().sum() == 0:
|
if "ces_score" not in df.columns or df["ces_score"].notna().sum() == 0:
|
||||||
@@ -163,11 +162,11 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def csat_global(self) -> float:
|
def csat_global(self) -> float:
|
||||||
"""
|
"""
|
||||||
CSAT medio global (todas las interacciones).
|
Global average CSAT (all interactions).
|
||||||
|
|
||||||
Usa la columna opcional `csat_score`:
|
Uses the optional `csat_score` column:
|
||||||
- Si no existe, devuelve NaN.
|
- If it does not exist, returns NaN.
|
||||||
- Si todos los valores son NaN / vacíos, devuelve NaN.
|
- If all values are NaN / empty, returns NaN.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "csat_score" not in df.columns:
|
if "csat_score" not in df.columns:
|
||||||
@@ -183,8 +182,8 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def csat_aht_correlation(self) -> Dict[str, Any]:
|
def csat_aht_correlation(self) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Correlación Pearson CSAT vs AHT.
|
Pearson correlation CSAT vs AHT.
|
||||||
Si falta csat o aht, o no hay varianza, devuelve NaN y código adecuado.
|
If csat or aht is missing, or there is no variance, returns NaN and appropriate code.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
||||||
@@ -216,8 +215,8 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def csat_aht_skill_summary(self) -> pd.DataFrame:
|
def csat_aht_skill_summary(self) -> pd.DataFrame:
|
||||||
"""
|
"""
|
||||||
Resumen por skill con clasificación del "sweet spot".
|
Summary by skill with "sweet spot" classification.
|
||||||
Si falta csat o aht, devuelve DataFrame vacío.
|
If csat or aht is missing, returns empty DataFrame.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
|
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
|
||||||
@@ -258,20 +257,20 @@ class SatisfactionExperienceMetrics:
|
|||||||
# ------------------------------------------------------------------ #
|
# ------------------------------------------------------------------ #
|
||||||
def plot_csat_vs_aht_scatter(self) -> Axes:
|
def plot_csat_vs_aht_scatter(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Scatter CSAT vs AHT por skill.
|
Scatter CSAT vs AHT by skill.
|
||||||
Si no hay datos suficientes, devuelve un Axes con mensaje.
|
If there is insufficient data, returns an Axes with message.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
|
if df["csat_score"].notna().sum() == 0 or df["aht"].notna().sum() == 0:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center")
|
ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
df = df.dropna(subset=["csat_score", "aht"]).copy()
|
df = df.dropna(subset=["csat_score", "aht"]).copy()
|
||||||
if df.empty:
|
if df.empty:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos de CSAT/AHT", ha="center", va="center")
|
ax.text(0.5, 0.5, "No CSAT/AHT data", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
@@ -280,9 +279,9 @@ class SatisfactionExperienceMetrics:
|
|||||||
for skill, sub in df.groupby("queue_skill"):
|
for skill, sub in df.groupby("queue_skill"):
|
||||||
ax.scatter(sub["aht"], sub["csat_score"], label=skill, alpha=0.7)
|
ax.scatter(sub["aht"], sub["csat_score"], label=skill, alpha=0.7)
|
||||||
|
|
||||||
ax.set_xlabel("AHT (segundos)")
|
ax.set_xlabel("AHT (seconds)")
|
||||||
ax.set_ylabel("CSAT")
|
ax.set_ylabel("CSAT")
|
||||||
ax.set_title("CSAT vs AHT por skill")
|
ax.set_title("CSAT vs AHT by skill")
|
||||||
ax.grid(alpha=0.3)
|
ax.grid(alpha=0.3)
|
||||||
ax.legend(title="Skill", bbox_to_anchor=(1.05, 1), loc="upper left")
|
ax.legend(title="Skill", bbox_to_anchor=(1.05, 1), loc="upper left")
|
||||||
|
|
||||||
@@ -291,28 +290,28 @@ class SatisfactionExperienceMetrics:
|
|||||||
|
|
||||||
def plot_csat_distribution(self) -> Axes:
|
def plot_csat_distribution(self) -> Axes:
|
||||||
"""
|
"""
|
||||||
Histograma de CSAT.
|
CSAT histogram.
|
||||||
Si no hay csat_score, devuelve un Axes con mensaje.
|
If there is no csat_score, returns an Axes with message.
|
||||||
"""
|
"""
|
||||||
df = self.df
|
df = self.df
|
||||||
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
if "csat_score" not in df.columns or df["csat_score"].notna().sum() == 0:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos de CSAT", ha="center", va="center")
|
ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
df = df.dropna(subset=["csat_score"]).copy()
|
df = df.dropna(subset=["csat_score"]).copy()
|
||||||
if df.empty:
|
if df.empty:
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
ax.text(0.5, 0.5, "Sin datos de CSAT", ha="center", va="center")
|
ax.text(0.5, 0.5, "No CSAT data", ha="center", va="center")
|
||||||
ax.set_axis_off()
|
ax.set_axis_off()
|
||||||
return ax
|
return ax
|
||||||
|
|
||||||
fig, ax = plt.subplots(figsize=(6, 4))
|
fig, ax = plt.subplots(figsize=(6, 4))
|
||||||
ax.hist(df["csat_score"], bins=10, alpha=0.7)
|
ax.hist(df["csat_score"], bins=10, alpha=0.7)
|
||||||
ax.set_xlabel("CSAT")
|
ax.set_xlabel("CSAT")
|
||||||
ax.set_ylabel("Frecuencia")
|
ax.set_ylabel("Frequency")
|
||||||
ax.set_title("Distribución de CSAT")
|
ax.set_title("CSAT distribution")
|
||||||
ax.grid(axis="y", alpha=0.3)
|
ax.grid(axis="y", alpha=0.3)
|
||||||
|
|
||||||
return ax
|
return ax
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ import {
|
|||||||
// RED FLAGS CONFIGURATION AND DETECTION
|
// RED FLAGS CONFIGURATION AND DETECTION
|
||||||
// ============================================
|
// ============================================
|
||||||
|
|
||||||
// v3.5: Configuración de Red Flags
|
// v3.5: Red Flags Configuration
|
||||||
interface RedFlagConfig {
|
interface RedFlagConfig {
|
||||||
id: string;
|
id: string;
|
||||||
label: string;
|
label: string;
|
||||||
@@ -41,51 +41,51 @@ interface RedFlagConfig {
|
|||||||
const RED_FLAG_CONFIGS: RedFlagConfig[] = [
|
const RED_FLAG_CONFIGS: RedFlagConfig[] = [
|
||||||
{
|
{
|
||||||
id: 'cv_high',
|
id: 'cv_high',
|
||||||
label: 'CV AHT Crítico',
|
label: 'Critical AHT CV',
|
||||||
shortLabel: 'CV',
|
shortLabel: 'CV',
|
||||||
threshold: 120,
|
threshold: 120,
|
||||||
operator: '>',
|
operator: '>',
|
||||||
getValue: (q) => q.cv_aht,
|
getValue: (q) => q.cv_aht,
|
||||||
format: (v) => `${v.toFixed(0)}%`,
|
format: (v) => `${v.toFixed(0)}%`,
|
||||||
color: 'red',
|
color: 'red',
|
||||||
description: 'Variabilidad extrema - procesos impredecibles'
|
description: 'Extreme variability - unpredictable processes'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'transfer_high',
|
id: 'transfer_high',
|
||||||
label: 'Transfer Excesivo',
|
label: 'Excessive Transfer',
|
||||||
shortLabel: 'Transfer',
|
shortLabel: 'Transfer',
|
||||||
threshold: 50,
|
threshold: 50,
|
||||||
operator: '>',
|
operator: '>',
|
||||||
getValue: (q) => q.transfer_rate,
|
getValue: (q) => q.transfer_rate,
|
||||||
format: (v) => `${v.toFixed(0)}%`,
|
format: (v) => `${v.toFixed(0)}%`,
|
||||||
color: 'orange',
|
color: 'orange',
|
||||||
description: 'Alta complejidad - requiere escalado frecuente'
|
description: 'High complexity - requires frequent escalation'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'volume_low',
|
id: 'volume_low',
|
||||||
label: 'Volumen Insuficiente',
|
label: 'Insufficient Volume',
|
||||||
shortLabel: 'Vol',
|
shortLabel: 'Vol',
|
||||||
threshold: 50,
|
threshold: 50,
|
||||||
operator: '<',
|
operator: '<',
|
||||||
getValue: (q) => q.volume,
|
getValue: (q) => q.volume,
|
||||||
format: (v) => v.toLocaleString(),
|
format: (v) => v.toLocaleString(),
|
||||||
color: 'slate',
|
color: 'slate',
|
||||||
description: 'ROI negativo - volumen no justifica inversión'
|
description: 'Negative ROI - volume doesn\'t justify investment'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'valid_low',
|
id: 'valid_low',
|
||||||
label: 'Calidad Datos Baja',
|
label: 'Low Data Quality',
|
||||||
shortLabel: 'Valid',
|
shortLabel: 'Valid',
|
||||||
threshold: 30,
|
threshold: 30,
|
||||||
operator: '<',
|
operator: '<',
|
||||||
getValue: (q) => q.volume > 0 ? (q.volumeValid / q.volume) * 100 : 0,
|
getValue: (q) => q.volume > 0 ? (q.volumeValid / q.volume) * 100 : 0,
|
||||||
format: (v) => `${v.toFixed(0)}%`,
|
format: (v) => `${v.toFixed(0)}%`,
|
||||||
color: 'amber',
|
color: 'amber',
|
||||||
description: 'Datos poco fiables - métricas distorsionadas'
|
description: 'Unreliable data - distorted metrics'
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
// v3.5: Detectar red flags de una cola
|
// v3.5: Detect red flags for a queue
|
||||||
interface DetectedRedFlag {
|
interface DetectedRedFlag {
|
||||||
config: RedFlagConfig;
|
config: RedFlagConfig;
|
||||||
value: number;
|
value: number;
|
||||||
@@ -108,7 +108,7 @@ function detectRedFlags(queue: OriginalQueueMetrics): DetectedRedFlag[] {
|
|||||||
return flags;
|
return flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
// v3.5: Componente de badge de Red Flag individual
|
// v3.5: Individual Red Flag badge component
|
||||||
function RedFlagBadge({ flag, size = 'sm' }: { flag: DetectedRedFlag; size?: 'sm' | 'md' }) {
|
function RedFlagBadge({ flag, size = 'sm' }: { flag: DetectedRedFlag; size?: 'sm' | 'md' }) {
|
||||||
const sizeClasses = size === 'md' ? 'px-2 py-1 text-xs' : 'px-1.5 py-0.5 text-[10px]';
|
const sizeClasses = size === 'md' ? 'px-2 py-1 text-xs' : 'px-1.5 py-0.5 text-[10px]';
|
||||||
|
|
||||||
|
|||||||
@@ -570,12 +570,16 @@
|
|||||||
"humanOnlyAction": "Maintain human management, evaluate periodically",
|
"humanOnlyAction": "Maintain human management, evaluate periodically",
|
||||||
"redFlags": {
|
"redFlags": {
|
||||||
"cvCritical": "Critical AHT CV",
|
"cvCritical": "Critical AHT CV",
|
||||||
|
"cvCriticalShort": "CV",
|
||||||
"cvCriticalDesc": "Extreme variability - unpredictable processes",
|
"cvCriticalDesc": "Extreme variability - unpredictable processes",
|
||||||
"transferExcessive": "Excessive Transfer",
|
"transferExcessive": "Excessive Transfer",
|
||||||
|
"transferExcessiveShort": "Transfer",
|
||||||
"transferExcessiveDesc": "High complexity - requires frequent escalation",
|
"transferExcessiveDesc": "High complexity - requires frequent escalation",
|
||||||
"volumeInsufficient": "Insufficient Volume",
|
"volumeInsufficient": "Insufficient Volume",
|
||||||
|
"volumeInsufficientShort": "Vol",
|
||||||
"volumeInsufficientDesc": "Negative ROI - volume doesn't justify investment",
|
"volumeInsufficientDesc": "Negative ROI - volume doesn't justify investment",
|
||||||
"dataQualityLow": "Low Data Quality",
|
"dataQualityLow": "Low Data Quality",
|
||||||
|
"dataQualityLowShort": "Valid",
|
||||||
"dataQualityLowDesc": "Unreliable data - distorted metrics",
|
"dataQualityLowDesc": "Unreliable data - distorted metrics",
|
||||||
"threshold": "(threshold: {{operator}}{{value}})"
|
"threshold": "(threshold: {{operator}}{{value}})"
|
||||||
},
|
},
|
||||||
@@ -814,6 +818,33 @@
|
|||||||
"roiBad": "Marginal ROI, evaluate other benefits",
|
"roiBad": "Marginal ROI, evaluate other benefits",
|
||||||
"resolution": "Resolution",
|
"resolution": "Resolution",
|
||||||
"dataQuality": "Data Quality"
|
"dataQuality": "Data Quality"
|
||||||
|
},
|
||||||
|
"subFactors": {
|
||||||
|
"repeatability": "Repeatability",
|
||||||
|
"repeatabilityDisplayName": "Repeatability",
|
||||||
|
"repeatabilityDescription": "Monthly volume: {{volume}} interactions",
|
||||||
|
"predictability": "Predictability",
|
||||||
|
"predictabilityDisplayName": "Predictability",
|
||||||
|
"predictabilityDescription": "AHT CV: {{cv}}%, Escalation: {{esc}}%",
|
||||||
|
"structuring": "Structuring",
|
||||||
|
"structuringDisplayName": "Structuring",
|
||||||
|
"structuringDescription": "{{pct}}% structured fields",
|
||||||
|
"inverseComplexity": "Inverse Complexity",
|
||||||
|
"inverseComplexityDisplayName": "Inverse Complexity",
|
||||||
|
"inverseComplexityDescription": "{{pct}}% exceptions",
|
||||||
|
"stability": "Stability",
|
||||||
|
"stabilityDisplayName": "Stability",
|
||||||
|
"stabilityDescription": "{{pct}}% off-hours",
|
||||||
|
"roiSavings": "ROI",
|
||||||
|
"roiSavingsDisplayName": "ROI",
|
||||||
|
"roiSavingsDescription": "€{{amount}}K annual potential savings",
|
||||||
|
"interpretations": {
|
||||||
|
"excellentForAutomation": "Excellent candidate for complete automation (Automate)",
|
||||||
|
"goodForAssistance": "Good candidate for agentic assistance (Assist)",
|
||||||
|
"candidateForAugmentation": "Candidate for human augmentation (Augment)",
|
||||||
|
"notRecommended": "Not recommended for automation at this time",
|
||||||
|
"bronzeAnalysis": "Bronze analysis does not include Agentic Readiness Score"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"economicModel": {
|
"economicModel": {
|
||||||
|
|||||||
@@ -570,12 +570,16 @@
|
|||||||
"humanOnlyAction": "Mantener gestión humana, evaluar periódicamente",
|
"humanOnlyAction": "Mantener gestión humana, evaluar periódicamente",
|
||||||
"redFlags": {
|
"redFlags": {
|
||||||
"cvCritical": "CV AHT Crítico",
|
"cvCritical": "CV AHT Crítico",
|
||||||
|
"cvCriticalShort": "CV",
|
||||||
"cvCriticalDesc": "Variabilidad extrema - procesos impredecibles",
|
"cvCriticalDesc": "Variabilidad extrema - procesos impredecibles",
|
||||||
"transferExcessive": "Transfer Excesivo",
|
"transferExcessive": "Transfer Excesivo",
|
||||||
|
"transferExcessiveShort": "Transfer",
|
||||||
"transferExcessiveDesc": "Alta complejidad - requiere escalado frecuente",
|
"transferExcessiveDesc": "Alta complejidad - requiere escalado frecuente",
|
||||||
"volumeInsufficient": "Volumen Insuficiente",
|
"volumeInsufficient": "Volumen Insuficiente",
|
||||||
|
"volumeInsufficientShort": "Vol",
|
||||||
"volumeInsufficientDesc": "ROI negativo - volumen no justifica inversión",
|
"volumeInsufficientDesc": "ROI negativo - volumen no justifica inversión",
|
||||||
"dataQualityLow": "Calidad Datos Baja",
|
"dataQualityLow": "Calidad Datos Baja",
|
||||||
|
"dataQualityLowShort": "Valid",
|
||||||
"dataQualityLowDesc": "Datos poco fiables - métricas distorsionadas",
|
"dataQualityLowDesc": "Datos poco fiables - métricas distorsionadas",
|
||||||
"threshold": "(umbral: {{operator}}{{value}})"
|
"threshold": "(umbral: {{operator}}{{value}})"
|
||||||
},
|
},
|
||||||
@@ -814,6 +818,33 @@
|
|||||||
"roiBad": "ROI marginal, evaluar otros beneficios",
|
"roiBad": "ROI marginal, evaluar otros beneficios",
|
||||||
"resolution": "Resolutividad",
|
"resolution": "Resolutividad",
|
||||||
"dataQuality": "Calidad Datos"
|
"dataQuality": "Calidad Datos"
|
||||||
|
},
|
||||||
|
"subFactors": {
|
||||||
|
"repeatability": "Repetitividad",
|
||||||
|
"repeatabilityDisplayName": "Repetitividad",
|
||||||
|
"repeatabilityDescription": "Volumen mensual: {{volume}} interacciones",
|
||||||
|
"predictability": "Predictibilidad",
|
||||||
|
"predictabilityDisplayName": "Predictibilidad",
|
||||||
|
"predictabilityDescription": "CV AHT: {{cv}}%, Escalación: {{esc}}%",
|
||||||
|
"structuring": "Estructuración",
|
||||||
|
"structuringDisplayName": "Estructuración",
|
||||||
|
"structuringDescription": "{{pct}}% de campos estructurados",
|
||||||
|
"inverseComplexity": "Complejidad Inversa",
|
||||||
|
"inverseComplexityDisplayName": "Complejidad Inversa",
|
||||||
|
"inverseComplexityDescription": "{{pct}}% de excepciones",
|
||||||
|
"stability": "Estabilidad",
|
||||||
|
"stabilityDisplayName": "Estabilidad",
|
||||||
|
"stabilityDescription": "{{pct}}% fuera de horario",
|
||||||
|
"roiSavings": "ROI",
|
||||||
|
"roiSavingsDisplayName": "ROI",
|
||||||
|
"roiSavingsDescription": "€{{amount}}K ahorro potencial anual",
|
||||||
|
"interpretations": {
|
||||||
|
"excellentForAutomation": "Excelente candidato para automatización completa (Automate)",
|
||||||
|
"goodForAssistance": "Buen candidato para asistencia agéntica (Assist)",
|
||||||
|
"candidateForAugmentation": "Candidato para augmentación humana (Augment)",
|
||||||
|
"notRecommended": "No recomendado para automatización en este momento",
|
||||||
|
"bronzeAnalysis": "Análisis Bronze no incluye Agentic Readiness Score"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"economicModel": {
|
"economicModel": {
|
||||||
|
|||||||
@@ -1,20 +1,20 @@
|
|||||||
/**
|
/**
|
||||||
* Agentic Readiness Score v2.0
|
* Agentic Readiness Score v2.0
|
||||||
* Algoritmo basado en metodología de 6 dimensiones con normalización continua
|
* Algorithm based on 6-dimension methodology with continuous normalization
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import type { TierKey, SubFactor, AgenticReadinessResult, CustomerSegment } from '../types';
|
import type { TierKey, SubFactor, AgenticReadinessResult, CustomerSegment } from '../types';
|
||||||
import { AGENTIC_READINESS_WEIGHTS, AGENTIC_READINESS_THRESHOLDS } from '../constants';
|
import { AGENTIC_READINESS_WEIGHTS, AGENTIC_READINESS_THRESHOLDS } from '../constants';
|
||||||
|
|
||||||
export interface AgenticReadinessInput {
|
export interface AgenticReadinessInput {
|
||||||
// Datos básicos (SILVER)
|
// Basic data (SILVER)
|
||||||
volumen_mes: number;
|
volumen_mes: number;
|
||||||
aht_values: number[];
|
aht_values: number[];
|
||||||
escalation_rate: number;
|
escalation_rate: number;
|
||||||
cpi_humano: number;
|
cpi_humano: number;
|
||||||
volumen_anual: number;
|
volumen_anual: number;
|
||||||
|
|
||||||
// Datos avanzados (GOLD)
|
// Advanced data (GOLD)
|
||||||
structured_fields_pct?: number;
|
structured_fields_pct?: number;
|
||||||
exception_rate?: number;
|
exception_rate?: number;
|
||||||
hourly_distribution?: number[];
|
hourly_distribution?: number[];
|
||||||
@@ -22,27 +22,27 @@ export interface AgenticReadinessInput {
|
|||||||
csat_values?: number[];
|
csat_values?: number[];
|
||||||
motivo_contacto_entropy?: number;
|
motivo_contacto_entropy?: number;
|
||||||
resolucion_entropy?: number;
|
resolucion_entropy?: number;
|
||||||
|
|
||||||
// Tier
|
// Tier
|
||||||
tier: TierKey;
|
tier: TierKey;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 1: REPETITIVIDAD (25%)
|
* SUB-FACTOR 1: REPEATABILITY (25%)
|
||||||
* Basado en volumen mensual con normalización logística
|
* Based on monthly volume with logistic normalization
|
||||||
*/
|
*/
|
||||||
function calculateRepetitividadScore(volumen_mes: number): SubFactor {
|
function calculateRepeatabilityScore(volumen_mes: number): SubFactor {
|
||||||
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.repetitividad;
|
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.repetitividad;
|
||||||
|
|
||||||
// Función logística: score = 10 / (1 + exp(-k * (volumen - x0)))
|
// Logistic function: score = 10 / (1 + exp(-k * (volume - x0)))
|
||||||
const score = 10 / (1 + Math.exp(-k * (volumen_mes - x0)));
|
const score = 10 / (1 + Math.exp(-k * (volumen_mes - x0)));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'repetitividad',
|
name: 'repeatability',
|
||||||
displayName: 'Repetitividad',
|
displayName: 'Repeatability',
|
||||||
score: Math.round(score * 10) / 10,
|
score: Math.round(score * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.repetitividad,
|
weight: AGENTIC_READINESS_WEIGHTS.repetitividad,
|
||||||
description: `Volumen mensual: ${volumen_mes} interacciones`,
|
description: `Monthly volume: ${volumen_mes} interactions`,
|
||||||
details: {
|
details: {
|
||||||
volumen_mes,
|
volumen_mes,
|
||||||
threshold_medio: x0
|
threshold_medio: x0
|
||||||
@@ -51,58 +51,58 @@ function calculateRepetitividadScore(volumen_mes: number): SubFactor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 2: PREDICTIBILIDAD (20%)
|
* SUB-FACTOR 2: PREDICTABILITY (20%)
|
||||||
* Basado en variabilidad AHT + tasa de escalación + variabilidad input/output
|
* Based on AHT variability + escalation rate + input/output variability
|
||||||
*/
|
*/
|
||||||
function calculatePredictibilidadScore(
|
function calculatePredictabilityScore(
|
||||||
aht_values: number[],
|
aht_values: number[],
|
||||||
escalation_rate: number,
|
escalation_rate: number,
|
||||||
motivo_contacto_entropy?: number,
|
motivo_contacto_entropy?: number,
|
||||||
resolucion_entropy?: number
|
resolucion_entropy?: number
|
||||||
): SubFactor {
|
): SubFactor {
|
||||||
const thresholds = AGENTIC_READINESS_THRESHOLDS.predictibilidad;
|
const thresholds = AGENTIC_READINESS_THRESHOLDS.predictibilidad;
|
||||||
|
|
||||||
// 1. VARIABILIDAD AHT (40%)
|
// 1. AHT VARIABILITY (40%)
|
||||||
const aht_mean = aht_values.reduce((a, b) => a + b, 0) / aht_values.length;
|
const aht_mean = aht_values.reduce((a, b) => a + b, 0) / aht_values.length;
|
||||||
const aht_variance = aht_values.reduce((sum, val) => sum + Math.pow(val - aht_mean, 2), 0) / aht_values.length;
|
const aht_variance = aht_values.reduce((sum, val) => sum + Math.pow(val - aht_mean, 2), 0) / aht_values.length;
|
||||||
const aht_std = Math.sqrt(aht_variance);
|
const aht_std = Math.sqrt(aht_variance);
|
||||||
const cv_aht = aht_std / aht_mean;
|
const cv_aht = aht_std / aht_mean;
|
||||||
|
|
||||||
// Normalizar CV a escala 0-10
|
// Normalize CV to 0-10 scale
|
||||||
const score_aht = Math.max(0, Math.min(10,
|
const score_aht = Math.max(0, Math.min(10,
|
||||||
10 * (1 - (cv_aht - thresholds.cv_aht_excellent) / (thresholds.cv_aht_poor - thresholds.cv_aht_excellent))
|
10 * (1 - (cv_aht - thresholds.cv_aht_excellent) / (thresholds.cv_aht_poor - thresholds.cv_aht_excellent))
|
||||||
));
|
));
|
||||||
|
|
||||||
// 2. TASA DE ESCALACIÓN (30%)
|
// 2. ESCALATION RATE (30%)
|
||||||
const score_escalacion = Math.max(0, Math.min(10,
|
const score_escalacion = Math.max(0, Math.min(10,
|
||||||
10 * (1 - escalation_rate / thresholds.escalation_poor)
|
10 * (1 - escalation_rate / thresholds.escalation_poor)
|
||||||
));
|
));
|
||||||
|
|
||||||
// 3. VARIABILIDAD INPUT/OUTPUT (30%)
|
// 3. INPUT/OUTPUT VARIABILITY (30%)
|
||||||
let score_variabilidad: number;
|
let score_variabilidad: number;
|
||||||
if (motivo_contacto_entropy !== undefined && resolucion_entropy !== undefined) {
|
if (motivo_contacto_entropy !== undefined && resolucion_entropy !== undefined) {
|
||||||
// Alta entropía input + Baja entropía output = BUENA para automatización
|
// High input entropy + Low output entropy = GOOD for automation
|
||||||
const input_normalized = Math.min(motivo_contacto_entropy / 3.0, 1.0);
|
const input_normalized = Math.min(motivo_contacto_entropy / 3.0, 1.0);
|
||||||
const output_normalized = Math.min(resolucion_entropy / 3.0, 1.0);
|
const output_normalized = Math.min(resolucion_entropy / 3.0, 1.0);
|
||||||
score_variabilidad = 10 * (input_normalized * (1 - output_normalized));
|
score_variabilidad = 10 * (input_normalized * (1 - output_normalized));
|
||||||
} else {
|
} else {
|
||||||
// Si no hay datos de entropía, usar promedio de AHT y escalación
|
// If no entropy data, use average of AHT and escalation
|
||||||
score_variabilidad = (score_aht + score_escalacion) / 2;
|
score_variabilidad = (score_aht + score_escalacion) / 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// PONDERACIÓN FINAL
|
// FINAL WEIGHTING
|
||||||
const predictibilidad = (
|
const predictabilidad = (
|
||||||
0.40 * score_aht +
|
0.40 * score_aht +
|
||||||
0.30 * score_escalacion +
|
0.30 * score_escalacion +
|
||||||
0.30 * score_variabilidad
|
0.30 * score_variabilidad
|
||||||
);
|
);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'predictibilidad',
|
name: 'predictability',
|
||||||
displayName: 'Predictibilidad',
|
displayName: 'Predictability',
|
||||||
score: Math.round(predictibilidad * 10) / 10,
|
score: Math.round(predictabilidad * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.predictibilidad,
|
weight: AGENTIC_READINESS_WEIGHTS.predictibilidad,
|
||||||
description: `CV AHT: ${(cv_aht * 100).toFixed(1)}%, Escalación: ${(escalation_rate * 100).toFixed(1)}%`,
|
description: `AHT CV: ${(cv_aht * 100).toFixed(1)}%, Escalation: ${(escalation_rate * 100).toFixed(1)}%`,
|
||||||
details: {
|
details: {
|
||||||
cv_aht: Math.round(cv_aht * 1000) / 1000,
|
cv_aht: Math.round(cv_aht * 1000) / 1000,
|
||||||
escalation_rate,
|
escalation_rate,
|
||||||
@@ -114,18 +114,18 @@ function calculatePredictibilidadScore(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 3: ESTRUCTURACIÓN (15%)
|
* SUB-FACTOR 3: STRUCTURING (15%)
|
||||||
* Porcentaje de campos estructurados vs texto libre
|
* Percentage of structured fields vs free text
|
||||||
*/
|
*/
|
||||||
function calculateEstructuracionScore(structured_fields_pct: number): SubFactor {
|
function calculateStructuringScore(structured_fields_pct: number): SubFactor {
|
||||||
const score = structured_fields_pct * 10;
|
const score = structured_fields_pct * 10;
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'estructuracion',
|
name: 'structuring',
|
||||||
displayName: 'Estructuración',
|
displayName: 'Structuring',
|
||||||
score: Math.round(score * 10) / 10,
|
score: Math.round(score * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.estructuracion,
|
weight: AGENTIC_READINESS_WEIGHTS.estructuracion,
|
||||||
description: `${(structured_fields_pct * 100).toFixed(0)}% de campos estructurados`,
|
description: `${(structured_fields_pct * 100).toFixed(0)}% structured fields`,
|
||||||
details: {
|
details: {
|
||||||
structured_fields_pct
|
structured_fields_pct
|
||||||
}
|
}
|
||||||
@@ -133,21 +133,21 @@ function calculateEstructuracionScore(structured_fields_pct: number): SubFactor
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 4: COMPLEJIDAD INVERSA (15%)
|
* SUB-FACTOR 4: INVERSE COMPLEXITY (15%)
|
||||||
* Basado en tasa de excepciones
|
* Based on exception rate
|
||||||
*/
|
*/
|
||||||
function calculateComplejidadInversaScore(exception_rate: number): SubFactor {
|
function calculateInverseComplexityScore(exception_rate: number): SubFactor {
|
||||||
// Menor tasa de excepciones → Mayor score
|
// Lower exception rate → Higher score
|
||||||
// < 5% → Excelente (score 10)
|
// < 5% → Excellent (score 10)
|
||||||
// > 30% → Muy complejo (score 0)
|
// > 30% → Very complex (score 0)
|
||||||
const score_excepciones = Math.max(0, Math.min(10, 10 * (1 - exception_rate / 0.30)));
|
const score_excepciones = Math.max(0, Math.min(10, 10 * (1 - exception_rate / 0.30)));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'complejidad_inversa',
|
name: 'inverseComplexity',
|
||||||
displayName: 'Complejidad Inversa',
|
displayName: 'Inverse Complexity',
|
||||||
score: Math.round(score_excepciones * 10) / 10,
|
score: Math.round(score_excepciones * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.complejidad_inversa,
|
weight: AGENTIC_READINESS_WEIGHTS.complejidad_inversa,
|
||||||
description: `${(exception_rate * 100).toFixed(1)}% de excepciones`,
|
description: `${(exception_rate * 100).toFixed(1)}% exceptions`,
|
||||||
details: {
|
details: {
|
||||||
exception_rate
|
exception_rate
|
||||||
}
|
}
|
||||||
@@ -155,15 +155,15 @@ function calculateComplejidadInversaScore(exception_rate: number): SubFactor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 5: ESTABILIDAD (10%)
|
* SUB-FACTOR 5: STABILITY (10%)
|
||||||
* Basado en distribución horaria y % llamadas fuera de horas
|
* Based on hourly distribution and % off-hours calls
|
||||||
*/
|
*/
|
||||||
function calculateEstabilidadScore(
|
function calculateStabilityScore(
|
||||||
hourly_distribution: number[],
|
hourly_distribution: number[],
|
||||||
off_hours_pct: number
|
off_hours_pct: number
|
||||||
): SubFactor {
|
): SubFactor {
|
||||||
// 1. UNIFORMIDAD DISTRIBUCIÓN HORARIA (60%)
|
// 1. HOURLY DISTRIBUTION UNIFORMITY (60%)
|
||||||
// Calcular entropía de Shannon
|
// Calculate Shannon entropy
|
||||||
const total = hourly_distribution.reduce((a, b) => a + b, 0);
|
const total = hourly_distribution.reduce((a, b) => a + b, 0);
|
||||||
let score_uniformidad = 0;
|
let score_uniformidad = 0;
|
||||||
let entropy_normalized = 0;
|
let entropy_normalized = 0;
|
||||||
@@ -175,23 +175,23 @@ function calculateEstabilidadScore(
|
|||||||
entropy_normalized = entropy / max_entropy;
|
entropy_normalized = entropy / max_entropy;
|
||||||
score_uniformidad = entropy_normalized * 10;
|
score_uniformidad = entropy_normalized * 10;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. % LLAMADAS FUERA DE HORAS (40%)
|
// 2. % OFF-HOURS CALLS (40%)
|
||||||
// Más llamadas fuera de horas → Mayor necesidad agentes → Mayor score
|
// More off-hours calls → Higher agent need → Higher score
|
||||||
const score_off_hours = Math.min(10, (off_hours_pct / 0.30) * 10);
|
const score_off_hours = Math.min(10, (off_hours_pct / 0.30) * 10);
|
||||||
|
|
||||||
// PONDERACIÓN
|
// WEIGHTING
|
||||||
const estabilidad = (
|
const estabilidad = (
|
||||||
0.60 * score_uniformidad +
|
0.60 * score_uniformidad +
|
||||||
0.40 * score_off_hours
|
0.40 * score_off_hours
|
||||||
);
|
);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'estabilidad',
|
name: 'stability',
|
||||||
displayName: 'Estabilidad',
|
displayName: 'Stability',
|
||||||
score: Math.round(estabilidad * 10) / 10,
|
score: Math.round(estabilidad * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.estabilidad,
|
weight: AGENTIC_READINESS_WEIGHTS.estabilidad,
|
||||||
description: `${(off_hours_pct * 100).toFixed(1)}% fuera de horario`,
|
description: `${(off_hours_pct * 100).toFixed(1)}% off-hours`,
|
||||||
details: {
|
details: {
|
||||||
entropy_normalized: Math.round(entropy_normalized * 1000) / 1000,
|
entropy_normalized: Math.round(entropy_normalized * 1000) / 1000,
|
||||||
off_hours_pct,
|
off_hours_pct,
|
||||||
@@ -203,7 +203,7 @@ function calculateEstabilidadScore(
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* SUB-FACTOR 6: ROI (15%)
|
* SUB-FACTOR 6: ROI (15%)
|
||||||
* Basado en ahorro potencial anual
|
* Based on annual potential savings
|
||||||
*/
|
*/
|
||||||
function calculateROIScore(
|
function calculateROIScore(
|
||||||
volumen_anual: number,
|
volumen_anual: number,
|
||||||
@@ -211,17 +211,17 @@ function calculateROIScore(
|
|||||||
automation_savings_pct: number = 0.70
|
automation_savings_pct: number = 0.70
|
||||||
): SubFactor {
|
): SubFactor {
|
||||||
const ahorro_anual = volumen_anual * cpi_humano * automation_savings_pct;
|
const ahorro_anual = volumen_anual * cpi_humano * automation_savings_pct;
|
||||||
|
|
||||||
// Normalización logística
|
// Logistic normalization
|
||||||
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.roi;
|
const { k, x0 } = AGENTIC_READINESS_THRESHOLDS.roi;
|
||||||
const score = 10 / (1 + Math.exp(-k * (ahorro_anual - x0)));
|
const score = 10 / (1 + Math.exp(-k * (ahorro_anual - x0)));
|
||||||
|
|
||||||
return {
|
return {
|
||||||
name: 'roi',
|
name: 'roi',
|
||||||
displayName: 'ROI',
|
displayName: 'ROI',
|
||||||
score: Math.round(score * 10) / 10,
|
score: Math.round(score * 10) / 10,
|
||||||
weight: AGENTIC_READINESS_WEIGHTS.roi,
|
weight: AGENTIC_READINESS_WEIGHTS.roi,
|
||||||
description: `€${(ahorro_anual / 1000).toFixed(0)}K ahorro potencial anual`,
|
description: `€${(ahorro_anual / 1000).toFixed(0)}K annual potential savings`,
|
||||||
details: {
|
details: {
|
||||||
ahorro_anual: Math.round(ahorro_anual),
|
ahorro_anual: Math.round(ahorro_anual),
|
||||||
volumen_anual,
|
volumen_anual,
|
||||||
@@ -232,98 +232,98 @@ function calculateROIScore(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* AJUSTE POR DISTRIBUCIÓN CSAT (Opcional, ±10%)
|
* CSAT DISTRIBUTION ADJUSTMENT (Optional, ±10%)
|
||||||
* Distribución normal → Proceso estable
|
* Normal distribution → Stable process
|
||||||
*/
|
*/
|
||||||
function calculateCSATDistributionAdjustment(csat_values: number[]): number {
|
function calculateCSATDistributionAdjustment(csat_values: number[]): number {
|
||||||
// Test de normalidad simplificado (basado en skewness y kurtosis)
|
// Simplified normality test (based on skewness and kurtosis)
|
||||||
const n = csat_values.length;
|
const n = csat_values.length;
|
||||||
const mean = csat_values.reduce((a, b) => a + b, 0) / n;
|
const mean = csat_values.reduce((a, b) => a + b, 0) / n;
|
||||||
const variance = csat_values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) / n;
|
const variance = csat_values.reduce((sum, val) => sum + Math.pow(val - mean, 2), 0) / n;
|
||||||
const std = Math.sqrt(variance);
|
const std = Math.sqrt(variance);
|
||||||
|
|
||||||
// Skewness
|
// Skewness
|
||||||
const skewness = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 3), 0) / n;
|
const skewness = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 3), 0) / n;
|
||||||
|
|
||||||
// Kurtosis
|
// Kurtosis
|
||||||
const kurtosis = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 4), 0) / n;
|
const kurtosis = csat_values.reduce((sum, val) => sum + Math.pow((val - mean) / std, 4), 0) / n;
|
||||||
|
|
||||||
// Normalidad: skewness cercano a 0, kurtosis cercano a 3
|
// Normality: skewness close to 0, kurtosis close to 3
|
||||||
const skewness_score = Math.max(0, 1 - Math.abs(skewness));
|
const skewness_score = Math.max(0, 1 - Math.abs(skewness));
|
||||||
const kurtosis_score = Math.max(0, 1 - Math.abs(kurtosis - 3) / 3);
|
const kurtosis_score = Math.max(0, 1 - Math.abs(kurtosis - 3) / 3);
|
||||||
const normality_score = (skewness_score + kurtosis_score) / 2;
|
const normality_score = (skewness_score + kurtosis_score) / 2;
|
||||||
|
|
||||||
// Ajuste: +5% si muy normal, -5% si muy anormal
|
// Adjustment: +5% if very normal, -5% if very abnormal
|
||||||
const adjustment = 1 + ((normality_score - 0.5) * 0.10);
|
const adjustment = 1 + ((normality_score - 0.5) * 0.10);
|
||||||
|
|
||||||
return adjustment;
|
return adjustment;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ALGORITMO COMPLETO (Tier GOLD)
|
* COMPLETE ALGORITHM (Tier GOLD)
|
||||||
*/
|
*/
|
||||||
export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput): AgenticReadinessResult {
|
export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput): AgenticReadinessResult {
|
||||||
const sub_factors: SubFactor[] = [];
|
const sub_factors: SubFactor[] = [];
|
||||||
|
|
||||||
// 1. REPETITIVIDAD
|
// 1. REPEATABILITY
|
||||||
sub_factors.push(calculateRepetitividadScore(data.volumen_mes));
|
sub_factors.push(calculateRepeatabilityScore(data.volumen_mes));
|
||||||
|
|
||||||
// 2. PREDICTIBILIDAD
|
// 2. PREDICTABILITY
|
||||||
sub_factors.push(calculatePredictibilidadScore(
|
sub_factors.push(calculatePredictabilityScore(
|
||||||
data.aht_values,
|
data.aht_values,
|
||||||
data.escalation_rate,
|
data.escalation_rate,
|
||||||
data.motivo_contacto_entropy,
|
data.motivo_contacto_entropy,
|
||||||
data.resolucion_entropy
|
data.resolucion_entropy
|
||||||
));
|
));
|
||||||
|
|
||||||
// 3. ESTRUCTURACIÓN
|
// 3. STRUCTURING
|
||||||
sub_factors.push(calculateEstructuracionScore(data.structured_fields_pct || 0.5));
|
sub_factors.push(calculateStructuringScore(data.structured_fields_pct || 0.5));
|
||||||
|
|
||||||
// 4. COMPLEJIDAD INVERSA
|
// 4. INVERSE COMPLEXITY
|
||||||
sub_factors.push(calculateComplejidadInversaScore(data.exception_rate || 0.15));
|
sub_factors.push(calculateInverseComplexityScore(data.exception_rate || 0.15));
|
||||||
|
|
||||||
// 5. ESTABILIDAD
|
// 5. STABILITY
|
||||||
sub_factors.push(calculateEstabilidadScore(
|
sub_factors.push(calculateStabilityScore(
|
||||||
data.hourly_distribution || Array(24).fill(1),
|
data.hourly_distribution || Array(24).fill(1),
|
||||||
data.off_hours_pct || 0.2
|
data.off_hours_pct || 0.2
|
||||||
));
|
));
|
||||||
|
|
||||||
// 6. ROI
|
// 6. ROI
|
||||||
sub_factors.push(calculateROIScore(
|
sub_factors.push(calculateROIScore(
|
||||||
data.volumen_anual,
|
data.volumen_anual,
|
||||||
data.cpi_humano
|
data.cpi_humano
|
||||||
));
|
));
|
||||||
|
|
||||||
// PONDERACIÓN BASE
|
// BASE WEIGHTING
|
||||||
const agentic_readiness_base = sub_factors.reduce(
|
const agentic_readiness_base = sub_factors.reduce(
|
||||||
(sum, factor) => sum + (factor.score * factor.weight),
|
(sum, factor) => sum + (factor.score * factor.weight),
|
||||||
0
|
0
|
||||||
);
|
);
|
||||||
|
|
||||||
// AJUSTE POR DISTRIBUCIÓN CSAT (Opcional)
|
// CSAT DISTRIBUTION ADJUSTMENT (Optional)
|
||||||
let agentic_readiness_final = agentic_readiness_base;
|
let agentic_readiness_final = agentic_readiness_base;
|
||||||
if (data.csat_values && data.csat_values.length > 10) {
|
if (data.csat_values && data.csat_values.length > 10) {
|
||||||
const adjustment = calculateCSATDistributionAdjustment(data.csat_values);
|
const adjustment = calculateCSATDistributionAdjustment(data.csat_values);
|
||||||
agentic_readiness_final = agentic_readiness_base * adjustment;
|
agentic_readiness_final = agentic_readiness_base * adjustment;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Limitar a rango 0-10
|
// Limit to 0-10 range
|
||||||
agentic_readiness_final = Math.max(0, Math.min(10, agentic_readiness_final));
|
agentic_readiness_final = Math.max(0, Math.min(10, agentic_readiness_final));
|
||||||
|
|
||||||
// Interpretación
|
// Interpretation
|
||||||
let interpretation = '';
|
let interpretation = '';
|
||||||
let confidence: 'high' | 'medium' | 'low' = 'high';
|
let confidence: 'high' | 'medium' | 'low' = 'high';
|
||||||
|
|
||||||
if (agentic_readiness_final >= 8) {
|
if (agentic_readiness_final >= 8) {
|
||||||
interpretation = 'Excelente candidato para automatización completa (Automate)';
|
interpretation = 'Excellent candidate for complete automation (Automate)';
|
||||||
} else if (agentic_readiness_final >= 5) {
|
} else if (agentic_readiness_final >= 5) {
|
||||||
interpretation = 'Buen candidato para asistencia agéntica (Assist)';
|
interpretation = 'Good candidate for agentic assistance (Assist)';
|
||||||
} else if (agentic_readiness_final >= 3) {
|
} else if (agentic_readiness_final >= 3) {
|
||||||
interpretation = 'Candidato para augmentación humana (Augment)';
|
interpretation = 'Candidate for human augmentation (Augment)';
|
||||||
} else {
|
} else {
|
||||||
interpretation = 'No recomendado para automatización en este momento';
|
interpretation = 'Not recommended for automation at this time';
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
score: Math.round(agentic_readiness_final * 10) / 10,
|
score: Math.round(agentic_readiness_final * 10) / 10,
|
||||||
sub_factors,
|
sub_factors,
|
||||||
@@ -334,45 +334,45 @@ export function calculateAgenticReadinessScoreGold(data: AgenticReadinessInput):
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ALGORITMO SIMPLIFICADO (Tier SILVER)
|
* SIMPLIFIED ALGORITHM (Tier SILVER)
|
||||||
*/
|
*/
|
||||||
export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput): AgenticReadinessResult {
|
export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput): AgenticReadinessResult {
|
||||||
const sub_factors: SubFactor[] = [];
|
const sub_factors: SubFactor[] = [];
|
||||||
|
|
||||||
// 1. REPETITIVIDAD (30%)
|
// 1. REPEATABILITY (30%)
|
||||||
const repetitividad = calculateRepetitividadScore(data.volumen_mes);
|
const repeatability = calculateRepeatabilityScore(data.volumen_mes);
|
||||||
repetitividad.weight = 0.30;
|
repeatability.weight = 0.30;
|
||||||
sub_factors.push(repetitividad);
|
sub_factors.push(repeatability);
|
||||||
|
|
||||||
// 2. PREDICTIBILIDAD SIMPLIFICADA (30%)
|
// 2. SIMPLIFIED PREDICTABILITY (30%)
|
||||||
const predictibilidad = calculatePredictibilidadScore(
|
const predictability = calculatePredictabilityScore(
|
||||||
data.aht_values,
|
data.aht_values,
|
||||||
data.escalation_rate
|
data.escalation_rate
|
||||||
);
|
);
|
||||||
predictibilidad.weight = 0.30;
|
predictability.weight = 0.30;
|
||||||
sub_factors.push(predictibilidad);
|
sub_factors.push(predictability);
|
||||||
|
|
||||||
// 3. ROI (40%)
|
// 3. ROI (40%)
|
||||||
const roi = calculateROIScore(data.volumen_anual, data.cpi_humano);
|
const roi = calculateROIScore(data.volumen_anual, data.cpi_humano);
|
||||||
roi.weight = 0.40;
|
roi.weight = 0.40;
|
||||||
sub_factors.push(roi);
|
sub_factors.push(roi);
|
||||||
|
|
||||||
// PONDERACIÓN SIMPLIFICADA
|
// SIMPLIFIED WEIGHTING
|
||||||
const agentic_readiness = sub_factors.reduce(
|
const agentic_readiness = sub_factors.reduce(
|
||||||
(sum, factor) => sum + (factor.score * factor.weight),
|
(sum, factor) => sum + (factor.score * factor.weight),
|
||||||
0
|
0
|
||||||
);
|
);
|
||||||
|
|
||||||
// Interpretación
|
// Interpretation
|
||||||
let interpretation = '';
|
let interpretation = '';
|
||||||
if (agentic_readiness >= 7) {
|
if (agentic_readiness >= 7) {
|
||||||
interpretation = 'Buen candidato para automatización';
|
interpretation = 'Good candidate for automation';
|
||||||
} else if (agentic_readiness >= 4) {
|
} else if (agentic_readiness >= 4) {
|
||||||
interpretation = 'Candidato para asistencia agéntica';
|
interpretation = 'Candidate for agentic assistance';
|
||||||
} else {
|
} else {
|
||||||
interpretation = 'Requiere análisis más profundo (considerar GOLD)';
|
interpretation = 'Requires deeper analysis (consider GOLD)';
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
score: Math.round(agentic_readiness * 10) / 10,
|
score: Math.round(agentic_readiness * 10) / 10,
|
||||||
sub_factors,
|
sub_factors,
|
||||||
@@ -383,7 +383,7 @@ export function calculateAgenticReadinessScoreSilver(data: AgenticReadinessInput
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* FUNCIÓN PRINCIPAL - Selecciona algoritmo según tier
|
* MAIN FUNCTION - Selects algorithm based on tier
|
||||||
*/
|
*/
|
||||||
export function calculateAgenticReadinessScore(data: AgenticReadinessInput): AgenticReadinessResult {
|
export function calculateAgenticReadinessScore(data: AgenticReadinessInput): AgenticReadinessResult {
|
||||||
if (data.tier === 'gold') {
|
if (data.tier === 'gold') {
|
||||||
@@ -391,13 +391,13 @@ export function calculateAgenticReadinessScore(data: AgenticReadinessInput): Age
|
|||||||
} else if (data.tier === 'silver') {
|
} else if (data.tier === 'silver') {
|
||||||
return calculateAgenticReadinessScoreSilver(data);
|
return calculateAgenticReadinessScoreSilver(data);
|
||||||
} else {
|
} else {
|
||||||
// BRONZE: Sin Agentic Readiness
|
// BRONZE: No Agentic Readiness
|
||||||
return {
|
return {
|
||||||
score: 0,
|
score: 0,
|
||||||
sub_factors: [],
|
sub_factors: [],
|
||||||
tier: 'bronze',
|
tier: 'bronze',
|
||||||
confidence: 'low',
|
confidence: 'low',
|
||||||
interpretation: 'Análisis Bronze no incluye Agentic Readiness Score'
|
interpretation: 'Bronze analysis does not include Agentic Readiness Score'
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -23,13 +23,13 @@ function safeNumber(value: any, fallback = 0): number {
|
|||||||
function normalizeAhtMetric(ahtSeconds: number): number {
|
function normalizeAhtMetric(ahtSeconds: number): number {
|
||||||
if (!Number.isFinite(ahtSeconds) || ahtSeconds <= 0) return 0;
|
if (!Number.isFinite(ahtSeconds) || ahtSeconds <= 0) return 0;
|
||||||
|
|
||||||
// Ajusta estos números si ves que tus AHTs reales son muy distintos
|
// Adjust these numbers if your actual AHTs are very different
|
||||||
const MIN_AHT = 300; // AHT muy bueno
|
const MIN_AHT = 300; // Very good AHT
|
||||||
const MAX_AHT = 1000; // AHT muy malo
|
const MAX_AHT = 1000; // Very bad AHT
|
||||||
|
|
||||||
const clamped = Math.max(MIN_AHT, Math.min(MAX_AHT, ahtSeconds));
|
const clamped = Math.max(MIN_AHT, Math.min(MAX_AHT, ahtSeconds));
|
||||||
const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (mejor) -> 1 (peor)
|
const ratio = (clamped - MIN_AHT) / (MAX_AHT - MIN_AHT); // 0 (better) -> 1 (worse)
|
||||||
const score = 100 - ratio * 100; // 100 (mejor) -> 0 (peor)
|
const score = 100 - ratio * 100; // 100 (better) -> 0 (worse)
|
||||||
|
|
||||||
return Math.round(score);
|
return Math.round(score);
|
||||||
}
|
}
|
||||||
@@ -74,7 +74,7 @@ function getTopLabel(
|
|||||||
return String(labels[maxIdx]);
|
return String(labels[maxIdx]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Helpers para distribución horaria (desde heatmap_24x7) ====
|
// ==== Helpers for hourly distribution (from heatmap_24x7) ====
|
||||||
|
|
||||||
function computeHourlyFromHeatmap(heatmap24x7: any): number[] {
|
function computeHourlyFromHeatmap(heatmap24x7: any): number[] {
|
||||||
if (!Array.isArray(heatmap24x7) || !heatmap24x7.length) {
|
if (!Array.isArray(heatmap24x7) || !heatmap24x7.length) {
|
||||||
@@ -146,7 +146,7 @@ function mapAgenticReadiness(
|
|||||||
description:
|
description:
|
||||||
value?.reason ||
|
value?.reason ||
|
||||||
value?.details?.description ||
|
value?.details?.description ||
|
||||||
'Sub-factor calculado a partir de KPIs agregados.',
|
'Sub-factor calculated from aggregated KPIs.',
|
||||||
details: value?.details || {},
|
details: value?.details || {},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -156,7 +156,7 @@ function mapAgenticReadiness(
|
|||||||
|
|
||||||
const interpretation =
|
const interpretation =
|
||||||
classification?.description ||
|
classification?.description ||
|
||||||
`Puntuación de preparación agentic: ${score.toFixed(1)}/10`;
|
`Agentic readiness score: ${score.toFixed(1)}/10`;
|
||||||
|
|
||||||
const computedCount = Object.values(sub_scores).filter(
|
const computedCount = Object.values(sub_scores).filter(
|
||||||
(s: any) => s?.computed
|
(s: any) => s?.computed
|
||||||
@@ -176,7 +176,7 @@ function mapAgenticReadiness(
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Volumetría (dimensión + KPIs) ====
|
// ==== Volumetry (dimension + KPIs) ====
|
||||||
|
|
||||||
function buildVolumetryDimension(
|
function buildVolumetryDimension(
|
||||||
raw: BackendRawResults
|
raw: BackendRawResults
|
||||||
@@ -216,13 +216,13 @@ function buildVolumetryDimension(
|
|||||||
const topChannel = getTopLabel(volumeByChannel?.labels, channelValues);
|
const topChannel = getTopLabel(volumeByChannel?.labels, channelValues);
|
||||||
const topSkill = getTopLabel(skillLabels, skillValues);
|
const topSkill = getTopLabel(skillLabels, skillValues);
|
||||||
|
|
||||||
// Heatmap 24x7 -> distribución horaria
|
// Heatmap 24x7 -> hourly distribution
|
||||||
const heatmap24x7 = volumetry?.heatmap_24x7;
|
const heatmap24x7 = volumetry?.heatmap_24x7;
|
||||||
const hourly = computeHourlyFromHeatmap(heatmap24x7);
|
const hourly = computeHourlyFromHeatmap(heatmap24x7);
|
||||||
const offHoursPct = hourly.length ? calcOffHoursPct(hourly) : 0;
|
const offHoursPct = hourly.length ? calcOffHoursPct(hourly) : 0;
|
||||||
const peakHours = hourly.length ? findPeakHours(hourly) : [];
|
const peakHours = hourly.length ? findPeakHours(hourly) : [];
|
||||||
|
|
||||||
console.log('📊 Volumetría backend (mapper):', {
|
console.log('📊 Backend volumetry (mapper):', {
|
||||||
volumetry,
|
volumetry,
|
||||||
volumeByChannel,
|
volumeByChannel,
|
||||||
volumeBySkill,
|
volumeBySkill,
|
||||||
@@ -240,21 +240,21 @@ function buildVolumetryDimension(
|
|||||||
|
|
||||||
if (totalVolume > 0) {
|
if (totalVolume > 0) {
|
||||||
extraKpis.push({
|
extraKpis.push({
|
||||||
label: 'Volumen total (backend)',
|
label: 'Total volume (backend)',
|
||||||
value: totalVolume.toLocaleString('es-ES'),
|
value: totalVolume.toLocaleString('es-ES'),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numChannels > 0) {
|
if (numChannels > 0) {
|
||||||
extraKpis.push({
|
extraKpis.push({
|
||||||
label: 'Canales analizados',
|
label: 'Channels analyzed',
|
||||||
value: String(numChannels),
|
value: String(numChannels),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numSkills > 0) {
|
if (numSkills > 0) {
|
||||||
extraKpis.push({
|
extraKpis.push({
|
||||||
label: 'Skills analizadas',
|
label: 'Skills analyzed',
|
||||||
value: String(numSkills),
|
value: String(numSkills),
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -271,14 +271,14 @@ function buildVolumetryDimension(
|
|||||||
|
|
||||||
if (topChannel) {
|
if (topChannel) {
|
||||||
extraKpis.push({
|
extraKpis.push({
|
||||||
label: 'Canal principal',
|
label: 'Main channel',
|
||||||
value: topChannel,
|
value: topChannel,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if (topSkill) {
|
if (topSkill) {
|
||||||
extraKpis.push({
|
extraKpis.push({
|
||||||
label: 'Skill principal',
|
label: 'Main skill',
|
||||||
value: topSkill,
|
value: topSkill,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -287,28 +287,28 @@ function buildVolumetryDimension(
|
|||||||
return { dimension: undefined, extraKpis };
|
return { dimension: undefined, extraKpis };
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calcular ratio pico/valle para evaluar concentración de demanda
|
// Calculate peak/valley ratio to evaluate demand concentration
|
||||||
const validHourly = hourly.filter(v => v > 0);
|
const validHourly = hourly.filter(v => v > 0);
|
||||||
const maxHourly = validHourly.length > 0 ? Math.max(...validHourly) : 0;
|
const maxHourly = validHourly.length > 0 ? Math.max(...validHourly) : 0;
|
||||||
const minHourly = validHourly.length > 0 ? Math.min(...validHourly) : 1;
|
const minHourly = validHourly.length > 0 ? Math.min(...validHourly) : 1;
|
||||||
const peakValleyRatio = minHourly > 0 ? maxHourly / minHourly : 1;
|
const peakValleyRatio = minHourly > 0 ? maxHourly / minHourly : 1;
|
||||||
console.log(`⏰ Hourly distribution (backend path): total=${totalVolume}, peak=${maxHourly}, valley=${minHourly}, ratio=${peakValleyRatio.toFixed(2)}`);
|
console.log(`⏰ Hourly distribution (backend path): total=${totalVolume}, peak=${maxHourly}, valley=${minHourly}, ratio=${peakValleyRatio.toFixed(2)}`);
|
||||||
|
|
||||||
// Score basado en:
|
// Score based on:
|
||||||
// - % fuera de horario (>30% penaliza)
|
// - % off-hours (>30% penalty)
|
||||||
// - Ratio pico/valle (>3x penaliza)
|
// - Peak/valley ratio (>3x penalty)
|
||||||
// NO penalizar por tener volumen alto
|
// DO NOT penalize for having high volume
|
||||||
let score = 100;
|
let score = 100;
|
||||||
|
|
||||||
// Penalización por fuera de horario
|
// Penalty for off-hours
|
||||||
const offHoursPctValue = offHoursPct * 100;
|
const offHoursPctValue = offHoursPct * 100;
|
||||||
if (offHoursPctValue > 30) {
|
if (offHoursPctValue > 30) {
|
||||||
score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts por cada % sobre 30%
|
score -= Math.min(40, (offHoursPctValue - 30) * 2); // -2 pts per % over30%
|
||||||
} else if (offHoursPctValue > 20) {
|
} else if (offHoursPctValue > 20) {
|
||||||
score -= (offHoursPctValue - 20); // -1 pt por cada % entre 20-30%
|
score -= (offHoursPctValue - 20); // -1 pt per % between 20-30%
|
||||||
}
|
}
|
||||||
|
|
||||||
// Penalización por ratio pico/valle alto
|
// Penalty for high peak/valley ratio
|
||||||
if (peakValleyRatio > 5) {
|
if (peakValleyRatio > 5) {
|
||||||
score -= 30;
|
score -= 30;
|
||||||
} else if (peakValleyRatio > 3) {
|
} else if (peakValleyRatio > 3) {
|
||||||
@@ -321,32 +321,32 @@ function buildVolumetryDimension(
|
|||||||
|
|
||||||
const summaryParts: string[] = [];
|
const summaryParts: string[] = [];
|
||||||
summaryParts.push(
|
summaryParts.push(
|
||||||
`${totalVolume.toLocaleString('es-ES')} interacciones analizadas.`
|
`${totalVolume.toLocaleString('es-ES')} interactions analyzed.`
|
||||||
);
|
);
|
||||||
summaryParts.push(
|
summaryParts.push(
|
||||||
`${(offHoursPct * 100).toFixed(0)}% fuera de horario laboral (8-19h).`
|
`${(offHoursPct * 100).toFixed(0)}% outside business hours (8-19h).`
|
||||||
);
|
);
|
||||||
if (peakValleyRatio > 2) {
|
if (peakValleyRatio > 2) {
|
||||||
summaryParts.push(
|
summaryParts.push(
|
||||||
`Ratio pico/valle: ${peakValleyRatio.toFixed(1)}x - alta concentración de demanda.`
|
`Peak/valley ratio: ${peakValleyRatio.toFixed(1)}x - high demand concentration.`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (topSkill) {
|
if (topSkill) {
|
||||||
summaryParts.push(`Skill principal: ${topSkill}.`);
|
summaryParts.push(`Main skill: ${topSkill}.`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Métrica principal accionable: % fuera de horario
|
// Main actionable metric: % off-hours
|
||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'volumetry_distribution',
|
id: 'volumetry_distribution',
|
||||||
name: 'volumetry_distribution',
|
name: 'volumetry_distribution',
|
||||||
title: 'Volumetría y distribución de demanda',
|
title: 'Volumetry and demand distribution',
|
||||||
score,
|
score,
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary: summaryParts.join(' '),
|
summary: summaryParts.join(' '),
|
||||||
kpi: {
|
kpi: {
|
||||||
label: 'Fuera de horario',
|
label: 'Off-hours',
|
||||||
value: `${(offHoursPct * 100).toFixed(0)}%`,
|
value: `${(offHoursPct * 100).toFixed(0)}%`,
|
||||||
change: peakValleyRatio > 2 ? `Pico/valle: ${peakValleyRatio.toFixed(1)}x` : undefined,
|
change: peakValleyRatio > 2 ? `Peak/valley: ${peakValleyRatio.toFixed(1)}x` : undefined,
|
||||||
changeType: offHoursPct > 0.3 ? 'negative' : offHoursPct > 0.2 ? 'neutral' : 'positive'
|
changeType: offHoursPct > 0.3 ? 'negative' : offHoursPct > 0.2 ? 'neutral' : 'positive'
|
||||||
},
|
},
|
||||||
icon: BarChartHorizontal,
|
icon: BarChartHorizontal,
|
||||||
@@ -362,7 +362,7 @@ function buildVolumetryDimension(
|
|||||||
return { dimension, extraKpis };
|
return { dimension, extraKpis };
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Eficiencia Operativa (v3.2 - con segmentación horaria) ====
|
// ==== Operational Efficiency (v3.2 - with hourly segmentation) ====
|
||||||
|
|
||||||
function buildOperationalEfficiencyDimension(
|
function buildOperationalEfficiencyDimension(
|
||||||
raw: BackendRawResults,
|
raw: BackendRawResults,
|
||||||
@@ -371,25 +371,25 @@ function buildOperationalEfficiencyDimension(
|
|||||||
const op = raw?.operational_performance;
|
const op = raw?.operational_performance;
|
||||||
if (!op) return undefined;
|
if (!op) return undefined;
|
||||||
|
|
||||||
// AHT Global
|
// Global AHT
|
||||||
const ahtP50 = safeNumber(op.aht_distribution?.p50, 0);
|
const ahtP50 = safeNumber(op.aht_distribution?.p50, 0);
|
||||||
const ahtP90 = safeNumber(op.aht_distribution?.p90, 0);
|
const ahtP90 = safeNumber(op.aht_distribution?.p90, 0);
|
||||||
const ratioGlobal = ahtP90 > 0 && ahtP50 > 0 ? ahtP90 / ahtP50 : safeNumber(op.aht_distribution?.p90_p50_ratio, 1.5);
|
const ratioGlobal = ahtP90 > 0 && ahtP50 > 0 ? ahtP90 / ahtP50 : safeNumber(op.aht_distribution?.p90_p50_ratio, 1.5);
|
||||||
|
|
||||||
// AHT Horario Laboral (8-19h) - estimación basada en distribución
|
// Business Hours AHT (8-19h) - estimation based on distribution
|
||||||
// Asumimos que el AHT en horario laboral es ligeramente menor (más eficiente)
|
// We assume that AHT during business hours is slightly lower (more efficient)
|
||||||
const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% más eficiente en horario laboral
|
const ahtBusinessHours = Math.round(ahtP50 * 0.92); // ~8% more efficient during business hours
|
||||||
const ratioBusinessHours = ratioGlobal * 0.85; // Menor variabilidad en horario laboral
|
const ratioBusinessHours = ratioGlobal * 0.85; // Lower variability during business hours
|
||||||
|
|
||||||
// Determinar si la variabilidad se reduce fuera de horario
|
// Determine if variability reduces outside hours
|
||||||
const variabilityReduction = ratioGlobal - ratioBusinessHours;
|
const variabilityReduction = ratioGlobal - ratioBusinessHours;
|
||||||
const variabilityInsight = variabilityReduction > 0.3
|
const variabilityInsight = variabilityReduction > 0.3
|
||||||
? 'La variabilidad se reduce significativamente en horario laboral.'
|
? 'Variability significantly reduces during business hours.'
|
||||||
: variabilityReduction > 0.1
|
: variabilityReduction > 0.1
|
||||||
? 'La variabilidad se mantiene similar en ambos horarios.'
|
? 'Variability remains similar in both schedules.'
|
||||||
: 'La variabilidad es consistente independientemente del horario.';
|
: 'Variability is consistent regardless of schedule.';
|
||||||
|
|
||||||
// Score basado en escala definida:
|
// Score based on defined scale:
|
||||||
// <1.5 = 100pts, 1.5-2.0 = 70pts, 2.0-2.5 = 50pts, 2.5-3.0 = 30pts, >3.0 = 20pts
|
// <1.5 = 100pts, 1.5-2.0 = 70pts, 2.0-2.5 = 50pts, 2.5-3.0 = 30pts, >3.0 = 20pts
|
||||||
let score: number;
|
let score: number;
|
||||||
if (ratioGlobal < 1.5) {
|
if (ratioGlobal < 1.5) {
|
||||||
@@ -404,9 +404,9 @@ function buildOperationalEfficiencyDimension(
|
|||||||
score = 20;
|
score = 20;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Summary con segmentación
|
// Summary with segmentation
|
||||||
let summary = `AHT Global: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `;
|
let summary = `Global AHT: ${Math.round(ahtP50)}s (P50), ratio ${ratioGlobal.toFixed(2)}. `;
|
||||||
summary += `AHT Horario Laboral (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. `;
|
summary += `Business Hours AHT (8-19h): ${ahtBusinessHours}s (P50), ratio ${ratioBusinessHours.toFixed(2)}. `;
|
||||||
summary += variabilityInsight;
|
summary += variabilityInsight;
|
||||||
|
|
||||||
// KPI principal: AHT P50 (industry standard for operational efficiency)
|
// KPI principal: AHT P50 (industry standard for operational efficiency)
|
||||||
@@ -420,7 +420,7 @@ function buildOperationalEfficiencyDimension(
|
|||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'operational_efficiency',
|
id: 'operational_efficiency',
|
||||||
name: 'operational_efficiency',
|
name: 'operational_efficiency',
|
||||||
title: 'Eficiencia Operativa',
|
title: 'Operational Efficiency',
|
||||||
score,
|
score,
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary,
|
summary,
|
||||||
@@ -431,7 +431,7 @@ function buildOperationalEfficiencyDimension(
|
|||||||
return dimension;
|
return dimension;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Efectividad & Resolución (v3.2 - enfocada en FCR Técnico) ====
|
// ==== Effectiveness & Resolution (v3.2 - focused on Technical FCR) ====
|
||||||
|
|
||||||
function buildEffectivenessResolutionDimension(
|
function buildEffectivenessResolutionDimension(
|
||||||
raw: BackendRawResults
|
raw: BackendRawResults
|
||||||
@@ -439,20 +439,20 @@ function buildEffectivenessResolutionDimension(
|
|||||||
const op = raw?.operational_performance;
|
const op = raw?.operational_performance;
|
||||||
if (!op) return undefined;
|
if (!op) return undefined;
|
||||||
|
|
||||||
// FCR Técnico = 100 - transfer_rate (comparable con benchmarks de industria)
|
// Technical FCR = 100 - transfer_rate (comparable with industry benchmarks)
|
||||||
// Usamos escalation_rate que es la tasa de transferencias
|
// We use escalation_rate which is the transfer rate
|
||||||
const escalationRate = safeNumber(op.escalation_rate, NaN);
|
const escalationRate = safeNumber(op.escalation_rate, NaN);
|
||||||
const abandonmentRate = safeNumber(op.abandonment_rate, 0);
|
const abandonmentRate = safeNumber(op.abandonment_rate, 0);
|
||||||
|
|
||||||
// FCR Técnico: 100 - tasa de transferencia
|
// Technical FCR: 100 - tasa de transferencia
|
||||||
const fcrRate = Number.isFinite(escalationRate) && escalationRate >= 0
|
const fcrRate = Number.isFinite(escalationRate) && escalationRate >= 0
|
||||||
? Math.max(0, Math.min(100, 100 - escalationRate))
|
? Math.max(0, Math.min(100, 100 - escalationRate))
|
||||||
: 70; // valor por defecto benchmark aéreo
|
: 70; // default airline benchmark value
|
||||||
|
|
||||||
// Tasa de transferencia (complemento del FCR Técnico)
|
// Transfer rate (complement of Technical FCR)
|
||||||
const transferRate = Number.isFinite(escalationRate) ? escalationRate : 100 - fcrRate;
|
const transferRate = Number.isFinite(escalationRate) ? escalationRate : 100 - fcrRate;
|
||||||
|
|
||||||
// Score basado en FCR Técnico (benchmark sector aéreo: 85-90%)
|
// Score based on Technical FCR (benchmark airline sector: 85-90%)
|
||||||
// FCR >= 90% = 100pts, 85-90% = 80pts, 80-85% = 60pts, 75-80% = 40pts, <75% = 20pts
|
// FCR >= 90% = 100pts, 85-90% = 80pts, 80-85% = 60pts, 75-80% = 40pts, <75% = 20pts
|
||||||
let score: number;
|
let score: number;
|
||||||
if (fcrRate >= 90) {
|
if (fcrRate >= 90) {
|
||||||
@@ -467,25 +467,25 @@ function buildEffectivenessResolutionDimension(
|
|||||||
score = 20;
|
score = 20;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Penalización adicional por abandono alto (>8%)
|
// Additional penalty for high abandonment (>8%)
|
||||||
if (abandonmentRate > 8) {
|
if (abandonmentRate > 8) {
|
||||||
score = Math.max(0, score - Math.round((abandonmentRate - 8) * 2));
|
score = Math.max(0, score - Math.round((abandonmentRate - 8) * 2));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Summary enfocado en FCR Técnico
|
// Summary focused on Technical FCR
|
||||||
let summary = `FCR Técnico: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `;
|
let summary = `Technical FCR: ${fcrRate.toFixed(1)}% (benchmark: 85-90%). `;
|
||||||
summary += `Tasa de transferencia: ${transferRate.toFixed(1)}%. `;
|
summary += `Transfer rate: ${transferRate.toFixed(1)}%. `;
|
||||||
|
|
||||||
if (fcrRate >= 90) {
|
if (fcrRate >= 90) {
|
||||||
summary += 'Excelente resolución en primer contacto.';
|
summary += 'Excellent first contact resolution.';
|
||||||
} else if (fcrRate >= 85) {
|
} else if (fcrRate >= 85) {
|
||||||
summary += 'Resolución dentro del benchmark del sector.';
|
summary += 'Resolution within sector benchmark.';
|
||||||
} else {
|
} else {
|
||||||
summary += 'Oportunidad de mejora reduciendo transferencias.';
|
summary += 'Opportunity to improve by reducing transfers.';
|
||||||
}
|
}
|
||||||
|
|
||||||
const kpi: Kpi = {
|
const kpi: Kpi = {
|
||||||
label: 'FCR Técnico',
|
label: 'Technical FCR',
|
||||||
value: `${fcrRate.toFixed(0)}%`,
|
value: `${fcrRate.toFixed(0)}%`,
|
||||||
change: `Transfer: ${transferRate.toFixed(0)}%`,
|
change: `Transfer: ${transferRate.toFixed(0)}%`,
|
||||||
changeType: fcrRate >= 85 ? 'positive' : fcrRate >= 80 ? 'neutral' : 'negative'
|
changeType: fcrRate >= 85 ? 'positive' : fcrRate >= 80 ? 'neutral' : 'negative'
|
||||||
@@ -494,7 +494,7 @@ function buildEffectivenessResolutionDimension(
|
|||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'effectiveness_resolution',
|
id: 'effectiveness_resolution',
|
||||||
name: 'effectiveness_resolution',
|
name: 'effectiveness_resolution',
|
||||||
title: 'Efectividad & Resolución',
|
title: 'Effectiveness & Resolution',
|
||||||
score,
|
score,
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary,
|
summary,
|
||||||
@@ -505,7 +505,7 @@ function buildEffectivenessResolutionDimension(
|
|||||||
return dimension;
|
return dimension;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Complejidad & Predictibilidad (v3.4 - basada en CV AHT per industry standards) ====
|
// ==== Complexity & Predictability (v3.4 - based on CV AHT per industry standards) ====
|
||||||
|
|
||||||
function buildComplexityPredictabilityDimension(
|
function buildComplexityPredictabilityDimension(
|
||||||
raw: BackendRawResults
|
raw: BackendRawResults
|
||||||
@@ -535,9 +535,9 @@ function buildComplexityPredictabilityDimension(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Score basado en CV AHT (benchmark: <75% = excelente, <100% = aceptable)
|
// Score based on CV AHT (benchmark: <75% = excellent, <100% = acceptable)
|
||||||
// CV <= 75% = 100pts (alta predictibilidad)
|
// CV <= 75% = 100pts (high predictability)
|
||||||
// CV 75-100% = 80pts (predictibilidad aceptable)
|
// CV 75-100% = 80pts (acceptable predictability)
|
||||||
// CV 100-125% = 60pts (variabilidad moderada)
|
// CV 100-125% = 60pts (moderate variability)
|
||||||
// CV 125-150% = 40pts (alta variabilidad)
|
// CV 125-150% = 40pts (high variability)
|
||||||
// CV > 150% = 20pts (muy alta variabilidad)
|
// CV > 150% = 20pts (very high variability)
|
||||||
@@ -558,16 +558,16 @@ function buildComplexityPredictabilityDimension(
|
|||||||
let summary = `CV AHT: ${cvAhtPercent}% (benchmark: <75%). `;
|
let summary = `CV AHT: ${cvAhtPercent}% (benchmark: <75%). `;
|
||||||
|
|
||||||
if (cvAhtPercent <= 75) {
|
if (cvAhtPercent <= 75) {
|
||||||
summary += 'Alta predictibilidad: tiempos de atención consistentes. Excelente para planificación WFM.';
|
summary += 'High predictability: consistent handling times. Excellent for WFM planning.';
|
||||||
} else if (cvAhtPercent <= 100) {
|
} else if (cvAhtPercent <= 100) {
|
||||||
summary += 'Predictibilidad aceptable: variabilidad moderada en tiempos de atención.';
|
summary += 'Acceptable predictability: moderate variability in handling times.';
|
||||||
} else if (cvAhtPercent <= 125) {
|
} else if (cvAhtPercent <= 125) {
|
||||||
summary += 'Variabilidad notable: dificulta la planificación de recursos. Considerar estandarización.';
|
summary += 'Notable variability: complicates resource planning. Consider standardization.';
|
||||||
} else {
|
} else {
|
||||||
summary += 'Alta variabilidad: tiempos muy dispersos. Priorizar scripts guiados y estandarización.';
|
summary += 'High variability: very scattered times. Prioritize guided scripts and standardization.';
|
||||||
}
|
}
|
||||||
|
|
||||||
// Añadir info de Hold P50 promedio si está disponible (proxy de complejidad)
|
// Add Hold P50 average info if available (complexity proxy)
|
||||||
if (avgHoldP50 > 0) {
|
if (avgHoldP50 > 0) {
|
||||||
summary += ` Hold Time P50: ${Math.round(avgHoldP50)}s.`;
|
summary += ` Hold Time P50: ${Math.round(avgHoldP50)}s.`;
|
||||||
}
|
}
|
||||||
@@ -583,7 +583,7 @@ function buildComplexityPredictabilityDimension(
|
|||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'complexity_predictability',
|
id: 'complexity_predictability',
|
||||||
name: 'complexity_predictability',
|
name: 'complexity_predictability',
|
||||||
title: 'Complejidad & Predictibilidad',
|
title: 'Complexity & Predictability',
|
||||||
score,
|
score,
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary,
|
summary,
|
||||||
@@ -594,7 +594,7 @@ function buildComplexityPredictabilityDimension(
|
|||||||
return dimension;
|
return dimension;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Satisfacción del Cliente (v3.1) ====
|
// ==== Customer Satisfaction (v3.1) ====
|
||||||
|
|
||||||
function buildSatisfactionDimension(
|
function buildSatisfactionDimension(
|
||||||
raw: BackendRawResults
|
raw: BackendRawResults
|
||||||
@@ -604,19 +604,19 @@ function buildSatisfactionDimension(
|
|||||||
|
|
||||||
const hasCSATData = Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0;
|
const hasCSATData = Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0;
|
||||||
|
|
||||||
// Si no hay CSAT, mostrar dimensión con "No disponible"
|
// If there is no CSAT, show the dimension as "Not available"
|
||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'customer_satisfaction',
|
id: 'customer_satisfaction',
|
||||||
name: 'customer_satisfaction',
|
name: 'customer_satisfaction',
|
||||||
title: 'Satisfacción del Cliente',
|
title: 'Customer Satisfaction',
|
||||||
score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indica N/A
|
score: hasCSATData ? Math.round((csatGlobalRaw / 5) * 100) : -1, // -1 indicates N/A
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary: hasCSATData
|
summary: hasCSATData
|
||||||
? `CSAT global: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Nivel de satisfacción óptimo.' : csatGlobalRaw >= 3.5 ? 'Satisfacción aceptable, margen de mejora.' : 'Satisfacción baja, requiere atención urgente.'}`
|
? `Global CSAT: ${csatGlobalRaw.toFixed(1)}/5. ${csatGlobalRaw >= 4.0 ? 'Optimal satisfaction level.' : csatGlobalRaw >= 3.5 ? 'Acceptable satisfaction, room for improvement.' : 'Low satisfaction, requires urgent attention.'}`
|
||||||
: 'CSAT no disponible en el dataset. Para incluir esta dimensión, añadir datos de encuestas de satisfacción.',
|
: 'CSAT not available in dataset. To include this dimension, add satisfaction survey data.',
|
||||||
kpi: {
|
kpi: {
|
||||||
label: 'CSAT',
|
label: 'CSAT',
|
||||||
value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'No disponible',
|
value: hasCSATData ? `${csatGlobalRaw.toFixed(1)}/5` : 'Not available',
|
||||||
changeType: hasCSATData
|
changeType: hasCSATData
|
||||||
? (csatGlobalRaw >= 4.0 ? 'positive' : csatGlobalRaw >= 3.5 ? 'neutral' : 'negative')
|
? (csatGlobalRaw >= 4.0 ? 'positive' : csatGlobalRaw >= 3.5 ? 'neutral' : 'negative')
|
||||||
: 'neutral'
|
: 'neutral'
|
||||||
@@ -627,7 +627,7 @@ function buildSatisfactionDimension(
|
|||||||
return dimension;
|
return dimension;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ==== Economía - Coste por Interacción (v3.1) ====
|
// ==== Economy - Cost per Interaction (v3.1) ====
|
||||||
|
|
||||||
function buildEconomyDimension(
|
function buildEconomyDimension(
|
||||||
raw: BackendRawResults,
|
raw: BackendRawResults,
|
||||||
@@ -637,9 +637,9 @@ function buildEconomyDimension(
|
|||||||
const op = raw?.operational_performance;
|
const op = raw?.operational_performance;
|
||||||
const totalAnnual = safeNumber(econ?.cost_breakdown?.total_annual, 0);
|
const totalAnnual = safeNumber(econ?.cost_breakdown?.total_annual, 0);
|
||||||
|
|
||||||
// Benchmark CPI aerolíneas (consistente con ExecutiveSummaryTab)
|
// Airline CPI benchmark (consistent with ExecutiveSummaryTab)
|
||||||
// p25: 2.20, p50: 3.50, p75: 4.50, p90: 5.50
|
// p25: 2.20, p50: 3.50, p75: 4.50, p90: 5.50
|
||||||
const CPI_BENCHMARK = 3.50; // p50 aerolíneas
|
const CPI_BENCHMARK = 3.50; // airline p50
|
||||||
|
|
||||||
if (totalAnnual <= 0 || totalInteractions <= 0) {
|
if (totalAnnual <= 0 || totalInteractions <= 0) {
|
||||||
return undefined;
|
return undefined;
|
||||||
@@ -652,12 +652,12 @@ function buildEconomyDimension(
|
|||||||
// Calcular CPI usando cost_volume (non-abandoned) como denominador
|
// Calculate CPI using cost_volume (non-abandoned) as the denominator
|
||||||
const cpi = costVolume > 0 ? totalAnnual / costVolume : totalAnnual / totalInteractions;
|
const cpi = costVolume > 0 ? totalAnnual / costVolume : totalAnnual / totalInteractions;
|
||||||
|
|
||||||
// Score basado en percentiles de aerolíneas (CPI invertido: menor = mejor)
|
// Score based on airline percentiles (inverse CPI: lower = better)
|
||||||
// CPI <= 2.20 (p25) = 100pts (excelente, top 25%)
|
// CPI <= 2.20 (p25) = 100pts (excellent, top 25%)
|
||||||
// CPI 2.20-3.50 (p25-p50) = 80pts (bueno, top 50%)
|
// CPI 2.20-3.50 (p25-p50) = 80pts (good, top 50%)
|
||||||
// CPI 3.50-4.50 (p50-p75) = 60pts (promedio)
|
// CPI 3.50-4.50 (p50-p75) = 60pts (average)
|
||||||
// CPI 4.50-5.50 (p75-p90) = 40pts (por debajo)
|
// CPI 4.50-5.50 (p75-p90) = 40pts (below average)
|
||||||
// CPI > 5.50 (>p90) = 20pts (crítico)
|
// CPI > 5.50 (>p90) = 20pts (critical)
|
||||||
let score: number;
|
let score: number;
|
||||||
if (cpi <= 2.20) {
|
if (cpi <= 2.20) {
|
||||||
score = 100;
|
score = 100;
|
||||||
@@ -674,24 +674,24 @@ function buildEconomyDimension(
|
|||||||
const cpiDiff = cpi - CPI_BENCHMARK;
|
const cpiDiff = cpi - CPI_BENCHMARK;
|
||||||
const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative';
|
const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative';
|
||||||
|
|
||||||
let summary = `Coste por interacción: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `;
|
let summary = `Cost per interaction: €${cpi.toFixed(2)} vs benchmark €${CPI_BENCHMARK.toFixed(2)}. `;
|
||||||
if (cpi <= CPI_BENCHMARK) {
|
if (cpi <= CPI_BENCHMARK) {
|
||||||
summary += 'Eficiencia de costes óptima, por debajo del benchmark del sector.';
|
summary += 'Optimal cost efficiency, below sector benchmark.';
|
||||||
} else if (cpi <= 4.50) {
|
} else if (cpi <= 4.50) {
|
||||||
summary += 'Coste ligeramente por encima del benchmark, oportunidad de optimización.';
|
summary += 'Cost slightly above benchmark, optimization opportunity.';
|
||||||
} else {
|
} else {
|
||||||
summary += 'Coste elevado respecto al sector. Priorizar iniciativas de eficiencia.';
|
summary += 'High cost relative to sector. Prioritize efficiency initiatives.';
|
||||||
}
|
}
|
||||||
|
|
||||||
const dimension: DimensionAnalysis = {
|
const dimension: DimensionAnalysis = {
|
||||||
id: 'economy_costs',
|
id: 'economy_costs',
|
||||||
name: 'economy_costs',
|
name: 'economy_costs',
|
||||||
title: 'Economía & Costes',
|
title: 'Economy & Costs',
|
||||||
score,
|
score,
|
||||||
percentile: undefined,
|
percentile: undefined,
|
||||||
summary,
|
summary,
|
||||||
kpi: {
|
kpi: {
|
||||||
label: 'Coste por Interacción',
|
label: 'Cost per Interaction',
|
||||||
value: `€${cpi.toFixed(2)}`,
|
value: `€${cpi.toFixed(2)}`,
|
||||||
change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`,
|
change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`,
|
||||||
changeType: cpiStatus as 'positive' | 'neutral' | 'negative'
|
changeType: cpiStatus as 'positive' | 'neutral' | 'negative'
|
||||||
@@ -779,7 +779,7 @@ function buildAgenticReadinessDimension(
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// ==== Economía y costes (economy_costs) ====
|
// ==== Economy and costs (economy_costs) ====
|
||||||
|
|
||||||
function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
|
function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
|
||||||
const econ = raw?.economy_costs;
|
const econ = raw?.economy_costs;
|
||||||
@@ -814,17 +814,17 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
|
|||||||
const savingsBreakdown = annualSavings
|
const savingsBreakdown = annualSavings
|
||||||
? [
|
? [
|
||||||
{
|
{
|
||||||
category: 'Ineficiencias operativas (AHT, escalaciones)',
|
category: 'Operational inefficiencies (AHT, escalations)',
|
||||||
amount: Math.round(annualSavings * 0.5),
|
amount: Math.round(annualSavings * 0.5),
|
||||||
percentage: 50,
|
percentage: 50,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
category: 'Automatización de volumen repetitivo',
|
category: 'Automation of repetitive volume',
|
||||||
amount: Math.round(annualSavings * 0.3),
|
amount: Math.round(annualSavings * 0.3),
|
||||||
percentage: 30,
|
percentage: 30,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
category: 'Otros beneficios (calidad, CX)',
|
category: 'Other benefits (quality, CX)',
|
||||||
amount: Math.round(annualSavings * 0.2),
|
amount: Math.round(annualSavings * 0.2),
|
||||||
percentage: 20,
|
percentage: 20,
|
||||||
},
|
},
|
||||||
@@ -834,7 +834,7 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
|
|||||||
const costBreakdown = currentAnnualCost
|
const costBreakdown = currentAnnualCost
|
||||||
? [
|
? [
|
||||||
{
|
{
|
||||||
category: 'Coste laboral',
|
category: 'Labor cost',
|
||||||
amount: laborAnnual,
|
amount: laborAnnual,
|
||||||
percentage: Math.round(
|
percentage: Math.round(
|
||||||
(laborAnnual / currentAnnualCost) * 100
|
(laborAnnual / currentAnnualCost) * 100
|
||||||
@@ -848,7 +848,7 @@ function buildEconomicModel(raw: BackendRawResults): EconomicModelData {
|
|||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
category: 'Tecnología',
|
category: 'Technology',
|
||||||
amount: techAnnual,
|
amount: techAnnual,
|
||||||
percentage: Math.round(
|
percentage: Math.round(
|
||||||
(techAnnual / currentAnnualCost) * 100
|
(techAnnual / currentAnnualCost) * 100
|
||||||
@@ -914,7 +914,7 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
Math.min(100, Math.round(arScore * 10))
|
Math.min(100, Math.round(arScore * 10))
|
||||||
);
|
);
|
||||||
|
|
||||||
// v3.3: 7 dimensiones (Complejidad recuperada con métrica Hold Time >60s)
|
// v3.3: 7 dimensions (Complexity recovered with Hold Time metric >60s)
|
||||||
const { dimension: volumetryDimension, extraKpis } =
|
const { dimension: volumetryDimension, extraKpis } =
|
||||||
buildVolumetryDimension(raw);
|
buildVolumetryDimension(raw);
|
||||||
const operationalEfficiencyDimension = buildOperationalEfficiencyDimension(raw);
|
const operationalEfficiencyDimension = buildOperationalEfficiencyDimension(raw);
|
||||||
@@ -946,7 +946,7 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
|
|
||||||
const csatAvg = computeCsatAverage(cs);
|
const csatAvg = computeCsatAverage(cs);
|
||||||
|
|
||||||
// CSAT global (opcional)
|
// Global CSAT (optional)
|
||||||
const csatGlobalRaw = safeNumber(cs?.csat_global, NaN);
|
const csatGlobalRaw = safeNumber(cs?.csat_global, NaN);
|
||||||
const csatGlobal =
|
const csatGlobal =
|
||||||
Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0
|
Number.isFinite(csatGlobalRaw) && csatGlobalRaw > 0
|
||||||
@@ -954,7 +954,7 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
: undefined;
|
: undefined;
|
||||||
|
|
||||||
|
|
||||||
// KPIs de resumen (los 4 primeros son los que se ven en "Métricas de Contacto")
|
// Summary KPIs (the first 4 are shown in "Contact Metrics")
|
||||||
const summaryKpis: Kpi[] = [];
|
const summaryKpis: Kpi[] = [];
|
||||||
|
|
||||||
// 1) Interacciones Totales (volumen backend)
|
// 1) Total Interactions (backend volume)
|
||||||
@@ -975,9 +975,9 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
: 'N/D',
|
: 'N/D',
|
||||||
});
|
});
|
||||||
|
|
||||||
// 3) Tasa FCR
|
// 3) FCR Rate
|
||||||
summaryKpis.push({
|
summaryKpis.push({
|
||||||
label: 'Tasa FCR',
|
label: 'FCR Rate',
|
||||||
value:
|
value:
|
||||||
fcrPct !== undefined
|
fcrPct !== undefined
|
||||||
? `${Math.round(fcrPct)}%`
|
? `${Math.round(fcrPct)}%`
|
||||||
@@ -993,18 +993,18 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
: 'N/D',
|
: 'N/D',
|
||||||
});
|
});
|
||||||
|
|
||||||
// --- KPIs adicionales, usados en otras secciones ---
|
// --- Additional KPIs, used in other sections ---
|
||||||
|
|
||||||
if (numChannels > 0) {
|
if (numChannels > 0) {
|
||||||
summaryKpis.push({
|
summaryKpis.push({
|
||||||
label: 'Canales analizados',
|
label: 'Channels analyzed',
|
||||||
value: String(numChannels),
|
value: String(numChannels),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if (numSkills > 0) {
|
if (numSkills > 0) {
|
||||||
summaryKpis.push({
|
summaryKpis.push({
|
||||||
label: 'Skills analizadas',
|
label: 'Skills analyzed',
|
||||||
value: String(numSkills),
|
value: String(numSkills),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -1027,13 +1027,13 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
|
|
||||||
if (totalAnnual) {
|
if (totalAnnual) {
|
||||||
summaryKpis.push({
|
summaryKpis.push({
|
||||||
label: 'Coste anual actual (backend)',
|
label: 'Current annual cost (backend)',
|
||||||
value: `€${totalAnnual.toFixed(0)}`,
|
value: `€${totalAnnual.toFixed(0)}`,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
if (annualSavings) {
|
if (annualSavings) {
|
||||||
summaryKpis.push({
|
summaryKpis.push({
|
||||||
label: 'Ahorro potencial anual (backend)',
|
label: 'Annual potential savings (backend)',
|
||||||
value: `€${annualSavings.toFixed(0)}`,
|
value: `€${annualSavings.toFixed(0)}`,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -1043,22 +1043,22 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
const economicModel = buildEconomicModel(raw);
|
const economicModel = buildEconomicModel(raw);
|
||||||
const benchmarkData = buildBenchmarkData(raw);
|
const benchmarkData = buildBenchmarkData(raw);
|
||||||
|
|
||||||
// Generar findings y recommendations basados en volumetría
|
// Generate findings and recommendations based on volumetry
|
||||||
const findings: Finding[] = [];
|
const findings: Finding[] = [];
|
||||||
const recommendations: Recommendation[] = [];
|
const recommendations: Recommendation[] = [];
|
||||||
|
|
||||||
// Extraer offHoursPct de la dimensión de volumetría
|
// Extract offHoursPct from the volumetry dimension
|
||||||
const offHoursPct = volumetryDimension?.distribution_data?.off_hours_pct ?? 0;
|
const offHoursPct = volumetryDimension?.distribution_data?.off_hours_pct ?? 0;
|
||||||
const offHoursPctValue = offHoursPct * 100; // Convertir de 0-1 a 0-100
|
const offHoursPctValue = offHoursPct * 100; // Convert from 0-1 to 0-100
|
||||||
|
|
||||||
if (offHoursPctValue > 20) {
|
if (offHoursPctValue > 20) {
|
||||||
const offHoursVolume = Math.round(totalVolume * offHoursPctValue / 100);
|
const offHoursVolume = Math.round(totalVolume * offHoursPctValue / 100);
|
||||||
findings.push({
|
findings.push({
|
||||||
type: offHoursPctValue > 30 ? 'critical' : 'warning',
|
type: offHoursPctValue > 30 ? 'critical' : 'warning',
|
||||||
title: 'Alto Volumen Fuera de Horario',
|
title: 'High Off-Hours Volume',
|
||||||
text: `${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario (8-19h)`,
|
text: `${offHoursPctValue.toFixed(0)}% of interactions outside business hours (8-19h)`,
|
||||||
dimensionId: 'volumetry_distribution',
|
dimensionId: 'volumetry_distribution',
|
||||||
description: `${offHoursVolume.toLocaleString()} interacciones (${offHoursPctValue.toFixed(1)}%) ocurren fuera de horario laboral. Oportunidad ideal para implementar agentes virtuales 24/7.`,
|
description: `${offHoursVolume.toLocaleString()} interactions (${offHoursPctValue.toFixed(1)}%) occur outside business hours. Ideal opportunity to implement 24/7 virtual agents.`,
|
||||||
impact: offHoursPctValue > 30 ? 'high' : 'medium'
|
impact: offHoursPctValue > 30 ? 'high' : 'medium'
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -1066,12 +1066,12 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
const estimatedSavings = Math.round(offHoursVolume * estimatedContainment / 100);
|
const estimatedSavings = Math.round(offHoursVolume * estimatedContainment / 100);
|
||||||
recommendations.push({
|
recommendations.push({
|
||||||
priority: 'high',
|
priority: 'high',
|
||||||
title: 'Implementar Agente Virtual 24/7',
|
title: 'Implement 24/7 Virtual Agent',
|
||||||
text: `Desplegar agente virtual para atender ${offHoursPctValue.toFixed(0)}% de interacciones fuera de horario`,
|
text: `Deploy virtual agent to handle ${offHoursPctValue.toFixed(0)}% of off-hours interactions`,
|
||||||
description: `${offHoursVolume.toLocaleString()} interacciones ocurren fuera de horario laboral (19:00-08:00). Un agente virtual puede resolver ~${estimatedContainment}% de estas consultas automáticamente.`,
|
description: `${offHoursVolume.toLocaleString()} interactions occur outside business hours (19:00-08:00). A virtual agent can resolve ~${estimatedContainment}% of these queries automatically.`,
|
||||||
dimensionId: 'volumetry_distribution',
|
dimensionId: 'volumetry_distribution',
|
||||||
impact: `Potencial de contención: ${estimatedSavings.toLocaleString()} interacciones/período`,
|
impact: `Containment potential: ${estimatedSavings.toLocaleString()} interactions/period`,
|
||||||
timeline: '1-3 meses'
|
timeline: '1-3 months'
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1080,7 +1080,7 @@ export function mapBackendResultsToAnalysisData(
|
|||||||
overallHealthScore,
|
overallHealthScore,
|
||||||
summaryKpis: mergedKpis,
|
summaryKpis: mergedKpis,
|
||||||
dimensions,
|
dimensions,
|
||||||
heatmapData: [], // el heatmap por skill lo seguimos generando en el front
|
heatmapData: [], // skill heatmap still generated on frontend
|
||||||
findings,
|
findings,
|
||||||
recommendations,
|
recommendations,
|
||||||
opportunities: [],
|
opportunities: [],
|
||||||
@@ -1166,9 +1166,9 @@ export function buildHeatmapFromBackend(
|
|||||||
abandonment_rate: number;
|
abandonment_rate: number;
|
||||||
fcr_tecnico: number;
|
fcr_tecnico: number;
|
||||||
fcr_real: number;
|
fcr_real: number;
|
||||||
aht_mean: number; // AHT promedio del backend (solo VALID - consistente con fresh path)
|
aht_mean: number; // Average AHT from backend (VALID only - consistent with fresh path)
|
||||||
aht_total: number; // AHT total (ALL rows incluyendo NOISE/ZOMBIE/ABANDON) - solo informativo
|
aht_total: number; // Total AHT (ALL rows including NOISE/ZOMBIE/ABANDON) - informational only
|
||||||
hold_time_mean: number; // Hold time promedio (consistente con fresh path - MEAN, no P50)
|
hold_time_mean: number; // Average Hold time (consistent with fresh path - MEAN, not P50)
|
||||||
}>();
|
}>();
|
||||||
|
|
||||||
for (const m of metricsBySkillRaw) {
|
for (const m of metricsBySkillRaw) {
|
||||||
@@ -1178,9 +1178,9 @@ export function buildHeatmapFromBackend(
|
|||||||
abandonment_rate: safeNumber(m.abandonment_rate, NaN),
|
abandonment_rate: safeNumber(m.abandonment_rate, NaN),
|
||||||
fcr_tecnico: safeNumber(m.fcr_tecnico, NaN),
|
fcr_tecnico: safeNumber(m.fcr_tecnico, NaN),
|
||||||
fcr_real: safeNumber(m.fcr_real, NaN),
|
fcr_real: safeNumber(m.fcr_real, NaN),
|
||||||
aht_mean: safeNumber(m.aht_mean, NaN), // AHT promedio (solo VALID)
|
aht_mean: safeNumber(m.aht_mean, NaN), // Average AHT (VALID only)
|
||||||
aht_total: safeNumber(m.aht_total, NaN), // AHT total (ALL rows)
|
aht_total: safeNumber(m.aht_total, NaN), // Total AHT (ALL rows)
|
||||||
hold_time_mean: safeNumber(m.hold_time_mean, NaN), // Hold time promedio (MEAN)
|
hold_time_mean: safeNumber(m.hold_time_mean, NaN), // Average Hold time (MEAN)
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1314,7 +1314,7 @@ export function buildHeatmapFromBackend(
|
|||||||
// Dimensiones agentic similares a las que tenías en generateHeatmapData,
|
// Agentic dimensions similar to those previously in generateHeatmapData,
|
||||||
// pero usando valores reales en lugar de aleatorios.
|
// but using real values instead of random ones.
|
||||||
|
|
||||||
// 1) Predictibilidad (menor CV => mayor puntuación)
|
// 1) Predictability (lower CV => higher score)
|
||||||
const predictability_score = Math.max(
|
const predictability_score = Math.max(
|
||||||
0,
|
0,
|
||||||
Math.min(
|
Math.min(
|
||||||
@@ -1347,14 +1347,14 @@ export function buildHeatmapFromBackend(
|
|||||||
} else {
|
} else {
|
||||||
// NO usar estimación - usar valores globales del backend directamente
|
// Do NOT use estimation - use global backend values directly
|
||||||
// Esto asegura consistencia con el fresh path que usa valores directos del CSV
|
// This ensures consistency with the fresh path, which uses direct CSV values
|
||||||
skillTransferRate = globalEscalation; // Usar tasa global, sin estimación
|
skillTransferRate = globalEscalation; // Use global rate, no estimation
|
||||||
skillAbandonmentRate = abandonmentRateBackend;
|
skillAbandonmentRate = abandonmentRateBackend;
|
||||||
skillFcrTecnico = 100 - skillTransferRate;
|
skillFcrTecnico = 100 - skillTransferRate;
|
||||||
skillFcrReal = globalFcrPct;
|
skillFcrReal = globalFcrPct;
|
||||||
console.warn(`⚠️ No metrics_by_skill for skill ${skill} - using global rates`);
|
console.warn(`⚠️ No metrics_by_skill for skill ${skill} - using global rates`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Complejidad inversa basada en transfer rate del skill
|
// Inverse complexity based on skill transfer rate
|
||||||
const complexity_inverse_score = Math.max(
|
const complexity_inverse_score = Math.max(
|
||||||
0,
|
0,
|
||||||
Math.min(
|
Math.min(
|
||||||
@@ -1446,10 +1446,10 @@ export function buildHeatmapFromBackend(
|
|||||||
volume,
|
volume,
|
||||||
cost_volume: costVolume,
|
cost_volume: costVolume,
|
||||||
aht_seconds: aht_mean,
|
aht_seconds: aht_mean,
|
||||||
aht_total: aht_total, // AHT con TODAS las filas (solo informativo)
|
aht_total: aht_total, // AHT with ALL rows (informational only)
|
||||||
metrics: {
|
metrics: {
|
||||||
fcr: Math.round(skillFcrReal), // FCR Real (sin transfer Y sin recontacto 7d)
|
fcr: Math.round(skillFcrReal), // Real FCR (no transfer AND no 7-day recontact)
|
||||||
fcr_tecnico: Math.round(skillFcrTecnico), // FCR Técnico (comparable con benchmarks)
|
fcr_tecnico: Math.round(skillFcrTecnico), // Technical FCR (comparable with benchmarks)
|
||||||
aht: ahtMetric,
|
aht: ahtMetric,
|
||||||
csat: csatMetric0_100,
|
csat: csatMetric0_100,
|
||||||
hold_time: holdMetric,
|
hold_time: holdMetric,
|
||||||
@@ -1457,12 +1457,12 @@ export function buildHeatmapFromBackend(
|
|||||||
abandonment_rate: Math.round(skillAbandonmentRate),
|
abandonment_rate: Math.round(skillAbandonmentRate),
|
||||||
},
|
},
|
||||||
annual_cost,
|
annual_cost,
|
||||||
cpi: skillCpi, // CPI real del backend (si disponible)
|
cpi: skillCpi, // Real CPI from backend (if available)
|
||||||
variability: {
|
variability: {
|
||||||
cv_aht: Math.round(cv_aht * 100), // %
|
cv_aht: Math.round(cv_aht * 100), // %
|
||||||
cv_talk_time: 0,
|
cv_talk_time: 0,
|
||||||
cv_hold_time: 0,
|
cv_hold_time: 0,
|
||||||
transfer_rate: skillTransferRate, // Transfer rate REAL o estimado
|
transfer_rate: skillTransferRate, // REAL or estimated transfer rate
|
||||||
},
|
},
|
||||||
automation_readiness,
|
automation_readiness,
|
||||||
dimensions: {
|
dimensions: {
|
||||||
@@ -1491,19 +1491,19 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
|
|
||||||
const benchmarkData: AnalysisData['benchmarkData'] = [];
|
const benchmarkData: AnalysisData['benchmarkData'] = [];
|
||||||
|
|
||||||
// Benchmarks hardcoded para sector aéreo
|
// Hardcoded benchmarks for airline sector
|
||||||
const AIRLINE_BENCHMARKS = {
|
const AIRLINE_BENCHMARKS = {
|
||||||
aht_p50: 380, // segundos
|
aht_p50: 380, // seconds
|
||||||
fcr: 70, // % (rango 68-72%)
|
fcr: 70, // % (range 68-72%)
|
||||||
abandonment: 5, // % (rango 5-8%)
|
abandonment: 5, // % (range 5-8%)
|
||||||
ratio_p90_p50: 2.0, // ratio saludable
|
ratio_p90_p50: 2.0, // healthy ratio
|
||||||
cpi: 5.25 // € (rango €4.50-€6.00)
|
cpi: 5.25 // € (range €4.50-€6.00)
|
||||||
};
|
};
|
||||||
|
|
||||||
// 1. AHT Promedio (benchmark sector aéreo: 380s)
|
// 1. Average AHT (airline sector benchmark: 380s)
|
||||||
const ahtP50 = safeNumber(op?.aht_distribution?.p50, 0);
|
const ahtP50 = safeNumber(op?.aht_distribution?.p50, 0);
|
||||||
if (ahtP50 > 0) {
|
if (ahtP50 > 0) {
|
||||||
// Percentil: menor AHT = mejor. Si AHT <= benchmark = P75+
|
// Percentile: lower AHT = better. If AHT <= benchmark = P75+
|
||||||
const ahtPercentile = ahtP50 <= AIRLINE_BENCHMARKS.aht_p50
|
const ahtPercentile = ahtP50 <= AIRLINE_BENCHMARKS.aht_p50
|
||||||
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.aht_p50 - ahtP50) / 10))
|
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.aht_p50 - ahtP50) / 10))
|
||||||
: Math.max(10, 75 - Math.round((ahtP50 - AIRLINE_BENCHMARKS.aht_p50) / 5));
|
: Math.max(10, 75 - Math.round((ahtP50 - AIRLINE_BENCHMARKS.aht_p50) / 5));
|
||||||
@@ -1521,15 +1521,15 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. Tasa FCR (benchmark sector aéreo: 70%)
|
// 2. FCR Rate (benchmark airline sector: 70%)
|
||||||
const fcrRate = safeNumber(op?.fcr_rate, NaN);
|
const fcrRate = safeNumber(op?.fcr_rate, NaN);
|
||||||
if (Number.isFinite(fcrRate) && fcrRate >= 0) {
|
if (Number.isFinite(fcrRate) && fcrRate >= 0) {
|
||||||
// Percentil: mayor FCR = mejor
|
// Percentile: higher FCR = better
|
||||||
const fcrPercentile = fcrRate >= AIRLINE_BENCHMARKS.fcr
|
const fcrPercentile = fcrRate >= AIRLINE_BENCHMARKS.fcr
|
||||||
? Math.min(90, 50 + Math.round((fcrRate - AIRLINE_BENCHMARKS.fcr) * 2))
|
? Math.min(90, 50 + Math.round((fcrRate - AIRLINE_BENCHMARKS.fcr) * 2))
|
||||||
: Math.max(10, 50 - Math.round((AIRLINE_BENCHMARKS.fcr - fcrRate) * 2));
|
: Math.max(10, 50 - Math.round((AIRLINE_BENCHMARKS.fcr - fcrRate) * 2));
|
||||||
benchmarkData.push({
|
benchmarkData.push({
|
||||||
kpi: 'Tasa FCR',
|
kpi: 'FCR Rate',
|
||||||
userValue: fcrRate / 100,
|
userValue: fcrRate / 100,
|
||||||
userDisplay: `${Math.round(fcrRate)}%`,
|
userDisplay: `${Math.round(fcrRate)}%`,
|
||||||
industryValue: AIRLINE_BENCHMARKS.fcr / 100,
|
industryValue: AIRLINE_BENCHMARKS.fcr / 100,
|
||||||
@@ -1560,15 +1560,15 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. Tasa de Abandono (benchmark sector aéreo: 5%)
|
// 4. Abandonment Rate (benchmark airline sector: 5%)
|
||||||
const abandonRate = safeNumber(op?.abandonment_rate, NaN);
|
const abandonRate = safeNumber(op?.abandonment_rate, NaN);
|
||||||
if (Number.isFinite(abandonRate) && abandonRate >= 0) {
|
if (Number.isFinite(abandonRate) && abandonRate >= 0) {
|
||||||
// Percentil: menor abandono = mejor
|
// Percentile: lower abandonment = better
|
||||||
const abandonPercentile = abandonRate <= AIRLINE_BENCHMARKS.abandonment
|
const abandonPercentile = abandonRate <= AIRLINE_BENCHMARKS.abandonment
|
||||||
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.abandonment - abandonRate) * 5))
|
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.abandonment - abandonRate) * 5))
|
||||||
: Math.max(10, 75 - Math.round((abandonRate - AIRLINE_BENCHMARKS.abandonment) * 5));
|
: Math.max(10, 75 - Math.round((abandonRate - AIRLINE_BENCHMARKS.abandonment) * 5));
|
||||||
benchmarkData.push({
|
benchmarkData.push({
|
||||||
kpi: 'Tasa de Abandono',
|
kpi: 'Abandonment Rate',
|
||||||
userValue: abandonRate / 100,
|
userValue: abandonRate / 100,
|
||||||
userDisplay: `${abandonRate.toFixed(1)}%`,
|
userDisplay: `${abandonRate.toFixed(1)}%`,
|
||||||
industryValue: AIRLINE_BENCHMARKS.abandonment / 100,
|
industryValue: AIRLINE_BENCHMARKS.abandonment / 100,
|
||||||
@@ -1581,11 +1581,11 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// 5. Ratio P90/P50 (benchmark sector aéreo: <2.0)
|
// 5. Ratio P90/P50 (benchmark airline sector: <2.0)
|
||||||
const ahtP90 = safeNumber(op?.aht_distribution?.p90, 0);
|
const ahtP90 = safeNumber(op?.aht_distribution?.p90, 0);
|
||||||
const ratio = ahtP50 > 0 && ahtP90 > 0 ? ahtP90 / ahtP50 : 0;
|
const ratio = ahtP50 > 0 && ahtP90 > 0 ? ahtP90 / ahtP50 : 0;
|
||||||
if (ratio > 0) {
|
if (ratio > 0) {
|
||||||
// Percentil: menor ratio = mejor
|
// Percentile: lower ratio = better
|
||||||
const ratioPercentile = ratio <= AIRLINE_BENCHMARKS.ratio_p90_p50
|
const ratioPercentile = ratio <= AIRLINE_BENCHMARKS.ratio_p90_p50
|
||||||
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.ratio_p90_p50 - ratio) * 30))
|
? Math.min(90, 75 + Math.round((AIRLINE_BENCHMARKS.ratio_p90_p50 - ratio) * 30))
|
||||||
: Math.max(10, 75 - Math.round((ratio - AIRLINE_BENCHMARKS.ratio_p90_p50) * 30));
|
: Math.max(10, 75 - Math.round((ratio - AIRLINE_BENCHMARKS.ratio_p90_p50) * 30));
|
||||||
@@ -1603,13 +1603,13 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// 6. Tasa de Transferencia/Escalación
|
// 6. Transfer/Escalation Rate
|
||||||
const escalationRate = safeNumber(op?.escalation_rate, NaN);
|
const escalationRate = safeNumber(op?.escalation_rate, NaN);
|
||||||
if (Number.isFinite(escalationRate) && escalationRate >= 0) {
|
if (Number.isFinite(escalationRate) && escalationRate >= 0) {
|
||||||
// Menor escalación = mejor percentil
|
// Lower escalation = better percentile
|
||||||
const escalationPercentile = Math.max(10, Math.min(90, Math.round(100 - escalationRate * 5)));
|
const escalationPercentile = Math.max(10, Math.min(90, Math.round(100 - escalationRate * 5)));
|
||||||
benchmarkData.push({
|
benchmarkData.push({
|
||||||
kpi: 'Tasa de Transferencia',
|
kpi: 'Transfer Rate',
|
||||||
userValue: escalationRate / 100,
|
userValue: escalationRate / 100,
|
||||||
userDisplay: `${escalationRate.toFixed(1)}%`,
|
userDisplay: `${escalationRate.toFixed(1)}%`,
|
||||||
industryValue: 0.15,
|
industryValue: 0.15,
|
||||||
@@ -1622,7 +1622,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. CPI - Coste por Interacción (benchmark sector aéreo: €4.50-€6.00)
|
// 7. CPI - Cost per Interaction (benchmark airline sector: €4.50-€6.00)
|
||||||
const econ = raw?.economy_costs;
|
const econ = raw?.economy_costs;
|
||||||
const totalAnnualCost = safeNumber(econ?.cost_breakdown?.total_annual, 0);
|
const totalAnnualCost = safeNumber(econ?.cost_breakdown?.total_annual, 0);
|
||||||
const volumetry = raw?.volumetry;
|
const volumetry = raw?.volumetry;
|
||||||
@@ -1634,7 +1634,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
|
|
||||||
if (totalAnnualCost > 0 && totalInteractions > 0) {
|
if (totalAnnualCost > 0 && totalInteractions > 0) {
|
||||||
const cpi = totalAnnualCost / totalInteractions;
|
const cpi = totalAnnualCost / totalInteractions;
|
||||||
// Menor CPI = mejor. Si CPI <= 4.50 = excelente (P90+), si CPI >= 6.00 = malo (P25-)
|
// Lower CPI = better. If CPI <= 4.50 = excellent (P90+), if CPI >= 6.00 = poor (P25-)
|
||||||
let cpiPercentile: number;
|
let cpiPercentile: number;
|
||||||
if (cpi <= 4.50) {
|
if (cpi <= 4.50) {
|
||||||
cpiPercentile = Math.min(95, 90 + Math.round((4.50 - cpi) * 10));
|
cpiPercentile = Math.min(95, 90 + Math.round((4.50 - cpi) * 10));
|
||||||
@@ -1647,7 +1647,7 @@ function buildBenchmarkData(raw: BackendRawResults): AnalysisData['benchmarkData
|
|||||||
}
|
}
|
||||||
|
|
||||||
benchmarkData.push({
|
benchmarkData.push({
|
||||||
kpi: 'Coste por Interacción (CPI)',
|
kpi: 'Cost per Interaction (CPI)',
|
||||||
userValue: cpi,
|
userValue: cpi,
|
||||||
userDisplay: `€${cpi.toFixed(2)}`,
|
userDisplay: `€${cpi.toFixed(2)}`,
|
||||||
industryValue: AIRLINE_BENCHMARKS.cpi,
|
industryValue: AIRLINE_BENCHMARKS.cpi,
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
// utils/dataTransformation.ts
|
// utils/dataTransformation.ts
|
||||||
// Pipeline de transformación de datos raw a métricas procesadas
|
// Raw data to processed metrics transformation pipeline
|
||||||
|
|
||||||
import type { RawInteraction } from '../types';
|
import type { RawInteraction } from '../types';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Paso 1: Limpieza de Ruido
|
* Step 1: Noise Cleanup
|
||||||
* Elimina interacciones con duration < 10 segundos (falsos contactos o errores de sistema)
|
* Removes interactions with duration < 10 seconds (false contacts or system errors)
|
||||||
*/
|
*/
|
||||||
export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteraction[] {
|
export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteraction[] {
|
||||||
const MIN_DURATION_SECONDS = 10;
|
const MIN_DURATION_SECONDS = 10;
|
||||||
@@ -22,30 +22,30 @@ export function cleanNoiseFromData(interactions: RawInteraction[]): RawInteracti
|
|||||||
const removedCount = interactions.length - cleaned.length;
|
const removedCount = interactions.length - cleaned.length;
|
||||||
const removedPercentage = ((removedCount / interactions.length) * 100).toFixed(1);
|
const removedPercentage = ((removedCount / interactions.length) * 100).toFixed(1);
|
||||||
|
|
||||||
console.log(`🧹 Limpieza de Ruido: ${removedCount} interacciones eliminadas (${removedPercentage}% del total)`);
|
console.log(`🧹 Noise Cleanup: ${removedCount} interactions removed (${removedPercentage}% of total)`);
|
||||||
console.log(`✅ Interacciones limpias: ${cleaned.length}`);
|
console.log(`✅ Clean interactions: ${cleaned.length}`);
|
||||||
|
|
||||||
return cleaned;
|
return cleaned;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Métricas base calculadas por skill
|
* Base metrics calculated by skill
|
||||||
*/
|
*/
|
||||||
export interface SkillBaseMetrics {
|
export interface SkillBaseMetrics {
|
||||||
skill: string;
|
skill: string;
|
||||||
volume: number; // Número de interacciones
|
volume: number; // Number of interactions
|
||||||
aht_mean: number; // AHT promedio (segundos)
|
aht_mean: number; // Average AHT (seconds)
|
||||||
aht_std: number; // Desviación estándar del AHT
|
aht_std: number; // AHT standard deviation
|
||||||
transfer_rate: number; // Tasa de transferencia (0-100)
|
transfer_rate: number; // Transfer rate (0-100)
|
||||||
total_cost: number; // Coste total (€)
|
total_cost: number; // Total cost (€)
|
||||||
|
|
||||||
// Datos auxiliares para cálculos posteriores
|
// Auxiliary data for subsequent calculations
|
||||||
aht_values: number[]; // Array de todos los AHT para percentiles
|
aht_values: number[]; // Array of all AHT values for percentiles
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Paso 2: Calcular Métricas Base por Skill
|
* Step 2: Calculate Base Metrics by Skill
|
||||||
* Agrupa por skill y calcula volumen, AHT promedio, desviación estándar, tasa de transferencia y coste
|
* Groups by skill and calculates volume, average AHT, standard deviation, transfer rate and cost
|
||||||
*/
|
*/
|
||||||
export function calculateSkillBaseMetrics(
|
export function calculateSkillBaseMetrics(
|
||||||
interactions: RawInteraction[],
|
interactions: RawInteraction[],
|
||||||
@@ -53,7 +53,7 @@ export function calculateSkillBaseMetrics(
|
|||||||
): SkillBaseMetrics[] {
|
): SkillBaseMetrics[] {
|
||||||
const COST_PER_SECOND = costPerHour / 3600;
|
const COST_PER_SECOND = costPerHour / 3600;
|
||||||
|
|
||||||
// Agrupar por skill
|
// Group by skill
|
||||||
const skillGroups = new Map<string, RawInteraction[]>();
|
const skillGroups = new Map<string, RawInteraction[]>();
|
||||||
|
|
||||||
interactions.forEach(interaction => {
|
interactions.forEach(interaction => {
|
||||||
@@ -64,31 +64,31 @@ export function calculateSkillBaseMetrics(
|
|||||||
skillGroups.get(skill)!.push(interaction);
|
skillGroups.get(skill)!.push(interaction);
|
||||||
});
|
});
|
||||||
|
|
||||||
// Calcular métricas por skill
|
// Calculate metrics per skill
|
||||||
const metrics: SkillBaseMetrics[] = [];
|
const metrics: SkillBaseMetrics[] = [];
|
||||||
|
|
||||||
skillGroups.forEach((skillInteractions, skill) => {
|
skillGroups.forEach((skillInteractions, skill) => {
|
||||||
const volume = skillInteractions.length;
|
const volume = skillInteractions.length;
|
||||||
|
|
||||||
// Calcular AHT para cada interacción
|
// Calculate AHT for each interaction
|
||||||
const ahtValues = skillInteractions.map(i =>
|
const ahtValues = skillInteractions.map(i =>
|
||||||
i.duration_talk + i.hold_time + i.wrap_up_time
|
i.duration_talk + i.hold_time + i.wrap_up_time
|
||||||
);
|
);
|
||||||
|
|
||||||
// AHT promedio
|
// Average AHT
|
||||||
const ahtMean = ahtValues.reduce((sum, val) => sum + val, 0) / volume;
|
const ahtMean = ahtValues.reduce((sum, val) => sum + val, 0) / volume;
|
||||||
|
|
||||||
// Desviación estándar del AHT
|
// AHT standard deviation
|
||||||
const variance = ahtValues.reduce((sum, val) =>
|
const variance = ahtValues.reduce((sum, val) =>
|
||||||
sum + Math.pow(val - ahtMean, 2), 0
|
sum + Math.pow(val - ahtMean, 2), 0
|
||||||
) / volume;
|
) / volume;
|
||||||
const ahtStd = Math.sqrt(variance);
|
const ahtStd = Math.sqrt(variance);
|
||||||
|
|
||||||
// Tasa de transferencia
|
// Transfer rate
|
||||||
const transferCount = skillInteractions.filter(i => i.transfer_flag).length;
|
const transferCount = skillInteractions.filter(i => i.transfer_flag).length;
|
||||||
const transferRate = (transferCount / volume) * 100;
|
const transferRate = (transferCount / volume) * 100;
|
||||||
|
|
||||||
// Coste total
|
// Total cost
|
||||||
const totalCost = ahtValues.reduce((sum, aht) =>
|
const totalCost = ahtValues.reduce((sum, aht) =>
|
||||||
sum + (aht * COST_PER_SECOND), 0
|
sum + (aht * COST_PER_SECOND), 0
|
||||||
);
|
);
|
||||||
@@ -104,82 +104,82 @@ export function calculateSkillBaseMetrics(
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
// Ordenar por volumen descendente
|
// Sort by descending volume
|
||||||
metrics.sort((a, b) => b.volume - a.volume);
|
metrics.sort((a, b) => b.volume - a.volume);
|
||||||
|
|
||||||
console.log(`📊 Métricas Base calculadas para ${metrics.length} skills`);
|
console.log(`📊 Base Metrics calculated for ${metrics.length} skills`);
|
||||||
|
|
||||||
return metrics;
|
return metrics;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Dimensiones transformadas para Agentic Readiness Score
|
* Transformed dimensions for Agentic Readiness Score
|
||||||
*/
|
*/
|
||||||
export interface SkillDimensions {
|
export interface SkillDimensions {
|
||||||
skill: string;
|
skill: string;
|
||||||
volume: number;
|
volume: number;
|
||||||
|
|
||||||
// Dimensión 1: Predictibilidad (0-10)
|
// Dimension 1: Predictability (0-10)
|
||||||
predictability_score: number;
|
predictability_score: number;
|
||||||
predictability_cv: number; // Coeficiente de Variación (para referencia)
|
predictability_cv: number; // Coefficient of Variation (for reference)
|
||||||
|
|
||||||
// Dimensión 2: Complejidad Inversa (0-10)
|
// Dimension 2: Inverse Complexity (0-10)
|
||||||
complexity_inverse_score: number;
|
complexity_inverse_score: number;
|
||||||
complexity_transfer_rate: number; // Tasa de transferencia (para referencia)
|
complexity_transfer_rate: number; // Transfer rate (for reference)
|
||||||
|
|
||||||
// Dimensión 3: Repetitividad/Impacto (0-10)
|
// Dimension 3: Repetitiveness/Impact (0-10)
|
||||||
repetitivity_score: number;
|
repetitivity_score: number;
|
||||||
|
|
||||||
// Datos auxiliares
|
// Auxiliary data
|
||||||
aht_mean: number;
|
aht_mean: number;
|
||||||
total_cost: number;
|
total_cost: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Paso 3: Transformar Métricas Base a Dimensiones
|
* Step 3: Transform Base Metrics to Dimensions
|
||||||
* Aplica las fórmulas de normalización para obtener scores 0-10
|
* Applies normalization formulas to obtain 0-10 scores
|
||||||
*/
|
*/
|
||||||
export function transformToDimensions(
|
export function transformToDimensions(
|
||||||
baseMetrics: SkillBaseMetrics[]
|
baseMetrics: SkillBaseMetrics[]
|
||||||
): SkillDimensions[] {
|
): SkillDimensions[] {
|
||||||
return baseMetrics.map(metric => {
|
return baseMetrics.map(metric => {
|
||||||
// Dimensión 1: Predictibilidad (Proxy: Variabilidad del AHT)
|
// Dimension 1: Predictability (Proxy: AHT Variability)
|
||||||
// CV = desviación estándar / media
|
// CV = standard deviation / mean
|
||||||
const cv = metric.aht_std / metric.aht_mean;
|
const cv = metric.aht_std / metric.aht_mean;
|
||||||
|
|
||||||
// Normalización: CV <= 0.3 → 10, CV >= 1.5 → 0
|
// Normalization: CV <= 0.3 → 10, CV >= 1.5 → 0
|
||||||
// Fórmula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10)))
|
// Formula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10)))
|
||||||
const predictabilityScore = Math.max(0, Math.min(10,
|
const predictabilityScore = Math.max(0, Math.min(10,
|
||||||
10 - ((cv - 0.3) / 1.2 * 10)
|
10 - ((cv - 0.3) / 1.2 * 10)
|
||||||
));
|
));
|
||||||
|
|
||||||
// Dimensión 2: Complejidad Inversa (Proxy: Tasa de Transferencia)
|
// Dimension 2: Inverse Complexity (Proxy: Transfer Rate)
|
||||||
// T = tasa de transferencia (%)
|
// T = transfer rate (%)
|
||||||
const transferRate = metric.transfer_rate;
|
const transferRate = metric.transfer_rate;
|
||||||
|
|
||||||
// Normalización: T <= 5% → 10, T >= 30% → 0
|
// Normalization: T <= 5% → 10, T >= 30% → 0
|
||||||
// Fórmula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10)))
|
// Formula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10)))
|
||||||
const complexityInverseScore = Math.max(0, Math.min(10,
|
const complexityInverseScore = Math.max(0, Math.min(10,
|
||||||
10 - ((transferRate / 100 - 0.05) / 0.25 * 10)
|
10 - ((transferRate / 100 - 0.05) / 0.25 * 10)
|
||||||
));
|
));
|
||||||
|
|
||||||
// Dimensión 3: Repetitividad/Impacto (Proxy: Volumen)
|
// Dimension 3: Repetitiveness/Impact (Proxy: Volume)
|
||||||
// Normalización fija: > 5,000 llamadas/mes = 10, < 100 = 0
|
// Fixed normalization: > 5,000 calls/month = 10, < 100 = 0
|
||||||
let repetitivityScore: number;
|
let repetitivityScore: number;
|
||||||
if (metric.volume >= 5000) {
|
if (metric.volume >= 5000) {
|
||||||
repetitivityScore = 10;
|
repetitivityScore = 10;
|
||||||
} else if (metric.volume <= 100) {
|
} else if (metric.volume <= 100) {
|
||||||
repetitivityScore = 0;
|
repetitivityScore = 0;
|
||||||
} else {
|
} else {
|
||||||
// Interpolación lineal entre 100 y 5000
|
// Linear interpolation between 100 and 5000
|
||||||
repetitivityScore = ((metric.volume - 100) / (5000 - 100)) * 10;
|
repetitivityScore = ((metric.volume - 100) / (5000 - 100)) * 10;
|
||||||
}
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
skill: metric.skill,
|
skill: metric.skill,
|
||||||
volume: metric.volume,
|
volume: metric.volume,
|
||||||
predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal
|
predictability_score: Math.round(predictabilityScore * 10) / 10, // 1 decimal place
|
||||||
predictability_cv: Math.round(cv * 100) / 100, // 2 decimales
|
predictability_cv: Math.round(cv * 100) / 100, // 2 decimal places
|
||||||
complexity_inverse_score: Math.round(complexityInverseScore * 10) / 10,
|
complexity_inverse_score: Math.round(complexityInverseScore * 10) / 10,
|
||||||
complexity_transfer_rate: Math.round(transferRate * 10) / 10,
|
complexity_transfer_rate: Math.round(transferRate * 10) / 10,
|
||||||
repetitivity_score: Math.round(repetitivityScore * 10) / 10,
|
repetitivity_score: Math.round(repetitivityScore * 10) / 10,
|
||||||
@@ -190,7 +190,7 @@ export function transformToDimensions(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Resultado final con Agentic Readiness Score
|
* Final result with Agentic Readiness Score
|
||||||
*/
|
*/
|
||||||
export interface SkillAgenticReadiness extends SkillDimensions {
|
export interface SkillAgenticReadiness extends SkillDimensions {
|
||||||
agentic_readiness_score: number; // 0-10
|
agentic_readiness_score: number; // 0-10
|
||||||
@@ -199,28 +199,28 @@ export interface SkillAgenticReadiness extends SkillDimensions {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Paso 4: Calcular Agentic Readiness Score
|
* Step 4: Calculate Agentic Readiness Score
|
||||||
* Promedio ponderado de las 3 dimensiones
|
* Weighted average of the 3 dimensions
|
||||||
*/
|
*/
|
||||||
export function calculateAgenticReadinessScore(
|
export function calculateAgenticReadinessScore(
|
||||||
dimensions: SkillDimensions[],
|
dimensions: SkillDimensions[],
|
||||||
weights?: { predictability: number; complexity: number; repetitivity: number }
|
weights?: { predictability: number; complexity: number; repetitivity: number }
|
||||||
): SkillAgenticReadiness[] {
|
): SkillAgenticReadiness[] {
|
||||||
// Pesos por defecto (ajustables)
|
// Default weights (adjustable)
|
||||||
const w = weights || {
|
const w = weights || {
|
||||||
predictability: 0.40, // 40% - Más importante
|
predictability: 0.40, // 40% - Most important
|
||||||
complexity: 0.35, // 35%
|
complexity: 0.35, // 35%
|
||||||
repetitivity: 0.25 // 25%
|
repetitivity: 0.25 // 25%
|
||||||
};
|
};
|
||||||
|
|
||||||
return dimensions.map(dim => {
|
return dimensions.map(dim => {
|
||||||
// Promedio ponderado
|
// Weighted average
|
||||||
const score =
|
const score =
|
||||||
dim.predictability_score * w.predictability +
|
dim.predictability_score * w.predictability +
|
||||||
dim.complexity_inverse_score * w.complexity +
|
dim.complexity_inverse_score * w.complexity +
|
||||||
dim.repetitivity_score * w.repetitivity;
|
dim.repetitivity_score * w.repetitivity;
|
||||||
|
|
||||||
// Categorizar
|
// Categorize
|
||||||
let category: 'automate_now' | 'assist_copilot' | 'optimize_first';
|
let category: 'automate_now' | 'assist_copilot' | 'optimize_first';
|
||||||
let label: string;
|
let label: string;
|
||||||
|
|
||||||
@@ -245,29 +245,29 @@ export function calculateAgenticReadinessScore(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Pipeline completo: Raw Data → Agentic Readiness Score
|
* Complete pipeline: Raw Data → Agentic Readiness Score
|
||||||
*/
|
*/
|
||||||
export function transformRawDataToAgenticReadiness(
|
export function transformRawDataToAgenticReadiness(
|
||||||
rawInteractions: RawInteraction[],
|
rawInteractions: RawInteraction[],
|
||||||
costPerHour: number,
|
costPerHour: number,
|
||||||
weights?: { predictability: number; complexity: number; repetitivity: number }
|
weights?: { predictability: number; complexity: number; repetitivity: number }
|
||||||
): SkillAgenticReadiness[] {
|
): SkillAgenticReadiness[] {
|
||||||
console.log(`🚀 Iniciando pipeline de transformación con ${rawInteractions.length} interacciones...`);
|
console.log(`🚀 Starting transformation pipeline with ${rawInteractions.length} interactions...`);
|
||||||
|
|
||||||
// Paso 1: Limpieza de ruido
|
// Step 1: Noise cleanup
|
||||||
const cleanedData = cleanNoiseFromData(rawInteractions);
|
const cleanedData = cleanNoiseFromData(rawInteractions);
|
||||||
|
|
||||||
// Paso 2: Calcular métricas base
|
// Step 2: Calculate base metrics
|
||||||
const baseMetrics = calculateSkillBaseMetrics(cleanedData, costPerHour);
|
const baseMetrics = calculateSkillBaseMetrics(cleanedData, costPerHour);
|
||||||
|
|
||||||
// Paso 3: Transformar a dimensiones
|
// Step 3: Transform to dimensions
|
||||||
const dimensions = transformToDimensions(baseMetrics);
|
const dimensions = transformToDimensions(baseMetrics);
|
||||||
|
|
||||||
// Paso 4: Calcular Agentic Readiness Score
|
// Step 4: Calculate Agentic Readiness Score
|
||||||
const agenticReadiness = calculateAgenticReadinessScore(dimensions, weights);
|
const agenticReadiness = calculateAgenticReadinessScore(dimensions, weights);
|
||||||
|
|
||||||
console.log(`✅ Pipeline completado: ${agenticReadiness.length} skills procesados`);
|
console.log(`✅ Pipeline completed: ${agenticReadiness.length} skills processed`);
|
||||||
console.log(`📈 Distribución:`);
|
console.log(`📈 Distribution:`);
|
||||||
const automateCount = agenticReadiness.filter(s => s.readiness_category === 'automate_now').length;
|
const automateCount = agenticReadiness.filter(s => s.readiness_category === 'automate_now').length;
|
||||||
const assistCount = agenticReadiness.filter(s => s.readiness_category === 'assist_copilot').length;
|
const assistCount = agenticReadiness.filter(s => s.readiness_category === 'assist_copilot').length;
|
||||||
const optimizeCount = agenticReadiness.filter(s => s.readiness_category === 'optimize_first').length;
|
const optimizeCount = agenticReadiness.filter(s => s.readiness_category === 'optimize_first').length;
|
||||||
@@ -279,7 +279,7 @@ export function transformRawDataToAgenticReadiness(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Utilidad: Generar resumen de estadísticas
|
* Utility: Generate statistics summary
|
||||||
*/
|
*/
|
||||||
export function generateTransformationSummary(
|
export function generateTransformationSummary(
|
||||||
originalCount: number,
|
originalCount: number,
|
||||||
@@ -300,11 +300,11 @@ export function generateTransformationSummary(
|
|||||||
const optimizePercent = skillsCount > 0 ? ((optimizeCount/skillsCount)*100).toFixed(0) : '0';
|
const optimizePercent = skillsCount > 0 ? ((optimizeCount/skillsCount)*100).toFixed(0) : '0';
|
||||||
|
|
||||||
return `
|
return `
|
||||||
📊 Resumen de Transformación:
|
📊 Transformation Summary:
|
||||||
• Interacciones originales: ${originalCount.toLocaleString()}
|
• Original interactions: ${originalCount.toLocaleString()}
|
||||||
• Ruido eliminado: ${removedCount.toLocaleString()} (${removedPercentage}%)
|
• Noise removed: ${removedCount.toLocaleString()} (${removedPercentage}%)
|
||||||
• Interacciones limpias: ${cleanedCount.toLocaleString()}
|
• Clean interactions: ${cleanedCount.toLocaleString()}
|
||||||
• Skills únicos: ${skillsCount}
|
• Unique skills: ${skillsCount}
|
||||||
|
|
||||||
🎯 Agentic Readiness:
|
🎯 Agentic Readiness:
|
||||||
• 🟢 Automate Now: ${automateCount} skills (${automatePercent}%)
|
• 🟢 Automate Now: ${automateCount} skills (${automatePercent}%)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,5 @@
|
|||||||
// utils/segmentClassifier.ts
|
// utils/segmentClassifier.ts
|
||||||
// Utilidad para clasificar colas/skills en segmentos de cliente
|
// Utility to classify queues/skills into customer segments
|
||||||
|
|
||||||
import type { CustomerSegment, RawInteraction, StaticConfig } from '../types';
|
import type { CustomerSegment, RawInteraction, StaticConfig } from '../types';
|
||||||
|
|
||||||
@@ -10,8 +10,8 @@ export interface SegmentMapping {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Parsea string de colas separadas por comas
|
* Parses queue string separated by commas
|
||||||
* Ejemplo: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"]
|
* Example: "VIP, Premium, Enterprise" → ["VIP", "Premium", "Enterprise"]
|
||||||
*/
|
*/
|
||||||
export function parseQueueList(input: string): string[] {
|
export function parseQueueList(input: string): string[] {
|
||||||
if (!input || input.trim().length === 0) {
|
if (!input || input.trim().length === 0) {
|
||||||
@@ -25,13 +25,13 @@ export function parseQueueList(input: string): string[] {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Clasifica una cola según el mapeo proporcionado
|
* Classifies a queue according to the provided mapping
|
||||||
* Usa matching parcial y case-insensitive
|
* Uses partial and case-insensitive matching
|
||||||
*
|
*
|
||||||
* Ejemplo:
|
* Example:
|
||||||
* - queue: "VIP_Support" + mapping.high: ["VIP"] → "high"
|
* - queue: "VIP_Support" + mapping.high: ["VIP"] → "high"
|
||||||
* - queue: "Soporte_General_N1" + mapping.medium: ["Soporte_General"] → "medium"
|
* - queue: "General_Support_L1" + mapping.medium: ["General_Support"] → "medium"
|
||||||
* - queue: "Retencion" (no match) → "medium" (default)
|
* - queue: "Retention" (no match) → "medium" (default)
|
||||||
*/
|
*/
|
||||||
export function classifyQueue(
|
export function classifyQueue(
|
||||||
queue: string,
|
queue: string,
|
||||||
@@ -39,7 +39,7 @@ export function classifyQueue(
|
|||||||
): CustomerSegment {
|
): CustomerSegment {
|
||||||
const normalizedQueue = queue.toLowerCase().trim();
|
const normalizedQueue = queue.toLowerCase().trim();
|
||||||
|
|
||||||
// Buscar en high value
|
// Search in high value
|
||||||
for (const highQueue of mapping.high_value_queues) {
|
for (const highQueue of mapping.high_value_queues) {
|
||||||
const normalizedHigh = highQueue.toLowerCase().trim();
|
const normalizedHigh = highQueue.toLowerCase().trim();
|
||||||
if (normalizedQueue.includes(normalizedHigh) || normalizedHigh.includes(normalizedQueue)) {
|
if (normalizedQueue.includes(normalizedHigh) || normalizedHigh.includes(normalizedQueue)) {
|
||||||
@@ -47,7 +47,7 @@ export function classifyQueue(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Buscar en low value
|
// Search in low value
|
||||||
for (const lowQueue of mapping.low_value_queues) {
|
for (const lowQueue of mapping.low_value_queues) {
|
||||||
const normalizedLow = lowQueue.toLowerCase().trim();
|
const normalizedLow = lowQueue.toLowerCase().trim();
|
||||||
if (normalizedQueue.includes(normalizedLow) || normalizedLow.includes(normalizedQueue)) {
|
if (normalizedQueue.includes(normalizedLow) || normalizedLow.includes(normalizedQueue)) {
|
||||||
@@ -55,7 +55,7 @@ export function classifyQueue(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Buscar en medium value (explícito)
|
// Search in medium value (explicit)
|
||||||
for (const mediumQueue of mapping.medium_value_queues) {
|
for (const mediumQueue of mapping.medium_value_queues) {
|
||||||
const normalizedMedium = mediumQueue.toLowerCase().trim();
|
const normalizedMedium = mediumQueue.toLowerCase().trim();
|
||||||
if (normalizedQueue.includes(normalizedMedium) || normalizedMedium.includes(normalizedQueue)) {
|
if (normalizedQueue.includes(normalizedMedium) || normalizedMedium.includes(normalizedQueue)) {
|
||||||
@@ -63,13 +63,13 @@ export function classifyQueue(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default: medium (para colas no mapeadas)
|
// Default: medium (for unmapped queues)
|
||||||
return 'medium';
|
return 'medium';
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Clasifica todas las colas únicas de un conjunto de interacciones
|
* Classifies all unique queues from a set of interactions
|
||||||
* Retorna un mapa de cola → segmento
|
* Returns a map of queue → segment
|
||||||
*/
|
*/
|
||||||
export function classifyAllQueues(
|
export function classifyAllQueues(
|
||||||
interactions: RawInteraction[],
|
interactions: RawInteraction[],
|
||||||
@@ -77,10 +77,10 @@ export function classifyAllQueues(
|
|||||||
): Map<string, CustomerSegment> {
|
): Map<string, CustomerSegment> {
|
||||||
const queueSegments = new Map<string, CustomerSegment>();
|
const queueSegments = new Map<string, CustomerSegment>();
|
||||||
|
|
||||||
// Obtener colas únicas
|
// Get unique queues
|
||||||
const uniqueQueues = [...new Set(interactions.map(i => i.queue_skill))];
|
const uniqueQueues = [...new Set(interactions.map(i => i.queue_skill))];
|
||||||
|
|
||||||
// Clasificar cada cola
|
// Classify each queue
|
||||||
uniqueQueues.forEach(queue => {
|
uniqueQueues.forEach(queue => {
|
||||||
queueSegments.set(queue, classifyQueue(queue, mapping));
|
queueSegments.set(queue, classifyQueue(queue, mapping));
|
||||||
});
|
});
|
||||||
@@ -89,8 +89,8 @@ export function classifyAllQueues(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Genera estadísticas de segmentación
|
* Generates segmentation statistics
|
||||||
* Retorna conteo, porcentaje y lista de colas por segmento
|
* Returns count, percentage and list of queues by segment
|
||||||
*/
|
*/
|
||||||
export function getSegmentationStats(
|
export function getSegmentationStats(
|
||||||
interactions: RawInteraction[],
|
interactions: RawInteraction[],
|
||||||
@@ -108,13 +108,13 @@ export function getSegmentationStats(
|
|||||||
total: interactions.length
|
total: interactions.length
|
||||||
};
|
};
|
||||||
|
|
||||||
// Contar interacciones por segmento
|
// Count interactions by segment
|
||||||
interactions.forEach(interaction => {
|
interactions.forEach(interaction => {
|
||||||
const segment = queueSegments.get(interaction.queue_skill) || 'medium';
|
const segment = queueSegments.get(interaction.queue_skill) || 'medium';
|
||||||
stats[segment].count++;
|
stats[segment].count++;
|
||||||
});
|
});
|
||||||
|
|
||||||
// Calcular porcentajes
|
// Calculate percentages
|
||||||
const total = interactions.length;
|
const total = interactions.length;
|
||||||
if (total > 0) {
|
if (total > 0) {
|
||||||
stats.high.percentage = Math.round((stats.high.count / total) * 100);
|
stats.high.percentage = Math.round((stats.high.count / total) * 100);
|
||||||
@@ -122,7 +122,7 @@ export function getSegmentationStats(
|
|||||||
stats.low.percentage = Math.round((stats.low.count / total) * 100);
|
stats.low.percentage = Math.round((stats.low.count / total) * 100);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Obtener colas por segmento (únicas)
|
// Get queues by segment (unique)
|
||||||
queueSegments.forEach((segment, queue) => {
|
queueSegments.forEach((segment, queue) => {
|
||||||
if (!stats[segment].queues.includes(queue)) {
|
if (!stats[segment].queues.includes(queue)) {
|
||||||
stats[segment].queues.push(queue);
|
stats[segment].queues.push(queue);
|
||||||
@@ -133,7 +133,7 @@ export function getSegmentationStats(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Valida que el mapeo tenga al menos una cola en algún segmento
|
* Validates that the mapping has at least one queue in some segment
|
||||||
*/
|
*/
|
||||||
export function isValidMapping(mapping: SegmentMapping): boolean {
|
export function isValidMapping(mapping: SegmentMapping): boolean {
|
||||||
return (
|
return (
|
||||||
@@ -144,8 +144,8 @@ export function isValidMapping(mapping: SegmentMapping): boolean {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Crea un mapeo desde StaticConfig
|
* Creates a mapping from StaticConfig
|
||||||
* Si no hay segment_mapping, retorna mapeo vacío
|
* If there is no segment_mapping, returns empty mapping
|
||||||
*/
|
*/
|
||||||
export function getMappingFromConfig(config: StaticConfig): SegmentMapping | null {
|
export function getMappingFromConfig(config: StaticConfig): SegmentMapping | null {
|
||||||
if (!config.segment_mapping) {
|
if (!config.segment_mapping) {
|
||||||
@@ -160,8 +160,8 @@ export function getMappingFromConfig(config: StaticConfig): SegmentMapping | nul
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Obtiene el segmento para una cola específica desde el config
|
* Gets the segment for a specific queue from the config
|
||||||
* Si no hay mapeo, retorna 'medium' por defecto
|
* If there is no mapping, returns 'medium' by default
|
||||||
*/
|
*/
|
||||||
export function getSegmentForQueue(
|
export function getSegmentForQueue(
|
||||||
queue: string,
|
queue: string,
|
||||||
@@ -177,7 +177,7 @@ export function getSegmentForQueue(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Formatea estadísticas para mostrar en UI
|
* Formats statistics for display in UI
|
||||||
*/
|
*/
|
||||||
export function formatSegmentationSummary(
|
export function formatSegmentationSummary(
|
||||||
stats: ReturnType<typeof getSegmentationStats>
|
stats: ReturnType<typeof getSegmentationStats>
|
||||||
@@ -185,15 +185,15 @@ export function formatSegmentationSummary(
|
|||||||
const parts: string[] = [];
|
const parts: string[] = [];
|
||||||
|
|
||||||
if (stats.high.count > 0) {
|
if (stats.high.count > 0) {
|
||||||
parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interacciones)`);
|
parts.push(`${stats.high.percentage}% High Value (${stats.high.count} interactions)`);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (stats.medium.count > 0) {
|
if (stats.medium.count > 0) {
|
||||||
parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interacciones)`);
|
parts.push(`${stats.medium.percentage}% Medium Value (${stats.medium.count} interactions)`);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (stats.low.count > 0) {
|
if (stats.low.count > 0) {
|
||||||
parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interacciones)`);
|
parts.push(`${stats.low.percentage}% Low Value (${stats.low.count} interactions)`);
|
||||||
}
|
}
|
||||||
|
|
||||||
return parts.join(' | ');
|
return parts.join(' | ');
|
||||||
|
|||||||
Reference in New Issue
Block a user