Translate Phase 3 low-priority backend files (complete Spanish-to-English translation)

Phase 3 of Spanish-to-English translation for low-priority backend files:

Backend core modules (4 files):
- Volumetria.py: Translated ~15 occurrences (docstrings, comments, plot labels, day abbreviations)
- agent.py: Translated ~15 occurrences (system prompts, docstrings, error messages)
- pipeline.py: Translated ~10 occurrences (log messages, docstrings, comments)
- analysis_service.py: Translated ~10 occurrences (docstrings, error messages, comments)

All function names, class names, and variable names preserved for API compatibility.
Frontend and backend compilation tested and verified successful.

This completes the comprehensive Spanish-to-English translation project:
- Phase 1 (High Priority): 3 files - backendMapper.ts, analysisGenerator.ts, realDataAnalysis.ts
- Phase 2 (Medium Priority): 5 files - dataTransformation.ts, segmentClassifier.ts, + 3 dimension files
- Phase 3 (Low Priority): 4 files - Volumetria.py, agent.py, pipeline.py, analysis_service.py

Total files translated: 12 files (5 frontend TypeScript + 7 backend Python)
All critical path translations complete.

Related to TRANSLATION_STATUS.md Phase 3 completion.

https://claude.ai/code/session_01GNbnkFoESkRcnPr3bLCYDg
This commit is contained in:
Claude
2026-02-07 11:15:47 +00:00
parent 8c7f5fa827
commit 9caa382010
4 changed files with 217 additions and 217 deletions

View File

@@ -20,15 +20,15 @@ REQUIRED_COLUMNS_VOLUMETRIA: List[str] = [
@dataclass
class VolumetriaMetrics:
"""
Métricas de volumetría basadas en el nuevo esquema de datos.
Volumetry metrics based on the new data schema.
Columnas mínimas requeridas:
Minimum required columns:
- interaction_id
- datetime_start
- queue_skill
- channel
Otras columnas pueden existir pero no son necesarias para estas métricas.
Other columns may exist but are not required for these metrics.
"""
df: pd.DataFrame
@@ -38,41 +38,41 @@ class VolumetriaMetrics:
self._prepare_data()
# ------------------------------------------------------------------ #
# Helpers internos
# Internal helpers
# ------------------------------------------------------------------ #
def _validate_columns(self) -> None:
missing = [c for c in REQUIRED_COLUMNS_VOLUMETRIA if c not in self.df.columns]
if missing:
raise ValueError(
f"Faltan columnas obligatorias para VolumetriaMetrics: {missing}"
f"Missing required columns for VolumetriaMetrics: {missing}"
)
def _prepare_data(self) -> None:
df = self.df.copy()
# Asegurar tipo datetime
# Ensure datetime type
df["datetime_start"] = pd.to_datetime(df["datetime_start"], errors="coerce")
# Normalizar strings
# Normalize strings
df["queue_skill"] = df["queue_skill"].astype(str).str.strip()
df["channel"] = df["channel"].astype(str).str.strip()
# Guardamos el df preparado
# Store the prepared dataframe
self.df = df
# ------------------------------------------------------------------ #
# Propiedades útiles
# Useful properties
# ------------------------------------------------------------------ #
@property
def is_empty(self) -> bool:
return self.df.empty
# ------------------------------------------------------------------ #
# Métricas numéricas / tabulares
# Numeric / tabular metrics
# ------------------------------------------------------------------ #
def volume_by_channel(self) -> pd.Series:
"""
Nº de interacciones por canal.
Number of interactions by channel.
"""
return self.df.groupby("channel")["interaction_id"].nunique().sort_values(
ascending=False
@@ -80,7 +80,7 @@ class VolumetriaMetrics:
def volume_by_skill(self) -> pd.Series:
"""
Nº de interacciones por skill / cola.
Number of interactions by skill / queue.
"""
return self.df.groupby("queue_skill")["interaction_id"].nunique().sort_values(
ascending=False
@@ -88,7 +88,7 @@ class VolumetriaMetrics:
def channel_distribution_pct(self) -> pd.Series:
"""
Distribución porcentual del volumen por canal.
Percentage distribution of volume by channel.
"""
counts = self.volume_by_channel()
total = counts.sum()
@@ -98,7 +98,7 @@ class VolumetriaMetrics:
def skill_distribution_pct(self) -> pd.Series:
"""
Distribución porcentual del volumen por skill.
Percentage distribution of volume by skill.
"""
counts = self.volume_by_skill()
total = counts.sum()
@@ -108,12 +108,12 @@ class VolumetriaMetrics:
def heatmap_24x7(self) -> pd.DataFrame:
"""
Matriz [día_semana x hora] con nº de interacciones.
dayofweek: 0=Lunes ... 6=Domingo
Matrix [day_of_week x hour] with number of interactions.
dayofweek: 0=Monday ... 6=Sunday
"""
df = self.df.dropna(subset=["datetime_start"]).copy()
if df.empty:
# Devolvemos un df vacío pero con índice/columnas esperadas
# Return an empty dataframe with expected index/columns
idx = range(7)
cols = range(24)
return pd.DataFrame(0, index=idx, columns=cols)
@@ -137,8 +137,8 @@ class VolumetriaMetrics:
def monthly_seasonality_cv(self) -> float:
"""
Coeficiente de variación del volumen mensual.
CV = std / mean (en %).
Coefficient of variation of monthly volume.
CV = std / mean (in %).
"""
df = self.df.dropna(subset=["datetime_start"]).copy()
if df.empty:
@@ -161,9 +161,9 @@ class VolumetriaMetrics:
def peak_offpeak_ratio(self) -> float:
"""
Ratio de volumen entre horas pico y valle.
Volume ratio between peak and off-peak hours.
Definimos pico como horas 10:00–19:59, resto valle.
We define peak as hours 10:00–19:59, rest as off-peak.
"""
df = self.df.dropna(subset=["datetime_start"]).copy()
if df.empty:
@@ -184,7 +184,7 @@ class VolumetriaMetrics:
def concentration_top20_skills_pct(self) -> float:
"""
% del volumen concentrado en el top 20% de skills (por nº de interacciones).
% of volume concentrated in the top 20% of skills (by number of interactions).
"""
counts = (
self.df.groupby("queue_skill")["interaction_id"].nunique().sort_values(
@@ -210,8 +210,8 @@ class VolumetriaMetrics:
# ------------------------------------------------------------------ #
def plot_heatmap_24x7(self) -> Axes:
"""
Heatmap de volumen por día de la semana (0-6) y hora (0-23).
Devuelve Axes para que el pipeline pueda guardar la figura.
Heatmap of volume by day of week (0-6) and hour (0-23).
Returns Axes so the pipeline can save the figure.
"""
data = self.heatmap_24x7()
@@ -222,45 +222,45 @@ class VolumetriaMetrics:
ax.set_xticklabels([str(h) for h in range(24)])
ax.set_yticks(range(7))
ax.set_yticklabels(["L", "M", "X", "J", "V", "S", "D"])
ax.set_yticklabels(["M", "T", "W", "T", "F", "S", "S"])
ax.set_xlabel("Hora del día")
ax.set_ylabel("Día de la semana")
ax.set_title("Volumen por día de la semana y hora")
ax.set_xlabel("Hour of day")
ax.set_ylabel("Day of week")
ax.set_title("Volume by day of week and hour")
plt.colorbar(im, ax=ax, label="Nº interacciones")
plt.colorbar(im, ax=ax, label="# interactions")
return ax
def plot_channel_distribution(self) -> Axes:
"""
Distribución de volumen por canal.
Volume distribution by channel.
"""
series = self.volume_by_channel()
fig, ax = plt.subplots(figsize=(6, 4))
series.plot(kind="bar", ax=ax)
ax.set_xlabel("Canal")
ax.set_ylabel("Nº interacciones")
ax.set_title("Volumen por canal")
ax.set_xlabel("Channel")
ax.set_ylabel("# interactions")
ax.set_title("Volume by channel")
ax.grid(axis="y", alpha=0.3)
return ax
def plot_skill_pareto(self) -> Axes:
"""
Pareto simple de volumen por skill (solo barras de volumen).
Simple Pareto chart of volume by skill (volume bars only).
"""
series = self.volume_by_skill()
fig, ax = plt.subplots(figsize=(10, 4))
series.plot(kind="bar", ax=ax)
ax.set_xlabel("Skill / Cola")
ax.set_ylabel("Nº interacciones")
ax.set_title("Pareto de volumen por skill")
ax.set_xlabel("Skill / Queue")
ax.set_ylabel("# interactions")
ax.set_title("Pareto chart of volume by skill")
ax.grid(axis="y", alpha=0.3)
plt.xticks(rotation=45, ha="right")