Files
BeyondCXAnalytics-Demo/frontend/utils/analysisGenerator.ts
Claude 94178eaaae Translate Phase 1 high-priority frontend utils (backendMapper, analysisGenerator, realDataAnalysis)
Phase 1 of Spanish-to-English translation for critical path files:
- backendMapper.ts: Translated ~50 occurrences (comments, labels, dimension titles)
- analysisGenerator.ts: Translated ~49 occurrences (findings, recommendations, dimension content)
- realDataAnalysis.ts: Translated ~92 occurrences (clasificarTier functions, inline comments)

All function names and API variable names preserved for compatibility.
Frontend compilation tested and verified successful.

Related to TRANSLATION_STATUS.md Phase 1 objectives.

https://claude.ai/code/session_01GNbnkFoESkRcnPr3bLCYDg
2026-02-07 10:35:40 +00:00

1416 lines
57 KiB
TypeScript

// analysisGenerator.ts - v2.0 with 6 dimensions
import type { AnalysisData, Kpi, DimensionAnalysis, HeatmapDataPoint, Opportunity, RoadmapInitiative, EconomicModelData, BenchmarkDataPoint, Finding, Recommendation, TierKey, CustomerSegment, RawInteraction, DrilldownDataPoint, AgenticTier } from '../types';
import { generateAnalysisFromRealData, calculateDrilldownMetrics, generateOpportunitiesFromDrilldown, generateRoadmapFromDrilldown, calculateSkillMetrics, generateHeatmapFromMetrics, clasificarTierSimple } from './realDataAnalysis';
import { RoadmapPhase } from '../types';
import { BarChartHorizontal, Zap, Target, Brain, Bot } from 'lucide-react';
import { calculateAgenticReadinessScore, type AgenticReadinessInput } from './agenticReadinessV2';
import { callAnalysisApiRaw } from './apiClient';
import {
mapBackendResultsToAnalysisData,
buildHeatmapFromBackend,
} from './backendMapper';
import { saveFileToServerCache, saveDrilldownToServerCache, getCachedDrilldown, downloadCachedFile } from './serverCache';
// Uniform random integer in the inclusive range [min, max].
const randomInt = (min: number, max: number) => {
  const span = max - min + 1;
  return min + Math.floor(Math.random() * span);
};
// Uniform random float in [min, max), rounded to the given number of decimals.
const randomFloat = (min: number, max: number, decimals: number) => {
  const sample = min + Math.random() * (max - min);
  return parseFloat(sample.toFixed(decimals));
};
// Pick one element of `arr` uniformly at random.
const randomFromList = <T,>(arr: T[]): T => {
  const idx = Math.floor(Math.random() * arr.length);
  return arr[idx];
};
// Sample from a normal distribution N(mean, std) using the Box-Muller transform.
const normalRandom = (mean: number, std: number): number => {
  const a = Math.random();
  const b = Math.random();
  const standardNormal = Math.sqrt(-2 * Math.log(a)) * Math.cos(2 * Math.PI * b);
  return mean + std * standardNormal;
};
// Map a 0-100 score onto a traffic-light color: >=80 green, >=60 yellow, else red.
const getScoreColor = (score: number): 'green' | 'yellow' | 'red' =>
  score >= 80 ? 'green' : score >= 60 ? 'yellow' : 'red';
// v3.0: 5 VIABLE DIMENSIONS
// Template content per analysis dimension: icon, candidate titles, canned
// summaries per health band (good/medium/bad) and sample KPIs.
// NOTE: the KPI values invoke the random helpers at module load time, so they
// are fixed once per session, not re-rolled per analysis.
const DIMENSIONS_CONTENT = {
  volumetry_distribution: {
    icon: BarChartHorizontal,
    titles: ["Volumetry & Distribution", "Demand Analysis"],
    summaries: {
      good: ["Interaction volume aligns with forecasts, enabling precise staff planning.", "Hourly distribution is uniform with predictable peaks. Balanced Pareto concentration."],
      medium: ["There are unforeseen demand peaks that cause service level drops.", "High concentration in few queues (>80% in 20% of queues), bottleneck risk."],
      bad: ["Chronic mismatch between forecast and actual volume, resulting in cost overruns or poor service.", "Very irregular hourly distribution with multiple unpredictable peaks."]
    },
    kpis: [
      { label: "Monthly Volume", value: `${randomInt(5000, 25000).toLocaleString('es-ES')}` },
      { label: "% After Hours", value: `${randomInt(15, 45)}%` },
    ],
  },
  operational_efficiency: {
    icon: Zap,
    titles: ["Operational Efficiency", "Time Optimization"],
    summaries: {
      good: ["P90/P50 ratio is low (<1.5), indicating consistent times and standardized processes.", "Wait, hold and ACW times well controlled, maximizing productivity."],
      medium: ["P90/P50 ratio is moderate (1.5-2.0), outlier cases exist that affect efficiency.", "Hold time is slightly high, suggesting improvements in information access."],
      bad: ["High P90/P50 ratio (>2.0), indicating high variability in handling times.", "Extended ACW and hold times indicate inefficient manual processes."]
    },
    kpis: [
      { label: "AHT P50", value: `${randomInt(280, 450)}s` },
      { label: "Ratio P90/P50", value: `${randomFloat(1.2, 2.5, 2)}` },
    ],
  },
  effectiveness_resolution: {
    icon: Target,
    titles: ["Effectiveness & Resolution", "Service Quality"],
    summaries: {
      good: ["FCR proxy >85%, minimal contact repetition within 7 days.", "Low transfer rate (<10%) and problematic calls (<5%)."],
      medium: ["FCR proxy 70-85%, opportunity to reduce re-contacts.", "Moderate transfer rate (10-20%), concentrated in certain queues."],
      bad: ["FCR proxy <70%, high volume of re-contacts within 7 days.", "High rate of problematic calls (>15%) and excessive transfers."]
    },
    kpis: [
      { label: "FCR Proxy 7d", value: `${randomInt(65, 92)}%` },
      { label: "Transfer Rate", value: `${randomInt(5, 25)}%` },
    ],
  },
  complexity_predictability: {
    icon: Brain,
    titles: ["Complexity & Predictability", "Variability Analysis"],
    summaries: {
      good: ["Low AHT variability (P90/P50 ratio <1.5), highly predictable process.", "Controlled classification diversity, low % of calls with multiple holds."],
      medium: ["Moderate AHT variability, some outlier cases affect predictability.", "% calls with multiple holds high (15-30%), indicating complexity."],
      bad: ["High AHT variability (ratio >2.0), unpredictable process difficult to automate.", "High classification diversity and % transfers, indicating high complexity."]
    },
    kpis: [
      { label: "Ratio P90/P50", value: `${randomFloat(1.2, 2.5, 2)}` },
      // FIX: translate leftover Spanish label ("% Transferencias") for
      // consistency with the rest of the translated UI labels.
      { label: "% Transfers", value: `${randomInt(5, 30)}%` },
    ],
  },
  agentic_readiness: {
    icon: Bot,
    titles: ["Agentic Readiness", "Automation Potential"],
    summaries: {
      good: ["Score 8-10: Excellent candidate for full automation with AI agents.", "High volume, low variability, few transfers. Repetitive and predictable process."],
      medium: ["Score 5-7: Candidate for AI assistance (copilot) or partial automation.", "Moderate volume with some complexities requiring human supervision."],
      bad: ["Score 0-4: Requires prior optimization before automating.", "High complexity, low repeatability or excessive variability."]
    },
    kpis: [
      { label: "Overall Score", value: `${randomFloat(3.0, 9.5, 1)}/10` },
      { label: "Category", value: randomFromList(['Automate', 'Assist', 'Optimize']) },
    ],
  },
};
// Generic findings - specific ones are generated in realDataAnalysis.ts from calculated data
// Each template carries a dimensionId, a type (critical/warning/info) and an
// impact level; generateFindingsFromData scores those fields against the
// computed dimension scores to select the most relevant subset.
const KEY_FINDINGS: Finding[] = [
  {
    text: "The P90/P50 ratio of AHT is high (>2.0), indicating high variability in handling times.",
    dimensionId: 'operational_efficiency',
    type: 'warning',
    title: 'High Variability in Times',
    description: 'Poorly standardized processes generate unpredictable times and affect planning.',
    impact: 'high'
  },
  {
    text: "High transfer rate indicates opportunity for improvement in routing or training.",
    dimensionId: 'effectiveness_resolution',
    type: 'warning',
    title: 'High Transfers',
    description: 'Frequent transfers affect customer experience and operational efficiency.',
    impact: 'high'
  },
  {
    text: "Volume concentration in specific time slots generates demand peaks.",
    dimensionId: 'volumetry_distribution',
    type: 'info',
    title: 'Demand Concentration',
    description: 'Review capacity in high-volume time slots to optimize service level.',
    impact: 'medium'
  },
  {
    text: "Significant percentage of interactions outside standard business hours (8-19h).",
    dimensionId: 'volumetry_distribution',
    type: 'info',
    title: 'After-Hours Demand',
    description: 'Evaluate extended coverage or self-service channels for after-hours demand.',
    impact: 'medium'
  },
  {
    text: "Automation opportunities identified in high-volume repetitive queries.",
    dimensionId: 'agentic_readiness',
    type: 'info',
    title: 'Automation Opportunity',
    description: 'Skills with high repeatability and low complexity are ideal candidates for AI agents.',
    impact: 'high'
  },
];
// Generic recommendation templates; generateRecommendationsFromData scores and
// filters these against real analysis data.
// FIX: translated the remaining Spanish timelines ('meses'/'mes' -> 'months'/
// 'month') for consistency with the English timelines in sibling entries.
const RECOMMENDATIONS: Recommendation[] = [
  {
    text: "Standardize processes in queues with high P90/P50 ratio to reduce variability.",
    dimensionId: 'operational_efficiency',
    priority: 'high',
    title: 'Process Standardization',
    description: 'Implement scripts and step-by-step guides to reduce variability in handling times.',
    impact: 'P90/P50 ratio reduction: 20-30%, Improved predictability',
    timeline: '3-4 weeks'
  },
  {
    text: "Develop an order status bot for WhatsApp to deflect 30% of queries.",
    dimensionId: 'agentic_readiness',
    priority: 'high',
    title: 'Automated Order Tracking Bot',
    description: 'Implement ChatBot on WhatsApp for queries with high Agentic Score (>8).',
    impact: 'Volume reduction: 20-30%, Annual savings: €40-60K',
    timeline: '1-2 months'
  },
  {
    text: "Review workforce planning (WFM) for Mondays, adding flexible resources.",
    dimensionId: 'volumetry_distribution',
    priority: 'high',
    title: 'Workforce Adjustment (WFM)',
    description: 'Reposition agents and add part-time resources for Mondays 8-11h.',
    impact: 'SL improvement: +15-20%, Additional cost: €5-8K/month',
    timeline: '1 month'
  },
  {
    text: "Create a more robust Knowledge Base to reduce hold time and improve FCR.",
    dimensionId: 'effectiveness_resolution',
    priority: 'high',
    title: 'Information Access Improvement',
    description: 'Develop a centralized KB to reduce searches and improve first contact resolution.',
    impact: 'Hold time reduction: 15-25%, FCR improvement: 5-10%',
    timeline: '6-8 weeks'
  },
  {
    text: "Implement 24/7 coverage with virtual agents for 28% of after-hours interactions.",
    dimensionId: 'volumetry_distribution',
    priority: 'medium',
    title: '24/7 Coverage with AI',
    description: 'Deploy virtual agents to handle nighttime and weekend interactions.',
    impact: 'Demand capture: 20-25%, Incremental cost: €15-20K/month',
    timeline: '2-3 months'
  },
  {
    text: "Simplify classifications and reduce complexity in problematic queues.",
    dimensionId: 'complexity_predictability',
    priority: 'medium',
    title: 'Complexity Reduction',
    description: 'Consolidate classifications and simplify flows to improve predictability.',
    impact: 'Complexity reduction: 20-30%, Improved Agentic Score',
    timeline: '4-6 weeks'
  },
];
// === RECOMMENDATIONS BASED ON REAL DATA ===
const MAX_RECOMMENDATIONS = 4;
// Scores the RECOMMENDATIONS templates against the computed analysis
// (dimension scores, declared priority, economic upside) and returns the
// most relevant ones, filled out as complete Recommendation objects.
const generateRecommendationsFromData = (
  analysis: AnalysisData
): Recommendation[] => {
  const scoreByDimension = new Map<string, number>();
  for (const dim of analysis.dimensions || []) {
    if (dim.id && typeof dim.score === 'number') {
      scoreByDimension.set(dim.id, dim.score);
    }
  }
  const overallScore =
    typeof analysis.overallHealthScore === 'number'
      ? analysis.overallHealthScore
      : 70;
  const econ = analysis.economicModel;
  const annualSavings = econ?.annualSavings ?? 0;
  const currentCost = econ?.currentAnnualCost ?? 0;
  // Relevance per recommendation template
  const ranked = RECOMMENDATIONS.map((tpl, index) => {
    const dimId = tpl.dimensionId || 'overall';
    const dimScore = scoreByDimension.get(dimId) ?? overallScore;
    let relevance = 0;
    // 1) Weak dimensions gain relevance
    if (dimScore < 60) relevance += 3;
    else if (dimScore < 75) relevance += 2;
    else if (dimScore < 85) relevance += 1;
    // 2) Priority declared in the template
    if (tpl.priority === 'high') relevance += 2;
    else if (tpl.priority === 'medium') relevance += 1;
    // 3) Boost when the economic upside is significant.
    // NOTE(review): no template currently uses dimensionId 'economy', so this
    // boost looks unreachable — confirm the intended dimension id.
    const significantSavings =
      annualSavings > 0 &&
      currentCost > 0 &&
      annualSavings / currentCost > 0.15;
    if (significantSavings && dimId === 'economy') {
      relevance += 2;
    }
    // 4) Slight penalty if the dimension is already very good (>85)
    if (dimScore > 85) relevance -= 1;
    return { tpl, relevance, index }; // index used to break ties
  });
  // Drop templates that contribute nothing (relevance <= 0)
  let shortlist = ranked.filter((entry) => entry.relevance > 0);
  if (shortlist.length === 0) {
    // Nothing passed the filter (e.g. everything is healthy):
    // keep at least a few of the highest-priority templates.
    const weight = (p?: 'high' | 'medium' | 'low') =>
      p === 'high' ? 3 : p === 'medium' ? 2 : 1;
    shortlist = [...ranked]
      .sort((a, b) => weight(b.tpl.priority) - weight(a.tpl.priority))
      .slice(0, MAX_RECOMMENDATIONS);
  } else {
    // Sort by relevance (desc); ties keep the original template order
    shortlist.sort((a, b) =>
      b.relevance !== a.relevance
        ? b.relevance - a.relevance
        : a.index - b.index
    );
  }
  // Fill each selected template out to a complete Recommendation
  return shortlist
    .slice(0, MAX_RECOMMENDATIONS)
    .map((entry, i): Recommendation => {
      const rec = entry.tpl;
      return {
        priority:
          rec.priority || (i === 0 ? ('high' as const) : ('medium' as const)),
        title: rec.title || 'Recommendation',
        description: rec.description || rec.text,
        impact:
          rec.impact ||
          'Estimated 10-20% improvement in key KPIs.',
        timeline: rec.timeline || '4-8 weeks',
        // required fields:
        text:
          rec.text ||
          rec.description ||
          'Priority recommendation based on data analysis.',
        dimensionId: rec.dimensionId || 'overall',
      };
    });
};
// === FINDINGS BASED ON REAL DATA ===
const MAX_FINDINGS = 5;
// Scores the KEY_FINDINGS templates against the computed analysis (dimension
// scores, finding type/impact, after-hours volumetry) and returns the most
// relevant ones, filled out as complete Finding objects.
const generateFindingsFromData = (
  analysis: AnalysisData
): Finding[] => {
  const dimensions = analysis.dimensions || [];
  const dimScoreMap = new Map<string, number>();
  dimensions.forEach((d) => {
    if (d.id && typeof d.score === 'number') {
      dimScoreMap.set(d.id, d.score);
    }
  });
  const overallScore =
    typeof analysis.overallHealthScore === 'number'
      ? analysis.overallHealthScore
      : 70;
  // We look at volumetry to reinforce some findings
  const volumetryDim = dimensions.find(
    (d) => d.id === 'volumetry_distribution'
  );
  const offHoursPct =
    volumetryDim?.distribution_data?.off_hours_pct ?? 0;
  // Relevance per finding
  const scoredTemplates = KEY_FINDINGS.map((tpl, index) => {
    const dimId = tpl.dimensionId || 'overall';
    const dimScore = dimScoreMap.get(dimId) ?? overallScore;
    let relevance = 0;
    // 1) Weak dimensions => more relevance
    if (dimScore < 60) relevance += 3;
    else if (dimScore < 75) relevance += 2;
    else if (dimScore < 85) relevance += 1;
    // 2) Type of finding (critical > warning > info)
    if (tpl.type === 'critical') relevance += 3;
    else if (tpl.type === 'warning') relevance += 2;
    else relevance += 1;
    // 3) Impact (high > medium > low)
    if (tpl.impact === 'high') relevance += 2;
    else if (tpl.impact === 'medium') relevance += 1;
    // 4) Reinforcement in volumetry if there is high after-hours demand
    if (
      offHoursPct > 0.25 &&
      tpl.dimensionId === 'volumetry_distribution'
    ) {
      relevance += 2;
      // FIX: the previous substrings ('after hours', 'outside business hours')
      // never matched the English templates ('After-Hours Demand' is
      // hyphenated; the text reads 'outside standard business hours'),
      // so this extra +1 was dead code. Normalize hyphens and match on the
      // actual wording.
      const titleLc = (tpl.title ?? '').toLowerCase().replace(/-/g, ' ');
      const textLc = (tpl.text ?? '').toLowerCase();
      if (
        titleLc.includes('after hours') ||
        textLc.includes('business hours')
      ) {
        relevance += 1;
      }
    }
    return { tpl, relevance, index };
  });
  // Filter out templates that contribute nothing (relevance <= 0)
  let filtered = scoredTemplates.filter((s) => s.relevance > 0);
  // If nothing passes the filter, keep at least a few by type/impact
  if (filtered.length === 0) {
    filtered = scoredTemplates
      .slice()
      .sort((a, b) => {
        const typeWeight = (t?: Finding['type']) => {
          if (t === 'critical') return 3;
          if (t === 'warning') return 2;
          return 1;
        };
        const impactWeight = (imp?: string) => {
          if (imp === 'high') return 3;
          if (imp === 'medium') return 2;
          return 1;
        };
        const scoreA =
          typeWeight(a.tpl.type) + impactWeight(a.tpl.impact);
        const scoreB =
          typeWeight(b.tpl.type) + impactWeight(b.tpl.impact);
        return scoreB - scoreA;
      })
      .slice(0, MAX_FINDINGS);
  } else {
    // Sort by relevance (desc); ties keep the original template order
    filtered.sort((a, b) => {
      if (b.relevance !== a.relevance) {
        return b.relevance - a.relevance;
      }
      return a.index - b.index;
    });
  }
  const selected = filtered.slice(0, MAX_FINDINGS).map((s) => s.tpl);
  // Map each selected template to the full Finding type
  return selected.map((finding, i): Finding => ({
    type:
      finding.type ||
      (i === 0
        ? ('warning' as const)
        : ('info' as const)),
    // FIX: translate leftover Spanish fallback label ('Hallazgo')
    title: finding.title || 'Finding',
    description: finding.description || finding.text,
    // required fields:
    text:
      finding.text ||
      finding.description ||
      'Relevant finding based on data.',
    dimensionId: finding.dimensionId || 'overall',
    impact: finding.impact,
  }));
};
// Fallback for synthetic mode: pick up to 3 distinct finding templates at
// random (the Set dedupes repeated picks, so fewer than 3 may be returned).
const generateFindingsFromTemplates = (): Finding[] => {
  return [
    ...new Set(
      Array.from({ length: 3 }, () => randomFromList(KEY_FINDINGS))
    ),
  ].map((finding, i): Finding => ({
    type: finding.type || (i === 0 ? 'warning' : 'info'),
    // FIX: translate leftover Spanish fallback labels ('Hallazgo',
    // 'Hallazgo relevante') for consistency with the translated UI.
    title: finding.title || 'Finding',
    description: finding.description || finding.text,
    // required fields:
    text: finding.text || finding.description || 'Relevant finding',
    dimensionId: finding.dimensionId || 'overall',
    impact: finding.impact,
  }));
};
// Fallback for synthetic mode: pick up to 3 distinct recommendation templates
// at random (the Set dedupes repeated picks, so fewer than 3 may be returned).
const generateRecommendationsFromTemplates = (): Recommendation[] => {
  const picks = Array.from({ length: 3 }, () => randomFromList(RECOMMENDATIONS));
  const uniquePicks = [...new Set(picks)];
  return uniquePicks.map((rec, i): Recommendation => ({
    priority: rec.priority || (i === 0 ? 'high' : 'medium'),
    title: rec.title || 'Recommendation',
    description: rec.description || rec.text,
    impact: rec.impact || 'Estimated improvement of 20-30%',
    timeline: rec.timeline || '1-2 weeks',
    // required fields:
    text: rec.text || rec.description || 'Priority recommendation',
    dimensionId: rec.dimensionId || 'overall',
  }));
};
// v2.0: Generate a realistic 24-slot hourly volume distribution, with peaks
// at 9-11h and 14-17h, moderate business-hours traffic and a low night floor.
const generateHourlyDistribution = (): number[] => {
  const volumeFor = (hour: number): number => {
    if (hour >= 9 && hour <= 11) return randomInt(800, 1200); // Morning peak
    if (hour >= 14 && hour <= 17) return randomInt(700, 1000); // Afternoon peak
    if (hour >= 8 && hour <= 18) return randomInt(300, 600); // Business hours
    return randomInt(50, 200); // After hours
  };
  return Array.from({ length: 24 }, (_, hour) => volumeFor(hour));
};
// v2.0: Fraction (0-1) of total volume outside business hours,
// i.e. in the slots 0-7h and 19-23h of the 24-entry distribution.
const calculateOffHoursPct = (hourly_distribution: number[]): number => {
  const sum = (values: number[]) => values.reduce((acc, v) => acc + v, 0);
  const total = sum(hourly_distribution);
  if (total === 0) return 0; // Avoid division by zero
  const offHours =
    sum(hourly_distribution.slice(0, 8)) +
    sum(hourly_distribution.slice(19, 24));
  return offHours / total;
};
// v2.0: Identify peak hours — the indices whose volume reaches the
// 3rd-highest value (ties included, so more than three indices may return).
const identifyPeakHours = (hourly_distribution: number[]): number[] => {
  if (!hourly_distribution || hourly_distribution.length === 0) return [];
  const descending = [...hourly_distribution].sort((a, b) => b - a);
  const cutoff = descending[Math.min(2, descending.length - 1)] || 0; // Top 3 or maximum available
  const peaks: number[] = [];
  hourly_distribution.forEach((value, hour) => {
    if (value >= cutoff) peaks.push(hour);
  });
  return peaks;
};
// v2.1: Generate heatmap with new transformation logic (3 dimensions)
// Produces one fully synthetic HeatmapDataPoint per demo skill: simulated
// AHT components, cost metrics, and an Agentic Readiness score built from
// predictability, inverse complexity and repeatability.
const generateHeatmapData = (
  costPerHour: number = 20,
  avgCsat: number = 85,
  segmentMapping?: { high_value_queues: string[]; medium_value_queues: string[]; low_value_queues: string[] }
): HeatmapDataPoint[] => {
  const skills = ['Ventas Inbound', 'Soporte Técnico N1', 'Facturación', 'Retención', 'VIP Support', 'Trial Support'];
  // NOTE(review): COST_PER_SECOND is never used below (period_cost recomputes
  // aht_mean / 3600 inline) — candidate for removal.
  const COST_PER_SECOND = costPerHour / 3600;
  return skills.map(skill => {
    const volume = randomInt(800, 5500); // Monthly volume (expanded to cover repeatability range)
    // Simulate raw data: duration_talk, hold_time, wrap_up_time
    const avg_talk_time = randomInt(240, 450); // seconds
    const avg_hold_time = randomInt(15, 80); // seconds
    const avg_wrap_up = randomInt(10, 50); // seconds
    const aht_mean = avg_talk_time + avg_hold_time + avg_wrap_up; // Average AHT
    // Simulate the AHT standard deviation (for the CV)
    const aht_std = randomInt(Math.round(aht_mean * 0.15), Math.round(aht_mean * 0.60)); // 15-60% of AHT
    const cv_aht = aht_std / aht_mean; // Coefficient of Variation
    // Transfer rate (for inverse complexity)
    const transfer_rate = randomInt(5, 35); // %
    const fcr_approx = 100 - transfer_rate; // Approximate FCR
    // Period cost (monthly) - with 70% productivity factor
    const effectiveProductivity = 0.70;
    const period_cost = Math.round((aht_mean / 3600) * costPerHour * volume / effectiveProductivity);
    const annual_cost = period_cost; // Renamed for compatibility, but it is a monthly cost
    // CPI = cost per interaction
    const cpi = volume > 0 ? period_cost / volume : 0;
    // === NEW LOGIC: 3 DIMENSIONS ===
    // Dimension 1: Predictability (Proxy: AHT CV)
    // Formula: MAX(0, MIN(10, 10 - ((CV - 0.3) / 1.2 * 10)))
    const predictability_score = Math.max(0, Math.min(10,
      10 - ((cv_aht - 0.3) / 1.2 * 10)
    ));
    // Dimension 2: Inverse Complexity (Proxy: Transfer Rate)
    // Formula: MAX(0, MIN(10, 10 - ((T - 0.05) / 0.25 * 10)))
    const complexity_inverse_score = Math.max(0, Math.min(10,
      10 - ((transfer_rate / 100 - 0.05) / 0.25 * 10)
    ));
    // Dimension 3: Repeatability/Impact (Proxy: Volume)
    // > 5,000 = 10, < 100 = 0, linear interpolation between 100-5000
    let repetitivity_score: number;
    if (volume >= 5000) {
      repetitivity_score = 10;
    } else if (volume <= 100) {
      repetitivity_score = 0;
    } else {
      repetitivity_score = ((volume - 100) / (5000 - 100)) * 10;
    }
    // Agentic Readiness Score (Weighted average)
    // Weights: Predictability 40%, Complexity 35%, Repeatability 25%
    const agentic_readiness_score =
      predictability_score * 0.40 +
      complexity_inverse_score * 0.35 +
      repetitivity_score * 0.25;
    // Readiness category derived from the 0-10 composite score
    let readiness_category: 'automate_now' | 'assist_copilot' | 'optimize_first';
    if (agentic_readiness_score >= 8.0) {
      readiness_category = 'automate_now';
    } else if (agentic_readiness_score >= 5.0) {
      readiness_category = 'assist_copilot';
    } else {
      readiness_category = 'optimize_first';
    }
    const automation_readiness = Math.round(agentic_readiness_score * 10); // 0-100 scale for compatibility
    // Classify the segment if a queue-to-segment mapping is provided
    let segment: CustomerSegment | undefined;
    if (segmentMapping) {
      const normalizedSkill = skill.toLowerCase();
      if (segmentMapping.high_value_queues.some(q => normalizedSkill.includes(q.toLowerCase()))) {
        segment = 'high';
      } else if (segmentMapping.low_value_queues.some(q => normalizedSkill.includes(q.toLowerCase()))) {
        segment = 'low';
      } else {
        segment = 'medium';
      }
    }
    return {
      skill,
      segment,
      volume,
      cost_volume: volume, // In synthetic data, we assume all are non-abandon
      aht_seconds: aht_mean, // Renamed for compatibility
      metrics: {
        fcr: isNaN(fcr_approx) ? 0 : Math.max(0, Math.min(100, Math.round(fcr_approx))),
        aht: isNaN(aht_mean) ? 0 : Math.max(0, Math.min(100, Math.round(100 - ((aht_mean - 240) / 310) * 100))),
        csat: isNaN(avgCsat) ? 0 : Math.max(0, Math.min(100, Math.round(avgCsat))),
        hold_time: isNaN(avg_hold_time) ? 0 : Math.max(0, Math.min(100, Math.round(100 - (avg_hold_time / 120) * 100))),
        // NOTE(review): transfer_rate is already a percentage (5-35), so the
        // `* 100` below saturates the min(100, ...) clamp and this metric
        // always evaluates to 100 — confirm whether a 0-1 fraction was intended.
        transfer_rate: isNaN(transfer_rate) ? 0 : Math.max(0, Math.min(100, Math.round(transfer_rate * 100)))
      },
      annual_cost,
      cpi,
      variability: {
        cv_aht: Math.round(cv_aht * 100), // Convert to percentage
        cv_talk_time: 0, // Deprecated in v2.1
        cv_hold_time: 0, // Deprecated in v2.1
        transfer_rate
      },
      automation_readiness,
      // New dimensions (v2.1)
      dimensions: {
        predictability: Math.round(predictability_score * 10) / 10,
        complexity_inverse: Math.round(complexity_inverse_score * 10) / 10,
        repetitivity: Math.round(repetitivity_score * 10) / 10
      },
      readiness_category
    };
  });
};
// v2.0: Add NPV and costBreakdown
// Builds a fully synthetic economic model for demo mode: current/future
// annual cost, savings, payback, 3-year ROI, NPV and the savings/investment
// breakdowns shown in the UI.
const generateEconomicModelData = (): EconomicModelData => {
  const currentAnnualCost = randomInt(800000, 2500000);
  const annualSavings = randomInt(150000, 500000);
  const futureAnnualCost = currentAnnualCost - annualSavings;
  const initialInvestment = randomInt(40000, 150000);
  const paybackMonths = Math.ceil((initialInvestment / annualSavings) * 12);
  const roi3yr = (((annualSavings * 3) - initialInvestment) / initialInvestment) * 100;
  // NPV over 3 years with a 10% discount rate
  const discountRate = 0.10;
  const npv = -initialInvestment +
    (annualSavings / (1 + discountRate)) +
    (annualSavings / Math.pow(1 + discountRate, 2)) +
    (annualSavings / Math.pow(1 + discountRate, 3));
  // FIX: translated the remaining Spanish category labels for consistency
  // with the already-English entries in these breakdowns.
  const savingsBreakdown = [
    { category: 'Task automation', amount: annualSavings * 0.45, percentage: 45 },
    { category: 'Operational efficiency', amount: annualSavings * 0.30, percentage: 30 },
    { category: 'FCR Improvement', amount: annualSavings * 0.15, percentage: 15 },
    { category: 'Attrition reduction', amount: annualSavings * 0.075, percentage: 7.5 },
    { category: 'Others', amount: annualSavings * 0.025, percentage: 2.5 },
  ];
  const costBreakdown = [
    { category: 'Software & licenses', amount: initialInvestment * 0.43, percentage: 43 },
    { category: 'Implementation', amount: initialInvestment * 0.29, percentage: 29 },
    { category: 'Training & change mgmt', amount: initialInvestment * 0.18, percentage: 18 },
    { category: 'Contingency', amount: initialInvestment * 0.10, percentage: 10 },
  ];
  return {
    currentAnnualCost,
    futureAnnualCost,
    annualSavings,
    initialInvestment,
    paybackMonths,
    roi3yr: parseFloat(roi3yr.toFixed(1)),
    npv: Math.round(npv),
    savingsBreakdown,
    costBreakdown
  };
};
// v2.0: Add multiple percentiles
// Builds synthetic benchmark rows (user value vs. industry value plus
// p25/p50/p75/p90 percentiles) for AHT, FCR, CSAT and cost per interaction.
// FIX: translated the remaining Spanish KPI labels ('Tasa FCR',
// 'Coste por Interacción (Voz)') for consistency with 'Average AHT'.
const generateBenchmarkData = (): BenchmarkDataPoint[] => {
  const userAHT = randomInt(380, 450);
  const industryAHT = 420;
  const userFCR = randomFloat(0.65, 0.78, 2);
  const industryFCR = 0.72;
  const userCSAT = randomFloat(4.1, 4.6, 1);
  const industryCSAT = 4.3;
  const userCPI = randomFloat(2.8, 4.5, 2);
  const industryCPI = 3.5;
  return [
    {
      kpi: 'Average AHT',
      userValue: userAHT,
      userDisplay: `${userAHT}s`,
      industryValue: industryAHT,
      industryDisplay: `${industryAHT}s`,
      percentile: randomInt(40, 75),
      p25: 380,
      p50: 420,
      p75: 460,
      p90: 510
    },
    {
      kpi: 'FCR Rate',
      userValue: userFCR,
      userDisplay: `${(userFCR * 100).toFixed(0)}%`,
      industryValue: industryFCR,
      industryDisplay: `${(industryFCR * 100).toFixed(0)}%`,
      percentile: randomInt(30, 65),
      p25: 0.65,
      p50: 0.72,
      p75: 0.82,
      p90: 0.88
    },
    {
      kpi: 'CSAT',
      userValue: userCSAT,
      userDisplay: `${userCSAT}/5`,
      industryValue: industryCSAT,
      industryDisplay: `${industryCSAT}/5`,
      percentile: randomInt(45, 80),
      p25: 4.0,
      p50: 4.3,
      p75: 4.6,
      p90: 4.8
    },
    {
      kpi: 'Cost per Interaction (Voice)',
      userValue: userCPI,
      userDisplay: `${userCPI.toFixed(2)}`,
      industryValue: industryCPI,
      industryDisplay: `${industryCPI.toFixed(2)}`,
      percentile: randomInt(50, 85),
      p25: 2.8,
      p50: 3.5,
      p75: 4.2,
      p90: 5.0
    },
  ];
};
export const generateAnalysis = async (
tier: TierKey,
costPerHour: number = 20,
avgCsat: number = 85,
segmentMapping?: { high_value_queues: string[]; medium_value_queues: string[]; low_value_queues: string[] },
file?: File,
sheetUrl?: string,
useSynthetic?: boolean,
authHeaderOverride?: string
): Promise<AnalysisData> => {
// If there is a file, process it
// If there is a file, first try to use the backend
if (file && !useSynthetic) {
console.log('📡 Processing file (API first):', file.name);
// Pre-parse file to get dateRange and interactions (used in both routes)
let dateRange: { min: string; max: string } | undefined;
let parsedInteractions: RawInteraction[] | undefined;
try {
const { parseFile, validateInteractions } = await import('./fileParser');
const interactions = await parseFile(file);
const validation = validateInteractions(interactions);
dateRange = validation.stats.dateRange || undefined;
parsedInteractions = interactions; // Save to use in drilldownData
console.log(`📅 Date range extracted: ${dateRange?.min} to ${dateRange?.max}`);
console.log(`📊 Parsed ${interactions.length} interactions for drilldown`);
// Cache the CSV file on the server for future use
try {
if (authHeaderOverride && file) {
await saveFileToServerCache(authHeaderOverride, file, costPerHour);
console.log(`💾 CSV file cached on server for future use`);
} else {
console.warn('⚠️ No se pudo cachear: falta authHeader o file');
}
} catch (cacheError) {
console.warn('⚠️ Could not cache file:', cacheError);
}
} catch (e) {
console.warn('⚠️ Could not extract dateRange from file:', e);
}
// 1) Intentar backend + mapeo
try {
const raw = await callAnalysisApiRaw({
tier,
costPerHour,
avgCsat,
segmentMapping,
file,
authHeaderOverride,
});
const mapped = mapBackendResultsToAnalysisData(raw, tier);
// Add dateRange extracted from file
mapped.dateRange = dateRange;
// Heatmap: use frontend calculations (parsedInteractions) for consistency
// This ensures dashboard shows the same values as realDataAnalysis logs
if (parsedInteractions && parsedInteractions.length > 0) {
const skillMetrics = calculateSkillMetrics(parsedInteractions, costPerHour);
mapped.heatmapData = generateHeatmapFromMetrics(skillMetrics, avgCsat, segmentMapping);
console.log('📊 Heatmap generated from frontend (parsedInteractions) - consistent metrics');
} else {
// Fallback: use backend if there are no parsedInteractions
mapped.heatmapData = buildHeatmapFromBackend(
raw,
costPerHour,
avgCsat,
segmentMapping
);
console.log('📊 Heatmap generated from backend (fallback - without parsedInteractions)');
}
// v4.5: SYNCHRONIZE CPI from economy dimension with heatmapData for consistency between tabs
// The heatmapData contains the correctly calculated CPI (with weighted cost_volume)
// The economy dimension was calculated in mapBackendResultsToAnalysisData with another formula
// We update the dimension to show the same value as Executive Summary
if (mapped.heatmapData && mapped.heatmapData.length > 0) {
const heatmapData = mapped.heatmapData;
const totalCostVolume = heatmapData.reduce((sum, h) => sum + (h.cost_volume || h.volume), 0);
const hasCpiField = heatmapData.some(h => h.cpi !== undefined && h.cpi > 0);
let globalCPI: number;
if (hasCpiField) {
// Real CPI available: weighted average by cost_volume
globalCPI = totalCostVolume > 0
? heatmapData.reduce((sum, h) => sum + (h.cpi || 0) * (h.cost_volume || h.volume), 0) / totalCostVolume
: 0;
} else {
// Fallback: annual_cost / cost_volume
const totalAnnualCost = heatmapData.reduce((sum, h) => sum + (h.annual_cost || 0), 0);
globalCPI = totalCostVolume > 0 ? totalAnnualCost / totalCostVolume : 0;
}
// Update economy dimension with CPI calculated from heatmap
// Search for both economy_costs (backend) and economy_cpi (frontend fallback)
const economyDimIdx = mapped.dimensions.findIndex(d =>
d.id === 'economy_costs' || d.name === 'economy_costs' ||
d.id === 'economy_cpi' || d.name === 'economy_cpi'
);
if (economyDimIdx >= 0 && globalCPI > 0) {
// Use airline benchmark (€3.50) for consistency with ExecutiveSummaryTab
// Percentiles: p25=2.20, p50=3.50, p75=4.50, p90=5.50
const CPI_BENCHMARK = 3.50;
const cpiDiff = globalCPI - CPI_BENCHMARK;
// For inverted CPI: lower is better
const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative';
// Calculate score based on airline percentiles
let newScore: number;
if (globalCPI <= 2.20) newScore = 100;
else if (globalCPI <= 3.50) newScore = 80;
else if (globalCPI <= 4.50) newScore = 60;
else if (globalCPI <= 5.50) newScore = 40;
else newScore = 20;
mapped.dimensions[economyDimIdx].score = newScore;
mapped.dimensions[economyDimIdx].kpi = {
label: 'Coste por Interacción',
value: `${globalCPI.toFixed(2)}`,
change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`,
changeType: cpiStatus as 'positive' | 'neutral' | 'negative'
};
console.log(`💰 CPI sincronizado: €${globalCPI.toFixed(2)}, score: ${newScore}`);
}
}
// v3.5: Calculate drilldownData FIRST (necessary for opportunities and roadmap)
if (parsedInteractions && parsedInteractions.length > 0) {
mapped.drilldownData = calculateDrilldownMetrics(parsedInteractions, costPerHour);
console.log(`📊 Drill-down calculated: ${mapped.drilldownData.length} skills, ${mapped.drilldownData.filter(d => d.isPriorityCandidate).length} priority candidates`);
// v4.4: Cache drilldownData on server BEFORE returning (fix: was fire-and-forget)
// This ensures cache is available when user clicks "Use Cache"
if (authHeaderOverride && mapped.drilldownData.length > 0) {
try {
const cacheSuccess = await saveDrilldownToServerCache(authHeaderOverride, mapped.drilldownData);
if (cacheSuccess) {
console.log('💾 DrilldownData cached on server successfully');
} else {
console.warn('⚠️ Could not cache drilldownData - fallback to heatmap on next use');
}
} catch (cacheErr) {
console.warn('⚠️ Error cacheando drilldownData:', cacheErr);
}
}
// Use opportunities and roadmap based on drilldownData (real data)
mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour);
mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour);
console.log(`📊 Opportunities: ${mapped.opportunities.length}, Roadmap: ${mapped.roadmap.length}`);
} else {
console.warn('⚠️ No hay interacciones parseadas, usando heatmap para drilldown');
// v4.3: Generate drilldownData from heatmap to use same functions
mapped.drilldownData = generateDrilldownFromHeatmap(mapped.heatmapData, costPerHour);
mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour);
mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour);
}
// Derive findings and recommendations from the mapped data
mapped.findings = generateFindingsFromData(mapped);
mapped.recommendations = generateRecommendationsFromData(mapped);
// Benchmark: for now we do not have real data
mapped.benchmarkData = [];
console.log(
'✅ Usando resultados del backend mapeados (heatmap + opportunities + drilldown reales)'
);
return mapped;
} catch (apiError: any) {
const status = apiError?.status;
const msg = (apiError as Error).message || '';
// 🔐 If it is an authentication error (401), we do NOT fallback
if (status === 401 || msg.includes('401')) {
console.error(
'❌ Authentication error in backend, aborting analysis (no fallback).'
);
throw apiError;
}
console.error(
'❌ Backend /analysis no disponible o mapeo incompleto, fallback a lógica local:',
apiError
);
}
// 2) Full fallback: legacy frontend logic
try {
const { parseFile, validateInteractions } = await import('./fileParser');
const interactions = await parseFile(file);
const validation = validateInteractions(interactions);
if (!validation.valid) {
console.error('❌ Validation errors:', validation.errors);
throw new Error(
`Validación fallida: ${validation.errors.join(', ')}`
);
}
if (validation.warnings.length > 0) {
console.warn('⚠️ Warnings:', validation.warnings);
}
return generateAnalysisFromRealData(
tier,
interactions,
costPerHour,
avgCsat,
segmentMapping
);
} catch (error) {
console.error('❌ Error processing file:', error);
throw new Error(
`Error processing file: ${(error as Error).message}`
);
}
}
// If there is a Google Sheets URL, process it (TODO: implement)
if (sheetUrl && !useSynthetic) {
console.warn('🔗 Google Sheets URL processing not implemented yet, using synthetic data');
}
// Generate synthetic data (fallback)
console.log('✨ Generating synthetic data');
return generateSyntheticAnalysis(tier, costPerHour, avgCsat, segmentMapping);
};
/**
 * Generates analysis using the CSV file cached on the server.
 * Allows re-analysis without needing to upload the file again and
 * works across different browsers and devices.
 *
 * v3.5: Downloads the cached CSV to parse locally and obtain
 * all original queues (original_queue_id) instead of only
 * the 9 aggregated categories (queue_skill)
 */
export const generateAnalysisFromCache = async (
  tier: TierKey,
  costPerHour: number = 20,
  avgCsat: number = 85,
  segmentMapping?: { high_value_queues: string[]; medium_value_queues: string[]; low_value_queues: string[] },
  authHeaderOverride?: string
): Promise<AnalysisData> => {
  console.log('💾 Analyzing from server-cached file...');
  // The server cache is keyed per authenticated user, so auth is mandatory here.
  if (!authHeaderOverride) {
    throw new Error('Authentication required to access server cache.');
  }
  const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || 'http://localhost:8000';
  // Economic parameters forwarded to the backend analysis.
  const economyData = {
    costPerHour,
    avgCsat,
    segmentMapping,
  };
  // Build the multipart form expected by the /analysis/cached endpoint.
  const formData = new FormData();
  formData.append('economy_json', JSON.stringify(economyData));
  formData.append('analysis', 'premium');
  console.log('📡 Running backend analysis and drilldown fetch in parallel...');
  // === RUN IN PARALLEL: backend analysis + cached drilldownData fetch ===
  const backendAnalysisPromise = fetch(`${API_BASE_URL}/analysis/cached`, {
    method: 'POST',
    headers: {
      Authorization: authHeaderOverride,
    },
    body: formData,
  });
  // Fetch cached drilldownData (small JSON payload, very fast).
  const drilldownPromise = getCachedDrilldown(authHeaderOverride);
  // Await both operations in parallel.
  const [response, cachedDrilldownData] = await Promise.all([backendAnalysisPromise, drilldownPromise]);
  if (cachedDrilldownData) {
    console.log(`✅ Got cached drilldownData: ${cachedDrilldownData.length} skills`);
  } else {
    console.warn('⚠️ No cached drilldownData found, will use heatmap fallback');
  }
  try {
    // 404 means no CSV has been cached for this user yet.
    if (response.status === 404) {
      throw new Error('No file cached on server. Please upload a CSV file first.');
    }
    if (!response.ok) {
      const errorText = await response.text();
      console.error('❌ Backend error:', response.status, errorText);
      throw new Error(`Server error (${response.status}): ${errorText}`);
    }
    const rawResponse = await response.json();
    const raw = rawResponse.results;
    const dateRangeFromBackend = rawResponse.dateRange;
    const uniqueQueuesFromBackend = rawResponse.uniqueQueues;
    console.log('✅ Backend analysis from cache completed');
    console.log('📅 Date range from backend:', dateRangeFromBackend);
    console.log('📊 Unique queues from backend:', uniqueQueuesFromBackend);
    // Map raw backend results into the frontend AnalysisData shape (only 2 parameters).
    console.log('📦 Raw backend results keys:', Object.keys(raw || {}));
    console.log('📦 volumetry:', raw?.volumetry ? 'present' : 'missing');
    console.log('📦 operational_performance:', raw?.operational_performance ? 'present' : 'missing');
    console.log('📦 agentic_readiness:', raw?.agentic_readiness ? 'present' : 'missing');
    const mapped = mapBackendResultsToAnalysisData(raw, tier);
    console.log('📊 Mapped data summaryKpis:', mapped.summaryKpis?.length || 0);
    console.log('📊 Mapped data dimensions:', mapped.dimensions?.length || 0);
    // Attach the date range reported by the backend, when complete.
    if (dateRangeFromBackend && dateRangeFromBackend.min && dateRangeFromBackend.max) {
      mapped.dateRange = dateRangeFromBackend;
    }
    // Heatmap: build from real backend data.
    mapped.heatmapData = buildHeatmapFromBackend(
      raw,
      costPerHour,
      avgCsat,
      segmentMapping
    );
    console.log('📊 Heatmap data points:', mapped.heatmapData?.length || 0);
    // v4.6: SYNCHRONIZE CPI from economy dimension with heatmapData for consistency between tabs
    // (Same fix as in generateAnalysis - also needed on the cache path.)
    if (mapped.heatmapData && mapped.heatmapData.length > 0) {
      const heatmapData = mapped.heatmapData;
      const totalCostVolume = heatmapData.reduce((sum, h) => sum + (h.cost_volume || h.volume), 0);
      const hasCpiField = heatmapData.some(h => h.cpi !== undefined && h.cpi > 0);
      // DEBUG: Log CPI calculation details
      console.log('🔍 CPI SYNC DEBUG (cache):');
      console.log(' - heatmapData length:', heatmapData.length);
      console.log(' - hasCpiField:', hasCpiField);
      console.log(' - totalCostVolume:', totalCostVolume);
      if (hasCpiField) {
        console.log(' - Sample CPIs:', heatmapData.slice(0, 3).map(h => ({ skill: h.skill, cpi: h.cpi, cost_volume: h.cost_volume })));
      }
      let globalCPI: number;
      if (hasCpiField) {
        // Volume-weighted mean of per-skill CPI.
        globalCPI = totalCostVolume > 0
          ? heatmapData.reduce((sum, h) => sum + (h.cpi || 0) * (h.cost_volume || h.volume), 0) / totalCostVolume
          : 0;
      } else {
        // Fallback: derive global CPI from total annual cost over total volume.
        const totalAnnualCost = heatmapData.reduce((sum, h) => sum + (h.annual_cost || 0), 0);
        console.log(' - totalAnnualCost (fallback):', totalAnnualCost);
        globalCPI = totalCostVolume > 0 ? totalAnnualCost / totalCostVolume : 0;
      }
      console.log(' - globalCPI calculated:', globalCPI.toFixed(4));
      // Search for both economy_costs (backend) and economy_cpi (frontend fallback)
      const dimensionIds = mapped.dimensions.map(d => ({ id: d.id, name: d.name }));
      console.log(' - Available dimensions:', dimensionIds);
      const economyDimIdx = mapped.dimensions.findIndex(d =>
        d.id === 'economy_costs' || d.name === 'economy_costs' ||
        d.id === 'economy_cpi' || d.name === 'economy_cpi'
      );
      console.log(' - economyDimIdx:', economyDimIdx);
      if (economyDimIdx >= 0 && globalCPI > 0) {
        const oldKpi = mapped.dimensions[economyDimIdx].kpi;
        console.log(' - OLD KPI value:', oldKpi?.value);
        // Use airline benchmark (€3.50) for consistency with ExecutiveSummaryTab
        // Percentiles: p25=2.20, p50=3.50, p75=4.50, p90=5.50
        const CPI_BENCHMARK = 3.50;
        const cpiDiff = globalCPI - CPI_BENCHMARK;
        // For inverted CPI: lower is better
        const cpiStatus = cpiDiff <= 0 ? 'positive' : cpiDiff <= 0.5 ? 'neutral' : 'negative';
        // Calculate score based on airline percentiles
        let newScore: number;
        if (globalCPI <= 2.20) newScore = 100;
        else if (globalCPI <= 3.50) newScore = 80;
        else if (globalCPI <= 4.50) newScore = 60;
        else if (globalCPI <= 5.50) newScore = 40;
        else newScore = 20;
        mapped.dimensions[economyDimIdx].score = newScore;
        mapped.dimensions[economyDimIdx].kpi = {
          label: 'Coste por Interacción',
          value: `${globalCPI.toFixed(2)}`,
          change: `vs benchmark €${CPI_BENCHMARK.toFixed(2)}`,
          changeType: cpiStatus as 'positive' | 'neutral' | 'negative'
        };
        console.log(' - NEW KPI value:', mapped.dimensions[economyDimIdx].kpi.value);
        console.log(' - NEW score:', newScore);
        console.log(`💰 CPI sincronizado (cache): €${globalCPI.toFixed(2)}`);
      } else {
        console.warn('⚠️ CPI sync skipped: economyDimIdx=', economyDimIdx, 'globalCPI=', globalCPI);
      }
    }
    // === DrilldownData: use cached (fast) or fallback to heatmap ===
    if (cachedDrilldownData && cachedDrilldownData.length > 0) {
      // Use cached drilldownData directly (already calculated when uploading file).
      mapped.drilldownData = cachedDrilldownData;
      console.log(`📊 Using cached drilldownData: ${mapped.drilldownData.length} skills`);
      // Count original queues for log
      const uniqueOriginalQueues = new Set(
        mapped.drilldownData.flatMap((d: any) =>
          (d.originalQueues || []).map((q: any) => q.original_queue_id)
        ).filter((q: string) => q && q.trim() !== '')
      ).size;
      console.log(`📊 Total original queues: ${uniqueOriginalQueues}`);
      // Use opportunities and roadmap derived from the real drilldownData.
      mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour);
      mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour);
      console.log(`📊 Opportunities: ${mapped.opportunities.length}, Roadmap: ${mapped.roadmap.length}`);
    } else if (mapped.heatmapData && mapped.heatmapData.length > 0) {
      // v4.5: No cached drilldownData - try to calculate it from cached CSV
      console.log('⚠️ No cached drilldownData found, attempting to calculate from cached CSV...');
      let calculatedDrilldown = false;
      try {
        // Download and parse cached CSV to calculate real drilldown
        const cachedFile = await downloadCachedFile(authHeaderOverride);
        if (cachedFile) {
          console.log(`📥 Downloaded cached CSV: ${(cachedFile.size / 1024 / 1024).toFixed(2)} MB`);
          const { parseFile } = await import('./fileParser');
          const parsedInteractions = await parseFile(cachedFile);
          if (parsedInteractions && parsedInteractions.length > 0) {
            console.log(`📊 Parsed ${parsedInteractions.length} interactions from cached CSV`);
            // Calculate real drilldown from interactions
            mapped.drilldownData = calculateDrilldownMetrics(parsedInteractions, costPerHour);
            console.log(`📊 Calculated drilldown: ${mapped.drilldownData.length} skills`);
            // Save drilldown in cache for next use (best-effort; failures only warn).
            try {
              const saveSuccess = await saveDrilldownToServerCache(authHeaderOverride, mapped.drilldownData);
              if (saveSuccess) {
                console.log('💾 DrilldownData saved to cache for future use');
              } else {
                console.warn('⚠️ Failed to save drilldownData to cache');
              }
            } catch (saveErr) {
              console.warn('⚠️ Error saving drilldownData to cache:', saveErr);
            }
            calculatedDrilldown = true;
          }
        }
      } catch (csvErr) {
        console.warn('⚠️ Could not calculate drilldown from cached CSV:', csvErr);
      }
      if (!calculatedDrilldown) {
        // Final fallback: use heatmap (approximate data)
        console.warn('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        console.warn('⚠️ FALLBACK ACTIVE: No cached drilldownData');
        console.warn('   Probable cause: CSV was not uploaded correctly or cache expired');
        console.warn('   Consequence: Using aggregated heatmap data (less precise)');
        console.warn('   Solution: Re-upload the CSV file to obtain complete data');
        console.warn('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        mapped.drilldownData = generateDrilldownFromHeatmap(mapped.heatmapData, costPerHour);
        console.log(`📊 Drill-down from heatmap (fallback): ${mapped.drilldownData.length} aggregated skills`);
      }
      // Use same functions as fresh route for consistency
      mapped.opportunities = generateOpportunitiesFromDrilldown(mapped.drilldownData, costPerHour);
      mapped.roadmap = generateRoadmapFromDrilldown(mapped.drilldownData, costPerHour);
    }
    // Derive findings and recommendations from the mapped data.
    mapped.findings = generateFindingsFromData(mapped);
    mapped.recommendations = generateRecommendationsFromData(mapped);
    // Benchmark: empty for now
    mapped.benchmarkData = [];
    // Mark the result as coming from the backend/cache.
    mapped.source = 'backend';
    console.log('✅ Analysis generated from server-cached file');
    return mapped;
  } catch (error) {
    console.error('❌ Error analyzing from cache:', error);
    throw error;
  }
};
// Helper function to generate drilldownData from heatmapData when we do not have parsedInteractions
// Builds approximate drill-down rows from aggregated heatmap points when
// per-interaction data is unavailable. Each heatmap point becomes one row
// with a single synthetic "original queue" mirroring its own metrics.
// Note: costPerHour is kept for signature parity with the real calculator
// but is not used by this heatmap-based approximation.
function generateDrilldownFromHeatmap(
  heatmapData: HeatmapDataPoint[],
  costPerHour: number
): DrilldownDataPoint[] {
  const rows: DrilldownDataPoint[] = [];
  for (const point of heatmapData) {
    const coefVariation = point.variability?.cv_aht || 0;
    const transferPct = point.variability?.transfer_rate || point.metrics?.transfer_rate || 0;
    const fcrPct = point.metrics?.fcr || 0;
    // Technical FCR: use the explicit field when present, otherwise derive it
    // as the complement of the transfer rate (keeps Summary tab consistent).
    const technicalFcr = point.metrics?.fcr_tecnico ?? (100 - transferPct);
    // Weighted agentic score from the three readiness dimensions; fall back to
    // automation_readiness rescaled to 0-1 when dimensions are missing.
    const readinessScore = point.dimensions
      ? (point.dimensions.predictability * 0.4 + point.dimensions.complexity_inverse * 0.35 + point.dimensions.repetitivity * 0.25)
      : (point.automation_readiness || 0) / 10;
    // v4.4: clasificarTierSimple expects decimals (e.g. 0.75) while the
    // heatmap stores percentages (e.g. 75); volume feeds the red-flag check.
    const tier = clasificarTierSimple(
      readinessScore,
      coefVariation / 100,
      transferPct / 100,
      fcrPct / 100,
      point.volume
    );
    const isPriorityCandidate = coefVariation < 75;
    // Metric fields shared verbatim by the row and its synthetic queue entry.
    const sharedMetrics = {
      aht_mean: point.aht_seconds,
      cv_aht: coefVariation,
      transfer_rate: transferPct,
      fcr_rate: fcrPct,
      fcr_tecnico: technicalFcr,
      agenticScore: readinessScore,
    };
    rows.push({
      skill: point.skill,
      volume: point.volume,
      volumeValid: point.volume,
      ...sharedMetrics,
      isPriorityCandidate,
      originalQueues: [{
        original_queue_id: point.skill,
        volume: point.volume,
        volumeValid: point.volume,
        ...sharedMetrics,
        tier,
        isPriorityCandidate,
      }],
    });
  }
  return rows;
}
// Helper function to generate analysis with synthetic data
/**
 * Generates a full AnalysisData payload from synthetic (random) data.
 *
 * Demo/fallback path used when no real CSV file or Google Sheets source is
 * available. Mirrors the shape produced by the real-data pipeline so the UI
 * renders identically, and tags the result with source: 'synthetic'.
 *
 * @param tier           Analysis tier; 'gold'/'silver' additionally compute the Agentic Readiness score.
 * @param costPerHour    Agent cost per hour used by the economic generators.
 * @param avgCsat        Average CSAT forwarded to the heatmap generator.
 * @param segmentMapping Optional queue-to-value-segment mapping forwarded to the heatmap generator.
 */
const generateSyntheticAnalysis = (
  tier: TierKey,
  costPerHour: number = 20,
  avgCsat: number = 85,
  segmentMapping?: { high_value_queues: string[]; medium_value_queues: string[]; low_value_queues: string[] }
): AnalysisData => {
  const overallHealthScore = randomInt(55, 95);
  // Headline KPIs (labels are user-facing and intentionally in Spanish).
  const summaryKpis: Kpi[] = [
    { label: "Interacciones Totales", value: randomInt(15000, 50000).toLocaleString('es-ES') },
    { label: "AHT Promedio", value: `${randomInt(300, 480)}s`, change: `-${randomInt(5, 20)}s`, changeType: 'positive' },
    { label: "Tasa FCR", value: `${randomInt(70, 88)}%`, change: `+${randomFloat(0.5, 2, 1)}%`, changeType: 'positive' },
    { label: "CSAT", value: `${randomFloat(4.1, 4.8, 1)}/5`, change: `-${randomFloat(0.1, 0.3, 1)}`, changeType: 'negative' },
  ];
  // v3.0: the 5 viable dimensions.
  const dimensionKeys = ['volumetry_distribution', 'operational_efficiency', 'effectiveness_resolution', 'complexity_predictability', 'agentic_readiness'];
  const dimensions: DimensionAnalysis[] = dimensionKeys.map(key => {
    const content = DIMENSIONS_CONTENT[key as keyof typeof DIMENSIONS_CONTENT];
    const score = randomInt(50, 98);
    const status = getScoreColor(score);
    const dimension: DimensionAnalysis = {
      id: key,
      // Narrow the plain string key to the dimension-name union (was `as any`).
      name: key as DimensionAnalysis['name'],
      title: randomFromList(content.titles),
      score,
      percentile: randomInt(30, 85),
      summary: randomFromList(content.summaries[status === 'green' ? 'good' : status === 'yellow' ? 'medium' : 'bad']),
      kpi: randomFromList(content.kpis),
      icon: content.icon,
    };
    // Only the volumetry dimension carries an hourly distribution payload.
    if (key === 'volumetry_distribution') {
      const hourly = generateHourlyDistribution();
      dimension.distribution_data = {
        hourly,
        off_hours_pct: calculateOffHoursPct(hourly),
        peak_hours: identifyPeakHours(hourly)
      };
    }
    return dimension;
  });
  // v2.0: compute the Agentic Readiness Score (gold/silver tiers only).
  let agenticReadiness = undefined;
  if (tier === 'gold' || tier === 'silver') {
    // Synthetic inputs for the readiness algorithm.
    const volumen_mes = randomInt(5000, 25000);
    const aht_values = Array.from({ length: 100 }, () =>
      Math.max(180, normalRandom(420, 120)) // mean 420s, std 120s, floored at 180s
    );
    const escalation_rate = randomFloat(0.05, 0.25, 2);
    const cpi_humano = randomFloat(2.5, 5.0, 2);
    const volumen_anual = volumen_mes * 12;
    const agenticInput: AgenticReadinessInput = {
      volumen_mes,
      aht_values,
      escalation_rate,
      cpi_humano,
      volumen_anual,
      tier
    };
    // GOLD tier feeds additional signals into the algorithm.
    if (tier === 'gold') {
      // Single lookup of the volumetry dimension (was two duplicate find() calls).
      const volumetryDistribution = dimensions.find(d => d.name === 'volumetry_distribution')?.distribution_data;
      agenticInput.structured_fields_pct = randomFloat(0.4, 0.9, 2);
      agenticInput.exception_rate = randomFloat(0.05, 0.25, 2);
      agenticInput.hourly_distribution = volumetryDistribution?.hourly;
      agenticInput.off_hours_pct = volumetryDistribution?.off_hours_pct;
      agenticInput.csat_values = Array.from({ length: 100 }, () =>
        Math.max(1, Math.min(5, normalRandom(4.3, 0.8)))
      );
    }
    agenticReadiness = calculateAgenticReadinessScore(agenticInput);
  }
  const heatmapData = generateHeatmapData(costPerHour, avgCsat, segmentMapping);
  console.log('📊 Heatmap data generated:', {
    length: heatmapData.length,
    firstItem: heatmapData[0],
    metricsKeys: heatmapData[0] ? Object.keys(heatmapData[0].metrics) : [],
    metricsValues: heatmapData[0] ? heatmapData[0].metrics : {},
    hasNaN: heatmapData.some(item =>
      Object.values(item.metrics).some(v => isNaN(v))
    )
  });
  // v4.3: derive drilldownData from the heatmap so the same opportunity/roadmap
  // generators are used as in the real-data path.
  const drilldownData = generateDrilldownFromHeatmap(heatmapData, costPerHour);
  return {
    tier,
    overallHealthScore,
    summaryKpis,
    dimensions,
    heatmapData,
    drilldownData,
    agenticReadiness,
    findings: generateFindingsFromTemplates(),
    recommendations: generateRecommendationsFromTemplates(),
    opportunities: generateOpportunitiesFromDrilldown(drilldownData, costPerHour),
    economicModel: generateEconomicModelData(),
    roadmap: generateRoadmapFromDrilldown(drilldownData, costPerHour),
    benchmarkData: generateBenchmarkData(),
    source: 'synthetic',
  };
};