Phase 3 of Spanish-to-English translation for low-priority backend files: Backend core modules (4 files): - Volumetria.py: Translated ~15 occurrences (docstrings, comments, plot labels, day abbreviations) - agent.py: Translated ~15 occurrences (system prompts, docstrings, error messages) - pipeline.py: Translated ~10 occurrences (log messages, docstrings, comments) - analysis_service.py: Translated ~10 occurrences (docstrings, error messages, comments) All function names, class names, and variable names preserved for API compatibility. Frontend and backend compilation tested and verified successful. This completes the comprehensive Spanish-to-English translation project: - Phase 1 (High Priority): 3 files - backendMapper.ts, analysisGenerator.ts, realDataAnalysis.ts - Phase 2 (Medium Priority): 5 files - dataTransformation.ts, segmentClassifier.ts, + 3 dimension files - Phase 3 (Low Priority): 4 files - Volumetria.py, agent.py, pipeline.py, analysis_service.py Total files translated: 12 files (5 frontend TypeScript + 7 backend Python) All critical path translations complete. Related to TRANSLATION_STATUS.md Phase 3 completion. https://claude.ai/code/session_01GNbnkFoESkRcnPr3bLCYDg
311 lines
10 KiB
Python
311 lines
10 KiB
Python
from __future__ import annotations

import json
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional, Sequence

from openai import OpenAI
from reportlab.lib.pagesizes import A4
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas

# Default system prompt: frames the model as a contact-center consultant and
# pins the expected output — a business-oriented report over BeyondMetrics JSON.
DEFAULT_SYSTEM_PROMPT = (
    "You are an expert contact center consultant. You will receive analytical"
    " results from a metrics system (BeyondMetrics) in JSON format. Your task"
    " is to generate a clear, actionable, business-oriented report,"
    " highlighting the main findings, risks, and opportunities for improvement."
)

@dataclass
class ReportAgentConfig:
    """Basic configuration for the report agent.

    Attributes:
        openai_api_key: API key for OpenAI. May be passed explicitly here or,
            if left as None, read from the OPENAI_API_KEY environment variable.
        model: ChatGPT model to use, e.g. 'gpt-4.1-mini' or similar.
        system_prompt: System prompt used to control the report style.
    """

    openai_api_key: Optional[str] = None
    model: str = "gpt-4.1-mini"
    system_prompt: str = DEFAULT_SYSTEM_PROMPT

class BeyondMetricsReportAgent:
    """
    Simple agent that:

    1) Reads the JSON results from a BeyondMetrics execution.
    2) Builds a prompt with those results.
    3) Calls ChatGPT to generate a text report.
    4) Saves the report to a PDF on disk, EMBEDDING the PNG images
       generated by the pipeline as attachments.

    MVP: focused on text + embedded figures.
    """

    def __init__(self, config: Optional[ReportAgentConfig] = None) -> None:
        """Create the agent and its OpenAI client.

        Raises:
            RuntimeError: if no API key is available, either explicitly in
                the config or via the OPENAI_API_KEY environment variable.
        """
        self.config = config or ReportAgentConfig()

        api_key = self.config.openai_api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise RuntimeError(
                "Missing OpenAI API key. "
                "Pass it in ReportAgentConfig(openai_api_key=...) or "
                "define the OPENAI_API_KEY environment variable."
            )

        # New OpenAI API client
        self._client = OpenAI(api_key=api_key)

    # ------------------------------------------------------------------
    # Main public API
    # ------------------------------------------------------------------
    def generate_pdf_report(
        self,
        run_base: str,
        output_pdf_path: Optional[str] = None,
        extra_user_prompt: str = "",
    ) -> str:
        """
        Generates a PDF report from a results folder.

        Parameters:
        - run_base:
            Base folder for the execution. Must contain at least 'results.json'
            and, optionally, PNG images generated by the pipeline.
        - output_pdf_path:
            Full path for the output PDF. If None, creates
            'beyondmetrics_report.pdf' inside run_base.
        - extra_user_prompt:
            Additional text to refine the agent's request
            (e.g. "emphasize efficiency and SLA", etc.)

        Returns:
        - The path to the generated PDF.

        Raises:
        - FileNotFoundError: if 'results.json' is missing from run_base.
        """
        run_dir = Path(run_base)
        results_json = run_dir / "results.json"
        if not results_json.exists():
            raise FileNotFoundError(
                f"{results_json} not found. "
                "Make sure to run the pipeline first."
            )

        # 1) Read results JSON
        with results_json.open("r", encoding="utf-8") as f:
            results_data: Dict[str, Any] = json.load(f)

        # 2) Find generated images (sorted for a deterministic figure order)
        image_files = sorted(run_dir.glob("*.png"))

        # 3) Build user prompt
        user_prompt = self._build_user_prompt(
            results=results_data,
            image_files=[p.name for p in image_files],
            extra_user_prompt=extra_user_prompt,
        )

        # 4) Call ChatGPT to get the report text
        report_text = self._call_chatgpt(user_prompt)

        # 5) Create PDF with text + embedded images
        if output_pdf_path is None:
            output_pdf_path = str(run_dir / "beyondmetrics_report.pdf")

        self._write_pdf(output_pdf_path, report_text, image_files)

        return output_pdf_path

    # ------------------------------------------------------------------
    # Prompt construction
    # ------------------------------------------------------------------
    def _build_user_prompt(
        self,
        results: Dict[str, Any],
        image_files: Sequence[str],
        extra_user_prompt: str = "",
    ) -> str:
        """
        Builds the user message to be sent to the model.
        For an MVP, we serialize the entire results JSON.
        Later, this can be summarized if the JSON grows too large.
        """
        results_str = json.dumps(results, indent=2, ensure_ascii=False)

        images_section = (
            "Images generated in the execution:\n"
            + "\n".join(f"- {name}" for name in image_files)
            if image_files
            else "No images were generated in this execution."
        )

        extra = (
            f"\n\nAdditional user instructions:\n{extra_user_prompt}"
            if extra_user_prompt
            else ""
        )

        prompt = (
            "Below I provide you with the results of a BeyondMetrics execution "
            "in JSON format. You must produce an EXECUTIVE REPORT for a contact "
            "center client. The report should include:\n"
            "- Executive summary in business language.\n"
            "- Main findings by dimension.\n"
            "- Detected risks or issues.\n"
            "- Actionable recommendations.\n\n"
            "Results (JSON):\n"
            f"{results_str}\n\n"
            f"{images_section}"
            f"{extra}"
        )

        return prompt

    # ------------------------------------------------------------------
    # ChatGPT call (new API)
    # ------------------------------------------------------------------
    def _call_chatgpt(self, user_prompt: str) -> str:
        """
        Calls the ChatGPT model and returns the content of the response message.
        Implemented with the new OpenAI API.

        Raises:
            RuntimeError: if the model response carries no text content.
        """
        resp = self._client.chat.completions.create(
            model=self.config.model,
            messages=[
                {"role": "system", "content": self.config.system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            # Low temperature keeps the report focused and mostly reproducible.
            temperature=0.3,
        )

        content = resp.choices[0].message.content
        if not isinstance(content, str):
            raise RuntimeError("The model response does not contain text.")
        return content

    # ------------------------------------------------------------------
    # PDF writing (text + images)
    # ------------------------------------------------------------------
    def _write_pdf(
        self,
        output_path: str,
        text: str,
        image_paths: Sequence[Path],
    ) -> None:
        """
        Creates an A4 PDF with:

        1) Report text (initial pages).
        2) An appendix section where the PNG images generated by the
           pipeline are embedded, scaled to fit the page.
        """
        output_path = str(output_path)
        c = canvas.Canvas(output_path, pagesize=A4)
        width, height = A4

        margin_x = 50
        margin_y = 50
        max_width = width - 2 * margin_x
        line_height = 14

        c.setFont("Helvetica", 11)

        # --- Write main text ---
        def _wrap_line(line: str, max_chars: int = 100) -> list[str]:
            """Greedy word-wrap of one raw line into <= max_chars chunks.

            A blank input line yields [""] so that empty lines still consume
            vertical space in the PDF (fixes paragraphs running together).
            """
            parts: list[str] = []
            current: list[str] = []
            count = 0
            for word in line.split():
                # Only break when the current chunk is non-empty; otherwise a
                # single overlong word would emit a spurious empty line first.
                if current and count + len(word) + 1 > max_chars:
                    parts.append(" ".join(current))
                    current = [word]
                    count = len(word) + 1
                else:
                    current.append(word)
                    count += len(word) + 1
            if current:
                parts.append(" ".join(current))
            return parts or [""]

        y = height - margin_y
        for raw_line in text.splitlines():
            for line in _wrap_line(raw_line):
                if y < margin_y:
                    c.showPage()
                    c.setFont("Helvetica", 11)
                    y = height - margin_y
                c.drawString(margin_x, y, line)
                y -= line_height

        # --- Append images as figures ---
        if image_paths:
            # New page for figures
            c.showPage()
            c.setFont("Helvetica-Bold", 14)
            c.drawString(margin_x, height - margin_y, "Appendix: Figures")
            c.setFont("Helvetica", 11)

            current_y = height - margin_y - 2 * line_height

            for img_path in image_paths:
                # If the image doesn't fit on the page, move to the next one
                available_height = current_y - margin_y
                if available_height < 100:  # minimum space
                    c.showPage()
                    c.setFont("Helvetica-Bold", 14)
                    c.drawString(margin_x, height - margin_y, "Appendix: Figures (cont.)")
                    c.setFont("Helvetica", 11)
                    current_y = height - margin_y - 2 * line_height
                    available_height = current_y - margin_y

                # Figure title
                title = f"Figure: {img_path.name}"
                c.drawString(margin_x, current_y, title)
                current_y -= line_height

                # Load and scale image
                try:
                    img = ImageReader(str(img_path))
                    iw, ih = img.getSize()
                    # Scale to fit available width and height
                    max_img_height = available_height - 2 * line_height
                    scale = min(max_width / iw, max_img_height / ih)
                    if scale <= 0:
                        scale = 1.0  # fallback

                    draw_w = iw * scale
                    draw_h = ih * scale

                    x = margin_x
                    y_img = current_y - draw_h

                    c.drawImage(
                        img,
                        x,
                        y_img,
                        width=draw_w,
                        height=draw_h,
                        preserveAspectRatio=True,
                        mask="auto",
                    )

                    current_y = y_img - 2 * line_height
                except Exception as e:
                    # If loading fails, indicate it in the PDF
                    err_msg = f"Could not load image {img_path.name}: {e}"
                    c.drawString(margin_x, current_y, err_msg)
                    current_y -= 2 * line_height

        c.save()