feat: Add AI report generation with DIFY integration
- Add Users table for display name resolution from AD authentication - Integrate DIFY AI service for report content generation - Create docx assembly service with image embedding from MinIO - Add REST API endpoints for report generation and download - Add WebSocket notifications for generation progress - Add frontend UI with progress modal and download functionality - Add integration tests for report generation flow Report sections (Traditional Chinese): - 事件摘要 (Summary) - 時間軸 (Timeline) - 參與人員 (Participants) - 處理過程 (Resolution Process) - 目前狀態 (Current Status) - 最終處置結果 (Final Resolution) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
13
app/modules/report_generation/services/__init__.py
Normal file
13
app/modules/report_generation/services/__init__.py
Normal file
@@ -0,0 +1,13 @@
|
||||
"""Report generation services"""
|
||||
from app.modules.report_generation.services.dify_client import dify_service, DifyService
|
||||
from app.modules.report_generation.services.report_data_service import ReportDataService, RoomReportData
|
||||
from app.modules.report_generation.services.docx_service import docx_service, DocxAssemblyService
|
||||
|
||||
__all__ = [
|
||||
"dify_service",
|
||||
"DifyService",
|
||||
"ReportDataService",
|
||||
"RoomReportData",
|
||||
"docx_service",
|
||||
"DocxAssemblyService",
|
||||
]
|
||||
261
app/modules/report_generation/services/dify_client.py
Normal file
261
app/modules/report_generation/services/dify_client.py
Normal file
@@ -0,0 +1,261 @@
|
||||
"""DIFY AI Service Client
|
||||
|
||||
Handles communication with DIFY Chat API for report generation.
|
||||
"""
|
||||
import json
|
||||
import re
|
||||
import httpx
|
||||
from typing import Dict, Any, Optional
|
||||
from dataclasses import dataclass
|
||||
from app.core.config import get_settings
|
||||
|
||||
settings = get_settings()
|
||||
|
||||
|
||||
@dataclass
class DifyResponse:
    """DIFY API response container.

    Holds the fields extracted from a blocking /chat-messages response,
    plus the JSON payload parsed out of the free-text answer.
    """

    # Identifiers DIFY returns for this exchange.
    message_id: str
    conversation_id: str
    # Raw free-text answer from the model.
    answer: str
    # JSON object extracted from `answer`; None until parsing succeeds.
    parsed_json: Optional[Dict[str, Any]] = None
    # Token usage as reported in response metadata (absent -> None).
    prompt_tokens: Optional[int] = None
    completion_tokens: Optional[int] = None
|
||||
|
||||
|
||||
class DifyServiceError(Exception):
    """Root of the DIFY service exception hierarchy."""


class DifyAPIError(DifyServiceError):
    """The HTTP request to DIFY failed (timeout, transport, or status error)."""


class DifyJSONParseError(DifyServiceError):
    """No JSON object could be extracted from the AI answer text."""


class DifyValidationError(DifyServiceError):
    """The extracted JSON does not match the expected report schema."""
|
||||
|
||||
|
||||
class DifyService:
    """Client for the DIFY Chat API used to generate report content."""

    # Section keys the AI answer must contain; missing keys are reported
    # in this order.
    REQUIRED_SECTIONS = [
        "summary",
        "timeline",
        "participants",
        "resolution_process",
        "current_status",
        "final_resolution",
    ]

    def __init__(self):
        # Connection settings come from application config.
        self.base_url = settings.DIFY_BASE_URL.rstrip("/")
        self.api_key = settings.DIFY_API_KEY
        self.timeout = settings.DIFY_TIMEOUT_SECONDS
        self._client = httpx.AsyncClient(timeout=self.timeout)

    async def generate_report(self, prompt: str, room_id: str) -> DifyResponse:
        """Send *prompt* to DIFY and return the parsed report content.

        Args:
            prompt: Fully constructed prompt (room data plus instructions).
            room_id: Used as the DIFY ``user`` identifier for tracking.

        Returns:
            DifyResponse whose ``parsed_json`` holds the validated sections.

        Raises:
            DifyAPIError: The HTTP call failed or returned no answer.
            DifyJSONParseError: No JSON object could be extracted.
            DifyValidationError: The JSON lacks required sections/fields.
        """
        endpoint = f"{self.base_url}/chat-messages"
        request_headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        request_body = {
            "inputs": {},
            "query": prompt,
            "response_mode": "blocking",
            "conversation_id": "",  # always start a fresh conversation
            "user": room_id,  # room id doubles as the tracking user
        }

        try:
            response = await self._client.post(
                endpoint, headers=request_headers, json=request_body
            )
            response.raise_for_status()
        except httpx.TimeoutException as e:
            raise DifyAPIError(f"DIFY API timeout after {self.timeout}s") from e
        except httpx.HTTPStatusError as e:
            # Best effort: include a truncated response body in the error.
            error_detail = ""
            try:
                error_detail = e.response.text[:500]
            except Exception:
                pass
            raise DifyAPIError(
                f"DIFY API error: {e.response.status_code} - {error_detail}"
            ) from e
        except httpx.RequestError as e:
            raise DifyAPIError(f"DIFY API connection error: {str(e)}") from e

        # Decode the API envelope itself.
        try:
            body = response.json()
        except json.JSONDecodeError as e:
            raise DifyAPIError(f"Invalid JSON response from DIFY: {str(e)}") from e

        answer = body.get("answer", "")
        usage_stats = body.get("metadata", {}).get("usage", {})

        if not answer:
            raise DifyAPIError("Empty answer from DIFY API")

        # Pull the structured report out of the free-text answer and
        # make sure every required section is present.
        parsed = self._extract_json(answer)
        self._validate_schema(parsed)

        return DifyResponse(
            message_id=body.get("message_id", ""),
            conversation_id=body.get("conversation_id", ""),
            answer=answer,
            parsed_json=parsed,
            prompt_tokens=usage_stats.get("prompt_tokens"),
            completion_tokens=usage_stats.get("completion_tokens"),
        )

    def _extract_json(self, text: str) -> Dict[str, Any]:
        """Pull a JSON object out of *text*.

        Tolerates three shapes: the whole text is JSON, JSON inside a
        markdown code fence, or JSON embedded in surrounding prose.

        Args:
            text: Raw text from the AI response.

        Returns:
            The parsed JSON dictionary.

        Raises:
            DifyJSONParseError: If no candidate parses as JSON.
        """
        text = text.strip()

        # Attempt 1: the whole response is already JSON.
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            pass

        # Attempt 2: any ```json ... ``` (or bare ```) fenced candidate.
        for candidate in re.findall(r"```(?:json)?\s*([\s\S]*?)\s*```", text):
            try:
                return json.loads(candidate.strip())
            except json.JSONDecodeError:
                continue

        # Attempt 3: the widest {...} span found in the text.
        start, end = text.find("{"), text.rfind("}")
        if start != -1 and end > start:
            try:
                return json.loads(text[start : end + 1])
            except json.JSONDecodeError:
                pass

        raise DifyJSONParseError(
            f"Could not extract valid JSON from AI response. Response preview: {text[:200]}..."
        )

    def _validate_schema(self, data: Dict[str, Any]) -> None:
        """Check that *data* carries every required report section.

        Args:
            data: Parsed JSON dictionary.

        Raises:
            DifyValidationError: On the first structural problem found.
        """
        absent = [key for key in self.REQUIRED_SECTIONS if key not in data]
        if absent:
            raise DifyValidationError(
                f"AI response missing required sections: {', '.join(absent)}"
            )

        # summary must carry non-empty content
        summary = data.get("summary", {})
        if not isinstance(summary, dict) or not summary.get("content"):
            raise DifyValidationError("summary section missing 'content' field")

        # timeline must carry an events list
        timeline = data.get("timeline", {})
        if not (isinstance(timeline, dict) and isinstance(timeline.get("events"), list)):
            raise DifyValidationError("timeline section missing 'events' list")

        # participants must carry a members list
        participants = data.get("participants", {})
        if not (
            isinstance(participants, dict)
            and isinstance(participants.get("members"), list)
        ):
            raise DifyValidationError("participants section missing 'members' list")

        # resolution_process must carry a content key (value may be empty)
        resolution = data.get("resolution_process", {})
        if not isinstance(resolution, dict) or "content" not in resolution:
            raise DifyValidationError(
                "resolution_process section missing 'content' field"
            )

        # current_status needs both status and description keys
        current_status = data.get("current_status", {})
        if not isinstance(current_status, dict):
            raise DifyValidationError("current_status must be a dictionary")
        if "status" not in current_status or "description" not in current_status:
            raise DifyValidationError(
                "current_status section missing 'status' or 'description' field"
            )

        # final_resolution needs has_resolution; content only when resolved
        final = data.get("final_resolution", {})
        if not isinstance(final, dict):
            raise DifyValidationError("final_resolution must be a dictionary")
        if "has_resolution" not in final:
            raise DifyValidationError(
                "final_resolution section missing 'has_resolution' field"
            )
        if final.get("has_resolution") and "content" not in final:
            raise DifyValidationError(
                "final_resolution section missing 'content' field when has_resolution is true"
            )

    async def close(self):
        """Dispose of the underlying HTTP client."""
        await self._client.aclose()
|
||||
|
||||
|
||||
# Singleton instance shared by the report generation endpoints.
# NOTE(review): constructed at import time, so the AsyncClient is created
# before any event loop exists — confirm this is intended.
dify_service = DifyService()
|
||||
445
app/modules/report_generation/services/docx_service.py
Normal file
445
app/modules/report_generation/services/docx_service.py
Normal file
@@ -0,0 +1,445 @@
|
||||
"""Document Assembly Service
|
||||
|
||||
Creates .docx reports using python-docx with:
|
||||
- Title and metadata header
|
||||
- AI-generated content sections
|
||||
- Embedded images from MinIO
|
||||
- File attachment list
|
||||
"""
|
||||
import io
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
from docx import Document
|
||||
from docx.shared import Inches, Pt, RGBColor
|
||||
from docx.enum.text import WD_ALIGN_PARAGRAPH
|
||||
from docx.enum.style import WD_STYLE_TYPE
|
||||
|
||||
from app.core.config import get_settings
|
||||
from app.core.minio_client import get_minio_client
|
||||
from app.modules.file_storage.services.minio_service import upload_file
|
||||
|
||||
settings = get_settings()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Constants for document styling
TITLE_SIZE = Pt(18)  # document title text size
HEADING_SIZE = Pt(14)  # section heading size
BODY_SIZE = Pt(11)  # default body/table text size
# Preferred font for Traditional Chinese output, with a Latin fallback.
# NOTE(review): HEADING_SIZE and FALLBACK_FONT are not referenced in the
# visible code of this file — confirm intended use or remove.
CHINESE_FONT = "Microsoft JhengHei"
FALLBACK_FONT = "Arial"
|
||||
|
||||
|
||||
class DocxAssemblyService:
    """Service to create and assemble .docx incident reports.

    Builds the document section by section (order matters: title,
    metadata table, AI sections, images, file list), downloads embedded
    images from MinIO, and can upload the finished report back to MinIO.
    """

    # Mapping of status values to Chinese labels
    STATUS_MAP = {
        "active": "處理中",
        "resolved": "已解決",
        "archived": "已封存",
    }

    # Mapping of incident type values to Chinese labels
    INCIDENT_TYPE_MAP = {
        "equipment_failure": "設備故障",
        "material_shortage": "物料短缺",
        "quality_issue": "品質問題",
        "other": "其他",
    }

    # Mapping of severity values to Chinese labels
    SEVERITY_MAP = {
        "low": "低",
        "medium": "中",
        "high": "高",
        "critical": "緊急",
    }

    def __init__(self) -> None:
        # Shared MinIO client; target bucket comes from app settings.
        self.minio_client = get_minio_client()
        self.bucket = settings.MINIO_BUCKET

    def create_report(
        self,
        room_data: Dict[str, Any],
        ai_content: Dict[str, Any],
        files: List[Dict[str, Any]],
        include_images: bool = True,
        include_file_list: bool = True,
    ) -> io.BytesIO:
        """Create a complete incident report document

        Args:
            room_data: Room metadata (title, type, severity, status, etc.)
            ai_content: AI-generated content (summary, timeline, participants, etc.)
            files: List of files with metadata
            include_images: Whether to embed images in the report
            include_file_list: Whether to include file attachment list

        Returns:
            BytesIO object containing the .docx file
        """
        doc = Document()

        # Configure document styles
        self._setup_styles(doc)

        # Add title
        self._add_title(doc, room_data)

        # Add metadata table
        self._add_metadata_table(doc, room_data)

        # Add AI-generated sections (fixed report order)
        self._add_summary_section(doc, ai_content)
        self._add_timeline_section(doc, ai_content)
        self._add_participants_section(doc, ai_content)
        self._add_resolution_process_section(doc, ai_content)
        self._add_current_status_section(doc, ai_content)
        self._add_final_resolution_section(doc, ai_content)

        # Add images if requested
        if include_images and files:
            self._add_images_section(doc, files)

        # Add file attachment list if requested
        if include_file_list and files:
            self._add_file_list_section(doc, files)

        # Save to BytesIO and rewind so callers can read from the start
        output = io.BytesIO()
        doc.save(output)
        output.seek(0)

        return output

    def _setup_styles(self, doc: Document) -> None:
        """Configure document styles"""
        # Configure default paragraph style
        # NOTE(review): python-docx `font.name` sets only the ASCII/hAnsi
        # font; CJK runs may not render in CHINESE_FONT unless the
        # w:eastAsia font is also set via the oxml layer — confirm output.
        style = doc.styles["Normal"]
        font = style.font
        font.name = CHINESE_FONT
        font.size = BODY_SIZE

    def _add_title(self, doc: Document, room_data: Dict[str, Any]) -> None:
        """Add document title"""
        title = doc.add_heading(level=0)
        run = title.add_run(f"事件報告:{room_data.get('title', '未命名事件')}")
        run.font.size = TITLE_SIZE
        run.font.bold = True

        # Add generation timestamp
        # NOTE(review): utcnow() is naive UTC; the label does not state a
        # timezone — confirm readers expect UTC here.
        timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M")
        subtitle = doc.add_paragraph()
        subtitle.alignment = WD_ALIGN_PARAGRAPH.CENTER
        run = subtitle.add_run(f"報告產生時間:{timestamp}")
        run.font.size = Pt(10)
        run.font.color.rgb = RGBColor(128, 128, 128)

        doc.add_paragraph()  # Spacing

    def _add_metadata_table(self, doc: Document, room_data: Dict[str, Any]) -> None:
        """Add metadata summary table (4 rows x 4 cols, label/value pairs)."""
        table = doc.add_table(rows=4, cols=4)
        table.style = "Table Grid"

        # Row 1: Type and Severity
        cells = table.rows[0].cells
        cells[0].text = "事件類型"
        incident_type = room_data.get("incident_type", "other")
        # Unknown enum values fall through as the raw string.
        cells[1].text = self.INCIDENT_TYPE_MAP.get(incident_type, incident_type)
        cells[2].text = "嚴重程度"
        severity = room_data.get("severity", "medium")
        cells[3].text = self.SEVERITY_MAP.get(severity, severity)

        # Row 2: Status and Location
        cells = table.rows[1].cells
        cells[0].text = "目前狀態"
        status = room_data.get("status", "active")
        cells[1].text = self.STATUS_MAP.get(status, status)
        cells[2].text = "發生地點"
        cells[3].text = room_data.get("location") or "未指定"

        # Row 3: Created and Resolved times
        cells = table.rows[2].cells
        cells[0].text = "建立時間"
        created_at = room_data.get("created_at")
        if isinstance(created_at, datetime):
            cells[1].text = created_at.strftime("%Y-%m-%d %H:%M")
        else:
            cells[1].text = str(created_at) if created_at else "未知"

        cells[2].text = "解決時間"
        resolved_at = room_data.get("resolved_at")
        if isinstance(resolved_at, datetime):
            cells[3].text = resolved_at.strftime("%Y-%m-%d %H:%M")
        elif resolved_at:
            cells[3].text = str(resolved_at)
        else:
            cells[3].text = "尚未解決"

        # Row 4: Description (spanning all columns)
        cells = table.rows[3].cells
        cells[0].text = "事件描述"
        # Merge remaining cells for description
        cells[1].merge(cells[3])
        cells[1].text = room_data.get("description") or "無描述"

        # Style table cells: even columns are labels, so bold them.
        for row in table.rows:
            for i, cell in enumerate(row.cells):
                for paragraph in cell.paragraphs:
                    for run in paragraph.runs:
                        run.font.size = BODY_SIZE
                        if i % 2 == 0:  # Header cells
                            run.font.bold = True

        doc.add_paragraph()  # Spacing

    def _add_summary_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add event summary section"""
        doc.add_heading("事件摘要", level=1)
        summary = ai_content.get("summary", {})
        content = summary.get("content", "無摘要內容")
        p = doc.add_paragraph(content)
        # First-line indent, customary for Chinese body text.
        p.paragraph_format.first_line_indent = Pt(24)

    def _add_timeline_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add timeline section as a two-column (time/event) table."""
        doc.add_heading("事件時間軸", level=1)
        timeline = ai_content.get("timeline", {})
        events = timeline.get("events", [])

        if not events:
            doc.add_paragraph("無時間軸記錄")
            return

        # Create timeline table
        table = doc.add_table(rows=len(events) + 1, cols=2)
        table.style = "Table Grid"

        # Header row
        header = table.rows[0].cells
        header[0].text = "時間"
        header[1].text = "事件"
        for cell in header:
            for run in cell.paragraphs[0].runs:
                run.font.bold = True

        # Event rows
        for i, event in enumerate(events):
            row = table.rows[i + 1].cells
            row[0].text = event.get("time", "")
            row[1].text = event.get("description", "")

        doc.add_paragraph()  # Spacing

    def _add_participants_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add participants section as a two-column (name/role) table."""
        doc.add_heading("參與人員", level=1)
        participants = ai_content.get("participants", {})
        members = participants.get("members", [])

        if not members:
            doc.add_paragraph("無參與人員記錄")
            return

        # Create participants table
        table = doc.add_table(rows=len(members) + 1, cols=2)
        table.style = "Table Grid"

        # Header row
        header = table.rows[0].cells
        header[0].text = "姓名"
        header[1].text = "角色"
        for cell in header:
            for run in cell.paragraphs[0].runs:
                run.font.bold = True

        # Member rows
        for i, member in enumerate(members):
            row = table.rows[i + 1].cells
            row[0].text = member.get("name", "")
            row[1].text = member.get("role", "")

        doc.add_paragraph()  # Spacing

    def _add_resolution_process_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add resolution process section"""
        doc.add_heading("處理過程", level=1)
        resolution = ai_content.get("resolution_process", {})
        content = resolution.get("content", "無處理過程記錄")
        p = doc.add_paragraph(content)
        p.paragraph_format.first_line_indent = Pt(24)

    def _add_current_status_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add current status section (bold status label plus description)."""
        doc.add_heading("目前狀態", level=1)
        current_status = ai_content.get("current_status", {})
        status = current_status.get("status", "unknown")
        status_text = self.STATUS_MAP.get(status, status)
        description = current_status.get("description", "")

        p = doc.add_paragraph()
        p.add_run(f"狀態:").bold = True
        p.add_run(status_text)

        if description:
            doc.add_paragraph(description)

    def _add_final_resolution_section(self, doc: Document, ai_content: Dict[str, Any]) -> None:
        """Add final resolution section.

        Content is shown only when the AI marked the incident resolved;
        otherwise a fixed "not resolved" note is emitted.
        """
        doc.add_heading("最終處置結果", level=1)
        final = ai_content.get("final_resolution", {})
        has_resolution = final.get("has_resolution", False)
        content = final.get("content", "")

        if has_resolution:
            if content:
                p = doc.add_paragraph(content)
                p.paragraph_format.first_line_indent = Pt(24)
            else:
                doc.add_paragraph("事件已解決,但無詳細說明。")
        else:
            doc.add_paragraph("事件尚未解決或無最終處置結果。")

    def _add_images_section(self, doc: Document, files: List[Dict[str, Any]]) -> None:
        """Add images section with embedded images from MinIO.

        Each failed download or embed degrades to an inline placeholder
        note instead of aborting the whole report.
        """
        image_files = [f for f in files if f.get("file_type") == "image"]

        if not image_files:
            return

        doc.add_heading("相關圖片", level=1)

        for f in image_files:
            try:
                # Download image from MinIO
                image_data = self._download_file(f.get("minio_object_path", ""))
                if image_data:
                    # Add image to document, scaled to a fixed 5" width
                    doc.add_picture(image_data, width=Inches(5))

                    # Add caption (the original filename)
                    caption = doc.add_paragraph()
                    caption.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    run = caption.add_run(f"{f.get('filename', '圖片')}")
                    run.font.size = Pt(9)
                    run.font.italic = True

                    doc.add_paragraph()  # Spacing
                else:
                    # Image download failed, add note
                    doc.add_paragraph(f"[圖片載入失敗: {f.get('filename', '未知')}]")

            except Exception as e:
                # Broad catch is deliberate: one bad image must not sink
                # the report; the failure is logged and noted inline.
                logger.error(f"Failed to embed image {f.get('filename')}: {e}")
                doc.add_paragraph(f"[圖片嵌入失敗: {f.get('filename', '未知')}]")

    def _add_file_list_section(self, doc: Document, files: List[Dict[str, Any]]) -> None:
        """Add file attachment list section as a four-column table."""
        doc.add_heading("附件清單", level=1)

        if not files:
            doc.add_paragraph("無附件")
            return

        # Create file list table
        table = doc.add_table(rows=len(files) + 1, cols=4)
        table.style = "Table Grid"

        # Header row
        header = table.rows[0].cells
        header[0].text = "檔案名稱"
        header[1].text = "類型"
        header[2].text = "上傳者"
        header[3].text = "上傳時間"
        for cell in header:
            for run in cell.paragraphs[0].runs:
                run.font.bold = True

        # File type mapping (unknown types fall through as raw strings)
        file_type_map = {
            "image": "圖片",
            "document": "文件",
            "log": "記錄檔",
        }

        # File rows
        for i, f in enumerate(files):
            row = table.rows[i + 1].cells
            row[0].text = f.get("filename", "")
            file_type = f.get("file_type", "file")
            row[1].text = file_type_map.get(file_type, file_type)
            # Prefer the resolved display name, fall back to the raw id.
            row[2].text = f.get("uploader_name") or f.get("uploader_id", "")

            uploaded_at = f.get("uploaded_at")
            if isinstance(uploaded_at, datetime):
                row[3].text = uploaded_at.strftime("%Y-%m-%d %H:%M")
            else:
                row[3].text = str(uploaded_at) if uploaded_at else ""

    def _download_file(self, object_path: str) -> Optional[io.BytesIO]:
        """Download file from MinIO

        Args:
            object_path: MinIO object path

        Returns:
            BytesIO containing file data, or None if download fails
        """
        if not object_path:
            return None

        try:
            response = self.minio_client.get_object(self.bucket, object_path)
            # Read fully into memory; images are expected to be small
            # enough for this — TODO confirm an upper bound exists upstream.
            data = io.BytesIO(response.read())
            response.close()
            response.release_conn()
            return data
        except Exception as e:
            # Best-effort: caller renders a placeholder on None.
            logger.error(f"Failed to download file from MinIO: {object_path} - {e}")
            return None

    def upload_report(
        self,
        report_data: io.BytesIO,
        room_id: str,
        report_id: str,
    ) -> Optional[str]:
        """Upload generated report to MinIO

        Args:
            report_data: BytesIO containing the .docx file
            room_id: Room ID for path organization
            report_id: Report ID for unique filename

        Returns:
            MinIO object path if successful, None otherwise
        """
        # Timestamp in the object name keeps repeated generations distinct.
        timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        object_path = f"{settings.REPORT_STORAGE_PATH}/{room_id}/{report_id}_{timestamp}.docx"

        # Get file size by seeking to the end, then rewind for the upload.
        report_data.seek(0, 2)  # Seek to end
        file_size = report_data.tell()
        report_data.seek(0)  # Reset to beginning

        success = upload_file(
            bucket=self.bucket,
            object_path=object_path,
            file_data=report_data,
            file_size=file_size,
            content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        )

        if success:
            logger.info(f"Report uploaded to MinIO: {object_path}")
            return object_path
        else:
            logger.error(f"Failed to upload report to MinIO")
            return None
|
||||
|
||||
|
||||
# Singleton instance shared by the report generation endpoints; created
# at import time, so get_minio_client() runs on module load.
docx_service = DocxAssemblyService()
|
||||
263
app/modules/report_generation/services/report_data_service.py
Normal file
263
app/modules/report_generation/services/report_data_service.py
Normal file
@@ -0,0 +1,263 @@
|
||||
"""Report Data Collection Service
|
||||
|
||||
Collects all room data needed for AI report generation:
|
||||
- Room metadata (title, type, severity, status, etc.)
|
||||
- Messages with sender display names
|
||||
- Room members with display names
|
||||
- File attachments with uploader names
|
||||
"""
|
||||
from typing import Dict, Any, List, Optional
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from sqlalchemy.orm import Session
|
||||
from sqlalchemy import desc
|
||||
|
||||
from app.modules.chat_room.models import IncidentRoom, RoomMember
|
||||
from app.modules.realtime.models import Message
|
||||
from app.modules.file_storage.models import RoomFile
|
||||
from app.modules.auth.models import User
|
||||
|
||||
|
||||
@dataclass
class MessageData:
    """One chat message, flattened for report generation."""

    message_id: str
    sender_id: str
    # Display name resolved from the users table; falls back to sender_id.
    sender_name: str
    content: str
    # Message type enum value as a string (e.g. "image_ref", "file_ref").
    message_type: str
    created_at: datetime
    # Original filename for image/file reference messages, else None.
    file_name: Optional[str] = None
|
||||
|
||||
|
||||
@dataclass
class MemberData:
    """One room member, flattened for report generation."""

    user_id: str
    # Display name resolved from the users table; falls back to user_id.
    display_name: str
    # Role enum value as a string; "viewer" when the row has no role.
    role: str
|
||||
|
||||
|
||||
@dataclass
class FileData:
    """One room file attachment, flattened for report generation."""

    file_id: str
    filename: str
    # Coarse category used by the docx service (e.g. "image").
    file_type: str
    mime_type: str
    uploaded_at: datetime
    uploader_id: str
    # Display name resolved from the users table; falls back to uploader_id.
    uploader_name: str
    # Object key used to fetch the file from MinIO for embedding.
    minio_object_path: str
|
||||
|
||||
|
||||
@dataclass
class RoomReportData:
    """Complete room snapshot consumed by the prompt builder and docx service."""

    room_id: str
    title: str
    # Enum value strings; defaults applied by the collector when unset.
    incident_type: str
    severity: str
    status: str
    location: Optional[str]
    description: Optional[str]
    resolution_notes: Optional[str]
    created_at: datetime
    resolved_at: Optional[datetime]
    created_by: str
    # Chronological, non-deleted messages with resolved sender names.
    messages: List[MessageData] = field(default_factory=list)
    # Current (non-removed) members with resolved display names.
    members: List[MemberData] = field(default_factory=list)
    # Non-deleted files with resolved uploader names, oldest first.
    files: List[FileData] = field(default_factory=list)
|
||||
|
||||
|
||||
class ReportDataService:
    """Collects everything a room report needs from the database."""

    def __init__(self, db: Session):
        # Caller-owned SQLAlchemy session; this service never closes it.
        self.db = db

    def collect_room_data(self, room_id: str) -> Optional[RoomReportData]:
        """Gather metadata, messages, members, and files for *room_id*.

        Args:
            room_id: Room ID to collect data for.

        Returns:
            A fully populated RoomReportData, or None when the room
            does not exist.
        """
        room = (
            self.db.query(IncidentRoom)
            .filter(IncidentRoom.room_id == room_id)
            .first()
        )
        if not room:
            return None

        # Enum columns may be NULL; substitute the documented defaults.
        return RoomReportData(
            room_id=room.room_id,
            title=room.title,
            incident_type=room.incident_type.value if room.incident_type else "other",
            severity=room.severity.value if room.severity else "medium",
            status=room.status.value if room.status else "active",
            location=room.location,
            description=room.description,
            resolution_notes=room.resolution_notes,
            created_at=room.created_at,
            resolved_at=room.resolved_at,
            created_by=room.created_by,
            messages=self._collect_messages(room_id),
            members=self._collect_members(room_id),
            files=self._collect_files(room_id),
        )

    def _collect_messages(self, room_id: str) -> List[MessageData]:
        """Return the room's non-deleted messages in chronological order,
        with sender display names resolved via a LEFT JOIN on users."""
        rows = (
            self.db.query(Message, User.display_name)
            .outerjoin(User, Message.sender_id == User.user_id)
            .filter(Message.room_id == room_id)
            .filter(Message.deleted_at.is_(None))  # skip soft-deleted rows
            .order_by(Message.created_at)
            .all()
        )

        def attachment_name(msg):
            # Only image/file reference messages carry a filename payload.
            if msg.message_type.value in ("image_ref", "file_ref") and msg.message_metadata:
                return msg.message_metadata.get("filename")
            return None

        return [
            MessageData(
                message_id=msg.message_id,
                sender_id=msg.sender_id,
                sender_name=name or msg.sender_id,  # no AD name -> raw id
                content=msg.content,
                message_type=msg.message_type.value,
                created_at=msg.created_at,
                file_name=attachment_name(msg),
            )
            for msg, name in rows
        ]

    def _collect_members(self, room_id: str) -> List[MemberData]:
        """Return current (non-removed) members with display names resolved."""
        rows = (
            self.db.query(RoomMember, User.display_name)
            .outerjoin(User, RoomMember.user_id == User.user_id)
            .filter(RoomMember.room_id == room_id)
            .filter(RoomMember.removed_at.is_(None))  # skip removed members
            .all()
        )

        return [
            MemberData(
                user_id=member.user_id,
                display_name=name or member.user_id,  # no AD name -> raw id
                role=member.role.value if member.role else "viewer",
            )
            for member, name in rows
        ]

    def _collect_files(self, room_id: str) -> List[FileData]:
        """Return non-deleted files, oldest first, with uploader names resolved."""
        rows = (
            self.db.query(RoomFile, User.display_name)
            .outerjoin(User, RoomFile.uploader_id == User.user_id)
            .filter(RoomFile.room_id == room_id)
            .filter(RoomFile.deleted_at.is_(None))  # skip soft-deleted files
            .order_by(RoomFile.uploaded_at)
            .all()
        )

        return [
            FileData(
                file_id=item.file_id,
                filename=item.filename,
                file_type=item.file_type,
                mime_type=item.mime_type,
                uploaded_at=item.uploaded_at,
                uploader_id=item.uploader_id,
                uploader_name=name or item.uploader_id,  # no AD name -> raw id
                minio_object_path=item.minio_object_path,
            )
            for item, name in rows
        ]

    def to_prompt_dict(self, data: RoomReportData) -> Dict[str, Any]:
        """Flatten *data* into the plain-dict layout the prompt builder expects.

        Args:
            data: RoomReportData object.

        Returns:
            Dictionary with "room_data", "messages", "members", and
            "files" keys.
        """
        return {
            "room_data": {
                "room_id": data.room_id,
                "title": data.title,
                "incident_type": data.incident_type,
                "severity": data.severity,
                "status": data.status,
                "location": data.location,
                "description": data.description,
                "resolution_notes": data.resolution_notes,
                "created_at": data.created_at,
                "resolved_at": data.resolved_at,
                "created_by": data.created_by,
            },
            "messages": [
                {
                    "message_id": m.message_id,
                    "sender_id": m.sender_id,
                    "sender_name": m.sender_name,
                    "content": m.content,
                    "message_type": m.message_type,
                    "created_at": m.created_at,
                    "file_name": m.file_name,
                }
                for m in data.messages
            ],
            "members": [
                {
                    "user_id": m.user_id,
                    "display_name": m.display_name,
                    "role": m.role,
                }
                for m in data.members
            ],
            "files": [
                {
                    "file_id": f.file_id,
                    "filename": f.filename,
                    "file_type": f.file_type,
                    "mime_type": f.mime_type,
                    "uploaded_at": f.uploaded_at,
                    "uploader_id": f.uploader_id,
                    "uploader_name": f.uploader_name,
                    "minio_object_path": f.minio_object_path,
                }
                for f in data.files
            ],
        }
|
||||
Reference in New Issue
Block a user