feat: complete issue fixes and implement remaining features

## Critical Issues (CRIT-001~003) - All Fixed
- JWT secret key validation with pydantic field_validator (sketch below)
- Login audit logging for success/failure attempts
- Frontend API path prefix removal
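
For reference, a minimal sketch of the kind of field_validator named above; the `Settings` class and the rejection policy are illustrative, not the committed code:

```python
from pydantic import field_validator
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    SECRET_KEY: str = ""

    @field_validator("SECRET_KEY")
    @classmethod
    def validate_secret_key(cls, v: str) -> str:
        # Fail fast on missing, placeholder, or short secrets (illustrative policy)
        if not v or v in {"changeme", "secret"} or len(v) < 32:
            raise ValueError("SECRET_KEY must be set to a strong value (>= 32 characters)")
        return v
```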

## High Priority Issues (HIGH-001~008) - All Fixed
- Project soft delete using is_active flag
- Redis session token bytes handling
- Rate limiting with slowapi (5 req/min for login; wiring sketched below)
- Attachment API permission checks
- Kanban view with drag-and-drop
- Workload heatmap UI (WorkloadPage, WorkloadHeatmap)
- TaskDetailModal integrating Comments/Attachments
- UserSelect component for task assignment
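
A hedged sketch of the slowapi wiring behind the 5 req/min login limit; the route path and handler body are illustrative:

```python
from fastapi import FastAPI, Request
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

limiter = Limiter(key_func=get_remote_address)
app = FastAPI()
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)


@app.post("/api/auth/login")
@limiter.limit("5/minute")  # matches the 5 req/min policy above
async def login(request: Request):
    ...
```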

## Medium Priority Issues (MED-001~012) - All Fixed
- MED-001~005: DB commits, N+1 queries, datetime, error format, blocker flag
- MED-006: Project health dashboard (HealthService, ProjectHealthPage)
- MED-007: Capacity update API (PUT /api/users/{id}/capacity; endpoint shape sketched below)
- MED-008: Schedule triggers (cron parsing, deadline reminders)
- MED-009: Watermark feature (image/PDF watermarking)
- MED-010~012: useEffect deps, DOM operations, PDF export
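
And a sketch of the MED-007 endpoint shape, assuming FastAPI with a SQLAlchemy session dependency; `get_db`'s location, the `CapacityUpdate` schema, and the response shape are assumptions:

```python
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session

from app.models import User          # model import path used elsewhere in this commit
from app.api.deps import get_db      # assumed location of the DB session dependency

router = APIRouter()


class CapacityUpdate(BaseModel):
    capacity: float = Field(gt=0, le=168, description="Weekly capacity in hours")


@router.put("/api/users/{user_id}/capacity")
def update_capacity(user_id: str, payload: CapacityUpdate, db: Session = Depends(get_db)):
    user = db.query(User).filter(User.id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    user.capacity = payload.capacity
    db.commit()
    return {"id": user.id, "capacity": user.capacity}
```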

## New Files
- backend/app/api/health/ - Project health API
- backend/app/services/health_service.py
- backend/app/services/trigger_scheduler.py
- backend/app/services/watermark_service.py
- backend/app/core/rate_limiter.py
- frontend/src/pages/ProjectHealthPage.tsx
- frontend/src/components/ProjectHealthCard.tsx
- frontend/src/components/KanbanBoard.tsx
- frontend/src/components/WorkloadHeatmap.tsx

## Tests
- 113 new tests passing (health: 32, users: 14, triggers: 35, watermark: 32)

## OpenSpec Archives
- add-project-health-dashboard
- add-capacity-update-api
- add-schedule-triggers
- add-watermark-feature
- add-rate-limiting
- enhance-frontend-ux
- add-resource-management-ui

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: beabigegg
Date: 2026-01-04 21:49:52 +08:00
Parent: 64874d5425
Commit: 9b220523ff
90 changed files with 9426 additions and 194 deletions

View File: audit service (class AuditService)

@@ -1,7 +1,7 @@
 import uuid
 import hashlib
 import json
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 from typing import Optional, Dict, Any, List
 from sqlalchemy.orm import Session
@@ -85,7 +85,8 @@ class AuditService:
         request_metadata: Optional[Dict] = None,
     ) -> AuditLog:
         """Log an audit event."""
-        now = datetime.utcnow()
+        # Use naive datetime for consistency with database storage (SQLite strips tzinfo)
+        now = datetime.now(timezone.utc).replace(tzinfo=None)
         sensitivity = AuditService.get_sensitivity_level(event_type)
         checksum = AuditService.calculate_checksum(
@@ -204,7 +205,8 @@
         alert.is_acknowledged = True
         alert.acknowledged_by = user_id
-        alert.acknowledged_at = datetime.utcnow()
+        # Use naive datetime for consistency with database storage
+        alert.acknowledged_at = datetime.now(timezone.utc).replace(tzinfo=None)
         db.flush()
         return alert

View File: file storage service (class FileStorageService)

@@ -139,9 +139,23 @@ class FileStorageService:
         return files[0]

     def get_file_by_path(self, file_path: str) -> Optional[Path]:
-        """Get file by stored path."""
+        """Get file by stored path. Handles both absolute and relative paths."""
         path = Path(file_path)
-        return path if path.exists() else None
+
+        # If path is absolute and exists, return it directly
+        if path.is_absolute() and path.exists():
+            return path
+
+        # If path is relative, try prepending base_dir
+        full_path = self.base_dir / path
+        if full_path.exists():
+            return full_path
+
+        # Fallback: check if original path exists (e.g., relative from current dir)
+        if path.exists():
+            return path
+
+        return None

     def delete_file(
         self,

View File: backend/app/services/health_service.py (new file)

@@ -0,0 +1,378 @@
"""Project health calculation service.
Provides functionality to calculate and retrieve project health metrics
including risk scores, schedule status, and resource status.
"""
import uuid
from datetime import datetime
from typing import List, Optional, Dict, Any
from sqlalchemy.orm import Session
from app.models import Project, Task, TaskStatus, Blocker, ProjectHealth
from app.schemas.project_health import (
RiskLevel,
ScheduleStatus,
ResourceStatus,
ProjectHealthResponse,
ProjectHealthWithDetails,
ProjectHealthSummary,
ProjectHealthDashboardResponse,
)
# Constants for health score calculation
BLOCKER_PENALTY_PER_ITEM = 10
BLOCKER_PENALTY_MAX = 30
OVERDUE_PENALTY_PER_ITEM = 5
OVERDUE_PENALTY_MAX = 30
COMPLETION_PENALTY_THRESHOLD = 50
COMPLETION_PENALTY_FACTOR = 0.4
COMPLETION_PENALTY_MAX = 20
# Risk level thresholds
RISK_LOW_THRESHOLD = 80
RISK_MEDIUM_THRESHOLD = 60
RISK_HIGH_THRESHOLD = 40
# Schedule status thresholds
SCHEDULE_AT_RISK_THRESHOLD = 2
# Resource status thresholds
RESOURCE_CONSTRAINED_THRESHOLD = 2
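# Worked example: a project with 2 unresolved blockers, 3 overdue tasks, and a
# 40% completion rate scores 100 - min(2*10, 30) - min(3*5, 30)
# - min(int((50 - 40) * 0.4), 20) = 100 - 20 - 15 - 4 = 61: "medium" risk,
# "delayed" schedule (3 > 2 overdue), "constrained" resources (2 blockers).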
def calculate_health_metrics(db: Session, project: Project) -> Dict[str, Any]:
"""
Calculate health metrics for a project.
Args:
db: Database session
project: Project object to calculate metrics for
Returns:
Dictionary containing:
- health_score: 0-100 integer
- risk_level: low/medium/high/critical
- schedule_status: on_track/at_risk/delayed
- resource_status: adequate/constrained/overloaded
- task_count: Total number of active tasks
- completed_task_count: Number of completed tasks
- blocker_count: Number of unresolved blockers
- overdue_task_count: Number of overdue incomplete tasks
"""
# Fetch active tasks for this project
tasks = db.query(Task).filter(
Task.project_id == project.id,
Task.is_deleted == False
).all()
task_count = len(tasks)
# Count completed tasks
completed_task_count = sum(
1 for task in tasks
if task.status and task.status.is_done
)
# Count overdue tasks (incomplete with past due date)
now = datetime.utcnow()
overdue_task_count = sum(
1 for task in tasks
if task.due_date and task.due_date < now
and not (task.status and task.status.is_done)
)
# Count unresolved blockers
task_ids = [t.id for t in tasks]
blocker_count = 0
if task_ids:
blocker_count = db.query(Blocker).filter(
Blocker.task_id.in_(task_ids),
Blocker.resolved_at.is_(None)
).count()
# Calculate completion rate
completion_rate = 0.0
if task_count > 0:
completion_rate = (completed_task_count / task_count) * 100
# Calculate health score (start at 100, subtract penalties)
health_score = 100
# Apply blocker penalty
blocker_penalty = min(blocker_count * BLOCKER_PENALTY_PER_ITEM, BLOCKER_PENALTY_MAX)
health_score -= blocker_penalty
# Apply overdue penalty
overdue_penalty = min(overdue_task_count * OVERDUE_PENALTY_PER_ITEM, OVERDUE_PENALTY_MAX)
health_score -= overdue_penalty
# Apply completion penalty (if below threshold)
if task_count > 0 and completion_rate < COMPLETION_PENALTY_THRESHOLD:
completion_penalty = int(
(COMPLETION_PENALTY_THRESHOLD - completion_rate) * COMPLETION_PENALTY_FACTOR
)
health_score -= min(completion_penalty, COMPLETION_PENALTY_MAX)
# Ensure health score stays within bounds
health_score = max(0, min(100, health_score))
# Determine risk level based on health score
risk_level = _determine_risk_level(health_score)
# Determine schedule status based on overdue count
schedule_status = _determine_schedule_status(overdue_task_count)
# Determine resource status based on blocker count
resource_status = _determine_resource_status(blocker_count)
return {
"health_score": health_score,
"risk_level": risk_level,
"schedule_status": schedule_status,
"resource_status": resource_status,
"task_count": task_count,
"completed_task_count": completed_task_count,
"blocker_count": blocker_count,
"overdue_task_count": overdue_task_count,
}
def _determine_risk_level(health_score: int) -> str:
"""Determine risk level based on health score."""
if health_score >= RISK_LOW_THRESHOLD:
return "low"
elif health_score >= RISK_MEDIUM_THRESHOLD:
return "medium"
elif health_score >= RISK_HIGH_THRESHOLD:
return "high"
else:
return "critical"
def _determine_schedule_status(overdue_task_count: int) -> str:
"""Determine schedule status based on overdue task count."""
if overdue_task_count == 0:
return "on_track"
elif overdue_task_count <= SCHEDULE_AT_RISK_THRESHOLD:
return "at_risk"
else:
return "delayed"
def _determine_resource_status(blocker_count: int) -> str:
"""Determine resource status based on blocker count."""
if blocker_count == 0:
return "adequate"
elif blocker_count <= RESOURCE_CONSTRAINED_THRESHOLD:
return "constrained"
else:
return "overloaded"
def get_or_create_project_health(db: Session, project: Project) -> ProjectHealth:
"""
Get existing project health record or create a new one.
Args:
db: Database session
project: Project object
Returns:
ProjectHealth record
"""
health = db.query(ProjectHealth).filter(
ProjectHealth.project_id == project.id
).first()
if not health:
health = ProjectHealth(
id=str(uuid.uuid4()),
project_id=project.id
)
db.add(health)
return health
def update_project_health(
db: Session,
project: Project,
metrics: Dict[str, Any]
) -> ProjectHealth:
"""
Update project health record with calculated metrics.
Args:
db: Database session
project: Project object
metrics: Calculated health metrics
Returns:
Updated ProjectHealth record
"""
health = get_or_create_project_health(db, project)
health.health_score = metrics["health_score"]
health.risk_level = metrics["risk_level"]
health.schedule_status = metrics["schedule_status"]
health.resource_status = metrics["resource_status"]
return health
def get_project_health(
db: Session,
project_id: str
) -> Optional[ProjectHealthWithDetails]:
"""
Get health information for a single project.
Args:
db: Database session
project_id: Project ID
Returns:
ProjectHealthWithDetails or None if project not found
"""
project = db.query(Project).filter(Project.id == project_id).first()
if not project:
return None
metrics = calculate_health_metrics(db, project)
health = update_project_health(db, project, metrics)
db.commit()
db.refresh(health)
return _build_health_with_details(project, health, metrics)
def get_all_projects_health(
db: Session,
status_filter: Optional[str] = "active"
) -> ProjectHealthDashboardResponse:
"""
Get health information for all projects.
Args:
db: Database session
status_filter: Filter projects by status (default: "active")
Returns:
ProjectHealthDashboardResponse with projects list and summary
"""
query = db.query(Project)
if status_filter:
query = query.filter(Project.status == status_filter)
projects = query.all()
projects_health: List[ProjectHealthWithDetails] = []
for project in projects:
metrics = calculate_health_metrics(db, project)
health = update_project_health(db, project, metrics)
project_health = _build_health_with_details(project, health, metrics)
projects_health.append(project_health)
db.commit()
# Calculate summary statistics
summary = _calculate_summary(projects_health)
return ProjectHealthDashboardResponse(
projects=projects_health,
summary=summary
)
def _build_health_with_details(
project: Project,
health: ProjectHealth,
metrics: Dict[str, Any]
) -> ProjectHealthWithDetails:
"""Build ProjectHealthWithDetails from project, health, and metrics."""
return ProjectHealthWithDetails(
id=health.id,
project_id=project.id,
health_score=metrics["health_score"],
risk_level=RiskLevel(metrics["risk_level"]),
schedule_status=ScheduleStatus(metrics["schedule_status"]),
resource_status=ResourceStatus(metrics["resource_status"]),
last_updated=health.last_updated or datetime.utcnow(),
project_title=project.title,
project_status=project.status,
owner_name=project.owner.name if project.owner else None,
space_name=project.space.name if project.space else None,
task_count=metrics["task_count"],
completed_task_count=metrics["completed_task_count"],
blocker_count=metrics["blocker_count"],
overdue_task_count=metrics["overdue_task_count"],
)
def _calculate_summary(
projects_health: List[ProjectHealthWithDetails]
) -> ProjectHealthSummary:
"""Calculate summary statistics for health dashboard."""
total_projects = len(projects_health)
healthy_count = sum(1 for p in projects_health if p.health_score >= 80)
at_risk_count = sum(1 for p in projects_health if 50 <= p.health_score < 80)
critical_count = sum(1 for p in projects_health if p.health_score < 50)
average_health_score = 0.0
if total_projects > 0:
average_health_score = sum(p.health_score for p in projects_health) / total_projects
projects_with_blockers = sum(1 for p in projects_health if p.blocker_count > 0)
projects_delayed = sum(
1 for p in projects_health
if p.schedule_status == ScheduleStatus.DELAYED
)
return ProjectHealthSummary(
total_projects=total_projects,
healthy_count=healthy_count,
at_risk_count=at_risk_count,
critical_count=critical_count,
average_health_score=round(average_health_score, 1),
projects_with_blockers=projects_with_blockers,
projects_delayed=projects_delayed,
)
class HealthService:
"""
Service class for project health operations.
Provides a class-based interface for health calculations,
following the service pattern used in the codebase.
"""
def __init__(self, db: Session):
"""Initialize HealthService with database session."""
self.db = db
def calculate_metrics(self, project: Project) -> Dict[str, Any]:
"""Calculate health metrics for a project."""
return calculate_health_metrics(self.db, project)
def get_project_health(self, project_id: str) -> Optional[ProjectHealthWithDetails]:
"""Get health information for a single project."""
return get_project_health(self.db, project_id)
def get_dashboard(
self,
status_filter: Optional[str] = "active"
) -> ProjectHealthDashboardResponse:
"""Get health dashboard for all projects."""
return get_all_projects_health(self.db, status_filter)
def refresh_project_health(self, project: Project) -> ProjectHealth:
"""Refresh and persist health data for a project."""
metrics = calculate_health_metrics(self.db, project)
health = update_project_health(self.db, project, metrics)
self.db.commit()
self.db.refresh(health)
return health
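# Example usage (sketch; the session factory name is illustrative):
#   db = SessionLocal()
#   service = HealthService(db)
#   dashboard = service.get_dashboard(status_filter="active")
#   print(dashboard.summary.average_health_score)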

View File: backend/app/services/notification_service.py

@@ -4,7 +4,7 @@ import re
 import asyncio
 import logging
 import threading
-from datetime import datetime
+from datetime import datetime, timezone
 from typing import List, Optional, Dict, Set
 from sqlalchemy.orm import Session
 from sqlalchemy import event
@@ -102,7 +102,7 @@ class NotificationService:
         """Convert a Notification to a dict for publishing."""
         created_at = notification.created_at
         if created_at is None:
-            created_at = datetime.utcnow()
+            created_at = datetime.now(timezone.utc).replace(tzinfo=None)
         return {
             "id": notification.id,
             "type": notification.type,

View File: report service (class ReportService)

@@ -1,5 +1,5 @@
 import uuid
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 from typing import Dict, Any, List, Optional
 from sqlalchemy.orm import Session
 from sqlalchemy import func
@@ -15,9 +15,15 @@ class ReportService:
     @staticmethod
     def get_week_start(date: Optional[datetime] = None) -> datetime:
-        """Get the start of the week (Monday) for a given date."""
+        """Get the start of the week (Monday) for a given date.
+
+        Returns a naive datetime for compatibility with database values.
+        """
         if date is None:
-            date = datetime.utcnow()
+            date = datetime.now(timezone.utc).replace(tzinfo=None)
+        elif date.tzinfo is not None:
+            # Convert to naive datetime for consistency
+            date = date.replace(tzinfo=None)
         # Get Monday of the current week
         days_since_monday = date.weekday()
         week_start = date - timedelta(days=days_since_monday)
@@ -37,7 +43,8 @@
         week_end = week_start + timedelta(days=7)
         next_week_start = week_end
         next_week_end = next_week_start + timedelta(days=7)
-        now = datetime.utcnow()
+        # Use naive datetime for comparison with database values
+        now = datetime.now(timezone.utc).replace(tzinfo=None)

         # Get projects owned by the user
         projects = db.query(Project).filter(Project.owner_id == user_id).all()
@@ -189,7 +196,7 @@
         return {
             "week_start": week_start.isoformat(),
             "week_end": week_end.isoformat(),
-            "generated_at": datetime.utcnow().isoformat(),
+            "generated_at": datetime.now(timezone.utc).replace(tzinfo=None).isoformat(),
             "projects": project_details,
             "summary": {
                 "completed_count": len(completed_tasks),
@@ -235,7 +242,8 @@
         db.add(report_history)

         # Update last_sent_at
-        scheduled_report.last_sent_at = datetime.utcnow()
+        # Use naive datetime for consistency with database storage
+        scheduled_report.last_sent_at = datetime.now(timezone.utc).replace(tzinfo=None)

         db.commit()
@@ -304,7 +312,8 @@
         db.add(history)

         # Update last_sent_at
-        scheduled_report.last_sent_at = datetime.utcnow()
+        # Use naive datetime for consistency with database storage
+        scheduled_report.last_sent_at = datetime.now(timezone.utc).replace(tzinfo=None)

         # Send notification
         ReportService.send_report_notification(db, scheduled_report.recipient_id, content)

View File: backend/app/services/trigger_scheduler.py (new file)

@@ -0,0 +1,701 @@
"""
Scheduled Trigger Execution Service
This module provides functionality for parsing cron expressions and executing
scheduled triggers based on their cron schedule, including deadline reminders.
"""
import uuid
import logging
from datetime import datetime, timezone, timedelta
from typing import Optional, List, Dict, Any, Tuple, Set
from croniter import croniter
from sqlalchemy.orm import Session
from sqlalchemy import and_
from app.models import Trigger, TriggerLog, Task, Project
from app.services.notification_service import NotificationService
logger = logging.getLogger(__name__)
# Key prefix for tracking deadline reminders already sent
DEADLINE_REMINDER_LOG_TYPE = "deadline_reminder"
class TriggerSchedulerService:
"""Service for scheduling and executing cron-based triggers."""
@staticmethod
def parse_cron_expression(expression: str) -> Tuple[bool, Optional[str]]:
"""
Validate a cron expression.
Args:
expression: A cron expression string (e.g., "0 9 * * 1-5" for weekdays at 9am)
Returns:
Tuple of (is_valid, error_message)
- is_valid: True if the expression is valid
- error_message: None if valid, otherwise an error description
"""
try:
# croniter requires a base time for initialization
base_time = datetime.now(timezone.utc)
croniter(expression, base_time)
return True, None
except (ValueError, KeyError) as e:
return False, f"Invalid cron expression: {str(e)}"
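    # Worked example (croniter semantics): "0 9 * * 1-5" means 09:00 Mon-Fri.
    #   croniter("0 9 * * 1-5", datetime(2026, 1, 2, 8, 0, tzinfo=timezone.utc)).get_next(datetime)
    #   -> datetime(2026, 1, 2, 9, 0, tzinfo=timezone.utc)  (Friday, Jan 2 2026)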
@staticmethod
def get_next_run_time(expression: str, base_time: Optional[datetime] = None) -> Optional[datetime]:
"""
Get the next scheduled run time for a cron expression.
Args:
expression: A cron expression string
base_time: The base time to calculate from (defaults to now)
Returns:
The next datetime when the schedule matches, or None if invalid
"""
try:
if base_time is None:
base_time = datetime.now(timezone.utc)
cron = croniter(expression, base_time)
return cron.get_next(datetime)
except (ValueError, KeyError):
return None
@staticmethod
def get_previous_run_time(expression: str, base_time: Optional[datetime] = None) -> Optional[datetime]:
"""
Get the previous scheduled run time for a cron expression.
Args:
expression: A cron expression string
base_time: The base time to calculate from (defaults to now)
Returns:
The previous datetime when the schedule matched, or None if invalid
"""
try:
if base_time is None:
base_time = datetime.now(timezone.utc)
cron = croniter(expression, base_time)
return cron.get_prev(datetime)
except (ValueError, KeyError):
return None
@staticmethod
def should_trigger(
trigger: Trigger,
current_time: datetime,
last_execution_time: Optional[datetime] = None,
) -> bool:
"""
Check if a schedule trigger should fire based on its cron expression.
A trigger should fire if:
1. It's a schedule-type trigger and is active
2. Its conditions contain a valid cron expression
3. The cron schedule has matched since the last execution
Args:
trigger: The trigger to evaluate
current_time: The current time to check against
last_execution_time: The time of the last successful execution
Returns:
True if the trigger should fire, False otherwise
"""
# Only process schedule triggers
if trigger.trigger_type != "schedule":
return False
if not trigger.is_active:
return False
# Get cron expression from conditions
conditions = trigger.conditions or {}
cron_expression = conditions.get("cron_expression")
if not cron_expression:
logger.warning(f"Trigger {trigger.id} has no cron_expression in conditions")
return False
# Validate cron expression
is_valid, error = TriggerSchedulerService.parse_cron_expression(cron_expression)
if not is_valid:
logger.warning(f"Trigger {trigger.id} has invalid cron: {error}")
return False
# Get the previous scheduled time before current_time
prev_scheduled = TriggerSchedulerService.get_previous_run_time(cron_expression, current_time)
if prev_scheduled is None:
return False
# If no last execution, check if we're within the execution window (5 minutes)
if last_execution_time is None:
# Only trigger if the scheduled time was within the last 5 minutes
window_seconds = 300 # 5 minutes
time_since_scheduled = (current_time - prev_scheduled).total_seconds()
return 0 <= time_since_scheduled < window_seconds
# Trigger if the previous scheduled time is after the last execution
return prev_scheduled > last_execution_time
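    # Worked example: for cron "*/15 * * * *" with no prior execution, an
    # evaluation at 12:03 sees prev_scheduled = 12:00 (180s ago, inside the
    # 300s window) and fires; at 12:06 (360s ago) it stays quiet until the
    # 12:15 boundary comes around.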
@staticmethod
def get_last_execution_time(db: Session, trigger_id: str) -> Optional[datetime]:
"""
Get the last successful execution time for a trigger.
Args:
db: Database session
trigger_id: The trigger ID
Returns:
The datetime of the last successful execution, or None
"""
last_log = db.query(TriggerLog).filter(
TriggerLog.trigger_id == trigger_id,
TriggerLog.status == "success",
).order_by(TriggerLog.executed_at.desc()).first()
return last_log.executed_at if last_log else None
@staticmethod
def execute_scheduled_triggers(db: Session) -> List[TriggerLog]:
"""
Main execution function that evaluates and executes all scheduled triggers.
This function should be called periodically (e.g., every minute) by a scheduler.
Args:
db: Database session
Returns:
List of TriggerLog entries for executed triggers
"""
logs: List[TriggerLog] = []
current_time = datetime.now(timezone.utc)
# Get all active schedule-type triggers
triggers = db.query(Trigger).filter(
Trigger.trigger_type == "schedule",
Trigger.is_active == True,
).all()
logger.info(f"Evaluating {len(triggers)} scheduled triggers at {current_time}")
for trigger in triggers:
try:
# Get last execution time
last_execution = TriggerSchedulerService.get_last_execution_time(db, trigger.id)
# Check if trigger should fire
if TriggerSchedulerService.should_trigger(trigger, current_time, last_execution):
logger.info(f"Executing scheduled trigger: {trigger.name} (ID: {trigger.id})")
log = TriggerSchedulerService._execute_trigger(db, trigger)
logs.append(log)
except Exception as e:
logger.error(f"Error evaluating trigger {trigger.id}: {e}")
# Log the error
error_log = TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status="failed",
details={"error_type": type(e).__name__},
error_message=str(e),
)
logs.append(error_log)
if logs:
db.commit()
logger.info(f"Executed {len(logs)} scheduled triggers")
return logs
@staticmethod
def _execute_trigger(db: Session, trigger: Trigger) -> TriggerLog:
"""
Execute a scheduled trigger's actions.
Args:
db: Database session
trigger: The trigger to execute
Returns:
TriggerLog entry for this execution
"""
actions = trigger.actions if isinstance(trigger.actions, list) else [trigger.actions]
executed_actions = []
error_message = None
try:
for action in actions:
action_type = action.get("type")
if action_type == "notify":
TriggerSchedulerService._execute_notify_action(db, action, trigger)
executed_actions.append({"type": action_type, "status": "success"})
# Add more action types here as needed
status = "success"
except Exception as e:
status = "failed"
error_message = str(e)
executed_actions.append({"type": "error", "message": str(e)})
logger.error(f"Error executing trigger {trigger.id} actions: {e}")
return TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status=status,
details={
"trigger_name": trigger.name,
"trigger_type": "schedule",
"cron_expression": trigger.conditions.get("cron_expression"),
"actions_executed": executed_actions,
},
error_message=error_message,
)
@staticmethod
def _execute_notify_action(db: Session, action: Dict[str, Any], trigger: Trigger) -> None:
"""
Execute a notify action for a scheduled trigger.
Args:
db: Database session
action: The action configuration
trigger: The parent trigger
"""
target = action.get("target", "project_owner")
template = action.get("template", "Scheduled trigger '{trigger_name}' has fired")
# For scheduled triggers, we typically notify project-level users
project = trigger.project
if not project:
logger.warning(f"Trigger {trigger.id} has no associated project")
return
target_user_id = TriggerSchedulerService._resolve_target(project, target)
if not target_user_id:
logger.debug(f"No target user resolved for trigger {trigger.id} with target '{target}'")
return
# Format message with variables
message = TriggerSchedulerService._format_template(template, trigger, project)
NotificationService.create_notification(
db=db,
user_id=target_user_id,
notification_type="scheduled_trigger",
reference_type="trigger",
reference_id=trigger.id,
title=f"Scheduled: {trigger.name}",
message=message,
)
@staticmethod
def _resolve_target(project: Project, target: str) -> Optional[str]:
"""
Resolve notification target to user ID.
Args:
project: The project context
target: Target specification (e.g., "project_owner", "user:<id>")
Returns:
User ID or None
"""
if target == "project_owner":
return project.owner_id
elif target.startswith("user:"):
return target.split(":", 1)[1]
return None
@staticmethod
def _format_template(template: str, trigger: Trigger, project: Project) -> str:
"""
Format message template with trigger/project variables.
Args:
template: Template string with {variable} placeholders
trigger: The trigger context
project: The project context
Returns:
Formatted message string
"""
replacements = {
"{trigger_name}": trigger.name,
"{trigger_id}": trigger.id,
"{project_name}": project.title if project else "Unknown",
"{project_id}": project.id if project else "Unknown",
}
result = template
for key, value in replacements.items():
result = result.replace(key, str(value))
return result
@staticmethod
def _log_execution(
db: Session,
trigger: Trigger,
status: str,
details: Optional[Dict[str, Any]] = None,
error_message: Optional[str] = None,
task_id: Optional[str] = None,
) -> TriggerLog:
"""
Create a trigger execution log entry.
Args:
db: Database session
trigger: The trigger that was executed
status: Execution status ("success" or "failed")
details: Optional execution details
error_message: Optional error message if failed
task_id: Optional task ID for deadline reminders
Returns:
The created TriggerLog entry
"""
log = TriggerLog(
id=str(uuid.uuid4()),
trigger_id=trigger.id,
task_id=task_id,
status=status,
details=details,
error_message=error_message,
)
db.add(log)
return log
# =========================================================================
# Deadline Reminder Methods
# =========================================================================
@staticmethod
def execute_deadline_reminders(db: Session) -> List[TriggerLog]:
"""
Check all deadline reminder triggers and send notifications for tasks
that are within N days of their due date.
Each task only receives one reminder per trigger configuration.
Args:
db: Database session
Returns:
List of TriggerLog entries for sent reminders
"""
logs: List[TriggerLog] = []
current_time = datetime.now(timezone.utc)
today = current_time.date()
# Get all active schedule triggers with deadline_reminder_days
triggers = db.query(Trigger).filter(
Trigger.trigger_type == "schedule",
Trigger.is_active == True,
).all()
# Filter triggers that have deadline_reminder_days configured
deadline_triggers = [
t for t in triggers
if t.conditions and t.conditions.get("deadline_reminder_days") is not None
]
if not deadline_triggers:
return logs
logger.info(f"Evaluating {len(deadline_triggers)} deadline reminder triggers")
for trigger in deadline_triggers:
try:
reminder_days = trigger.conditions.get("deadline_reminder_days")
if not isinstance(reminder_days, int) or reminder_days < 1:
continue
# Calculate the target date range
# We want to find tasks whose due_date is exactly N days from today
target_date = today + timedelta(days=reminder_days)
# Get tasks in this project that:
# 1. Have a due_date matching the target date
# 2. Are not deleted
# 3. Have not already received a reminder for this trigger
tasks = TriggerSchedulerService._get_tasks_for_deadline_reminder(
db, trigger, target_date
)
for task in tasks:
try:
log = TriggerSchedulerService._send_deadline_reminder(
db, trigger, task, reminder_days
)
logs.append(log)
except Exception as e:
logger.error(
f"Error sending deadline reminder for task {task.id}: {e}"
)
error_log = TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status="failed",
details={
"trigger_type": DEADLINE_REMINDER_LOG_TYPE,
"task_id": task.id,
"reminder_days": reminder_days,
},
error_message=str(e),
task_id=task.id,
)
logs.append(error_log)
except Exception as e:
logger.error(f"Error processing deadline trigger {trigger.id}: {e}")
if logs:
db.commit()
logger.info(f"Processed {len(logs)} deadline reminders")
return logs
@staticmethod
def _get_tasks_for_deadline_reminder(
db: Session,
trigger: Trigger,
target_date,
) -> List[Task]:
"""
Get tasks that need deadline reminders for a specific trigger.
Args:
db: Database session
trigger: The deadline reminder trigger
target_date: The date that matches (today + N days)
Returns:
List of tasks that need reminders
"""
# Get IDs of tasks that already received reminders for this trigger
already_notified = db.query(TriggerLog.task_id).filter(
TriggerLog.trigger_id == trigger.id,
TriggerLog.status == "success",
TriggerLog.task_id.isnot(None),
).all()
notified_task_ids: Set[str] = {t[0] for t in already_notified if t[0]}
# Use date range comparison for cross-database compatibility
# target_date is a date object, we need to find tasks due on that date
target_start = datetime.combine(target_date, datetime.min.time()).replace(tzinfo=timezone.utc)
target_end = datetime.combine(target_date, datetime.max.time()).replace(tzinfo=timezone.utc)
# Query tasks matching criteria
tasks = db.query(Task).filter(
Task.project_id == trigger.project_id,
Task.is_deleted == False,
Task.due_date.isnot(None),
Task.due_date >= target_start,
Task.due_date <= target_end,
).all()
# Filter out tasks that already received reminders
return [t for t in tasks if t.id not in notified_task_ids]
@staticmethod
def _send_deadline_reminder(
db: Session,
trigger: Trigger,
task: Task,
reminder_days: int,
) -> TriggerLog:
"""
Send a deadline reminder notification for a task.
Args:
db: Database session
trigger: The trigger configuration
task: The task approaching its deadline
reminder_days: Number of days before deadline
Returns:
TriggerLog entry for this reminder
"""
actions = trigger.actions if isinstance(trigger.actions, list) else [trigger.actions]
executed_actions = []
error_message = None
try:
for action in actions:
action_type = action.get("type")
if action_type == "notify":
TriggerSchedulerService._execute_deadline_notify_action(
db, action, trigger, task, reminder_days
)
executed_actions.append({"type": action_type, "status": "success"})
status = "success"
except Exception as e:
status = "failed"
error_message = str(e)
executed_actions.append({"type": "error", "message": str(e)})
logger.error(f"Error executing deadline reminder for task {task.id}: {e}")
return TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status=status,
details={
"trigger_name": trigger.name,
"trigger_type": DEADLINE_REMINDER_LOG_TYPE,
"reminder_days": reminder_days,
"task_title": task.title,
"due_date": str(task.due_date),
"actions_executed": executed_actions,
},
error_message=error_message,
task_id=task.id,
)
@staticmethod
def _execute_deadline_notify_action(
db: Session,
action: Dict[str, Any],
trigger: Trigger,
task: Task,
reminder_days: int,
) -> None:
"""
Execute a notify action for a deadline reminder.
Args:
db: Database session
action: The action configuration
trigger: The parent trigger
task: The task with approaching deadline
reminder_days: Days until deadline
"""
target = action.get("target", "assignee")
template = action.get(
"template",
"Task '{task_title}' is due in {reminder_days} days"
)
# Resolve target user
target_user_id = TriggerSchedulerService._resolve_deadline_target(task, target)
if not target_user_id:
logger.debug(
f"No target user resolved for deadline reminder, task {task.id}, target '{target}'"
)
return
# Format message with variables
message = TriggerSchedulerService._format_deadline_template(
template, trigger, task, reminder_days
)
NotificationService.create_notification(
db=db,
user_id=target_user_id,
notification_type="deadline_reminder",
reference_type="task",
reference_id=task.id,
title=f"Deadline Reminder: {task.title}",
message=message,
)
@staticmethod
def _resolve_deadline_target(task: Task, target: str) -> Optional[str]:
"""
Resolve notification target for deadline reminders.
Args:
task: The task context
target: Target specification
Returns:
User ID or None
"""
if target == "assignee":
return task.assignee_id
elif target == "creator":
return task.created_by
elif target == "project_owner":
return task.project.owner_id if task.project else None
elif target.startswith("user:"):
return target.split(":", 1)[1]
return None
@staticmethod
def _format_deadline_template(
template: str,
trigger: Trigger,
task: Task,
reminder_days: int,
) -> str:
"""
Format message template for deadline reminders.
Args:
template: Template string with {variable} placeholders
trigger: The trigger context
task: The task context
reminder_days: Days until deadline
Returns:
Formatted message string
"""
project = trigger.project
replacements = {
"{trigger_name}": trigger.name,
"{trigger_id}": trigger.id,
"{task_title}": task.title,
"{task_id}": task.id,
"{due_date}": str(task.due_date.date()) if task.due_date else "N/A",
"{reminder_days}": str(reminder_days),
"{project_name}": project.title if project else "Unknown",
"{project_id}": project.id if project else "Unknown",
}
result = template
for key, value in replacements.items():
result = result.replace(key, str(value))
return result
@staticmethod
def evaluate_schedule_triggers(db: Session) -> List[TriggerLog]:
"""
Main entry point for evaluating all schedule triggers.
This method runs both cron-based triggers and deadline reminders.
Should be called every minute by the scheduler.
Args:
db: Database session
Returns:
Combined list of TriggerLog entries from all evaluations
"""
all_logs: List[TriggerLog] = []
# Execute cron-based schedule triggers
cron_logs = TriggerSchedulerService.execute_scheduled_triggers(db)
all_logs.extend(cron_logs)
# Execute deadline reminder triggers
deadline_logs = TriggerSchedulerService.execute_deadline_reminders(db)
all_logs.extend(deadline_logs)
return all_logs

View File: backend/app/services/watermark_service.py (new file)

@@ -0,0 +1,327 @@
"""
Watermark Service for MED-009: Dynamic Watermark for Downloads
This service provides functions to add watermarks to image and PDF files
containing user information for audit and tracking purposes.
Watermark content includes:
- User name
- Employee ID (or email as fallback)
- Download timestamp
"""
import io
import logging
import math
from datetime import datetime
from typing import Optional, Tuple
import fitz # PyMuPDF
from PIL import Image, ImageDraw, ImageFont
logger = logging.getLogger(__name__)
class WatermarkService:
"""Service for adding watermarks to downloaded files."""
# Watermark configuration
WATERMARK_OPACITY = 0.3 # 30% opacity for semi-transparency
WATERMARK_ANGLE = -45 # Diagonal angle in degrees
WATERMARK_FONT_SIZE = 24
WATERMARK_COLOR = (128, 128, 128) # Gray color for watermark
WATERMARK_SPACING = 200 # Spacing between repeated watermarks
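    # Note: at 30% opacity the image watermark alpha is int(255 * 0.3) = 76 on
    # PIL's 0-255 scale; the PDF path passes 0.3 straight to PyMuPDF as fill_opacity.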
@staticmethod
def _format_watermark_text(
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> str:
"""
Format the watermark text with user information.
Args:
user_name: Name of the user
employee_id: Employee ID (工號) - uses 'N/A' if not provided
download_time: Time of download (defaults to now)
Returns:
Formatted watermark text
"""
if download_time is None:
download_time = datetime.now()
time_str = download_time.strftime("%Y-%m-%d %H:%M:%S")
emp_id = employee_id if employee_id else "N/A"
return f"{user_name} ({emp_id}) - {time_str}"
@staticmethod
def _get_font(size: int = 24) -> ImageFont.FreeTypeFont:
"""Get a font for the watermark. Falls back to default if custom font not available."""
try:
# Try to use a common system font (macOS)
return ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size)
except (OSError, IOError):
try:
# Try Linux font
return ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", size)
except (OSError, IOError):
try:
# Try Windows font
return ImageFont.truetype("C:/Windows/Fonts/arial.ttf", size)
except (OSError, IOError):
# Fall back to default bitmap font
return ImageFont.load_default()
def add_image_watermark(
self,
image_bytes: bytes,
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> Tuple[bytes, str]:
"""
Add a semi-transparent diagonal watermark to an image.
Args:
image_bytes: The original image as bytes
user_name: Name of the user downloading the file
employee_id: Employee ID of the user (工號)
download_time: Time of download (defaults to now)
Returns:
Tuple of (watermarked image bytes, output format)
Raises:
Exception: If watermarking fails
"""
# Open the image
original = Image.open(io.BytesIO(image_bytes))
# Convert to RGBA if necessary for transparency support
if original.mode != 'RGBA':
image = original.convert('RGBA')
else:
image = original.copy()
# Create a transparent overlay for the watermark
watermark_layer = Image.new('RGBA', image.size, (255, 255, 255, 0))
draw = ImageDraw.Draw(watermark_layer)
# Get watermark text
watermark_text = self._format_watermark_text(user_name, employee_id, download_time)
# Get font
font = self._get_font(self.WATERMARK_FONT_SIZE)
# Calculate text size
bbox = draw.textbbox((0, 0), watermark_text, font=font)
text_width = bbox[2] - bbox[0]
text_height = bbox[3] - bbox[1]
# Create a larger canvas for the rotated text pattern
diagonal = int(math.sqrt(image.size[0]**2 + image.size[1]**2))
pattern_size = diagonal * 2
# Create pattern layer
pattern = Image.new('RGBA', (pattern_size, pattern_size), (255, 255, 255, 0))
pattern_draw = ImageDraw.Draw(pattern)
# Draw repeated watermark text across the pattern
opacity = int(255 * self.WATERMARK_OPACITY)
watermark_color = (*self.WATERMARK_COLOR, opacity)
y = 0
row = 0
while y < pattern_size:
x = -text_width if row % 2 else 0 # Offset alternate rows
while x < pattern_size:
pattern_draw.text((x, y), watermark_text, font=font, fill=watermark_color)
x += text_width + self.WATERMARK_SPACING
y += text_height + self.WATERMARK_SPACING
row += 1
# Rotate the pattern
rotated_pattern = pattern.rotate(
self.WATERMARK_ANGLE,
expand=False,
center=(pattern_size // 2, pattern_size // 2)
)
# Crop to original image size (centered)
crop_x = (pattern_size - image.size[0]) // 2
crop_y = (pattern_size - image.size[1]) // 2
cropped_pattern = rotated_pattern.crop((
crop_x, crop_y,
crop_x + image.size[0],
crop_y + image.size[1]
))
# Composite the watermark onto the image
watermarked = Image.alpha_composite(image, cropped_pattern)
# Determine output format
original_format = original.format or 'PNG'
if original_format.upper() == 'JPEG':
# Convert back to RGB for JPEG (no alpha channel)
watermarked = watermarked.convert('RGB')
output_format = 'JPEG'
else:
output_format = 'PNG'
# Save to bytes
output = io.BytesIO()
watermarked.save(output, format=output_format, quality=95)
output.seek(0)
logger.info(
f"Image watermark applied successfully for user {user_name} "
f"(employee_id: {employee_id})"
)
return output.getvalue(), output_format.lower()
def add_pdf_watermark(
self,
pdf_bytes: bytes,
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> bytes:
"""
Add a semi-transparent diagonal watermark to a PDF using PyMuPDF.
Args:
pdf_bytes: The original PDF as bytes
user_name: Name of the user downloading the file
employee_id: Employee ID of the user (工號)
download_time: Time of download (defaults to now)
Returns:
Watermarked PDF as bytes
Raises:
Exception: If watermarking fails
"""
# Get watermark text
watermark_text = self._format_watermark_text(user_name, employee_id, download_time)
# Open the PDF with PyMuPDF
doc = fitz.open(stream=pdf_bytes, filetype="pdf")
page_count = len(doc)
# Process each page
for page_num in range(page_count):
page = doc[page_num]
page_rect = page.rect
page_width = page_rect.width
page_height = page_rect.height
# Calculate text width for spacing estimation
text_length = fitz.get_text_length(
watermark_text,
fontname="helv",
fontsize=self.WATERMARK_FONT_SIZE
)
# Calculate diagonal for watermark coverage
diagonal = math.sqrt(page_width**2 + page_height**2)
# Set watermark color with opacity (gray with 30% opacity)
color = (0.5, 0.5, 0.5) # Gray
# Calculate rotation angle in radians
angle_rad = math.radians(self.WATERMARK_ANGLE)
# Draw watermark pattern using shape with proper rotation
# We use insert_textbox with a morph transform for rotation
spacing_x = text_length + self.WATERMARK_SPACING
spacing_y = self.WATERMARK_FONT_SIZE + self.WATERMARK_SPACING
# Create watermark by drawing rotated text lines
# We'll use a simpler approach: draw text and apply rotation via morph
shape = page.new_shape()
# Calculate grid positions to cover the page when rotated
center = fitz.Point(page_width / 2, page_height / 2)
# Calculate start and end points for coverage
start = -diagonal
end = diagonal * 2
y = start
row = 0
while y < end:
x = start + (spacing_x / 2 if row % 2 else 0)
while x < end:
# Create text position
text_point = fitz.Point(x, y)
# Apply rotation around center
cos_a = math.cos(angle_rad)
sin_a = math.sin(angle_rad)
# Translate to origin, rotate, translate back
rx = text_point.x - center.x
ry = text_point.y - center.y
new_x = rx * cos_a - ry * sin_a + center.x
new_y = rx * sin_a + ry * cos_a + center.y
# Check if the rotated point is within page bounds (with margin)
margin = 50
if (-margin <= new_x <= page_width + margin and
-margin <= new_y <= page_height + margin):
# Insert text using shape with rotation via morph
text_rect = fitz.Rect(new_x, new_y, new_x + text_length + 10, new_y + 30)
# Use insert_textbox with morph for rotation
pivot = fitz.Point(new_x, new_y)
morph = (pivot, fitz.Matrix(1, 0, 0, 1, 0, 0).prerotate(self.WATERMARK_ANGLE))
shape.insert_textbox(
text_rect,
watermark_text,
fontname="helv",
fontsize=self.WATERMARK_FONT_SIZE,
color=color,
fill_opacity=self.WATERMARK_OPACITY,
morph=morph
)
x += spacing_x
y += spacing_y
row += 1
# Commit the shape drawings
shape.commit(overlay=True)
# Save to bytes
output = io.BytesIO()
doc.save(output)
doc.close()
output.seek(0)
logger.info(
f"PDF watermark applied successfully for user {user_name} "
f"(employee_id: {employee_id}), pages: {page_count}"
)
return output.getvalue()
def is_supported_image(self, mime_type: str) -> bool:
"""Check if the mime type is a supported image format."""
supported_types = {'image/png', 'image/jpeg', 'image/jpg'}
return mime_type.lower() in supported_types
def is_supported_pdf(self, mime_type: str) -> bool:
"""Check if the mime type is a PDF."""
return mime_type.lower() == 'application/pdf'
def supports_watermark(self, mime_type: str) -> bool:
"""Check if the file type supports watermarking."""
return self.is_supported_image(mime_type) or self.is_supported_pdf(mime_type)
# Singleton instance
watermark_service = WatermarkService()
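# Example usage (sketch; png_bytes/pdf_bytes are assumed to hold valid file data):
#   img_out, fmt = watermark_service.add_image_watermark(png_bytes, "Alice Chen", "A12345")
#   pdf_out = watermark_service.add_pdf_watermark(pdf_bytes, "Alice Chen", "A12345")
#   watermark_service.supports_watermark("image/png")  # -> True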

View File: workload service (get_workload_heatmap)

@@ -184,12 +184,17 @@ def get_workload_heatmap(
     Returns:
         List of UserWorkloadSummary objects
     """
+    from datetime import datetime
+    from collections import defaultdict
+
     if week_start is None:
         week_start = get_current_week_start()
     else:
         # Normalize to week start (Monday)
-        week_start = get_week_bounds(week_start)[0]
+        week_start, week_end = get_week_bounds(week_start)

     # Build user query
     query = db.query(User).filter(User.is_active == True)
@@ -201,10 +206,58 @@
     users = query.options(joinedload(User.department)).all()

-    # Calculate workload for each user
+    if not users:
+        return []
+
+    # Batch query: fetch all tasks for all users in one query
+    user_id_list = [user.id for user in users]
+    week_start_dt = datetime.combine(week_start, datetime.min.time())
+    week_end_dt = datetime.combine(week_end, datetime.max.time())
+
+    all_tasks = (
+        db.query(Task)
+        .join(Task.status, isouter=True)
+        .filter(
+            Task.assignee_id.in_(user_id_list),
+            Task.due_date >= week_start_dt,
+            Task.due_date <= week_end_dt,
+            # Exclude completed tasks
+            (TaskStatus.is_done == False) | (Task.status_id == None),
+        )
+        .all()
+    )
+
+    # Group tasks by assignee_id in memory
+    tasks_by_user: dict = defaultdict(list)
+    for task in all_tasks:
+        tasks_by_user[task.assignee_id].append(task)
+
+    # Calculate workload for each user using pre-fetched tasks
     results = []
     for user in users:
-        summary = calculate_user_workload(db, user, week_start)
+        user_tasks = tasks_by_user.get(user.id, [])
+
+        # Calculate allocated hours from original_estimate
+        allocated_hours = Decimal("0")
+        for task in user_tasks:
+            if task.original_estimate:
+                allocated_hours += task.original_estimate
+
+        capacity_hours = Decimal(str(user.capacity)) if user.capacity else Decimal("40")
+        load_percentage = calculate_load_percentage(allocated_hours, capacity_hours)
+        load_level = determine_load_level(load_percentage)
+
+        summary = UserWorkloadSummary(
+            user_id=user.id,
+            user_name=user.name,
+            department_id=user.department_id,
+            department_name=user.department.name if user.department else None,
+            capacity_hours=capacity_hours,
+            allocated_hours=allocated_hours,
+            load_percentage=load_percentage,
+            load_level=load_level,
+            task_count=len(user_tasks),
+        )
         results.append(summary)
     return results
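
For intuition, a worked example of the load math above; this assumes `calculate_load_percentage` is `allocated / capacity * 100`, which the diff does not show:

```python
from decimal import Decimal

allocated_hours = Decimal("30")  # sum of original_estimate across the user's open tasks
capacity_hours = Decimal("40")   # user.capacity, defaulting to 40 when unset
load_percentage = allocated_hours / capacity_hours * Decimal("100")
print(load_percentage)  # 75
```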