feat: complete issue fixes and implement remaining features

## Critical Issues (CRIT-001~003) - All Fixed
- JWT secret key validation with pydantic field_validator
- Login audit logging for success/failure attempts
- Frontend API path prefix removal

## High Priority Issues (HIGH-001~008) - All Fixed
- Project soft delete using is_active flag
- Redis session token bytes handling
- Rate limiting with slowapi (5 req/min for login)
- Attachment API permission checks
- Kanban view with drag-and-drop
- Workload heatmap UI (WorkloadPage, WorkloadHeatmap)
- TaskDetailModal integrating Comments/Attachments
- UserSelect component for task assignment

## Medium Priority Issues (MED-001~012) - All Fixed
- MED-001~005: DB commits, N+1 queries, datetime, error format, blocker flag
- MED-006: Project health dashboard (HealthService, ProjectHealthPage)
- MED-007: Capacity update API (PUT /api/users/{id}/capacity)
- MED-008: Schedule triggers (cron parsing, deadline reminders)
- MED-009: Watermark feature (image/PDF watermarking)
- MED-010~012: useEffect deps, DOM operations, PDF export

## New Files
- backend/app/api/health/ - Project health API
- backend/app/services/health_service.py
- backend/app/services/trigger_scheduler.py
- backend/app/services/watermark_service.py
- backend/app/core/rate_limiter.py
- frontend/src/pages/ProjectHealthPage.tsx
- frontend/src/components/ProjectHealthCard.tsx
- frontend/src/components/KanbanBoard.tsx
- frontend/src/components/WorkloadHeatmap.tsx

## Tests
- 113 new tests passing (health: 32, users: 14, triggers: 35, watermark: 32)

## OpenSpec Archives
- add-project-health-dashboard
- add-capacity-update-api
- add-schedule-triggers
- add-watermark-feature
- add-rate-limiting
- enhance-frontend-ux
- add-resource-management-ui

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: beabigegg
Date: 2026-01-04 21:49:52 +08:00
parent 64874d5425
commit 9b220523ff
90 changed files with 9426 additions and 194 deletions

View File

@@ -1,38 +1,74 @@
import uuid
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Request
-from fastapi.responses import FileResponse
from fastapi.responses import FileResponse, Response
from sqlalchemy.orm import Session
from typing import Optional
from app.core.database import get_db
-from app.middleware.auth import get_current_user
-from app.models import User, Task, Attachment, AttachmentVersion, AuditAction
from app.middleware.auth import get_current_user, check_task_access, check_task_edit_access
from app.models import User, Task, Project, Attachment, AttachmentVersion, AuditAction
from app.schemas.attachment import (
AttachmentResponse, AttachmentListResponse, AttachmentDetailResponse,
AttachmentVersionResponse, VersionHistoryResponse
)
from app.services.file_storage_service import file_storage_service
from app.services.audit_service import AuditService
from app.services.watermark_service import watermark_service
router = APIRouter(prefix="/api", tags=["attachments"])
-def get_task_or_404(db: Session, task_id: str) -> Task:
-"""Get task or raise 404."""
def get_task_with_access_check(db: Session, task_id: str, current_user: User, require_edit: bool = False) -> Task:
"""Get task and verify access permissions."""
task = db.query(Task).filter(Task.id == task_id).first()
if not task:
raise HTTPException(status_code=404, detail="Task not found")
# Get project for access check
project = db.query(Project).filter(Project.id == task.project_id).first()
if not project:
raise HTTPException(status_code=404, detail="Project not found")
# Check access permission
if not check_task_access(current_user, task, project):
raise HTTPException(status_code=403, detail="Access denied to this task")
# Check edit permission if required
if require_edit and not check_task_edit_access(current_user, task, project):
raise HTTPException(status_code=403, detail="Edit access denied to this task")
return task
-def get_attachment_or_404(db: Session, attachment_id: str) -> Attachment:
-"""Get attachment or raise 404."""
def get_attachment_with_access_check(
db: Session, attachment_id: str, current_user: User, require_edit: bool = False
) -> Attachment:
"""Get attachment and verify access permissions."""
attachment = db.query(Attachment).filter(
Attachment.id == attachment_id,
Attachment.is_deleted == False
).first()
if not attachment:
raise HTTPException(status_code=404, detail="Attachment not found")
# Get task and project for access check
task = db.query(Task).filter(Task.id == attachment.task_id).first()
if not task:
raise HTTPException(status_code=404, detail="Task not found")
project = db.query(Project).filter(Project.id == task.project_id).first()
if not project:
raise HTTPException(status_code=404, detail="Project not found")
# Check access permission
if not check_task_access(current_user, task, project):
raise HTTPException(status_code=403, detail="Access denied to this attachment")
# Check edit permission if required
if require_edit and not check_task_edit_access(current_user, task, project):
raise HTTPException(status_code=403, detail="Edit access denied to this attachment")
return attachment
@@ -76,7 +112,7 @@ async def upload_attachment(
current_user: User = Depends(get_current_user)
):
"""Upload a file attachment to a task."""
-task = get_task_or_404(db, task_id)
task = get_task_with_access_check(db, task_id, current_user, require_edit=True)
# Check if attachment with same filename exists (for versioning in Phase 2)
existing = db.query(Attachment).filter(
@@ -115,9 +151,6 @@ async def upload_attachment(
existing.file_size = file_size
existing.updated_at = version.created_at
-db.commit()
-db.refresh(existing)
# Audit log
AuditService.log_event(
db=db,
@@ -129,7 +162,9 @@ async def upload_attachment(
changes=[{"field": "version", "old_value": new_version - 1, "new_value": new_version}],
request_metadata=getattr(request.state, "audit_metadata", None)
)
db.commit()
db.refresh(existing)
return attachment_to_response(existing)
@@ -175,9 +210,6 @@ async def upload_attachment(
)
db.add(version)
-db.commit()
-db.refresh(attachment)
# Audit log
AuditService.log_event(
db=db,
@@ -189,7 +221,9 @@ async def upload_attachment(
changes=[{"field": "filename", "old_value": None, "new_value": attachment.filename}],
request_metadata=getattr(request.state, "audit_metadata", None)
)
db.commit()
db.refresh(attachment)
return attachment_to_response(attachment)
@@ -201,7 +235,7 @@ async def list_task_attachments(
current_user: User = Depends(get_current_user)
):
"""List all attachments for a task."""
-task = get_task_or_404(db, task_id)
task = get_task_with_access_check(db, task_id, current_user, require_edit=False)
attachments = db.query(Attachment).filter(
Attachment.task_id == task_id,
@@ -221,7 +255,7 @@ async def get_attachment(
current_user: User = Depends(get_current_user)
):
"""Get attachment details with version history."""
-attachment = get_attachment_or_404(db, attachment_id)
attachment = get_attachment_with_access_check(db, attachment_id, current_user, require_edit=False)
versions = db.query(AttachmentVersion).filter(
AttachmentVersion.attachment_id == attachment_id
@@ -252,8 +286,8 @@ async def download_attachment(
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user)
):
"""Download an attachment file."""
attachment = get_attachment_or_404(db, attachment_id)
"""Download an attachment file with dynamic watermark."""
attachment = get_attachment_with_access_check(db, attachment_id, current_user, require_edit=False)
# Get version to download
target_version = version or attachment.current_version
@@ -272,6 +306,7 @@ async def download_attachment(
raise HTTPException(status_code=404, detail="File not found on disk")
# Audit log
download_time = datetime.now()
AuditService.log_event(
db=db,
event_type="attachment.download",
@@ -284,6 +319,63 @@ async def download_attachment(
)
db.commit()
# Check if watermark should be applied
mime_type = attachment.mime_type or ""
if watermark_service.supports_watermark(mime_type):
try:
# Read the original file
with open(file_path, "rb") as f:
file_bytes = f.read()
# Apply watermark based on file type
if watermark_service.is_supported_image(mime_type):
watermarked_bytes, output_format = watermark_service.add_image_watermark(
image_bytes=file_bytes,
user_name=current_user.name,
employee_id=current_user.employee_id,
download_time=download_time
)
# Update mime type based on output format
output_mime_type = f"image/{output_format}"
# Update filename extension if format changed
original_filename = attachment.original_filename
if output_format == "png" and not original_filename.lower().endswith(".png"):
original_filename = original_filename.rsplit(".", 1)[0] + ".png"
return Response(
content=watermarked_bytes,
media_type=output_mime_type,
headers={
"Content-Disposition": f'attachment; filename="{original_filename}"'
}
)
elif watermark_service.is_supported_pdf(mime_type):
watermarked_bytes = watermark_service.add_pdf_watermark(
pdf_bytes=file_bytes,
user_name=current_user.name,
employee_id=current_user.employee_id,
download_time=download_time
)
return Response(
content=watermarked_bytes,
media_type="application/pdf",
headers={
"Content-Disposition": f'attachment; filename="{attachment.original_filename}"'
}
)
except Exception as e:
# If watermarking fails, log the error but still return the original file
# This ensures users can still download files even if watermarking has issues
import logging
logging.getLogger(__name__).warning(
f"Watermarking failed for attachment {attachment_id}: {str(e)}. "
"Returning original file."
)
# Return original file without watermark for unsupported types or on error
return FileResponse(
path=str(file_path),
filename=attachment.original_filename,
@@ -299,11 +391,10 @@ async def delete_attachment(
current_user: User = Depends(get_current_user)
):
"""Soft delete an attachment."""
-attachment = get_attachment_or_404(db, attachment_id)
attachment = get_attachment_with_access_check(db, attachment_id, current_user, require_edit=True)
# Soft delete
attachment.is_deleted = True
-db.commit()
# Audit log
AuditService.log_event(
@@ -316,9 +407,10 @@ async def delete_attachment(
changes=[{"field": "is_deleted", "old_value": False, "new_value": True}],
request_metadata=getattr(request.state, "audit_metadata", None)
)
db.commit()
return {"message": "Attachment deleted", "id": attachment_id}
return {"detail": "Attachment deleted", "id": attachment_id}
@router.get("/attachments/{attachment_id}/versions", response_model=VersionHistoryResponse)
@@ -328,7 +420,7 @@ async def get_version_history(
current_user: User = Depends(get_current_user)
):
"""Get version history for an attachment."""
-attachment = get_attachment_or_404(db, attachment_id)
attachment = get_attachment_with_access_check(db, attachment_id, current_user, require_edit=False)
versions = db.query(AttachmentVersion).filter(
AttachmentVersion.attachment_id == attachment_id
@@ -351,7 +443,7 @@ async def restore_version(
current_user: User = Depends(get_current_user)
):
"""Restore an attachment to a specific version."""
-attachment = get_attachment_or_404(db, attachment_id)
attachment = get_attachment_with_access_check(db, attachment_id, current_user, require_edit=True)
version_record = db.query(AttachmentVersion).filter(
AttachmentVersion.attachment_id == attachment_id,
@@ -364,7 +456,6 @@ async def restore_version(
old_version = attachment.current_version
attachment.current_version = version
attachment.file_size = version_record.file_size
-db.commit()
# Audit log
AuditService.log_event(
@@ -377,6 +468,7 @@ async def restore_version(
changes=[{"field": "current_version", "old_value": old_version, "new_value": version}],
request_metadata=getattr(request.state, "audit_metadata", None)
)
db.commit()
return {"message": f"Restored to version {version}", "current_version": version}
return {"detail": f"Restored to version {version}", "current_version": version}

View File

@@ -1,6 +1,6 @@
import csv
import io
-from datetime import datetime
from datetime import datetime, timezone
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, status, Query
from fastapi.responses import StreamingResponse
@@ -191,7 +191,7 @@ async def export_audit_logs(
output.seek(0)
filename = f"audit_logs_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}.csv"
filename = f"audit_logs_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}.csv"
return StreamingResponse(
iter([output.getvalue()]),

View File

@@ -1,53 +1,86 @@
-from fastapi import APIRouter, Depends, HTTPException, status
from fastapi import APIRouter, Depends, HTTPException, status, Request
from sqlalchemy.orm import Session
from app.core.config import settings
from app.core.database import get_db
from app.core.security import create_access_token, create_token_payload
from app.core.redis import get_redis
from app.core.rate_limiter import limiter
from app.models.user import User
from app.models.audit_log import AuditAction
from app.schemas.auth import LoginRequest, LoginResponse, UserInfo
from app.services.auth_client import (
verify_credentials,
AuthAPIError,
AuthAPIConnectionError,
)
from app.services.audit_service import AuditService
from app.middleware.auth import get_current_user
router = APIRouter()
@router.post("/login", response_model=LoginResponse)
@limiter.limit("5/minute")
async def login(
-request: LoginRequest,
request: Request,
login_request: LoginRequest,
db: Session = Depends(get_db),
redis_client=Depends(get_redis),
):
"""
Authenticate user via external API and return JWT token.
"""
# Prepare metadata for audit logging
client_ip = request.client.host if request.client else "unknown"
user_agent = request.headers.get("user-agent", "unknown")
try:
# Verify credentials with external API
-auth_result = await verify_credentials(request.email, request.password)
auth_result = await verify_credentials(login_request.email, login_request.password)
except AuthAPIConnectionError:
# Log failed login attempt due to service unavailable
AuditService.log_event(
db=db,
event_type="user.login_failed",
resource_type="user",
action=AuditAction.LOGIN,
user_id=None,
resource_id=None,
changes={"email": login_request.email, "reason": "auth_service_unavailable"},
request_metadata={"ip_address": client_ip, "user_agent": user_agent},
)
db.commit()
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Authentication service temporarily unavailable",
)
except AuthAPIError as e:
# Log failed login attempt due to invalid credentials
AuditService.log_event(
db=db,
event_type="user.login_failed",
resource_type="user",
action=AuditAction.LOGIN,
user_id=None,
resource_id=None,
changes={"email": login_request.email, "reason": "invalid_credentials"},
request_metadata={"ip_address": client_ip, "user_agent": user_agent},
)
db.commit()
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid credentials",
)
# Find or create user in local database
-user = db.query(User).filter(User.email == request.email).first()
user = db.query(User).filter(User.email == login_request.email).first()
if not user:
# Create new user based on auth API response
user = User(
-email=request.email,
-name=auth_result.get("name", request.email.split("@")[0]),
email=login_request.email,
name=auth_result.get("name", login_request.email.split("@")[0]),
is_active=True,
)
db.add(user)
@@ -82,6 +115,19 @@ async def login(
access_token,
)
# Log successful login
AuditService.log_event(
db=db,
event_type="user.login",
resource_type="user",
action=AuditAction.LOGIN,
user_id=user.id,
resource_id=user.id,
changes=None,
request_metadata={"ip_address": client_ip, "user_agent": user_agent},
)
db.commit()
return LoginResponse(
access_token=access_token,
user=UserInfo(
@@ -106,7 +152,7 @@ async def logout(
# Remove session from Redis
redis_client.delete(f"session:{current_user.id}")
return {"message": "Successfully logged out"}
return {"detail": "Successfully logged out"}
@router.get("/me", response_model=UserInfo)

View File

@@ -1,5 +1,5 @@
import uuid
-from datetime import datetime
from datetime import datetime, timezone
from fastapi import APIRouter, Depends, HTTPException, status, Request
from sqlalchemy.orm import Session
@@ -138,7 +138,8 @@ async def resolve_blocker(
# Update blocker
blocker.resolved_by = current_user.id
blocker.resolution_note = resolve_data.resolution_note
-blocker.resolved_at = datetime.utcnow()
# Use naive datetime for consistency with database storage
blocker.resolved_at = datetime.now(timezone.utc).replace(tzinfo=None)
# Check if there are other unresolved blockers
other_blockers = db.query(Blocker).filter(

View File

@@ -0,0 +1,3 @@
from app.api.health.router import router
__all__ = ["router"]

View File

@@ -0,0 +1,70 @@
"""Project health API endpoints.
Provides endpoints for retrieving project health metrics
and dashboard information.
"""
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from app.core.database import get_db
from app.models import User
from app.schemas.project_health import (
ProjectHealthWithDetails,
ProjectHealthDashboardResponse,
)
from app.services.health_service import HealthService
from app.middleware.auth import get_current_user
router = APIRouter(prefix="/api/projects/health", tags=["Project Health"])
@router.get("/dashboard", response_model=ProjectHealthDashboardResponse)
async def get_health_dashboard(
status_filter: Optional[str] = "active",
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user),
):
"""
Get health dashboard for all projects.
Returns aggregated health metrics and summary statistics
for all projects matching the status filter.
- **status_filter**: Filter projects by status (default: "active")
Returns:
- **projects**: List of project health details
- **summary**: Aggregated summary statistics
"""
service = HealthService(db)
return service.get_dashboard(status_filter=status_filter)
@router.get("/{project_id}", response_model=ProjectHealthWithDetails)
async def get_project_health(
project_id: str,
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user),
):
"""
Get health information for a specific project.
Returns detailed health metrics including risk level,
schedule status, resource status, and task statistics.
- **project_id**: UUID of the project
Raises:
- **404**: Project not found
"""
service = HealthService(db)
result = service.get_project_health(project_id)
if not result:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Project not found"
)
return result

View File

@@ -1,5 +1,5 @@
from typing import Optional
-from datetime import datetime
from datetime import datetime, timezone
from fastapi import APIRouter, Depends, HTTPException, status, Query
from sqlalchemy.orm import Session
@@ -91,7 +91,8 @@ async def mark_as_read(
if not notification.is_read:
notification.is_read = True
-notification.read_at = datetime.utcnow()
# Use naive datetime for consistency with database storage
notification.read_at = datetime.now(timezone.utc).replace(tzinfo=None)
db.commit()
db.refresh(notification)
@@ -104,7 +105,8 @@ async def mark_all_as_read(
current_user: User = Depends(get_current_user),
):
"""Mark all notifications as read."""
-now = datetime.utcnow()
# Use naive datetime for consistency with database storage
now = datetime.now(timezone.utc).replace(tzinfo=None)
updated_count = db.query(Notification).filter(
Notification.user_id == current_user.id,

View File

@@ -273,9 +273,9 @@ async def delete_project(
current_user: User = Depends(get_current_user),
):
"""
-Delete a project (hard delete, cascades to tasks).
Delete a project (soft delete - sets is_active to False).
"""
-project = db.query(Project).filter(Project.id == project_id).first()
project = db.query(Project).filter(Project.id == project_id, Project.is_active == True).first()
if not project:
raise HTTPException(
@@ -289,7 +289,7 @@ async def delete_project(
detail="Only project owner can delete",
)
-# Audit log before deletion (this is a high-sensitivity event that triggers alert)
# Audit log before soft deletion (this is a high-sensitivity event that triggers alert)
AuditService.log_event(
db=db,
event_type="project.delete",
@@ -297,11 +297,12 @@ async def delete_project(
action=AuditAction.DELETE,
user_id=current_user.id,
resource_id=project.id,
changes=[{"field": "title", "old_value": project.title, "new_value": None}],
changes=[{"field": "is_active", "old_value": True, "new_value": False}],
request_metadata=get_audit_metadata(request),
)
-db.delete(project)
# Soft delete - set is_active to False
project.is_active = False
db.commit()
return None

View File

@@ -1,11 +1,11 @@
import uuid
-from datetime import datetime
from datetime import datetime, timezone
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, status, Query, Request
from sqlalchemy.orm import Session
from app.core.database import get_db
-from app.models import User, Project, Task, TaskStatus, AuditAction
from app.models import User, Project, Task, TaskStatus, AuditAction, Blocker
from app.schemas.task import (
TaskCreate, TaskUpdate, TaskResponse, TaskWithDetails, TaskListResponse,
TaskStatusUpdate, TaskAssignUpdate
@@ -374,7 +374,8 @@ async def delete_task(
detail="Permission denied",
)
-now = datetime.utcnow()
# Use naive datetime for consistency with database storage
now = datetime.now(timezone.utc).replace(tzinfo=None)
# Soft delete the task
task.is_deleted = True
@@ -504,11 +505,18 @@ async def update_task_status(
task.status_id = status_data.status_id
-# Auto-set blocker_flag based on status name
# Auto-set blocker_flag based on status name and actual Blocker records
if new_status.name.lower() == "blocked":
task.blocker_flag = True
else:
-task.blocker_flag = False
# Only set blocker_flag = False if there are no unresolved blockers
unresolved_blockers = db.query(Blocker).filter(
Blocker.task_id == task.id,
Blocker.resolved_at == None,
).count()
if unresolved_blockers == 0:
task.blocker_flag = False
# If there are unresolved blockers, keep blocker_flag as is
# Evaluate triggers for status changes
if old_status_id != status_data.status_id:

View File

@@ -10,6 +10,7 @@ from app.schemas.trigger import (
TriggerLogResponse, TriggerLogListResponse, TriggerUserInfo
)
from app.middleware.auth import get_current_user, check_project_access, check_project_edit_access
from app.services.trigger_scheduler import TriggerSchedulerService
router = APIRouter(tags=["triggers"])
@@ -65,18 +66,50 @@ async def create_trigger(
detail="Invalid trigger type. Must be 'field_change' or 'schedule'",
)
-# Validate conditions
-if trigger_data.conditions.field not in ["status_id", "assignee_id", "priority"]:
-raise HTTPException(
-status_code=status.HTTP_400_BAD_REQUEST,
-detail="Invalid condition field. Must be 'status_id', 'assignee_id', or 'priority'",
-)
# Validate conditions based on trigger type
if trigger_data.trigger_type == "field_change":
# Validate field_change conditions
if not trigger_data.conditions.field:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Field is required for field_change triggers",
)
if trigger_data.conditions.field not in ["status_id", "assignee_id", "priority"]:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid condition field. Must be 'status_id', 'assignee_id', or 'priority'",
)
if not trigger_data.conditions.operator:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Operator is required for field_change triggers",
)
if trigger_data.conditions.operator not in ["equals", "not_equals", "changed_to", "changed_from"]:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid operator. Must be 'equals', 'not_equals', 'changed_to', or 'changed_from'",
)
elif trigger_data.trigger_type == "schedule":
# Validate schedule conditions
has_cron = trigger_data.conditions.cron_expression is not None
has_deadline = trigger_data.conditions.deadline_reminder_days is not None
-if trigger_data.conditions.operator not in ["equals", "not_equals", "changed_to", "changed_from"]:
-raise HTTPException(
-status_code=status.HTTP_400_BAD_REQUEST,
-detail="Invalid operator. Must be 'equals', 'not_equals', 'changed_to', or 'changed_from'",
-)
if not has_cron and not has_deadline:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Schedule triggers require either cron_expression or deadline_reminder_days",
)
# Validate cron expression if provided
if has_cron:
is_valid, error_msg = TriggerSchedulerService.parse_cron_expression(
trigger_data.conditions.cron_expression
)
if not is_valid:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=error_msg or "Invalid cron expression",
)
# Create trigger
trigger = Trigger(
@@ -186,13 +219,25 @@ async def update_trigger(
if trigger_data.description is not None:
trigger.description = trigger_data.description
if trigger_data.conditions is not None:
-# Validate conditions
-if trigger_data.conditions.field not in ["status_id", "assignee_id", "priority"]:
-raise HTTPException(
-status_code=status.HTTP_400_BAD_REQUEST,
-detail="Invalid condition field",
-)
-trigger.conditions = trigger_data.conditions.model_dump()
# Validate conditions based on trigger type
if trigger.trigger_type == "field_change":
if trigger_data.conditions.field and trigger_data.conditions.field not in ["status_id", "assignee_id", "priority"]:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid condition field",
)
elif trigger.trigger_type == "schedule":
# Validate cron expression if provided
if trigger_data.conditions.cron_expression is not None:
is_valid, error_msg = TriggerSchedulerService.parse_cron_expression(
trigger_data.conditions.cron_expression
)
if not is_valid:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=error_msg or "Invalid cron expression",
)
trigger.conditions = trigger_data.conditions.model_dump(exclude_none=True)
if trigger_data.actions is not None:
trigger.actions = [a.model_dump() for a in trigger_data.actions]
if trigger_data.is_active is not None:

View File

@@ -4,10 +4,11 @@ from sqlalchemy import or_
from typing import List
from app.core.database import get_db
from app.core.redis import get_redis
from app.models.user import User
from app.models.role import Role
from app.models import AuditAction
-from app.schemas.user import UserResponse, UserUpdate
from app.schemas.user import UserResponse, UserUpdate, CapacityUpdate
from app.middleware.auth import (
get_current_user,
require_permission,
@@ -239,3 +240,86 @@ async def set_admin_status(
db.commit()
db.refresh(user)
return user
@router.put("/{user_id}/capacity", response_model=UserResponse)
async def update_user_capacity(
user_id: str,
capacity: CapacityUpdate,
request: Request,
db: Session = Depends(get_db),
current_user: User = Depends(get_current_user),
redis_client=Depends(get_redis),
):
"""
Update user's weekly capacity hours.
Permission: admin, manager, or the user themselves can update capacity.
- Admin/Manager can update any user's capacity
- Regular users can only update their own capacity
Capacity changes are recorded in the audit trail and workload cache is invalidated.
"""
user = db.query(User).filter(User.id == user_id).first()
if not user:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="User not found",
)
# Permission check: admin, manager, or the user themselves can update capacity
is_self = current_user.id == user_id
is_admin = current_user.is_system_admin
is_manager = False
# Check if current user has manager role
if current_user.role and current_user.role.name == "manager":
is_manager = True
if not is_self and not is_admin and not is_manager:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Only admin, manager, or the user themselves can update capacity",
)
# Store old capacity for audit log
old_capacity = float(user.capacity) if user.capacity else None
# Update capacity (validation is handled by Pydantic schema)
user.capacity = capacity.capacity_hours
new_capacity = float(capacity.capacity_hours)
# Record capacity change in audit trail
if old_capacity != new_capacity:
AuditService.log_event(
db=db,
event_type="user.capacity_change",
resource_type="user",
action=AuditAction.UPDATE,
user_id=current_user.id,
resource_id=user.id,
changes=[{
"field": "capacity",
"old_value": old_capacity,
"new_value": new_capacity
}],
request_metadata=get_audit_metadata(request),
)
db.commit()
db.refresh(user)
# Invalidate workload cache for this user
# Cache keys follow pattern: workload:{user_id}:* or workload:heatmap:*
try:
# Delete user-specific workload cache
for key in redis_client.scan_iter(f"workload:{user_id}:*"):
redis_client.delete(key)
# Delete heatmap cache (contains all users' workload data)
for key in redis_client.scan_iter("workload:heatmap:*"):
redis_client.delete(key)
except Exception:
# Cache invalidation failure should not fail the request
pass
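# Note: scan_iter issues incremental SCAN commands rather than a blocking KEYS
# pattern match, so cache invalidation stays cheap even on large keyspaces.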
return user

View File

@@ -1,4 +1,5 @@
from pydantic_settings import BaseSettings
from pydantic import field_validator
from typing import List
import os
@@ -24,11 +25,33 @@ class Settings(BaseSettings):
def REDIS_URL(self) -> str:
return f"redis://{self.REDIS_HOST}:{self.REDIS_PORT}/{self.REDIS_DB}"
-# JWT
-JWT_SECRET_KEY: str = "your-secret-key-change-in-production"
# JWT - Must be set in environment, no default allowed
JWT_SECRET_KEY: str = ""
JWT_ALGORITHM: str = "HS256"
JWT_EXPIRE_MINUTES: int = 10080 # 7 days
@field_validator("JWT_SECRET_KEY")
@classmethod
def validate_jwt_secret_key(cls, v: str) -> str:
"""Validate that JWT_SECRET_KEY is set and not a placeholder."""
if not v or v.strip() == "":
raise ValueError(
"JWT_SECRET_KEY must be set in environment variables. "
"Please configure it in the .env file."
)
placeholder_values = [
"your-secret-key-change-in-production",
"change-me",
"secret",
"your-secret-key",
]
if v.lower() in placeholder_values:
raise ValueError(
"JWT_SECRET_KEY appears to be a placeholder value. "
"Please set a secure secret key in the .env file."
)
return v
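# Illustrative: a suitable value for the .env file can be generated with
#   python -c "import secrets; print(secrets.token_urlsafe(64))"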
# External Auth API
AUTH_API_URL: str = "https://pj-auth-api.vercel.app"

View File

@@ -0,0 +1,26 @@
"""
Rate limiting configuration using slowapi with Redis backend.
This module provides rate limiting functionality to protect against
brute force attacks and DoS attempts on sensitive endpoints.
"""
import os
from slowapi import Limiter
from slowapi.util import get_remote_address
from app.core.config import settings
# Use memory storage for testing, Redis for production
# This allows tests to run without a Redis connection
_testing = os.environ.get("TESTING", "").lower() in ("true", "1", "yes")
_storage_uri = "memory://" if _testing else settings.REDIS_URL
# Create limiter instance with appropriate storage
# Uses the client's remote address (IP) as the key for rate limiting
limiter = Limiter(
key_func=get_remote_address,
storage_uri=_storage_uri,
strategy="fixed-window", # Fixed window strategy for predictable rate limiting
)
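# Usage sketch (illustrative): slowapi requires the decorated endpoint to accept
# a `request: Request` parameter so the key function can read the client address,
# which is why the login endpoint in this commit renames its body parameter.
#
#   @router.post("/login")
#   @limiter.limit("5/minute")
#   async def login(request: Request, ...):
#       ...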

View File

@@ -1,9 +1,11 @@
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from app.core.database import SessionLocal
from app.services.report_service import ReportService
from app.services.trigger_scheduler import TriggerSchedulerService
logger = logging.getLogger(__name__)
@@ -24,6 +26,24 @@ async def weekly_report_job():
db.close()
async def schedule_trigger_job():
"""Job function to evaluate and execute schedule triggers.
This runs every minute and checks:
1. Cron-based schedule triggers
2. Deadline reminder triggers
"""
db = SessionLocal()
try:
logs = TriggerSchedulerService.evaluate_schedule_triggers(db)
if logs:
logger.info(f"Schedule trigger job executed {len(logs)} triggers")
except Exception as e:
logger.error(f"Error in schedule trigger job: {e}")
finally:
db.close()
def init_scheduler():
"""Initialize the scheduler with jobs."""
# Weekly report - Every Friday at 16:00
@@ -35,7 +55,16 @@ def init_scheduler():
replace_existing=True,
)
logger.info("Scheduler initialized with weekly report job (Friday 16:00)")
# Schedule trigger evaluation - Every minute
scheduler.add_job(
schedule_trigger_job,
IntervalTrigger(minutes=1),
id='schedule_triggers',
name='Evaluate Schedule Triggers',
replace_existing=True,
)
logger.info("Scheduler initialized with jobs: weekly_report (Friday 16:00), schedule_triggers (every minute)")
def start_scheduler():

View File

@@ -1,4 +1,4 @@
from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from typing import Optional, Any
from jose import jwt, JWTError
from app.core.config import settings
@@ -16,13 +16,14 @@ def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -
Encoded JWT token string
"""
to_encode = data.copy()
now = datetime.now(timezone.utc)
if expires_delta:
-expire = datetime.utcnow() + expires_delta
expire = now + expires_delta
else:
-expire = datetime.utcnow() + timedelta(minutes=settings.JWT_EXPIRE_MINUTES)
expire = now + timedelta(minutes=settings.JWT_EXPIRE_MINUTES)
to_encode.update({"exp": expire, "iat": datetime.utcnow()})
to_encode.update({"exp": expire, "iat": now})
encoded_jwt = jwt.encode(
to_encode,

View File

@@ -1,9 +1,13 @@
from contextlib import asynccontextmanager
-from fastapi import FastAPI
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from slowapi import _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from app.middleware.audit import AuditMiddleware
from app.core.scheduler import start_scheduler, shutdown_scheduler
from app.core.rate_limiter import limiter
@asynccontextmanager
@@ -29,6 +33,7 @@ from app.api.audit import router as audit_router
from app.api.attachments import router as attachments_router
from app.api.triggers import router as triggers_router
from app.api.reports import router as reports_router
from app.api.health import router as health_router
from app.core.config import settings
app = FastAPI(
@@ -38,6 +43,10 @@ app = FastAPI(
lifespan=lifespan,
)
# Initialize rate limiter
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
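# The slowapi handler responds with HTTP 429 Too Many Requests once a limit is hit.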
# CORS middleware
app.add_middleware(
CORSMiddleware,
@@ -66,6 +75,7 @@ app.include_router(audit_router)
app.include_router(attachments_router)
app.include_router(triggers_router)
app.include_router(reports_router)
app.include_router(health_router)
@app.get("/health")

View File

@@ -42,7 +42,16 @@ async def get_current_user(
# Check session in Redis
stored_token = redis_client.get(f"session:{user_id}")
-if stored_token is None or stored_token != token:
if stored_token is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Session expired or invalid",
headers={"WWW-Authenticate": "Bearer"},
)
# Handle Redis bytes type - decode if necessary
if isinstance(stored_token, bytes):
stored_token = stored_token.decode("utf-8")
if stored_token != token:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Session expired or invalid",

View File

@@ -18,6 +18,7 @@ from app.models.trigger import Trigger, TriggerType
from app.models.trigger_log import TriggerLog, TriggerLogStatus
from app.models.scheduled_report import ScheduledReport, ReportType
from app.models.report_history import ReportHistory, ReportHistoryStatus
from app.models.project_health import ProjectHealth, RiskLevel, ScheduleStatus, ResourceStatus
__all__ = [
"User", "Role", "Department", "Space", "Project", "TaskStatus", "Task", "WorkloadSnapshot",
@@ -25,5 +26,6 @@ __all__ = [
"AuditLog", "AuditAlert", "AuditAction", "SensitivityLevel", "EVENT_SENSITIVITY", "ALERT_EVENTS",
"Attachment", "AttachmentVersion",
"Trigger", "TriggerType", "TriggerLog", "TriggerLogStatus",
"ScheduledReport", "ReportType", "ReportHistory", "ReportHistoryStatus"
"ScheduledReport", "ReportType", "ReportHistory", "ReportHistoryStatus",
"ProjectHealth", "RiskLevel", "ScheduleStatus", "ResourceStatus"
]

View File

@@ -13,6 +13,8 @@ class NotificationType(str, enum.Enum):
STATUS_CHANGE = "status_change"
COMMENT = "comment"
BLOCKER_RESOLVED = "blocker_resolved"
DEADLINE_REMINDER = "deadline_reminder"
SCHEDULED_TRIGGER = "scheduled_trigger"
class Notification(Base):
@@ -22,6 +24,7 @@ class Notification(Base):
user_id = Column(String(36), ForeignKey("pjctrl_users.id", ondelete="CASCADE"), nullable=False)
type = Column(
Enum("mention", "assignment", "blocker", "status_change", "comment", "blocker_resolved",
"deadline_reminder", "scheduled_trigger",
name="notification_type_enum"),
nullable=False
)

View File

@@ -39,3 +39,4 @@ class Project(Base):
task_statuses = relationship("TaskStatus", back_populates="project", cascade="all, delete-orphan")
tasks = relationship("Task", back_populates="project", cascade="all, delete-orphan")
triggers = relationship("Trigger", back_populates="project", cascade="all, delete-orphan")
health = relationship("ProjectHealth", back_populates="project", uselist=False, cascade="all, delete-orphan")

View File

@@ -0,0 +1,51 @@
from sqlalchemy import Column, String, Integer, DateTime, Enum, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.core.database import Base
import enum
class RiskLevel(str, enum.Enum):
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
class ScheduleStatus(str, enum.Enum):
ON_TRACK = "on_track"
AT_RISK = "at_risk"
DELAYED = "delayed"
class ResourceStatus(str, enum.Enum):
ADEQUATE = "adequate"
CONSTRAINED = "constrained"
OVERLOADED = "overloaded"
class ProjectHealth(Base):
__tablename__ = "pjctrl_project_health"
id = Column(String(36), primary_key=True)
project_id = Column(String(36), ForeignKey("pjctrl_projects.id", ondelete="CASCADE"), nullable=False, unique=True)
health_score = Column(Integer, default=100, nullable=False) # 0-100
risk_level = Column(
Enum("low", "medium", "high", "critical", name="risk_level_enum"),
default="low",
nullable=False
)
schedule_status = Column(
Enum("on_track", "at_risk", "delayed", name="schedule_status_enum"),
default="on_track",
nullable=False
)
resource_status = Column(
Enum("adequate", "constrained", "overloaded", name="resource_status_enum"),
default="adequate",
nullable=False
)
last_updated = Column(DateTime, server_default=func.now(), onupdate=func.now(), nullable=False)
# Relationships
project = relationship("Project", back_populates="health")

View File

@@ -10,6 +10,7 @@ class User(Base):
id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4()))
email = Column(String(200), unique=True, nullable=False, index=True)
employee_id = Column(String(50), unique=True, nullable=True, index=True)
name = Column(String(200), nullable=False)
department_id = Column(String(36), ForeignKey("pjctrl_departments.id"), nullable=True)
role_id = Column(String(36), ForeignKey("pjctrl_roles.id"), nullable=True)

View File

@@ -0,0 +1,68 @@
from pydantic import BaseModel
from typing import Optional, List
from datetime import datetime
from enum import Enum
class RiskLevel(str, Enum):
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
class ScheduleStatus(str, Enum):
ON_TRACK = "on_track"
AT_RISK = "at_risk"
DELAYED = "delayed"
class ResourceStatus(str, Enum):
ADEQUATE = "adequate"
CONSTRAINED = "constrained"
OVERLOADED = "overloaded"
class ProjectHealthBase(BaseModel):
health_score: int
risk_level: RiskLevel
schedule_status: ScheduleStatus
resource_status: ResourceStatus
class ProjectHealthResponse(ProjectHealthBase):
id: str
project_id: str
last_updated: datetime
class Config:
from_attributes = True
class ProjectHealthWithDetails(ProjectHealthResponse):
"""Extended health response with project and computed metrics."""
project_title: str
project_status: str
owner_name: Optional[str] = None
space_name: Optional[str] = None
task_count: int = 0
completed_task_count: int = 0
blocker_count: int = 0
overdue_task_count: int = 0
class ProjectHealthSummary(BaseModel):
"""Aggregated health metrics across all projects."""
total_projects: int
healthy_count: int # health_score >= 80
at_risk_count: int # health_score 50-79
critical_count: int # health_score < 50
average_health_score: float
projects_with_blockers: int
projects_delayed: int
class ProjectHealthDashboardResponse(BaseModel):
"""Full dashboard response with project list and summary."""
projects: List[ProjectHealthWithDetails]
summary: ProjectHealthSummary

View File

@@ -1,14 +1,32 @@
from datetime import datetime
-from typing import Optional, List, Dict, Any
from typing import Optional, List, Dict, Any, Union
from pydantic import BaseModel, Field
-class TriggerCondition(BaseModel):
class FieldChangeCondition(BaseModel):
"""Condition for field_change triggers."""
field: str = Field(..., description="Field to check: status_id, assignee_id, priority")
operator: str = Field(..., description="Operator: equals, not_equals, changed_to, changed_from")
value: str = Field(..., description="Value to compare against")
class ScheduleCondition(BaseModel):
"""Condition for schedule triggers."""
cron_expression: Optional[str] = Field(None, description="Cron expression (e.g., '0 9 * * 1' for Monday 9am)")
deadline_reminder_days: Optional[int] = Field(None, ge=1, le=365, description="Days before due date to send reminder")
class TriggerCondition(BaseModel):
"""Union condition that supports both field_change and schedule triggers."""
# Field change conditions
field: Optional[str] = Field(None, description="Field to check: status_id, assignee_id, priority")
operator: Optional[str] = Field(None, description="Operator: equals, not_equals, changed_to, changed_from")
value: Optional[str] = Field(None, description="Value to compare against")
# Schedule conditions
cron_expression: Optional[str] = Field(None, description="Cron expression for schedule triggers")
deadline_reminder_days: Optional[int] = Field(None, ge=1, le=365, description="Days before due date to send reminder")
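# Example condition payloads (illustrative):
#   field_change: {"field": "status_id", "operator": "changed_to", "value": "<status-id>"}
#   schedule:     {"cron_expression": "0 9 * * 1"} or {"deadline_reminder_days": 3}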
class TriggerAction(BaseModel):
type: str = Field(default="notify", description="Action type: notify")
target: str = Field(default="assignee", description="Target: assignee, creator, project_owner, user:<id>")

View File

@@ -1,4 +1,4 @@
-from pydantic import BaseModel
from pydantic import BaseModel, field_validator
from typing import Optional, List
from datetime import datetime
from decimal import Decimal
@@ -39,3 +39,25 @@ class UserResponse(UserBase):
class UserInDB(UserResponse):
pass
class CapacityUpdate(BaseModel):
"""Schema for updating user's weekly capacity hours."""
capacity_hours: Decimal
@field_validator("capacity_hours")
@classmethod
def validate_capacity_hours(cls, v: Decimal) -> Decimal:
"""Validate capacity hours is within valid range (0-168)."""
if v < 0:
raise ValueError("Capacity hours must be non-negative")
if v > 168:
raise ValueError("Capacity hours cannot exceed 168 (hours in a week)")
return v
class Config:
json_schema_extra = {
"example": {
"capacity_hours": 40.00
}
}

View File

@@ -1,7 +1,7 @@
import uuid
import hashlib
import json
-from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from typing import Optional, Dict, Any, List
from sqlalchemy.orm import Session
@@ -85,7 +85,8 @@ class AuditService:
request_metadata: Optional[Dict] = None,
) -> AuditLog:
"""Log an audit event."""
-now = datetime.utcnow()
# Use naive datetime for consistency with database storage (SQLite strips tzinfo)
now = datetime.now(timezone.utc).replace(tzinfo=None)
sensitivity = AuditService.get_sensitivity_level(event_type)
checksum = AuditService.calculate_checksum(
@@ -204,7 +205,8 @@ class AuditService:
alert.is_acknowledged = True
alert.acknowledged_by = user_id
-alert.acknowledged_at = datetime.utcnow()
# Use naive datetime for consistency with database storage
alert.acknowledged_at = datetime.now(timezone.utc).replace(tzinfo=None)
db.flush()
return alert

View File

@@ -139,9 +139,23 @@ class FileStorageService:
return files[0]
def get_file_by_path(self, file_path: str) -> Optional[Path]:
"""Get file by stored path."""
"""Get file by stored path. Handles both absolute and relative paths."""
path = Path(file_path)
-return path if path.exists() else None
# If path is absolute and exists, return it directly
if path.is_absolute() and path.exists():
return path
# If path is relative, try prepending base_dir
full_path = self.base_dir / path
if full_path.exists():
return full_path
# Fallback: check if original path exists (e.g., relative from current dir)
if path.exists():
return path
return None
def delete_file(
self,

View File

@@ -0,0 +1,378 @@
"""Project health calculation service.
Provides functionality to calculate and retrieve project health metrics
including risk scores, schedule status, and resource status.
"""
import uuid
from datetime import datetime
from typing import List, Optional, Dict, Any
from sqlalchemy.orm import Session
from app.models import Project, Task, TaskStatus, Blocker, ProjectHealth
from app.schemas.project_health import (
RiskLevel,
ScheduleStatus,
ResourceStatus,
ProjectHealthResponse,
ProjectHealthWithDetails,
ProjectHealthSummary,
ProjectHealthDashboardResponse,
)
# Constants for health score calculation
BLOCKER_PENALTY_PER_ITEM = 10
BLOCKER_PENALTY_MAX = 30
OVERDUE_PENALTY_PER_ITEM = 5
OVERDUE_PENALTY_MAX = 30
COMPLETION_PENALTY_THRESHOLD = 50
COMPLETION_PENALTY_FACTOR = 0.4
COMPLETION_PENALTY_MAX = 20
# Risk level thresholds
RISK_LOW_THRESHOLD = 80
RISK_MEDIUM_THRESHOLD = 60
RISK_HIGH_THRESHOLD = 40
# Schedule status thresholds
SCHEDULE_AT_RISK_THRESHOLD = 2
# Resource status thresholds
RESOURCE_CONSTRAINED_THRESHOLD = 2
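# Worked example (illustrative): a project with 2 unresolved blockers, 3 overdue
# tasks, and a 40% completion rate scores
#   100 - min(2 * 10, 30) - min(3 * 5, 30) - min(int((50 - 40) * 0.4), 20)
#   = 100 - 20 - 15 - 4 = 61  ->  risk level "medium"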
def calculate_health_metrics(db: Session, project: Project) -> Dict[str, Any]:
"""
Calculate health metrics for a project.
Args:
db: Database session
project: Project object to calculate metrics for
Returns:
Dictionary containing:
- health_score: 0-100 integer
- risk_level: low/medium/high/critical
- schedule_status: on_track/at_risk/delayed
- resource_status: adequate/constrained/overloaded
- task_count: Total number of active tasks
- completed_task_count: Number of completed tasks
- blocker_count: Number of unresolved blockers
- overdue_task_count: Number of overdue incomplete tasks
"""
# Fetch active tasks for this project
tasks = db.query(Task).filter(
Task.project_id == project.id,
Task.is_deleted == False
).all()
task_count = len(tasks)
# Count completed tasks
completed_task_count = sum(
1 for task in tasks
if task.status and task.status.is_done
)
# Count overdue tasks (incomplete with past due date)
now = datetime.utcnow()
overdue_task_count = sum(
1 for task in tasks
if task.due_date and task.due_date < now
and not (task.status and task.status.is_done)
)
# Count unresolved blockers
task_ids = [t.id for t in tasks]
blocker_count = 0
if task_ids:
blocker_count = db.query(Blocker).filter(
Blocker.task_id.in_(task_ids),
Blocker.resolved_at.is_(None)
).count()
# Calculate completion rate
completion_rate = 0.0
if task_count > 0:
completion_rate = (completed_task_count / task_count) * 100
# Calculate health score (start at 100, subtract penalties)
health_score = 100
# Apply blocker penalty
blocker_penalty = min(blocker_count * BLOCKER_PENALTY_PER_ITEM, BLOCKER_PENALTY_MAX)
health_score -= blocker_penalty
# Apply overdue penalty
overdue_penalty = min(overdue_task_count * OVERDUE_PENALTY_PER_ITEM, OVERDUE_PENALTY_MAX)
health_score -= overdue_penalty
# Apply completion penalty (if below threshold)
if task_count > 0 and completion_rate < COMPLETION_PENALTY_THRESHOLD:
completion_penalty = int(
(COMPLETION_PENALTY_THRESHOLD - completion_rate) * COMPLETION_PENALTY_FACTOR
)
health_score -= min(completion_penalty, COMPLETION_PENALTY_MAX)
# Ensure health score stays within bounds
health_score = max(0, min(100, health_score))
# Determine risk level based on health score
risk_level = _determine_risk_level(health_score)
# Determine schedule status based on overdue count
schedule_status = _determine_schedule_status(overdue_task_count)
# Determine resource status based on blocker count
resource_status = _determine_resource_status(blocker_count)
return {
"health_score": health_score,
"risk_level": risk_level,
"schedule_status": schedule_status,
"resource_status": resource_status,
"task_count": task_count,
"completed_task_count": completed_task_count,
"blocker_count": blocker_count,
"overdue_task_count": overdue_task_count,
}
def _determine_risk_level(health_score: int) -> str:
"""Determine risk level based on health score."""
if health_score >= RISK_LOW_THRESHOLD:
return "low"
elif health_score >= RISK_MEDIUM_THRESHOLD:
return "medium"
elif health_score >= RISK_HIGH_THRESHOLD:
return "high"
else:
return "critical"
def _determine_schedule_status(overdue_task_count: int) -> str:
"""Determine schedule status based on overdue task count."""
if overdue_task_count == 0:
return "on_track"
elif overdue_task_count <= SCHEDULE_AT_RISK_THRESHOLD:
return "at_risk"
else:
return "delayed"
def _determine_resource_status(blocker_count: int) -> str:
"""Determine resource status based on blocker count."""
if blocker_count == 0:
return "adequate"
elif blocker_count <= RESOURCE_CONSTRAINED_THRESHOLD:
return "constrained"
else:
return "overloaded"
def get_or_create_project_health(db: Session, project: Project) -> ProjectHealth:
"""
Get existing project health record or create a new one.
Args:
db: Database session
project: Project object
Returns:
ProjectHealth record
"""
health = db.query(ProjectHealth).filter(
ProjectHealth.project_id == project.id
).first()
if not health:
health = ProjectHealth(
id=str(uuid.uuid4()),
project_id=project.id
)
db.add(health)
return health
def update_project_health(
db: Session,
project: Project,
metrics: Dict[str, Any]
) -> ProjectHealth:
"""
Update project health record with calculated metrics.
Args:
db: Database session
project: Project object
metrics: Calculated health metrics
Returns:
Updated ProjectHealth record
"""
health = get_or_create_project_health(db, project)
health.health_score = metrics["health_score"]
health.risk_level = metrics["risk_level"]
health.schedule_status = metrics["schedule_status"]
health.resource_status = metrics["resource_status"]
return health
def get_project_health(
db: Session,
project_id: str
) -> Optional[ProjectHealthWithDetails]:
"""
Get health information for a single project.
Args:
db: Database session
project_id: Project ID
Returns:
ProjectHealthWithDetails or None if project not found
"""
project = db.query(Project).filter(Project.id == project_id).first()
if not project:
return None
metrics = calculate_health_metrics(db, project)
health = update_project_health(db, project, metrics)
db.commit()
db.refresh(health)
return _build_health_with_details(project, health, metrics)
def get_all_projects_health(
db: Session,
status_filter: Optional[str] = "active"
) -> ProjectHealthDashboardResponse:
"""
Get health information for all projects.
Args:
db: Database session
status_filter: Filter projects by status (default: "active")
Returns:
ProjectHealthDashboardResponse with projects list and summary
"""
query = db.query(Project)
if status_filter:
query = query.filter(Project.status == status_filter)
projects = query.all()
projects_health: List[ProjectHealthWithDetails] = []
for project in projects:
metrics = calculate_health_metrics(db, project)
health = update_project_health(db, project, metrics)
project_health = _build_health_with_details(project, health, metrics)
projects_health.append(project_health)
db.commit()
# Calculate summary statistics
summary = _calculate_summary(projects_health)
return ProjectHealthDashboardResponse(
projects=projects_health,
summary=summary
)
def _build_health_with_details(
project: Project,
health: ProjectHealth,
metrics: Dict[str, Any]
) -> ProjectHealthWithDetails:
"""Build ProjectHealthWithDetails from project, health, and metrics."""
return ProjectHealthWithDetails(
id=health.id,
project_id=project.id,
health_score=metrics["health_score"],
risk_level=RiskLevel(metrics["risk_level"]),
schedule_status=ScheduleStatus(metrics["schedule_status"]),
resource_status=ResourceStatus(metrics["resource_status"]),
last_updated=health.last_updated or datetime.utcnow(),
project_title=project.title,
project_status=project.status,
owner_name=project.owner.name if project.owner else None,
space_name=project.space.name if project.space else None,
task_count=metrics["task_count"],
completed_task_count=metrics["completed_task_count"],
blocker_count=metrics["blocker_count"],
overdue_task_count=metrics["overdue_task_count"],
)
def _calculate_summary(
projects_health: List[ProjectHealthWithDetails]
) -> ProjectHealthSummary:
"""Calculate summary statistics for health dashboard."""
total_projects = len(projects_health)
healthy_count = sum(1 for p in projects_health if p.health_score >= 80)
at_risk_count = sum(1 for p in projects_health if 50 <= p.health_score < 80)
critical_count = sum(1 for p in projects_health if p.health_score < 50)
average_health_score = 0.0
if total_projects > 0:
average_health_score = sum(p.health_score for p in projects_health) / total_projects
projects_with_blockers = sum(1 for p in projects_health if p.blocker_count > 0)
projects_delayed = sum(
1 for p in projects_health
if p.schedule_status == ScheduleStatus.DELAYED
)
return ProjectHealthSummary(
total_projects=total_projects,
healthy_count=healthy_count,
at_risk_count=at_risk_count,
critical_count=critical_count,
average_health_score=round(average_health_score, 1),
projects_with_blockers=projects_with_blockers,
projects_delayed=projects_delayed,
)
class HealthService:
"""
Service class for project health operations.
Provides a class-based interface for health calculations,
following the service pattern used in the codebase.
"""
def __init__(self, db: Session):
"""Initialize HealthService with database session."""
self.db = db
def calculate_metrics(self, project: Project) -> Dict[str, Any]:
"""Calculate health metrics for a project."""
return calculate_health_metrics(self.db, project)
def get_project_health(self, project_id: str) -> Optional[ProjectHealthWithDetails]:
"""Get health information for a single project."""
return get_project_health(self.db, project_id)
def get_dashboard(
self,
status_filter: Optional[str] = "active"
) -> ProjectHealthDashboardResponse:
"""Get health dashboard for all projects."""
return get_all_projects_health(self.db, status_filter)
def refresh_project_health(self, project: Project) -> ProjectHealth:
"""Refresh and persist health data for a project."""
metrics = calculate_health_metrics(self.db, project)
health = update_project_health(self.db, project, metrics)
self.db.commit()
self.db.refresh(health)
return health
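# Usage sketch (illustrative):
#   service = HealthService(db)
#   dashboard = service.get_dashboard(status_filter="active")
#   details = service.get_project_health(project_id)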

View File

@@ -4,7 +4,7 @@ import re
import asyncio
import logging
import threading
-from datetime import datetime
from datetime import datetime, timezone
from typing import List, Optional, Dict, Set
from sqlalchemy.orm import Session
from sqlalchemy import event
@@ -102,7 +102,7 @@ class NotificationService:
"""Convert a Notification to a dict for publishing."""
created_at = notification.created_at
if created_at is None:
-created_at = datetime.utcnow()
created_at = datetime.now(timezone.utc).replace(tzinfo=None)
return {
"id": notification.id,
"type": notification.type,

View File

@@ -1,5 +1,5 @@
import uuid
-from datetime import datetime, timedelta
from datetime import datetime, timedelta, timezone
from typing import Dict, Any, List, Optional
from sqlalchemy.orm import Session
from sqlalchemy import func
@@ -15,9 +15,15 @@ class ReportService:
@staticmethod
def get_week_start(date: Optional[datetime] = None) -> datetime:
"""Get the start of the week (Monday) for a given date."""
"""Get the start of the week (Monday) for a given date.
Returns a naive datetime for compatibility with database values.
"""
if date is None:
-date = datetime.utcnow()
date = datetime.now(timezone.utc).replace(tzinfo=None)
elif date.tzinfo is not None:
# Convert to naive datetime for consistency
date = date.replace(tzinfo=None)
# Get Monday of the current week
days_since_monday = date.weekday()
week_start = date - timedelta(days=days_since_monday)
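# Example (illustrative): for Wednesday 2026-01-07, weekday() == 2, so
# week_start is Monday 2026-01-05.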
@@ -37,7 +43,8 @@ class ReportService:
week_end = week_start + timedelta(days=7)
next_week_start = week_end
next_week_end = next_week_start + timedelta(days=7)
-now = datetime.utcnow()
# Use naive datetime for comparison with database values
now = datetime.now(timezone.utc).replace(tzinfo=None)
# Get projects owned by the user
projects = db.query(Project).filter(Project.owner_id == user_id).all()
@@ -189,7 +196,7 @@ class ReportService:
return {
"week_start": week_start.isoformat(),
"week_end": week_end.isoformat(),
"generated_at": datetime.utcnow().isoformat(),
"generated_at": datetime.now(timezone.utc).replace(tzinfo=None).isoformat(),
"projects": project_details,
"summary": {
"completed_count": len(completed_tasks),
@@ -235,7 +242,8 @@ class ReportService:
db.add(report_history)
# Update last_sent_at
scheduled_report.last_sent_at = datetime.utcnow()
# Use naive datetime for consistency with database storage
scheduled_report.last_sent_at = datetime.now(timezone.utc).replace(tzinfo=None)
db.commit()
@@ -304,7 +312,8 @@ class ReportService:
db.add(history)
# Update last_sent_at
scheduled_report.last_sent_at = datetime.utcnow()
# Use naive datetime for consistency with database storage
scheduled_report.last_sent_at = datetime.now(timezone.utc).replace(tzinfo=None)
# Send notification
ReportService.send_report_notification(db, scheduled_report.recipient_id, content)
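The datetime.now(timezone.utc).replace(tzinfo=None) pattern now recurs throughout this service; a small helper would keep the convention in one place. A sketch, with the name utcnow_naive being hypothetical rather than part of this commit:

from datetime import datetime, timezone

def utcnow_naive() -> datetime:
    # Current UTC time as a naive datetime, matching database storage
    return datetime.now(timezone.utc).replace(tzinfo=None)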

View File

@@ -0,0 +1,701 @@
"""
Scheduled Trigger Execution Service
This module provides functionality for parsing cron expressions and executing
scheduled triggers based on their cron schedule, including deadline reminders.
"""
import uuid
import logging
from datetime import datetime, timezone, timedelta
from typing import Optional, List, Dict, Any, Tuple, Set
from croniter import croniter
from sqlalchemy.orm import Session
from sqlalchemy import and_
from app.models import Trigger, TriggerLog, Task, Project
from app.services.notification_service import NotificationService
logger = logging.getLogger(__name__)
# Key prefix for tracking deadline reminders already sent
DEADLINE_REMINDER_LOG_TYPE = "deadline_reminder"
class TriggerSchedulerService:
"""Service for scheduling and executing cron-based triggers."""
@staticmethod
def parse_cron_expression(expression: str) -> Tuple[bool, Optional[str]]:
"""
Validate a cron expression.
Args:
expression: A cron expression string (e.g., "0 9 * * 1-5" for weekdays at 9am)
Returns:
Tuple of (is_valid, error_message)
- is_valid: True if the expression is valid
- error_message: None if valid, otherwise an error description
"""
try:
# croniter requires a base time for initialization
base_time = datetime.now(timezone.utc)
croniter(expression, base_time)
return True, None
except (ValueError, KeyError) as e:
return False, f"Invalid cron expression: {str(e)}"
@staticmethod
def get_next_run_time(expression: str, base_time: Optional[datetime] = None) -> Optional[datetime]:
"""
Get the next scheduled run time for a cron expression.
Args:
expression: A cron expression string
base_time: The base time to calculate from (defaults to now)
Returns:
The next datetime when the schedule matches, or None if invalid
"""
try:
if base_time is None:
base_time = datetime.now(timezone.utc)
cron = croniter(expression, base_time)
return cron.get_next(datetime)
except (ValueError, KeyError):
return None
@staticmethod
def get_previous_run_time(expression: str, base_time: Optional[datetime] = None) -> Optional[datetime]:
"""
Get the previous scheduled run time for a cron expression.
Args:
expression: A cron expression string
base_time: The base time to calculate from (defaults to now)
Returns:
The previous datetime when the schedule matched, or None if invalid
"""
try:
if base_time is None:
base_time = datetime.now(timezone.utc)
cron = croniter(expression, base_time)
return cron.get_prev(datetime)
except (ValueError, KeyError):
return None
@staticmethod
def should_trigger(
trigger: Trigger,
current_time: datetime,
last_execution_time: Optional[datetime] = None,
) -> bool:
"""
Check if a schedule trigger should fire based on its cron expression.
A trigger should fire if:
1. It's a schedule-type trigger and is active
2. Its conditions contain a valid cron expression
3. The cron schedule has matched since the last execution
Args:
trigger: The trigger to evaluate
current_time: The current time to check against
last_execution_time: The time of the last successful execution
Returns:
True if the trigger should fire, False otherwise
"""
# Only process schedule triggers
if trigger.trigger_type != "schedule":
return False
if not trigger.is_active:
return False
# Get cron expression from conditions
conditions = trigger.conditions or {}
cron_expression = conditions.get("cron_expression")
if not cron_expression:
logger.warning(f"Trigger {trigger.id} has no cron_expression in conditions")
return False
# Validate cron expression
is_valid, error = TriggerSchedulerService.parse_cron_expression(cron_expression)
if not is_valid:
logger.warning(f"Trigger {trigger.id} has invalid cron: {error}")
return False
# Get the previous scheduled time before current_time
prev_scheduled = TriggerSchedulerService.get_previous_run_time(cron_expression, current_time)
if prev_scheduled is None:
return False
# If no last execution, check if we're within the execution window (5 minutes)
if last_execution_time is None:
# Only trigger if the scheduled time was within the last 5 minutes
window_seconds = 300 # 5 minutes
time_since_scheduled = (current_time - prev_scheduled).total_seconds()
return 0 <= time_since_scheduled < window_seconds
# executed_at values from the database are naive UTC; attach tzinfo so
# the comparison with the timezone-aware prev_scheduled cannot raise
if last_execution_time.tzinfo is None:
    last_execution_time = last_execution_time.replace(tzinfo=timezone.utc)
# Trigger if the previous scheduled time is after the last execution
return prev_scheduled > last_execution_time
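# Worked example (hypothetical values): with cron "0 9 * * *" and a
# current_time of 09:03 UTC, prev_scheduled is 09:00 today. With no prior
# execution, 180s < 300s, so the trigger fires once; on later passes the
# last execution postdates 09:00, so it stays quiet until the next match.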
@staticmethod
def get_last_execution_time(db: Session, trigger_id: str) -> Optional[datetime]:
"""
Get the last successful execution time for a trigger.
Args:
db: Database session
trigger_id: The trigger ID
Returns:
The datetime of the last successful execution, or None
"""
last_log = db.query(TriggerLog).filter(
TriggerLog.trigger_id == trigger_id,
TriggerLog.status == "success",
).order_by(TriggerLog.executed_at.desc()).first()
return last_log.executed_at if last_log else None
@staticmethod
def execute_scheduled_triggers(db: Session) -> List[TriggerLog]:
"""
Main execution function that evaluates and executes all scheduled triggers.
This function should be called periodically (e.g., every minute) by a scheduler.
Args:
db: Database session
Returns:
List of TriggerLog entries for executed triggers
"""
logs: List[TriggerLog] = []
current_time = datetime.now(timezone.utc)
# Get all active schedule-type triggers
triggers = db.query(Trigger).filter(
Trigger.trigger_type == "schedule",
Trigger.is_active == True,
).all()
logger.info(f"Evaluating {len(triggers)} scheduled triggers at {current_time}")
for trigger in triggers:
try:
# Get last execution time
last_execution = TriggerSchedulerService.get_last_execution_time(db, trigger.id)
# Check if trigger should fire
if TriggerSchedulerService.should_trigger(trigger, current_time, last_execution):
logger.info(f"Executing scheduled trigger: {trigger.name} (ID: {trigger.id})")
log = TriggerSchedulerService._execute_trigger(db, trigger)
logs.append(log)
except Exception as e:
logger.error(f"Error evaluating trigger {trigger.id}: {e}")
# Log the error
error_log = TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status="failed",
details={"error_type": type(e).__name__},
error_message=str(e),
)
logs.append(error_log)
if logs:
db.commit()
logger.info(f"Executed {len(logs)} scheduled triggers")
return logs
@staticmethod
def _execute_trigger(db: Session, trigger: Trigger) -> TriggerLog:
"""
Execute a scheduled trigger's actions.
Args:
db: Database session
trigger: The trigger to execute
Returns:
TriggerLog entry for this execution
"""
actions = trigger.actions if isinstance(trigger.actions, list) else [trigger.actions]
executed_actions = []
error_message = None
try:
for action in actions:
action_type = action.get("type")
if action_type == "notify":
TriggerSchedulerService._execute_notify_action(db, action, trigger)
executed_actions.append({"type": action_type, "status": "success"})
# Add more action types here as needed
status = "success"
except Exception as e:
status = "failed"
error_message = str(e)
executed_actions.append({"type": "error", "message": str(e)})
logger.error(f"Error executing trigger {trigger.id} actions: {e}")
return TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status=status,
details={
"trigger_name": trigger.name,
"trigger_type": "schedule",
"cron_expression": trigger.conditions.get("cron_expression"),
"actions_executed": executed_actions,
},
error_message=error_message,
)
@staticmethod
def _execute_notify_action(db: Session, action: Dict[str, Any], trigger: Trigger) -> None:
"""
Execute a notify action for a scheduled trigger.
Args:
db: Database session
action: The action configuration
trigger: The parent trigger
"""
target = action.get("target", "project_owner")
template = action.get("template", "Scheduled trigger '{trigger_name}' has fired")
# For scheduled triggers, we typically notify project-level users
project = trigger.project
if not project:
logger.warning(f"Trigger {trigger.id} has no associated project")
return
target_user_id = TriggerSchedulerService._resolve_target(project, target)
if not target_user_id:
logger.debug(f"No target user resolved for trigger {trigger.id} with target '{target}'")
return
# Format message with variables
message = TriggerSchedulerService._format_template(template, trigger, project)
NotificationService.create_notification(
db=db,
user_id=target_user_id,
notification_type="scheduled_trigger",
reference_type="trigger",
reference_id=trigger.id,
title=f"Scheduled: {trigger.name}",
message=message,
)
@staticmethod
def _resolve_target(project: Project, target: str) -> Optional[str]:
"""
Resolve notification target to user ID.
Args:
project: The project context
target: Target specification (e.g., "project_owner", "user:<id>")
Returns:
User ID or None
"""
if target == "project_owner":
return project.owner_id
elif target.startswith("user:"):
return target.split(":", 1)[1]
return None
@staticmethod
def _format_template(template: str, trigger: Trigger, project: Project) -> str:
"""
Format message template with trigger/project variables.
Args:
template: Template string with {variable} placeholders
trigger: The trigger context
project: The project context
Returns:
Formatted message string
"""
replacements = {
"{trigger_name}": trigger.name,
"{trigger_id}": trigger.id,
"{project_name}": project.title if project else "Unknown",
"{project_id}": project.id if project else "Unknown",
}
result = template
for key, value in replacements.items():
result = result.replace(key, str(value))
return result
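# Example (illustrative): the template "'{trigger_name}' fired for
# {project_name}" with trigger name "daily-digest" and project title
# "Apollo" formats to "'daily-digest' fired for Apollo".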
@staticmethod
def _log_execution(
db: Session,
trigger: Trigger,
status: str,
details: Optional[Dict[str, Any]] = None,
error_message: Optional[str] = None,
task_id: Optional[str] = None,
) -> TriggerLog:
"""
Create a trigger execution log entry.
Args:
db: Database session
trigger: The trigger that was executed
status: Execution status ("success" or "failed")
details: Optional execution details
error_message: Optional error message if failed
task_id: Optional task ID for deadline reminders
Returns:
The created TriggerLog entry
"""
log = TriggerLog(
id=str(uuid.uuid4()),
trigger_id=trigger.id,
task_id=task_id,
status=status,
details=details,
error_message=error_message,
)
db.add(log)
return log
# =========================================================================
# Deadline Reminder Methods
# =========================================================================
@staticmethod
def execute_deadline_reminders(db: Session) -> List[TriggerLog]:
"""
Check all deadline reminder triggers and send notifications for tasks
whose due date falls exactly N days from today.
Each task only receives one reminder per trigger configuration.
Args:
db: Database session
Returns:
List of TriggerLog entries for sent reminders
"""
logs: List[TriggerLog] = []
current_time = datetime.now(timezone.utc)
today = current_time.date()
# Get all active schedule triggers with deadline_reminder_days
triggers = db.query(Trigger).filter(
Trigger.trigger_type == "schedule",
Trigger.is_active == True,
).all()
# Filter triggers that have deadline_reminder_days configured
deadline_triggers = [
t for t in triggers
if t.conditions and t.conditions.get("deadline_reminder_days") is not None
]
if not deadline_triggers:
return logs
logger.info(f"Evaluating {len(deadline_triggers)} deadline reminder triggers")
for trigger in deadline_triggers:
try:
reminder_days = trigger.conditions.get("deadline_reminder_days")
if not isinstance(reminder_days, int) or reminder_days < 1:
continue
# Calculate the target date range
# We want to find tasks whose due_date is exactly N days from today
target_date = today + timedelta(days=reminder_days)
# Get tasks in this project that:
# 1. Have a due_date matching the target date
# 2. Are not deleted
# 3. Have not already received a reminder for this trigger
tasks = TriggerSchedulerService._get_tasks_for_deadline_reminder(
db, trigger, target_date
)
for task in tasks:
try:
log = TriggerSchedulerService._send_deadline_reminder(
db, trigger, task, reminder_days
)
logs.append(log)
except Exception as e:
logger.error(
f"Error sending deadline reminder for task {task.id}: {e}"
)
error_log = TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status="failed",
details={
"trigger_type": DEADLINE_REMINDER_LOG_TYPE,
"task_id": task.id,
"reminder_days": reminder_days,
},
error_message=str(e),
task_id=task.id,
)
logs.append(error_log)
except Exception as e:
logger.error(f"Error processing deadline trigger {trigger.id}: {e}")
if logs:
db.commit()
logger.info(f"Processed {len(logs)} deadline reminders")
return logs
@staticmethod
def _get_tasks_for_deadline_reminder(
db: Session,
trigger: Trigger,
target_date,
) -> List[Task]:
"""
Get tasks that need deadline reminders for a specific trigger.
Args:
db: Database session
trigger: The deadline reminder trigger
target_date: The date that matches (today + N days)
Returns:
List of tasks that need reminders
"""
# Get IDs of tasks that already received reminders for this trigger
already_notified = db.query(TriggerLog.task_id).filter(
TriggerLog.trigger_id == trigger.id,
TriggerLog.status == "success",
TriggerLog.task_id.isnot(None),
).all()
notified_task_ids: Set[str] = {t[0] for t in already_notified if t[0]}
# Use a date-range comparison for cross-database compatibility:
# target_date is a date object, so match tasks due anywhere on that date.
# Naive datetimes follow the database storage convention used elsewhere.
target_start = datetime.combine(target_date, datetime.min.time())
target_end = datetime.combine(target_date, datetime.max.time())
# Query tasks matching criteria
tasks = db.query(Task).filter(
Task.project_id == trigger.project_id,
Task.is_deleted == False,
Task.due_date.isnot(None),
Task.due_date >= target_start,
Task.due_date <= target_end,
).all()
# Filter out tasks that already received reminders
return [t for t in tasks if t.id not in notified_task_ids]
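# Worked example (illustrative): with reminder_days=3 and today being
# 2026-01-04, target_date is 2026-01-07 and the query window spans
# 2026-01-07 00:00:00 through 2026-01-07 23:59:59.999999.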
@staticmethod
def _send_deadline_reminder(
db: Session,
trigger: Trigger,
task: Task,
reminder_days: int,
) -> TriggerLog:
"""
Send a deadline reminder notification for a task.
Args:
db: Database session
trigger: The trigger configuration
task: The task approaching its deadline
reminder_days: Number of days before deadline
Returns:
TriggerLog entry for this reminder
"""
actions = trigger.actions if isinstance(trigger.actions, list) else [trigger.actions]
executed_actions = []
error_message = None
try:
for action in actions:
action_type = action.get("type")
if action_type == "notify":
TriggerSchedulerService._execute_deadline_notify_action(
db, action, trigger, task, reminder_days
)
executed_actions.append({"type": action_type, "status": "success"})
status = "success"
except Exception as e:
status = "failed"
error_message = str(e)
executed_actions.append({"type": "error", "message": str(e)})
logger.error(f"Error executing deadline reminder for task {task.id}: {e}")
return TriggerSchedulerService._log_execution(
db=db,
trigger=trigger,
status=status,
details={
"trigger_name": trigger.name,
"trigger_type": DEADLINE_REMINDER_LOG_TYPE,
"reminder_days": reminder_days,
"task_title": task.title,
"due_date": str(task.due_date),
"actions_executed": executed_actions,
},
error_message=error_message,
task_id=task.id,
)
@staticmethod
def _execute_deadline_notify_action(
db: Session,
action: Dict[str, Any],
trigger: Trigger,
task: Task,
reminder_days: int,
) -> None:
"""
Execute a notify action for a deadline reminder.
Args:
db: Database session
action: The action configuration
trigger: The parent trigger
task: The task with approaching deadline
reminder_days: Days until deadline
"""
target = action.get("target", "assignee")
template = action.get(
"template",
"Task '{task_title}' is due in {reminder_days} days"
)
# Resolve target user
target_user_id = TriggerSchedulerService._resolve_deadline_target(task, target)
if not target_user_id:
logger.debug(
f"No target user resolved for deadline reminder, task {task.id}, target '{target}'"
)
return
# Format message with variables
message = TriggerSchedulerService._format_deadline_template(
template, trigger, task, reminder_days
)
NotificationService.create_notification(
db=db,
user_id=target_user_id,
notification_type="deadline_reminder",
reference_type="task",
reference_id=task.id,
title=f"Deadline Reminder: {task.title}",
message=message,
)
@staticmethod
def _resolve_deadline_target(task: Task, target: str) -> Optional[str]:
"""
Resolve notification target for deadline reminders.
Args:
task: The task context
target: Target specification
Returns:
User ID or None
"""
if target == "assignee":
return task.assignee_id
elif target == "creator":
return task.created_by
elif target == "project_owner":
return task.project.owner_id if task.project else None
elif target.startswith("user:"):
return target.split(":", 1)[1]
return None
@staticmethod
def _format_deadline_template(
template: str,
trigger: Trigger,
task: Task,
reminder_days: int,
) -> str:
"""
Format message template for deadline reminders.
Args:
template: Template string with {variable} placeholders
trigger: The trigger context
task: The task context
reminder_days: Days until deadline
Returns:
Formatted message string
"""
project = trigger.project
replacements = {
"{trigger_name}": trigger.name,
"{trigger_id}": trigger.id,
"{task_title}": task.title,
"{task_id}": task.id,
"{due_date}": str(task.due_date.date()) if task.due_date else "N/A",
"{reminder_days}": str(reminder_days),
"{project_name}": project.title if project else "Unknown",
"{project_id}": project.id if project else "Unknown",
}
result = template
for key, value in replacements.items():
result = result.replace(key, str(value))
return result
@staticmethod
def evaluate_schedule_triggers(db: Session) -> List[TriggerLog]:
"""
Main entry point for evaluating all schedule triggers.
This method runs both cron-based triggers and deadline reminders.
Should be called every minute by the scheduler.
Args:
db: Database session
Returns:
Combined list of TriggerLog entries from all evaluations
"""
all_logs: List[TriggerLog] = []
# Execute cron-based schedule triggers
cron_logs = TriggerSchedulerService.execute_scheduled_triggers(db)
all_logs.extend(cron_logs)
# Execute deadline reminder triggers
deadline_logs = TriggerSchedulerService.execute_deadline_reminders(db)
all_logs.extend(deadline_logs)
return all_logs
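A minimal runner sketch for invoking the entry point once per minute; SessionLocal as the session factory and the bare loop are illustrative assumptions, and a real deployment would likely reuse the app's existing scheduler or a background worker:

import time
from app.core.database import SessionLocal  # assumed session factory

def run_trigger_scheduler_loop() -> None:
    # Evaluate cron triggers and deadline reminders once per minute
    while True:
        db = SessionLocal()
        try:
            TriggerSchedulerService.evaluate_schedule_triggers(db)
        finally:
            db.close()
        time.sleep(60)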

View File

@@ -0,0 +1,327 @@
"""
Watermark Service for MED-009: Dynamic Watermark for Downloads
This service provides functions that stamp image and PDF files with
watermarks containing user information, for audit and tracking purposes.
Watermark content includes:
- User name
- Employee ID (or 'N/A' when unavailable)
- Download timestamp
"""
import io
import logging
import math
from datetime import datetime
from typing import Optional, Tuple
import fitz # PyMuPDF
from PIL import Image, ImageDraw, ImageFont
logger = logging.getLogger(__name__)
class WatermarkService:
"""Service for adding watermarks to downloaded files."""
# Watermark configuration
WATERMARK_OPACITY = 0.3 # 30% opacity for semi-transparency
WATERMARK_ANGLE = -45 # Diagonal angle in degrees
WATERMARK_FONT_SIZE = 24
WATERMARK_COLOR = (128, 128, 128) # Gray color for watermark
WATERMARK_SPACING = 200 # Spacing between repeated watermarks
@staticmethod
def _format_watermark_text(
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> str:
"""
Format the watermark text with user information.
Args:
user_name: Name of the user
employee_id: Employee ID; 'N/A' is used if not provided
download_time: Time of download (defaults to now)
Returns:
Formatted watermark text
"""
if download_time is None:
download_time = datetime.now()
time_str = download_time.strftime("%Y-%m-%d %H:%M:%S")
emp_id = employee_id if employee_id else "N/A"
return f"{user_name} ({emp_id}) - {time_str}"
@staticmethod
def _get_font(size: int = 24) -> ImageFont.FreeTypeFont:
"""Get a font for the watermark. Falls back to default if custom font not available."""
try:
# Try to use a common system font (macOS)
return ImageFont.truetype("/System/Library/Fonts/Helvetica.ttc", size)
except (OSError, IOError):
try:
# Try Linux font
return ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", size)
except (OSError, IOError):
try:
# Try Windows font
return ImageFont.truetype("C:/Windows/Fonts/arial.ttf", size)
except (OSError, IOError):
# Fall back to default bitmap font
return ImageFont.load_default()
def add_image_watermark(
self,
image_bytes: bytes,
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> Tuple[bytes, str]:
"""
Add a semi-transparent diagonal watermark to an image.
Args:
image_bytes: The original image as bytes
user_name: Name of the user downloading the file
employee_id: Employee ID of the user
download_time: Time of download (defaults to now)
Returns:
Tuple of (watermarked image bytes, output format)
Raises:
Exception: If watermarking fails
"""
# Open the image
original = Image.open(io.BytesIO(image_bytes))
# Convert to RGBA if necessary for transparency support
if original.mode != 'RGBA':
image = original.convert('RGBA')
else:
image = original.copy()
# Create a transparent overlay for the watermark
watermark_layer = Image.new('RGBA', image.size, (255, 255, 255, 0))
draw = ImageDraw.Draw(watermark_layer)
# Get watermark text
watermark_text = self._format_watermark_text(user_name, employee_id, download_time)
# Get font
font = self._get_font(self.WATERMARK_FONT_SIZE)
# Calculate text size
bbox = draw.textbbox((0, 0), watermark_text, font=font)
text_width = bbox[2] - bbox[0]
text_height = bbox[3] - bbox[1]
# Create a larger canvas for the rotated text pattern
diagonal = int(math.sqrt(image.size[0]**2 + image.size[1]**2))
pattern_size = diagonal * 2
# Create pattern layer
pattern = Image.new('RGBA', (pattern_size, pattern_size), (255, 255, 255, 0))
pattern_draw = ImageDraw.Draw(pattern)
# Draw repeated watermark text across the pattern
opacity = int(255 * self.WATERMARK_OPACITY)
watermark_color = (*self.WATERMARK_COLOR, opacity)
y = 0
row = 0
while y < pattern_size:
x = -text_width if row % 2 else 0 # Offset alternate rows
while x < pattern_size:
pattern_draw.text((x, y), watermark_text, font=font, fill=watermark_color)
x += text_width + self.WATERMARK_SPACING
y += text_height + self.WATERMARK_SPACING
row += 1
# Rotate the pattern
rotated_pattern = pattern.rotate(
self.WATERMARK_ANGLE,
expand=False,
center=(pattern_size // 2, pattern_size // 2)
)
# Crop to original image size (centered)
crop_x = (pattern_size - image.size[0]) // 2
crop_y = (pattern_size - image.size[1]) // 2
cropped_pattern = rotated_pattern.crop((
crop_x, crop_y,
crop_x + image.size[0],
crop_y + image.size[1]
))
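# Worked example (illustrative): for an 800x600 image the diagonal is
# 1000, so the pattern canvas is 2000x2000 and the centered crop starts
# at (600, 700), covering the full image at any rotation angle.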
# Composite the watermark onto the image
watermarked = Image.alpha_composite(image, cropped_pattern)
# Determine output format
original_format = original.format or 'PNG'
if original_format.upper() == 'JPEG':
# Convert back to RGB for JPEG (no alpha channel)
watermarked = watermarked.convert('RGB')
output_format = 'JPEG'
else:
output_format = 'PNG'
# Save to bytes
output = io.BytesIO()
watermarked.save(output, format=output_format, quality=95)
output.seek(0)
logger.info(
f"Image watermark applied successfully for user {user_name} "
f"(employee_id: {employee_id})"
)
return output.getvalue(), output_format.lower()
def add_pdf_watermark(
self,
pdf_bytes: bytes,
user_name: str,
employee_id: Optional[str] = None,
download_time: Optional[datetime] = None
) -> bytes:
"""
Add a semi-transparent diagonal watermark to a PDF using PyMuPDF.
Args:
pdf_bytes: The original PDF as bytes
user_name: Name of the user downloading the file
employee_id: Employee ID of the user
download_time: Time of download (defaults to now)
Returns:
Watermarked PDF as bytes
Raises:
Exception: If watermarking fails
"""
# Get watermark text
watermark_text = self._format_watermark_text(user_name, employee_id, download_time)
# Open the PDF with PyMuPDF
doc = fitz.open(stream=pdf_bytes, filetype="pdf")
page_count = len(doc)
# Process each page
for page_num in range(page_count):
page = doc[page_num]
page_rect = page.rect
page_width = page_rect.width
page_height = page_rect.height
# Calculate text width for spacing estimation
text_length = fitz.get_text_length(
watermark_text,
fontname="helv",
fontsize=self.WATERMARK_FONT_SIZE
)
# Calculate diagonal for watermark coverage
diagonal = math.sqrt(page_width**2 + page_height**2)
# Set watermark color with opacity (gray with 30% opacity)
color = (0.5, 0.5, 0.5) # Gray
# Calculate rotation angle in radians
angle_rad = math.radians(self.WATERMARK_ANGLE)
# Draw watermark pattern using shape with proper rotation
# We use insert_textbox with a morph transform for rotation
spacing_x = text_length + self.WATERMARK_SPACING
spacing_y = self.WATERMARK_FONT_SIZE + self.WATERMARK_SPACING
# Tile the text in a grid wide enough to cover the page after rotation
shape = page.new_shape()
# Calculate grid positions to cover the page when rotated
center = fitz.Point(page_width / 2, page_height / 2)
# Calculate start and end points for coverage
start = -diagonal
end = diagonal * 2
y = start
row = 0
while y < end:
x = start + (spacing_x / 2 if row % 2 else 0)
while x < end:
# Create text position
text_point = fitz.Point(x, y)
# Apply rotation around center
cos_a = math.cos(angle_rad)
sin_a = math.sin(angle_rad)
# Translate to origin, rotate, translate back
rx = text_point.x - center.x
ry = text_point.y - center.y
new_x = rx * cos_a - ry * sin_a + center.x
new_y = rx * sin_a + ry * cos_a + center.y
# Check if the rotated point is within page bounds (with margin)
margin = 50
if (-margin <= new_x <= page_width + margin and
-margin <= new_y <= page_height + margin):
# Insert text using shape with rotation via morph
text_rect = fitz.Rect(new_x, new_y, new_x + text_length + 10, new_y + 30)
# Use insert_textbox with morph for rotation
pivot = fitz.Point(new_x, new_y)
morph = (pivot, fitz.Matrix(1, 0, 0, 1, 0, 0).prerotate(self.WATERMARK_ANGLE))
shape.insert_textbox(
text_rect,
watermark_text,
fontname="helv",
fontsize=self.WATERMARK_FONT_SIZE,
color=color,
fill_opacity=self.WATERMARK_OPACITY,
morph=morph
)
x += spacing_x
y += spacing_y
row += 1
# Commit the shape drawings
shape.commit(overlay=True)
# Save to bytes
output = io.BytesIO()
doc.save(output)
doc.close()
output.seek(0)
logger.info(
f"PDF watermark applied successfully for user {user_name} "
f"(employee_id: {employee_id}), pages: {page_count}"
)
return output.getvalue()
def is_supported_image(self, mime_type: str) -> bool:
"""Check if the mime type is a supported image format."""
supported_types = {'image/png', 'image/jpeg', 'image/jpg'}
return mime_type.lower() in supported_types
def is_supported_pdf(self, mime_type: str) -> bool:
"""Check if the mime type is a PDF."""
return mime_type.lower() == 'application/pdf'
def supports_watermark(self, mime_type: str) -> bool:
"""Check if the file type supports watermarking."""
return self.is_supported_image(mime_type) or self.is_supported_pdf(mime_type)
# Singleton instance
watermark_service = WatermarkService()
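A sketch of how a download path might route through the singleton; the helper name and the user attribute access are illustrative assumptions:

def apply_download_watermark(file_bytes: bytes, mime_type: str, user) -> bytes:
    # Stamp supported types; pass everything else through unchanged
    if watermark_service.is_supported_image(mime_type):
        watermarked, _fmt = watermark_service.add_image_watermark(
            file_bytes, user.name, getattr(user, "employee_id", None)
        )
        return watermarked
    if watermark_service.is_supported_pdf(mime_type):
        return watermark_service.add_pdf_watermark(
            file_bytes, user.name, getattr(user, "employee_id", None)
        )
    return file_bytes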

View File

@@ -184,12 +184,17 @@ def get_workload_heatmap(
Returns:
List of UserWorkloadSummary objects
"""
from datetime import datetime
from collections import defaultdict
if week_start is None:
    week_start = get_current_week_start()
# Normalize to week start (Monday)
week_start = get_week_bounds(week_start)[0]
# Capture both bounds so week_end is defined on every code path
week_start, week_end = get_week_bounds(week_start)
# Build user query
query = db.query(User).filter(User.is_active == True)
@@ -201,10 +206,58 @@ def get_workload_heatmap(
users = query.options(joinedload(User.department)).all()
# Calculate workload for each user
if not users:
return []
# Batch query: fetch all tasks for all users in one query
user_id_list = [user.id for user in users]
week_start_dt = datetime.combine(week_start, datetime.min.time())
week_end_dt = datetime.combine(week_end, datetime.max.time())
all_tasks = (
db.query(Task)
.join(Task.status, isouter=True)
.filter(
Task.assignee_id.in_(user_id_list),
Task.due_date >= week_start_dt,
Task.due_date <= week_end_dt,
# Exclude completed tasks
(TaskStatus.is_done == False) | (Task.status_id == None)
)
.all()
)
# Group tasks by assignee_id in memory
tasks_by_user: dict = defaultdict(list)
for task in all_tasks:
tasks_by_user[task.assignee_id].append(task)
# Calculate workload for each user using pre-fetched tasks
results = []
for user in users:
summary = calculate_user_workload(db, user, week_start)
user_tasks = tasks_by_user.get(user.id, [])
# Calculate allocated hours from original_estimate
allocated_hours = Decimal("0")
for task in user_tasks:
if task.original_estimate:
allocated_hours += task.original_estimate
capacity_hours = Decimal(str(user.capacity)) if user.capacity else Decimal("40")
load_percentage = calculate_load_percentage(allocated_hours, capacity_hours)
load_level = determine_load_level(load_percentage)
summary = UserWorkloadSummary(
user_id=user.id,
user_name=user.name,
department_id=user.department_id,
department_name=user.department.name if user.department else None,
capacity_hours=capacity_hours,
allocated_hours=allocated_hours,
load_percentage=load_percentage,
load_level=load_level,
task_count=len(user_tasks),
)
results.append(summary)
return results
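For reference, a sketch of exercising the batched path; the argument order and an already-open Session named db are assumptions for illustration:

from datetime import date

# One Task query is issued regardless of the number of active users
heatmap = get_workload_heatmap(db, week_start=date(2026, 1, 5))
for row in heatmap:
    print(row.user_name, row.load_percentage, row.load_level)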