fix: logging, warnings, and soft-delete consistency

- Fix duplicate logging in multi-worker mode with a file lock for the cleanup scheduler
- Add Pydantic V2 model_config to suppress the protected_namespaces warning
- Suppress PaddlePaddle ccache warnings
- Fix admin.py using non-existent User.username (now uses email)
- Fix get_user_stats to exclude soft-deleted tasks from statistics
- Fix create_task to exclude soft-deleted tasks from the user limit check
- Change LOG_LEVEL default to INFO

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -5,7 +5,10 @@ Background scheduler for periodic file cleanup
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import fcntl
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
@@ -16,11 +19,50 @@ from app.services.cleanup_service import cleanup_service
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Lock file path for multi-worker coordination
|
||||
_LOCK_FILE = Path(settings.log_file).parent / ".cleanup_scheduler.lock"
|
||||
_lock_fd = None
|
||||
|
||||
|
||||
def _try_acquire_lock() -> bool:
    """
    Try to acquire the exclusive scheduler lock.

    Only one worker process should run the cleanup scheduler; the first
    worker to obtain a non-blocking ``flock`` on the shared lock file wins.
    The winning process's PID is written into the file for debugging only —
    it plays no role in the locking itself.

    Returns:
        True if the lock was acquired, False if another process holds it
        (or the lock file could not be created or locked).
    """
    global _lock_fd
    try:
        _LOCK_FILE.parent.mkdir(parents=True, exist_ok=True)
        _lock_fd = open(_LOCK_FILE, 'w')
        # LOCK_NB makes this fail immediately instead of blocking when
        # another worker already holds the lock.
        fcntl.flock(_lock_fd.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        _lock_fd.write(str(os.getpid()))
        _lock_fd.flush()
        return True
    except OSError:
        # Lock is held by another process, or the file is unusable.
        # IOError has been an alias of OSError since Python 3.3, so
        # OSError alone covers both open() and flock() failures.
        if _lock_fd:
            _lock_fd.close()
            _lock_fd = None
        return False
|
||||
|
||||
|
||||
def _release_lock():
    """Drop the exclusive scheduler lock if this process holds it."""
    global _lock_fd
    fd = _lock_fd
    if fd is None:
        return
    try:
        fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
        fd.close()
    except Exception:
        # Best-effort cleanup: the OS releases the flock on process
        # exit regardless, so a failed unlock here is harmless.
        pass
    _lock_fd = None
|
||||
|
||||
|
||||
class CleanupScheduler:
|
||||
"""
|
||||
Background scheduler for periodic file cleanup.
|
||||
Uses asyncio for non-blocking background execution.
|
||||
Uses file lock to ensure only one instance runs across multiple workers.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
@@ -29,6 +71,7 @@ class CleanupScheduler:
|
||||
self._last_run: Optional[datetime] = None
|
||||
self._next_run: Optional[datetime] = None
|
||||
self._last_result: Optional[dict] = None
|
||||
self._has_lock: bool = False
|
||||
|
||||
@property
|
||||
def is_running(self) -> bool:
|
||||
@@ -55,9 +98,15 @@ class CleanupScheduler:
|
||||
return
|
||||
|
||||
if self.is_running:
|
||||
logger.warning("Cleanup scheduler is already running")
|
||||
logger.debug("Cleanup scheduler is already running")
|
||||
return
|
||||
|
||||
# Try to acquire lock - only one worker should run the scheduler
|
||||
if not _try_acquire_lock():
|
||||
logger.debug("Another worker is running the cleanup scheduler, skipping")
|
||||
return
|
||||
|
||||
self._has_lock = True
|
||||
self._running = True
|
||||
self._task = asyncio.create_task(self._run_loop())
|
||||
logger.info(
|
||||
@@ -77,7 +126,11 @@ class CleanupScheduler:
|
||||
pass
|
||||
self._task = None
|
||||
|
||||
logger.info("Cleanup scheduler stopped")
|
||||
# Release the lock if we had it
|
||||
if self._has_lock:
|
||||
_release_lock()
|
||||
self._has_lock = False
|
||||
logger.info("Cleanup scheduler stopped")
|
||||
|
||||
async def _run_loop(self):
|
||||
"""Main scheduler loop"""
|
||||
|
||||
@@ -41,9 +41,11 @@ class TaskService:
|
||||
# Generate unique task ID
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
# Check user's task limit
|
||||
# Check user's task limit (excluding soft-deleted tasks)
|
||||
if settings.max_tasks_per_user > 0:
|
||||
user_task_count = db.query(Task).filter(Task.user_id == user_id).count()
|
||||
user_task_count = db.query(Task).filter(
|
||||
and_(Task.user_id == user_id, Task.deleted_at.is_(None))
|
||||
).count()
|
||||
if user_task_count >= settings.max_tasks_per_user:
|
||||
# Auto-delete oldest completed tasks to make room
|
||||
self._cleanup_old_tasks(db, user_id, limit=10)
|
||||
@@ -359,7 +361,7 @@ class TaskService:
|
||||
|
||||
def get_user_stats(self, db: Session, user_id: int) -> dict:
|
||||
"""
|
||||
Get statistics for a user's tasks
|
||||
Get statistics for a user's tasks (excluding soft-deleted tasks)
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
@@ -368,29 +370,32 @@ class TaskService:
|
||||
Returns:
|
||||
Dictionary with task statistics
|
||||
"""
|
||||
total = db.query(Task).filter(Task.user_id == user_id).count()
|
||||
# Base filter: user's non-deleted tasks
|
||||
base_filter = and_(Task.user_id == user_id, Task.deleted_at.is_(None))
|
||||
|
||||
total = db.query(Task).filter(base_filter).count()
|
||||
|
||||
pending = (
|
||||
db.query(Task)
|
||||
.filter(and_(Task.user_id == user_id, Task.status == TaskStatus.PENDING))
|
||||
.filter(and_(base_filter, Task.status == TaskStatus.PENDING))
|
||||
.count()
|
||||
)
|
||||
|
||||
processing = (
|
||||
db.query(Task)
|
||||
.filter(and_(Task.user_id == user_id, Task.status == TaskStatus.PROCESSING))
|
||||
.filter(and_(base_filter, Task.status == TaskStatus.PROCESSING))
|
||||
.count()
|
||||
)
|
||||
|
||||
completed = (
|
||||
db.query(Task)
|
||||
.filter(and_(Task.user_id == user_id, Task.status == TaskStatus.COMPLETED))
|
||||
.filter(and_(base_filter, Task.status == TaskStatus.COMPLETED))
|
||||
.count()
|
||||
)
|
||||
|
||||
failed = (
|
||||
db.query(Task)
|
||||
.filter(and_(Task.user_id == user_id, Task.status == TaskStatus.FAILED))
|
||||
.filter(and_(base_filter, Task.status == TaskStatus.FAILED))
|
||||
.count()
|
||||
)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user