fix: logging, warnings, and soft-delete consistency

- Fix duplicate logging in multi-worker mode by adding a file lock for the cleanup scheduler
- Add Pydantic V2 model_config to suppress protected_namespaces warning
- Suppress PaddlePaddle ccache warnings
- Fix admin.py using non-existent User.username (now uses email)
- Fix get_user_stats to exclude soft-deleted tasks from statistics
- Fix create_task to exclude soft-deleted tasks from user limit check
- Change LOG_LEVEL default to INFO

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
egg
2025-12-14 15:40:31 +08:00
parent f46402f6c9
commit 7233e9cb7b
6 changed files with 132 additions and 43 deletions

View File

@@ -5,7 +5,10 @@ Background scheduler for periodic file cleanup
import asyncio
import logging
import os
import fcntl
from datetime import datetime
from pathlib import Path
from typing import Optional
from sqlalchemy.orm import Session
@@ -16,11 +19,50 @@ from app.services.cleanup_service import cleanup_service
logger = logging.getLogger(__name__)
# Lock file path for multi-worker coordination
_LOCK_FILE = Path(settings.log_file).parent / ".cleanup_scheduler.lock"
_lock_fd = None
def _try_acquire_lock() -> bool:
    """
    Try to acquire an exclusive advisory lock for the scheduler.

    Only one worker process should run the cleanup scheduler; the first
    worker to flock() the lock file wins and the others back off.

    Returns:
        True if the lock was acquired, False if another process already
        holds it (or the lock file could not be created/opened).
    """
    global _lock_fd
    fd = None
    try:
        _LOCK_FILE.parent.mkdir(parents=True, exist_ok=True)
        fd = open(_LOCK_FILE, 'w')
        # LOCK_NB makes the call non-blocking: it raises OSError
        # immediately if another process holds the lock.
        fcntl.flock(fd.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        # Record our PID in the lock file to aid debugging.
        fd.write(str(os.getpid()))
        fd.flush()
        # Publish the fd only after the lock is actually held.
        _lock_fd = fd
        return True
    except OSError:
        # IOError is an alias of OSError in Python 3, so one clause covers
        # both the original cases. Close the half-opened fd defensively:
        # close() can itself raise, and that must not propagate here.
        if fd is not None:
            try:
                fd.close()
            except OSError:
                pass
        _lock_fd = None
        return False
def _release_lock():
    """Release the scheduler's exclusive file lock, if this process holds it."""
    global _lock_fd
    fd = _lock_fd
    _lock_fd = None
    if fd is None:
        return
    try:
        fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
        fd.close()
    except Exception:
        # Best-effort cleanup during shutdown; a failed unlock is harmless
        # because the OS drops advisory locks when the process exits.
        pass
class CleanupScheduler:
"""
Background scheduler for periodic file cleanup.
Uses asyncio for non-blocking background execution.
Uses file lock to ensure only one instance runs across multiple workers.
"""
def __init__(self):
@@ -29,6 +71,7 @@ class CleanupScheduler:
self._last_run: Optional[datetime] = None
self._next_run: Optional[datetime] = None
self._last_result: Optional[dict] = None
self._has_lock: bool = False
@property
def is_running(self) -> bool:
@@ -55,9 +98,15 @@ class CleanupScheduler:
return
if self.is_running:
logger.warning("Cleanup scheduler is already running")
logger.debug("Cleanup scheduler is already running")
return
# Try to acquire lock - only one worker should run the scheduler
if not _try_acquire_lock():
logger.debug("Another worker is running the cleanup scheduler, skipping")
return
self._has_lock = True
self._running = True
self._task = asyncio.create_task(self._run_loop())
logger.info(
@@ -77,7 +126,11 @@ class CleanupScheduler:
pass
self._task = None
logger.info("Cleanup scheduler stopped")
# Release the lock if we had it
if self._has_lock:
_release_lock()
self._has_lock = False
logger.info("Cleanup scheduler stopped")
async def _run_loop(self):
"""Main scheduler loop"""