## Security Enhancements (P0)

- Add input validation with max_length and numeric range constraints (see the schema sketch below)
- Implement WebSocket token authentication via first message
- Add path traversal prevention in file storage service (see the path-check sketch below)

## Permission Enhancements (P0)

- Add project member management for cross-department access
- Implement is_department_manager flag for workload visibility

## Cycle Detection (P0)

- Add DFS-based cycle detection for task dependencies (see the DFS sketch below)
- Add formula field circular reference detection
- Display user-friendly cycle path visualization

## Concurrency & Reliability (P1)

- Implement optimistic locking with version field (409 Conflict on mismatch; see the locking sketch below)
- Add trigger retry mechanism with exponential backoff (1s, 2s, 4s; see the retry sketch below)
- Implement cascade restore for soft-deleted tasks

## Rate Limiting (P1)

- Add tiered rate limits: standard (60/min), sensitive (20/min), heavy (5/min) (see the limiter sketch below)
- Apply rate limits to tasks, reports, attachments, and comments

## Frontend Improvements (P1)

- Add responsive sidebar with hamburger menu for mobile
- Improve touch-friendly UI with proper tap target sizes
- Complete i18n translations for all components

## Backend Reliability (P2)

- Configure database connection pool (size=10, overflow=20)
- Add Redis fallback mechanism with message queue
- Add blocker check before task deletion

## API Enhancements (P3)

- Add standardized response wrapper utility
- Add /health/ready and /health/live endpoints (see the health-check sketch after the module listing)
- Implement project templates with status/field copying

## Tests Added

- test_input_validation.py - Schema and path traversal tests
- test_concurrency_reliability.py - Optimistic locking and retry tests
- test_backend_reliability.py - Connection pool and Redis tests
- test_api_enhancements.py - Health check and template tests

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
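The validation constraints above can be expressed directly in the request schemas. A minimal sketch assuming Pydantic; the `TaskCreate` name, field names, and bounds are illustrative, not the project's actual schema:

```python
from typing import Optional

from pydantic import BaseModel, Field


class TaskCreate(BaseModel):
    """Illustrative schema; field names and limits are hypothetical."""

    title: str = Field(..., min_length=1, max_length=200)
    description: Optional[str] = Field(None, max_length=5000)
    estimated_hours: Optional[float] = Field(None, ge=0, le=1000)
    priority: int = Field(0, ge=0, le=3)
```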
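For path traversal prevention, the usual pattern is to resolve the requested name against the storage root and reject anything that escapes it. A sketch, with `STORAGE_ROOT` and the helper name as assumptions:

```python
import os

STORAGE_ROOT = "/var/app/uploads"  # assumed storage root


def safe_storage_path(filename: str) -> str:
    """Resolve a user-supplied filename and reject paths escaping STORAGE_ROOT."""
    # realpath() normalizes ".." segments and resolves symlinks before comparing.
    candidate = os.path.realpath(os.path.join(STORAGE_ROOT, filename))
    root = os.path.realpath(STORAGE_ROOT)
    if os.path.commonpath([root, candidate]) != root:
        raise ValueError("Path traversal attempt detected")
    return candidate
```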
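The DFS-based cycle detection can also produce the user-friendly cycle path by tracking the current DFS stack. A sketch over a hypothetical adjacency mapping; a production version would likely use an iterative DFS to avoid recursion limits on deep graphs:

```python
from typing import Dict, List, Optional


def find_dependency_cycle(graph: Dict[int, List[int]]) -> Optional[List[int]]:
    """Return one dependency cycle as a list of task IDs, or None if acyclic.

    `graph` maps a task ID to the IDs it depends on (a hypothetical shape;
    the real service presumably builds this from the dependency table).
    """
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on current path / fully explored
    color = {node: WHITE for node in graph}
    stack: List[int] = []  # current DFS path, used to report the cycle

    def dfs(node: int) -> Optional[List[int]]:
        color[node] = GRAY
        stack.append(node)
        for dep in graph.get(node, []):
            state = color.get(dep, WHITE)
            if state == GRAY:
                # Back edge: slice the path to show a user-friendly cycle.
                return stack[stack.index(dep):] + [dep]
            if state == WHITE:
                cycle = dfs(dep)
                if cycle:
                    return cycle
        stack.pop()
        color[node] = BLACK
        return None

    for node in list(graph):
        if color[node] == WHITE:
            cycle = dfs(node)
            if cycle:
                return cycle
    return None


# Example: 1 -> 2 -> 3 -> 1 is reported as [1, 2, 3, 1].
assert find_dependency_cycle({1: [2], 2: [3], 3: [1]}) == [1, 2, 3, 1]
```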
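Optimistic locking typically puts the client's version in the UPDATE's WHERE clause and answers 409 when no row matches. A sketch assuming SQLAlchemy 1.4+ and FastAPI, with a hypothetical minimal `Task` model (not the project's real one):

```python
from fastapi import HTTPException
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Task(Base):
    """Hypothetical minimal model with a version column."""

    __tablename__ = "tasks"

    id = Column(Integer, primary_key=True)
    title = Column(String(200))
    version = Column(Integer, nullable=False, default=1)


def update_task(db: Session, task_id: int, payload: dict, expected_version: int) -> None:
    """Apply `payload` only if `expected_version` still matches the stored row."""
    rows = (
        db.query(Task)
        .filter(Task.id == task_id, Task.version == expected_version)
        .update({**payload, "version": Task.version + 1}, synchronize_session=False)
    )
    if rows == 0:
        db.rollback()
        # Another writer bumped the version; the client must re-read and retry.
        raise HTTPException(status_code=409, detail="Version conflict")
    db.commit()
```

On a 409 the client reloads the task, reapplies its change against the fresh version, and resubmits.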
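The trigger retry with the 1s, 2s, 4s schedule amounts to doubling a base delay between attempts. A generic sketch; the `trigger` callable stands in for whatever the real trigger runner invokes:

```python
import logging
import time

logger = logging.getLogger(__name__)


def run_trigger_with_retry(trigger, max_retries: int = 3, base_delay: float = 1.0):
    """Call `trigger` once, then retry up to `max_retries` times on failure.

    Waits between attempts follow the 1s, 2s, 4s schedule from the
    changelog: base_delay * 2**retry_index.
    """
    for retry in range(max_retries + 1):
        try:
            return trigger()
        except Exception as exc:  # intentionally broad for a sketch
            if retry == max_retries:
                raise
            delay = base_delay * (2 ** retry)
            logger.warning("Trigger failed (%s); retrying in %.0fs", exc, delay)
            time.sleep(delay)
```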
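One way to realize the tiers is a per-client sliding window keyed by tier. The budgets below come straight from the list above; everything else (in-memory storage, the `allow_request` name) is an assumption of this sketch, and a real deployment would likely back it with Redis:

```python
import time
from collections import defaultdict, deque

# Per-tier request budgets from the changelog, per 60-second window.
RATE_TIERS = {"standard": 60, "sensitive": 20, "heavy": 5}
WINDOW_SECONDS = 60

_requests = defaultdict(deque)  # (client_id, tier) -> recent request timestamps


def allow_request(client_id: str, tier: str) -> bool:
    """Sliding-window check: True if this request fits the tier's budget."""
    now = time.monotonic()
    window = _requests[(client_id, tier)]
    # Evict timestamps that have aged out of the window.
    while window and now - window[0] >= WINDOW_SECONDS:
        window.popleft()
    if len(window) >= RATE_TIERS[tier]:
        return False  # caller should answer 429 Too Many Requests
    window.append(now)
    return True
```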
The connection pool configured under Backend Reliability (P2) is implemented in the database session module:

```python
import logging
import threading
import os

from sqlalchemy import create_engine, event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from app.core.config import settings

logger = logging.getLogger(__name__)

# Connection pool configuration with environment variable overrides
POOL_SIZE = int(os.getenv("DB_POOL_SIZE", "10"))
MAX_OVERFLOW = int(os.getenv("DB_MAX_OVERFLOW", "20"))
POOL_TIMEOUT = int(os.getenv("DB_POOL_TIMEOUT", "30"))
POOL_STATS_INTERVAL = int(os.getenv("DB_POOL_STATS_INTERVAL", "300"))  # 5 minutes

engine = create_engine(
    settings.DATABASE_URL,
    pool_pre_ping=True,
    pool_recycle=3600,
    pool_size=POOL_SIZE,
    max_overflow=MAX_OVERFLOW,
    pool_timeout=POOL_TIMEOUT,
)

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()

# Connection pool statistics tracking
_pool_stats_lock = threading.Lock()
_pool_stats = {
    "checkouts": 0,
    "checkins": 0,
    "overflow_connections": 0,
    "invalidated_connections": 0,
}


def _log_pool_statistics():
    """Log current connection pool statistics."""
    pool = engine.pool
    with _pool_stats_lock:
        logger.info(
            "Database connection pool statistics: "
            "size=%d, checked_in=%d, overflow=%d, "
            "total_checkouts=%d, total_checkins=%d, invalidated=%d",
            pool.size(),
            pool.checkedin(),
            pool.overflow(),
            _pool_stats["checkouts"],
            _pool_stats["checkins"],
            _pool_stats["invalidated_connections"],
        )


def _start_pool_stats_logging():
    """Start periodic logging of connection pool statistics."""
    if POOL_STATS_INTERVAL <= 0:
        return

    def log_stats():
        _log_pool_statistics()
        # Schedule next log
        timer = threading.Timer(POOL_STATS_INTERVAL, log_stats)
        timer.daemon = True
        timer.start()

    # Start the first timer
    timer = threading.Timer(POOL_STATS_INTERVAL, log_stats)
    timer.daemon = True
    timer.start()
    logger.info(
        "Database connection pool initialized: "
        "pool_size=%d, max_overflow=%d, pool_timeout=%d, stats_interval=%ds",
        POOL_SIZE, MAX_OVERFLOW, POOL_TIMEOUT, POOL_STATS_INTERVAL,
    )


# Register pool event listeners for statistics
@event.listens_for(engine, "checkout")
def _on_checkout(dbapi_conn, connection_record, connection_proxy):
    """Track connection checkout events."""
    with _pool_stats_lock:
        _pool_stats["checkouts"] += 1


@event.listens_for(engine, "checkin")
def _on_checkin(dbapi_conn, connection_record):
    """Track connection checkin events."""
    with _pool_stats_lock:
        _pool_stats["checkins"] += 1


@event.listens_for(engine, "invalidate")
def _on_invalidate(dbapi_conn, connection_record, exception):
    """Track connection invalidation events."""
    with _pool_stats_lock:
        _pool_stats["invalidated_connections"] += 1
    if exception:
        logger.warning("Database connection invalidated due to exception: %s", exception)


# Start pool statistics logging on module load
_start_pool_stats_logging()


def get_db():
    """Dependency for getting database session."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


def get_pool_status() -> dict:
    """Get current connection pool status for health checks."""
    pool = engine.pool
    with _pool_stats_lock:
        return {
            "pool_size": pool.size(),
            "checked_in": pool.checkedin(),
            "checked_out": pool.checkedout(),
            "overflow": pool.overflow(),
            "total_checkouts": _pool_stats["checkouts"],
            "total_checkins": _pool_stats["checkins"],
            "invalidated_connections": _pool_stats["invalidated_connections"],
        }
```
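The `get_pool_status()` helper above pairs naturally with the /health/ready and /health/live endpoints from the API Enhancements section. A sketch of the wiring, assuming FastAPI; the import path and the readiness probe (`SELECT 1`) are assumptions, not the project's actual code:

```python
from fastapi import APIRouter, Response, status
from sqlalchemy import text

from app.db.session import engine, get_pool_status  # assumed module path

router = APIRouter()


@router.get("/health/live")
def health_live():
    """Liveness: the process is up and able to answer."""
    return {"status": "ok"}


@router.get("/health/ready")
def health_ready(response: Response):
    """Readiness: verify a connection can be checked out, report pool stats."""
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
    except Exception:
        response.status_code = status.HTTP_503_SERVICE_UNAVAILABLE
        return {"status": "not_ready", "pool": get_pool_status()}
    return {"status": "ok", "pool": get_pool_status()}
```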