Two changes combined: 1. historical-query-slow-connection: Migrate all historical query pages to read_sql_df_slow with semaphore concurrency control (max 3), raise DB slow timeout to 300s, gunicorn timeout to 360s, and unify frontend timeouts to 360s for all historical pages. 2. hold-resource-history-dataset-cache: Convert hold-history and resource-history from multi-query to single-query + dataset cache pattern (L1 ProcessLevelCache + L2 Redis parquet/base64, TTL=900s). Replace old GET endpoints with POST /query + GET /view two-phase API. Frontend auto-retries on 410 cache_expired. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
53 lines
2.0 KiB
Python
53 lines
2.0 KiB
Python
import os
|
|
|
|
bind = os.getenv("GUNICORN_BIND", "0.0.0.0:8080")


def _env_int(name, default):
    """Read an integer gunicorn setting from the environment.

    Falls back to *default* (an int) when the variable is unset.
    Raises ValueError if the variable is set to a non-integer string,
    which fails fast at startup rather than mid-request.
    """
    return int(os.getenv(name, str(default)))


workers = _env_int("GUNICORN_WORKERS", 2)  # 2 workers for redundancy
threads = _env_int("GUNICORN_THREADS", 4)
worker_class = "gthread"

# Timeout settings - critical for dashboard stability.
# Keep this above slow-query timeout paths (e.g. read_sql_df_slow 300s) and DB pool timeout.
timeout = _env_int("GUNICORN_TIMEOUT", 360)
graceful_timeout = _env_int("GUNICORN_GRACEFUL_TIMEOUT", 120)
keepalive = 5  # Keep-alive connections timeout

# Worker lifecycle management - prevent state accumulation.
# Make these configurable so high-load test environments can raise the ceiling.
max_requests = _env_int("GUNICORN_MAX_REQUESTS", 5000)
max_requests_jitter = _env_int("GUNICORN_MAX_REQUESTS_JITTER", 500)
|
|
|
|
|
|
# ============================================================
|
|
# Worker Lifecycle Hooks
|
|
# ============================================================
|
|
|
|
def worker_exit(server, worker):
    """Tear down per-worker background machinery when a gunicorn worker exits.

    Each cleanup step is fully isolated: any failure (including the project
    module simply not being importable) is logged as a warning and the
    remaining steps still run. Background sync threads are stopped first and
    database connections are disposed last, so nothing is still using the
    engine when it goes away.
    """

    def _stop_equipment_sync():
        from mes_dashboard.services.realtime_equipment_cache import (
            stop_equipment_status_sync_worker
        )
        stop_equipment_status_sync_worker()

    def _stop_cache_updater():
        from mes_dashboard.core.cache_updater import stop_cache_updater
        stop_cache_updater()

    def _close_redis():
        from mes_dashboard.core.redis_client import close_redis
        close_redis()

    def _dispose_db_engine():
        from mes_dashboard.core.database import dispose_engine
        dispose_engine()

    # Ordered shutdown table: background threads first, DB engine last.
    cleanup_steps = (
        (_stop_equipment_sync, "Error stopping equipment sync worker"),
        (_stop_cache_updater, "Error stopping cache updater"),
        (_close_redis, "Error closing redis client"),
        (_dispose_db_engine, "Error disposing database engine"),
    )

    for step, label in cleanup_steps:
        try:
            step()
        except Exception as e:
            server.log.warning(f"{label}: {e}")