feat(trace-pipeline): memory triage, async job queue, and NDJSON streaming

Three proposals addressing the 2026-02-25 trace pipeline OOM crash (114K CIDs):

1. trace-events-memory-triage: fetchmany iterator (read_sql_df_slow_iter),
   admission control (50K CID limit for non-MSD), cache skip for large queries,
   early memory release with gc.collect()

2. trace-async-job-queue: RQ-based async jobs for queries >20K CIDs,
   separate worker process with isolated memory, frontend polling via
   useTraceProgress composable, systemd service + deploy scripts

3. trace-streaming-response: chunked Redis storage (TRACE_STREAM_BATCH_SIZE=5000),
   NDJSON stream endpoint (GET /api/trace/job/<id>/stream), frontend
   ReadableStream consumer for progressive rendering, backward-compatible
   with legacy single-key storage

All three proposals archived. 1101 tests pass, frontend builds clean.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
egg
2026-02-25 21:01:27 +08:00
parent cbb943dfe5
commit dbe0da057c
32 changed files with 3140 additions and 87 deletions

View File

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
"""Unit tests for read_sql_df_slow_iter (fetchmany iterator)."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
import mes_dashboard.core.database as db
@patch.object(db, "oracledb")
@patch.object(db, "_get_slow_query_semaphore")
@patch.object(db, "get_db_runtime_config")
def test_slow_iter_yields_batches(mock_runtime, mock_sem_fn, mock_oracledb):
    """read_sql_df_slow_iter should yield (columns, rows) batches via fetchmany."""
    mock_runtime.return_value = {
        "slow_call_timeout_ms": 60000,
        "slow_fetchmany_size": 2,
        "tcp_connect_timeout": 10,
        "retry_count": 1,
        "retry_delay": 1.0,
    }
    semaphore = MagicMock()
    semaphore.acquire.return_value = True
    mock_sem_fn.return_value = semaphore
    fake_cursor = MagicMock()
    fake_cursor.description = [("COL_A",), ("COL_B",)]
    # Two non-empty batches, then an empty one to terminate iteration.
    fake_cursor.fetchmany.side_effect = [
        [("r1a", "r1b"), ("r2a", "r2b")],
        [("r3a", "r3b")],
        [],
    ]
    fake_conn = MagicMock()
    fake_conn.cursor.return_value = fake_cursor
    mock_oracledb.connect.return_value = fake_conn
    produced = list(db.read_sql_df_slow_iter("SELECT 1", {"p0": "x"}, batch_size=2))
    column_names = ["COL_A", "COL_B"]
    assert produced == [
        (column_names, [("r1a", "r1b"), ("r2a", "r2b")]),
        (column_names, [("r3a", "r3b")]),
    ]
    fake_cursor.fetchmany.assert_called_with(2)
    fake_conn.close.assert_called_once()
    semaphore.release.assert_called_once()
@patch.object(db, "oracledb")
@patch.object(db, "_get_slow_query_semaphore")
@patch.object(db, "get_db_runtime_config")
def test_slow_iter_empty_result(mock_runtime, mock_sem_fn, mock_oracledb):
    """read_sql_df_slow_iter should yield nothing for empty result."""
    mock_runtime.return_value = {
        "slow_call_timeout_ms": 60000,
        "slow_fetchmany_size": 5000,
        "tcp_connect_timeout": 10,
        "retry_count": 1,
        "retry_delay": 1.0,
    }
    semaphore = MagicMock()
    semaphore.acquire.return_value = True
    mock_sem_fn.return_value = semaphore
    # First fetchmany already comes back empty -> no batches at all.
    empty_cursor = MagicMock()
    empty_cursor.description = [("ID",)]
    empty_cursor.fetchmany.return_value = []
    fake_conn = MagicMock()
    fake_conn.cursor.return_value = empty_cursor
    mock_oracledb.connect.return_value = fake_conn
    assert list(db.read_sql_df_slow_iter("SELECT 1")) == []
    fake_conn.close.assert_called_once()
    semaphore.release.assert_called_once()
@patch.object(db, "oracledb")
@patch.object(db, "_get_slow_query_semaphore")
@patch.object(db, "get_db_runtime_config")
def test_slow_iter_releases_on_error(mock_runtime, mock_sem_fn, mock_oracledb):
    """Semaphore and connection should be released even on error.

    The original ``try/except RuntimeError: pass`` would still pass if the
    iterator silently swallowed the error; we now also assert the exception
    actually propagates to the caller.
    """
    mock_runtime.return_value = {
        "slow_call_timeout_ms": 60000,
        "slow_fetchmany_size": 5000,
        "tcp_connect_timeout": 10,
        "retry_count": 1,
        "retry_delay": 1.0,
    }
    sem = MagicMock()
    sem.acquire.return_value = True
    mock_sem_fn.return_value = sem
    conn = MagicMock()
    conn.cursor.side_effect = RuntimeError("cursor failed")
    mock_oracledb.connect.return_value = conn
    raised = False
    try:
        list(db.read_sql_df_slow_iter("SELECT 1"))
    except RuntimeError:
        raised = True
    assert raised, "read_sql_df_slow_iter should propagate the cursor error"
    # Cleanup must happen despite the failure.
    conn.close.assert_called_once()
    sem.release.assert_called_once()
def test_runtime_config_includes_fetchmany_size():
    """get_db_runtime_config should include slow_fetchmany_size.

    The original version set ``db._DB_RUNTIME_CONFIG = None`` and never
    restored it, leaking state into tests that run afterwards; the cached
    config is now saved and restored in a try/finally.
    """
    saved_config = db._DB_RUNTIME_CONFIG
    # Force refresh to pick up current config
    db._DB_RUNTIME_CONFIG = None
    try:
        runtime = db.get_db_runtime_config(refresh=True)
        assert "slow_fetchmany_size" in runtime
        assert isinstance(runtime["slow_fetchmany_size"], int)
        assert runtime["slow_fetchmany_size"] > 0
    finally:
        db._DB_RUNTIME_CONFIG = saved_config

View File

@@ -5,11 +5,21 @@ from __future__ import annotations
from unittest.mock import patch
import pandas as pd
from mes_dashboard.services.event_fetcher import EventFetcher
def _iter_result(columns, rows):
"""Helper: create a generator that yields a single (columns, rows) batch."""
def _gen(*args, **kwargs):
yield columns, rows
return _gen
def _iter_empty(*args, **kwargs):
"""Helper: generator that yields nothing (empty result)."""
return iter([])
def test_cache_key_is_stable_for_sorted_ids():
key1 = EventFetcher._cache_key("history", ["CID-B", "CID-A", "CID-A"])
key2 = EventFetcher._cache_key("history", ["CID-A", "CID-B"])
@@ -29,98 +39,96 @@ def test_get_rate_limit_config_supports_env_override(monkeypatch):
assert config["window_seconds"] == 77
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.cache_get")
def test_fetch_events_cache_hit_skips_db(mock_cache_get, mock_read_sql_df):
def test_fetch_events_cache_hit_skips_db(mock_cache_get, mock_iter):
mock_cache_get.return_value = {"CID-1": [{"CONTAINERID": "CID-1"}]}
result = EventFetcher.fetch_events(["CID-1"], "materials")
assert result["CID-1"][0]["CONTAINERID"] == "CID-1"
mock_read_sql_df.assert_not_called()
mock_iter.assert_not_called()
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load_with_params")
def test_fetch_events_upstream_history_branch(
mock_sql_load,
mock_read_sql_df,
mock_iter,
_mock_cache_get,
mock_cache_set,
):
mock_sql_load.return_value = "SELECT * FROM UPSTREAM"
mock_read_sql_df.return_value = pd.DataFrame(
[
{"CONTAINERID": "CID-1", "WORKCENTER_GROUP": "DB"},
{"CONTAINERID": "CID-2", "WORKCENTER_GROUP": "WB"},
]
mock_iter.side_effect = _iter_result(
["CONTAINERID", "WORKCENTER_GROUP"],
[("CID-1", "DB"), ("CID-2", "WB")],
)
result = EventFetcher.fetch_events(["CID-1", "CID-2"], "upstream_history")
assert sorted(result.keys()) == ["CID-1", "CID-2"]
assert mock_sql_load.call_args.args[0] == "mid_section_defect/upstream_history"
_, params = mock_read_sql_df.call_args.args
assert len(params) == 2
sql_arg, params_arg = mock_iter.call_args.args
assert len(params_arg) == 2
mock_cache_set.assert_called_once()
assert mock_cache_set.call_args.args[0].startswith("evt:upstream_history:")
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load")
def test_fetch_events_history_branch_replaces_container_filter(
mock_sql_load,
mock_read_sql_df,
mock_iter,
_mock_cache_get,
_mock_cache_set,
):
mock_sql_load.return_value = (
"SELECT * FROM t WHERE h.CONTAINERID = :container_id {{ WORKCENTER_FILTER }}"
)
mock_read_sql_df.return_value = pd.DataFrame([])
mock_iter.side_effect = _iter_empty
EventFetcher.fetch_events(["CID-1"], "history")
sql, params = mock_read_sql_df.call_args.args
assert "h.CONTAINERID = :container_id" not in sql
assert "{{ WORKCENTER_FILTER }}" not in sql
assert params == {"p0": "CID-1"}
sql_arg, params_arg = mock_iter.call_args.args
assert "h.CONTAINERID = :container_id" not in sql_arg
assert "{{ WORKCENTER_FILTER }}" not in sql_arg
assert params_arg == {"p0": "CID-1"}
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load")
def test_fetch_events_materials_branch_replaces_aliased_container_filter(
mock_sql_load,
mock_read_sql_df,
mock_iter,
_mock_cache_get,
_mock_cache_set,
):
mock_sql_load.return_value = (
"SELECT * FROM t m WHERE m.CONTAINERID = :container_id ORDER BY TXNDATE"
)
mock_read_sql_df.return_value = pd.DataFrame([])
mock_iter.side_effect = _iter_empty
EventFetcher.fetch_events(["CID-1", "CID-2"], "materials")
sql, params = mock_read_sql_df.call_args.args
assert "m.CONTAINERID = :container_id" not in sql
assert "IN" in sql.upper()
assert params == {"p0": "CID-1", "p1": "CID-2"}
sql_arg, params_arg = mock_iter.call_args.args
assert "m.CONTAINERID = :container_id" not in sql_arg
assert "IN" in sql_arg.upper()
assert params_arg == {"p0": "CID-1", "p1": "CID-2"}
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load")
def test_fetch_events_rejects_branch_replaces_aliased_container_filter(
mock_sql_load,
mock_read_sql_df,
mock_iter,
_mock_cache_get,
_mock_cache_set,
):
@@ -128,23 +136,23 @@ def test_fetch_events_rejects_branch_replaces_aliased_container_filter(
"SELECT * FROM t r LEFT JOIN c ON c.CONTAINERID = r.CONTAINERID "
"WHERE r.CONTAINERID = :container_id ORDER BY r.TXNDATE"
)
mock_read_sql_df.return_value = pd.DataFrame([])
mock_iter.side_effect = _iter_empty
EventFetcher.fetch_events(["CID-1", "CID-2"], "rejects")
sql, params = mock_read_sql_df.call_args.args
assert "r.CONTAINERID = :container_id" not in sql
assert "IN" in sql.upper()
assert params == {"p0": "CID-1", "p1": "CID-2"}
sql_arg, params_arg = mock_iter.call_args.args
assert "r.CONTAINERID = :container_id" not in sql_arg
assert "IN" in sql_arg.upper()
assert params_arg == {"p0": "CID-1", "p1": "CID-2"}
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df")
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load")
def test_fetch_events_holds_branch_replaces_aliased_container_filter(
mock_sql_load,
mock_read_sql_df,
mock_iter,
_mock_cache_get,
_mock_cache_set,
):
@@ -152,19 +160,41 @@ def test_fetch_events_holds_branch_replaces_aliased_container_filter(
"SELECT * FROM t h LEFT JOIN c ON c.CONTAINERID = h.CONTAINERID "
"WHERE h.CONTAINERID = :container_id ORDER BY h.HOLDTXNDATE DESC"
)
mock_read_sql_df.return_value = pd.DataFrame([])
mock_iter.side_effect = _iter_empty
EventFetcher.fetch_events(["CID-1", "CID-2"], "holds")
sql, params = mock_read_sql_df.call_args.args
assert "h.CONTAINERID = :container_id" not in sql
assert "IN" in sql.upper()
assert params == {"p0": "CID-1", "p1": "CID-2"}
sql_arg, params_arg = mock_iter.call_args.args
assert "h.CONTAINERID = :container_id" not in sql_arg
assert "IN" in sql_arg.upper()
assert params_arg == {"p0": "CID-1", "p1": "CID-2"}
def test_event_fetcher_uses_slow_connection():
"""Regression: event_fetcher must use read_sql_df_slow (non-pooled)."""
def test_event_fetcher_uses_slow_iter_connection():
"""Regression: event_fetcher must use read_sql_df_slow_iter (non-pooled)."""
import mes_dashboard.services.event_fetcher as ef
from mes_dashboard.core.database import read_sql_df_slow
from mes_dashboard.core.database import read_sql_df_slow_iter
assert ef.read_sql_df is read_sql_df_slow
assert ef.read_sql_df_slow_iter is read_sql_df_slow_iter
@patch("mes_dashboard.services.event_fetcher.cache_set")
@patch("mes_dashboard.services.event_fetcher.cache_get", return_value=None)
@patch("mes_dashboard.services.event_fetcher.read_sql_df_slow_iter")
@patch("mes_dashboard.services.event_fetcher.SQLLoader.load_with_params")
def test_fetch_events_sanitizes_nan_values(
    mock_sql_load,
    mock_iter,
    _mock_cache_get,
    _mock_cache_set,
):
    """NaN float values in records should be replaced with None."""
    mock_sql_load.return_value = "SELECT * FROM UPSTREAM"
    nan_row = ("CID-1", float("nan"))
    mock_iter.side_effect = _iter_result(["CONTAINERID", "VALUE"], [nan_row])
    fetched = EventFetcher.fetch_events(["CID-1"], "upstream_history")
    # The NaN in VALUE must have been sanitized to a JSON-safe None.
    assert fetched["CID-1"][0]["VALUE"] is None

View File

@@ -0,0 +1,507 @@
# -*- coding: utf-8 -*-
"""Unit tests for trace_job_service (async trace job queue)."""
from __future__ import annotations
import json
from unittest.mock import MagicMock, patch
import pytest
import mes_dashboard.services.trace_job_service as tjs
# ---------------------------------------------------------------------------
# is_async_available
# ---------------------------------------------------------------------------
def test_is_async_available_true():
    """Should return True when rq is importable and Redis is up."""
    tjs._RQ_AVAILABLE = None  # reset cached availability flag
    fake_client = MagicMock()
    with patch.object(tjs, "get_redis_client", return_value=fake_client):
        assert tjs.is_async_available() is True
def test_is_async_available_false_no_redis():
    """Should return False when Redis is unavailable."""
    # Even with rq marked available, a missing Redis client must disable async.
    tjs._RQ_AVAILABLE = True
    with patch.object(tjs, "get_redis_client", return_value=None):
        assert not tjs.is_async_available()
# ---------------------------------------------------------------------------
# enqueue_trace_events_job
# ---------------------------------------------------------------------------
@patch.object(tjs, "_get_rq_queue")
@patch.object(tjs, "get_redis_client")
def test_enqueue_success(mock_redis, mock_queue_fn):
    """Enqueue should return a job_id and store metadata in Redis."""
    redis_conn = MagicMock()
    rq_queue = MagicMock()
    mock_redis.return_value = redis_conn
    mock_queue_fn.return_value = rq_queue
    job_id, err = tjs.enqueue_trace_events_job(
        "query_tool", ["CID-1", "CID-2"], ["history"], {"params": {}},
    )
    assert err is None
    assert job_id is not None
    assert job_id.startswith("trace-evt-")
    # The job itself is enqueued, and its metadata hash gets written + TTL'd.
    rq_queue.enqueue.assert_called_once()
    redis_conn.hset.assert_called_once()
    redis_conn.expire.assert_called_once()
@patch.object(tjs, "_get_rq_queue", return_value=None)
def test_enqueue_no_queue(mock_queue_fn):
    """Enqueue should return error when queue is unavailable."""
    job_id, err = tjs.enqueue_trace_events_job("query_tool", ["CID-1"], ["history"], {})
    assert job_id is None
    assert "unavailable" in err
@patch.object(tjs, "_get_rq_queue")
@patch.object(tjs, "get_redis_client")
def test_enqueue_queue_error(mock_redis, mock_queue_fn):
    """Enqueue should return error when queue.enqueue raises."""
    redis_conn = MagicMock()
    mock_redis.return_value = redis_conn
    failing_queue = MagicMock()
    failing_queue.enqueue.side_effect = RuntimeError("connection refused")
    mock_queue_fn.return_value = failing_queue
    job_id, err = tjs.enqueue_trace_events_job("query_tool", ["CID-1"], ["history"], {})
    assert job_id is None
    assert "connection refused" in err
    # The metadata key written before enqueue must be cleaned up on failure.
    redis_conn.delete.assert_called_once()
# ---------------------------------------------------------------------------
# get_job_status
# ---------------------------------------------------------------------------
@patch.object(tjs, "get_redis_client")
def test_get_job_status_found(mock_redis):
    """Should return status dict from Redis hash."""
    raw_hash = {
        "profile": "query_tool",
        "cid_count": "100",
        "domains": "history,materials",
        "status": "started",
        "progress": "fetching events",
        "created_at": "1740000000.0",
        "completed_at": "",
        "error": "",
    }
    redis_conn = MagicMock()
    redis_conn.hgetall.return_value = raw_hash
    mock_redis.return_value = redis_conn
    status = tjs.get_job_status("trace-evt-abc123")
    assert status is not None
    assert status["job_id"] == "trace-evt-abc123"
    assert status["status"] == "started"
    # Stringly-typed hash fields should come back parsed into native types.
    assert status["cid_count"] == 100
    assert status["domains"] == ["history", "materials"]
    assert status["error"] is None
@patch.object(tjs, "get_redis_client")
def test_get_job_status_not_found(mock_redis):
    """Should return None when job metadata does not exist."""
    redis_conn = MagicMock()
    redis_conn.hgetall.return_value = {}  # empty hash == unknown job
    mock_redis.return_value = redis_conn
    assert tjs.get_job_status("trace-evt-nonexistent") is None
# ---------------------------------------------------------------------------
# get_job_result
# ---------------------------------------------------------------------------
@patch.object(tjs, "get_redis_client")
def test_get_job_result_found_chunked(mock_redis):
    """Should return reconstructed result from chunked Redis keys."""
    meta_payload = {
        "profile": "query_tool",
        "domains": {"history": {"chunks": 1, "total": 1}},
        "failed_domains": [],
    }
    stored_by_suffix = {
        ":result:meta": json.dumps(meta_payload),
        ":result:history:0": json.dumps([{"CONTAINERID": "CID-1"}]),
    }

    def _fake_get(key):
        for suffix, payload in stored_by_suffix.items():
            if key.endswith(suffix):
                return payload
        return None  # aggregation and any other keys are absent

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    result = tjs.get_job_result("trace-evt-abc123")
    assert result is not None
    assert result["stage"] == "events"
    assert result["results"]["history"]["count"] == 1
    assert result["results"]["history"]["total"] == 1
@patch.object(tjs, "get_redis_client")
def test_get_job_result_found_legacy(mock_redis):
    """Should fall back to legacy single-key result when no chunked meta exists."""
    legacy_payload = {
        "stage": "events",
        "results": {"history": {"data": [{"CONTAINERID": "CID-1"}], "count": 1}},
        "aggregation": None,
    }

    def _fake_get(key):
        if key.endswith(":result:meta"):
            return None  # chunked meta absent -> legacy path
        return json.dumps(legacy_payload) if key.endswith(":result") else None

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    result = tjs.get_job_result("trace-evt-abc123")
    assert result is not None
    assert result["stage"] == "events"
    assert result["results"]["history"]["count"] == 1
@patch.object(tjs, "get_redis_client")
def test_get_job_result_not_found(mock_redis):
    """Should return None when result key does not exist."""
    redis_conn = MagicMock()
    redis_conn.get.return_value = None  # neither chunked meta nor legacy key
    mock_redis.return_value = redis_conn
    assert tjs.get_job_result("trace-evt-expired") is None
@patch.object(tjs, "get_redis_client")
def test_get_job_result_with_domain_filter(mock_redis):
    """Should return filtered result with pagination from chunked storage."""
    meta_payload = {
        "profile": "query_tool",
        "domains": {
            "history": {"chunks": 1, "total": 3},
            "materials": {"chunks": 1, "total": 1},
        },
        "failed_domains": [],
    }
    stored_by_suffix = {
        ":result:meta": json.dumps(meta_payload),
        ":result:history:0": json.dumps([{"id": 1}, {"id": 2}, {"id": 3}]),
        ":result:materials:0": json.dumps([{"id": 10}]),
    }

    def _fake_get(key):
        for suffix, payload in stored_by_suffix.items():
            if key.endswith(suffix):
                return payload
        return None  # aggregation absent

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    result = tjs.get_job_result("trace-evt-abc", domain="history", offset=1, limit=1)
    # Only the requested domain survives, sliced by offset/limit,
    # while the pre-filter total is preserved.
    assert "history" in result["results"]
    assert "materials" not in result["results"]
    assert result["results"]["history"]["data"] == [{"id": 2}]
    assert result["results"]["history"]["total"] == 3
# ---------------------------------------------------------------------------
# execute_trace_events_job (worker entry point)
# ---------------------------------------------------------------------------
@patch.object(tjs, "get_redis_client")
@patch("mes_dashboard.services.event_fetcher.EventFetcher.fetch_events")
def test_execute_job_success(mock_fetch, mock_redis):
    """Worker should fetch events, store result, and update meta to finished."""
    mock_fetch.return_value = {"CID-1": [{"CONTAINERID": "CID-1"}]}
    redis_conn = MagicMock()
    mock_redis.return_value = redis_conn
    tjs.execute_trace_events_job("test-job-1", "query_tool", ["CID-1"], ["history"], {})
    mock_fetch.assert_called_once_with(["CID-1"], "history")
    # Result should be stored as chunked keys: one data chunk + one result meta.
    setex_calls = [entry for entry in redis_conn.method_calls if entry[0] == "setex"]
    assert len(setex_calls) == 2

    def _stored_json(suffix):
        matching = [entry for entry in setex_calls if suffix in str(entry)]
        assert len(matching) == 1
        _name, args, _kwargs = matching[0]
        return json.loads(args[2])  # setex(key, ttl, value)

    stored_meta = _stored_json(":result:meta")
    assert "history" in stored_meta["domains"]
    assert stored_meta["domains"]["history"]["total"] == 1
    stored_chunk = _stored_json(":result:history:0")
    assert len(stored_chunk) == 1
    assert stored_chunk[0]["CONTAINERID"] == "CID-1"
    # Final hset must mark the job meta as finished.
    hset_calls = [entry for entry in redis_conn.method_calls if entry[0] == "hset"]
    _name, _args, kwargs = hset_calls[-1]
    assert kwargs["mapping"]["status"] == "finished"
@patch.object(tjs, "get_redis_client")
@patch("mes_dashboard.services.event_fetcher.EventFetcher.fetch_events")
def test_execute_job_domain_failure_records_partial(mock_fetch, mock_redis):
    """Domain fetch failure should result in partial failure, not job crash."""
    mock_fetch.side_effect = RuntimeError("db timeout")
    redis_conn = MagicMock()
    mock_redis.return_value = redis_conn
    tjs.execute_trace_events_job("test-job-2", "query_tool", ["CID-1"], ["history"], {})
    # Only the result meta is written -- no data chunks since the domain failed.
    setex_calls = [entry for entry in redis_conn.method_calls if entry[0] == "setex"]
    assert len(setex_calls) == 1
    _name, args, _kwargs = setex_calls[0]
    stored_meta = json.loads(args[2])  # setex(key, ttl, value)
    assert "history" in stored_meta["failed_domains"]
    # Job meta should still be finished (partial failure is not a job crash).
    hset_calls = [entry for entry in redis_conn.method_calls if entry[0] == "hset"]
    assert hset_calls[-1][2]["mapping"]["status"] == "finished"
# ---------------------------------------------------------------------------
# _store_chunked_result
# ---------------------------------------------------------------------------
@patch.object(tjs, "get_redis_client")
def test_store_chunked_result_splits_batches(mock_redis):
    """Large domain data should be split into multiple chunks."""
    redis_conn = MagicMock()
    mock_redis.return_value = redis_conn
    # 12 records with batch size 5 -> 3 chunks (5 + 5 + 2).
    domain_results = {"history": {"data": [{"id": i} for i in range(12)], "count": 12}}
    saved_batch_size = tjs.TRACE_STREAM_BATCH_SIZE
    tjs.TRACE_STREAM_BATCH_SIZE = 5
    try:
        tjs._store_chunked_result(redis_conn, "job-1", "query_tool", domain_results, None, [])
    finally:
        tjs.TRACE_STREAM_BATCH_SIZE = saved_batch_size
    setex_calls = [entry for entry in redis_conn.method_calls if entry[0] == "setex"]
    assert len(setex_calls) == 4  # 3 chunk keys + 1 result meta
    # Result meta records chunk count and total record count.
    meta_calls = [entry for entry in setex_calls if ":result:meta" in str(entry)]
    assert len(meta_calls) == 1
    meta = json.loads(meta_calls[0][1][2])
    assert meta["domains"]["history"]["chunks"] == 3
    assert meta["domains"]["history"]["total"] == 12
    # First chunk carries a full batch of 5 records.
    chunk_calls = [entry for entry in setex_calls if ":result:history:" in str(entry)]
    assert len(chunk_calls) == 3
    assert len(json.loads(chunk_calls[0][1][2])) == 5
@patch.object(tjs, "get_redis_client")
def test_store_chunked_result_with_aggregation(mock_redis):
    """Aggregation should be stored in a separate key."""
    redis_conn = MagicMock()
    mock_redis.return_value = redis_conn
    domain_results = {"history": {"data": [{"id": 1}], "count": 1}}
    tjs._store_chunked_result(
        redis_conn, "job-1", "mid_section_defect", domain_results,
        {"summary": {"total": 100}}, [],
    )
    setex_calls = [entry for entry in redis_conn.method_calls if entry[0] == "setex"]
    agg_calls = [entry for entry in setex_calls if ":result:aggregation" in str(entry)]
    assert len(agg_calls) == 1
    stored_agg = json.loads(agg_calls[0][1][2])  # setex(key, ttl, value)
    assert stored_agg["summary"]["total"] == 100
# ---------------------------------------------------------------------------
# stream_job_result_ndjson
# ---------------------------------------------------------------------------
@patch.object(tjs, "get_redis_client")
def test_stream_ndjson_chunked(mock_redis):
    """NDJSON stream should yield correct protocol lines for chunked result."""
    meta_payload = {
        "profile": "query_tool",
        "domains": {"history": {"chunks": 2, "total": 3}},
        "failed_domains": [],
    }
    stored_by_suffix = {
        ":result:meta": json.dumps(meta_payload),
        ":result:history:0": json.dumps([{"id": 1}, {"id": 2}]),
        ":result:history:1": json.dumps([{"id": 3}]),
    }

    def _fake_get(key):
        for suffix, payload in stored_by_suffix.items():
            if key.endswith(suffix):
                return payload
        return None  # no aggregation stored

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    parsed = [json.loads(line) for line in tjs.stream_job_result_ndjson("job-1")]
    # Protocol: meta, then per-domain start/records/end, then a final complete.
    assert [p["type"] for p in parsed] == [
        "meta", "domain_start", "records", "records", "domain_end", "complete",
    ]
    meta_line, start_line, first_batch, second_batch, end_line, complete_line = parsed
    assert meta_line["domains"] == ["history"]
    assert start_line["domain"] == "history"
    assert start_line["total"] == 3
    assert first_batch["count"] == 2
    assert second_batch["count"] == 1
    assert end_line["count"] == 3
    assert complete_line["total_records"] == 3
@patch.object(tjs, "get_redis_client")
def test_stream_ndjson_with_aggregation(mock_redis):
    """NDJSON stream should include aggregation line when present."""
    meta_payload = {
        "profile": "mid_section_defect",
        "domains": {"upstream_history": {"chunks": 1, "total": 1}},
        "failed_domains": [],
    }
    stored_by_suffix = {
        ":result:meta": json.dumps(meta_payload),
        ":result:upstream_history:0": json.dumps([{"id": 1}]),
        ":result:aggregation": json.dumps({"summary": "ok"}),
    }

    def _fake_get(key):
        for suffix, payload in stored_by_suffix.items():
            if key.endswith(suffix):
                return payload
        return None

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    parsed = [json.loads(line) for line in tjs.stream_job_result_ndjson("job-1")]
    agg_lines = [p for p in parsed if p["type"] == "aggregation"]
    assert agg_lines
    assert agg_lines[0]["data"]["summary"] == "ok"
@patch.object(tjs, "get_redis_client")
def test_stream_ndjson_legacy_fallback(mock_redis):
    """NDJSON stream should emit full_result for legacy single-key storage."""
    legacy_payload = {
        "stage": "events",
        "results": {"history": {"data": [{"id": 1}], "count": 1}},
        "aggregation": None,
    }

    def _fake_get(key):
        if key.endswith(":result:meta"):
            return None  # no chunked meta -> legacy path
        return json.dumps(legacy_payload) if key.endswith(":result") else None

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    parsed = [json.loads(line) for line in tjs.stream_job_result_ndjson("job-1")]
    # Legacy storage streams as exactly one full_result line.
    assert len(parsed) == 1
    assert parsed[0]["type"] == "full_result"
    assert parsed[0]["data"]["stage"] == "events"
@patch.object(tjs, "get_redis_client")
def test_stream_ndjson_with_failed_domains(mock_redis):
    """NDJSON stream should include warning line for partial failures."""
    meta_payload = {
        "profile": "query_tool",
        "domains": {"materials": {"chunks": 1, "total": 1}},
        "failed_domains": ["history"],
    }
    stored_by_suffix = {
        ":result:meta": json.dumps(meta_payload),
        ":result:materials:0": json.dumps([{"id": 1}]),
    }

    def _fake_get(key):
        for suffix, payload in stored_by_suffix.items():
            if key.endswith(suffix):
                return payload
        return None  # aggregation absent

    redis_conn = MagicMock()
    redis_conn.get.side_effect = _fake_get
    mock_redis.return_value = redis_conn
    parsed = [json.loads(line) for line in tjs.stream_job_result_ndjson("job-1")]
    warning_lines = [p for p in parsed if p["type"] == "warning"]
    assert warning_lines
    assert warning_lines[0]["code"] == "EVENTS_PARTIAL_FAILURE"
    assert "history" in warning_lines[0]["failed_domains"]
# ---------------------------------------------------------------------------
# _flatten_domain_records
# ---------------------------------------------------------------------------
def test_flatten_domain_records():
    """Flattening should concatenate every CID's event list into one row list."""
    events_by_cid = {
        "CID-1": [{"CONTAINERID": "CID-1", "EVT": "A"}],
        "CID-2": [{"CONTAINERID": "CID-2", "EVT": "B"}, {"CONTAINERID": "CID-2", "EVT": "C"}],
    }
    flattened = tjs._flatten_domain_records(events_by_cid)
    assert len(flattened) == 3

View File

@@ -482,3 +482,343 @@ def test_non_msd_events_cache_unchanged(mock_cache_set, mock_cache_get, mock_fet
assert payload['stage'] == 'events'
# EventFetcher should NOT have been called — served from cache
mock_fetch_events.assert_not_called()
# ---- Admission control tests ----
def test_events_non_msd_cid_limit_returns_413(monkeypatch):
    """Non-MSD profile exceeding CID limit should return 413."""
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_EVENTS_CID_LIMIT', 5,
    )
    request_body = {
        'profile': 'query_tool',
        'container_ids': [f'CID-{i}' for i in range(10)],  # 10 > limit of 5
        'domains': ['history'],
    }
    response = _client().post('/api/trace/events', json=request_body)
    assert response.status_code == 413
    payload = response.get_json()
    assert payload['error']['code'] == 'CID_LIMIT_EXCEEDED'
@patch('mes_dashboard.routes.trace_routes.build_trace_aggregation_from_events')
@patch('mes_dashboard.routes.trace_routes.EventFetcher.fetch_events')
def test_events_msd_bypasses_cid_limit(
    mock_fetch_events,
    mock_build_aggregation,
    monkeypatch,
):
    """MSD profile should bypass CID limit and proceed normally."""
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_EVENTS_CID_LIMIT', 5,
    )
    cids = [f'CID-{i}' for i in range(10)]  # over the limit of 5
    mock_fetch_events.return_value = {
        cid: [{'CONTAINERID': cid, 'WORKCENTER_GROUP': '測試'}] for cid in cids
    }
    mock_build_aggregation.return_value = {
        'kpi': {'total_input': 10},
        'charts': {},
        'daily_trend': [],
        'available_loss_reasons': [],
        'genealogy_status': 'ready',
        'detail_total_count': 0,
    }
    request_body = {
        'profile': 'mid_section_defect',
        'container_ids': cids,
        'domains': ['upstream_history'],
        'params': {
            'start_date': '2025-01-01',
            'end_date': '2025-01-31',
        },
        'lineage': {'ancestors': {}},
        'seed_container_ids': cids,
    }
    response = _client().post('/api/trace/events', json=request_body)
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['stage'] == 'events'
    mock_fetch_events.assert_called()
@patch('mes_dashboard.routes.trace_routes.EventFetcher.fetch_events')
def test_events_non_msd_within_limit_proceeds(mock_fetch_events):
    """Non-MSD profile within CID limit should proceed normally."""
    mock_fetch_events.return_value = {
        'CID-001': [{'CONTAINERID': 'CID-001', 'EVENTTYPE': 'TRACK_IN'}]
    }
    request_body = {
        'profile': 'query_tool',
        'container_ids': ['CID-001'],
        'domains': ['history'],
    }
    response = _client().post('/api/trace/events', json=request_body)
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['stage'] == 'events'
# ---- Async job queue tests ----
@patch('mes_dashboard.routes.trace_routes.enqueue_trace_events_job')
@patch('mes_dashboard.routes.trace_routes.is_async_available', return_value=True)
def test_events_routes_to_async_above_threshold(
    mock_async_avail,
    mock_enqueue,
    monkeypatch,
):
    """CID count > async threshold should return 202 with job_id."""
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_ASYNC_CID_THRESHOLD', 5,
    )
    mock_enqueue.return_value = ('trace-evt-abc123', None)
    request_body = {
        'profile': 'query_tool',
        'container_ids': [f'CID-{i}' for i in range(10)],  # above threshold of 5
        'domains': ['history'],
    }
    response = _client().post('/api/trace/events', json=request_body)
    assert response.status_code == 202
    payload = response.get_json()
    assert payload['async'] is True
    assert payload['job_id'] == 'trace-evt-abc123'
    assert '/api/trace/job/trace-evt-abc123' in payload['status_url']
@patch('mes_dashboard.routes.trace_routes.EventFetcher.fetch_events')
@patch('mes_dashboard.routes.trace_routes.is_async_available', return_value=False)
def test_events_falls_back_to_sync_when_async_unavailable(
    mock_async_avail,
    mock_fetch_events,
    monkeypatch,
):
    """When async is unavailable, should fall through to sync processing."""
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_ASYNC_CID_THRESHOLD', 2,
    )
    cids = [f'CID-{i}' for i in range(3)]  # above threshold, but async is down
    mock_fetch_events.return_value = {cid: [{'CONTAINERID': cid}] for cid in cids}
    request_body = {
        'profile': 'query_tool',
        'container_ids': cids,
        'domains': ['history'],
    }
    response = _client().post('/api/trace/events', json=request_body)
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['stage'] == 'events'
    mock_fetch_events.assert_called()
@patch('mes_dashboard.routes.trace_routes.enqueue_trace_events_job')
@patch('mes_dashboard.routes.trace_routes.is_async_available', return_value=True)
def test_events_falls_back_to_413_when_enqueue_fails(
    mock_async_avail,
    mock_enqueue,
    monkeypatch,
):
    """When enqueue fails for non-MSD, should fall back to 413."""
    # 10 CIDs crosses both the async threshold (3) and the hard CID
    # limit (5); with enqueue erroring out, admission control rejects.
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_ASYNC_CID_THRESHOLD', 3,
    )
    monkeypatch.setattr(
        'mes_dashboard.routes.trace_routes.TRACE_EVENTS_CID_LIMIT', 5,
    )
    mock_enqueue.return_value = (None, 'Redis down')
    body = {
        'profile': 'query_tool',
        'container_ids': [f'CID-{i}' for i in range(10)],
        'domains': ['history'],
    }
    resp = _client().post('/api/trace/events', json=body)
    assert resp.status_code == 413
    assert resp.get_json()['error']['code'] == 'CID_LIMIT_EXCEEDED'
# ---- Job status/result endpoint tests ----
@patch('mes_dashboard.routes.trace_routes.get_job_status')
def test_job_status_found(mock_status):
    """GET /api/trace/job/<id> should return status."""
    # Full status document as the job store would report it mid-run.
    mock_status.return_value = {
        'job_id': 'trace-evt-abc',
        'status': 'started',
        'profile': 'query_tool',
        'cid_count': 100,
        'domains': ['history'],
        'progress': 'fetching events',
        'created_at': 1740000000.0,
        'elapsed_seconds': 15.0,
        'error': None,
    }
    resp = _client().get('/api/trace/job/trace-evt-abc')
    assert resp.status_code == 200
    data = resp.get_json()
    assert data['status'] == 'started'
    assert data['job_id'] == 'trace-evt-abc'
@patch('mes_dashboard.routes.trace_routes.get_job_status', return_value=None)
def test_job_status_not_found(mock_status):
    """GET /api/trace/job/<id> should return 404 for unknown job."""
    resp = _client().get('/api/trace/job/trace-evt-nonexist')
    assert resp.status_code == 404
@patch('mes_dashboard.routes.trace_routes.get_job_result')
@patch('mes_dashboard.routes.trace_routes.get_job_status')
def test_job_result_success(mock_status, mock_result):
    """GET /api/trace/job/<id>/result should return result for finished job."""
    mock_status.return_value = {'status': 'finished', 'job_id': 'j1'}
    mock_result.return_value = {
        'stage': 'events',
        'results': {'history': {'data': [], 'count': 0}},
        'aggregation': None,
    }
    resp = _client().get('/api/trace/job/j1/result')
    assert resp.status_code == 200
    assert resp.get_json()['stage'] == 'events'
@patch('mes_dashboard.routes.trace_routes.get_job_status')
def test_job_result_not_complete(mock_status):
    """GET /api/trace/job/<id>/result should return 409 for non-finished job."""
    # A job still in 'started' state has no result to serve yet.
    mock_status.return_value = {'status': 'started', 'job_id': 'j2'}
    resp = _client().get('/api/trace/job/j2/result')
    assert resp.status_code == 409
    assert resp.get_json()['error']['code'] == 'JOB_NOT_COMPLETE'
@patch('mes_dashboard.routes.trace_routes.get_job_result', return_value=None)
@patch('mes_dashboard.routes.trace_routes.get_job_status')
def test_job_result_expired(mock_status, mock_result):
    """GET /api/trace/job/<id>/result should return 404 if result expired."""
    # Status says finished, but the stored payload has already expired.
    mock_status.return_value = {'status': 'finished', 'job_id': 'j3'}
    resp = _client().get('/api/trace/job/j3/result')
    assert resp.status_code == 404
# ---------------------------------------------------------------------------
# NDJSON stream endpoint
# ---------------------------------------------------------------------------
@patch("mes_dashboard.routes.trace_routes.stream_job_result_ndjson")
@patch("mes_dashboard.routes.trace_routes.get_job_status")
def test_job_stream_success(mock_status, mock_stream):
    """GET /api/trace/job/<id>/stream should return NDJSON for finished job."""
    mock_status.return_value = {'status': 'finished', 'job_id': 'j1'}
    # Two NDJSON frames: a meta header and a completion marker.
    mock_stream.return_value = iter([
        '{"type":"meta","job_id":"j1","domains":["history"]}\n',
        '{"type":"complete","total_records":0}\n',
    ])
    reset_rate_limits_for_tests()
    resp = _client().get('/api/trace/job/j1/stream')
    assert resp.status_code == 200
    assert resp.content_type.startswith('application/x-ndjson')
    frames = [
        line for line in resp.data.decode().strip().split('\n') if line.strip()
    ]
    assert len(frames) == 2
@patch("mes_dashboard.routes.trace_routes.get_job_status")
def test_job_stream_not_found(mock_status):
    """GET /api/trace/job/<id>/stream should return 404 for missing job."""
    mock_status.return_value = None
    reset_rate_limits_for_tests()
    resp = _client().get('/api/trace/job/j-missing/stream')
    assert resp.status_code == 404
@patch("mes_dashboard.routes.trace_routes.get_job_status")
def test_job_stream_not_complete(mock_status):
    """GET /api/trace/job/<id>/stream should return 409 for incomplete job."""
    mock_status.return_value = {'status': 'started', 'job_id': 'j2'}
    reset_rate_limits_for_tests()
    resp = _client().get('/api/trace/job/j2/stream')
    assert resp.status_code == 409
# ---------------------------------------------------------------------------
# 202 response includes stream_url
# ---------------------------------------------------------------------------
@patch("mes_dashboard.routes.trace_routes.is_async_available", return_value=True)
@patch("mes_dashboard.routes.trace_routes.enqueue_trace_events_job")
def test_events_async_response_includes_stream_url(mock_enqueue, mock_async):
    """Events 202 response should include stream_url field."""
    mock_enqueue.return_value = ("trace-evt-xyz", None)
    reset_rate_limits_for_tests()
    # NOTE(review): 25K CIDs is assumed to exceed the module-default async
    # threshold without patching it — confirm if the default ever changes.
    body = {
        'profile': 'query_tool',
        'container_ids': [f"CID-{i}" for i in range(25000)],
        'domains': ['history'],
    }
    resp = _client().post('/api/trace/events', json=body)
    assert resp.status_code == 202
    data = resp.get_json()
    assert data["stream_url"] == "/api/trace/job/trace-evt-xyz/stream"
    assert data["status_url"] == "/api/trace/job/trace-evt-xyz"