feat: harden long-range batch queries with redis+parquet caching

Author: egg
Date: 2026-03-02 21:04:18 +08:00
Parent: 2568fd836c
Commit: fb92579331
40 changed files with 5443 additions and 676 deletions


@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
"""E2E tests for reject-history long-range query flow."""
from __future__ import annotations
import os
import pytest
import requests
def _post_reject_query(app_server: str, body: dict, timeout: float = 420.0) -> requests.Response:
return requests.post(
f"{app_server}/api/reject-history/query",
json=body,
timeout=timeout,
)
@pytest.mark.e2e
@pytest.mark.skipif(
os.environ.get("RUN_LONG_E2E") != "1",
reason="Long-range reject-history E2E disabled; set RUN_LONG_E2E=1 to run.",
)
class TestRejectHistoryLongRangeE2E:
"""Real backend E2E checks for long-range reject history query."""
def test_query_365_day_range_returns_success(self, app_server: str):
response = _post_reject_query(
app_server,
{
"mode": "date_range",
"start_date": "2025-01-01",
"end_date": "2025-12-31",
"include_excluded_scrap": False,
"exclude_material_scrap": True,
"exclude_pb_diode": True,
},
)
assert response.status_code == 200, response.text[:500]
payload = response.json()
assert payload.get("success") is True, payload
assert payload.get("query_id")
def test_query_then_view_returns_cached_result(self, app_server: str):
query_resp = _post_reject_query(
app_server,
{
"mode": "date_range",
"start_date": "2025-01-01",
"end_date": "2025-12-31",
},
)
assert query_resp.status_code == 200, query_resp.text[:500]
query_payload = query_resp.json()
assert query_payload.get("success") is True, query_payload
query_id = query_payload.get("query_id")
assert query_id
view_resp = requests.get(
f"{app_server}/api/reject-history/view",
params={
"query_id": query_id,
"page": 1,
"per_page": 50,
"exclude_material_scrap": "true",
"exclude_pb_diode": "true",
},
timeout=120,
)
assert view_resp.status_code == 200, view_resp.text[:500]
view_payload = view_resp.json()
assert view_payload.get("success") is True, view_payload
def test_query_then_export_cached_returns_csv(self, app_server: str):
query_resp = _post_reject_query(
app_server,
{
"mode": "date_range",
"start_date": "2025-01-01",
"end_date": "2025-12-31",
},
)
assert query_resp.status_code == 200, query_resp.text[:500]
query_payload = query_resp.json()
assert query_payload.get("success") is True, query_payload
query_id = query_payload.get("query_id")
assert query_id
export_resp = requests.get(
f"{app_server}/api/reject-history/export-cached",
params={"query_id": query_id},
timeout=120,
)
assert export_resp.status_code == 200, export_resp.text[:300]
assert "text/csv" in export_resp.headers.get("Content-Type", "")
assert "LOT" in export_resp.text[:200]


@@ -0,0 +1,102 @@
# -*- coding: utf-8 -*-
"""Stress tests for reject-history long-range query stability."""
from __future__ import annotations
import concurrent.futures
import os
import time
import pytest
import requests
try:
import redis
except Exception: # pragma: no cover - optional runtime dependency
redis = None
@pytest.mark.stress
@pytest.mark.load
@pytest.mark.skipif(
os.environ.get("RUN_LONG_STRESS") != "1",
reason="Long-range reject-history stress disabled; set RUN_LONG_STRESS=1 to run.",
)
class TestRejectHistoryLongRangeStress:
"""Concurrent long-range reject-history queries should stay recoverable."""
@staticmethod
def _redis_used_memory_bytes() -> int | None:
if redis is None:
return None
redis_url = os.environ.get("STRESS_REDIS_URL", os.environ.get("REDIS_URL", "redis://localhost:6379/0"))
try:
client = redis.Redis.from_url(redis_url, decode_responses=True)
info = client.info("memory")
used = info.get("used_memory")
return int(used) if used is not None else None
except Exception:
return None
@staticmethod
def _run_query(base_url: str, timeout: float, seed: int) -> tuple[bool, float, str]:
start = time.time()
try:
year = 2024 + (seed % 2)
response = requests.post(
f"{base_url}/api/reject-history/query",
json={
"mode": "date_range",
"start_date": f"{year}-01-01",
"end_date": f"{year}-12-31",
"exclude_material_scrap": True,
"exclude_pb_diode": True,
},
timeout=timeout,
)
duration = time.time() - start
if response.status_code != 200:
return False, duration, f"HTTP {response.status_code}"
payload = response.json()
if payload.get("success") is True and payload.get("query_id"):
return True, duration, ""
return False, duration, f"success={payload.get('success')} error={payload.get('error')}"
except Exception as exc: # pragma: no cover - runtime/network dependent
return False, time.time() - start, str(exc)[:180]
def test_concurrent_365_day_queries_no_crash(self, base_url: str, stress_result):
result = stress_result("Reject History Long-Range Concurrent")
timeout = float(os.environ.get("STRESS_REJECT_HISTORY_TIMEOUT", "420"))
concurrent_users = int(os.environ.get("STRESS_REJECT_HISTORY_CONCURRENCY", "3"))
rounds = int(os.environ.get("STRESS_REJECT_HISTORY_ROUNDS", "2"))
max_redis_delta_mb = int(os.environ.get("STRESS_REJECT_REDIS_MAX_DELTA_MB", "256"))
total_requests = concurrent_users * rounds
redis_before = self._redis_used_memory_bytes()
started = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_users) as executor:
futures = [
executor.submit(self._run_query, base_url, timeout, idx)
for idx in range(total_requests)
]
for future in concurrent.futures.as_completed(futures):
ok, duration, error = future.result()
if ok:
result.add_success(duration)
else:
result.add_failure(error, duration)
result.total_duration = time.time() - started
print(result.report())
assert result.total_requests == total_requests
assert result.success_rate >= 90.0, f"Success rate too low: {result.success_rate:.2f}%"
health_resp = requests.get(f"{base_url}/health", timeout=10)
assert health_resp.status_code in (200, 503)
redis_after = self._redis_used_memory_bytes()
if redis_before is not None and redis_after is not None:
delta_mb = (redis_after - redis_before) / (1024 * 1024)
assert delta_mb <= max_redis_delta_mb, (
f"Redis memory delta too high: {delta_mb:.1f}MB > {max_redis_delta_mb}MB"
)
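# The `base_url` and `stress_result` fixtures are assumed to come from the stress
# conftest. A minimal sketch of an object satisfying the stress_result interface
# used above (add_success/add_failure, success_rate, total_requests, report);
# the real fixture likely records more detail:
from dataclasses import dataclass, field

import pytest

@dataclass
class StressResultSketch:
    name: str
    durations: list = field(default_factory=list)
    failures: list = field(default_factory=list)
    total_duration: float = 0.0

    def add_success(self, duration: float) -> None:
        self.durations.append(duration)

    def add_failure(self, error: str, duration: float) -> None:
        self.failures.append((error, duration))

    @property
    def total_requests(self) -> int:
        return len(self.durations) + len(self.failures)

    @property
    def success_rate(self) -> float:
        total = self.total_requests
        return 100.0 * len(self.durations) / total if total else 0.0

    def report(self) -> str:
        return (
            f"{self.name}: {len(self.durations)}/{self.total_requests} ok "
            f"({self.success_rate:.1f}%) in {self.total_duration:.1f}s"
        )

@pytest.fixture
def stress_result():
    # Factory form matches the call stress_result("...") in the test above.
    return StressResultSketch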


@@ -0,0 +1,576 @@
# -*- coding: utf-8 -*-
"""Unit tests for BatchQueryEngine module."""
import pytest
from unittest.mock import patch, MagicMock
import pandas as pd
from mes_dashboard.services.batch_query_engine import (
compute_query_hash,
decompose_by_ids,
decompose_by_time_range,
execute_plan,
merge_chunks,
iterate_chunks,
should_decompose_by_time,
should_decompose_by_ids,
)
# ============================================================
# 4.1 decompose_by_time_range
# ============================================================
class TestDecomposeByTimeRange:
def test_90_days_yields_3_chunks(self):
chunks = decompose_by_time_range("2025-01-01", "2025-03-31", grain_days=31)
assert len(chunks) == 3
# First chunk: Jan 1 to Jan 31
assert chunks[0] == {"chunk_start": "2025-01-01", "chunk_end": "2025-01-31"}
# Second chunk: Feb 1 to Mar 3
assert chunks[1]["chunk_start"] == "2025-02-01"
# Third chunk ends Mar 31
assert chunks[2]["chunk_end"] == "2025-03-31"
def test_31_days_yields_1_chunk(self):
chunks = decompose_by_time_range("2025-01-01", "2025-01-31", grain_days=31)
assert len(chunks) == 1
assert chunks[0] == {"chunk_start": "2025-01-01", "chunk_end": "2025-01-31"}
def test_single_day(self):
chunks = decompose_by_time_range("2025-06-15", "2025-06-15")
assert len(chunks) == 1
assert chunks[0] == {"chunk_start": "2025-06-15", "chunk_end": "2025-06-15"}
def test_contiguous_no_overlap_no_gap(self):
"""Verify closed-interval boundary semantics: no overlap, no gap."""
chunks = decompose_by_time_range("2025-01-01", "2025-06-30", grain_days=31)
for i in range(1, len(chunks)):
prev_end = chunks[i - 1]["chunk_end"]
cur_start = chunks[i]["chunk_start"]
from datetime import datetime, timedelta
prev_dt = datetime.strptime(prev_end, "%Y-%m-%d")
cur_dt = datetime.strptime(cur_start, "%Y-%m-%d")
assert cur_dt == prev_dt + timedelta(days=1), (
f"Gap/overlap between chunk {i-1} end={prev_end} and chunk {i} start={cur_start}"
)
# First starts at start_date, last ends at end_date
assert chunks[0]["chunk_start"] == "2025-01-01"
assert chunks[-1]["chunk_end"] == "2025-06-30"
def test_final_chunk_may_be_shorter(self):
chunks = decompose_by_time_range("2025-01-01", "2025-02-10", grain_days=31)
assert len(chunks) == 2
# Second chunk: Feb 1 to Feb 10 (10 days < 31)
assert chunks[1] == {"chunk_start": "2025-02-01", "chunk_end": "2025-02-10"}
def test_inverted_range_raises(self):
with pytest.raises(ValueError, match="must be <="):
decompose_by_time_range("2025-12-31", "2025-01-01")
def test_365_days(self):
chunks = decompose_by_time_range("2025-01-01", "2025-12-31", grain_days=31)
assert len(chunks) == 12 # roughly 365/31 ≈ 12
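# A minimal sketch of the closed-interval decomposition these tests pin down.
# The shipped decompose_by_time_range may differ in detail; the grain_days
# default of 31 is inferred from the assertions above.
from datetime import datetime, timedelta

def decompose_by_time_range_sketch(start_date: str, end_date: str, grain_days: int = 31):
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    if start > end:
        raise ValueError("start_date must be <= end_date")
    chunks = []
    cursor = start
    while cursor <= end:
        chunk_end = min(cursor + timedelta(days=grain_days - 1), end)
        chunks.append({
            "chunk_start": cursor.strftime("%Y-%m-%d"),
            "chunk_end": chunk_end.strftime("%Y-%m-%d"),
        })
        cursor = chunk_end + timedelta(days=1)  # next chunk starts the following day: no gap, no overlap
    return chunks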
# ============================================================
# 4.2 decompose_by_ids
# ============================================================
class TestDecomposeByIds:
def test_2500_ids_yields_3_batches(self):
ids = list(range(2500))
batches = decompose_by_ids(ids, batch_size=1000)
assert len(batches) == 3
assert len(batches[0]) == 1000
assert len(batches[1]) == 1000
assert len(batches[2]) == 500
def test_500_ids_yields_1_batch(self):
ids = list(range(500))
batches = decompose_by_ids(ids, batch_size=1000)
assert len(batches) == 1
assert len(batches[0]) == 500
def test_empty_ids(self):
assert decompose_by_ids([]) == []
def test_exact_batch_size(self):
ids = list(range(1000))
batches = decompose_by_ids(ids, batch_size=1000)
assert len(batches) == 1
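# Corresponding sketch for ID batching; the batch_size default of 1000 is
# inferred from the tests above and is illustrative.
def decompose_by_ids_sketch(ids: list, batch_size: int = 1000) -> list:
    return [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]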
# ============================================================
# 4.3 execute_plan sequential
# ============================================================
class TestExecutePlanSequential:
def _mock_redis(self):
"""Set up mock redis for chunk store/load/exists."""
stored = {}
mock_client = MagicMock()
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
return mock_client, stored
def test_sequential_execution_stores_chunks(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client, stored = self._mock_redis()
call_log = []
def fake_query_fn(chunk, max_rows_per_chunk=None):
call_log.append(chunk)
return pd.DataFrame({"V": [1, 2]})
chunks = [
{"chunk_start": "2025-01-01", "chunk_end": "2025-01-31"},
{"chunk_start": "2025-02-01", "chunk_end": "2025-02-28"},
]
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
qh = execute_plan(
chunks, fake_query_fn,
query_hash="testhash",
cache_prefix="test",
skip_cached=False,
)
assert qh == "testhash"
assert len(call_log) == 2
# Chunks should be stored in Redis
assert any("chunk:0" in k for k in stored)
assert any("chunk:1" in k for k in stored)
# ============================================================
# 4.4 execute_plan parallel
# ============================================================
class TestExecutePlanParallel:
def test_parallel_uses_threadpool(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
call_count = {"n": 0}
def fake_query_fn(chunk, max_rows_per_chunk=None):
call_count["n"] += 1
return pd.DataFrame({"V": [1]})
chunks = [{"i": i} for i in range(4)]
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "_effective_parallelism", return_value=2):
qh = execute_plan(
chunks, fake_query_fn,
parallel=2,
query_hash="ptest",
cache_prefix="p",
skip_cached=False,
)
assert call_count["n"] == 4
# ============================================================
# 4.5 partial cache hit
# ============================================================
class TestPartialCacheHit:
def test_skips_cached_chunks(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.hset.return_value = None
mock_client.expire.return_value = None
# Pre-populate chunks 0 and 1 as "cached"
pre_cached_keys = set()
def fake_exists(k):
return 1 if k in pre_cached_keys else (1 if k in stored else 0)
mock_client.exists.side_effect = fake_exists
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
# Pre-store 2 chunks
rds.redis_store_chunk("test", "hash5", 0, pd.DataFrame({"A": [1]}), ttl=60)
rds.redis_store_chunk("test", "hash5", 1, pd.DataFrame({"A": [2]}), ttl=60)
# Now mark those keys as existing
pre_cached_keys.update(stored.keys())
call_log = []
def fake_query_fn(chunk, max_rows_per_chunk=None):
call_log.append(chunk)
return pd.DataFrame({"A": [99]})
chunks = [{"i": i} for i in range(5)]
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
execute_plan(
chunks, fake_query_fn,
query_hash="hash5",
cache_prefix="test",
skip_cached=True,
)
# Only chunks 2, 3, 4 should have been executed
assert len(call_log) == 3
# ============================================================
# 4.6 memory guard
# ============================================================
class TestMemoryGuard:
def test_oversized_chunk_discarded(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
def oversized_query_fn(chunk, max_rows_per_chunk=None):
# Tiny DF; it exceeds the limit only because BATCH_CHUNK_MAX_MEMORY_MB is patched to 0 below
df = pd.DataFrame({"X": [1]})
return df
chunks = [{"i": 0}]
# Set memory limit to 0 MB so any DF exceeds it
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "BATCH_CHUNK_MAX_MEMORY_MB", 0):
qh = execute_plan(
chunks, oversized_query_fn,
query_hash="memtest",
cache_prefix="m",
skip_cached=False,
)
# Chunk should NOT be stored (memory exceeded)
assert not any("chunk:0" in k for k in stored)
# ============================================================
# 4.7 result row count limit
# ============================================================
class TestMaxRowsPerChunk:
def test_max_rows_passed_to_query_fn(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
mock_client.setex.return_value = None
mock_client.get.return_value = None
mock_client.exists.return_value = 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
received_max_rows = []
def capture_query_fn(chunk, max_rows_per_chunk=None):
received_max_rows.append(max_rows_per_chunk)
return pd.DataFrame({"V": [1]})
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
execute_plan(
[{"i": 0}], capture_query_fn,
query_hash="rowtest",
cache_prefix="r",
max_rows_per_chunk=5000,
skip_cached=False,
)
assert received_max_rows == [5000]
# ============================================================
# 4.8 merge_chunks
# ============================================================
class TestMergeChunks:
def test_merge_produces_correct_df(self):
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.hgetall.return_value = {"total": "3", "completed": "3", "failed": "0"}
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
rds.redis_store_chunk("t", "h", 0, pd.DataFrame({"A": [1, 2]}))
rds.redis_store_chunk("t", "h", 1, pd.DataFrame({"A": [3, 4]}))
rds.redis_store_chunk("t", "h", 2, pd.DataFrame({"A": [5]}))
import mes_dashboard.services.batch_query_engine as bqe
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
merged = merge_chunks("t", "h")
assert len(merged) == 5
assert list(merged["A"]) == [1, 2, 3, 4, 5]
def test_merge_respects_max_total_rows(self):
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.hgetall.return_value = {"total": "3", "completed": "3", "failed": "0"}
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
rds.redis_store_chunk("t", "cap", 0, pd.DataFrame({"A": [1, 2]}))
rds.redis_store_chunk("t", "cap", 1, pd.DataFrame({"A": [3, 4]}))
rds.redis_store_chunk("t", "cap", 2, pd.DataFrame({"A": [5, 6]}))
import mes_dashboard.services.batch_query_engine as bqe
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
merged = merge_chunks("t", "cap", max_total_rows=4)
assert len(merged) == 4
assert list(merged["A"]) == [1, 2, 3, 4]
# ============================================================
# 4.9 progress tracking
# ============================================================
class TestProgressTracking:
def test_hset_updated_after_each_chunk(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
mock_client.setex.return_value = None
mock_client.get.return_value = None
mock_client.exists.return_value = 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
hset_calls = []
def track_hset(key, mapping=None):
hset_calls.append(mapping.copy() if mapping else {})
return None
mock_client.hset.side_effect = track_hset
def fake_query_fn(chunk, max_rows_per_chunk=None):
return pd.DataFrame({"V": [1]})
chunks = [{"i": 0}, {"i": 1}, {"i": 2}]
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
execute_plan(
chunks, fake_query_fn,
query_hash="progtest",
cache_prefix="p",
skip_cached=False,
)
# At least initial + 3 per-chunk updates (a final completion update typically makes 5)
assert len(hset_calls) >= 4
# Last call should show completed status
last = hset_calls[-1]
assert last["status"] == "completed"
assert last["completed"] == "3"
# ============================================================
# 4.10 chunk failure resilience
# ============================================================
class TestChunkFailureResilience:
def test_one_chunk_fails_others_complete(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
call_count = {"n": 0}
def failing_query_fn(chunk, max_rows_per_chunk=None):
call_count["n"] += 1
if chunk.get("i") == 1:
raise RuntimeError("Oracle timeout")
return pd.DataFrame({"V": [chunk["i"]]})
chunks = [{"i": 0}, {"i": 1}, {"i": 2}]
hset_calls = []
mock_client.hset.side_effect = lambda k, mapping=None: hset_calls.append(
mapping.copy() if mapping else {}
)
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client):
qh = execute_plan(
chunks, failing_query_fn,
query_hash="failtest",
cache_prefix="f",
skip_cached=False,
)
# All 3 chunks attempted
assert call_count["n"] == 3
# Final metadata should reflect partial failure
last = hset_calls[-1]
assert last["status"] == "partial"
assert last["completed"] == "2"
assert last["failed"] == "1"
assert last["has_partial_failure"] == "True"
def test_chunk_store_failure_is_marked_partial(self):
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.return_value = None
mock_client.expire.return_value = None
def query_fn(chunk, max_rows_per_chunk=None):
return pd.DataFrame({"V": [chunk["i"]]})
original_store_chunk = bqe.redis_store_chunk
def fail_one_store(prefix, query_hash, idx, df, ttl=900):
if idx == 1:
return False
return original_store_chunk(prefix, query_hash, idx, df, ttl=ttl)
hset_calls = []
mock_client.hset.side_effect = lambda k, mapping=None: hset_calls.append(
mapping.copy() if mapping else {}
)
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "get_redis_client", return_value=mock_client), \
patch.object(bqe, "redis_store_chunk", side_effect=fail_one_store):
execute_plan(
[{"i": 0}, {"i": 1}, {"i": 2}],
query_fn,
query_hash="storefail",
cache_prefix="sf",
skip_cached=False,
)
last = hset_calls[-1]
assert last["status"] == "partial"
assert last["completed"] == "2"
assert last["failed"] == "1"
# ============================================================
# query_hash stability
# ============================================================
class TestQueryHash:
def test_same_params_different_order(self):
h1 = compute_query_hash({"a": 1, "b": [3, 1, 2]})
h2 = compute_query_hash({"b": [2, 1, 3], "a": 1})
assert h1 == h2
def test_different_params_different_hash(self):
h1 = compute_query_hash({"mode": "date_range", "start": "2025-01-01"})
h2 = compute_query_hash({"mode": "date_range", "start": "2025-06-01"})
assert h1 != h2
def test_hash_is_16_chars(self):
h = compute_query_hash({"x": 1})
assert len(h) == 16
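# A sketch of an order-insensitive, 16-character hash consistent with the
# assertions above; sorting list values (not just dict keys) is an assumption
# implied by test_same_params_different_order, and only flat JSON-like params
# are considered here.
import hashlib
import json

def compute_query_hash_sketch(params: dict) -> str:
    def normalize(value):
        if isinstance(value, dict):
            return {k: normalize(v) for k, v in sorted(value.items())}
        if isinstance(value, (list, tuple, set)):
            return sorted(normalize(v) for v in value)
        return value
    canonical = json.dumps(normalize(params), sort_keys=True, ensure_ascii=False, default=str)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:16]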
# ============================================================
# should_decompose helpers
# ============================================================
class TestShouldDecompose:
def test_long_range_true(self):
assert should_decompose_by_time("2025-01-01", "2025-12-31")
def test_short_range_false(self):
assert not should_decompose_by_time("2025-01-01", "2025-02-01")
def test_large_ids_true(self):
assert should_decompose_by_ids(list(range(2000)))
def test_small_ids_false(self):
assert not should_decompose_by_ids(list(range(500)))
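# The exact thresholds are not pinned down by these tests (only that 32 days and
# 500 IDs stay on the direct path while 365 days and 2000 IDs decompose); the
# values below are illustrative placeholders.
from datetime import datetime

def should_decompose_by_time_sketch(start_date: str, end_date: str, threshold_days: int = 45) -> bool:
    span = (datetime.strptime(end_date, "%Y-%m-%d") - datetime.strptime(start_date, "%Y-%m-%d")).days + 1
    return span > threshold_days

def should_decompose_by_ids_sketch(ids: list, threshold: int = 1000) -> bool:
    return len(ids) > threshold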


@@ -117,3 +117,4 @@ def test_runtime_config_includes_fetchmany_size():
assert "slow_fetchmany_size" in runtime
assert isinstance(runtime["slow_fetchmany_size"], int)
assert runtime["slow_fetchmany_size"] > 0
assert "slow_pool_enabled" in runtime


@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
"""Unit tests for isolated slow-query pool path."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
import mes_dashboard.core.database as db
@patch.object(db, "oracledb")
@patch.object(db, "get_slow_engine")
@patch.object(db, "_get_slow_query_semaphore")
@patch.object(db, "get_db_runtime_config")
def test_read_sql_df_slow_uses_slow_pool_when_enabled(
mock_runtime,
mock_sem_fn,
mock_get_slow_engine,
mock_oracledb,
):
"""Slow query should checkout connection from isolated slow pool."""
mock_runtime.return_value = {
"slow_pool_enabled": True,
"slow_call_timeout_ms": 60000,
"slow_fetchmany_size": 5000,
"tcp_connect_timeout": 10,
"retry_count": 1,
"retry_delay": 1.0,
}
sem = MagicMock()
sem.acquire.return_value = True
mock_sem_fn.return_value = sem
cursor = MagicMock()
cursor.description = [("COL_A",), ("COL_B",)]
cursor.fetchall.return_value = [("v1", "v2")]
conn = MagicMock()
conn.cursor.return_value = cursor
engine = MagicMock()
engine.raw_connection.return_value = conn
mock_get_slow_engine.return_value = engine
df = db.read_sql_df_slow("SELECT 1", {"p0": "x"})
assert list(df.columns) == ["COL_A", "COL_B"]
assert len(df) == 1
mock_get_slow_engine.assert_called_once()
mock_oracledb.connect.assert_not_called()
conn.close.assert_called_once()
sem.release.assert_called_once()
@patch.object(db, "oracledb")
@patch.object(db, "get_slow_engine")
@patch.object(db, "_get_slow_query_semaphore")
@patch.object(db, "get_db_runtime_config")
def test_read_sql_df_slow_iter_uses_slow_pool_when_enabled(
mock_runtime,
mock_sem_fn,
mock_get_slow_engine,
mock_oracledb,
):
"""Slow iterator query should checkout connection from isolated slow pool."""
mock_runtime.return_value = {
"slow_pool_enabled": True,
"slow_call_timeout_ms": 60000,
"slow_fetchmany_size": 2,
"tcp_connect_timeout": 10,
"retry_count": 1,
"retry_delay": 1.0,
}
sem = MagicMock()
sem.acquire.return_value = True
mock_sem_fn.return_value = sem
cursor = MagicMock()
cursor.description = [("COL_A",), ("COL_B",)]
cursor.fetchmany.side_effect = [
[("r1a", "r1b")],
[],
]
conn = MagicMock()
conn.cursor.return_value = cursor
engine = MagicMock()
engine.raw_connection.return_value = conn
mock_get_slow_engine.return_value = engine
batches = list(db.read_sql_df_slow_iter("SELECT 1", {"p0": "x"}, batch_size=2))
assert batches == [(["COL_A", "COL_B"], [("r1a", "r1b")])]
mock_get_slow_engine.assert_called_once()
mock_oracledb.connect.assert_not_called()
conn.close.assert_called_once()
sem.release.assert_called_once()
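# A condensed sketch of the slow-pool path these mocks describe: when
# slow_pool_enabled is set, the connection is checked out from
# get_slow_engine().raw_connection() (oracledb.connect is never touched), the
# slow-query semaphore gates concurrency, column names come from
# cursor.description, and the connection is always closed and the semaphore
# released. Error handling, retries and the non-pooled fallback are omitted.
import pandas as pd
from mes_dashboard.core.database import (
    _get_slow_query_semaphore,
    get_db_runtime_config,
    get_slow_engine,
)

def read_sql_df_slow_sketch(sql: str, params: dict | None = None) -> pd.DataFrame:
    runtime = get_db_runtime_config()
    if not runtime.get("slow_pool_enabled"):
        raise RuntimeError("non-pooled fallback omitted from this sketch")
    sem = _get_slow_query_semaphore()
    if not sem.acquire(timeout=runtime["slow_call_timeout_ms"] / 1000.0):
        raise TimeoutError("no slow-query slot available")
    conn = get_slow_engine().raw_connection()
    try:
        cursor = conn.cursor()
        cursor.execute(sql, params or {})
        columns = [col[0] for col in cursor.description]
        return pd.DataFrame(cursor.fetchall(), columns=columns)
    finally:
        conn.close()
        sem.release()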


@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
"""Unit tests for hold_dataset_cache — engine integration (task 6.4)."""
from __future__ import annotations
import pandas as pd
from mes_dashboard.services import hold_dataset_cache as cache_svc
class TestHoldEngineDecomposition:
"""6.4 — hold-history with long date range triggers engine."""
def test_long_range_triggers_engine(self, monkeypatch):
"""90-day range → engine decomposition activated."""
import mes_dashboard.services.batch_query_engine as engine_mod
engine_calls = {"execute": 0, "merge": 0}
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
assert len(chunks) == 3 # ceil(90 / 31) = 3 chunks
return kwargs.get("query_hash", "fake_hash")
result_df = pd.DataFrame({
"CONTAINERID": ["C1"],
"HOLDTYPE": ["Quality"],
})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._load_sql",
lambda name: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._derive_all_views",
lambda df, **kw: {
"summary": {"total": 1},
"detail": {"items": [], "pagination": {"total": 1}},
},
)
result = cache_svc.execute_primary_query(
start_date="2025-01-01",
end_date="2025-03-31",
)
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
def test_short_range_skips_engine(self, monkeypatch):
"""30-day range → direct path, no engine."""
engine_calls = {"execute": 0}
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._load_sql",
lambda name: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache.read_sql_df",
lambda sql, params: pd.DataFrame({"CONTAINERID": ["C1"]}),
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.hold_dataset_cache._derive_all_views",
lambda df, **kw: {
"summary": {"total": 1},
"detail": {"items": [], "pagination": {"total": 1}},
},
)
result = cache_svc.execute_primary_query(
start_date="2025-06-01",
end_date="2025-06-30",
)
assert engine_calls["execute"] == 0 # Engine NOT used


@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
"""Unit tests for job_query_service — engine integration (tasks 9.1-9.4)."""
from __future__ import annotations
import pandas as pd
from mes_dashboard.services import job_query_service as job_svc
class TestJobQueryEngineDecomposition:
"""9.4 — full-year query with many resources → engine decomposition."""
def test_long_range_triggers_engine(self, monkeypatch):
"""90-day range → engine decomposition for job query."""
import mes_dashboard.services.batch_query_engine as engine_mod
import mes_dashboard.core.redis_df_store as rds
engine_calls = {"execute": 0, "merge": 0}
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
assert len(chunks) == 3 # ceil(90 / 31) = 3 chunks
assert kwargs.get("cache_prefix") == "job"
return kwargs.get("query_hash", "fake_hash")
result_df = pd.DataFrame({
"JOBID": ["J1", "J2"],
"RESOURCEID": ["R1", "R2"],
})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
monkeypatch.setattr(rds, "redis_load_df", lambda key: None)
monkeypatch.setattr(rds, "redis_store_df", lambda key, df, ttl=None: None)
monkeypatch.setattr(
"mes_dashboard.services.job_query_service.SQLLoader",
type("FakeLoader", (), {
"load": staticmethod(lambda name: "SELECT 1 FROM dual WHERE {{ RESOURCE_FILTER }}"),
}),
)
result = job_svc.get_jobs_by_resources(
resource_ids=["R1", "R2", "R3"],
start_date="2025-01-01",
end_date="2025-03-31",
)
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
assert result["total"] == 2
assert "error" not in result
def test_short_range_skips_engine(self, monkeypatch):
"""30-day range → direct path, no engine."""
import mes_dashboard.core.redis_df_store as rds
engine_calls = {"execute": 0}
monkeypatch.setattr(rds, "redis_load_df", lambda key: None)
monkeypatch.setattr(rds, "redis_store_df", lambda key, df, ttl=None: None)
monkeypatch.setattr(
"mes_dashboard.services.job_query_service.SQLLoader",
type("FakeLoader", (), {
"load": staticmethod(lambda name: "SELECT 1 FROM dual WHERE {{ RESOURCE_FILTER }}"),
}),
)
monkeypatch.setattr(
"mes_dashboard.services.job_query_service.read_sql_df",
lambda sql, params: pd.DataFrame({"JOBID": ["J1"]}),
)
result = job_svc.get_jobs_by_resources(
resource_ids=["R1"],
start_date="2025-06-01",
end_date="2025-06-30",
)
assert engine_calls["execute"] == 0 # Engine NOT used
assert result["total"] == 1
def test_redis_cache_hit_skips_query(self, monkeypatch):
"""Redis cache hit → returns cached DataFrame without Oracle query."""
import mes_dashboard.core.redis_df_store as rds
query_calls = {"sql": 0}
cached_df = pd.DataFrame({
"JOBID": ["J-CACHED"],
"RESOURCEID": ["R1"],
})
monkeypatch.setattr(rds, "redis_load_df", lambda key: cached_df)
def fail_sql(*args, **kwargs):
query_calls["sql"] += 1
raise RuntimeError("Should not reach Oracle")
monkeypatch.setattr(
"mes_dashboard.services.job_query_service.read_sql_df",
fail_sql,
)
result = job_svc.get_jobs_by_resources(
resource_ids=["R1"],
start_date="2025-06-01",
end_date="2025-06-30",
)
assert query_calls["sql"] == 0 # Oracle NOT called
assert result["total"] == 1
assert result["data"][0]["JOBID"] == "J-CACHED"


@@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
"""Unit tests for mid_section_defect_service — engine integration (task 8.4)."""
from __future__ import annotations
import pandas as pd
from mes_dashboard.services import mid_section_defect_service as msd_svc
class TestDetectionEngineDecomposition:
"""8.4 — large date range + high-volume station → engine decomposition."""
def test_long_range_triggers_engine(self, monkeypatch):
"""90-day range → engine decomposition for detection query."""
import mes_dashboard.services.batch_query_engine as engine_mod
engine_calls = {"execute": 0, "merge": 0}
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
assert len(chunks) == 3 # ceil(90 / 31) = 3 chunks
assert kwargs.get("cache_prefix") == "msd_detect"
return kwargs.get("query_hash", "fake_hash")
result_df = pd.DataFrame({
"CONTAINERID": ["C1", "C2"],
"WORKCENTERNAME": ["TEST-WC-A", "TEST-WC-B"],
})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.cache_get",
lambda key: None,
)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.cache_set",
lambda key, val, ttl=None: None,
)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.SQLLoader",
type("FakeLoader", (), {
"load_with_params": staticmethod(lambda name, **kw: "SELECT 1 FROM dual"),
}),
)
df = msd_svc._fetch_station_detection_data(
start_date="2025-01-01",
end_date="2025-03-31",
station="測試",
)
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
assert df is not None
assert len(df) == 2
def test_short_range_skips_engine(self, monkeypatch):
"""30-day range → direct path, no engine."""
engine_calls = {"execute": 0}
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.cache_get",
lambda key: None,
)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.cache_set",
lambda key, val, ttl=None: None,
)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.SQLLoader",
type("FakeLoader", (), {
"load_with_params": staticmethod(lambda name, **kw: "SELECT 1 FROM dual"),
}),
)
monkeypatch.setattr(
"mes_dashboard.services.mid_section_defect_service.read_sql_df",
lambda sql, params: pd.DataFrame({"CONTAINERID": ["C1"]}),
)
df = msd_svc._fetch_station_detection_data(
start_date="2025-06-01",
end_date="2025-06-30",
station="測試",
)
assert engine_calls["execute"] == 0 # Engine NOT used
assert df is not None
assert len(df) == 1


@@ -0,0 +1,155 @@
# -*- coding: utf-8 -*-
"""Unit tests for parquet query spool store."""
from __future__ import annotations
import fnmatch
import json
import os
import time
import pandas as pd
from mes_dashboard.core.redis_client import get_key
from mes_dashboard.core import query_spool_store as spool
class FakeRedis:
def __init__(self) -> None:
self._data: dict[str, str] = {}
self._expires: dict[str, int] = {}
def _purge_if_expired(self, key: str) -> None:
exp = self._expires.get(key)
if exp is not None and exp <= int(time.time()):
self._data.pop(key, None)
self._expires.pop(key, None)
def setex(self, key: str, ttl: int, value: str) -> bool:
self._data[key] = value
self._expires[key] = int(time.time()) + int(ttl)
return True
def get(self, key: str):
self._purge_if_expired(key)
return self._data.get(key)
def delete(self, *keys) -> int:
deleted = 0
for key in keys:
if key in self._data:
deleted += 1
self._data.pop(key, None)
self._expires.pop(key, None)
return deleted
def scan_iter(self, match: str | None = None, count: int = 100):
for key in list(self._data.keys()):
self._purge_if_expired(key)
if key not in self._data:
continue
if match and not fnmatch.fnmatch(key, match):
continue
yield key
def _build_df() -> pd.DataFrame:
return pd.DataFrame(
{
"CONTAINERID": ["C1", "C2"],
"LOSSREASONNAME": ["001_A", "002_B"],
"REJECT_TOTAL_QTY": [10, 20],
}
)
def test_spool_store_and_load_roundtrip(monkeypatch, tmp_path):
fake = FakeRedis()
monkeypatch.setattr(spool, "QUERY_SPOOL_ENABLED", True)
monkeypatch.setattr(spool, "QUERY_SPOOL_DIR", tmp_path / "query_spool")
monkeypatch.setattr(spool, "get_redis_client", lambda: fake)
ok = spool.store_spooled_df("reject_dataset", "qid-roundtrip-1", _build_df(), ttl_seconds=1200)
assert ok is True
metadata = spool.get_spool_metadata("reject_dataset", "qid-roundtrip-1")
assert metadata is not None
assert metadata.get("row_count") == 2
loaded = spool.load_spooled_df("reject_dataset", "qid-roundtrip-1")
assert loaded is not None
pd.testing.assert_frame_equal(
loaded.sort_values("CONTAINERID").reset_index(drop=True),
_build_df().sort_values("CONTAINERID").reset_index(drop=True),
)
def test_spool_load_returns_none_when_metadata_hash_mismatch(monkeypatch, tmp_path):
fake = FakeRedis()
monkeypatch.setattr(spool, "QUERY_SPOOL_ENABLED", True)
monkeypatch.setattr(spool, "QUERY_SPOOL_DIR", tmp_path / "query_spool")
monkeypatch.setattr(spool, "get_redis_client", lambda: fake)
assert spool.store_spooled_df("reject_dataset", "qid-hash-1", _build_df(), ttl_seconds=1200)
key = get_key(spool._meta_key("reject_dataset", "qid-hash-1"))
metadata = json.loads(fake.get(key))
metadata["columns_hash"] = "deadbeefdeadbeef"
fake.setex(key, 1200, json.dumps(metadata, ensure_ascii=False))
loaded = spool.load_spooled_df("reject_dataset", "qid-hash-1")
assert loaded is None
assert fake.get(key) is None
def test_spool_load_returns_none_when_file_missing(monkeypatch, tmp_path):
fake = FakeRedis()
monkeypatch.setattr(spool, "QUERY_SPOOL_ENABLED", True)
monkeypatch.setattr(spool, "QUERY_SPOOL_DIR", tmp_path / "query_spool")
monkeypatch.setattr(spool, "get_redis_client", lambda: fake)
assert spool.store_spooled_df("reject_dataset", "qid-missing-file-1", _build_df(), ttl_seconds=1200)
metadata = spool.get_spool_metadata("reject_dataset", "qid-missing-file-1")
assert metadata is not None
path = spool._path_from_relative(metadata["relative_path"])
assert path is not None and path.exists()
path.unlink()
loaded = spool.load_spooled_df("reject_dataset", "qid-missing-file-1")
assert loaded is None
assert spool.get_spool_metadata("reject_dataset", "qid-missing-file-1") is None
def test_cleanup_expired_and_orphan_files(monkeypatch, tmp_path):
fake = FakeRedis()
root = tmp_path / "query_spool"
monkeypatch.setattr(spool, "QUERY_SPOOL_ENABLED", True)
monkeypatch.setattr(spool, "QUERY_SPOOL_DIR", root)
monkeypatch.setattr(spool, "QUERY_SPOOL_ORPHAN_GRACE_SECONDS", 1)
monkeypatch.setattr(spool, "get_redis_client", lambda: fake)
now = int(time.time())
assert spool.store_spooled_df("reject_dataset", "qid-valid-1", _build_df(), ttl_seconds=1200)
assert spool.store_spooled_df("reject_dataset", "qid-expired-1", _build_df(), ttl_seconds=1200)
expired_key = get_key(spool._meta_key("reject_dataset", "qid-expired-1"))
expired_meta = json.loads(fake.get(expired_key))
expired_path = spool._path_from_relative(expired_meta["relative_path"])
assert expired_path is not None and expired_path.exists()
expired_meta["expires_at"] = now - 10
fake.setex(expired_key, 1200, json.dumps(expired_meta, ensure_ascii=False))
orphan_dir = root / "reject_dataset"
orphan_dir.mkdir(parents=True, exist_ok=True)
orphan_path = orphan_dir / "orphan.parquet"
_build_df().to_parquet(orphan_path, engine="pyarrow", index=False)
old_time = now - 120
os.utime(orphan_path, (old_time, old_time))
stats = spool.cleanup_expired_spool(namespace="reject_dataset")
assert stats["meta_deleted"] >= 1
assert stats["expired_files_deleted"] >= 1
assert stats["orphan_files_deleted"] >= 1
assert not orphan_path.exists()
assert not expired_path.exists()
assert spool.get_spool_metadata("reject_dataset", "qid-valid-1") is not None
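# The spool contract these tests exercise, in sketch form: a parquet file under
# QUERY_SPOOL_DIR/<namespace>/ plus a Redis metadata record (row_count,
# columns_hash, relative_path, expires_at) with a matching TTL. Loading
# re-validates the schema hash and the file, dropping the metadata and returning
# None on any mismatch. File naming, key format and hash details are illustrative
# assumptions.
import hashlib
import json
import time
from pathlib import Path
import pandas as pd

def _columns_hash(df: pd.DataFrame) -> str:
    return hashlib.sha256("|".join(df.columns).encode("utf-8")).hexdigest()[:16]

def store_spooled_df_sketch(root: Path, client, namespace: str, query_id: str,
                            df: pd.DataFrame, ttl_seconds: int = 1200) -> bool:
    path = root / namespace / f"{query_id}.parquet"
    path.parent.mkdir(parents=True, exist_ok=True)
    df.to_parquet(path, engine="pyarrow", index=False)
    meta = {
        "row_count": int(len(df)),
        "columns_hash": _columns_hash(df),
        "relative_path": f"{namespace}/{path.name}",
        "expires_at": int(time.time()) + ttl_seconds,
    }
    client.setex(f"spool:{namespace}:{query_id}", ttl_seconds, json.dumps(meta, ensure_ascii=False))
    return True

def load_spooled_df_sketch(root: Path, client, namespace: str, query_id: str):
    key = f"spool:{namespace}:{query_id}"
    raw = client.get(key)
    if not raw:
        return None
    meta = json.loads(raw)
    path = root / meta["relative_path"]
    if not path.exists():
        client.delete(key)  # stale metadata: the parquet file disappeared
        return None
    df = pd.read_parquet(path)
    if _columns_hash(df) != meta["columns_hash"]:
        client.delete(key)  # schema mismatch: treat the spool entry as invalid
        return None
    return df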


@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
"""Unit tests for query_tool_service — slow-query migration + caching (tasks 10.1-10.5)."""
from __future__ import annotations
from unittest.mock import patch, MagicMock
import pandas as pd
from mes_dashboard.services import query_tool_service as qt_svc
class TestSlowQueryMigration:
"""10.2 — verify high-risk read_sql_df paths migrated to read_sql_df_slow."""
def test_resolve_by_lot_id_uses_slow(self, monkeypatch):
"""_resolve_by_lot_id should call read_sql_df_slow, not read_sql_df."""
calls = {"slow": 0, "fast": 0}
def fake_slow(sql, params=None, **kw):
calls["slow"] += 1
return pd.DataFrame({"CONTAINERID": ["C1"], "CONTAINERNAME": ["LOT-1"]})
def fake_fast(sql, params=None):
calls["fast"] += 1
return pd.DataFrame()
monkeypatch.setattr(qt_svc, "read_sql_df_slow", fake_slow)
monkeypatch.setattr(qt_svc, "read_sql_df", fake_fast)
monkeypatch.setattr(qt_svc, "SQLLoader",
type("FakeLoader", (), {
"load_with_params": staticmethod(lambda name, **kw: "SELECT 1 FROM dual"),
}),
)
result = qt_svc._resolve_by_lot_id(["LOT-1"])
assert calls["slow"] == 1
assert calls["fast"] == 0
def test_resolve_by_work_order_uses_slow(self, monkeypatch):
"""_resolve_by_work_order should call read_sql_df_slow."""
calls = {"slow": 0, "fast": 0}
def fake_slow(sql, params=None, **kw):
calls["slow"] += 1
return pd.DataFrame({
"CONTAINERID": ["C1"],
"CONTAINERNAME": ["LOT-1"],
"MFGORDERNAME": ["GA25010101"],
})
def fake_fast(sql, params=None):
calls["fast"] += 1
return pd.DataFrame()
monkeypatch.setattr(qt_svc, "read_sql_df_slow", fake_slow)
monkeypatch.setattr(qt_svc, "read_sql_df", fake_fast)
monkeypatch.setattr(qt_svc, "SQLLoader",
type("FakeLoader", (), {
"load_with_params": staticmethod(lambda name, **kw: "SELECT 1 FROM dual"),
}),
)
result = qt_svc._resolve_by_work_order(["GA25010101"])
assert calls["slow"] >= 1
assert calls["fast"] == 0
def test_equipment_status_hours_uses_slow(self, monkeypatch):
"""get_equipment_status_hours should call read_sql_df_slow."""
import mes_dashboard.core.redis_df_store as rds
calls = {"slow": 0, "fast": 0}
def fake_slow(sql, params=None, **kw):
calls["slow"] += 1
return pd.DataFrame({
"RESOURCEID": ["EQ1"],
"PRD_HOURS": [100.0],
"SBY_HOURS": [20.0],
"UDT_HOURS": [10.0],
"SDT_HOURS": [5.0],
"EGT_HOURS": [3.0],
"NST_HOURS": [2.0],
"TOTAL_HOURS": [140.0],
})
def fake_fast(sql, params=None):
calls["fast"] += 1
return pd.DataFrame()
monkeypatch.setattr(qt_svc, "read_sql_df_slow", fake_slow)
monkeypatch.setattr(qt_svc, "read_sql_df", fake_fast)
monkeypatch.setattr(rds, "redis_load_df", lambda key: None)
monkeypatch.setattr(rds, "redis_store_df", lambda key, df, ttl=None: None)
monkeypatch.setattr(qt_svc, "SQLLoader",
type("FakeLoader", (), {
"load_with_params": staticmethod(lambda name, **kw: "SELECT 1 FROM dual"),
}),
)
result = qt_svc.get_equipment_status_hours(
equipment_ids=["EQ1"],
start_date="2025-01-01",
end_date="2025-01-31",
)
assert calls["slow"] == 1
assert calls["fast"] == 0
assert "error" not in result
assert result["totals"]["PRD_HOURS"] == 100.0
class TestEquipmentCaching:
"""10.4/10.5 — equipment query caching via Redis."""
def test_equipment_status_cache_hit(self, monkeypatch):
"""Redis cache hit → returns cached result without Oracle query."""
import mes_dashboard.core.redis_df_store as rds
calls = {"sql": 0}
cached_df = pd.DataFrame({
"RESOURCEID": ["EQ-CACHED"],
"PRD_HOURS": [50.0],
"SBY_HOURS": [10.0],
"UDT_HOURS": [5.0],
"SDT_HOURS": [2.0],
"EGT_HOURS": [1.0],
"NST_HOURS": [0.0],
"TOTAL_HOURS": [68.0],
})
monkeypatch.setattr(rds, "redis_load_df", lambda key: cached_df)
def fail_sql(*args, **kwargs):
calls["sql"] += 1
raise RuntimeError("Should not reach Oracle")
monkeypatch.setattr(qt_svc, "read_sql_df_slow", fail_sql)
monkeypatch.setattr(qt_svc, "read_sql_df", fail_sql)
result = qt_svc.get_equipment_status_hours(
equipment_ids=["EQ1"],
start_date="2025-01-01",
end_date="2025-01-31",
)
assert calls["sql"] == 0 # Oracle NOT called
assert result["data"][0]["RESOURCEID"] == "EQ-CACHED"


@@ -0,0 +1,185 @@
# -*- coding: utf-8 -*-
"""Unit tests for redis_df_store module."""
import pytest
from unittest.mock import patch, MagicMock
from decimal import Decimal
import pandas as pd
class TestRedisStoreDf:
"""3.1 — round-trip store/load."""
def test_round_trip(self):
"""Store a DF, load it back, verify equality."""
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
def fake_setex(key, ttl, value):
stored[key] = value
def fake_get(key):
return stored.get(key)
mock_client.setex.side_effect = fake_setex
mock_client.get.side_effect = fake_get
df = pd.DataFrame({"A": [1, 2, 3], "B": ["x", "y", "z"]})
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
rds.redis_store_df("test:key", df, ttl=60)
loaded = rds.redis_load_df("test:key")
assert loaded is not None
pd.testing.assert_frame_equal(loaded, df)
def test_store_empty_df(self):
"""Round-trip with an empty DataFrame preserves schema."""
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
df = pd.DataFrame({"COL": pd.Series([], dtype="int64")})
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
rds.redis_store_df("test:empty", df, ttl=60)
loaded = rds.redis_load_df("test:empty")
assert loaded is not None
assert len(loaded) == 0
assert list(loaded.columns) == ["COL"]
def test_decimal_object_column_round_trip(self):
"""Mixed-precision Decimal object columns should store without serialization errors."""
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
df = pd.DataFrame(
{
"REJECT_SHARE_PCT": [Decimal("12.345"), Decimal("1.2"), None],
"REJECT_RATE_PCT": [Decimal("0.123456"), Decimal("10.9"), Decimal("9.000001")],
"LABEL": ["A", "B", "C"],
}
)
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
assert rds.redis_store_df("test:decimal", df, ttl=60)
loaded = rds.redis_load_df("test:decimal")
assert loaded is not None
assert loaded["REJECT_SHARE_PCT"].dtype.kind in ("f", "i")
assert loaded["REJECT_RATE_PCT"].dtype.kind in ("f", "i")
assert loaded.loc[0, "REJECT_SHARE_PCT"] == pytest.approx(12.345)
assert loaded.loc[2, "REJECT_RATE_PCT"] == pytest.approx(9.000001)
class TestChunkHelpers:
"""3.2 — chunk-level helpers round-trip."""
def test_chunk_round_trip(self):
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
stored = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
df = pd.DataFrame({"X": [10, 20]})
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
rds.redis_store_chunk("reject", "abc123", 0, df, ttl=60)
assert rds.redis_chunk_exists("reject", "abc123", 0)
loaded = rds.redis_load_chunk("reject", "abc123", 0)
assert loaded is not None
pd.testing.assert_frame_equal(loaded, df)
def test_chunk_not_exists(self):
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
mock_client.exists.return_value = 0
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
assert not rds.redis_chunk_exists("reject", "abc123", 99)
def test_clear_batch_removes_chunk_and_meta_keys(self):
import mes_dashboard.core.redis_df_store as rds
mock_client = MagicMock()
deleted = {"keys": []}
mock_client.keys.return_value = [
"mes-dashboard:batch:reject:q123:chunk:0",
"mes-dashboard:batch:reject:q123:chunk:1",
]
mock_client.delete.side_effect = lambda *keys: deleted["keys"].extend(keys) or len(keys)
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=mock_client):
count = rds.redis_clear_batch("reject", "q123")
assert count == 3
assert any("chunk:0" in key for key in deleted["keys"])
assert any("chunk:1" in key for key in deleted["keys"])
assert any("meta" in key for key in deleted["keys"])
class TestRedisUnavailable:
"""3.3 — graceful fallback when Redis is unavailable."""
def test_store_no_redis(self):
"""store returns without error when Redis disabled."""
import mes_dashboard.core.redis_df_store as rds
df = pd.DataFrame({"A": [1]})
with patch.object(rds, "REDIS_ENABLED", False):
rds.redis_store_df("key", df) # no exception
def test_load_no_redis(self):
"""load returns None when Redis disabled."""
import mes_dashboard.core.redis_df_store as rds
with patch.object(rds, "REDIS_ENABLED", False):
result = rds.redis_load_df("key")
assert result is None
def test_chunk_exists_no_redis(self):
import mes_dashboard.core.redis_df_store as rds
with patch.object(rds, "REDIS_ENABLED", False):
assert not rds.redis_chunk_exists("p", "h", 0)
def test_store_client_none(self):
"""store returns without error when client is None."""
import mes_dashboard.core.redis_df_store as rds
df = pd.DataFrame({"A": [1]})
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=None):
rds.redis_store_df("key", df) # no exception
def test_load_client_none(self):
"""load returns None when client is None."""
import mes_dashboard.core.redis_df_store as rds
with patch.object(rds, "REDIS_ENABLED", True), \
patch.object(rds, "get_redis_client", return_value=None):
result = rds.redis_load_df("key")
assert result is None
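# A round-trip sketch consistent with these tests: DataFrames travel through Redis
# as parquet bytes, object columns holding Decimal values are coerced to float
# before serialization, and both store and load degrade gracefully when Redis is
# disabled or the client is unavailable. The serialization format is an assumption.
import io
from decimal import Decimal
import pandas as pd

def _coerce_decimal_columns(df: pd.DataFrame) -> pd.DataFrame:
    out = df.copy()
    for col in out.columns:
        if out[col].dtype == object and out[col].map(lambda v: isinstance(v, Decimal)).any():
            out[col] = out[col].astype(float)  # None values become NaN
    return out

def redis_store_df_sketch(client, key: str, df: pd.DataFrame, ttl: int = 900) -> bool:
    if client is None:
        return False  # Redis unavailable: skip caching, never raise
    buf = io.BytesIO()
    _coerce_decimal_columns(df).to_parquet(buf, engine="pyarrow", index=False)
    client.setex(key, ttl, buf.getvalue())
    return True

def redis_load_df_sketch(client, key: str):
    if client is None:
        return None
    raw = client.get(key)
    return pd.read_parquet(io.BytesIO(raw)) if raw is not None else None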


@@ -3,6 +3,9 @@
from __future__ import annotations
from decimal import Decimal
from unittest.mock import MagicMock
import pandas as pd
import pytest
@@ -292,3 +295,359 @@ def test_apply_pareto_selection_filter_supports_multi_dimension_and_logic():
assert len(filtered) == 1
assert set(filtered["CONTAINERNAME"].tolist()) == {"LOT-002"}
# ============================================================
# 5.9 — 365-day date range → engine decomposition, no Oracle timeout
# ============================================================
class TestEngineDecompositionDateRange:
"""Verify engine routing for long date ranges."""
def test_365_day_range_triggers_engine(self, monkeypatch):
"""5.9: 365-day date range → chunks decomposed, engine path used."""
import mes_dashboard.services.batch_query_engine as engine_mod
# Track calls via engine module (local imports inside function pull from here)
engine_calls = {
"decompose": 0,
"execute": 0,
"merge": 0,
"chunk_count": 0,
"parallel": 0,
"max_rows_per_chunk": 0,
}
original_decompose = engine_mod.decompose_by_time_range
def tracked_decompose(*args, **kwargs):
engine_calls["decompose"] += 1
return original_decompose(*args, **kwargs)
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
engine_calls["chunk_count"] = len(chunks)
engine_calls["parallel"] = int(kwargs.get("parallel", 1))
engine_calls["max_rows_per_chunk"] = int(kwargs.get("max_rows_per_chunk", 0))
return kwargs.get("query_hash", "fake_hash")
result_df = pd.DataFrame({
"CONTAINERID": ["C1"],
"LOSSREASONNAME": ["R1"],
"REJECT_TOTAL_QTY": [10],
})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
# Mock on engine module (local imports will pick these up)
monkeypatch.setattr(engine_mod, "decompose_by_time_range", tracked_decompose)
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
# Mock service-level helpers
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._prepare_sql",
lambda *a, **kw: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._apply_policy_filters",
lambda df, **kw: df,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_primary_response",
lambda qid, df, meta, ri: {"query_id": qid, "rows": len(df)},
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_where_clause",
lambda **kw: ("", {}, {}),
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._validate_range",
lambda *a: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache.redis_clear_batch",
lambda *a, **kw: 0,
)
result = cache_svc.execute_primary_query(
mode="date_range",
start_date="2025-01-01",
end_date="2025-12-31",
)
assert engine_calls["decompose"] == 1
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
assert result["rows"] == 1
expected_chunks = original_decompose(
"2025-01-01",
"2025-12-31",
grain_days=cache_svc._REJECT_ENGINE_GRAIN_DAYS,
)
assert engine_calls["chunk_count"] == len(expected_chunks)
assert engine_calls["parallel"] == cache_svc._REJECT_ENGINE_PARALLEL
assert engine_calls["max_rows_per_chunk"] == cache_svc._REJECT_ENGINE_MAX_ROWS_PER_CHUNK
def test_short_range_skips_engine(self, monkeypatch):
"""Short date range (<= threshold) uses direct path, no engine."""
import mes_dashboard.services.batch_query_engine as engine_mod
engine_calls = {"decompose": 0}
original_decompose = engine_mod.decompose_by_time_range
def tracked_decompose(*args, **kwargs):
engine_calls["decompose"] += 1
return original_decompose(*args, **kwargs)
monkeypatch.setattr(engine_mod, "decompose_by_time_range", tracked_decompose)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._prepare_sql",
lambda *a, **kw: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache.read_sql_df",
lambda sql, params: pd.DataFrame({"CONTAINERID": ["C1"]}),
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._apply_policy_filters",
lambda df, **kw: df,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_primary_response",
lambda qid, df, meta, ri: {"query_id": qid, "rows": len(df)},
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_where_clause",
lambda **kw: ("", {}, {}),
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache.redis_clear_batch",
lambda *a, **kw: 0,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._validate_range",
lambda *a: None,
)
result = cache_svc.execute_primary_query(
mode="date_range",
start_date="2025-06-01",
end_date="2025-06-30",
)
assert engine_calls["decompose"] == 0 # Engine NOT used
assert result["rows"] == 1
# ============================================================
# 5.10 — Large workorder (500+ containers) → ID batching
# ============================================================
class TestEngineDecompositionContainerIDs:
"""Verify engine routing for large container ID sets."""
def test_large_container_set_triggers_engine(self, monkeypatch):
"""5.10: 1500 container IDs → engine ID batching activated."""
import mes_dashboard.services.batch_query_engine as engine_mod
engine_calls = {"execute": 0, "merge": 0}
fake_ids = [f"CID-{i:04d}" for i in range(1500)]
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
# Verify correct number of chunks
assert len(chunks) == 2 # ceil(1500 / 1000) = 2 batches
return kwargs.get("query_hash", "fake_hash")
result_df = pd.DataFrame({"CONTAINERID": fake_ids[:5]})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache.resolve_containers",
lambda input_type, values: {
"container_ids": fake_ids,
"resolution_info": {"type": input_type, "count": len(fake_ids)},
},
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._prepare_sql",
lambda *a, **kw: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._apply_policy_filters",
lambda df, **kw: df,
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_primary_response",
lambda qid, df, meta, ri: {"query_id": qid, "rows": len(df)},
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache._build_where_clause",
lambda **kw: ("", {}, {}),
)
monkeypatch.setattr(
"mes_dashboard.services.reject_dataset_cache.redis_clear_batch",
lambda *a, **kw: 0,
)
result = cache_svc.execute_primary_query(
mode="container",
container_input_type="workorder",
container_values=["WO-BIG"],
)
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
def test_engine_path_stores_mixed_precision_decimal_chunks_without_redis_serialization_error(
monkeypatch, caplog
):
"""Long-range engine path should handle Decimal object columns in chunk cache."""
import mes_dashboard.core.redis_df_store as rds
import mes_dashboard.services.batch_query_engine as bqe
mock_client = MagicMock()
stored = {}
hashes = {}
mock_client.setex.side_effect = lambda k, t, v: stored.update({k: v})
mock_client.get.side_effect = lambda k: stored.get(k)
mock_client.exists.side_effect = lambda k: 1 if k in stored else 0
mock_client.hset.side_effect = lambda k, mapping=None: hashes.setdefault(k, {}).update(mapping or {})
mock_client.hgetall.side_effect = lambda k: hashes.get(k, {})
mock_client.expire.return_value = None
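# The MagicMock emulates just enough of the Redis client: setex/get/exists round-trip through
# `stored` and hset/hgetall through `hashes`, so chunk payloads are serialized and read back
# without a live server.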
engine_row = pd.DataFrame(
{
"CONTAINERID": ["C-1", "C-2"],
"LOSSREASONNAME": ["001_A", "002_B"],
"REJECT_TOTAL_QTY": [10, 20],
"REJECT_SHARE_PCT": [Decimal("12.345"), Decimal("1.2")],
"REJECT_RATE_PCT": [Decimal("0.123456"), Decimal("9.000001")],
}
)
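# The frame mixes int and object-dtype Decimal columns, the combination assumed to have
# triggered the original "Failed to store DataFrame in Redis" error this test guards against.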
monkeypatch.setattr(cache_svc, "_get_cached_df", lambda _: None)
monkeypatch.setattr(cache_svc, "_prepare_sql", lambda *a, **kw: "SELECT 1 FROM dual")
monkeypatch.setattr(cache_svc, "_build_where_clause", lambda **kw: ("", {}, {}))
monkeypatch.setattr(cache_svc, "_validate_range", lambda *a: None)
monkeypatch.setattr(cache_svc, "_apply_policy_filters", lambda df, **kw: df)
monkeypatch.setattr(cache_svc, "_build_primary_response", lambda qid, df, meta, ri: {"rows": len(df)})
monkeypatch.setattr(cache_svc, "read_sql_df", lambda sql, params: engine_row.copy())
monkeypatch.setattr(cache_svc, "redis_clear_batch", lambda *a, **kw: 0)
monkeypatch.setattr(rds, "REDIS_ENABLED", True)
monkeypatch.setattr(rds, "get_redis_client", lambda: mock_client)
monkeypatch.setattr(bqe, "get_redis_client", lambda: mock_client)
result = cache_svc.execute_primary_query(
mode="date_range",
start_date="2025-01-01",
end_date="2025-12-31",
)
expected_chunks = bqe.decompose_by_time_range(
"2025-01-01",
"2025-12-31",
grain_days=cache_svc._REJECT_ENGINE_GRAIN_DAYS,
)
assert result["rows"] == len(expected_chunks) * 2
assert "Failed to store DataFrame in Redis" not in caplog.text
assert any("batch:reject" in key for key in stored)
def test_large_result_spills_to_parquet_and_view_export_use_spool_fallback(monkeypatch):
"""13.8: long-range oversized result should use spool and still serve view/export."""
spool_data = {}
df = _build_detail_filter_df().copy()
cache_svc._dataset_cache.clear()
monkeypatch.setattr(cache_svc, "_redis_load_df", lambda _qid: None)
monkeypatch.setattr(cache_svc, "_validate_range", lambda *_: None)
monkeypatch.setattr(cache_svc, "_build_where_clause", lambda **kw: ("", {}, {}))
monkeypatch.setattr(cache_svc, "_prepare_sql", lambda *a, **kw: "SELECT 1 FROM dual")
monkeypatch.setattr(cache_svc, "read_sql_df", lambda sql, params: df.copy())
monkeypatch.setattr(cache_svc, "_apply_policy_filters", lambda data, **kw: data)
monkeypatch.setattr(
cache_svc,
"_build_primary_response",
lambda qid, result_df, meta, resolution_info: {"query_id": qid, "rows": len(result_df)},
)
monkeypatch.setattr(cache_svc, "_REJECT_ENGINE_SPILL_ENABLED", True)
monkeypatch.setattr(cache_svc, "_REJECT_ENGINE_MAX_TOTAL_ROWS", 1)
monkeypatch.setattr(cache_svc, "_REJECT_ENGINE_MAX_RESULT_MB", 1)
monkeypatch.setattr(cache_svc, "_store_df", lambda *_a, **_kw: (_ for _ in ()).throw(AssertionError("_store_df should not be called for spill path")))
monkeypatch.setattr(cache_svc, "_redis_delete_df", lambda *_a, **_kw: None)
def fake_store_spooled_df(namespace, query_id, data, ttl_seconds=None):
spool_data[(namespace, query_id)] = data.copy()
return True
def fake_load_spooled_df(namespace, query_id):
stored = spool_data.get((namespace, query_id))
return stored.copy() if stored is not None else None
monkeypatch.setattr(cache_svc, "store_spooled_df", fake_store_spooled_df)
monkeypatch.setattr(cache_svc, "load_spooled_df", fake_load_spooled_df)
result = cache_svc.execute_primary_query(
mode="date_range",
start_date="2025-01-01",
end_date="2025-01-31",
)
query_id = result["query_id"]
assert result["rows"] == len(df)
assert (cache_svc._REDIS_NAMESPACE, query_id) in spool_data
# Force cache miss for L1/L2 and verify spool fallback serves view/export.
cache_svc._dataset_cache.clear()
monkeypatch.setattr(cache_svc, "_redis_load_df", lambda _qid: None)
monkeypatch.setattr(
"mes_dashboard.services.scrap_reason_exclusion_cache.get_excluded_reasons",
lambda: [],
)
view_result = cache_svc.apply_view(query_id=query_id, page=1, per_page=200)
assert view_result is not None
assert view_result["detail"]["pagination"]["total"] == len(df)
export_rows = cache_svc.export_csv_from_cache(query_id=query_id)
assert export_rows is not None
assert len(export_rows) == len(df)

View File

@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-
"""Unit tests for resource_dataset_cache — engine integration (task 7.4)."""
from __future__ import annotations
import pandas as pd
from mes_dashboard.services import resource_dataset_cache as cache_svc
class TestResourceEngineDecomposition:
"""7.4 — resource-history with long date range triggers engine."""
def test_long_range_triggers_engine(self, monkeypatch):
"""90-day range → engine decomposition activated."""
import mes_dashboard.services.batch_query_engine as engine_mod
engine_calls = {"execute": 0, "merge": 0}
def fake_execute_plan(chunks, query_fn, **kwargs):
engine_calls["execute"] += 1
assert len(chunks) == 3  # ceil(90 days / 31-day grain) = 3 chunks
return kwargs.get("query_hash", "fake_hash")
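# 2025-01-01 through 2025-03-31 spans 90 days; at the assumed 31-day grain that decomposes
# into three chunks, which is what the fake plan asserts.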
result_df = pd.DataFrame({
"HISTORYID": [1, 2],
"RESOURCEID": ["R1", "R2"],
})
def fake_merge_chunks(prefix, qhash, **kwargs):
engine_calls["merge"] += 1
return result_df
monkeypatch.setattr(engine_mod, "execute_plan", fake_execute_plan)
monkeypatch.setattr(engine_mod, "merge_chunks", fake_merge_chunks)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._load_sql",
lambda name: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_filtered_resources_and_lookup",
lambda **kw: (
[{"RESOURCEID": "R1", "RESOURCENAME": "Machine-1"}],
{"R1": {"RESOURCENAME": "Machine-1"}},
"h.HISTORYID IN (SELECT HISTORYID FROM RESOURCEHISTORY)",
),
)
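# Resource filtering and SQL assembly are out of scope: the stub hands back one resource,
# its lookup entry, and a ready-made WHERE fragment.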
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_resource_lookup",
lambda: {},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_workcenter_mapping",
lambda: {},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._derive_summary",
lambda df, rl, wc, gran: {"total_hours": 100},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._derive_detail",
lambda df, rl, wc: {"items": [], "pagination": {"total": 2}},
)
result = cache_svc.execute_primary_query(
start_date="2025-01-01",
end_date="2025-03-31",
workcenter_groups=["WB"],
)
assert engine_calls["execute"] == 1
assert engine_calls["merge"] == 1
assert result["query_id"] is not None
def test_short_range_skips_engine(self, monkeypatch):
"""30-day range → direct path, no engine."""
engine_calls = {"execute": 0}
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_cached_df",
lambda _: None,
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._load_sql",
lambda name: "SELECT 1 FROM dual",
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache.read_sql_df",
lambda sql, params: pd.DataFrame({"HISTORYID": [1]}),
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._store_df",
lambda *a, **kw: None,
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_filtered_resources_and_lookup",
lambda **kw: (
[{"RESOURCEID": "R1"}],
{"R1": {"RESOURCENAME": "Machine-1"}},
"h.HISTORYID IN (SELECT HISTORYID FROM RESOURCEHISTORY)",
),
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_resource_lookup",
lambda: {},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._get_workcenter_mapping",
lambda: {},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._derive_summary",
lambda df, rl, wc, gran: {},
)
monkeypatch.setattr(
"mes_dashboard.services.resource_dataset_cache._derive_detail",
lambda df, rl, wc: {"items": [], "pagination": {"total": 1}},
)
result = cache_svc.execute_primary_query(
start_date="2025-06-01",
end_date="2025-06-30",
workcenter_groups=["WB"],
)
assert engine_calls["execute"] == 0 # Engine NOT used