Chunk failures in BatchQueryEngine were silently discarded — `has_partial_failure` was tracked in Redis but never surfaced to the API response or frontend. Users could see incomplete data without any warning. This commit closes the gap end-to-end: Backend: - Track failed chunk time ranges (`failed_ranges`) in batch engine progress metadata - Add single retry for transient Oracle errors (timeout, connection) in `_execute_single_chunk` - Read `get_batch_progress()` after merge but before `redis_clear_batch()` cleanup - Inject `has_partial_failure`, `failed_chunk_count`, `failed_ranges` into API response meta - Persist partial failure flag to independent Redis key with TTL aligned to data storage layer - Add shared container-resolution policy module with wildcard/expansion guardrails - Refactor reason filter from single-value to multi-select (`reason` → `reasons`) Frontend: - Add client-side date range validation (730-day limit) before API submission - Display amber warning banner on partial failure with specific failed date ranges - Support generic fallback message for container-mode queries without date ranges - Update FilterPanel to support multi-select reason chips Specs & tests: - Create batch-query-resilience spec; update reject-history-api and reject-history-page specs - Add 7 new tests for retry, memory guard, failed ranges, partial failure propagation, TTL - Cross-service regression verified (hold, resource, job, msd — 411 tests pass) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
74 lines
2.4 KiB
Python
# -*- coding: utf-8 -*-
|
|
"""Unit tests for shared container resolution policy helpers."""
|
|
|
|
from __future__ import annotations
|
|
|
|
from mes_dashboard.services import container_resolution_policy as policy
|
|
|
|
|
|
def test_validate_resolution_request_rejects_empty_values():
    """An empty value list must produce a non-None validation error."""
    error = policy.validate_resolution_request("lot_id", [])
    assert error is not None
|
|
|
|
|
|
def test_validate_resolution_request_rejects_broad_pattern(monkeypatch):
    """A bare wildcard fails the minimum-prefix guard with a specific message."""
    monkeypatch.setenv("CONTAINER_RESOLVE_PATTERN_MIN_PREFIX_LEN", "2")
    error = policy.validate_resolution_request("lot_id", ["%"])
    # Error text is user-facing (zh-TW): "wildcard condition is too broad".
    assert error is not None
    assert "萬用字元條件過於寬鬆" in error
|
|
|
|
|
|
def test_validate_resolution_request_allows_pattern_with_prefix(monkeypatch):
    """A wildcard pattern carrying a sufficient literal prefix passes validation."""
    monkeypatch.setenv("CONTAINER_RESOLVE_PATTERN_MIN_PREFIX_LEN", "2")
    # "GA26" (4 chars) exceeds the configured minimum prefix of 2.
    assert policy.validate_resolution_request("lot_id", ["GA26%"]) is None
|
|
|
|
|
|
def test_validate_resolution_result_rejects_excessive_expansion(monkeypatch):
    """One token expanding past the per-token cap is rejected in strict mode."""
    monkeypatch.setenv("CONTAINER_RESOLVE_MAX_EXPANSION_PER_TOKEN", "3")
    payload = {
        "data": [{"container_id": "C1"}],
        # "GA%" claims 10 expansions while the cap above is 3.
        "expansion_info": {"GA%": 10},
    }
    error = policy.validate_resolution_result(payload)
    # Error text is user-facing (zh-TW): "single condition expands too much".
    assert error is not None
    assert "單一條件展開過大" in error
|
|
|
|
|
|
def test_validate_resolution_result_rejects_excessive_container_count(monkeypatch):
    """More resolved containers than the configured cap is rejected."""
    monkeypatch.setenv("CONTAINER_RESOLVE_MAX_CONTAINER_IDS", "2")
    # Three rows against a cap of 2.
    rows = [{"container_id": cid} for cid in ("C1", "C2", "C3")]
    error = policy.validate_resolution_result({"data": rows, "expansion_info": {}})
    # Error text is user-facing (zh-TW): "resolution result too large".
    assert error is not None
    assert "解析結果過大" in error
|
|
|
|
|
|
def test_validate_resolution_result_non_strict_allows_overflow(monkeypatch):
    """With strict=False both caps become advisory: no error despite overflow."""
    monkeypatch.setenv("CONTAINER_RESOLVE_MAX_CONTAINER_IDS", "2")
    # Deliberately violates both the container-count cap and any expansion cap.
    rows = [{"container_id": cid} for cid in ("C1", "C2", "C3")]
    payload = {"data": rows, "expansion_info": {"GA%": 999}}
    assert policy.validate_resolution_result(payload, strict=False) is None
|
|
|
|
|
|
def test_extract_container_ids_deduplicates_and_preserves_order():
    """First occurrence wins, order is kept, and the upper-case key variant counts."""
    records = [
        {"container_id": "C1"},
        {"container_id": "C1"},  # duplicate — must appear only once in the output
        {"CONTAINERID": "C2"},   # alternate key casing must still be recognized
        {"container_id": "C3"},
    ]
    extracted = policy.extract_container_ids(records)
    assert extracted == ["C1", "C2", "C3"]
|