fix(reject-history): WORKORDER bind variable error + move policy filters to in-memory

Three fixes for the reject history query feature:

1. Fix DPY-4010 bind variable error when querying by WORKORDER — the
   workflow_lookup CTE had hardcoded :start_date/:end_date binds, which are
   not provided in container mode. Replaced them with a {{ WORKFLOW_FILTER }}
   template slot that defaults to the date-based filter and is overridden
   with a container-based filter in container mode.

2. Move policy toggle filters (material scrap, PB_diode, excluded reasons)
   from SQL-level to in-memory pandas filtering. Cache now stores unfiltered
   data so toggling policy filters reuses cached results instantly instead
   of requiring a ~30s Oracle round-trip per combination.

3. Add per-WORKORDER expansion_info display in FilterPanel for multi-order
   container resolution diagnostics.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
egg
2026-02-26 13:21:31 +08:00
parent 07ced80fb0
commit 5d58ac551d
9 changed files with 161 additions and 16 deletions

View File

@@ -175,6 +175,7 @@ export function buildViewParams(queryId, {
detailReason = '',
page = 1,
perPage = 50,
policyFilters = {},
} = {}) {
const params = { query_id: queryId };
if (supplementaryFilters.packages?.length > 0) {
@@ -197,5 +198,16 @@ export function buildViewParams(queryId, {
}
params.page = page || 1;
params.per_page = perPage || 50;
// Policy filters (applied in-memory on cached data)
if (policyFilters.includeExcludedScrap) {
params.include_excluded_scrap = 'true';
}
if (policyFilters.excludeMaterialScrap === false) {
params.exclude_material_scrap = 'false';
}
if (policyFilters.excludePbDiode === false) {
params.exclude_pb_diode = 'false';
}
return params;
}

View File

@@ -242,6 +242,11 @@ async function refreshView() {
detailReason: detailReason.value,
page: page.value,
perPage: DEFAULT_PER_PAGE,
policyFilters: {
includeExcludedScrap: committedPrimary.includeExcludedScrap,
excludeMaterialScrap: committedPrimary.excludeMaterialScrap,
excludePbDiode: committedPrimary.excludePbDiode,
},
});
const resp = await apiGet('/api/reject-history/view', {
@@ -468,6 +473,11 @@ async function exportCsv() {
for (const date of selectedTrendDates.value) params.append('trend_dates', date);
if (detailReason.value) params.set('detail_reason', detailReason.value);
// Policy filters (applied in-memory on cached data)
if (committedPrimary.includeExcludedScrap) params.set('include_excluded_scrap', 'true');
if (!committedPrimary.excludeMaterialScrap) params.set('exclude_material_scrap', 'false');
if (!committedPrimary.excludePbDiode) params.set('exclude_pb_diode', 'false');
const response = await fetch(`/api/reject-history/export-cached?${params.toString()}`);
if (response.status === 410) {

View File

@@ -173,6 +173,11 @@ function emitSupplementary(patch) {
class="card-body resolution-info"
>
已解析 {{ resolutionInfo.resolved_count }} 筆容器
<template v-if="resolutionInfo.expansion_info && Object.keys(resolutionInfo.expansion_info).length > 1">
<span class="resolution-detail">
({{ Object.entries(resolutionInfo.expansion_info).map(([k, v]) => `${k}: ${v}`).join(', ') }})
</span>
</template>
<template v-if="resolutionInfo.not_found?.length > 0">
<span class="resolution-warn">
({{ resolutionInfo.not_found.length }} 筆未找到:

View File

@@ -67,6 +67,11 @@
font-weight: 600;
}
/* Muted inline annotation for per-WORKORDER expansion diagnostics
   shown next to the resolved-container count in FilterPanel. */
.resolution-detail {
  color: #475569;
  font-weight: 400;
}
.resolution-warn {
color: #b45309;
font-weight: 400;

View File

@@ -518,6 +518,10 @@ def api_reject_history_view():
reason = request.args.get("reason", "").strip() or None
detail_reason = request.args.get("detail_reason", "").strip() or None
include_excluded_scrap = request.args.get("include_excluded_scrap", "false").lower() == "true"
exclude_material_scrap = request.args.get("exclude_material_scrap", "true").lower() != "false"
exclude_pb_diode = request.args.get("exclude_pb_diode", "true").lower() != "false"
try:
result = apply_view(
query_id=query_id,
@@ -529,6 +533,9 @@ def api_reject_history_view():
detail_reason=detail_reason,
page=page,
per_page=per_page,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
if result is None:
@@ -555,6 +562,10 @@ def api_reject_history_export_cached():
reason = request.args.get("reason", "").strip() or None
detail_reason = request.args.get("detail_reason", "").strip() or None
include_excluded_scrap = request.args.get("include_excluded_scrap", "false").lower() == "true"
exclude_material_scrap = request.args.get("exclude_material_scrap", "true").lower() != "false"
exclude_pb_diode = request.args.get("exclude_pb_diode", "true").lower() != "false"
try:
rows = export_csv_from_cache(
query_id=query_id,
@@ -564,6 +575,9 @@ def api_reject_history_export_cached():
metric_filter=metric_filter,
trend_dates=_parse_multi_param("trend_dates") or None,
detail_reason=detail_reason,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
if rows is None:

View File

@@ -168,6 +168,7 @@ def resolve_containers(
"input_count": result.get("input_count", len(values)),
"resolved_count": len(container_ids),
"not_found": result.get("not_found", []),
"expansion_info": result.get("expansion_info", {}),
},
}
@@ -194,6 +195,7 @@ def execute_primary_query(
base_where_parts: List[str] = []
base_params: Dict[str, Any] = {}
resolution_info: Optional[Dict[str, Any]] = None
workflow_filter: str = "" # empty = use default date-based filter
if mode == "date_range":
if not start_date or not end_date:
@@ -227,28 +229,35 @@ def execute_primary_query(
base_where_parts.append(cid_condition)
base_params.update(cid_params)
# Build workflow_filter for the workflow_lookup CTE (uses r0 alias).
# Reuses the same bind param names (p0, p1, ...) already in base_params.
wf_builder = QueryBuilder()
wf_builder.add_in_condition("r0.CONTAINERID", container_ids)
wf_where, _ = wf_builder.build_where_only()
wf_condition = wf_where.strip()
if wf_condition.upper().startswith("WHERE "):
wf_condition = wf_condition[6:].strip()
workflow_filter = wf_condition
else:
raise ValueError(f"不支援的查詢模式: {mode}")
base_where = " AND ".join(base_where_parts)
# ---- Build policy WHERE (only toggles, no supplementary filters) ----
policy_where, policy_params, meta = _build_where_clause(
# ---- Build policy meta (for response only, NOT for SQL) ----
_, _, meta = _build_where_clause(
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
# ---- Compute query_id from all primary params ----
# ---- Compute query_id from base params only (policy filters applied in-memory) ----
query_id_input = {
"mode": mode,
"start_date": start_date,
"end_date": end_date,
"container_input_type": container_input_type,
"container_values": sorted(container_values or []),
"include_excluded_scrap": include_excluded_scrap,
"exclude_material_scrap": exclude_material_scrap,
"exclude_pb_diode": exclude_pb_diode,
}
query_id = _make_query_id(query_id_input)
@@ -256,28 +265,92 @@ def execute_primary_query(
cached_df = _get_cached_df(query_id)
if cached_df is not None:
logger.info("Dataset cache hit for query_id=%s", query_id)
filtered = _apply_policy_filters(
cached_df,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
return _build_primary_response(
query_id, cached_df, meta, resolution_info
query_id, filtered, meta, resolution_info
)
# ---- Execute Oracle query ----
# ---- Execute Oracle query (NO policy filters — cache unfiltered) ----
logger.info("Dataset cache miss for query_id=%s, querying Oracle", query_id)
sql = _prepare_sql(
"list",
where_clause=policy_where,
where_clause="",
base_variant="lot",
base_where=base_where,
workflow_filter=workflow_filter,
)
all_params = {**base_params, **policy_params, "offset": 0, "limit": 999999999}
all_params = {**base_params, "offset": 0, "limit": 999999999}
df = read_sql_df(sql, all_params)
if df is None:
df = pd.DataFrame()
# ---- Cache and return ----
# ---- Cache unfiltered, return filtered ----
if not df.empty:
_store_df(query_id, df)
return _build_primary_response(query_id, df, meta, resolution_info)
filtered = _apply_policy_filters(
df,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
return _build_primary_response(query_id, filtered, meta, resolution_info)
def _apply_policy_filters(
df: pd.DataFrame,
*,
include_excluded_scrap: bool = False,
exclude_material_scrap: bool = True,
exclude_pb_diode: bool = True,
) -> pd.DataFrame:
"""Apply policy toggle filters in-memory (pandas).
Mirrors the SQL-level policy from _build_where_clause but operates
on the cached DataFrame so that toggling filters doesn't require
a new Oracle round-trip.
"""
if df is None or df.empty:
return df
mask = pd.Series(True, index=df.index)
# ---- Material scrap exclusion ----
if exclude_material_scrap and "SCRAP_OBJECTTYPE" in df.columns:
obj_type = df["SCRAP_OBJECTTYPE"].fillna("").str.strip().str.upper()
mask &= obj_type != "MATERIAL"
# ---- PB diode exclusion ----
if exclude_pb_diode and "PRODUCTLINENAME" in df.columns:
mask &= ~df["PRODUCTLINENAME"].fillna("").str.match(r"(?i)^PB_")
# ---- Scrap reason exclusion policy ----
if not include_excluded_scrap:
from mes_dashboard.services.scrap_reason_exclusion_cache import (
get_excluded_reasons,
)
excluded = get_excluded_reasons()
if excluded and "LOSSREASON_CODE" in df.columns:
code_upper = df["LOSSREASON_CODE"].fillna("").str.strip().str.upper()
mask &= ~code_upper.isin(excluded)
if excluded and "LOSSREASONNAME" in df.columns:
name_upper = df["LOSSREASONNAME"].fillna("").str.strip().str.upper()
mask &= ~name_upper.isin(excluded)
# Only keep reasons matching ^[0-9]{3}_ pattern
if "LOSSREASONNAME" in df.columns:
name_trimmed = df["LOSSREASONNAME"].fillna("").str.strip().str.upper()
mask &= name_trimmed.str.match(r"^[0-9]{3}_")
# Exclude XXX_ and ZZZ_ prefixes
mask &= ~name_trimmed.str.match(r"^(XXX|ZZZ)_")
return df[mask]
def _build_primary_response(
@@ -323,12 +396,23 @@ def apply_view(
detail_reason: Optional[str] = None,
page: int = 1,
per_page: int = 50,
include_excluded_scrap: bool = False,
exclude_material_scrap: bool = True,
exclude_pb_diode: bool = True,
) -> Optional[Dict[str, Any]]:
"""Read cache → apply filters → return derived data. Returns None if expired."""
df = _get_cached_df(query_id)
if df is None:
return None
# Apply policy filters first (cache stores unfiltered data)
df = _apply_policy_filters(
df,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
filtered = _apply_supplementary_filters(
df,
packages=packages,
@@ -759,12 +843,22 @@ def export_csv_from_cache(
metric_filter: str = "all",
trend_dates: Optional[List[str]] = None,
detail_reason: Optional[str] = None,
include_excluded_scrap: bool = False,
exclude_material_scrap: bool = True,
exclude_pb_diode: bool = True,
) -> Optional[list]:
"""Read cache → apply filters → return list of dicts for CSV export."""
df = _get_cached_df(query_id)
if df is None:
return None
df = _apply_policy_filters(
df,
include_excluded_scrap=include_excluded_scrap,
exclude_material_scrap=exclude_material_scrap,
exclude_pb_diode=exclude_pb_diode,
)
filtered = _apply_supplementary_filters(
df,
packages=packages,

View File

@@ -280,6 +280,11 @@ _DEFAULT_BASE_WHERE = (
" AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1"
)
# Default condition for the {{ WORKFLOW_FILTER }} slot in the workflow_lookup
# CTE (r0 alias): restrict rows to the [start_date, end_date] range using the
# half-open "+ 1 day" upper bound. Used in date-range mode; container mode
# substitutes a CONTAINERID-based condition instead, which is the DPY-4010
# fix (no :start_date/:end_date binds are supplied in that mode).
_DEFAULT_WORKFLOW_FILTER = (
    "r0.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')"
    " AND r0.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1"
)
def _prepare_sql(
name: str,
@@ -290,11 +295,13 @@ def _prepare_sql(
base_variant: str = "",
base_where: str = "",
dimension_column: str = "",
workflow_filter: str = "",
) -> str:
sql = _load_sql(name)
sql = sql.replace("{{ BASE_QUERY }}", _base_query_sql(base_variant))
sql = sql.replace("{{ BASE_WITH_CTE }}", _base_with_cte_sql("base", base_variant))
sql = sql.replace("{{ BASE_WHERE }}", base_where or _DEFAULT_BASE_WHERE)
sql = sql.replace("{{ WORKFLOW_FILTER }}", workflow_filter or _DEFAULT_WORKFLOW_FILTER)
sql = sql.replace("{{ WHERE_CLAUSE }}", where_clause or "")
sql = sql.replace("{{ BUCKET_EXPR }}", bucket_expr or "TRUNC(b.TXN_DAY)")
sql = sql.replace("{{ METRIC_COLUMN }}", metric_column or "b.REJECT_TOTAL_QTY")

View File

@@ -37,8 +37,7 @@ workflow_lookup AS (
AND w.CONTAINERID IN (
SELECT DISTINCT r0.CONTAINERID
FROM DWH.DW_MES_LOTREJECTHISTORY r0
WHERE r0.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
AND r0.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
WHERE {{ WORKFLOW_FILTER }}
)
),
reject_raw AS (

View File

@@ -26,8 +26,7 @@ workflow_lookup AS (
AND w.CONTAINERID IN (
SELECT DISTINCT r0.CONTAINERID
FROM DWH.DW_MES_LOTREJECTHISTORY r0
WHERE r0.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
AND r0.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
WHERE {{ WORKFLOW_FILTER }}
)
),
reject_raw AS (