feat(reject-history): two-phase query architecture with cached views
Replace per-interaction Oracle queries with a two-phase model:
- POST /query: single Oracle hit, cache full LOT-level DataFrame (L1+L2)
- GET /view: read cache, apply supplementary/interactive filters via pandas

Also add a container query mode (LOT / work order (工單) / WAFER LOT, with
wildcard support), supplementary filters (Package/WC GROUP/Reason) served
from cached data, PB_* series exclusion (previously PB_Diode only), and a
query loading spinner.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
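
A minimal client-side sketch of the two-phase flow (the host and filter
values below are illustrative; the endpoint paths, request fields, and the
query_id / 410 "cache_expired" contract come from this commit):

    import requests

    BASE = "http://localhost:5000"  # hypothetical host

    # Phase 1: primary query (the single Oracle hit; server caches the DataFrame)
    primary = requests.post(f"{BASE}/api/reject-history/query", json={
        "mode": "date_range",
        "start_date": "2026-01-01",
        "end_date": "2026-01-31",
        "exclude_pb_diode": True,
    }).json()
    query_id = primary["query_id"]

    # Phase 2: interactive views are served from the cache, no Oracle round-trip
    view = requests.get(f"{BASE}/api/reject-history/view", params={
        "query_id": query_id,
        "reason": "OXIDATION",  # illustrative supplementary filter value
        "page": 1,
        "per_page": 50,
    })
    if view.status_code == 410:
        # cache expired (15-minute TTL): re-run phase 1 to rebuild it
        ...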
@@ -27,6 +27,7 @@ dependencies:

# Data Processing
- pandas==2.3.3 # Pin DBAPI2-compatible release for current pd.read_sql flow
- pyarrow>=17.0.0,<20.0.0 # Parquet serialization for Redis DataFrame cache
- openpyxl>=3.0.0

# Cache (Redis)
@@ -149,3 +149,53 @@ export function buildRejectCommonQueryParams(filters = {}, { reason = '' } = {})
  }
  return params;
}

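// Splits textarea input on newlines/commas, trims each token, maps the
// user-facing "*" wildcard to SQL LIKE's "%", then de-duplicates while
// preserving order. e.g. "GA2602*, GA2602*\nGB01" → ['GA2602%', 'GB01'].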
export function parseMultiLineInput(text) {
  if (!text) return [];
  const tokens = String(text)
    .split(/[\n,]+/)
    .map((s) => s.trim())
    .filter(Boolean)
    .map((s) => s.replace(/\*/g, '%'));
  const seen = new Set();
  const result = [];
  for (const token of tokens) {
    if (!seen.has(token)) {
      seen.add(token);
      result.push(token);
    }
  }
  return result;
}

export function buildViewParams(queryId, {
  supplementaryFilters = {},
  metricFilter = 'all',
  trendDates = [],
  detailReason = '',
  page = 1,
  perPage = 50,
} = {}) {
  const params = { query_id: queryId };
  if (supplementaryFilters.packages?.length > 0) {
    params.packages = supplementaryFilters.packages;
  }
  if (supplementaryFilters.workcenterGroups?.length > 0) {
    params.workcenter_groups = supplementaryFilters.workcenterGroups;
  }
  if (supplementaryFilters.reason) {
    params.reason = supplementaryFilters.reason;
  }
  if (metricFilter && metricFilter !== 'all') {
    params.metric_filter = metricFilter;
  }
  if (trendDates?.length > 0) {
    params.trend_dates = trendDates;
  }
  if (detailReason) {
    params.detail_reason = detailReason;
  }
  params.page = page || 1;
  params.per_page = perPage || 50;
  return params;
}

File diff suppressed because it is too large
@@ -1,14 +1,39 @@
<script setup>
import MultiSelect from '../../resource-shared/components/MultiSelect.vue';

defineProps({
const props = defineProps({
  filters: { type: Object, required: true },
  options: { type: Object, required: true },
  queryMode: { type: String, default: 'date_range' },
  containerInputType: { type: String, default: 'lot' },
  containerInput: { type: String, default: '' },
  availableFilters: { type: Object, default: () => ({}) },
  supplementaryFilters: { type: Object, default: () => ({}) },
  queryId: { type: String, default: '' },
  resolutionInfo: { type: Object, default: null },
  loading: { type: Object, required: true },
  activeFilterChips: { type: Array, default: () => [] },
});

defineEmits(['apply', 'clear', 'export-csv', 'remove-chip', 'pareto-scope-toggle']);
const emit = defineEmits([
  'apply',
  'clear',
  'export-csv',
  'remove-chip',
  'pareto-scope-toggle',
  'update:queryMode',
  'update:containerInputType',
  'update:containerInput',
  'supplementary-change',
]);

function emitSupplementary(patch) {
  emit('supplementary-change', {
    packages: props.supplementaryFilters.packages || [],
    workcenterGroups: props.supplementaryFilters.workcenterGroups || [],
    reason: props.supplementaryFilters.reason || '',
    ...patch,
  });
}
</script>

<template>
@@ -17,46 +42,75 @@ defineEmits(['apply', 'clear', 'export-csv', 'remove-chip', 'pareto-scope-toggle
        <div class="card-title">查詢條件</div>
      </div>
      <div class="card-body filter-panel">
        <!-- Mode toggle tabs -->
        <div class="filter-group-full mode-tab-row">
          <button
            type="button"
            :class="['mode-tab', { active: queryMode === 'date_range' }]"
            @click="$emit('update:queryMode', 'date_range')"
          >
            日期區間
          </button>
          <button
            type="button"
            :class="['mode-tab', { active: queryMode === 'container' }]"
            @click="$emit('update:queryMode', 'container')"
          >
            LOT / 工單 / WAFER
          </button>
        </div>

        <!-- Date range mode -->
        <template v-if="queryMode === 'date_range'">
          <div class="filter-group">
            <label class="filter-label" for="start-date">開始日期</label>
            <input id="start-date" v-model="filters.startDate" type="date" class="filter-input" />
            <input
              id="start-date"
              v-model="filters.startDate"
              type="date"
              class="filter-input"
            />
          </div>
          <div class="filter-group">
            <label class="filter-label" for="end-date">結束日期</label>
            <input id="end-date" v-model="filters.endDate" type="date" class="filter-input" />
          </div>

          <div class="filter-group">
            <label class="filter-label">Package</label>
            <MultiSelect
              :model-value="filters.packages"
              :options="options.packages"
              placeholder="全部 Package"
              searchable
              @update:model-value="filters.packages = $event"
            <input
              id="end-date"
              v-model="filters.endDate"
              type="date"
              class="filter-input"
            />
          </div>
        </template>

        <!-- Container mode -->
        <template v-else>
          <div class="filter-group">
            <label class="filter-label" for="reason">報廢原因</label>
            <select id="reason" v-model="filters.reason" class="filter-input">
              <option value="">全部原因</option>
              <option v-for="reason in options.reasons" :key="reason" :value="reason">
                {{ reason }}
              </option>
            <label class="filter-label" for="container-type">輸入類型</label>
            <select
              id="container-type"
              class="filter-input"
              :value="containerInputType"
              @change="$emit('update:containerInputType', $event.target.value)"
            >
              <option value="lot">LOT</option>
              <option value="work_order">工單</option>
              <option value="wafer_lot">WAFER LOT</option>
            </select>
          </div>

          <div class="filter-group filter-group-full">
            <label class="filter-label">WORKCENTER GROUP</label>
            <MultiSelect
              :model-value="filters.workcenterGroups"
              :options="options.workcenterGroups"
              placeholder="全部工作中心群組"
              searchable
              @update:model-value="filters.workcenterGroups = $event"
            />
          <div class="filter-group filter-group-wide">
            <label class="filter-label" for="container-input"
              >輸入值 (每行一個,支援 * 或 % wildcard)</label
            >
            <textarea
              id="container-input"
              class="filter-input filter-textarea"
              rows="3"
              :value="containerInput"
              @input="$emit('update:containerInput', $event.target.value)"
              placeholder="GA26020001-A00-001 GA260200% ..."
            ></textarea>
          </div>
        </template>

        <div class="filter-toolbar">
          <div class="checkbox-row">
@@ -70,7 +124,7 @@ defineEmits(['apply', 'clear', 'export-csv', 'remove-chip', 'pareto-scope-toggle
            </label>
            <label class="checkbox-pill">
              <input v-model="filters.excludePbDiode" type="checkbox" />
              排除 PB_Diode
              排除 PB_* 系列
            </label>
            <label class="checkbox-pill">
              <input
@@ -82,19 +136,110 @@ defineEmits(['apply', 'clear', 'export-csv', 'remove-chip', 'pareto-scope-toggle
            </label>
          </div>
          <div class="filter-actions">
            <button class="btn btn-primary" :disabled="loading.querying" @click="$emit('apply')">查詢</button>
            <button class="btn btn-secondary" :disabled="loading.querying" @click="$emit('clear')">清除條件</button>
            <button class="btn btn-light btn-export" :disabled="loading.querying || loading.exporting" @click="$emit('export-csv')">
              <template v-if="loading.exporting"><span class="btn-spinner"></span>匯出中...</template>
            <button
              class="btn btn-primary"
              :disabled="loading.querying"
              @click="$emit('apply')"
            >
              <template v-if="loading.querying"
                ><span class="btn-spinner"></span>查詢中...</template
              >
              <template v-else>查詢</template>
            </button>
            <button
              class="btn btn-secondary"
              :disabled="loading.querying"
              @click="$emit('clear')"
            >
              清除條件
            </button>
            <button
              class="btn btn-light btn-export"
              :disabled="loading.querying || loading.exporting"
              @click="$emit('export-csv')"
            >
              <template v-if="loading.exporting"
                ><span class="btn-spinner"></span>匯出中...</template
              >
              <template v-else>匯出 CSV</template>
            </button>
          </div>
        </div>
      </div>
    <div class="card-body active-filter-chip-row" v-if="activeFilterChips.length > 0">

    <!-- Resolution info (container mode) -->
    <div
      v-if="resolutionInfo && queryMode === 'container'"
      class="card-body resolution-info"
    >
      已解析 {{ resolutionInfo.resolved_count }} 筆容器
      <template v-if="resolutionInfo.not_found?.length > 0">
        <span class="resolution-warn">
          ({{ resolutionInfo.not_found.length }} 筆未找到:
          {{ resolutionInfo.not_found.slice(0, 10).join(', ')
          }}{{ resolutionInfo.not_found.length > 10 ? '...' : '' }})
        </span>
      </template>
    </div>

    <!-- Supplementary filters (only after primary query) -->
    <div v-if="queryId" class="supplementary-panel">
      <div class="supplementary-header">補充篩選 (快取內篩選)</div>
      <div class="supplementary-row">
        <div class="filter-group">
          <label class="filter-label">WORKCENTER GROUP</label>
          <MultiSelect
            :model-value="supplementaryFilters.workcenterGroups"
            :options="availableFilters.workcenterGroups || []"
            placeholder="全部工作中心群組"
            searchable
            @update:model-value="emitSupplementary({ workcenterGroups: $event })"
          />
        </div>

        <div class="filter-group">
          <label class="filter-label">Package</label>
          <MultiSelect
            :model-value="supplementaryFilters.packages"
            :options="availableFilters.packages || []"
            placeholder="全部 Package"
            searchable
            @update:model-value="emitSupplementary({ packages: $event })"
          />
        </div>

        <div class="filter-group">
          <label class="filter-label" for="supp-reason">報廢原因</label>
          <select
            id="supp-reason"
            class="filter-input"
            :value="supplementaryFilters.reason"
            @change="emitSupplementary({ reason: $event.target.value })"
          >
            <option value="">全部原因</option>
            <option
              v-for="r in availableFilters.reasons || []"
              :key="r"
              :value="r"
            >
              {{ r }}
            </option>
          </select>
        </div>
      </div>
    </div>

    <div
      class="card-body active-filter-chip-row"
      v-if="activeFilterChips.length > 0"
    >
      <div class="filter-label">套用中篩選</div>
      <div class="chip-list">
        <div v-for="chip in activeFilterChips" :key="chip.key" class="filter-chip">
        <div
          v-for="chip in activeFilterChips"
          :key="chip.key"
          class="filter-chip"
        >
          <span>{{ chip.label }}</span>
          <button
            v-if="chip.removable"
@@ -102,7 +247,7 @@ defineEmits(['apply', 'clear', 'export-csv', 'remove-chip', 'pareto-scope-toggle
            class="chip-remove"
            @click="$emit('remove-chip', chip)"
          >
            ×
            ×
          </button>
        </div>
      </div>

@@ -2,11 +2,81 @@
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}

.mode-tab-row {
  display: flex;
  gap: 0;
  border: 1px solid var(--border);
  border-radius: 8px;
  overflow: hidden;
  width: fit-content;
}

.mode-tab {
  padding: 7px 16px;
  border: none;
  background: #f8fafc;
  font-size: 13px;
  font-weight: 600;
  color: #475569;
  cursor: pointer;
  transition: background 0.15s, color 0.15s;
}

.mode-tab:not(:last-child) {
  border-right: 1px solid var(--border);
}

.mode-tab.active {
  background: #2563eb;
  color: #fff;
}

.mode-tab:hover:not(.active) {
  background: #eef2f7;
}

.filter-textarea {
  resize: vertical;
  font-family: monospace;
  line-height: 1.5;
}

.supplementary-panel {
  border-top: 1px solid var(--border);
  padding: 16px 18px;
}

.supplementary-header {
  font-size: 12px;
  font-weight: 700;
  color: #64748b;
  margin-bottom: 12px;
}

.supplementary-row {
  display: grid;
  grid-template-columns: repeat(3, minmax(0, 1fr));
  gap: 18px;
  align-items: end;
}

.resolution-info {
  border-top: 1px solid var(--border);
  font-size: 13px;
  color: #0f766e;
  font-weight: 600;
}

.resolution-warn {
  color: #b45309;
  font-weight: 400;
}

.card {
  background: var(--card-bg);
  border-radius: 10px;
  box-shadow: var(--shadow);
  overflow: hidden;
  overflow: visible;
  margin-bottom: 14px;
}

@@ -14,6 +84,7 @@
  padding: 14px 18px;
  border-bottom: 1px solid var(--border);
  background: #f8fafc;
  border-radius: 10px 10px 0 0;
}

.card-title {
@@ -515,6 +586,10 @@
  grid-column: span 2;
}

.supplementary-row {
  grid-template-columns: repeat(2, minmax(0, 1fr));
}

.pareto-layout {
  grid-template-columns: 1fr;
}
@@ -533,6 +608,10 @@
  grid-column: span 1;
}

.supplementary-row {
  grid-template-columns: 1fr;
}

.filter-toolbar {
  flex-direction: column;
  align-items: flex-start;

@@ -16,6 +16,7 @@ sqlalchemy>=2.0.0,<3.0.0

# Data Processing
pandas==2.3.3 # pandas 3.x removed DBAPI2 flow used by current pd.read_sql + SQLAlchemy usage
pyarrow>=17.0.0,<20.0.0 # Parquet serialization for Redis DataFrame cache
openpyxl>=3.0.0 # Excel file support

# Cache (Redis)
@@ -11,7 +11,13 @@ from flask import Blueprint, Response, jsonify, request

from mes_dashboard.core.cache import cache_get, cache_set, make_cache_key
from mes_dashboard.core.rate_limit import configured_rate_limit
from mes_dashboard.services.reject_dataset_cache import (
    apply_view,
    execute_primary_query,
    export_csv_from_cache,
)
from mes_dashboard.services.reject_history_service import (
    _list_to_csv,
    export_csv,
    get_filter_options,
    query_analytics,
@@ -438,3 +444,138 @@ def api_reject_history_analytics():
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "查詢分析資料失敗"}), 500


# ============================================================
# Two-phase query endpoints (POST /query, GET /view)
# ============================================================


@reject_history_bp.route("/api/reject-history/query", methods=["POST"])
def api_reject_history_query():
    """Primary query: execute Oracle → cache DataFrame → return results."""
    body = request.get_json(silent=True) or {}

    mode = str(body.get("mode", "")).strip()
    if mode not in ("date_range", "container"):
        return jsonify({"success": False, "error": "mode 必須為 date_range 或 container"}), 400

    include_excluded_scrap = bool(body.get("include_excluded_scrap", False))
    exclude_material_scrap = bool(body.get("exclude_material_scrap", True))
    exclude_pb_diode = bool(body.get("exclude_pb_diode", True))

    try:
        kwargs = {
            "mode": mode,
            "include_excluded_scrap": include_excluded_scrap,
            "exclude_material_scrap": exclude_material_scrap,
            "exclude_pb_diode": exclude_pb_diode,
        }

        if mode == "date_range":
            kwargs["start_date"] = str(body.get("start_date", "")).strip()
            kwargs["end_date"] = str(body.get("end_date", "")).strip()
            if not kwargs["start_date"] or not kwargs["end_date"]:
                return jsonify({"success": False, "error": "date_range mode 需要 start_date 和 end_date"}), 400
        else:
            kwargs["container_input_type"] = str(body.get("container_input_type", "lot")).strip()
            container_values = body.get("container_values", [])
            if not isinstance(container_values, list) or not container_values:
                return jsonify({"success": False, "error": "container mode 需要 container_values 陣列"}), 400
            kwargs["container_values"] = [str(v).strip() for v in container_values if str(v).strip()]

        result = execute_primary_query(**kwargs)
        return jsonify({"success": True, **result})

    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        import traceback
        traceback.print_exc()
        return jsonify({"success": False, "error": "主查詢執行失敗"}), 500


@reject_history_bp.route("/api/reject-history/view", methods=["GET"])
def api_reject_history_view():
    """Supplementary view: read cache → filter → return derived data."""
    query_id = request.args.get("query_id", "").strip()
    if not query_id:
        return jsonify({"success": False, "error": "缺少必要參數: query_id"}), 400

    page = request.args.get("page", 1, type=int) or 1
    per_page = request.args.get("per_page", 50, type=int) or 50
    metric_filter = request.args.get("metric_filter", "all").strip().lower() or "all"
    reason = request.args.get("reason", "").strip() or None
    detail_reason = request.args.get("detail_reason", "").strip() or None

    try:
        result = apply_view(
            query_id=query_id,
            packages=_parse_multi_param("packages") or None,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            reason=reason,
            metric_filter=metric_filter,
            trend_dates=_parse_multi_param("trend_dates") or None,
            detail_reason=detail_reason,
            page=page,
            per_page=per_page,
        )

        if result is None:
            return jsonify({"success": False, "error": "cache_expired"}), 410

        return jsonify({"success": True, "data": result})

    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        import traceback
        traceback.print_exc()
        return jsonify({"success": False, "error": "視圖查詢失敗"}), 500


@reject_history_bp.route("/api/reject-history/export-cached", methods=["GET"])
def api_reject_history_export_cached():
    """Export CSV from cached dataset."""
    query_id = request.args.get("query_id", "").strip()
    if not query_id:
        return jsonify({"success": False, "error": "缺少必要參數: query_id"}), 400

    metric_filter = request.args.get("metric_filter", "all").strip().lower() or "all"
    reason = request.args.get("reason", "").strip() or None
    detail_reason = request.args.get("detail_reason", "").strip() or None

    try:
        rows = export_csv_from_cache(
            query_id=query_id,
            packages=_parse_multi_param("packages") or None,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            reason=reason,
            metric_filter=metric_filter,
            trend_dates=_parse_multi_param("trend_dates") or None,
            detail_reason=detail_reason,
        )

        if rows is None:
            return jsonify({"success": False, "error": "cache_expired"}), 410

        headers = [
            "LOT", "WORKCENTER", "WORKCENTER_GROUP", "Package", "FUNCTION",
            "TYPE", "PRODUCT", "原因", "EQUIPMENT", "COMMENT", "SPEC",
            "REJECT_QTY", "STANDBY_QTY", "QTYTOPROCESS_QTY", "INPROCESS_QTY",
            "PROCESSED_QTY", "扣帳報廢量", "不扣帳報廢量", "MOVEIN_QTY",
            "報廢時間", "日期",
        ]
        return Response(
            _list_to_csv(rows, headers=headers),
            mimetype="text/csv",
            headers={
                "Content-Disposition": "attachment; filename=reject_history_export.csv",
                "Content-Type": "text/csv; charset=utf-8-sig",
            },
        )

    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "匯出 CSV 失敗"}), 500

src/mes_dashboard/services/reject_dataset_cache.py (new file, 689 lines)
@@ -0,0 +1,689 @@
# -*- coding: utf-8 -*-
"""Two-phase reject-history dataset cache.

Primary query (POST /query) → Oracle → cache full LOT-level DataFrame.
Supplementary view (GET /view) → read cache → pandas filter/derive.

Cache layers:
  L1: ProcessLevelCache (in-process, per-worker)
  L2: Redis (cross-worker, parquet bytes encoded as base64 string)
"""

from __future__ import annotations

import base64
import hashlib
import io
import json
import logging
from typing import Any, Dict, List, Optional

import pandas as pd

from mes_dashboard.core.cache import ProcessLevelCache
from mes_dashboard.core.database import read_sql_df
from mes_dashboard.core.redis_client import (
    REDIS_ENABLED,
    get_key,
    get_redis_client,
)
from mes_dashboard.services.filter_cache import get_specs_for_groups
from mes_dashboard.services.reject_history_service import (
    _as_float,
    _as_int,
    _build_where_clause,
    _derive_summary,
    _extract_distinct_text_values,
    _extract_workcenter_group_options,
    _normalize_text,
    _prepare_sql,
    _to_date_str,
    _to_datetime_str,
    _validate_range,
)
from mes_dashboard.services.query_tool_service import (
    _resolve_by_lot_id,
    _resolve_by_wafer_lot,
    _resolve_by_work_order,
)
from mes_dashboard.sql import QueryBuilder

logger = logging.getLogger("mes_dashboard.reject_dataset_cache")

_CACHE_TTL = 900  # 15 minutes
_CACHE_MAX_SIZE = 8
_REDIS_NAMESPACE = "reject_dataset"

_dataset_cache = ProcessLevelCache(ttl_seconds=_CACHE_TTL, max_size=_CACHE_MAX_SIZE)


# ============================================================
# Query ID
# ============================================================


def _make_query_id(params: dict) -> str:
    """Deterministic hash from primary query params + policy toggles."""
    canonical = json.dumps(params, sort_keys=True, ensure_ascii=False, default=str)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:16]
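
# Illustration (not part of this module): json.dumps(..., sort_keys=True)
# canonicalizes key order, so logically identical primary queries map to the
# same 16-hex-char query_id and share one cache entry:
#
#   a = _make_query_id({"mode": "date_range", "start_date": "2026-01-01"})
#   b = _make_query_id({"start_date": "2026-01-01", "mode": "date_range"})
#   assert a == b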


# ============================================================
# Redis L2 helpers (parquet ↔ base64 string)
# ============================================================


def _redis_key(query_id: str) -> str:
    return get_key(f"{_REDIS_NAMESPACE}:{query_id}")


def _redis_store_df(query_id: str, df: pd.DataFrame) -> None:
    if not REDIS_ENABLED:
        return
    client = get_redis_client()
    if client is None:
        return
    try:
        buf = io.BytesIO()
        df.to_parquet(buf, engine="pyarrow", index=False)
        encoded = base64.b64encode(buf.getvalue()).decode("ascii")
        client.setex(_redis_key(query_id), _CACHE_TTL, encoded)
    except Exception as exc:
        logger.warning("Failed to store DataFrame in Redis: %s", exc)


def _redis_load_df(query_id: str) -> Optional[pd.DataFrame]:
    if not REDIS_ENABLED:
        return None
    client = get_redis_client()
    if client is None:
        return None
    try:
        encoded = client.get(_redis_key(query_id))
        if encoded is None:
            return None
        raw = base64.b64decode(encoded)
        return pd.read_parquet(io.BytesIO(raw), engine="pyarrow")
    except Exception as exc:
        logger.warning("Failed to load DataFrame from Redis: %s", exc)
        return None
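
# Illustration (standalone sketch, not part of this module): the
# parquet → base64 round-trip used above is lossless, dtypes included,
# which a CSV- or JSON-based cache would not guarantee:
#
#   import base64, io
#   import pandas as pd
#
#   df = pd.DataFrame({"CONTAINERNAME": ["GA26020001"], "REJECT_TOTAL_QTY": [3]})
#   buf = io.BytesIO()
#   df.to_parquet(buf, engine="pyarrow", index=False)
#   s = base64.b64encode(buf.getvalue()).decode("ascii")  # plain str for SETEX
#   restored = pd.read_parquet(io.BytesIO(base64.b64decode(s)), engine="pyarrow")
#   assert restored.equals(df)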


# ============================================================
# Cache read (L1 → L2 → None)
# ============================================================


def _get_cached_df(query_id: str) -> Optional[pd.DataFrame]:
    """Read cache: L1 hit → return, L1 miss → L2 → write L1 → return."""
    df = _dataset_cache.get(query_id)
    if df is not None:
        return df
    df = _redis_load_df(query_id)
    if df is not None:
        _dataset_cache.set(query_id, df)
    return df


def _store_df(query_id: str, df: pd.DataFrame) -> None:
    """Write to L1 and L2."""
    _dataset_cache.set(query_id, df)
    _redis_store_df(query_id, df)


# ============================================================
# Container resolution (reuse query_tool_service resolvers)
# ============================================================


_RESOLVERS = {
    "lot": _resolve_by_lot_id,
    "work_order": _resolve_by_work_order,
    "wafer_lot": _resolve_by_wafer_lot,
}


def resolve_containers(
    input_type: str, values: List[str]
) -> Dict[str, Any]:
    """Dispatch to existing resolver → return container IDs + resolution info."""
    resolver = _RESOLVERS.get(input_type)
    if resolver is None:
        raise ValueError(f"不支援的輸入類型: {input_type}")

    result = resolver(values)
    if "error" in result:
        raise ValueError(result["error"])

    container_ids = []
    for row in result.get("data", []):
        cid = row.get("container_id")
        if cid:
            container_ids.append(cid)

    return {
        "container_ids": container_ids,
        "resolution_info": {
            "input_count": result.get("input_count", len(values)),
            "resolved_count": len(container_ids),
            "not_found": result.get("not_found", []),
        },
    }


# ============================================================
# Primary query
# ============================================================


def execute_primary_query(
    *,
    mode: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    container_input_type: Optional[str] = None,
    container_values: Optional[List[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
    exclude_pb_diode: bool = True,
) -> Dict[str, Any]:
    """Execute Oracle query → cache DataFrame → return structured result."""

    # ---- Build base_where + params for the primary filter ----
    base_where_parts: List[str] = []
    base_params: Dict[str, Any] = {}
    resolution_info: Optional[Dict[str, Any]] = None

    if mode == "date_range":
        if not start_date or not end_date:
            raise ValueError("date_range mode 需要 start_date 和 end_date")
        _validate_range(start_date, end_date)
        base_where_parts.append(
            "r.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')"
            " AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1"
        )
        base_params["start_date"] = start_date
        base_params["end_date"] = end_date

    elif mode == "container":
        if not container_values:
            raise ValueError("container mode 需要至少一個容器值")
        resolved = resolve_containers(
            container_input_type or "lot", container_values
        )
        resolution_info = resolved["resolution_info"]
        container_ids = resolved["container_ids"]
        if not container_ids:
            raise ValueError("未找到任何對應的容器")

        builder = QueryBuilder()
        builder.add_in_condition("r.CONTAINERID", container_ids)
        cid_where, cid_params = builder.build_where_only()
        # build_where_only returns "WHERE ..." — strip "WHERE " prefix
        cid_condition = cid_where.strip()
        if cid_condition.upper().startswith("WHERE "):
            cid_condition = cid_condition[6:].strip()
        base_where_parts.append(cid_condition)
        base_params.update(cid_params)

    else:
        raise ValueError(f"不支援的查詢模式: {mode}")

    base_where = " AND ".join(base_where_parts)

    # ---- Build policy WHERE (only toggles, no supplementary filters) ----
    policy_where, policy_params, meta = _build_where_clause(
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
        exclude_pb_diode=exclude_pb_diode,
    )

    # ---- Compute query_id from all primary params ----
    query_id_input = {
        "mode": mode,
        "start_date": start_date,
        "end_date": end_date,
        "container_input_type": container_input_type,
        "container_values": sorted(container_values or []),
        "include_excluded_scrap": include_excluded_scrap,
        "exclude_material_scrap": exclude_material_scrap,
        "exclude_pb_diode": exclude_pb_diode,
    }
    query_id = _make_query_id(query_id_input)

    # ---- Check cache first ----
    cached_df = _get_cached_df(query_id)
    if cached_df is not None:
        logger.info("Dataset cache hit for query_id=%s", query_id)
        return _build_primary_response(
            query_id, cached_df, meta, resolution_info
        )

    # ---- Execute Oracle query ----
    logger.info("Dataset cache miss for query_id=%s, querying Oracle", query_id)
    sql = _prepare_sql(
        "list",
        where_clause=policy_where,
        base_variant="lot",
        base_where=base_where,
    )
    all_params = {**base_params, **policy_params, "offset": 0, "limit": 999999999}
    df = read_sql_df(sql, all_params)
    if df is None:
        df = pd.DataFrame()

    # ---- Cache and return ----
    if not df.empty:
        _store_df(query_id, df)

    return _build_primary_response(query_id, df, meta, resolution_info)


def _build_primary_response(
    query_id: str,
    df: pd.DataFrame,
    meta: Dict[str, Any],
    resolution_info: Optional[Dict[str, Any]],
) -> Dict[str, Any]:
    """Build the full response from a LOT-level DataFrame."""
    analytics_raw = _derive_analytics_raw(df)
    summary = _derive_summary_from_analytics(analytics_raw)
    trend_items = _derive_trend_from_analytics(analytics_raw)
    first_page = _paginate_detail(df, page=1, per_page=50)
    available = _extract_available_filters(df)

    result: Dict[str, Any] = {
        "query_id": query_id,
        "analytics_raw": analytics_raw,
        "summary": summary,
        "trend": {"items": trend_items, "granularity": "day"},
        "detail": first_page,
        "available_filters": available,
        "meta": meta,
    }
    if resolution_info is not None:
        result["resolution_info"] = resolution_info
    return result


# ============================================================
# View (supplementary + interactive filtering on cache)
# ============================================================


def apply_view(
    *,
    query_id: str,
    packages: Optional[List[str]] = None,
    workcenter_groups: Optional[List[str]] = None,
    reason: Optional[str] = None,
    metric_filter: str = "all",
    trend_dates: Optional[List[str]] = None,
    detail_reason: Optional[str] = None,
    page: int = 1,
    per_page: int = 50,
) -> Optional[Dict[str, Any]]:
    """Read cache → apply filters → return derived data. Returns None if expired."""
    df = _get_cached_df(query_id)
    if df is None:
        return None

    filtered = _apply_supplementary_filters(
        df,
        packages=packages,
        workcenter_groups=workcenter_groups,
        reason=reason,
        metric_filter=metric_filter,
    )

    # Analytics always uses full date range (supplementary-filtered only).
    # The frontend derives trend from analytics_raw and filters Pareto by
    # selectedTrendDates client-side.
    analytics_raw = _derive_analytics_raw(filtered)
    summary = _derive_summary_from_analytics(analytics_raw)

    # Detail list: additionally filter by detail_reason and trend_dates
    detail_df = filtered
    if trend_dates:
        date_set = set(trend_dates)
        detail_df = detail_df[
            detail_df["TXN_DAY"].apply(lambda d: _to_date_str(d) in date_set)
        ]
    if detail_reason:
        detail_df = detail_df[
            detail_df["LOSSREASONNAME"].str.strip() == detail_reason.strip()
        ]

    detail_page = _paginate_detail(detail_df, page=page, per_page=per_page)

    return {
        "analytics_raw": analytics_raw,
        "summary": summary,
        "detail": detail_page,
    }


def _apply_supplementary_filters(
    df: pd.DataFrame,
    *,
    packages: Optional[List[str]] = None,
    workcenter_groups: Optional[List[str]] = None,
    reason: Optional[str] = None,
    metric_filter: str = "all",
) -> pd.DataFrame:
    """Apply supplementary filters via pandas boolean indexing."""
    if df is None or df.empty:
        return df

    mask = pd.Series(True, index=df.index)

    if packages:
        pkg_set = {p.strip() for p in packages if p.strip()}
        if pkg_set and "PRODUCTLINENAME" in df.columns:
            mask &= df["PRODUCTLINENAME"].isin(pkg_set)

    if workcenter_groups:
        wc_groups = [g.strip() for g in workcenter_groups if g.strip()]
        if wc_groups:
            specs = get_specs_for_groups(wc_groups)
            if specs and "SPECNAME" in df.columns:
                spec_set = {s.upper() for s in specs}
                mask &= df["SPECNAME"].str.strip().str.upper().isin(spec_set)
            elif "WORKCENTER_GROUP" in df.columns:
                mask &= df["WORKCENTER_GROUP"].isin(wc_groups)

    if reason and "LOSSREASONNAME" in df.columns:
        mask &= df["LOSSREASONNAME"].str.strip() == reason.strip()

    if metric_filter == "reject" and "REJECT_TOTAL_QTY" in df.columns:
        mask &= df["REJECT_TOTAL_QTY"] > 0
    elif metric_filter == "defect" and "DEFECT_QTY" in df.columns:
        mask &= df["DEFECT_QTY"] > 0

    return df[mask]


# ============================================================
# Derivation helpers
# ============================================================


def _derive_analytics_raw(df: pd.DataFrame) -> list:
    """GROUP BY (TXN_DAY, LOSSREASONNAME) → per date×reason rows."""
    if df is None or df.empty:
        return []

    agg_cols = {
        "MOVEIN_QTY": ("MOVEIN_QTY", "sum"),
        "REJECT_TOTAL_QTY": ("REJECT_TOTAL_QTY", "sum"),
        "DEFECT_QTY": ("DEFECT_QTY", "sum"),
    }
    # Add optional columns if present
    if "AFFECTED_WORKORDER_COUNT" in df.columns:
        agg_cols["AFFECTED_WORKORDER_COUNT"] = ("AFFECTED_WORKORDER_COUNT", "sum")

    grouped = (
        df.groupby(["TXN_DAY", "LOSSREASONNAME"], sort=True)
        .agg(**agg_cols)
        .reset_index()
    )

    # Count distinct CONTAINERIDs per group for AFFECTED_LOT_COUNT
    if "CONTAINERID" in df.columns:
        lot_counts = (
            df.groupby(["TXN_DAY", "LOSSREASONNAME"])["CONTAINERID"]
            .nunique()
            .reset_index()
            .rename(columns={"CONTAINERID": "AFFECTED_LOT_COUNT"})
        )
        grouped = grouped.merge(
            lot_counts, on=["TXN_DAY", "LOSSREASONNAME"], how="left"
        )
    else:
        grouped["AFFECTED_LOT_COUNT"] = 0

    items = []
    for _, row in grouped.iterrows():
        items.append(
            {
                "bucket_date": _to_date_str(row["TXN_DAY"]),
                "reason": _normalize_text(row["LOSSREASONNAME"]) or "(未填寫)",
                "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                "AFFECTED_LOT_COUNT": _as_int(row.get("AFFECTED_LOT_COUNT")),
                "AFFECTED_WORKORDER_COUNT": _as_int(
                    row.get("AFFECTED_WORKORDER_COUNT")
                ),
            }
        )
    return items


def _derive_summary_from_analytics(analytics_raw: list) -> dict:
    """Aggregate analytics_raw into a single summary dict."""
    movein = sum(r.get("MOVEIN_QTY", 0) for r in analytics_raw)
    reject_total = sum(r.get("REJECT_TOTAL_QTY", 0) for r in analytics_raw)
    defect = sum(r.get("DEFECT_QTY", 0) for r in analytics_raw)
    affected_lot = sum(r.get("AFFECTED_LOT_COUNT", 0) for r in analytics_raw)
    affected_wo = sum(r.get("AFFECTED_WORKORDER_COUNT", 0) for r in analytics_raw)

    total_scrap = reject_total + defect
    return {
        "MOVEIN_QTY": movein,
        "REJECT_TOTAL_QTY": reject_total,
        "DEFECT_QTY": defect,
        "REJECT_RATE_PCT": round((reject_total / movein * 100) if movein else 0, 4),
        "DEFECT_RATE_PCT": round((defect / movein * 100) if movein else 0, 4),
        "REJECT_SHARE_PCT": round(
            (reject_total / total_scrap * 100) if total_scrap else 0, 4
        ),
        "AFFECTED_LOT_COUNT": affected_lot,
        "AFFECTED_WORKORDER_COUNT": affected_wo,
    }
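
# Worked example (illustrative numbers): with movein=2000, reject_total=30,
# defect=10 the summary yields REJECT_RATE_PCT = 30/2000*100 = 1.5,
# DEFECT_RATE_PCT = 10/2000*100 = 0.5, and
# REJECT_SHARE_PCT = 30/(30+10)*100 = 75.0.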


def _derive_trend_from_analytics(analytics_raw: list) -> list:
    """Group analytics_raw by date into trend items."""
    by_date: Dict[str, Dict[str, int]] = {}
    for row in analytics_raw:
        d = row.get("bucket_date", "")
        if d not in by_date:
            by_date[d] = {"MOVEIN_QTY": 0, "REJECT_TOTAL_QTY": 0, "DEFECT_QTY": 0}
        by_date[d]["MOVEIN_QTY"] += row.get("MOVEIN_QTY", 0)
        by_date[d]["REJECT_TOTAL_QTY"] += row.get("REJECT_TOTAL_QTY", 0)
        by_date[d]["DEFECT_QTY"] += row.get("DEFECT_QTY", 0)

    items = []
    for date_str in sorted(by_date.keys()):
        vals = by_date[date_str]
        movein = vals["MOVEIN_QTY"]
        reject = vals["REJECT_TOTAL_QTY"]
        defect = vals["DEFECT_QTY"]
        items.append(
            {
                "bucket_date": date_str,
                "MOVEIN_QTY": movein,
                "REJECT_TOTAL_QTY": reject,
                "DEFECT_QTY": defect,
                "REJECT_RATE_PCT": round(
                    (reject / movein * 100) if movein else 0, 4
                ),
                "DEFECT_RATE_PCT": round(
                    (defect / movein * 100) if movein else 0, 4
                ),
            }
        )
    return items


def _paginate_detail(
    df: pd.DataFrame, *, page: int = 1, per_page: int = 50
) -> dict:
    """Sort + paginate LOT-level rows."""
    if df is None or df.empty:
        return {
            "items": [],
            "pagination": {
                "page": 1,
                "perPage": per_page,
                "total": 0,
                "totalPages": 1,
            },
        }

    page = max(int(page), 1)
    per_page = min(max(int(per_page), 1), 200)

    # Sort
    sort_cols = []
    sort_asc = []
    for col, asc in [
        ("TXN_DAY", False),
        ("WORKCENTERSEQUENCE_GROUP", True),
        ("WORKCENTERNAME", True),
        ("REJECT_TOTAL_QTY", False),
        ("CONTAINERNAME", True),
    ]:
        if col in df.columns:
            sort_cols.append(col)
            sort_asc.append(asc)

    if sort_cols:
        sorted_df = df.sort_values(sort_cols, ascending=sort_asc)
    else:
        sorted_df = df

    total = len(sorted_df)
    total_pages = max((total + per_page - 1) // per_page, 1)
    offset = (page - 1) * per_page
    page_df = sorted_df.iloc[offset : offset + per_page]

    items = []
    for _, row in page_df.iterrows():
        items.append(
            {
                "TXN_TIME": _to_datetime_str(row.get("TXN_TIME")),
                "TXN_DAY": _to_date_str(row.get("TXN_DAY")),
                "TXN_MONTH": _normalize_text(row.get("TXN_MONTH")),
                "WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
                "WORKCENTERNAME": _normalize_text(row.get("WORKCENTERNAME")),
                "SPECNAME": _normalize_text(row.get("SPECNAME")),
                "EQUIPMENTNAME": _normalize_text(row.get("EQUIPMENTNAME")),
                "PRODUCTLINENAME": _normalize_text(row.get("PRODUCTLINENAME")),
                "PJ_TYPE": _normalize_text(row.get("PJ_TYPE")),
                "CONTAINERNAME": _normalize_text(row.get("CONTAINERNAME")),
                "PJ_FUNCTION": _normalize_text(row.get("PJ_FUNCTION")),
                "PRODUCTNAME": _normalize_text(row.get("PRODUCTNAME")),
                "LOSSREASONNAME": _normalize_text(row.get("LOSSREASONNAME")),
                "LOSSREASON_CODE": _normalize_text(row.get("LOSSREASON_CODE")),
                "REJECTCOMMENT": _normalize_text(row.get("REJECTCOMMENT")),
                "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                "REJECT_QTY": _as_int(row.get("REJECT_QTY")),
                "STANDBY_QTY": _as_int(row.get("STANDBY_QTY")),
                "QTYTOPROCESS_QTY": _as_int(row.get("QTYTOPROCESS_QTY")),
                "INPROCESS_QTY": _as_int(row.get("INPROCESS_QTY")),
                "PROCESSED_QTY": _as_int(row.get("PROCESSED_QTY")),
                "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                "REJECT_RATE_PCT": round(
                    _as_float(row.get("REJECT_RATE_PCT")), 4
                ),
                "DEFECT_RATE_PCT": round(
                    _as_float(row.get("DEFECT_RATE_PCT")), 4
                ),
                "REJECT_SHARE_PCT": round(
                    _as_float(row.get("REJECT_SHARE_PCT")), 4
                ),
                "AFFECTED_WORKORDER_COUNT": _as_int(
                    row.get("AFFECTED_WORKORDER_COUNT")
                ),
            }
        )

    return {
        "items": items,
        "pagination": {
            "page": page,
            "perPage": per_page,
            "total": total,
            "totalPages": total_pages,
        },
    }


def _extract_available_filters(df: pd.DataFrame) -> dict:
    """Extract distinct packages/reasons/WC groups from the full cache DF."""
    return {
        "workcenter_groups": _extract_workcenter_group_options(df),
        "packages": _extract_distinct_text_values(df, "PRODUCTLINENAME"),
        "reasons": _extract_distinct_text_values(df, "LOSSREASONNAME"),
    }


# ============================================================
# CSV export from cache
# ============================================================


def export_csv_from_cache(
    *,
    query_id: str,
    packages: Optional[List[str]] = None,
    workcenter_groups: Optional[List[str]] = None,
    reason: Optional[str] = None,
    metric_filter: str = "all",
    trend_dates: Optional[List[str]] = None,
    detail_reason: Optional[str] = None,
) -> Optional[list]:
    """Read cache → apply filters → return list of dicts for CSV export."""
    df = _get_cached_df(query_id)
    if df is None:
        return None

    filtered = _apply_supplementary_filters(
        df,
        packages=packages,
        workcenter_groups=workcenter_groups,
        reason=reason,
        metric_filter=metric_filter,
    )

    if trend_dates:
        date_set = set(trend_dates)
        filtered = filtered[
            filtered["TXN_DAY"].apply(lambda d: _to_date_str(d) in date_set)
        ]
    if detail_reason and "LOSSREASONNAME" in filtered.columns:
        filtered = filtered[
            filtered["LOSSREASONNAME"].str.strip() == detail_reason.strip()
        ]

    rows = []
    for _, row in filtered.iterrows():
        rows.append(
            {
                "LOT": _normalize_text(row.get("CONTAINERNAME")),
                "WORKCENTER": _normalize_text(row.get("WORKCENTERNAME")),
                "WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
                "Package": _normalize_text(row.get("PRODUCTLINENAME")),
                "FUNCTION": _normalize_text(row.get("PJ_FUNCTION")),
                "TYPE": _normalize_text(row.get("PJ_TYPE")),
                "PRODUCT": _normalize_text(row.get("PRODUCTNAME")),
                "原因": _normalize_text(row.get("LOSSREASONNAME")),
                "EQUIPMENT": _normalize_text(row.get("EQUIPMENTNAME")),
                "COMMENT": _normalize_text(row.get("REJECTCOMMENT")),
                "SPEC": _normalize_text(row.get("SPECNAME")),
                "REJECT_QTY": _as_int(row.get("REJECT_QTY")),
                "STANDBY_QTY": _as_int(row.get("STANDBY_QTY")),
                "QTYTOPROCESS_QTY": _as_int(row.get("QTYTOPROCESS_QTY")),
                "INPROCESS_QTY": _as_int(row.get("INPROCESS_QTY")),
                "PROCESSED_QTY": _as_int(row.get("PROCESSED_QTY")),
                "扣帳報廢量": _as_int(row.get("REJECT_TOTAL_QTY")),
                "不扣帳報廢量": _as_int(row.get("DEFECT_QTY")),
                "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                "報廢時間": _to_datetime_str(row.get("TXN_TIME")),
                "日期": _to_date_str(row.get("TXN_DAY")),
            }
        )
    return rows

@@ -225,8 +225,8 @@ def _build_where_clause(
        builder.add_condition("UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) <> 'MATERIAL'")
        material_exclusion_applied = True
    pb_diode_exclusion_applied = False
    if exclude_pb_diode and "PB_Diode" not in normalized_packages:
        builder.add_condition("b.PRODUCTLINENAME <> 'PB_Diode'")
    if exclude_pb_diode and not any(p.startswith("PB_") for p in normalized_packages):
        builder.add_condition("b.PRODUCTLINENAME NOT LIKE 'PB\\_%' ESCAPE '\\'")
        pb_diode_exclusion_applied = True
    if normalized_categories:
        builder.add_in_condition("b.REJECTCATEGORYNAME", normalized_categories)
@@ -275,6 +275,12 @@ def _build_where_clause(
    return where_clause, params, meta


_DEFAULT_BASE_WHERE = (
    "r.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')"
    " AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1"
)


def _prepare_sql(
    name: str,
    *,
@@ -282,10 +288,12 @@ def _prepare_sql(
    bucket_expr: str = "",
    metric_column: str = "",
    base_variant: str = "",
    base_where: str = "",
) -> str:
    sql = _load_sql(name)
    sql = sql.replace("{{ BASE_QUERY }}", _base_query_sql(base_variant))
    sql = sql.replace("{{ BASE_WITH_CTE }}", _base_with_cte_sql("base", base_variant))
    sql = sql.replace("{{ BASE_WHERE }}", base_where or _DEFAULT_BASE_WHERE)
    sql = sql.replace("{{ WHERE_CLAUSE }}", where_clause or "")
    sql = sql.replace("{{ BUCKET_EXPR }}", bucket_expr or "TRUNC(b.TXN_DAY)")
    sql = sql.replace("{{ METRIC_COLUMN }}", metric_column or "b.REJECT_TOTAL_QTY")

@@ -78,8 +78,7 @@ reject_raw AS (
        ON c.CONTAINERID = r.CONTAINERID
    LEFT JOIN spec_map sm
        ON sm.SPEC = TRIM(r.SPECNAME)
    WHERE r.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
      AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
    WHERE {{ BASE_WHERE }}
),
daily_agg AS (
    SELECT

@@ -72,8 +72,7 @@ reject_raw AS (
        ON c.CONTAINERID = r.CONTAINERID
    LEFT JOIN spec_map sm
        ON sm.SPEC = TRIM(r.SPECNAME)
    WHERE r.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
      AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
    WHERE {{ BASE_WHERE }}
),
daily_agg AS (
    SELECT