feat(reject-history): ship report page and archive openspec change

This commit is contained in:
egg
2026-02-13 20:35:52 +08:00
parent 7cbb155619
commit 05d907ac72
47 changed files with 4419 additions and 73 deletions

View File

@@ -26,6 +26,13 @@
"drawer_id": "drawer-2", "drawer_id": "drawer-2",
"order": 3 "order": 3
}, },
{
"route": "/reject-history",
"name": "報廢歷史查詢",
"status": "dev",
"drawer_id": "drawer-2",
"order": 4
},
{ {
"route": "/wip-detail", "route": "/wip-detail",
"name": "WIP 明細", "name": "WIP 明細",

View File

@@ -7,6 +7,7 @@
- `DWH.DW_MES_LOTREJECTHISTORY`: 不良/報廢事實表(主來源) - `DWH.DW_MES_LOTREJECTHISTORY`: 不良/報廢事實表(主來源)
- `DWH.DW_MES_CONTAINER`: 補齊 `PJ_TYPE``PRODUCTLINENAME``MFGORDERNAME` - `DWH.DW_MES_CONTAINER`: 補齊 `PJ_TYPE``PRODUCTLINENAME``MFGORDERNAME`
- `DWH.DW_MES_SPEC_WORKCENTER_V`: 對應 `WORKCENTER_GROUP` 與排序欄位 - `DWH.DW_MES_SPEC_WORKCENTER_V`: 對應 `WORKCENTER_GROUP` 與排序欄位
- `DWH.ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE`: 良率排除政策表(`ENABLE_FLAG='Y'` 代表不納入良率計算)
## 資料評估重點2026-02-13近 30 天樣本) ## 資料評估重點2026-02-13近 30 天樣本)
- `DW_MES_LOTREJECTHISTORY``230,074` 筆;`HISTORYMAINLINEID``75,683` 個。 - `DW_MES_LOTREJECTHISTORY``230,074` 筆;`HISTORYMAINLINEID``75,683` 個。
@@ -33,12 +34,69 @@
- `DEFECT_RATE_PCT = DEFECT_QTY / MOVEIN_QTY * 100` - `DEFECT_RATE_PCT = DEFECT_QTY / MOVEIN_QTY * 100`
- `REJECT_SHARE_PCT = REJECT_TOTAL_QTY / (REJECT_TOTAL_QTY + DEFECT_QTY) * 100` - `REJECT_SHARE_PCT = REJECT_TOTAL_QTY / (REJECT_TOTAL_QTY + DEFECT_QTY) * 100`
## 排除政策與前端開關
- 預設模式:排除 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE``ENABLE_FLAG='Y'` 的報廢原因。
- 可切換模式:提供 `include_excluded_scrap=true|false` 讓使用者決定是否納入。
- 前端頁面提供「納入不計良率報廢」開關,並同步影響 summary/trend/pareto/list/export。
- 排除原因清單採全表快取(預設每日刷新一次;Redis 優先、記憶體 fallback)
## API 與欄位契約
- `GET /api/reject-history/options`
- 回傳 `workcenter_groups``reasons` 與政策 `meta`
- `GET /api/reject-history/summary`
- 回傳 `MOVEIN_QTY`、`REJECT_TOTAL_QTY`、`DEFECT_QTY`、`REJECT_RATE_PCT`、`DEFECT_RATE_PCT`、`REJECT_SHARE_PCT`、`AFFECTED_LOT_COUNT`、`AFFECTED_WORKORDER_COUNT`
- `GET /api/reject-history/trend`
- 回傳趨勢 `items[]`,每筆含 `bucket_date``REJECT_TOTAL_QTY``DEFECT_QTY``REJECT_RATE_PCT``DEFECT_RATE_PCT`
- `GET /api/reject-history/reason-pareto`
- 支援 `metric_mode=reject_total|defect`
- 支援 `pareto_scope=top80|all`(預設 `top80`
- `GET /api/reject-history/list`
- 分頁回傳 `items[]``pagination`
- 明細保留五個 reject 欄位(`REJECT_QTY``STANDBY_QTY``QTYTOPROCESS_QTY``INPROCESS_QTY``PROCESSED_QTY`)與 `DEFECT_QTY`
- `GET /api/reject-history/export`
- CSV 欄位與 list 語義一致,含 `REJECT_TOTAL_QTY``DEFECT_QTY`
## 前端視覺與互動
- 主要區塊:
- Header語義 badge + 更新時間)
- 篩選區(時間、原因、`WORKCENTER_GROUP`、政策開關、Pareto 前 80% 開關)
- KPI8 張卡Reject 暖色語義 / Defect 冷色語義)
- 趨勢圖(報廢量與報廢率分圖)
- Pareto柱狀 + 累積線)與明細表
- 互動規則:
- Pareto 點選原因後,會套用為 active filter chip 並重查
- 再次點選同原因會取消篩選
- 預設僅顯示累計前 80%,可切換顯示完整 Pareto
- 匯出 CSV 使用目前畫面相同篩選條件
## 交付檔案 ## 交付檔案
- 建表 + 刷新 SQL`docs/reject_history_performance.sql` - 建表 + 刷新 SQL`docs/reject_history_performance.sql`
- 可被應用層直接載入的查詢 SQL`src/mes_dashboard/sql/reject_history/performance_daily.sql` - 可被應用層直接載入的查詢 SQL`src/mes_dashboard/sql/reject_history/performance_daily.sql`
## 上線與回滾策略
- 上線策略:
- 先維持 `data/page_status.json``/reject-history``dev`
- 完成 UAT 後再改為 `released`
- 回滾策略:
-`/reject-history` 狀態切回 `dev` 或移除導航入口
- 保留 API 與既有頁面,不影響既有報表
- 快取策略:
- 排除政策表每日全表刷新(預設 86400 秒)
- Redis 異常時退回記憶體快取,不阻斷查詢
## 驗證紀錄(2026-02-13)
- 後端/整合測試:
- `pytest -q tests/test_reject_history_service.py tests/test_scrap_reason_exclusion_cache.py tests/test_reject_history_routes.py tests/test_reject_history_shell_coverage.py tests/test_portal_shell_wave_b_native_smoke.py::test_reject_history_native_smoke_query_sections_and_export tests/test_app_factory.py::AppFactoryTests::test_routes_registered`
- 結果:`22 passed`
- 前端建置:
- `cd frontend && npm run build`
- 結果:成功產出 `reject-history.html/js/css`,並完成 dist 複製流程
## 建議排程 ## 建議排程
- 每日跑前一日增量: - 每日跑前一日增量:
- `:start_date = TRUNC(SYSDATE - 1)` - `:start_date = TRUNC(SYSDATE - 1)`
- `:end_date = TRUNC(SYSDATE - 1)` - `:end_date = TRUNC(SYSDATE - 1)`
- 每月第一天補跑前 31 天,避免補數漏失。 - 每月第一天補跑前 31 天,避免補數漏失。
## 已知環境備註
- `tests/test_navigation_contract.py` 需要 `docs/migration/portal-no-iframe/baseline_drawer_visibility.json`。目前工作區缺少此 baseline 檔案,屬既有環境缺口,與本次 reject-history 開發內容無直接耦合。

View File

@@ -34,6 +34,10 @@ const NATIVE_MODULE_LOADERS = Object.freeze({
() => import('../hold-history/App.vue'), () => import('../hold-history/App.vue'),
[() => import('../wip-shared/styles.css'), () => import('../hold-history/style.css')], [() => import('../wip-shared/styles.css'), () => import('../hold-history/style.css')],
), ),
'/reject-history': createNativeLoader(
() => import('../reject-history/App.vue'),
[() => import('../wip-shared/styles.css'), () => import('../reject-history/style.css')],
),
'/resource': createNativeLoader( '/resource': createNativeLoader(
() => import('../resource-status/App.vue'), () => import('../resource-status/App.vue'),
[() => import('../resource-shared/styles.css'), () => import('../resource-status/style.css')], [() => import('../resource-shared/styles.css'), () => import('../resource-status/style.css')],

View File

@@ -4,6 +4,7 @@ const IN_SCOPE_REPORT_ROUTES = Object.freeze([
'/hold-overview', '/hold-overview',
'/hold-detail', '/hold-detail',
'/hold-history', '/hold-history',
'/reject-history',
'/resource', '/resource',
'/resource-history', '/resource-history',
'/qc-gate', '/qc-gate',
@@ -109,6 +110,17 @@ const ROUTE_CONTRACTS = Object.freeze({
scope: 'in-scope', scope: 'in-scope',
compatibilityPolicy: 'redirect_to_shell_when_spa_enabled', compatibilityPolicy: 'redirect_to_shell_when_spa_enabled',
}), }),
'/reject-history': buildContract({
route: '/reject-history',
routeId: 'reject-history',
renderMode: 'native',
owner: 'frontend-mes-reporting',
title: '報廢歷史查詢',
rollbackStrategy: 'fallback_to_legacy_route',
visibilityPolicy: 'released_or_admin',
scope: 'in-scope',
compatibilityPolicy: 'redirect_to_shell_when_spa_enabled',
}),
'/resource': buildContract({ '/resource': buildContract({
route: '/resource', route: '/resource',
routeId: 'resource', routeId: 'resource',

View File

@@ -0,0 +1,968 @@
<script setup>
import { computed, onMounted, reactive, ref } from 'vue';
import { BarChart, LineChart } from 'echarts/charts';
import { GridComponent, LegendComponent, TooltipComponent } from 'echarts/components';
import { use } from 'echarts/core';
import { CanvasRenderer } from 'echarts/renderers';
import VChart from 'vue-echarts';
import { apiGet } from '../core/api.js';
import { replaceRuntimeHistory } from '../core/shell-navigation.js';
import MultiSelect from '../resource-shared/components/MultiSelect.vue';
use([CanvasRenderer, BarChart, LineChart, GridComponent, TooltipComponent, LegendComponent]);
// Request timeout (ms) applied to every reject-history API call.
const API_TIMEOUT = 60000;
// Fixed page size for the detail list.
const DEFAULT_PER_PAGE = 50;
// User-editable query filters, two-way bound to the filter panel.
const filters = reactive({
  startDate: '',
  endDate: '',
  workcenterGroups: [],
  packages: [],
  reason: '',
  includeExcludedScrap: false, // policy toggle: include yield-excluded scrap reasons
  excludeMaterialScrap: true, // policy toggle: drop raw-material scrap
  paretoTop80: true, // show only the cumulative top-80% slice of the Pareto
});
// Current detail-list page (1-based).
const page = ref(1);
// Reason chosen by clicking the Pareto; scopes the detail list only.
const detailReason = ref('');
// Dropdown option lists, populated from /api/reject-history/options.
const options = reactive({
  workcenterGroups: [],
  packages: [],
  reasons: [],
});
// KPI summary figures; keys mirror the API field names.
const summary = ref({
  MOVEIN_QTY: 0,
  REJECT_TOTAL_QTY: 0,
  DEFECT_QTY: 0,
  REJECT_RATE_PCT: 0,
  DEFECT_RATE_PCT: 0,
  REJECT_SHARE_PCT: 0,
  AFFECTED_LOT_COUNT: 0,
  AFFECTED_WORKORDER_COUNT: 0,
});
// Trend chart data (daily buckets).
const trend = ref({ items: [], granularity: 'day' });
// Pareto chart data plus the mode/scope it was queried with.
const pareto = ref({ items: [], metric_mode: 'reject_total', pareto_scope: 'top80' });
// Detail table rows and pagination state.
const detail = ref({
  items: [],
  pagination: {
    page: 1,
    perPage: DEFAULT_PER_PAGE,
    total: 0,
    totalPages: 1,
  },
});
// Per-section loading flags; `initial` stays true until the first full load ends.
const loading = reactive({
  initial: true,
  querying: false,
  options: false,
  list: false,
  pareto: false,
});
// Last error shown in the banner ('' = no error).
const errorMessage = ref('');
// Human-readable timestamp of the last successful full query.
const lastQueryAt = ref('');
// Exclusion-policy metadata reported back by the server.
const lastPolicyMeta = ref({
  include_excluded_scrap: false,
  exclusion_applied: false,
  excluded_reason_count: 0,
});
// Monotonic token used to discard responses from superseded queries.
let activeRequestId = 0;

// Reserve and return the next request token.
function nextRequestId() {
  return (activeRequestId += 1);
}

// A request is stale once a newer token has been issued after it started.
function isStaleRequest(requestId) {
  return requestId !== activeRequestId;
}
// Format a Date as a local-time `YYYY-MM-DD` string.
function toDateString(value) {
  const pad = (part) => String(part).padStart(2, '0');
  return [value.getFullYear(), pad(value.getMonth() + 1), pad(value.getDate())].join('-');
}
// Default query window: the 30 days ending yesterday (local time).
function setDefaultDateRange() {
  const end = new Date();
  end.setDate(end.getDate() - 1);
  const start = new Date(end);
  start.setDate(start.getDate() - 29);
  filters.startDate = toDateString(start);
  filters.endDate = toDateString(end);
}
function readArrayParam(params, key) {
const repeated = params.getAll(key).map((value) => String(value || '').trim()).filter(Boolean);
if (repeated.length > 0) {
return repeated;
}
return String(params.get(key) || '')
.split(',')
.map((value) => value.trim())
.filter(Boolean);
}
// Read a boolean query parameter. Missing/blank → defaultValue;
// otherwise only the usual truthy spellings count as true.
function readBooleanParam(params, key, defaultValue = false) {
  const raw = String(params.get(key) || '').trim().toLowerCase();
  if (raw === '') {
    return defaultValue;
  }
  switch (raw) {
    case '1':
    case 'true':
    case 'yes':
    case 'y':
    case 'on':
      return true;
    default:
      return false;
  }
}
// Restore filter/page state from the current URL so links are shareable.
// Only overrides defaults for parameters that are actually present.
function restoreFromUrl() {
  const params = new URLSearchParams(window.location.search);
  const startDate = String(params.get('start_date') || '').trim();
  const endDate = String(params.get('end_date') || '').trim();
  // Only override the default range when BOTH bounds are supplied.
  if (startDate && endDate) {
    filters.startDate = startDate;
    filters.endDate = endDate;
  }
  const wcGroups = readArrayParam(params, 'workcenter_groups');
  if (wcGroups.length > 0) {
    filters.workcenterGroups = wcGroups;
  }
  const packages = readArrayParam(params, 'packages');
  if (packages.length > 0) {
    filters.packages = packages;
  }
  const reason = String(params.get('reason') || '').trim();
  if (reason) {
    filters.reason = reason;
  }
  const detailReasonFromUrl = String(params.get('detail_reason') || '').trim();
  if (detailReasonFromUrl) {
    detailReason.value = detailReasonFromUrl;
  }
  filters.includeExcludedScrap = readBooleanParam(params, 'include_excluded_scrap', false);
  filters.excludeMaterialScrap = readBooleanParam(params, 'exclude_material_scrap', true);
  filters.paretoTop80 = !readBooleanParam(params, 'pareto_scope_all', false);
  // Parse as a base-10 integer: the previous Number(...) coercion accepted
  // fractional values like "2.5", letting a non-integral page number leak
  // into API requests. Anything non-integral or non-positive falls back to 1.
  const parsedPage = Number.parseInt(params.get('page') || '1', 10);
  page.value = Number.isInteger(parsedPage) && parsedPage > 0 ? parsedPage : 1;
}
// Serialize the current query state into the address bar. Uses a history
// replace so refinements do not pollute back/forward navigation.
function updateUrlState() {
  const params = new URLSearchParams();
  params.set('start_date', filters.startDate);
  params.set('end_date', filters.endDate);
  for (const group of filters.workcenterGroups) {
    params.append('workcenter_groups', group);
  }
  for (const pkg of filters.packages) {
    params.append('packages', pkg);
  }
  if (filters.reason) {
    params.set('reason', filters.reason);
  }
  if (detailReason.value) {
    params.set('detail_reason', detailReason.value);
  }
  if (filters.includeExcludedScrap) {
    params.set('include_excluded_scrap', 'true');
  }
  params.set('exclude_material_scrap', String(filters.excludeMaterialScrap));
  if (!filters.paretoTop80) {
    params.set('pareto_scope_all', 'true');
  }
  if (page.value > 1) {
    params.set('page', String(page.value));
  }
  replaceRuntimeHistory(`/reject-history?${params.toString()}`);
}
// Render a quantity with zh-TW thousands separators; nullish/falsy → "0".
function formatNumber(value) {
  const numeric = Number(value || 0);
  return numeric.toLocaleString('zh-TW');
}
// Render a percentage with two decimals; nullish/falsy → "0.00%".
function formatPct(value) {
  const numeric = Number(value || 0);
  return `${numeric.toFixed(2)}%`;
}
// Unwrap the API envelope `{ success, data?, error?, meta? }`.
// Throws on an explicit failure; payloads without a success flag
// (or nullish results) pass through untouched.
function unwrapApiResult(result, fallbackMessage) {
  if (result?.success === false) {
    throw new Error(result.error || fallbackMessage);
  }
  return result;
}
// Query params shared by summary/trend/pareto/list requests.
// `reason` may be overridden per-call; it defaults to the filter-panel reason.
function buildCommonParams({ reason = filters.reason } = {}) {
  const params = {
    start_date: filters.startDate,
    end_date: filters.endDate,
    workcenter_groups: filters.workcenterGroups,
    packages: filters.packages,
    include_excluded_scrap: filters.includeExcludedScrap,
    exclude_material_scrap: filters.excludeMaterialScrap,
  };
  return reason ? { ...params, reasons: [reason] } : params;
}
// Pareto query params: common filters plus metric mode and scope toggle.
function buildParetoParams() {
  const scope = filters.paretoTop80 ? 'top80' : 'all';
  return {
    ...buildCommonParams(),
    metric_mode: 'reject_total',
    pareto_scope: scope,
  };
}
// Detail-list query params. The Pareto drill-down reason takes precedence
// over the filter-panel reason when both are set.
function buildListParams() {
  const reason = detailReason.value || filters.reason;
  return {
    ...buildCommonParams({ reason }),
    page: page.value,
    per_page: DEFAULT_PER_PAGE,
  };
}
// Load dropdown option lists; they depend on the date range and both
// policy toggles so the lists match what a query would actually return.
async function fetchOptions() {
  const params = {
    start_date: filters.startDate,
    end_date: filters.endDate,
    include_excluded_scrap: filters.includeExcludedScrap,
    exclude_material_scrap: filters.excludeMaterialScrap,
  };
  const response = await apiGet('/api/reject-history/options', { params, timeout: API_TIMEOUT });
  const payload = unwrapApiResult(response, '載入篩選選項失敗');
  return payload.data || {};
}
// Fetch KPI summary figures for the current common filters.
// Returns the full envelope (callers read `.data` and `.meta`).
async function fetchSummary() {
  const response = await apiGet('/api/reject-history/summary', {
    params: buildCommonParams(),
    timeout: API_TIMEOUT,
  });
  const payload = unwrapApiResult(response, '載入摘要資料失敗');
  return payload;
}

// Fetch the trend series; granularity is fixed to daily buckets.
async function fetchTrend() {
  const response = await apiGet('/api/reject-history/trend', {
    params: {
      ...buildCommonParams(),
      granularity: 'day',
    },
    timeout: API_TIMEOUT,
  });
  const payload = unwrapApiResult(response, '載入趨勢資料失敗');
  return payload;
}

// Fetch reason-Pareto data (metric mode and scope come from buildParetoParams).
async function fetchPareto() {
  const response = await apiGet('/api/reject-history/reason-pareto', {
    params: buildParetoParams(),
    timeout: API_TIMEOUT,
  });
  const payload = unwrapApiResult(response, '載入柏拉圖資料失敗');
  return payload;
}

// Fetch one page of the detail list (reason scoping via buildListParams).
async function fetchList() {
  const response = await apiGet('/api/reject-history/list', {
    params: buildListParams(),
    timeout: API_TIMEOUT,
  });
  const payload = unwrapApiResult(response, '載入明細資料失敗');
  return payload;
}
// Normalize server-reported exclusion-policy metadata into a stable shape.
function mergePolicyMeta(meta) {
  const include = Boolean(meta?.include_excluded_scrap);
  const applied = Boolean(meta?.exclusion_applied);
  const count = Number(meta?.excluded_reason_count || 0);
  lastPolicyMeta.value = {
    include_excluded_scrap: include,
    exclusion_applied: applied,
    excluded_reason_count: count,
  };
}
// Drop selections that no longer exist in the freshly loaded option lists.
// NOTE(review): workcenterGroups are not pruned here — confirm intentional.
function normalizeFiltersByOptions() {
  if (filters.reason && !options.reasons.includes(filters.reason)) {
    filters.reason = '';
  }
  if (filters.packages.length > 0) {
    const known = new Set(options.packages);
    filters.packages = filters.packages.filter((pkg) => known.has(pkg));
  }
}
// Load every section (summary, trend, pareto, list and — optionally — the
// filter option lists) in parallel for the current filters.
// A monotonically increasing request id guards against a stale response
// overwriting newer state when queries overlap.
async function loadAllData({ loadOptions = true } = {}) {
  const requestId = nextRequestId();
  loading.querying = true;
  loading.list = true;
  loading.pareto = true;
  errorMessage.value = '';
  try {
    // Order matters: destructured below as [summary, trend, pareto, list, options?].
    const tasks = [fetchSummary(), fetchTrend(), fetchPareto(), fetchList()];
    if (loadOptions) {
      loading.options = true;
      tasks.push(fetchOptions());
    }
    const responses = await Promise.all(tasks);
    if (isStaleRequest(requestId)) {
      return;
    }
    const [summaryResp, trendResp, paretoResp, listResp, optionsResp] = responses;
    // Falsy payloads keep the previous state instead of blanking the UI.
    summary.value = summaryResp.data || summary.value;
    trend.value = trendResp.data || trend.value;
    pareto.value = paretoResp.data || pareto.value;
    detail.value = listResp.data || detail.value;
    // Later spreads win: list meta overrides summary/trend/pareto on key clashes.
    const meta = {
      ...(summaryResp.meta || {}),
      ...(trendResp.meta || {}),
      ...(paretoResp.meta || {}),
      ...(listResp.meta || {}),
    };
    mergePolicyMeta(meta);
    if (loadOptions && optionsResp) {
      options.workcenterGroups = Array.isArray(optionsResp.workcenter_groups)
        ? optionsResp.workcenter_groups
        : [];
      options.reasons = Array.isArray(optionsResp.reasons)
        ? optionsResp.reasons
        : [];
      options.packages = Array.isArray(optionsResp.packages)
        ? optionsResp.packages
        : [];
      // Prune selections that disappeared from the refreshed option lists.
      normalizeFiltersByOptions();
    }
    lastQueryAt.value = new Date().toLocaleString('zh-TW');
    updateUrlState();
  } catch (error) {
    if (isStaleRequest(requestId)) {
      return;
    }
    errorMessage.value = error?.message || '載入資料失敗';
  } finally {
    // Only the most recent request is allowed to clear the loading flags.
    if (isStaleRequest(requestId)) {
      return;
    }
    loading.initial = false;
    loading.querying = false;
    loading.options = false;
    loading.list = false;
    loading.pareto = false;
  }
}
// Refresh only the detail table (used by paging and Pareto drill-down).
// Stale responses — superseded by a newer request — are discarded.
async function loadListOnly() {
  const requestId = nextRequestId();
  loading.list = true;
  errorMessage.value = '';
  try {
    const listResp = await fetchList();
    if (!isStaleRequest(requestId)) {
      detail.value = listResp.data || detail.value;
      mergePolicyMeta(listResp.meta || {});
      updateUrlState();
    }
  } catch (error) {
    if (!isStaleRequest(requestId)) {
      errorMessage.value = error?.message || '載入明細資料失敗';
    }
  } finally {
    if (!isStaleRequest(requestId)) {
      loading.list = false;
    }
  }
}
// Refresh only the Pareto section (used by the top-80% scope toggle).
// Stale responses — superseded by a newer request — are discarded.
async function loadParetoOnly() {
  const requestId = nextRequestId();
  loading.pareto = true;
  errorMessage.value = '';
  try {
    const paretoResp = await fetchPareto();
    if (!isStaleRequest(requestId)) {
      pareto.value = paretoResp.data || pareto.value;
      mergePolicyMeta(paretoResp.meta || {});
      updateUrlState();
    }
  } catch (error) {
    if (!isStaleRequest(requestId)) {
      errorMessage.value = error?.message || '載入柏拉圖資料失敗';
    }
  } finally {
    if (!isStaleRequest(requestId)) {
      loading.pareto = false;
    }
  }
}
// Run a fresh query: resets the drill-down selection and paging first.
function applyFilters() {
  detailReason.value = '';
  page.value = 1;
  void loadAllData({ loadOptions: true });
}
// Reset every filter to its default value, then re-query with fresh options.
function clearFilters() {
  setDefaultDateRange();
  Object.assign(filters, {
    workcenterGroups: [],
    packages: [],
    reason: '',
    includeExcludedScrap: false,
    excludeMaterialScrap: true,
    paretoTop80: true,
  });
  detailReason.value = '';
  page.value = 1;
  void loadAllData({ loadOptions: true });
}
// Navigate the detail list; out-of-range pages are ignored.
function goToPage(nextPage) {
  const maxPage = Number(detail.value?.pagination?.totalPages || 1);
  if (nextPage >= 1 && nextPage <= maxPage) {
    page.value = nextPage;
    void loadListOnly();
  }
}
// Toggle the Pareto drill-down: clicking the active reason clears it,
// any other reason selects it; either way the list restarts at page 1.
function onParetoClick(reason) {
  if (!reason) {
    return;
  }
  detailReason.value = reason === detailReason.value ? '' : reason;
  page.value = 1;
  void loadListOnly();
}
// Flip the top-80% scope toggle and re-fetch only the Pareto section.
function handleParetoScopeToggle(checked) {
  filters.paretoTop80 = !!checked;
  void loadParetoOnly();
}
// Remove an active-filter chip. Most chip types trigger a full reload;
// the detail-reason chip only affects the list so it reloads just that.
function removeFilterChip(chip) {
  if (!chip?.removable) {
    return;
  }
  switch (chip.type) {
    case 'reason':
      filters.reason = '';
      detailReason.value = '';
      break;
    case 'workcenter':
      filters.workcenterGroups = filters.workcenterGroups.filter((item) => item !== chip.value);
      break;
    case 'package':
      filters.packages = filters.packages.filter((item) => item !== chip.value);
      break;
    case 'detail-reason':
      detailReason.value = '';
      page.value = 1;
      void loadListOnly();
      return;
    default:
      return;
  }
  page.value = 1;
  void loadAllData({ loadOptions: false });
}
// Download the CSV export using the filters currently applied on screen
// (no paging — the server streams the full filtered result).
function exportCsv() {
  const params = new URLSearchParams();
  params.set('start_date', filters.startDate);
  params.set('end_date', filters.endDate);
  params.set('include_excluded_scrap', String(filters.includeExcludedScrap));
  params.set('exclude_material_scrap', String(filters.excludeMaterialScrap));
  for (const group of filters.workcenterGroups) {
    params.append('workcenter_groups', group);
  }
  for (const pkg of filters.packages) {
    params.append('packages', pkg);
  }
  const reason = detailReason.value || filters.reason;
  if (reason) {
    params.append('reasons', reason);
  }
  window.location.href = `/api/reject-history/export?${params.toString()}`;
}
// Total scrap = accounted (reject) + unaccounted (defect) quantities.
const totalScrapQty = computed(() => {
  const reject = Number(summary.value.REJECT_TOTAL_QTY || 0);
  const defect = Number(summary.value.DEFECT_QTY || 0);
  return reject + defect;
});
// Chips rendered in the "active filters" row. The first three (date range
// and the two policy modes) are always present and not removable; reason,
// detail-reason, workcenter and package chips are added per selection and
// can be removed via removeFilterChip.
const activeFilterChips = computed(() => {
  const chips = [
    {
      key: 'date-range',
      label: `日期: ${filters.startDate || '-'} ~ ${filters.endDate || '-'}`,
      removable: false,
      type: 'date',
      value: '',
    },
    {
      key: 'policy-mode',
      label: filters.includeExcludedScrap ? '政策: 納入不計良率報廢' : '政策: 排除不計良率報廢',
      removable: false,
      type: 'policy',
      value: '',
    },
    {
      key: 'material-policy-mode',
      label: filters.excludeMaterialScrap ? '原物料: 已排除' : '原物料: 已納入',
      removable: false,
      type: 'policy',
      value: '',
    },
  ];
  if (filters.reason) {
    chips.push({
      key: `reason:${filters.reason}`,
      label: `原因: ${filters.reason}`,
      removable: true,
      type: 'reason',
      value: filters.reason,
    });
  }
  if (detailReason.value) {
    chips.push({
      key: `detail-reason:${detailReason.value}`,
      label: `明細原因: ${detailReason.value}`,
      removable: true,
      type: 'detail-reason',
      value: detailReason.value,
    });
  }
  filters.workcenterGroups.forEach((group) => {
    chips.push({
      key: `workcenter:${group}`,
      label: `WC: ${group}`,
      removable: true,
      type: 'workcenter',
      value: group,
    });
  });
  filters.packages.forEach((pkg) => {
    chips.push({
      key: `package:${pkg}`,
      label: `Package: ${pkg}`,
      removable: true,
      type: 'package',
      value: pkg,
    });
  });
  return chips;
});
// KPI cards: `lane` drives the card color scheme (reject = warm,
// defect = cool, neutral otherwise); `isPct` selects percentage formatting.
// NOTE(review): the design doc mentions 8 KPI cards; only 6 are defined
// here — confirm whether the rate cards were dropped intentionally.
const kpiCards = computed(() => {
  return [
    { key: 'REJECT_TOTAL_QTY', label: '扣帳報廢量', value: summary.value.REJECT_TOTAL_QTY, lane: 'reject', isPct: false },
    { key: 'DEFECT_QTY', label: '不扣帳報廢量', value: summary.value.DEFECT_QTY, lane: 'defect', isPct: false },
    { key: 'TOTAL_SCRAP_QTY', label: '總報廢量', value: totalScrapQty.value, lane: 'neutral', isPct: false },
    { key: 'REJECT_SHARE_PCT', label: '扣帳占比', value: summary.value.REJECT_SHARE_PCT, lane: 'neutral', isPct: true },
    { key: 'AFFECTED_LOT_COUNT', label: '受影響 LOT', value: summary.value.AFFECTED_LOT_COUNT, lane: 'neutral', isPct: false },
    { key: 'AFFECTED_WORKORDER_COUNT', label: '受影響工單', value: summary.value.AFFECTED_WORKORDER_COUNT, lane: 'neutral', isPct: false },
  ];
});
// ECharts option for the daily quantity trend: two bar series
// (accounted reject = red, unaccounted defect = blue) over date buckets.
const quantityChartOption = computed(() => {
  const items = Array.isArray(trend.value?.items) ? trend.value.items : [];
  return {
    tooltip: {
      trigger: 'axis',
      axisPointer: { type: 'cross' },
    },
    legend: {
      data: ['扣帳報廢量', '不扣帳報廢量'],
      bottom: 0,
    },
    grid: { left: 48, right: 24, top: 22, bottom: 70 },
    xAxis: {
      type: 'category',
      data: items.map((item) => item.bucket_date || ''),
    },
    yAxis: {
      type: 'value',
      axisLabel: {
        // Thousands separators on the quantity axis.
        formatter(value) {
          return Number(value || 0).toLocaleString('zh-TW');
        },
      },
    },
    series: [
      {
        name: '扣帳報廢量',
        type: 'bar',
        data: items.map((item) => Number(item.REJECT_TOTAL_QTY || 0)),
        itemStyle: { color: '#dc2626' },
        barMaxWidth: 28,
      },
      {
        name: '不扣帳報廢量',
        type: 'bar',
        data: items.map((item) => Number(item.DEFECT_QTY || 0)),
        itemStyle: { color: '#0284c7' },
        barMaxWidth: 28,
      },
    ],
  };
});
// ECharts option for the reason Pareto: quantity bars on the left axis and
// the cumulative-percentage line on the right axis (0–100%). The bar for
// the currently drilled-down reason is highlighted in a darker red.
const paretoChartOption = computed(() => {
  const items = Array.isArray(pareto.value?.items) ? pareto.value.items : [];
  return {
    tooltip: {
      trigger: 'axis',
      axisPointer: { type: 'cross' },
      // Custom tooltip: reason, quantity, share and cumulative share.
      formatter(params) {
        const idx = Number(params?.[0]?.dataIndex || 0);
        const item = items[idx] || {};
        return [
          `<b>${item.reason || '(未填寫)'}</b>`,
          `報廢量: ${formatNumber(item.metric_value || 0)}`,
          `占比: ${Number(item.pct || 0).toFixed(2)}%`,
          `累計: ${Number(item.cumPct || 0).toFixed(2)}%`,
        ].join('<br/>');
      },
    },
    legend: {
      data: ['報廢量', '累積%'],
      bottom: 0,
    },
    grid: {
      left: 52,
      right: 52,
      top: 20,
      bottom: 96,
    },
    xAxis: {
      type: 'category',
      data: items.map((item) => item.reason || '(未填寫)'),
      axisLabel: {
        interval: 0,
        // Rotate labels once there are enough reasons to collide.
        rotate: items.length > 6 ? 35 : 0,
        fontSize: 11,
        overflow: 'truncate',
        width: 100,
      },
    },
    yAxis: [
      {
        type: 'value',
        name: '量',
      },
      {
        type: 'value',
        name: '%',
        min: 0,
        max: 100,
        axisLabel: { formatter: '{value}%' },
      },
    ],
    series: [
      {
        name: '報廢量',
        type: 'bar',
        data: items.map((item) => Number(item.metric_value || 0)),
        barMaxWidth: 34,
        itemStyle: {
          // Highlight the actively drilled-down reason.
          color(params) {
            const reason = items[params.dataIndex]?.reason || '';
            return reason === detailReason.value ? '#b91c1c' : '#2563eb';
          },
          borderRadius: [4, 4, 0, 0],
        },
      },
      {
        name: '累積%',
        type: 'line',
        yAxisIndex: 1, // plotted against the percentage axis
        data: items.map((item) => Number(item.cumPct || 0)),
        lineStyle: { color: '#f59e0b', width: 2 },
        itemStyle: { color: '#f59e0b' },
        symbolSize: 6,
      },
    ],
  };
});
// Chart click handler: only bar clicks (the quantity series) toggle the
// drill-down; clicks on the cumulative line are ignored.
function onParetoChartClick(params) {
  if (params?.seriesType !== 'bar') {
    return;
  }
  const reason = pareto.value?.items?.[params.dataIndex]?.reason;
  onParetoClick(reason);
}
// Pagination info for the detail table, with safe defaults before first load.
const pagination = computed(() => {
  const fallback = {
    page: 1,
    perPage: DEFAULT_PER_PAGE,
    total: 0,
    totalPages: 1,
  };
  return detail.value?.pagination || fallback;
});
// Whether the trend / Pareto sections have anything to draw.
const hasTrendData = computed(() => {
  const items = trend.value?.items;
  return Array.isArray(items) && items.length > 0;
});
const hasParetoData = computed(() => {
  const items = pareto.value?.items;
  return Array.isArray(items) && items.length > 0;
});
// Bootstrap order matters: seed the default date range first, let URL
// parameters override it, then run the initial full load.
onMounted(() => {
  setDefaultDateRange();
  restoreFromUrl();
  void loadAllData({ loadOptions: true });
});
</script>
<template>
<div class="dashboard reject-history-page">
<header class="header reject-history-header">
<div class="header-left">
<h1>報廢歷史查詢</h1>
</div>
<div class="header-right">
<div class="last-update" v-if="lastQueryAt">更新時間{{ lastQueryAt }}</div>
</div>
</header>
<div v-if="errorMessage" class="error-banner">{{ errorMessage }}</div>
<section class="card">
<div class="card-header">
<div class="card-title">查詢條件</div>
</div>
<div class="card-body filter-panel">
<div class="filter-group">
<label class="filter-label" for="start-date">開始日期</label>
<input id="start-date" v-model="filters.startDate" type="date" class="filter-input" />
</div>
<div class="filter-group">
<label class="filter-label" for="end-date">結束日期</label>
<input id="end-date" v-model="filters.endDate" type="date" class="filter-input" />
</div>
<div class="filter-group">
<label class="filter-label">Package</label>
<MultiSelect
:model-value="filters.packages"
:options="options.packages"
placeholder="全部 Package"
searchable
@update:model-value="filters.packages = $event"
/>
</div>
<div class="filter-group filter-group-wide">
<label class="filter-label">WORKCENTER GROUP</label>
<MultiSelect
:model-value="filters.workcenterGroups"
:options="options.workcenterGroups"
placeholder="全部工作中心群組"
searchable
@update:model-value="filters.workcenterGroups = $event"
/>
</div>
<div class="filter-group filter-group-wide">
<label class="filter-label" for="reason">報廢原因</label>
<select id="reason" v-model="filters.reason" class="filter-input">
<option value="">全部原因</option>
<option v-for="reason in options.reasons" :key="reason" :value="reason">
{{ reason }}
</option>
</select>
</div>
<div class="filter-group filter-group-wide inline-toggle-group">
<div class="checkbox-row">
<label class="checkbox-pill">
<input v-model="filters.includeExcludedScrap" type="checkbox" />
納入不計良率報廢
</label>
<label class="checkbox-pill">
<input v-model="filters.excludeMaterialScrap" type="checkbox" />
排除原物料報廢
</label>
<label class="checkbox-pill">
<input
:checked="filters.paretoTop80"
type="checkbox"
@change="handleParetoScopeToggle($event.target.checked)"
/>
Pareto 僅顯示累計前 80%
</label>
</div>
</div>
<div class="filter-actions">
<button class="btn btn-primary" :disabled="loading.querying" @click="applyFilters">查詢</button>
<button class="btn btn-secondary" :disabled="loading.querying" @click="clearFilters">清除條件</button>
<button class="btn btn-light btn-export" :disabled="loading.querying" @click="exportCsv">匯出 CSV</button>
</div>
</div>
<div class="card-body active-filter-chip-row" v-if="activeFilterChips.length > 0">
<div class="filter-label">套用中篩選</div>
<div class="chip-list">
<div v-for="chip in activeFilterChips" :key="chip.key" class="filter-chip">
<span>{{ chip.label }}</span>
<button
v-if="chip.removable"
type="button"
class="chip-remove"
@click="removeFilterChip(chip)"
>
×
</button>
</div>
</div>
</div>
</section>
<section class="summary-row reject-summary-row">
<article
v-for="card in kpiCards"
:key="card.key"
class="summary-card"
:class="`lane-${card.lane}`"
>
<div class="summary-label">{{ card.label }}</div>
<div class="summary-value small">{{ card.isPct ? formatPct(card.value) : formatNumber(card.value) }}</div>
</article>
</section>
<section class="chart-grid">
<article class="card">
<div class="card-header"><div class="card-title">報廢量趨勢</div></div>
<div class="card-body chart-wrap">
<VChart :option="quantityChartOption" autoresize />
<div v-if="!hasTrendData && !loading.querying" class="placeholder chart-empty">No data</div>
</div>
</article>
</section>
<section class="card">
<div class="card-header pareto-header">
<div class="card-title">報廢量 vs 報廢原因Pareto</div>
</div>
<div class="card-body pareto-layout">
<div class="pareto-chart-wrap">
<VChart :option="paretoChartOption" autoresize @click="onParetoChartClick" />
<div v-if="!hasParetoData && !loading.pareto" class="placeholder chart-empty">No data</div>
</div>
<div class="pareto-table-wrap">
<table class="detail-table pareto-table">
<thead>
<tr>
<th>原因</th>
<th>報廢量</th>
<th>占比</th>
<th>累積</th>
</tr>
</thead>
<tbody>
<tr
v-for="item in pareto.items"
:key="item.reason"
:class="{ active: detailReason === item.reason }"
>
<td>
<button class="reason-link" type="button" @click="onParetoClick(item.reason)">
{{ item.reason }}
</button>
</td>
<td>{{ formatNumber(item.metric_value) }}</td>
<td>{{ formatPct(item.pct) }}</td>
<td>{{ formatPct(item.cumPct) }}</td>
</tr>
<tr v-if="!pareto.items || pareto.items.length === 0">
<td colspan="4" class="placeholder">No data</td>
</tr>
</tbody>
</table>
</div>
</div>
</section>
<section class="card">
<div class="card-header">
<div class="card-title">明細列表</div>
</div>
<div class="card-body detail-table-wrap">
<table class="detail-table">
<thead>
<tr>
<th>日期</th>
<th>WORKCENTER_GROUP</th>
<th>WORKCENTER</th>
<th>Package</th>
<th>原因</th>
<th>REJECT_TOTAL_QTY</th>
<th>DEFECT_QTY</th>
<th>REJECT_QTY</th>
<th>STANDBY_QTY</th>
<th>QTYTOPROCESS_QTY</th>
<th>INPROCESS_QTY</th>
<th>PROCESSED_QTY</th>
</tr>
</thead>
<tbody>
<tr v-for="row in detail.items" :key="`${row.TXN_DAY}-${row.WORKCENTERNAME}-${row.LOSSREASONNAME}`">
<td>{{ row.TXN_DAY }}</td>
<td>{{ row.WORKCENTER_GROUP }}</td>
<td>{{ row.WORKCENTERNAME }}</td>
<td>{{ row.PRODUCTLINENAME }}</td>
<td>{{ row.LOSSREASONNAME }}</td>
<td>{{ formatNumber(row.REJECT_TOTAL_QTY) }}</td>
<td>{{ formatNumber(row.DEFECT_QTY) }}</td>
<td>{{ formatNumber(row.REJECT_QTY) }}</td>
<td>{{ formatNumber(row.STANDBY_QTY) }}</td>
<td>{{ formatNumber(row.QTYTOPROCESS_QTY) }}</td>
<td>{{ formatNumber(row.INPROCESS_QTY) }}</td>
<td>{{ formatNumber(row.PROCESSED_QTY) }}</td>
</tr>
<tr v-if="!detail.items || detail.items.length === 0">
<td colspan="12" class="placeholder">No data</td>
</tr>
</tbody>
</table>
</div>
<div class="pagination">
<button :disabled="pagination.page <= 1 || loading.list" @click="goToPage(pagination.page - 1)">Prev</button>
<span class="page-info">
Page {{ pagination.page }} / {{ pagination.totalPages }} · Total {{ formatNumber(pagination.total) }}
</span>
<button :disabled="pagination.page >= pagination.totalPages || loading.list" @click="goToPage(pagination.page + 1)">Next</button>
</div>
</section>
</div>
</template>

View File

@@ -0,0 +1,12 @@
<!doctype html>
<html lang="zh-Hant">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>報廢歷史查詢</title>
</head>
<body>
<div id="app"></div>
<script type="module" src="./main.js"></script>
</body>
</html>

View File

@@ -0,0 +1,7 @@
// Entry point for the standalone reject-history page build.
import { createApp } from 'vue';
import App from './App.vue';
// Shared WIP styles load before the page-specific overrides.
import '../wip-shared/styles.css';
import './style.css';
createApp(App).mount('#app');

View File

@@ -0,0 +1,418 @@
/* ---- Page header and generic card shell ---- */
/* Gradient banner specific to the reject-history page header. */
.reject-history-header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
/* Card container: rounded, shadowed, clips overflowing children. */
.card {
background: var(--card-bg);
border-radius: 10px;
box-shadow: var(--shadow);
overflow: hidden;
margin-bottom: 14px;
}
.card-header {
padding: 14px 18px;
border-bottom: 1px solid var(--border);
background: #f8fafc;
}
.card-title {
font-size: 15px;
font-weight: 700;
color: #0f172a;
}
.card-body {
padding: 14px 16px;
}
/* Inline error banner (red-on-light-red) shown above page content. */
.error-banner {
margin-bottom: 14px;
padding: 10px 12px;
border-radius: 6px;
background: #fef2f2;
color: #991b1b;
font-size: 13px;
}
/* ---- Filter panel: 4-column grid on desktop; media queries below collapse it ---- */
.filter-panel {
display: grid;
grid-template-columns: repeat(4, minmax(0, 1fr));
gap: 14px;
align-items: end;
}
.filter-group {
display: flex;
flex-direction: column;
gap: 8px;
}
/* Wide controls (e.g. date range) take two grid columns. */
.filter-group-wide {
grid-column: span 2;
}
.filter-label {
font-size: 12px;
font-weight: 700;
color: #475569;
}
.filter-input {
width: 100%;
padding: 8px 10px;
border: 1px solid var(--border);
border-radius: 8px;
font-size: 13px;
background: #fff;
}
.filter-input:focus {
outline: none;
border-color: #0ea5e9;
box-shadow: 0 0 0 2px rgba(14, 165, 233, 0.18);
}
/* MultiSelect embedded in a filter group stretches to the group width. */
.filter-group .multi-select {
width: 100%;
min-width: 0;
}
.inline-toggle-group {
align-self: center;
}
.checkbox-row {
display: inline-flex;
align-items: center;
flex-wrap: wrap;
gap: 10px;
}
/* Pill-shaped checkbox control (e.g. policy toggles in the filter area). */
.checkbox-pill {
display: inline-flex;
align-items: center;
gap: 6px;
border: 1px solid var(--border);
border-radius: 999px;
padding: 6px 10px;
font-size: 13px;
color: #334155;
background: #f8fafc;
}
.checkbox-pill input[type='checkbox'] {
margin: 0;
width: 14px;
height: 14px;
accent-color: #2563eb;
}
/* Query/clear/export buttons aligned to the right of the grid. */
.filter-actions {
display: flex;
gap: 10px;
justify-content: flex-end;
grid-column: span 2;
}
/* ---- Active filter chips ---- */
.active-filter-chip-row {
display: flex;
flex-direction: column;
gap: 8px;
border-top: 1px solid var(--border);
}
.chip-list {
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.filter-chip {
display: inline-flex;
align-items: center;
gap: 8px;
padding: 5px 10px;
border-radius: 999px;
border: 1px solid #cbd5e1;
background: #f8fafc;
font-size: 12px;
color: #334155;
}
/* Small "x" button inside a chip. */
.chip-remove {
border: 0;
background: transparent;
color: #475569;
cursor: pointer;
font-size: 14px;
line-height: 1;
}
.btn-export {
background: #0f766e;
color: #fff;
}
.btn-export:hover {
background: #0b5e59;
}
/* ---- KPI summary row and semantic color lanes ---- */
/* 6 KPI cards per row on wide screens; media queries reduce the count. */
.reject-summary-row {
grid-template-columns: repeat(6, minmax(0, 1fr));
}
/* Warm red lane = charge-off reject metrics. */
.lane-reject {
border-top: 3px solid #dc2626;
}
/* Cool blue lane = non-charge-off defect metrics. */
.lane-defect {
border-top: 3px solid #0284c7;
}
.lane-neutral {
border-top: 3px solid #64748b;
}
/* ---- Trend charts and Pareto layout ---- */
.chart-grid {
display: grid;
grid-template-columns: minmax(0, 1fr);
gap: 14px;
margin-bottom: 14px;
}
.chart-wrap,
.pareto-chart-wrap {
height: 340px;
position: relative;
}
.pareto-header {
display: flex;
justify-content: space-between;
align-items: center;
gap: 12px;
}
/* Chart on the left, table on the right (collapses to one column below 1180px). */
.pareto-layout {
display: grid;
grid-template-columns: minmax(0, 1.1fr) minmax(0, 0.9fr);
gap: 12px;
}
.pareto-table-wrap {
overflow: auto;
}
/* ---- Detail table ---- */
.detail-table {
width: 100%;
border-collapse: collapse;
font-size: 12px;
}
.detail-table th,
.detail-table td {
border-bottom: 1px solid var(--border);
padding: 8px 10px;
text-align: left;
vertical-align: middle;
white-space: nowrap;
}
/* Keep the header row pinned while the table body scrolls. */
.detail-table thead th {
position: sticky;
top: 0;
background: #f8fafc;
z-index: 1;
}
/* Clickable reason text styled as a link (used for Pareto click filtering). */
.reason-link {
border: none;
background: transparent;
color: #1d4ed8;
cursor: pointer;
text-decoration: underline;
font-size: 12px;
}
/* Highlight for the currently selected Pareto row. */
.pareto-table tbody tr.active {
background: #eff6ff;
}
.detail-table-wrap {
overflow: auto;
}
/* ---- MultiSelect component styles (shared-ui compatible) ---- */
.multi-select {
position: relative;
min-width: 160px;
}
/* Button that opens/closes the dropdown; shows the current selection text. */
.multi-select-trigger {
width: 100%;
display: flex;
align-items: center;
justify-content: space-between;
gap: 8px;
border: 1px solid var(--border);
border-radius: 8px;
padding: 8px 10px;
font-size: 13px;
color: #1f2937;
background: #ffffff;
cursor: pointer;
}
.multi-select-trigger:disabled {
cursor: not-allowed;
opacity: 0.7;
}
/* Truncate long selection summaries with an ellipsis. */
.multi-select-text {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
text-align: left;
}
.multi-select-arrow {
color: #64748b;
font-size: 11px;
}
/* Dropdown panel anchored just below the trigger. */
.multi-select-dropdown {
position: absolute;
top: calc(100% + 4px);
left: 0;
right: 0;
z-index: 20;
border: 1px solid var(--border);
border-radius: 8px;
background: #ffffff;
box-shadow: 0 12px 24px rgba(15, 23, 42, 0.14);
overflow: hidden;
}
/* Search box pinned at the top of the dropdown. */
.multi-select-search {
display: block;
width: 100%;
border: none;
border-bottom: 1px solid var(--border);
padding: 8px 12px;
font-size: 13px;
color: #1f2937;
outline: none;
background: #f8fafc;
}
.multi-select-search::placeholder {
color: #94a3b8;
}
/* Scrollable option list. */
.multi-select-options {
max-height: 250px;
overflow-y: auto;
padding: 8px 0;
}
.multi-select-option {
display: flex;
align-items: center;
gap: 8px;
width: 100%;
padding: 6px 12px;
border: none;
background: transparent;
font-size: 13px;
color: #334155;
cursor: pointer;
text-align: left;
}
.multi-select-option:hover {
background: #f8fafc;
}
.multi-select-option input[type='checkbox'] {
margin: 0;
width: 14px;
height: 14px;
accent-color: #2563eb;
}
/* Placeholder shown when search yields no options. */
.multi-select-empty {
padding: 12px;
text-align: center;
color: #94a3b8;
font-size: 13px;
}
/* Footer row for bulk actions (e.g. select all / clear). */
.multi-select-actions {
display: flex;
gap: 8px;
padding: 8px 10px;
border-top: 1px solid var(--border);
background: #f8fafc;
}
.btn-sm {
padding: 4px 10px;
border: 1px solid var(--border);
border-radius: 6px;
background: #f8fafc;
color: var(--text);
cursor: pointer;
font-size: 12px;
}
.btn-sm:hover {
border-color: #c2d0e0;
background: #eef4fb;
}
/* ---- Responsive breakpoints ---- */
/* <=1400px: KPI row drops from 6 to 3 cards per row. */
@media (max-width: 1400px) {
.reject-summary-row {
grid-template-columns: repeat(3, minmax(0, 1fr));
}
}
/* <=1180px: filter grid collapses to 2 columns; Pareto chart/table stack. */
@media (max-width: 1180px) {
.filter-panel {
grid-template-columns: repeat(2, minmax(0, 1fr));
}
.filter-group-wide {
grid-column: span 2;
}
.filter-actions {
grid-column: span 2;
justify-content: flex-start;
}
.pareto-layout {
grid-template-columns: 1fr;
}
}
/* <=760px (mobile): single-column filters, 2 KPI cards per row, stacked checkboxes. */
@media (max-width: 760px) {
.reject-summary-row {
grid-template-columns: repeat(2, minmax(0, 1fr));
}
.filter-panel {
grid-template-columns: 1fr;
}
.filter-group-wide,
.filter-actions {
grid-column: span 1;
}
.checkbox-row {
flex-direction: column;
align-items: flex-start;
}
}

View File

@@ -19,6 +19,7 @@ export default defineConfig(({ mode }) => ({
'hold-detail': resolve(__dirname, 'src/hold-detail/index.html'), 'hold-detail': resolve(__dirname, 'src/hold-detail/index.html'),
'hold-overview': resolve(__dirname, 'src/hold-overview/index.html'), 'hold-overview': resolve(__dirname, 'src/hold-overview/index.html'),
'hold-history': resolve(__dirname, 'src/hold-history/index.html'), 'hold-history': resolve(__dirname, 'src/hold-history/index.html'),
'reject-history': resolve(__dirname, 'src/reject-history/index.html'),
'resource-status': resolve(__dirname, 'src/resource-status/index.html'), 'resource-status': resolve(__dirname, 'src/resource-status/index.html'),
'resource-history': resolve(__dirname, 'src/resource-history/index.html'), 'resource-history': resolve(__dirname, 'src/resource-history/index.html'),
'job-query': resolve(__dirname, 'src/job-query/main.js'), 'job-query': resolve(__dirname, 'src/job-query/main.js'),

View File

@@ -1,6 +1,6 @@
## Context ## Context
目前 `query-tool` 僅提供單點查詢 reject 資訊,沒有針對歷史趨勢、原因分布與績效指標的完整頁面。`DW_MES_LOTREJECTHISTORY` 存在同一 `HISTORYMAINLINEID` 對應多筆原因紀錄的特性,直接彙總 `MOVEINQTY` 會造成分母膨脹讓報廢率失真。另一方面現有語意中「reject 五欄合計」與 `DEFECTQTY` 曾被混用,導致跨頁面解讀不一致。 目前 `query-tool` 僅提供單點查詢 reject 資訊,沒有針對歷史趨勢、原因分布與績效指標的完整頁面。`DW_MES_LOTREJECTHISTORY` 存在同一 `HISTORYMAINLINEID` 對應多筆原因紀錄的特性,直接彙總 `MOVEINQTY` 會造成分母膨脹讓報廢率失真。另一方面現有語意中「reject 五欄合計」與 `DEFECTQTY` 曾被混用,導致跨頁面解讀不一致。新增可用資料表 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` 也帶來新政策需求:`ENABLE_FLAG='Y'` 的報廢原因應預設不納入良率計算,並允許使用者切換是否納入。
此變更需要跨越前端(新報表頁)、後端(新 API + service + SQL、與治理層route contract、drawer/page registry、coverage test屬於跨模組整合型設計。 此變更需要跨越前端(新報表頁)、後端(新 API + service + SQL、與治理層route contract、drawer/page registry、coverage test屬於跨模組整合型設計。
@@ -11,6 +11,7 @@
- 固化兩條指標語義並列: - 固化兩條指標語義並列:
- 扣帳報廢 `REJECT_TOTAL_QTY` - 扣帳報廢 `REJECT_TOTAL_QTY`
- 不扣帳報廢 `DEFECT_QTY` - 不扣帳報廢 `DEFECT_QTY`
- 將 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE``ENABLE_FLAG='Y'`)納入良率排除政策,且提供 UI/API 可切換納入模式。
- 以事件層級去重 `MOVEIN_QTY`,避免因 `HISTORYMAINLINEID` 多筆造成比率失真。 - 以事件層級去重 `MOVEIN_QTY`,避免因 `HISTORYMAINLINEID` 多筆造成比率失真。
- 完整納入現有 pure Vite + portal-shell + route contract 治理流程。 - 完整納入現有 pure Vite + portal-shell + route contract 治理流程。
- 在視覺上清楚區分 reject 與 defect避免操作端誤判。 - 在視覺上清楚區分 reject 與 defect避免操作端誤判。
@@ -63,12 +64,18 @@
**Visual structure:** **Visual structure:**
- Header漸層標題區顯示頁名、資料更新時間、語義說明 badge扣帳/不扣帳)。 - Header漸層標題區顯示頁名、資料更新時間、語義說明 badge扣帳/不扣帳)。
- Filter Card日期區間 + 維度篩選(站群/站點/產品線/原因),含查詢清除 - Filter Card至少提供三個核心篩選器並含查詢/清除動作:
- 時間篩選(`start_date`/`end_date`
- 原因篩選(`LOSSREASONNAME`
- `WORKCENTER_GROUP` 篩選(沿用既有頁面篩選體驗與資料來源)
- KPI Row8 卡):`MOVEIN_QTY``REJECT_TOTAL_QTY``DEFECT_QTY`、兩種 rate、`REJECT_SHARE_PCT`、受影響 lot/workorder。 - KPI Row8 卡):`MOVEIN_QTY``REJECT_TOTAL_QTY``DEFECT_QTY`、兩種 rate、`REJECT_SHARE_PCT`、受影響 lot/workorder。
- Trend Row - Trend Row
- 左圖:`REJECT_TOTAL_QTY` vs `DEFECT_QTY`(量) - 左圖:`REJECT_TOTAL_QTY` vs `DEFECT_QTY`(量)
- 右圖:`REJECT_RATE_PCT` vs `DEFECT_RATE_PCT`(率) - 右圖:`REJECT_RATE_PCT` vs `DEFECT_RATE_PCT`(率)
- Pareto + Detail原因 Pareto支援 metric mode 切換)與可分頁明細表。 - Pareto + Detail報廢量 vs 報廢原因 Pareto支援 metric mode與可分頁明細表。
- Pareto 預設啟用「僅顯示累計前 80%」模式(以目前篩選後資料集計算)。
- 提供開關切換完整 Pareto關閉前 80% 模式時,顯示篩選後的全部原因。
- 視覺與互動可參考 `WIP OVER VIEW` 既有 Pareto 呈現方式,保持使用者認知一致。
**Visual semantics:** **Visual semantics:**
- Reject扣帳使用暖色語義紅/橘系) - Reject扣帳使用暖色語義紅/橘系)
@@ -96,21 +103,60 @@
- 報表是對外分析依據,語義清晰優先於短期縮寫便利。 - 報表是對外分析依據,語義清晰優先於短期縮寫便利。
- 與 field-name-consistency 治理要求一致。 - 與 field-name-consistency 治理要求一致。
### D7: 納入「不計良率報廢」政策並提供可切換模式
**Decision:** 以 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE``ENABLE_FLAG='Y'` 為政策清單,預設排除該類報廢於良率相關計算,並新增 `include_excluded_scrap` 參數(預設 `false`)讓使用者可選擇納入。
**Policy scope:**
- 影響 summary/trend/reason-pareto/list/export 的同一套查詢語義。
- 預設模式下,標記為排除的報廢原因不進入良率計算;切換納入後,回到完整資料集計算。
- API 回應 `meta` 顯示目前是否啟用排除政策,前端在 filter 區顯示「納入不計良率報廢」切換開關。
**Why:**
- 業務規則已明示 `ENABLE_FLAG='Y'` 代表不納入良率計算,應在報表層落地。
- 提供切換能兼顧日常看板(排除模式)與稽核追查(納入模式)。
**Alternatives considered:**
- 永久硬排除:簡單但不利追溯與跨單位對帳。
- 完全不排除:違反既有良率定義。
### D8: 排除清單採「L2 Redis + L1 記憶體」每日全表快取
**Decision:** 新增 `scrap_reason_exclusion_cache` 模組採現有快取分層模式Redis 作為跨 worker 共用快取L2process memory 作為快速讀取層L1每日全表刷新一次啟動時先載入。
**Refresh policy:**
- 啟動時進行首次載入。
- 每 24 小時刷新一次(可由環境變數覆寫)。
- Redis 不可用時,自動退化為 in-memory 快取,並在健康檢查/日誌揭露降級狀態。
**Why:**
- 表筆數小(目前約 36 筆),適合全表快取,不需每次 query join DB。
- 共享式 Redis 可避免多 gunicorn worker 間資料不一致。
- 延續專案既有快取策略,降低維運認知成本。
**Alternatives considered:**
- 僅記憶體快取:實作最簡單,但多 worker 會各自持有版本。
- 每次即時查表:邏輯單純,但額外增加 Oracle 往返成本。
## Risks / Trade-offs ## Risks / Trade-offs
- **[基底 SQL 單一來源造成查詢負載偏高]** → 先以日期與維度條件收斂、list/export 加 rate limit必要時再追加快取或物化。 - **[基底 SQL 單一來源造成查詢負載偏高]** → 先以日期與維度條件收斂、list/export 加 rate limit必要時再追加快取或物化。
- **[使用者沿用舊語意理解 defect]** → UI 顯示語義說明 badge + tooltip匯出欄位採顯式命名。 - **[使用者沿用舊語意理解 defect]** → UI 顯示語義說明 badge + tooltip匯出欄位採顯式命名。
- **[Pareto 指標切換造成理解成本]** → 預設以 `REJECT_TOTAL_QTY` 顯示,並保留清楚的 toggle label。 - **[Pareto 指標切換造成理解成本]** → 預設以 `REJECT_TOTAL_QTY` 顯示,並保留清楚的 toggle label。
- **[報廢原因對應鍵格式不一致]** → 在 service 層加入 reason normalization 規則trim/大小寫一致化,必要時切取代碼前綴),並在測試覆蓋。
- **[排除政策切換導致跨報表數值差異爭議]** → API/前端都回傳並顯示 `include_excluded_scrap` 狀態與政策提示文字。
- **[Redis 不可用導致快取行為不一致]** → 採 L1 fallback並透過 health/admin 狀態揭露快取降級。
- **[路由治理漏登記導致 shell 無法導航]** → contract parity test + page_status 驗證列為必做任務。 - **[路由治理漏登記導致 shell 無法導航]** → contract parity test + page_status 驗證列為必做任務。
- **[明細資料量大造成前端卡頓]** → 後端分頁、預設 `per_page=50`,並避免一次性全量載入。 - **[明細資料量大造成前端卡頓]** → 後端分頁、預設 `per_page=50`,並避免一次性全量載入。
## Migration Plan ## Migration Plan
1. 建立後端 SQL/service/routes先讓 API 可單獨驗證)。 1. 建立後端 SQL/service/routes先讓 API 可單獨驗證)。
2. 建立前端 reject-history 頁面與元件(先接 summary/trend再接 pareto/list/export)。 2. 建立 `scrap_reason_exclusion_cache`(全表快取 + 每日刷新 + fallback)。
3. 整合 shell 治理資產:`routeContracts``nativeModuleRegistry``page_status`、Flask page route 3. 建立前端 reject-history 頁面與元件(先接 summary/trend再接 pareto/list/export
4. 補齊測試service、routes、route-contract parity、前端 smoke。 4. 整合 shell 治理資產:`routeContracts``nativeModuleRegistry``page_status`、Flask page route。
5. 先以 `dev` 狀態上線到抽屜,完成 UAT 後調整為 `released` 5. 補齊測試service、routes、cache、route-contract parity、前端 smoke
6. 先以 `dev` 狀態上線到抽屜,完成 UAT 後調整為 `released`
**Rollback strategy:** **Rollback strategy:**
- 將 `/reject-history` 從 page registry 標記為隱藏或 `dev` 並停用導航入口。 - 將 `/reject-history` 從 page registry 標記為隱藏或 `dev` 並停用導航入口。

View File

@@ -1,6 +1,6 @@
## Why ## Why
目前專案僅在 `query-tool` 提供偏即時/點查型的報廢資訊,缺少可追蹤趨勢與績效的「報廢歷史」專用報表。資料評估也顯示 `DW_MES_LOTREJECTHISTORY` 同一 `HISTORYMAINLINEID` 會對應多筆原因紀錄,若直接加總 `MOVEINQTY` 會造成分母重複、報廢率失真;同時既有查詢對 reject/defect 命名語義不一致,容易誤解指標。現在應在既有 portal-shell + Vite + route contract 架構下,建立一個語義明確且可治理的歷史報表頁。 目前專案僅在 `query-tool` 提供偏即時/點查型的報廢資訊,缺少可追蹤趨勢與績效的「報廢歷史」專用報表。資料評估也顯示 `DW_MES_LOTREJECTHISTORY` 同一 `HISTORYMAINLINEID` 會對應多筆原因紀錄,若直接加總 `MOVEINQTY` 會造成分母重複、報廢率失真;同時既有查詢對 reject/defect 命名語義不一致,容易誤解指標。另 IT 新開放 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE`,其中 `ENABLE_FLAG='Y'` 代表「不納入良率計算」報廢原因,提案需把此政策納入計算流程。現在應在既有 portal-shell + Vite + route contract 架構下,建立一個語義明確且可治理的歷史報表頁。
## What Changes ## What Changes
@@ -10,6 +10,10 @@
- 扣帳報廢:`REJECT_TOTAL_QTY = REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY` - 扣帳報廢:`REJECT_TOTAL_QTY = REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY`
- 不扣帳報廢:`DEFECT_QTY = DEFECTQTY` - 不扣帳報廢:`DEFECT_QTY = DEFECTQTY`
- 以事件層級去重規則處理分母(`MOVEIN_QTY``HISTORYMAINLINEID` 為主鍵去重),避免多原因拆單導致比率失真。 - 以事件層級去重規則處理分母(`MOVEIN_QTY``HISTORYMAINLINEID` 為主鍵去重),避免多原因拆單導致比率失真。
- 納入 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` 政策(`ENABLE_FLAG='Y'`
- 預設排除「不納入良率計算」報廢原因
- 提供可選開關讓使用者決定是否納入該類報廢
- 新增排除清單全表快取(每日一次)機制,採共享快取優先策略,降低每次查詢重複讀表成本。
- 明確定義 UI/API/匯出欄位語義避免沿用「defect=五欄合計」這類歷史命名混淆,確保報表對外語意一致。 - 明確定義 UI/API/匯出欄位語義避免沿用「defect=五欄合計」這類歷史命名混淆,確保報表對外語意一致。
- 不變更既有 `query-tool` 現有頁面行為與既有 API 回應欄位(此變更先聚焦新頁能力)。 - 不變更既有 `query-tool` 現有頁面行為與既有 API 回應欄位(此變更先聚焦新頁能力)。
@@ -19,6 +23,7 @@
- `reject-history-page`: 新增報廢歷史查詢頁面提供篩選、KPI、趨勢、原因分析、明細查詢與匯出。 - `reject-history-page`: 新增報廢歷史查詢頁面提供篩選、KPI、趨勢、原因分析、明細查詢與匯出。
- `reject-history-api`: 新增報廢歷史 API 能力與資料聚合邏輯支援報表層的摘要、趨勢、Pareto、明細資料來源。 - `reject-history-api`: 新增報廢歷史 API 能力與資料聚合邏輯支援報表層的摘要、趨勢、Pareto、明細資料來源。
- `reject-metric-semantics`: 新增 reject/defect 指標語義規範,要求五個 reject 欄位合計與 `DEFECTQTY` 必須分開呈現、分開計算、分開命名。 - `reject-metric-semantics`: 新增 reject/defect 指標語義規範,要求五個 reject 欄位合計與 `DEFECTQTY` 必須分開呈現、分開計算、分開命名。
- `reject-yield-exclusion-policy`: 新增「不納入良率計算」政策能力,依 `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE.ENABLE_FLAG='Y'` 控制預設排除,並支援使用者切換是否納入。
### Modified Capabilities ### Modified Capabilities
- `unified-shell-route-coverage`: 新增 `/reject-history` 後,路由契約清單與前後端契約對照規則需同步更新。 - `unified-shell-route-coverage`: 新增 `/reject-history` 後,路由契約清單與前後端契約對照規則需同步更新。
@@ -35,6 +40,7 @@
- 後端: - 後端:
- 新增 `src/mes_dashboard/routes/reject_history_routes.py` - 新增 `src/mes_dashboard/routes/reject_history_routes.py`
- 新增 `src/mes_dashboard/services/reject_history_service.py` - 新增 `src/mes_dashboard/services/reject_history_service.py`
- 新增 `src/mes_dashboard/services/scrap_reason_exclusion_cache.py`
- 新增 `src/mes_dashboard/sql/reject_history/*.sql` - 新增 `src/mes_dashboard/sql/reject_history/*.sql`
- 更新 `src/mes_dashboard/routes/__init__.py` - 更新 `src/mes_dashboard/routes/__init__.py`
- 更新 `src/mes_dashboard/app.py``/reject-history` 靜態頁 route - 更新 `src/mes_dashboard/app.py``/reject-history` 靜態頁 route
@@ -44,9 +50,11 @@
- 測試: - 測試:
- 新增 `tests/test_reject_history_service.py` - 新增 `tests/test_reject_history_service.py`
- 新增 `tests/test_reject_history_routes.py` - 新增 `tests/test_reject_history_routes.py`
- 新增 `tests/test_scrap_reason_exclusion_cache.py`
- 補充 route coverage / contract parity / e2e smoke - 補充 route coverage / contract parity / e2e smoke
- 資料語義: - 資料語義:
- 報表需同時呈現 `REJECT_TOTAL_QTY`(扣帳報廢)與 `DEFECT_QTY`(不扣帳報廢) - 報表需同時呈現 `REJECT_TOTAL_QTY`(扣帳報廢)與 `DEFECT_QTY`(不扣帳報廢)
- `ENABLE_FLAG='Y'` 報廢原因預設不納入良率計算,且可由使用者選擇改為納入
- 不以單一欄位混用兩種語義,避免誤判製程損失 - 不以單一欄位混用兩種語義,避免誤判製程損失
- 依賴: - 依賴:
- 不新增第三方套件,沿用現有 Flask + Vue + Vite + SQLLoader + QueryBuilder 架構 - 不新增第三方套件,沿用現有 Flask + Vue + Vite + SQLLoader + QueryBuilder 架構

View File

@@ -19,6 +19,23 @@ The API SHALL provide aggregated summary metrics for the selected filter context
- **THEN** response SHALL be `{ success: true, data: { ... } }` - **THEN** response SHALL be `{ success: true, data: { ... } }`
- **THEN** data SHALL include `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, `REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`, `AFFECTED_LOT_COUNT`, and `AFFECTED_WORKORDER_COUNT` - **THEN** data SHALL include `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, `REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`, `AFFECTED_LOT_COUNT`, and `AFFECTED_WORKORDER_COUNT`
### Requirement: Reject History API SHALL support yield-exclusion policy toggle
The API SHALL support excluding or including policy-marked scrap reasons through a shared query parameter.
#### Scenario: Default policy mode
- **WHEN** reject-history endpoints are called without `include_excluded_scrap`
- **THEN** `include_excluded_scrap` SHALL default to `false`
- **THEN** rows mapped to `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE.ENABLE_FLAG='Y'` SHALL be excluded from yield-related calculations
#### Scenario: Explicitly include policy-marked scrap
- **WHEN** `include_excluded_scrap=true` is provided
- **THEN** policy-marked rows SHALL be included in summary/trend/pareto/list/export calculations
- **THEN** API response `meta` SHALL include the effective `include_excluded_scrap` value
#### Scenario: Invalid toggle value
- **WHEN** `include_excluded_scrap` is not parseable as boolean
- **THEN** the API SHALL return HTTP 400 with a descriptive validation error
### Requirement: Reject History API SHALL provide trend endpoint ### Requirement: Reject History API SHALL provide trend endpoint
The API SHALL return time-series trend data for quantity and rate metrics. The API SHALL return time-series trend data for quantity and rate metrics.
@@ -76,6 +93,18 @@ The service SHALL load SQL from dedicated files under `src/mes_dashboard/sql/rej
- **THEN** user-supplied filters SHALL be passed through bind parameters - **THEN** user-supplied filters SHALL be passed through bind parameters
- **THEN** user input SHALL NOT be interpolated into SQL strings directly - **THEN** user input SHALL NOT be interpolated into SQL strings directly
### Requirement: Reject History API SHALL use cached exclusion-policy source
The API SHALL read exclusion-policy reasons from cached `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` data instead of querying Oracle on every request.
#### Scenario: Enabled exclusions only
- **WHEN** exclusion-policy data is loaded
- **THEN** only rows with `ENABLE_FLAG='Y'` SHALL be treated as active exclusions
#### Scenario: Daily full-table cache refresh
- **WHEN** exclusion cache is initialized
- **THEN** the full table SHALL be loaded and refreshed at least once per 24 hours
- **THEN** Redis SHOULD be used as shared cache when available, with in-memory fallback when unavailable
### Requirement: Reject History API SHALL apply rate limiting on expensive endpoints ### Requirement: Reject History API SHALL apply rate limiting on expensive endpoints
The API SHALL rate-limit high-cost endpoints to protect Oracle and application resources. The API SHALL rate-limit high-cost endpoints to protect Oracle and application resources.

View File

@@ -14,6 +14,29 @@ The page SHALL provide a filter area for date range and major production dimensi
- **WHEN** user clicks "清除條件" - **WHEN** user clicks "清除條件"
- **THEN** all filters SHALL reset to defaults and all sections SHALL reload - **THEN** all filters SHALL reset to defaults and all sections SHALL reload
#### Scenario: Required core filters are present
- **WHEN** the filter panel is rendered
- **THEN** it SHALL include `start_date/end_date` time filter controls
- **THEN** it SHALL include reason filter control
- **THEN** it SHALL include `WORKCENTER_GROUP` filter control
### Requirement: Reject History page SHALL expose yield-exclusion toggle control
The page SHALL let users decide whether to include policy-marked scrap in yield calculations.
#### Scenario: Default toggle state
- **WHEN** the page is first loaded
- **THEN** "納入不計良率報廢" toggle SHALL default to OFF
- **THEN** requests SHALL be sent with `include_excluded_scrap=false`
#### Scenario: Toggle affects all sections
- **WHEN** user turns ON/OFF the toggle and clicks "查詢"
- **THEN** summary, trend, pareto, and list sections SHALL reload under the selected policy mode
- **THEN** export action SHALL use the same toggle state
#### Scenario: Policy status visibility
- **WHEN** data is rendered
- **THEN** the UI SHALL show a clear badge/text indicating whether policy-marked scrap is currently excluded or included
### Requirement: Reject History page SHALL present KPI cards with split reject/defect semantics ### Requirement: Reject History page SHALL present KPI cards with split reject/defect semantics
The page SHALL display KPI cards that simultaneously show charge-off reject and non-charge-off defect metrics. The page SHALL display KPI cards that simultaneously show charge-off reject and non-charge-off defect metrics.
@@ -49,6 +72,16 @@ The page SHALL provide a Pareto view for loss reasons and support downstream fil
- **THEN** items SHALL be sorted by selected metric descending - **THEN** items SHALL be sorted by selected metric descending
- **THEN** a cumulative percentage line SHALL be shown - **THEN** a cumulative percentage line SHALL be shown
#### Scenario: Default 80% cumulative display mode
- **WHEN** the page first loads Pareto
- **THEN** it SHALL default to "only cumulative top 80%" mode
- **THEN** Pareto SHALL only render categories within the cumulative 80% threshold under current filters
#### Scenario: Full Pareto toggle mode
- **WHEN** user turns OFF the 80% cumulative display mode
- **THEN** Pareto SHALL render all categories after applying current filters
- **THEN** switching mode SHALL NOT reset existing time/reason/workcenter-group filters
#### Scenario: Pareto click filtering #### Scenario: Pareto click filtering
- **WHEN** user clicks a Pareto bar or row - **WHEN** user clicks a Pareto bar or row
- **THEN** the selected reason SHALL become an active filter chip - **THEN** the selected reason SHALL become an active filter chip

View File

@@ -16,6 +16,21 @@ The system SHALL compute `DEFECT_QTY` only from `DEFECTQTY` and SHALL NOT merge
- **THEN** `DEFECT_QTY` SHALL be non-zero - **THEN** `DEFECT_QTY` SHALL be non-zero
- **THEN** `REJECT_TOTAL_QTY` SHALL remain 0 - **THEN** `REJECT_TOTAL_QTY` SHALL remain 0
### Requirement: Yield-exclusion policy SHALL follow ERP exclusion table
The system SHALL use `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` as the policy source for "not included in yield" scrap reasons.
#### Scenario: Enabled policy rows
- **WHEN** exclusion policy is evaluated
- **THEN** only rows with `ENABLE_FLAG='Y'` SHALL be considered exclusion rules
#### Scenario: Default exclusion behavior
- **WHEN** `include_excluded_scrap=false` (default)
- **THEN** source rows matching enabled exclusion reasons SHALL be excluded before computing yield-related metrics
#### Scenario: Optional inclusion override
- **WHEN** `include_excluded_scrap=true`
- **THEN** the same matched rows SHALL be included back into metric calculations
### Requirement: Move-in denominator SHALL be deduplicated at event level ### Requirement: Move-in denominator SHALL be deduplicated at event level
The system SHALL deduplicate `MOVEIN_QTY` by event key before rate calculations. The system SHALL deduplicate `MOVEIN_QTY` by event key before rate calculations.

View File

@@ -0,0 +1,60 @@
## 1. Contract and Skeleton Setup
- [x] 1.1 Create backend blueprint scaffold `src/mes_dashboard/routes/reject_history_routes.py` and register it in `src/mes_dashboard/routes/__init__.py`
- [x] 1.2 Create service scaffold `src/mes_dashboard/services/reject_history_service.py` with SQL loader helpers
- [x] 1.3 Create frontend entry scaffold `frontend/src/reject-history/index.html`, `frontend/src/reject-history/main.js`, and `frontend/src/reject-history/App.vue`
- [x] 1.4 Add Vite input for `reject-history` in `frontend/vite.config.js`
## 2. SQL and Metric Semantics Implementation
- [x] 2.1 Finalize base query `src/mes_dashboard/sql/reject_history/performance_daily.sql` for five-reject-sum + defect separation
- [x] 2.2 Add API-specific SQL files in `src/mes_dashboard/sql/reject_history/` (summary, trend, reason_pareto, list, export)
- [x] 2.3 Implement `MOVEIN_QTY` dedupe by `HISTORYMAINLINEID` with deterministic fallback key
- [x] 2.4 Implement consistent rate calculations (`REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`) with zero-denominator handling
- [x] 2.5 Integrate `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` policy mapping (`ENABLE_FLAG='Y'`) into reject-history aggregation flow
- [x] 2.6 Create `src/mes_dashboard/services/scrap_reason_exclusion_cache.py` with daily full-table refresh (Redis preferred + in-memory fallback)
## 3. Backend API Routes
- [x] 3.1 Implement `GET /api/reject-history/summary` with date/filter validation
- [x] 3.2 Implement `GET /api/reject-history/trend` with `granularity` validation (`day|week|month`)
- [x] 3.3 Implement `GET /api/reject-history/reason-pareto` with `metric_mode` validation (`reject_total|defect`)
- [x] 3.4 Implement `GET /api/reject-history/list` with paging bounds and reason/category filters
- [x] 3.5 Implement `GET /api/reject-history/export` and CSV output contract
- [x] 3.6 Apply configured rate limiting to list/export endpoints
- [x] 3.7 Add shared query param `include_excluded_scrap` (default false) and return effective policy mode in response meta
## 4. Frontend Visual and Interaction Implementation
- [x] 4.1 Build page header with title, data timestamp, and semantic badges for charge-off reject vs non-charge-off defect
- [x] 4.2 Build filter panel with required controls (`start_date/end_date`, reason, `WORKCENTER_GROUP`) plus query/clear actions, and wire it to all API calls
- [x] 4.3 Implement KPI card row (8 cards) with warm/cool semantic color lanes and zh-TW number formatting
- [x] 4.4 Implement dual trend charts (quantity trend + rate trend) using ECharts with synchronized date buckets
- [x] 4.5 Implement reason Pareto chart/table with `metric_mode` switch and cumulative percentage line, referencing WIP OVER VIEW interaction pattern
- [x] 4.6 Add Pareto mode toggle: default "top cumulative 80%" and optional "show all filtered categories"
- [x] 4.7 Implement detail table with pagination, active filter chips, and empty/error states
- [x] 4.8 Implement CSV export action using current filter context
- [x] 4.9 Add responsive rules so filter/cards/charts/table stay usable on tablet/mobile widths
- [x] 4.10 Add "納入不計良率報廢" toggle in filter panel and wire to all API calls + export
## 5. Shell and Route Governance Integration
- [x] 5.1 Add `/reject-history` contract entry to `frontend/src/portal-shell/routeContracts.js`
- [x] 5.2 Add `/reject-history` loader to `frontend/src/portal-shell/nativeModuleRegistry.js`
- [x] 5.3 Add `/reject-history` page metadata (drawer/order/status) to `data/page_status.json`
- [x] 5.4 Add Flask page route `/reject-history` using `send_from_directory` with dist fallback HTML
## 6. Tests and Quality Gates
- [x] 6.1 Add service tests in `tests/test_reject_history_service.py` covering formulas, dedupe, and edge cases
- [x] 6.2 Add route tests in `tests/test_reject_history_routes.py` covering validation, payload shape, and rate-limit behavior
- [x] 6.3 Add/extend route-contract parity and shell coverage tests for `/reject-history`
- [x] 6.4 Add frontend smoke/integration test for query flow and major visual sections
- [x] 6.5 Add exclusion-policy tests (`ENABLE_FLAG` handling, default exclude, include override, cache fallback path)
## 7. Documentation and Rollout
- [x] 7.1 Update implementation notes under `docs/reject_history_performance.md` to match API/UI field names
- [x] 7.2 Document exclusion-policy behavior and user toggle semantics in reject-history docs
- [x] 7.3 Document rollout policy (`dev` visibility first, then `released`) and rollback path
- [x] 7.4 Run end-to-end verification checklist and capture evidence before implementation handoff

View File

@@ -1,53 +0,0 @@
## 1. Contract and Skeleton Setup
- [ ] 1.1 Create backend blueprint scaffold `src/mes_dashboard/routes/reject_history_routes.py` and register it in `src/mes_dashboard/routes/__init__.py`
- [ ] 1.2 Create service scaffold `src/mes_dashboard/services/reject_history_service.py` with SQL loader helpers
- [ ] 1.3 Create frontend entry scaffold `frontend/src/reject-history/index.html`, `frontend/src/reject-history/main.js`, and `frontend/src/reject-history/App.vue`
- [ ] 1.4 Add Vite input for `reject-history` in `frontend/vite.config.js`
## 2. SQL and Metric Semantics Implementation
- [ ] 2.1 Finalize base query `src/mes_dashboard/sql/reject_history/performance_daily.sql` for five-reject-sum + defect separation
- [ ] 2.2 Add API-specific SQL files in `src/mes_dashboard/sql/reject_history/` (summary, trend, reason_pareto, list, export)
- [ ] 2.3 Implement `MOVEIN_QTY` dedupe by `HISTORYMAINLINEID` with deterministic fallback key
- [ ] 2.4 Implement consistent rate calculations (`REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`) with zero-denominator handling
## 3. Backend API Routes
- [ ] 3.1 Implement `GET /api/reject-history/summary` with date/filter validation
- [ ] 3.2 Implement `GET /api/reject-history/trend` with `granularity` validation (`day|week|month`)
- [ ] 3.3 Implement `GET /api/reject-history/reason-pareto` with `metric_mode` validation (`reject_total|defect`)
- [ ] 3.4 Implement `GET /api/reject-history/list` with paging bounds and reason/category filters
- [ ] 3.5 Implement `GET /api/reject-history/export` and CSV output contract
- [ ] 3.6 Apply configured rate limiting to list/export endpoints
## 4. Frontend Visual and Interaction Implementation
- [ ] 4.1 Build page header with title, data timestamp, and semantic badges for charge-off reject vs non-charge-off defect
- [ ] 4.2 Build filter panel (date range + dimensions + query/clear actions) and wire it to all API calls
- [ ] 4.3 Implement KPI card row (8 cards) with warm/cool semantic color lanes and zh-TW number formatting
- [ ] 4.4 Implement dual trend charts (quantity trend + rate trend) using ECharts with synchronized date buckets
- [ ] 4.5 Implement reason Pareto chart/table with `metric_mode` switch and cumulative percentage line
- [ ] 4.6 Implement detail table with pagination, active filter chips, and empty/error states
- [ ] 4.7 Implement CSV export action using current filter context
- [ ] 4.8 Add responsive rules so filter/cards/charts/table stay usable on tablet/mobile widths
## 5. Shell and Route Governance Integration
- [ ] 5.1 Add `/reject-history` contract entry to `frontend/src/portal-shell/routeContracts.js`
- [ ] 5.2 Add `/reject-history` loader to `frontend/src/portal-shell/nativeModuleRegistry.js`
- [ ] 5.3 Add `/reject-history` page metadata (drawer/order/status) to `data/page_status.json`
- [ ] 5.4 Add Flask page route `/reject-history` using `send_from_directory` with dist fallback HTML
## 6. Tests and Quality Gates
- [ ] 6.1 Add service tests in `tests/test_reject_history_service.py` covering formulas, dedupe, and edge cases
- [ ] 6.2 Add route tests in `tests/test_reject_history_routes.py` covering validation, payload shape, and rate-limit behavior
- [ ] 6.3 Add/extend route-contract parity and shell coverage tests for `/reject-history`
- [ ] 6.4 Add frontend smoke/integration test for query flow and major visual sections
## 7. Documentation and Rollout
- [ ] 7.1 Update implementation notes under `docs/reject_history_performance.md` to match API/UI field names
- [ ] 7.2 Document rollout policy (`dev` visibility first, then `released`) and rollback path
- [ ] 7.3 Run end-to-end verification checklist and capture evidence before implementation handoff

View File

@@ -1,9 +1,6 @@
## Purpose ## Purpose
Define stable requirements for field-name-consistency. Define stable requirements for field-name-consistency.
## Requirements ## Requirements
### Requirement: UI and Export Fields SHALL Have a Consistent Contract ### Requirement: UI and Export Fields SHALL Have a Consistent Contract
The system SHALL define and apply a consistent contract among UI column labels, API keys, and export headers for report/query pages. The system SHALL define and apply a consistent contract among UI column labels, API keys, and export headers for report/query pages.
@@ -14,3 +11,25 @@ The system SHALL define and apply a consistent contract among UI column labels,
#### Scenario: Resource history field alignment #### Scenario: Resource history field alignment
- **WHEN** resource history detail table shows KPI columns - **WHEN** resource history detail table shows KPI columns
- **THEN** columns required by export semantics (including Availability%) SHALL be present or explicitly mapped - **THEN** columns required by export semantics (including Availability%) SHALL be present or explicitly mapped
### Requirement: Reject and defect metric names SHALL remain semantically consistent across UI/API/export
The system SHALL use explicit, stable names for charge-off reject and non-charge-off defect metrics across all output surfaces.
#### Scenario: UI and API key alignment
- **WHEN** summary/trend/list payloads are rendered on reject-history page
- **THEN** UI labels for reject metrics SHALL map to `REJECT_TOTAL_QTY` and related reject-rate fields
- **THEN** UI labels for defect metrics SHALL map to `DEFECT_QTY` and defect-rate fields
#### Scenario: Export header alignment
- **WHEN** reject-history CSV export is generated
- **THEN** CSV headers SHALL include both `REJECT_TOTAL_QTY` and `DEFECT_QTY`
- **THEN** header names SHALL preserve the same semantic meaning as API fields
### Requirement: Reject component columns SHALL be explicitly distinguished from defect columns
The system SHALL prevent ambiguous naming that collapses reject components and defect into a single term.
#### Scenario: Component and aggregate coexistence
- **WHEN** detailed records are presented
- **THEN** reject component fields (`REJECTQTY`, `STANDBYQTY`, `QTYTOPROCESS`, `INPROCESSQTY`, `PROCESSEDQTY`) SHALL be distinguishable from `DEFECT_QTY`
- **THEN** aggregate `REJECT_TOTAL_QTY` SHALL be clearly identified as component sum, not defect

View File

@@ -0,0 +1,117 @@
# reject-history-api Specification
## Purpose
TBD - created by archiving change reject-history-query-page. Update Purpose after archive.
## Requirements
### Requirement: Reject History API SHALL validate required query parameters
The API SHALL validate date parameters and basic paging bounds before executing database work.
#### Scenario: Missing required dates
- **WHEN** a reject-history endpoint requiring date range is called without `start_date` or `end_date`
- **THEN** the API SHALL return HTTP 400 with a descriptive validation error
#### Scenario: Invalid date order
- **WHEN** `end_date` is earlier than `start_date`
- **THEN** the API SHALL return HTTP 400 and SHALL NOT run SQL queries
### Requirement: Reject History API SHALL provide summary metrics endpoint
The API SHALL provide aggregated summary metrics for the selected filter context.
#### Scenario: Summary response payload
- **WHEN** `GET /api/reject-history/summary` is called with valid filters
- **THEN** response SHALL be `{ success: true, data: { ... } }`
- **THEN** data SHALL include `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, `REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`, `AFFECTED_LOT_COUNT`, and `AFFECTED_WORKORDER_COUNT`
### Requirement: Reject History API SHALL support yield-exclusion policy toggle
The API SHALL support excluding or including policy-marked scrap reasons through a shared query parameter.
#### Scenario: Default policy mode
- **WHEN** reject-history endpoints are called without `include_excluded_scrap`
- **THEN** `include_excluded_scrap` SHALL default to `false`
- **THEN** rows mapped to `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE.ENABLE_FLAG='Y'` SHALL be excluded from yield-related calculations
#### Scenario: Explicitly include policy-marked scrap
- **WHEN** `include_excluded_scrap=true` is provided
- **THEN** policy-marked rows SHALL be included in summary/trend/pareto/list/export calculations
- **THEN** API response `meta` SHALL include the effective `include_excluded_scrap` value
#### Scenario: Invalid toggle value
- **WHEN** `include_excluded_scrap` is not parseable as boolean
- **THEN** the API SHALL return HTTP 400 with a descriptive validation error
### Requirement: Reject History API SHALL provide trend endpoint
The API SHALL return time-series trend data for quantity and rate metrics.
#### Scenario: Trend response structure
- **WHEN** `GET /api/reject-history/trend` is called
- **THEN** response SHALL be `{ success: true, data: { items: [...] } }`
- **THEN** each trend item SHALL contain bucket date, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, `REJECT_RATE_PCT`, and `DEFECT_RATE_PCT`
#### Scenario: Trend granularity
- **WHEN** `granularity` is provided as `day`, `week`, or `month`
- **THEN** the API SHALL aggregate by the requested granularity
- **THEN** invalid granularity SHALL return HTTP 400
### Requirement: Reject History API SHALL provide reason Pareto endpoint
The API SHALL return sorted reason distribution data with cumulative percentages.
#### Scenario: Pareto response payload
- **WHEN** `GET /api/reject-history/reason-pareto` is called
- **THEN** each item SHALL include `reason`, `category`, selected metric value, `pct`, and `cumPct`
- **THEN** items SHALL be sorted descending by selected metric
#### Scenario: Metric mode validation
- **WHEN** `metric_mode` is provided
- **THEN** accepted values SHALL be `reject_total` or `defect`
- **THEN** invalid `metric_mode` SHALL return HTTP 400
### Requirement: Reject History API SHALL provide paginated detail endpoint
The API SHALL return paginated detailed rows for the selected filter context.
#### Scenario: List response payload
- **WHEN** `GET /api/reject-history/list?page=1&per_page=50` is called
- **THEN** response SHALL include `{ items: [...], pagination: { page, perPage, total, totalPages } }`
- **THEN** each row SHALL include date, process dimensions, reason fields, `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, and reject component columns
#### Scenario: Paging bounds
- **WHEN** `page < 1`
- **THEN** page SHALL be treated as 1
- **WHEN** `per_page > 200`
- **THEN** `per_page` SHALL be capped at 200
### Requirement: Reject History API SHALL provide CSV export endpoint
The API SHALL provide CSV export using the same filter and metric semantics as list/query APIs.
#### Scenario: Export payload consistency
- **WHEN** `GET /api/reject-history/export` is called with valid filters
- **THEN** CSV headers SHALL include both `REJECT_TOTAL_QTY` and `DEFECT_QTY`
- **THEN** export rows SHALL follow the same semantic definitions as summary/list endpoints
### Requirement: Reject History API SHALL centralize SQL in reject_history SQL directory
The service SHALL load SQL from dedicated files under `src/mes_dashboard/sql/reject_history/`.
#### Scenario: SQL file loading
- **WHEN** reject-history service executes queries
- **THEN** SQL SHALL be loaded from files in `sql/reject_history`
- **THEN** user-supplied filters SHALL be passed through bind parameters
- **THEN** user input SHALL NOT be interpolated into SQL strings directly
### Requirement: Reject History API SHALL use cached exclusion-policy source
The API SHALL read exclusion-policy reasons from cached `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` data instead of querying Oracle on every request.
#### Scenario: Enabled exclusions only
- **WHEN** exclusion-policy data is loaded
- **THEN** only rows with `ENABLE_FLAG='Y'` SHALL be treated as active exclusions
#### Scenario: Daily full-table cache refresh
- **WHEN** exclusion cache is initialized
- **THEN** the full table SHALL be loaded and refreshed at least once per 24 hours
- **THEN** Redis SHOULD be used as shared cache when available, with in-memory fallback when unavailable
### Requirement: Reject History API SHALL apply rate limiting on expensive endpoints
The API SHALL rate-limit high-cost endpoints to protect Oracle and application resources.
#### Scenario: List and export rate limiting
- **WHEN** `/api/reject-history/list` or `/api/reject-history/export` receives excessive requests
- **THEN** configured rate limiting SHALL reject requests beyond the threshold within the time window

View File

@@ -0,0 +1,142 @@
# reject-history-page Specification
## Purpose
TBD - created by archiving change reject-history-query-page. Update Purpose after archive.
## Requirements
### Requirement: Reject History page SHALL provide filterable historical query controls
The page SHALL provide a filter area for date range and major production dimensions to drive all report sections.
#### Scenario: Default filter values
- **WHEN** the page is first loaded
- **THEN** `start_date` and `end_date` SHALL default to a valid recent range
- **THEN** all other dimension filters SHALL default to empty (no restriction)
#### Scenario: Apply and clear filters
- **WHEN** user clicks "查詢"
- **THEN** summary, trend, pareto, and list sections SHALL reload with the same filter set
- **WHEN** user clicks "清除條件"
- **THEN** all filters SHALL reset to defaults and all sections SHALL reload
#### Scenario: Required core filters are present
- **WHEN** the filter panel is rendered
- **THEN** it SHALL include `start_date/end_date` time filter controls
- **THEN** it SHALL include reason filter control
- **THEN** it SHALL include `WORKCENTER_GROUP` filter control
### Requirement: Reject History page SHALL expose yield-exclusion toggle control
The page SHALL let users decide whether to include policy-marked scrap in yield calculations.
#### Scenario: Default toggle state
- **WHEN** the page is first loaded
- **THEN** "納入不計良率報廢" toggle SHALL default to OFF
- **THEN** requests SHALL be sent with `include_excluded_scrap=false`
#### Scenario: Toggle affects all sections
- **WHEN** user turns ON/OFF the toggle and clicks "查詢"
- **THEN** summary, trend, pareto, and list sections SHALL reload under the selected policy mode
- **THEN** export action SHALL use the same toggle state
#### Scenario: Policy status visibility
- **WHEN** data is rendered
- **THEN** the UI SHALL show a clear badge/text indicating whether policy-marked scrap is currently excluded or included
### Requirement: Reject History page SHALL present KPI cards with split reject/defect semantics
The page SHALL display KPI cards that simultaneously show charge-off reject and non-charge-off defect metrics.
#### Scenario: KPI cards render core metrics
- **WHEN** summary data is loaded
- **THEN** cards SHALL include `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, `REJECT_RATE_PCT`, `DEFECT_RATE_PCT`, `REJECT_SHARE_PCT`, `AFFECTED_LOT_COUNT`, and `AFFECTED_WORKORDER_COUNT`
- **THEN** numbers SHALL use zh-TW formatting
#### Scenario: Visual distinction for semantic lanes
- **WHEN** KPI cards are rendered
- **THEN** reject-related cards SHALL use a warm-color visual lane
- **THEN** defect-related cards SHALL use a cool-color visual lane
- **THEN** page legend/badge text SHALL explicitly indicate charge-off vs non-charge-off meaning
### Requirement: Reject History page SHALL display quantity and rate trends in separate charts
The page SHALL show both quantity trend and rate trend to avoid mixing unit scales.
#### Scenario: Quantity trend chart
- **WHEN** trend data is loaded
- **THEN** the quantity trend chart SHALL show `REJECT_TOTAL_QTY` and `DEFECT_QTY` over time
- **THEN** the chart SHALL use a shared X-axis by date bucket
#### Scenario: Rate trend chart
- **WHEN** trend data is loaded
- **THEN** the rate trend chart SHALL show `REJECT_RATE_PCT` and `DEFECT_RATE_PCT` over time
- **THEN** rate values SHALL be displayed as percentages
### Requirement: Reject History page SHALL provide reason Pareto analysis
The page SHALL provide a Pareto view for loss reasons and support downstream filtering.
#### Scenario: Pareto rendering and ordering
- **WHEN** reason Pareto data is loaded
- **THEN** items SHALL be sorted by selected metric descending
- **THEN** a cumulative percentage line SHALL be shown
#### Scenario: Default 80% cumulative display mode
- **WHEN** the page first loads Pareto
- **THEN** it SHALL default to "only cumulative top 80%" mode
- **THEN** Pareto SHALL only render categories within the cumulative 80% threshold under current filters
#### Scenario: Full Pareto toggle mode
- **WHEN** user turns OFF the 80% cumulative display mode
- **THEN** Pareto SHALL render all categories after applying current filters
- **THEN** switching mode SHALL NOT reset existing time/reason/workcenter-group filters
#### Scenario: Pareto click filtering
- **WHEN** user clicks a Pareto bar or row
- **THEN** the selected reason SHALL become an active filter chip
- **THEN** detail list SHALL reload with that reason
- **THEN** clicking the same reason again SHALL clear the reason filter
### Requirement: Reject History page SHALL show paginated detail rows
The page SHALL provide a paginated detail table for investigation and traceability.
#### Scenario: Detail columns
- **WHEN** list data is loaded
- **THEN** each row SHALL include date, workcenter group, workcenter, product dimensions, reason/category, `MOVEIN_QTY`, `REJECT_TOTAL_QTY`, `DEFECT_QTY`, and component reject columns
#### Scenario: Pagination behavior
- **WHEN** total records exceed per-page size
- **THEN** Prev/Next and page summary SHALL be shown
- **THEN** changing any filter SHALL reset page to 1
### Requirement: Reject History page SHALL support CSV export from current filter context
The page SHALL allow users to export records using the exact active filters.
#### Scenario: Export with current filters
- **WHEN** user clicks "匯出 CSV"
- **THEN** export request SHALL include the current filter state and active reason filter
- **THEN** downloaded file SHALL contain both `REJECT_TOTAL_QTY` and `DEFECT_QTY`
### Requirement: Reject History page SHALL provide robust feedback states
The page SHALL provide loading, empty, and error states without breaking interactions.
#### Scenario: Initial loading
- **WHEN** first query is running
- **THEN** a loading overlay or skeleton SHALL be visible until required data sections are ready
#### Scenario: API failure
- **WHEN** any section API fails
- **THEN** a visible error banner SHALL be shown
- **THEN** already loaded sections SHALL remain interactive
#### Scenario: Empty dataset
- **WHEN** query returns no rows
- **THEN** chart and table areas SHALL show explicit empty-state messages
### Requirement: Reject History page SHALL maintain responsive visual hierarchy
The page SHALL keep the same semantic grouping across desktop and mobile layouts.
#### Scenario: Desktop layout
- **WHEN** viewport is desktop width
- **THEN** KPI cards SHALL render in multi-column layout
- **THEN** trend and pareto sections SHALL render as two-column analytical panels
#### Scenario: Mobile layout
- **WHEN** viewport width is below responsive breakpoint
- **THEN** cards and chart panels SHALL stack in a single column
- **THEN** filter controls SHALL remain operable without horizontal overflow

View File

@@ -0,0 +1,77 @@
# reject-metric-semantics Specification
## Purpose
TBD - created by archiving change reject-history-query-page. Update Purpose after archive.
## Requirements
### Requirement: Charge-off reject metric SHALL be computed from five reject component columns
The system SHALL compute `REJECT_TOTAL_QTY` as the sum of five reject-related quantity columns.
#### Scenario: Reject total formula
- **WHEN** a source record is transformed
- **THEN** `REJECT_TOTAL_QTY` SHALL equal `REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY`
- **THEN** null component values SHALL be treated as zero
### Requirement: Defect metric SHALL remain independent from reject total
The system SHALL compute `DEFECT_QTY` only from `DEFECTQTY` and SHALL NOT merge it into `REJECT_TOTAL_QTY`.
#### Scenario: Defect independence
- **WHEN** a record has `DEFECTQTY > 0` and reject component sum equals 0
- **THEN** `DEFECT_QTY` SHALL be non-zero
- **THEN** `REJECT_TOTAL_QTY` SHALL remain 0
### Requirement: Yield-exclusion policy SHALL follow ERP exclusion table
The system SHALL use `ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE` as the policy source for "not included in yield" scrap reasons.
#### Scenario: Enabled policy rows
- **WHEN** exclusion policy is evaluated
- **THEN** only rows with `ENABLE_FLAG='Y'` SHALL be considered exclusion rules
#### Scenario: Default exclusion behavior
- **WHEN** `include_excluded_scrap=false` (default)
- **THEN** source rows matching enabled exclusion reasons SHALL be excluded before computing yield-related metrics
#### Scenario: Optional inclusion override
- **WHEN** `include_excluded_scrap=true`
- **THEN** the same matched rows SHALL be included back into metric calculations
### Requirement: Move-in denominator SHALL be deduplicated at event level
The system SHALL deduplicate `MOVEIN_QTY` by event key before rate calculations.
#### Scenario: Primary dedupe key
- **WHEN** `HISTORYMAINLINEID` is present
- **THEN** only one row per `HISTORYMAINLINEID` SHALL contribute `MOVEIN_QTY`
#### Scenario: Fallback dedupe key
- **WHEN** `HISTORYMAINLINEID` is missing
- **THEN** fallback dedupe key SHALL use a deterministic composite key from transaction context
### Requirement: Reject and defect rates SHALL use the same deduplicated denominator
The system SHALL calculate percentage rates from deduplicated `MOVEIN_QTY` to ensure comparability.
#### Scenario: Reject rate formula
- **WHEN** `MOVEIN_QTY > 0`
- **THEN** `REJECT_RATE_PCT` SHALL equal `REJECT_TOTAL_QTY / MOVEIN_QTY * 100`
#### Scenario: Defect rate formula
- **WHEN** `MOVEIN_QTY > 0`
- **THEN** `DEFECT_RATE_PCT` SHALL equal `DEFECT_QTY / MOVEIN_QTY * 100`
#### Scenario: Zero denominator handling
- **WHEN** `MOVEIN_QTY = 0`
- **THEN** both rate fields SHALL return 0 and SHALL NOT raise divide-by-zero errors
### Requirement: Reject share SHALL describe reject proportion within total loss
The system SHALL calculate reject share against combined reject and defect loss quantities.
#### Scenario: Reject share formula
- **WHEN** `REJECT_TOTAL_QTY + DEFECT_QTY > 0`
- **THEN** `REJECT_SHARE_PCT` SHALL equal `REJECT_TOTAL_QTY / (REJECT_TOTAL_QTY + DEFECT_QTY) * 100`
### Requirement: Metric naming SHALL preserve semantic meaning across transformations
The system SHALL keep explicit names for charge-off reject and non-charge-off defect metrics.
#### Scenario: No ambiguous remapping
- **WHEN** service or export fields are generated
- **THEN** `REJECT_TOTAL_QTY` SHALL NOT be renamed to `DEFECT_QTY`
- **THEN** `DEFECT_QTY` SHALL refer only to `DEFECTQTY`

View File

@@ -45,3 +45,27 @@ When contract loading falls back from the primary modernization contract artifac
#### Scenario: Legacy contract fallback path selected #### Scenario: Legacy contract fallback path selected
- **WHEN** the primary contract artifact is unavailable and a legacy contract file is loaded - **WHEN** the primary contract artifact is unavailable and a legacy contract file is loaded
- **THEN** the system SHALL log a warning that includes the selected legacy source path - **THEN** the system SHALL log a warning that includes the selected legacy source path
### Requirement: Reject History route SHALL be included in governed shell route inventory
The `/reject-history` route SHALL be represented in shell route contracts with complete governance metadata.
#### Scenario: Frontend route contract entry
- **WHEN** route contract validation runs against `frontend/src/portal-shell/routeContracts.js`
- **THEN** `/reject-history` SHALL exist with route id, title, owner, render mode, visibility policy, scope, and compatibility policy
#### Scenario: Native loader coverage
- **WHEN** native module loader registry is validated
- **THEN** `/reject-history` SHALL be resolvable in `nativeModuleRegistry`
### Requirement: Reject History governance metadata SHALL be parity-validated across sources
Shell governance checks SHALL enforce parity for `/reject-history` between frontend and backend contract inventories.
#### Scenario: Contract parity for reject-history route
- **WHEN** contract parity checks execute
- **THEN** frontend and backend route inventories SHALL both include `/reject-history`
- **THEN** metadata mismatch or missing route SHALL fail governance checks
#### Scenario: Navigation visibility governance
- **WHEN** page status/navigation config is evaluated
- **THEN** `/reject-history` SHALL have governed drawer assignment and ordering metadata

View File

@@ -137,3 +137,42 @@ The mid-section defect page SHALL use `AbortController` to cancel in-flight API
- **THEN** the query SHALL NOT be cancelled by the pagination request - **THEN** the query SHALL NOT be cancelled by the pagination request
- **THEN** the pagination SHALL use a separate abort key from the query - **THEN** the pagination SHALL use a separate abort key from the query
### Requirement: Reject History page SHALL be a pure Vite HTML entry
The reject-history page SHALL be built from an HTML entry and emitted as static dist assets.
#### Scenario: Vite entry registration
- **WHEN** Vite config inputs are evaluated
- **THEN** `reject-history` SHALL map to `frontend/src/reject-history/index.html`
#### Scenario: Build output artifacts
- **WHEN** `vite build` completes
- **THEN** output SHALL include `reject-history.html`, `reject-history.js`, and `reject-history.css` in `static/dist/`
### Requirement: Reject History route SHALL serve static dist HTML
The Flask route for `/reject-history` SHALL serve pre-built static HTML through `send_from_directory`.
#### Scenario: Static page serving
- **WHEN** user navigates to `/reject-history`
- **THEN** Flask SHALL serve `static/dist/reject-history.html` when the file exists
- **THEN** HTML SHALL NOT be rendered through Jinja template interpolation
#### Scenario: Dist fallback response
- **WHEN** `reject-history.html` is missing in dist
- **THEN** route SHALL return a minimal fallback HTML that still references `/static/dist/reject-history.js`
### Requirement: Reject History shell integration SHALL use native module loading
The page SHALL integrate with portal-shell native module loading policy.
#### Scenario: Native module registration
- **WHEN** shell resolves a route component for `/reject-history`
- **THEN** it SHALL dynamically import `frontend/src/reject-history/App.vue`
- **THEN** the route style bundle SHALL be loaded via registered style loaders
### Requirement: Reject History page SHALL call APIs through shared core API module
The page SHALL call backend APIs via `frontend/src/core/api.js` without legacy global dependencies.
#### Scenario: API call path
- **WHEN** reject-history page executes GET or export requests
- **THEN** requests SHALL use shared API utilities (`apiGet`/equivalent)
- **THEN** page behavior SHALL NOT depend on `window.MesApi`

View File

@@ -44,6 +44,10 @@ from mes_dashboard.services.realtime_equipment_cache import (
init_realtime_equipment_cache, init_realtime_equipment_cache,
stop_equipment_status_sync_worker, stop_equipment_status_sync_worker,
) )
from mes_dashboard.services.scrap_reason_exclusion_cache import (
init_scrap_reason_exclusion_cache,
stop_scrap_reason_exclusion_cache_worker,
)
from mes_dashboard.core.modernization_policy import ( from mes_dashboard.core.modernization_policy import (
get_deferred_routes as get_deferred_routes_from_scope_matrix, get_deferred_routes as get_deferred_routes_from_scope_matrix,
get_missing_in_scope_assets, get_missing_in_scope_assets,
@@ -286,6 +290,11 @@ def _shutdown_runtime_resources() -> None:
except Exception as exc: except Exception as exc:
logger.warning("Error stopping equipment sync worker: %s", exc) logger.warning("Error stopping equipment sync worker: %s", exc)
try:
stop_scrap_reason_exclusion_cache_worker()
except Exception as exc:
logger.warning("Error stopping scrap exclusion cache worker: %s", exc)
try: try:
close_redis() close_redis()
except Exception as exc: except Exception as exc:
@@ -380,6 +389,7 @@ def create_app(config_name: str | None = None) -> Flask:
start_keepalive() # Keep database connections alive start_keepalive() # Keep database connections alive
start_cache_updater() # Start Redis cache updater start_cache_updater() # Start Redis cache updater
init_realtime_equipment_cache(app) # Start realtime equipment status cache init_realtime_equipment_cache(app) # Start realtime equipment status cache
init_scrap_reason_exclusion_cache(app) # Start exclusion-policy cache sync
_register_shutdown_hooks(app) _register_shutdown_hooks(app)
# Register API routes # Register API routes
@@ -796,6 +806,27 @@ def create_app(config_name: str | None = None) -> Flask:
200, 200,
)) ))
@app.route('/reject-history')
def reject_history_page():
    """Reject history analysis page served as pure Vite HTML output.

    Serves the pre-built ``static/dist/reject-history.html`` when present;
    otherwise returns a minimal fallback HTML shell that still references
    the Vite JS bundle so the page can boot from assets alone.
    """
    # Shell governance may redirect this route into the canonical portal shell.
    canonical_redirect = maybe_redirect_to_canonical_shell('/reject-history')
    if canonical_redirect is not None:
        return canonical_redirect
    dist_dir = os.path.join(app.static_folder or "", "dist")
    dist_html = os.path.join(dist_dir, "reject-history.html")
    if os.path.exists(dist_html):
        # Static file serving only — no Jinja template interpolation by design.
        return send_from_directory(dist_dir, 'reject-history.html')
    # Dist build missing: emit fallback HTML that still loads the JS bundle.
    return missing_in_scope_asset_response('/reject-history', (
        "<!doctype html><html lang=\"zh-Hant\"><head><meta charset=\"UTF-8\">"
        "<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">"
        "<title>報廢歷史查詢</title>"
        "<script type=\"module\" src=\"/static/dist/reject-history.js\"></script>"
        "</head><body><div id='app'></div></body></html>",
        200,
    ))
@app.route('/tmtt-defect') @app.route('/tmtt-defect')
def tmtt_defect_page(): def tmtt_defect_page():
"""TMTT printing & lead form defect analysis page.""" """TMTT printing & lead form defect analysis page."""

View File

@@ -20,6 +20,7 @@ from .tmtt_defect_routes import tmtt_defect_bp
from .qc_gate_routes import qc_gate_bp from .qc_gate_routes import qc_gate_bp
from .mid_section_defect_routes import mid_section_defect_bp from .mid_section_defect_routes import mid_section_defect_bp
from .trace_routes import trace_bp from .trace_routes import trace_bp
from .reject_history_routes import reject_history_bp
def register_routes(app) -> None: def register_routes(app) -> None:
@@ -38,6 +39,7 @@ def register_routes(app) -> None:
app.register_blueprint(qc_gate_bp) app.register_blueprint(qc_gate_bp)
app.register_blueprint(mid_section_defect_bp) app.register_blueprint(mid_section_defect_bp)
app.register_blueprint(trace_bp) app.register_blueprint(trace_bp)
app.register_blueprint(reject_history_bp)
__all__ = [ __all__ = [
'wip_bp', 'wip_bp',
@@ -56,5 +58,6 @@ __all__ = [
'qc_gate_bp', 'qc_gate_bp',
'mid_section_defect_bp', 'mid_section_defect_bp',
'trace_bp', 'trace_bp',
'reject_history_bp',
'register_routes', 'register_routes',
] ]

View File

@@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-
"""Reject-history page API routes."""
from __future__ import annotations
from datetime import date, timedelta
from typing import Optional
from flask import Blueprint, Response, jsonify, request
from mes_dashboard.core.rate_limit import configured_rate_limit
from mes_dashboard.services.reject_history_service import (
export_csv,
get_filter_options,
query_list,
query_reason_pareto,
query_summary,
query_trend,
)
# Blueprint exposing the /api/reject-history/* endpoints.
reject_history_bp = Blueprint("reject_history", __name__)

# Rate limit for the paginated list endpoint. Environment variables override
# the defaults (90 requests per 60-second window).
_REJECT_HISTORY_LIST_RATE_LIMIT = configured_rate_limit(
    bucket="reject-history-list",
    max_attempts_env="REJECT_HISTORY_LIST_RATE_LIMIT_MAX_REQUESTS",
    window_seconds_env="REJECT_HISTORY_LIST_RATE_LIMIT_WINDOW_SECONDS",
    default_max_attempts=90,
    default_window_seconds=60,
)

# Rate limit for the CSV export endpoint. Stricter default (30 per 60 seconds)
# because export queries are the most expensive against Oracle.
_REJECT_HISTORY_EXPORT_RATE_LIMIT = configured_rate_limit(
    bucket="reject-history-export",
    max_attempts_env="REJECT_HISTORY_EXPORT_RATE_LIMIT_MAX_REQUESTS",
    window_seconds_env="REJECT_HISTORY_EXPORT_RATE_LIMIT_WINDOW_SECONDS",
    default_max_attempts=30,
    default_window_seconds=60,
)
def _default_date_range() -> tuple[str, str]:
end = date.today()
start = end - timedelta(days=29)
return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")
def _parse_date_range(required: bool = True) -> tuple[Optional[str], Optional[str], Optional[tuple[dict, int]]]:
    """Read ``start_date``/``end_date`` from the query string.

    Returns ``(start, end, error)``. When either value is missing: a
    ``(payload, status)`` error is produced if the pair is required,
    otherwise both fall back to the default 30-day window.
    """
    start = (request.args.get("start_date") or "").strip()
    end = (request.args.get("end_date") or "").strip()
    if start and end:
        return start, end, None
    if required:
        error = ({"success": False, "error": "缺少必要參數: start_date, end_date"}, 400)
        return None, None, error
    fallback_start, fallback_end = _default_date_range()
    return fallback_start, fallback_end, None
def _parse_bool(value: str, *, name: str) -> tuple[Optional[bool], Optional[tuple[dict, int]]]:
normalized = str(value or "").strip().lower()
if normalized in {"", "0", "false", "no", "n", "off"}:
return False, None
if normalized in {"1", "true", "yes", "y", "on"}:
return True, None
return None, ({"success": False, "error": f"Invalid {name}, use true/false"}, 400)
def _parse_multi_param(name: str) -> list[str]:
    """Collect a multi-value query parameter as a deduplicated list.

    Accepts both repeated query keys (``?k=a&k=b``) and comma-separated
    values inside a single key (``?k=a,b``). Blank tokens are dropped.
    Duplicates are removed while preserving first-seen order.
    """
    tokens = (
        token.strip()
        for raw in request.args.getlist(name)
        for token in str(raw).split(",")
    )
    # dict.fromkeys keeps insertion order (Python 3.7+) — single-pass,
    # order-preserving dedupe without a manual seen-set loop.
    return list(dict.fromkeys(token for token in tokens if token))
def _extract_meta(
payload: dict,
include_excluded_scrap: bool,
exclude_material_scrap: bool,
) -> tuple[dict, dict]:
data = dict(payload or {})
meta = data.pop("meta", {}) if isinstance(data.get("meta"), dict) else {}
meta["include_excluded_scrap"] = bool(include_excluded_scrap)
meta["exclude_material_scrap"] = bool(exclude_material_scrap)
return data, meta
@reject_history_bp.route("/api/reject-history/options", methods=["GET"])
def api_reject_history_options():
    """Return filter options (workcenter groups, reasons) plus policy meta.

    Dates are optional here; absent values fall back to the default window.
    """
    start_date, end_date, date_error = _parse_date_range(required=False)
    if date_error:
        body, status = date_error
        return jsonify(body), status
    include_excluded, err = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if err:
        body, status = err
        return jsonify(body), status
    # Material scrap is excluded by default ("true").
    exclude_material, err = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if err:
        body, status = err
        return jsonify(body), status
    try:
        payload = get_filter_options(
            start_date=start_date,
            end_date=end_date,
            include_excluded_scrap=bool(include_excluded),
            exclude_material_scrap=bool(exclude_material),
        )
        data, meta = _extract_meta(payload, bool(include_excluded), bool(exclude_material))
        return jsonify({"success": True, "data": data, "meta": meta})
    except ValueError as exc:
        # Validation errors raised by the service surface as HTTP 400.
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        # Unexpected failures are masked with a generic zh-TW message.
        return jsonify({"success": False, "error": "查詢篩選選項失敗"}), 500
@reject_history_bp.route("/api/reject-history/summary", methods=["GET"])
def api_reject_history_summary():
    """Return aggregated KPI summary metrics for the selected filter context.

    Requires ``start_date``/``end_date``; dimension filters are optional
    multi-value parameters.
    """
    start_date, end_date, date_error = _parse_date_range(required=True)
    if date_error:
        body, status = date_error
        return jsonify(body), status
    include_excluded, err = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if err:
        body, status = err
        return jsonify(body), status
    # Material scrap is excluded by default ("true").
    exclude_material, err = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if err:
        body, status = err
        return jsonify(body), status
    try:
        payload = query_summary(
            start_date=start_date,
            end_date=end_date,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            packages=_parse_multi_param("packages") or None,
            reasons=_parse_multi_param("reasons") or None,
            categories=_parse_multi_param("categories") or None,
            include_excluded_scrap=bool(include_excluded),
            exclude_material_scrap=bool(exclude_material),
        )
        data, meta = _extract_meta(payload, bool(include_excluded), bool(exclude_material))
        return jsonify({"success": True, "data": data, "meta": meta})
    except ValueError as exc:
        # Validation errors raised by the service surface as HTTP 400.
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        # Unexpected failures are masked with a generic zh-TW message.
        return jsonify({"success": False, "error": "查詢摘要資料失敗"}), 500
@reject_history_bp.route("/api/reject-history/trend", methods=["GET"])
def api_reject_history_trend():
    """Trend endpoint: KPI series bucketed by day/week/month."""
    start_date, end_date, date_error = _parse_date_range(required=True)
    if date_error:
        body, status = date_error
        return jsonify(body), status
    include_excluded_scrap, toggle_error = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if toggle_error:
        body, status = toggle_error
        return jsonify(body), status
    exclude_material_scrap, material_error = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if material_error:
        body, status = material_error
        return jsonify(body), status
    # Granularity is validated downstream by query_trend (ValueError -> 400).
    granularity = request.args.get("granularity", "day").strip().lower() or "day"
    try:
        payload = query_trend(
            start_date=start_date,
            end_date=end_date,
            granularity=granularity,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            packages=_parse_multi_param("packages") or None,
            reasons=_parse_multi_param("reasons") or None,
            categories=_parse_multi_param("categories") or None,
            include_excluded_scrap=bool(include_excluded_scrap),
            exclude_material_scrap=bool(exclude_material_scrap),
        )
        data, meta = _extract_meta(
            payload, bool(include_excluded_scrap), bool(exclude_material_scrap)
        )
        return jsonify({"success": True, "data": data, "meta": meta})
    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "查詢趨勢資料失敗"}), 500
@reject_history_bp.route("/api/reject-history/reason-pareto", methods=["GET"])
def api_reject_history_reason_pareto():
    """Pareto endpoint: per-reason totals ranked by the chosen metric."""
    start_date, end_date, date_error = _parse_date_range(required=True)
    if date_error:
        body, status = date_error
        return jsonify(body), status
    include_excluded_scrap, toggle_error = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if toggle_error:
        body, status = toggle_error
        return jsonify(body), status
    exclude_material_scrap, material_error = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if material_error:
        body, status = material_error
        return jsonify(body), status
    # Both enum params are validated in query_reason_pareto (ValueError -> 400).
    metric_mode = request.args.get("metric_mode", "reject_total").strip().lower() or "reject_total"
    pareto_scope = request.args.get("pareto_scope", "top80").strip().lower() or "top80"
    try:
        payload = query_reason_pareto(
            start_date=start_date,
            end_date=end_date,
            metric_mode=metric_mode,
            pareto_scope=pareto_scope,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            packages=_parse_multi_param("packages") or None,
            reasons=_parse_multi_param("reasons") or None,
            categories=_parse_multi_param("categories") or None,
            include_excluded_scrap=bool(include_excluded_scrap),
            exclude_material_scrap=bool(exclude_material_scrap),
        )
        data, meta = _extract_meta(
            payload, bool(include_excluded_scrap), bool(exclude_material_scrap)
        )
        return jsonify({"success": True, "data": data, "meta": meta})
    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "查詢柏拉圖資料失敗"}), 500
@reject_history_bp.route("/api/reject-history/list", methods=["GET"])
@_REJECT_HISTORY_LIST_RATE_LIMIT
def api_reject_history_list():
    """Paginated detail-list endpoint (rate limited)."""
    start_date, end_date, date_error = _parse_date_range(required=True)
    if date_error:
        body, status = date_error
        return jsonify(body), status
    include_excluded_scrap, toggle_error = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if toggle_error:
        body, status = toggle_error
        return jsonify(body), status
    exclude_material_scrap, material_error = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if material_error:
        body, status = material_error
        return jsonify(body), status
    # Fall back to page 1 / 50 rows when the params are missing or zero;
    # query_list clamps them to sane bounds.
    page = request.args.get("page", 1, type=int) or 1
    per_page = request.args.get("per_page", 50, type=int) or 50
    try:
        payload = query_list(
            start_date=start_date,
            end_date=end_date,
            page=page,
            per_page=per_page,
            workcenter_groups=_parse_multi_param("workcenter_groups") or None,
            packages=_parse_multi_param("packages") or None,
            reasons=_parse_multi_param("reasons") or None,
            categories=_parse_multi_param("categories") or None,
            include_excluded_scrap=bool(include_excluded_scrap),
            exclude_material_scrap=bool(exclude_material_scrap),
        )
        data, meta = _extract_meta(
            payload, bool(include_excluded_scrap), bool(exclude_material_scrap)
        )
        return jsonify({"success": True, "data": data, "meta": meta})
    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "查詢明細資料失敗"}), 500
@reject_history_bp.route("/api/reject-history/export", methods=["GET"])
@_REJECT_HISTORY_EXPORT_RATE_LIMIT
def api_reject_history_export():
    """Stream the filtered reject-history dataset as a CSV attachment.

    Validates the same params as the list endpoint, then streams the CSV
    produced by ``export_csv`` with a date-stamped download filename.
    """
    start_date, end_date, date_error = _parse_date_range(required=True)
    if date_error:
        return jsonify(date_error[0]), date_error[1]
    include_excluded_scrap, bool_error = _parse_bool(
        request.args.get("include_excluded_scrap", ""),
        name="include_excluded_scrap",
    )
    if bool_error:
        return jsonify(bool_error[0]), bool_error[1]
    exclude_material_scrap, material_bool_error = _parse_bool(
        request.args.get("exclude_material_scrap", "true"),
        name="exclude_material_scrap",
    )
    if material_bool_error:
        return jsonify(material_bool_error[0]), material_bool_error[1]
    filename = f"reject_history_{start_date}_to_{end_date}.csv"
    try:
        return Response(
            export_csv(
                start_date=start_date,
                end_date=end_date,
                workcenter_groups=_parse_multi_param("workcenter_groups") or None,
                packages=_parse_multi_param("packages") or None,
                reasons=_parse_multi_param("reasons") or None,
                categories=_parse_multi_param("categories") or None,
                include_excluded_scrap=bool(include_excluded_scrap),
                exclude_material_scrap=bool(exclude_material_scrap),
            ),
            mimetype="text/csv",
            headers={
                # BUG FIX: the computed ``filename`` was never interpolated into
                # the header, so every download got a literal placeholder name.
                "Content-Disposition": f'attachment; filename="{filename}"',
                "Content-Type": "text/csv; charset=utf-8-sig",
            },
        )
    except ValueError as exc:
        return jsonify({"success": False, "error": str(exc)}), 400
    except Exception:
        return jsonify({"success": False, "error": "匯出 CSV 失敗"}), 500

View File

@@ -0,0 +1,676 @@
# -*- coding: utf-8 -*-
"""Service layer for reject-history page APIs."""
from __future__ import annotations
import csv
import io
import logging
from datetime import date, datetime
from typing import Any, Dict, Generator, Iterable, Optional
import pandas as pd
from mes_dashboard.core.database import read_sql_df
from mes_dashboard.services.filter_cache import get_workcenter_groups
from mes_dashboard.services.scrap_reason_exclusion_cache import get_excluded_reasons
from mes_dashboard.sql import QueryBuilder, SQLLoader
logger = logging.getLogger("mes_dashboard.reject_history_service")
MAX_QUERY_DAYS = 730
VALID_GRANULARITY = {"day", "week", "month"}
VALID_METRIC_MODE = {"reject_total", "defect"}
MATERIAL_REASON_OPTION = "原物料報廢"
def _parse_date(value: str) -> date:
return datetime.strptime(value, "%Y-%m-%d").date()
def _validate_range(start_date: str, end_date: str) -> None:
    """Validate the requested window: end >= start and span <= MAX_QUERY_DAYS."""
    span_days = (_parse_date(end_date) - _parse_date(start_date)).days
    if span_days < 0:
        raise ValueError("end_date 不可早於 start_date")
    if span_days > MAX_QUERY_DAYS:
        raise ValueError(f"日期範圍不可超過 {MAX_QUERY_DAYS} 天")
def _normalize_text(value: Any) -> str:
return str(value or "").strip()
def _as_int(value: Any) -> int:
if value is None:
return 0
try:
if pd.isna(value):
return 0
except Exception:
pass
try:
return int(float(value))
except (TypeError, ValueError):
return 0
def _as_float(value: Any) -> float:
if value is None:
return 0.0
try:
if pd.isna(value):
return 0.0
except Exception:
pass
try:
return float(value)
except (TypeError, ValueError):
return 0.0
def _to_date_str(value: Any) -> str:
if isinstance(value, datetime):
return value.strftime("%Y-%m-%d")
if isinstance(value, date):
return value.strftime("%Y-%m-%d")
if isinstance(value, pd.Timestamp):
return value.to_pydatetime().strftime("%Y-%m-%d")
text = _normalize_text(value)
if not text:
return ""
try:
return pd.to_datetime(text).strftime("%Y-%m-%d")
except Exception:
return text
def _date_bucket_expr(granularity: str) -> str:
if granularity == "week":
return "TRUNC(b.TXN_DAY, 'IW')"
if granularity == "month":
return "TRUNC(b.TXN_DAY, 'MM')"
return "TRUNC(b.TXN_DAY)"
def _metric_column(metric_mode: str) -> str:
if metric_mode == "defect":
return "b.DEFECT_QTY"
return "b.REJECT_TOTAL_QTY"
def _load_sql(name: str) -> str:
    """Load a SQL template from the reject_history template folder."""
    return SQLLoader.load("reject_history/" + name)
def _base_query_sql() -> str:
    """Return the base daily dataset SQL, trimmed and with leading comments removed.

    The template may open with ``--`` comment lines; stripping them lets
    downstream code detect whether the statement starts with WITH.
    """
    sql = _load_sql("performance_daily").strip().rstrip(";")
    lines = sql.splitlines()
    start = 0
    for idx, line in enumerate(lines):
        stripped = line.strip()
        if stripped and not stripped.startswith("--"):
            start = idx
            break
    return "\n".join(lines[start:]).strip()
def _split_with_query(sql: str) -> tuple[str, str] | None:
    """Split a top-level WITH query into (cte_segment, final_select).

    Scans character by character, tracking single-quoted string literals and
    parenthesis depth, to locate the first SELECT keyword at depth 0. Returns
    None when ``sql`` is not a WITH query or no top-level SELECT is found.
    """
    text = (sql or "").strip()
    if not text.lower().startswith("with "):
        return None
    depth = 0
    in_string = False
    i = 0
    while i < len(text):
        ch = text[i]
        if ch == "'":
            # '' inside a string literal is an escaped quote, not a terminator.
            if in_string and i + 1 < len(text) and text[i + 1] == "'":
                i += 2
                continue
            in_string = not in_string
            i += 1
            continue
        if in_string:
            i += 1
            continue
        if ch == "(":
            depth += 1
        elif ch == ")":
            # Clamp at 0 so unbalanced parens cannot push the depth negative.
            depth = max(depth - 1, 0)
        elif depth == 0:
            head = text[i : i + 6]
            if head.lower() == "select":
                # Require word boundaries so identifiers such as "selected"
                # or "preselect" do not match.
                prev_ok = i == 0 or not (text[i - 1].isalnum() or text[i - 1] == "_")
                next_idx = i + 6
                next_ok = next_idx >= len(text) or not (
                    text[next_idx].isalnum() or text[next_idx] == "_"
                )
                if prev_ok and next_ok:
                    # text[5:i] drops the leading "with " prefix (5 chars).
                    cte_segment = text[5:i].strip().rstrip(",")
                    final_select = text[i:].strip()
                    if cte_segment and final_select:
                        return cte_segment, final_select
                    return None
        i += 1
    return None
def _base_with_cte_sql(alias: str = "base") -> str:
    """Wrap the base query as a CTE named ``alias``.

    When the base query is itself a WITH query, its CTE list is merged in
    front of ``alias`` instead of nesting WITH inside WITH.
    """
    base_sql = _base_query_sql()
    parts = _split_with_query(base_sql)
    if parts:
        cte_segment, final_select = parts
        return f"WITH {cte_segment},\n{alias} AS (\n{final_select}\n)"
    return f"WITH {alias} AS (\n{base_sql}\n)"
def _build_where_clause(
    *,
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> tuple[str, dict[str, Any], dict[str, Any]]:
    """Build the shared WHERE clause (against alias ``b``), bind params, and meta.

    Returns (where_clause, params, meta); ``meta`` records which policy
    toggles and filters were actually applied so the API can echo them back.
    """
    builder = QueryBuilder()
    # Trim + dedupe every multi-value filter before binding.
    normalized_wc_groups = sorted({_normalize_text(v) for v in (workcenter_groups or []) if _normalize_text(v)})
    normalized_packages = sorted({_normalize_text(v) for v in (packages or []) if _normalize_text(v)})
    normalized_reasons = sorted({_normalize_text(v) for v in (reasons or []) if _normalize_text(v)})
    # "原物料報廢" is a synthetic option: it filters on SCRAP_OBJECTTYPE,
    # not on LOSSREASONNAME, so it is pulled out of the reason-name list.
    material_reason_selected = MATERIAL_REASON_OPTION in normalized_reasons
    reason_name_filters = [value for value in normalized_reasons if value != MATERIAL_REASON_OPTION]
    normalized_categories = sorted({_normalize_text(v) for v in (categories or []) if _normalize_text(v)})
    if normalized_wc_groups:
        builder.add_in_condition("b.WORKCENTER_GROUP", normalized_wc_groups)
    if normalized_packages:
        builder.add_in_condition("b.PRODUCTLINENAME", normalized_packages)
    if reason_name_filters:
        builder.add_in_condition("b.LOSSREASONNAME", reason_name_filters)
    if material_reason_selected:
        builder.add_condition("UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) = 'MATERIAL'")
    material_exclusion_applied = False
    # Default mode hides material scrap unless the user explicitly selected it.
    if exclude_material_scrap and not material_reason_selected:
        builder.add_condition("UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) <> 'MATERIAL'")
        material_exclusion_applied = True
    if normalized_categories:
        builder.add_in_condition("b.REJECTCATEGORYNAME", normalized_categories)
    exclusions_applied = False
    excluded_reason_codes = []
    reason_name_prefix_policy_applied = False
    if not include_excluded_scrap:
        # Policy table (ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE) via the daily cache.
        excluded_reason_codes = sorted(get_excluded_reasons())
        reason_name_prefix_policy_applied = True
        if excluded_reason_codes:
            exclusions_applied = True
            # Support exclusion matching by both normalized reason code and full reason text.
            builder.add_not_in_condition(
                "UPPER(NVL(TRIM(b.LOSSREASON_CODE), '-'))",
                excluded_reason_codes,
            )
            builder.add_not_in_condition(
                "UPPER(NVL(TRIM(b.LOSSREASONNAME), '-'))",
                excluded_reason_codes,
            )
        # Exclude reason labels that are not "NNN_*", and always exclude XXX_/ZZZ_ prefixes.
        builder.add_condition(
            "REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^[0-9]{3}_')"
        )
        builder.add_condition(
            "NOT REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^(XXX|ZZZ)_')"
        )
        exclusions_applied = True
    where_clause, params = builder.build_where_only()
    meta = {
        "include_excluded_scrap": bool(include_excluded_scrap),
        "exclusion_applied": exclusions_applied,
        "reason_name_prefix_policy_applied": reason_name_prefix_policy_applied,
        "exclude_material_scrap": bool(exclude_material_scrap),
        "material_exclusion_applied": material_exclusion_applied,
        "excluded_reason_count": len(excluded_reason_codes),
        "workcenter_group_count": len(normalized_wc_groups),
        "package_filter_count": len(normalized_packages),
        "reason_filter_count": len(reason_name_filters),
        "material_reason_selected": material_reason_selected,
    }
    return where_clause, params, meta
def _prepare_sql(
    name: str,
    *,
    where_clause: str = "",
    bucket_expr: str = "",
    metric_column: str = "",
) -> str:
    """Load a SQL template and fill its ``{{ ... }}`` slots.

    Empty bucket/metric arguments fall back to the day bucket and the
    total-reject column.
    """
    replacements = {
        "{{ BASE_QUERY }}": _base_query_sql(),
        "{{ BASE_WITH_CTE }}": _base_with_cte_sql("base"),
        "{{ WHERE_CLAUSE }}": where_clause or "",
        "{{ BUCKET_EXPR }}": bucket_expr or "TRUNC(b.TXN_DAY)",
        "{{ METRIC_COLUMN }}": metric_column or "b.REJECT_TOTAL_QTY",
    }
    sql = _load_sql(name)
    for slot, value in replacements.items():
        sql = sql.replace(slot, value)
    return sql
def _common_params(start_date: str, end_date: str, extra: Optional[dict[str, Any]] = None) -> dict[str, Any]:
params = {"start_date": start_date, "end_date": end_date}
if extra:
params.update(extra)
return params
def _list_to_csv(
rows: Iterable[dict[str, Any]],
headers: list[str],
) -> Generator[str, None, None]:
buffer = io.StringIO()
writer = csv.DictWriter(buffer, fieldnames=headers)
writer.writeheader()
yield buffer.getvalue()
buffer.seek(0)
buffer.truncate(0)
for row in rows:
writer.writerow(row)
yield buffer.getvalue()
buffer.seek(0)
buffer.truncate(0)
def get_filter_options(
    *,
    start_date: str,
    end_date: str,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> dict[str, Any]:
    """Return workcenter-group / package / reason options.

    Options honour the two policy toggles but no user-selected filters, so
    the dropdowns always show everything selectable for the date range.
    Raises ValueError for an invalid date range.
    """
    _validate_range(start_date, end_date)
    where_clause, params, meta = _build_where_clause(
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    # Distinct reason names present in the filtered window.
    reason_sql = _prepare_sql("reason_options", where_clause=where_clause)
    reason_df = read_sql_df(reason_sql, _common_params(start_date, end_date, params))
    reasons = []
    if reason_df is not None and not reason_df.empty:
        reasons = [
            _normalize_text(v)
            for v in reason_df.get("REASON", [])
            if _normalize_text(v)
        ]
    # The synthetic "原物料報廢" option is offered only when material-scrap
    # rows actually exist in the window.
    material_sql = _prepare_sql("material_reason_option", where_clause=where_clause)
    material_df = read_sql_df(material_sql, _common_params(start_date, end_date, params))
    has_material_option = False
    if material_df is not None and not material_df.empty:
        has_material_option = _as_int(material_df.iloc[0].get("HAS_MATERIAL")) > 0
    package_sql = _prepare_sql("package_options", where_clause=where_clause)
    package_df = read_sql_df(package_sql, _common_params(start_date, end_date, params))
    packages = []
    if package_df is not None and not package_df.empty:
        packages = [
            _normalize_text(v)
            for v in package_df.get("PACKAGE", [])
            if _normalize_text(v)
        ]
    # Workcenter groups come from the shared filter cache, not this dataset.
    groups_raw = get_workcenter_groups() or []
    workcenter_groups = []
    for item in groups_raw:
        name = _normalize_text(item.get("name"))
        if not name:
            continue
        workcenter_groups.append(
            {
                "name": name,
                "sequence": _as_int(item.get("sequence")),
            }
        )
    reason_options = sorted(set(reasons))
    if has_material_option and MATERIAL_REASON_OPTION not in reason_options:
        # Appended after the sort, so the synthetic option lands last.
        reason_options.append(MATERIAL_REASON_OPTION)
    return {
        "workcenter_groups": workcenter_groups,
        "packages": sorted(set(packages)),
        "reasons": reason_options,
        "meta": meta,
    }
def query_summary(
    *,
    start_date: str,
    end_date: str,
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> dict[str, Any]:
    """Aggregate summary KPIs for the filtered date range.

    Counts come back as ints, percentage fields rounded to 4 decimals; a
    missing/empty result yields all-zero KPIs. Raises ValueError for an
    invalid date range.
    """
    _validate_range(start_date, end_date)
    where_clause, params, meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    sql = _prepare_sql("summary", where_clause=where_clause)
    df = read_sql_df(sql, _common_params(start_date, end_date, params))
    first_row = df.iloc[0] if df is not None and not df.empty else {}

    def _pct(value: Any) -> float:
        return round(_as_float(value), 4)

    # Field order mirrors the API contract for the summary endpoint.
    fields = (
        ("MOVEIN_QTY", _as_int),
        ("REJECT_TOTAL_QTY", _as_int),
        ("DEFECT_QTY", _as_int),
        ("REJECT_RATE_PCT", _pct),
        ("DEFECT_RATE_PCT", _pct),
        ("REJECT_SHARE_PCT", _pct),
        ("AFFECTED_LOT_COUNT", _as_int),
        ("AFFECTED_WORKORDER_COUNT", _as_int),
    )
    payload: dict[str, Any] = {name: convert(first_row.get(name)) for name, convert in fields}
    payload["meta"] = meta
    return payload
def query_trend(
    *,
    start_date: str,
    end_date: str,
    granularity: str = "day",
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> dict[str, Any]:
    """Return the KPI trend series bucketed by day, week, or month.

    Raises ValueError for an invalid date range or unknown granularity.
    """
    _validate_range(start_date, end_date)
    normalized_granularity = _normalize_text(granularity).lower() or "day"
    if normalized_granularity not in VALID_GRANULARITY:
        raise ValueError("Invalid granularity. Use day, week, or month")
    where_clause, params, meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    # The bucket expression is substituted into the template, not bound.
    sql = _prepare_sql(
        "trend",
        where_clause=where_clause,
        bucket_expr=_date_bucket_expr(normalized_granularity),
    )
    df = read_sql_df(sql, _common_params(start_date, end_date, params))
    items = []
    if df is not None and not df.empty:
        for _, row in df.iterrows():
            items.append(
                {
                    "bucket_date": _to_date_str(row.get("BUCKET_DATE")),
                    "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                    "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                    "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                    "REJECT_RATE_PCT": round(_as_float(row.get("REJECT_RATE_PCT")), 4),
                    "DEFECT_RATE_PCT": round(_as_float(row.get("DEFECT_RATE_PCT")), 4),
                }
            )
    return {
        "items": items,
        "granularity": normalized_granularity,
        "meta": meta,
    }
def query_reason_pareto(
    *,
    start_date: str,
    end_date: str,
    metric_mode: str = "reject_total",
    pareto_scope: str = "top80",
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> dict[str, Any]:
    """Per-reason Pareto ranking by the chosen metric.

    ``pareto_scope='top80'`` keeps only reasons within the cumulative-80%
    band; ``'all'`` returns everything. Raises ValueError for an invalid
    date range, metric_mode, or pareto_scope.
    """
    _validate_range(start_date, end_date)
    normalized_metric = _normalize_text(metric_mode).lower() or "reject_total"
    if normalized_metric not in VALID_METRIC_MODE:
        raise ValueError("Invalid metric_mode. Use reject_total or defect")
    normalized_scope = _normalize_text(pareto_scope).lower() or "top80"
    if normalized_scope not in {"top80", "all"}:
        raise ValueError("Invalid pareto_scope. Use top80 or all")
    where_clause, params, meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    sql = _prepare_sql(
        "reason_pareto",
        where_clause=where_clause,
        metric_column=_metric_column(normalized_metric),
    )
    df = read_sql_df(sql, _common_params(start_date, end_date, params))
    all_items = []
    if df is not None and not df.empty:
        for _, row in df.iterrows():
            all_items.append(
                {
                    "reason": _normalize_text(row.get("REASON")) or "(未填寫)",
                    "metric_value": _as_float(row.get("METRIC_VALUE")),
                    "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                    "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                    "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                    "count": _as_int(row.get("AFFECTED_LOT_COUNT")),
                    # PCT/CUM_PCT come precomputed from the SQL window functions.
                    "pct": round(_as_float(row.get("PCT")), 4),
                    "cumPct": round(_as_float(row.get("CUM_PCT")), 4),
                }
            )
    items = list(all_items)
    if normalized_scope == "top80" and items:
        top_items = [item for item in items if _as_float(item.get("cumPct")) <= 80.0]
        # Keep strict top-80% behavior, but still return one row when first item already exceeds 80%.
        if not top_items:
            top_items = [items[0]]
        items = top_items
    return {
        "items": items,
        "metric_mode": normalized_metric,
        "pareto_scope": normalized_scope,
        "meta": {
            **meta,
            "total_items_after_filter": len(all_items),
            "displayed_items": len(items),
        },
    }
def query_list(
    *,
    start_date: str,
    end_date: str,
    page: int = 1,
    per_page: int = 50,
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> dict[str, Any]:
    """Paginated detail rows for the reject-history table.

    ``page`` is clamped to >= 1 and ``per_page`` to 1..200. Raises
    ValueError for an invalid date range.
    """
    _validate_range(start_date, end_date)
    page = max(int(page or 1), 1)
    per_page = min(max(int(per_page or 50), 1), 200)
    offset = (page - 1) * per_page
    where_clause, params, meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    sql = _prepare_sql("list", where_clause=where_clause)
    query_params = _common_params(
        start_date,
        end_date,
        {
            **params,
            "offset": offset,
            "limit": per_page,
        },
    )
    df = read_sql_df(sql, query_params)
    items = []
    total = 0
    if df is not None and not df.empty:
        # TOTAL_COUNT is a COUNT(*) OVER () window column, identical on every row.
        total = _as_int(df.iloc[0].get("TOTAL_COUNT"))
        for _, row in df.iterrows():
            items.append(
                {
                    "TXN_DAY": _to_date_str(row.get("TXN_DAY")),
                    "TXN_MONTH": _normalize_text(row.get("TXN_MONTH")),
                    "WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
                    "WORKCENTERNAME": _normalize_text(row.get("WORKCENTERNAME")),
                    "SPECNAME": _normalize_text(row.get("SPECNAME")),
                    "PRODUCTLINENAME": _normalize_text(row.get("PRODUCTLINENAME")),
                    "PJ_TYPE": _normalize_text(row.get("PJ_TYPE")),
                    "LOSSREASONNAME": _normalize_text(row.get("LOSSREASONNAME")),
                    "LOSSREASON_CODE": _normalize_text(row.get("LOSSREASON_CODE")),
                    "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                    "REJECT_QTY": _as_int(row.get("REJECT_QTY")),
                    "STANDBY_QTY": _as_int(row.get("STANDBY_QTY")),
                    "QTYTOPROCESS_QTY": _as_int(row.get("QTYTOPROCESS_QTY")),
                    "INPROCESS_QTY": _as_int(row.get("INPROCESS_QTY")),
                    "PROCESSED_QTY": _as_int(row.get("PROCESSED_QTY")),
                    "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                    "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                    "REJECT_RATE_PCT": round(_as_float(row.get("REJECT_RATE_PCT")), 4),
                    "DEFECT_RATE_PCT": round(_as_float(row.get("DEFECT_RATE_PCT")), 4),
                    "REJECT_SHARE_PCT": round(_as_float(row.get("REJECT_SHARE_PCT")), 4),
                    "AFFECTED_LOT_COUNT": _as_int(row.get("AFFECTED_LOT_COUNT")),
                    "AFFECTED_WORKORDER_COUNT": _as_int(row.get("AFFECTED_WORKORDER_COUNT")),
                }
            )
    # Ceiling division; an empty result still reports one (empty) page.
    total_pages = max((total + per_page - 1) // per_page, 1) if total else 1
    return {
        "items": items,
        "pagination": {
            "page": page,
            "perPage": per_page,
            "total": total,
            "totalPages": total_pages,
        },
        "meta": meta,
    }
def export_csv(
    *,
    start_date: str,
    end_date: str,
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
) -> Generator[str, None, None]:
    """Yield the filtered dataset as CSV text chunks (header first).

    NOTE(review): the whole result set is materialised into ``rows`` before
    streaming starts, so memory scales with the export size — only the HTTP
    response is chunked. Raises ValueError for an invalid date range.
    """
    _validate_range(start_date, end_date)
    where_clause, params, _meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
    )
    sql = _prepare_sql("export", where_clause=where_clause)
    df = read_sql_df(sql, _common_params(start_date, end_date, params))
    rows = []
    if df is not None and not df.empty:
        for _, row in df.iterrows():
            rows.append(
                {
                    "TXN_DAY": _to_date_str(row.get("TXN_DAY")),
                    "TXN_MONTH": _normalize_text(row.get("TXN_MONTH")),
                    "WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
                    "WORKCENTERNAME": _normalize_text(row.get("WORKCENTERNAME")),
                    "SPECNAME": _normalize_text(row.get("SPECNAME")),
                    "PRODUCTLINENAME": _normalize_text(row.get("PRODUCTLINENAME")),
                    "PJ_TYPE": _normalize_text(row.get("PJ_TYPE")),
                    "LOSSREASONNAME": _normalize_text(row.get("LOSSREASONNAME")),
                    "LOSSREASON_CODE": _normalize_text(row.get("LOSSREASON_CODE")),
                    "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                    "REJECT_QTY": _as_int(row.get("REJECT_QTY")),
                    "STANDBY_QTY": _as_int(row.get("STANDBY_QTY")),
                    "QTYTOPROCESS_QTY": _as_int(row.get("QTYTOPROCESS_QTY")),
                    "INPROCESS_QTY": _as_int(row.get("INPROCESS_QTY")),
                    "PROCESSED_QTY": _as_int(row.get("PROCESSED_QTY")),
                    "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                    "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                    "REJECT_RATE_PCT": round(_as_float(row.get("REJECT_RATE_PCT")), 4),
                    "DEFECT_RATE_PCT": round(_as_float(row.get("DEFECT_RATE_PCT")), 4),
                    "REJECT_SHARE_PCT": round(_as_float(row.get("REJECT_SHARE_PCT")), 4),
                    "AFFECTED_LOT_COUNT": _as_int(row.get("AFFECTED_LOT_COUNT")),
                    "AFFECTED_WORKORDER_COUNT": _as_int(row.get("AFFECTED_WORKORDER_COUNT")),
                }
            )
    # CSV column order; must match the keys built above.
    headers = [
        "TXN_DAY",
        "TXN_MONTH",
        "WORKCENTER_GROUP",
        "WORKCENTERNAME",
        "SPECNAME",
        "PRODUCTLINENAME",
        "PJ_TYPE",
        "LOSSREASONNAME",
        "LOSSREASON_CODE",
        "MOVEIN_QTY",
        "REJECT_QTY",
        "STANDBY_QTY",
        "QTYTOPROCESS_QTY",
        "INPROCESS_QTY",
        "PROCESSED_QTY",
        "REJECT_TOTAL_QTY",
        "DEFECT_QTY",
        "REJECT_RATE_PCT",
        "DEFECT_RATE_PCT",
        "REJECT_SHARE_PCT",
        "AFFECTED_LOT_COUNT",
        "AFFECTED_WORKORDER_COUNT",
    ]
    return _list_to_csv(rows, headers=headers)

View File

@@ -0,0 +1,236 @@
# -*- coding: utf-8 -*-
"""Cache for ERP scrap reasons excluded from yield calculations.
Policy source: DWH.ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE (ENABLE_FLAG='Y').
Cache strategy:
- L2 Redis shared cache when available
- L1 process memory cache fallback
- Daily full-table refresh (default every 24 hours)
"""
from __future__ import annotations
import json
import logging
import os
import threading
import time
from datetime import datetime
from typing import Iterable
from mes_dashboard.core.database import read_sql_df
from mes_dashboard.core.redis_client import get_key, get_redis_client, try_acquire_lock, release_lock
logger = logging.getLogger("mes_dashboard.scrap_reason_exclusion_cache")
_REFRESH_SECONDS = max(int(os.getenv("SCRAP_REASON_EXCLUSION_REFRESH_SECONDS", "86400")), 60)
_REDIS_DATA_KEY = get_key("scrap_exclusion:enabled_reasons")
_REDIS_META_KEY = get_key("scrap_exclusion:updated_at")
_LOCK_NAME = "scrap_reason_exclusion_cache_refresh"
_CACHE_LOCK = threading.Lock()
_CACHE: dict[str, object] = {
"reasons": set(),
"updated_at": None,
"loaded": False,
"source": None,
}
_WORKER_THREAD: threading.Thread | None = None
_STOP_EVENT = threading.Event()
def _normalize_reason(value: object) -> str:
return str(value or "").strip().upper()
def _extract_reason_codes(values: Iterable[object]) -> set[str]:
normalized = {_normalize_reason(v) for v in values}
normalized.discard("")
return normalized
def _load_from_redis() -> tuple[set[str], str | None] | None:
    """Read the exclusion set from Redis; returns (reasons, updated_at) or None.

    Returns None when Redis is unavailable, the payload is missing/malformed,
    or the normalized set is empty.

    NOTE(review): an empty cached list is treated as a cache miss, so a
    legitimately empty policy table forces every worker back to Oracle —
    confirm that is intended.
    """
    client = get_redis_client()
    if client is None:
        return None
    try:
        raw = client.get(_REDIS_DATA_KEY)
        if not raw:
            return None
        data = json.loads(raw)
        if not isinstance(data, list):
            return None
        reasons = _extract_reason_codes(data)
        updated = client.get(_REDIS_META_KEY)
        if not reasons:
            return None
        return reasons, updated
    except Exception as exc:  # pragma: no cover - defensive
        logger.warning("Failed to read scrap exclusion cache from Redis: %s", exc)
        return None
def _save_to_redis(reasons: set[str], updated_at: str) -> None:
    """Persist the exclusion set to Redis; failures are logged, never raised."""
    client = get_redis_client()
    if client is None:
        return
    # Keep redis key slightly longer than refresh interval to cover worker restarts.
    ttl = int(_REFRESH_SECONDS * 1.5)
    try:
        client.setex(
            _REDIS_DATA_KEY,
            ttl,
            json.dumps(sorted(reasons), ensure_ascii=False),
        )
        client.setex(_REDIS_META_KEY, ttl, updated_at)
    except Exception as exc:  # pragma: no cover - defensive
        logger.warning("Failed to write scrap exclusion cache to Redis: %s", exc)
def _read_enabled_reasons_from_oracle() -> set[str]:
    """Fetch enabled (ENABLE_FLAG='Y') exclusion reasons from the DWH policy table.

    Returns the normalized (trimmed, uppercased) reason set; empty set when
    the table has no enabled rows.
    """
    sql = """
        SELECT TRIM(REASON_NAME) AS REASON_NAME
        FROM DWH.ERP_PJ_WIP_SCRAP_REASONS_EXCLUDE
        WHERE NVL(ENABLE_FLAG, 'N') = 'Y'
    """
    df = read_sql_df(sql)
    if df is None or df.empty:
        return set()
    return _extract_reason_codes(df.get("REASON_NAME", []))
def _set_local_cache(reasons: set[str], *, source: str, updated_at: str) -> None:
    """Atomically replace the in-process cache snapshot under the lock."""
    snapshot = {
        "reasons": set(reasons),
        "updated_at": updated_at,
        "loaded": True,
        "source": source,
    }
    with _CACHE_LOCK:
        _CACHE.update(snapshot)
def _seconds_since_update() -> float:
    """Age of the local cache in seconds; +inf when never (or badly) stamped."""
    with _CACHE_LOCK:
        stamp = _CACHE.get("updated_at")
    if not isinstance(stamp, str) or not stamp:
        return float("inf")
    try:
        parsed = datetime.fromisoformat(stamp)
    except ValueError:
        return float("inf")
    # Clamp at zero in case of clock skew.
    return max((datetime.now() - parsed).total_seconds(), 0.0)
def refresh_cache(force: bool = False) -> bool:
    """Refresh exclusion policy cache from Oracle.

    Skips the refresh when the local snapshot is still fresh (unless
    ``force``). On Oracle failure it falls back to the Redis payload; when
    both fail, the previous local snapshot (if any) is kept.

    Returns:
        True if cache contains usable data after refresh attempt.
    """
    if not force and _seconds_since_update() < _REFRESH_SECONDS:
        return True
    # Cross-worker lock (fail-open when Redis unavailable).
    if not try_acquire_lock(_LOCK_NAME, ttl_seconds=120):
        logger.debug("Scrap exclusion cache refresh skipped (lock held by another worker)")
        # NOTE(review): get_excluded_reasons() can call back into
        # refresh_cache(); the lock failure above prevents a second refresh
        # here, but confirm there is no pathological recursion.
        return bool(get_excluded_reasons())
    try:
        reasons = _read_enabled_reasons_from_oracle()
        updated_at = datetime.now().isoformat()
        _set_local_cache(reasons, source="oracle", updated_at=updated_at)
        _save_to_redis(reasons, updated_at)
        logger.info("Scrap exclusion cache refreshed: %s reasons", len(reasons))
        return True
    except Exception as exc:
        # Fallback to Redis payload if local refresh fails.
        redis_payload = _load_from_redis()
        if redis_payload is not None:
            reasons, updated = redis_payload
            _set_local_cache(reasons, source="redis", updated_at=updated or datetime.now().isoformat())
            logger.warning("Scrap exclusion cache fallback to Redis: %s", exc)
            return True
        with _CACHE_LOCK:
            loaded = bool(_CACHE.get("loaded"))
        logger.error("Scrap exclusion cache refresh failed: %s", exc)
        return loaded
    finally:
        release_lock(_LOCK_NAME)
def get_excluded_reasons(force_refresh: bool = False) -> set[str]:
    """Get currently cached exclusion reason codes.

    Cold process: tries the Redis snapshot first (fast startup for secondary
    workers), then forces an Oracle refresh. A loaded-but-stale cache also
    triggers a refresh before returning.
    """
    with _CACHE_LOCK:
        loaded = bool(_CACHE.get("loaded"))
        # NOTE(review): this snapshot is never read below — candidate for cleanup.
        reasons = set(_CACHE.get("reasons", set()))
    if force_refresh:
        refresh_cache(force=True)
    elif not loaded:
        # Try Redis before Oracle for fast startup in secondary workers.
        redis_payload = _load_from_redis()
        if redis_payload is not None:
            redis_reasons, updated = redis_payload
            _set_local_cache(
                redis_reasons,
                source="redis",
                updated_at=updated or datetime.now().isoformat(),
            )
            return redis_reasons
        refresh_cache(force=True)
    elif _seconds_since_update() >= _REFRESH_SECONDS:
        refresh_cache(force=True)
    # Return a copy so callers cannot mutate the shared cache set.
    with _CACHE_LOCK:
        return set(_CACHE.get("reasons", set()))
def get_cache_status() -> dict[str, object]:
    """Expose cache diagnostics for health/admin pages."""
    with _CACHE_LOCK:
        cached_reasons = _CACHE.get("reasons", set())
        status = {
            "loaded": bool(_CACHE.get("loaded")),
            "updated_at": _CACHE.get("updated_at"),
            "source": _CACHE.get("source"),
            "reason_count": len(cached_reasons),
            "refresh_interval_seconds": _REFRESH_SECONDS,
        }
    return status
def _worker_loop() -> None:
    """Background loop: force-refresh the cache every ``_REFRESH_SECONDS``."""
    logger.info("Scrap exclusion sync worker started (interval: %ss)", _REFRESH_SECONDS)
    while True:
        if _STOP_EVENT.is_set():
            break
        try:
            refresh_cache(force=True)
        except Exception as exc:  # pragma: no cover - defensive
            logger.warning("Scrap exclusion worker refresh failed: %s", exc)
        # Wakes early when the stop event is set during the wait.
        _STOP_EVENT.wait(_REFRESH_SECONDS)
    logger.info("Scrap exclusion sync worker stopped")
def init_scrap_reason_exclusion_cache(app=None) -> None:
    """Initialize cache and start background sync worker."""
    global _WORKER_THREAD
    refresh_cache(force=False)
    # Tests must stay deterministic: never spawn the background thread there.
    if app is not None and app.config.get("TESTING"):
        return
    # Idempotent: do nothing if a live worker thread already exists.
    if _WORKER_THREAD is not None and _WORKER_THREAD.is_alive():
        return
    _STOP_EVENT.clear()
    worker = threading.Thread(
        target=_worker_loop,
        daemon=True,
        name="scrap-exclusion-cache-sync",
    )
    _WORKER_THREAD = worker
    worker.start()
def stop_scrap_reason_exclusion_cache_worker(timeout: int = 5) -> None:
    """Signal the sync worker to stop and wait for it to exit.

    Args:
        timeout: Maximum seconds to wait for the worker thread to join.
    """
    global _WORKER_THREAD
    if not _WORKER_THREAD:
        return
    _STOP_EVENT.set()
    _WORKER_THREAD.join(timeout=timeout)
    if _WORKER_THREAD.is_alive():
        # Fix: join() can time out; the daemon thread will still die with the
        # process, but log instead of silently dropping the reference so
        # shutdown hangs are diagnosable.
        logger.warning(
            "Scrap exclusion sync worker did not stop within %ss", timeout
        )
    _WORKER_THREAD = None

View File

@@ -0,0 +1,36 @@
-- Reject History Export (Unpaginated)
-- Streams the fully filtered daily dataset for CSV export (no paging).
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }}
SELECT
    b.TXN_DAY,
    b.TXN_MONTH,
    b.WORKCENTER_GROUP,
    b.WORKCENTERNAME,
    b.SPECNAME,
    b.PRODUCTLINENAME,
    b.PJ_TYPE,
    b.LOSSREASONNAME,
    b.LOSSREASON_CODE,
    b.MOVEIN_QTY,
    b.REJECT_QTY,
    b.STANDBY_QTY,
    b.QTYTOPROCESS_QTY,
    b.INPROCESS_QTY,
    b.PROCESSED_QTY,
    b.REJECT_TOTAL_QTY,
    b.DEFECT_QTY,
    b.REJECT_RATE_PCT,
    b.DEFECT_RATE_PCT,
    b.REJECT_SHARE_PCT,
    b.AFFECTED_LOT_COUNT,
    b.AFFECTED_WORKORDER_COUNT
FROM base b
{{ WHERE_CLAUSE }}
ORDER BY
    -- WORKCENTERSEQUENCE_GROUP is not projected but exists on alias b.
    b.TXN_DAY DESC,
    b.WORKCENTERSEQUENCE_GROUP ASC,
    b.WORKCENTERNAME ASC,
    b.REJECT_TOTAL_QTY DESC

View File

@@ -0,0 +1,48 @@
-- Reject History Detail List (Paginated)
-- Returns one page of the filtered daily dataset plus TOTAL_COUNT for paging.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }},
filtered AS (
    SELECT
        b.*,
        -- Window count over the filtered set so pagination needs no second query.
        COUNT(*) OVER () AS TOTAL_COUNT
    FROM base b
    {{ WHERE_CLAUSE }}
)
SELECT
    TXN_DAY,
    TXN_MONTH,
    WORKCENTER_GROUP,
    WORKCENTERSEQUENCE_GROUP,
    WORKCENTERNAME,
    SPECNAME,
    EQUIPMENTNAME,
    PRIMARY_EQUIPMENTNAME,
    PRODUCTLINENAME,
    PJ_TYPE,
    LOSSREASONNAME,
    LOSSREASON_CODE,
    REJECT_EVENT_ROWS,
    AFFECTED_LOT_COUNT,
    AFFECTED_WORKORDER_COUNT,
    MOVEIN_QTY,
    REJECT_QTY,
    REJECT_TOTAL_QTY,
    DEFECT_QTY,
    STANDBY_QTY,
    QTYTOPROCESS_QTY,
    INPROCESS_QTY,
    PROCESSED_QTY,
    REJECT_RATE_PCT,
    DEFECT_RATE_PCT,
    REJECT_SHARE_PCT,
    TOTAL_COUNT
FROM filtered
ORDER BY
    TXN_DAY DESC,
    WORKCENTERSEQUENCE_GROUP ASC,
    WORKCENTERNAME ASC,
    REJECT_TOTAL_QTY DESC
OFFSET :offset ROWS FETCH NEXT :limit ROWS ONLY

View File

@@ -0,0 +1,19 @@
-- Reject History Material Reason Option
-- Returns HAS_MATERIAL = 1 when any MATERIAL-object scrap/defect quantity
-- exists in the filtered range (used to decide whether to surface the
-- synthetic material reason option in the UI).
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }}
SELECT
    CASE
        WHEN SUM(
            CASE
                WHEN UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) = 'MATERIAL'
                THEN NVL(b.REJECT_TOTAL_QTY, 0) + NVL(b.DEFECT_QTY, 0)
                ELSE 0
            END
        ) > 0 THEN 1
        ELSE 0
    END AS HAS_MATERIAL
FROM base b
{{ WHERE_CLAUSE }}

View File

@@ -0,0 +1,15 @@
-- Reject History Package Options
-- Distinct product lines (packages) having non-zero scrap or defect quantity.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }}
SELECT
    b.PRODUCTLINENAME AS PACKAGE,
    SUM(b.REJECT_TOTAL_QTY) AS REJECT_TOTAL_QTY,
    SUM(b.DEFECT_QTY) AS DEFECT_QTY
FROM base b
{{ WHERE_CLAUSE }}
GROUP BY b.PRODUCTLINENAME
-- Drop packages with no activity so option lists stay short.
HAVING SUM(b.REJECT_TOTAL_QTY) > 0 OR SUM(b.DEFECT_QTY) > 0
ORDER BY PACKAGE

View File

@@ -35,6 +35,7 @@ reject_raw AS (
NVL(TRIM(r.PJ_WORKORDER), TRIM(c.MFGORDERNAME)) AS PJ_WORKORDER, NVL(TRIM(r.PJ_WORKORDER), TRIM(c.MFGORDERNAME)) AS PJ_WORKORDER,
NVL(TRIM(c.PJ_TYPE), '(NA)') AS PJ_TYPE, NVL(TRIM(c.PJ_TYPE), '(NA)') AS PJ_TYPE,
NVL(TRIM(c.PRODUCTLINENAME), '(NA)') AS PRODUCTLINENAME, NVL(TRIM(c.PRODUCTLINENAME), '(NA)') AS PRODUCTLINENAME,
NVL(TRIM(c.OBJECTTYPE), '(NA)') AS SCRAP_OBJECTTYPE,
NVL(TRIM(r.WORKCENTERNAME), '(NA)') AS WORKCENTERNAME, NVL(TRIM(r.WORKCENTERNAME), '(NA)') AS WORKCENTERNAME,
NVL(TRIM(wm.WORKCENTER_GROUP), NVL(TRIM(r.WORKCENTERNAME), '(NA)')) AS WORKCENTER_GROUP, NVL(TRIM(wm.WORKCENTER_GROUP), NVL(TRIM(r.WORKCENTERNAME), '(NA)')) AS WORKCENTER_GROUP,
NVL(wm.WORKCENTERSEQUENCE_GROUP, 999) AS WORKCENTERSEQUENCE_GROUP, NVL(wm.WORKCENTERSEQUENCE_GROUP, 999) AS WORKCENTERSEQUENCE_GROUP,
@@ -45,6 +46,10 @@ reject_raw AS (
NVL(TRIM(r.EQUIPMENTNAME), '(NA)') NVL(TRIM(r.EQUIPMENTNAME), '(NA)')
) AS PRIMARY_EQUIPMENTNAME, ) AS PRIMARY_EQUIPMENTNAME,
NVL(TRIM(r.LOSSREASONNAME), '(未填寫)') AS LOSSREASONNAME, NVL(TRIM(r.LOSSREASONNAME), '(未填寫)') AS LOSSREASONNAME,
NVL(
TRIM(REGEXP_SUBSTR(NVL(TRIM(r.LOSSREASONNAME), '(未填寫)'), '^[^_[:space:]-]+')),
NVL(TRIM(r.LOSSREASONNAME), '(未填寫)')
) AS LOSSREASON_CODE,
NVL(TRIM(r.REJECTCATEGORYNAME), '(未填寫)') AS REJECTCATEGORYNAME, NVL(TRIM(r.REJECTCATEGORYNAME), '(未填寫)') AS REJECTCATEGORYNAME,
NVL(r.MOVEINQTY, 0) AS MOVEINQTY, NVL(r.MOVEINQTY, 0) AS MOVEINQTY,
NVL(r.REJECTQTY, 0) AS REJECT_QTY, NVL(r.REJECTQTY, 0) AS REJECT_QTY,
@@ -84,8 +89,10 @@ daily_agg AS (
EQUIPMENTNAME, EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME, PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME, PRODUCTLINENAME,
SCRAP_OBJECTTYPE,
PJ_TYPE, PJ_TYPE,
LOSSREASONNAME, LOSSREASONNAME,
LOSSREASON_CODE,
REJECTCATEGORYNAME, REJECTCATEGORYNAME,
COUNT(*) AS REJECT_EVENT_ROWS, COUNT(*) AS REJECT_EVENT_ROWS,
COUNT(DISTINCT CONTAINERID) AS AFFECTED_LOT_COUNT, COUNT(DISTINCT CONTAINERID) AS AFFECTED_LOT_COUNT,
@@ -109,8 +116,10 @@ daily_agg AS (
EQUIPMENTNAME, EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME, PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME, PRODUCTLINENAME,
SCRAP_OBJECTTYPE,
PJ_TYPE, PJ_TYPE,
LOSSREASONNAME, LOSSREASONNAME,
LOSSREASON_CODE,
REJECTCATEGORYNAME REJECTCATEGORYNAME
) )
SELECT SELECT
@@ -123,8 +132,10 @@ SELECT
EQUIPMENTNAME, EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME, PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME, PRODUCTLINENAME,
SCRAP_OBJECTTYPE,
PJ_TYPE, PJ_TYPE,
LOSSREASONNAME, LOSSREASONNAME,
LOSSREASON_CODE,
REJECTCATEGORYNAME, REJECTCATEGORYNAME,
REJECT_EVENT_ROWS, REJECT_EVENT_ROWS,
AFFECTED_LOT_COUNT, AFFECTED_LOT_COUNT,
@@ -150,4 +161,3 @@ SELECT
ELSE ROUND(REJECT_TOTAL_QTY * 100 / (REJECT_TOTAL_QTY + DEFECT_QTY), 4) ELSE ROUND(REJECT_TOTAL_QTY * 100 / (REJECT_TOTAL_QTY + DEFECT_QTY), 4)
END AS REJECT_SHARE_PCT END AS REJECT_SHARE_PCT
FROM daily_agg FROM daily_agg
ORDER BY TXN_DAY DESC, WORKCENTERSEQUENCE_GROUP, WORKCENTERNAME, DEFECT_QTY DESC

View File

@@ -0,0 +1,14 @@
-- Reject History Reason Options
-- Distinct loss reasons having non-zero scrap or defect quantity.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }}
SELECT
    b.LOSSREASONNAME AS REASON,
    SUM(b.REJECT_TOTAL_QTY) AS REJECT_TOTAL_QTY,
    SUM(b.DEFECT_QTY) AS DEFECT_QTY
FROM base b
{{ WHERE_CLAUSE }}
GROUP BY b.LOSSREASONNAME
-- Drop reasons with no activity so option lists stay short.
HAVING SUM(b.REJECT_TOTAL_QTY) > 0 OR SUM(b.DEFECT_QTY) > 0
ORDER BY REASON

View File

@@ -0,0 +1,41 @@
-- Reject History Reason Pareto
-- Per-reason totals with percentage-of-total (PCT) and running cumulative
-- percentage (CUM_PCT), ordered by the chosen metric descending.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   METRIC_COLUMN (metric expression: b.REJECT_TOTAL_QTY or b.DEFECT_QTY)
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }},
reason_agg AS (
    SELECT
        b.LOSSREASONNAME AS REASON,
        SUM({{ METRIC_COLUMN }}) AS METRIC_VALUE,
        SUM(b.MOVEIN_QTY) AS MOVEIN_QTY,
        SUM(b.REJECT_TOTAL_QTY) AS REJECT_TOTAL_QTY,
        SUM(b.DEFECT_QTY) AS DEFECT_QTY,
        SUM(b.AFFECTED_LOT_COUNT) AS AFFECTED_LOT_COUNT
    FROM base b
    {{ WHERE_CLAUSE }}
    GROUP BY
        b.LOSSREASONNAME
    HAVING SUM({{ METRIC_COLUMN }}) > 0
)
SELECT
    REASON,
    METRIC_VALUE,
    MOVEIN_QTY,
    REJECT_TOTAL_QTY,
    DEFECT_QTY,
    AFFECTED_LOT_COUNT,
    -- NULLIF guards against a zero grand total (all rows filtered out).
    ROUND(
        METRIC_VALUE * 100 / NULLIF(SUM(METRIC_VALUE) OVER (), 0),
        4
    ) AS PCT,
    -- Running total in the same order as the final result set; REASON is the
    -- deterministic tie-breaker for equal metric values.
    ROUND(
        SUM(METRIC_VALUE) OVER (
            ORDER BY METRIC_VALUE DESC, REASON
            ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
        ) * 100 / NULLIF(SUM(METRIC_VALUE) OVER (), 0),
        4
    ) AS CUM_PCT
FROM reason_agg
ORDER BY METRIC_VALUE DESC, REASON

View File

@@ -0,0 +1,31 @@
-- Reject History Summary
-- Single-row KPI aggregate over the filtered base dataset.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }}
SELECT
    NVL(SUM(b.MOVEIN_QTY), 0) AS MOVEIN_QTY,
    NVL(SUM(b.REJECT_TOTAL_QTY), 0) AS REJECT_TOTAL_QTY,
    NVL(SUM(b.DEFECT_QTY), 0) AS DEFECT_QTY,
    -- All rate columns return 0 instead of dividing by zero.
    CASE
        WHEN NVL(SUM(b.MOVEIN_QTY), 0) = 0 THEN 0
        ELSE ROUND(NVL(SUM(b.REJECT_TOTAL_QTY), 0) * 100 / SUM(b.MOVEIN_QTY), 4)
    END AS REJECT_RATE_PCT,
    CASE
        WHEN NVL(SUM(b.MOVEIN_QTY), 0) = 0 THEN 0
        ELSE ROUND(NVL(SUM(b.DEFECT_QTY), 0) * 100 / SUM(b.MOVEIN_QTY), 4)
    END AS DEFECT_RATE_PCT,
    CASE
        WHEN NVL(SUM(b.REJECT_TOTAL_QTY), 0) + NVL(SUM(b.DEFECT_QTY), 0) = 0 THEN 0
        ELSE ROUND(
            NVL(SUM(b.REJECT_TOTAL_QTY), 0) * 100
            / (NVL(SUM(b.REJECT_TOTAL_QTY), 0) + NVL(SUM(b.DEFECT_QTY), 0)),
            4
        )
    END AS REJECT_SHARE_PCT,
    -- NOTE(review): the bucket key relies on implicit TXN_DAY date-to-string
    -- conversion (session NLS date format) -- confirm this is intended.
    COUNT(DISTINCT b.WORKCENTER_GROUP || ':' || b.WORKCENTERNAME || ':' || b.SPECNAME || ':' || b.TXN_DAY) AS AFFECTED_EVENT_BUCKET_COUNT,
    NVL(SUM(b.AFFECTED_LOT_COUNT), 0) AS AFFECTED_LOT_COUNT,
    NVL(SUM(b.AFFECTED_WORKORDER_COUNT), 0) AS AFFECTED_WORKORDER_COUNT
FROM base b
{{ WHERE_CLAUSE }}

View File

@@ -0,0 +1,32 @@
-- Reject History Trend
-- Quantities and rates bucketed by day/week/month via BUCKET_EXPR.
-- Template slots:
--   BASE_WITH_CTE (base reject-history daily dataset SQL, exposed as CTE alias "base")
--   BUCKET_EXPR (Oracle date bucket expression, e.g. TRUNC(b.TXN_DAY))
--   WHERE_CLAUSE (QueryBuilder-generated WHERE clause against alias b)
{{ BASE_WITH_CTE }},
trend_raw AS (
    SELECT
        {{ BUCKET_EXPR }} AS BUCKET_DATE,
        SUM(b.MOVEIN_QTY) AS MOVEIN_QTY,
        SUM(b.REJECT_TOTAL_QTY) AS REJECT_TOTAL_QTY,
        SUM(b.DEFECT_QTY) AS DEFECT_QTY
    FROM base b
    {{ WHERE_CLAUSE }}
    GROUP BY {{ BUCKET_EXPR }}
)
SELECT
    BUCKET_DATE,
    MOVEIN_QTY,
    REJECT_TOTAL_QTY,
    DEFECT_QTY,
    -- Rates return 0 instead of dividing by zero for empty buckets.
    CASE
        WHEN MOVEIN_QTY = 0 THEN 0
        ELSE ROUND(REJECT_TOTAL_QTY * 100 / MOVEIN_QTY, 4)
    END AS REJECT_RATE_PCT,
    CASE
        WHEN MOVEIN_QTY = 0 THEN 0
        ELSE ROUND(DEFECT_QTY * 100 / MOVEIN_QTY, 4)
    END AS DEFECT_RATE_PCT
FROM trend_raw
ORDER BY BUCKET_DATE

View File

@@ -55,6 +55,7 @@ class AppFactoryTests(unittest.TestCase):
"/wip-overview", "/wip-overview",
"/wip-detail", "/wip-detail",
"/hold-overview", "/hold-overview",
"/reject-history",
"/excel-query", "/excel-query",
"/query-tool", "/query-tool",
"/tmtt-defect", "/tmtt-defect",
@@ -74,6 +75,7 @@ class AppFactoryTests(unittest.TestCase):
"/api/excel-query/upload", "/api/excel-query/upload",
"/api/query-tool/resolve", "/api/query-tool/resolve",
"/api/tmtt-defect/analysis", "/api/tmtt-defect/analysis",
"/api/reject-history/summary",
} }
missing = expected - rules missing = expected - rules
self.assertFalse(missing, f"Missing routes: {sorted(missing)}") self.assertFalse(missing, f"Missing routes: {sorted(missing)}")

View File

@@ -260,3 +260,133 @@ def test_tmtt_defect_native_smoke_range_query_and_csv_export(client):
export = client.get("/api/tmtt-defect/export?start_date=2026-02-01&end_date=2026-02-11") export = client.get("/api/tmtt-defect/export?start_date=2026-02-01&end_date=2026-02-11")
assert export.status_code == 200 assert export.status_code == 200
assert "text/csv" in export.content_type assert "text/csv" in export.content_type
def test_reject_history_native_smoke_query_sections_and_export(client):
    """End-to-end smoke of the reject-history shell and every API section.

    Service-layer functions are patched, so this exercises only routing,
    parameter handling, and response envelopes.
    """
    _login_as_admin(client)
    shell = client.get("/portal-shell/reject-history?start_date=2026-02-01&end_date=2026-02-11")
    assert shell.status_code == 200
    # Fix: the original if/elif chain re-asserted the already-matched status
    # code in each branch; assert the allowed set once instead.
    page = client.get("/reject-history", follow_redirects=False)
    assert page.status_code in (200, 302), (
        f"unexpected status for /reject-history: {page.status_code}"
    )
    if page.status_code == 302:
        assert page.location.endswith("/portal-shell/reject-history")
    with (
        patch(
            "mes_dashboard.routes.reject_history_routes.get_filter_options",
            return_value={
                "workcenter_groups": [{"name": "WB", "sequence": 1}],
                "reasons": ["R1"],
                "meta": {"include_excluded_scrap": False},
            },
        ),
        patch(
            "mes_dashboard.routes.reject_history_routes.query_summary",
            return_value={
                "MOVEIN_QTY": 100,
                "REJECT_TOTAL_QTY": 10,
                "DEFECT_QTY": 2,
                "REJECT_RATE_PCT": 10.0,
                "DEFECT_RATE_PCT": 2.0,
                "REJECT_SHARE_PCT": 83.3333,
                "AFFECTED_LOT_COUNT": 5,
                "AFFECTED_WORKORDER_COUNT": 3,
                "meta": {"include_excluded_scrap": False},
            },
        ),
        patch(
            "mes_dashboard.routes.reject_history_routes.query_trend",
            return_value={
                "items": [
                    {
                        "bucket_date": "2026-02-01",
                        "MOVEIN_QTY": 100,
                        "REJECT_TOTAL_QTY": 10,
                        "DEFECT_QTY": 2,
                        "REJECT_RATE_PCT": 10.0,
                        "DEFECT_RATE_PCT": 2.0,
                    }
                ],
                "granularity": "day",
                "meta": {"include_excluded_scrap": False},
            },
        ),
        patch(
            "mes_dashboard.routes.reject_history_routes.query_reason_pareto",
            return_value={
                "items": [
                    {
                        "reason": "R1",
                        "category": "CAT1",
                        "metric_value": 10,
                        "pct": 100.0,
                        "cumPct": 100.0,
                    }
                ],
                "metric_mode": "reject_total",
                "pareto_scope": "top80",
                "meta": {"include_excluded_scrap": False},
            },
        ),
        patch(
            "mes_dashboard.routes.reject_history_routes.query_list",
            return_value={
                "items": [
                    {
                        "TXN_DAY": "2026-02-01",
                        "WORKCENTER_GROUP": "WB",
                        "WORKCENTERNAME": "WB01",
                        "LOSSREASONNAME": "R1",
                        "REJECT_TOTAL_QTY": 10,
                        "DEFECT_QTY": 2,
                    }
                ],
                "pagination": {"page": 1, "perPage": 50, "total": 1, "totalPages": 1},
                "meta": {"include_excluded_scrap": False},
            },
        ),
        patch(
            "mes_dashboard.routes.reject_history_routes.export_csv",
            return_value=iter(
                [
                    "TXN_DAY,REJECT_TOTAL_QTY,DEFECT_QTY\n",
                    "2026-02-01,10,2\n",
                ]
            ),
        ),
    ):
        options = client.get("/api/reject-history/options?start_date=2026-02-01&end_date=2026-02-11")
        assert options.status_code == 200
        # Fix: parse each response body once instead of re-calling get_json().
        options_payload = options.get_json()
        assert options_payload["success"] is True
        assert options_payload["data"]["reasons"] == ["R1"]
        summary = client.get("/api/reject-history/summary?start_date=2026-02-01&end_date=2026-02-11")
        assert summary.status_code == 200
        summary_payload = summary.get_json()
        assert summary_payload["success"] is True
        assert summary_payload["data"]["REJECT_TOTAL_QTY"] == 10
        trend = client.get("/api/reject-history/trend?start_date=2026-02-01&end_date=2026-02-11")
        assert trend.status_code == 200
        trend_payload = trend.get_json()
        assert trend_payload["success"] is True
        assert trend_payload["data"]["items"][0]["bucket_date"] == "2026-02-01"
        pareto = client.get("/api/reject-history/reason-pareto?start_date=2026-02-01&end_date=2026-02-11")
        assert pareto.status_code == 200
        pareto_payload = pareto.get_json()
        assert pareto_payload["success"] is True
        assert pareto_payload["data"]["items"][0]["reason"] == "R1"
        detail = client.get("/api/reject-history/list?start_date=2026-02-01&end_date=2026-02-11")
        assert detail.status_code == 200
        detail_payload = detail.get_json()
        assert detail_payload["success"] is True
        assert detail_payload["data"]["pagination"]["total"] == 1
        export = client.get("/api/reject-history/export?start_date=2026-02-01&end_date=2026-02-11")
        assert export.status_code == 200
        assert "text/csv" in export.content_type

View File

@@ -0,0 +1,150 @@
# -*- coding: utf-8 -*-
"""Unit tests for reject-history routes."""
import json
import os
import unittest
from unittest.mock import patch
from mes_dashboard.app import create_app
import mes_dashboard.core.database as db
def _login_as_admin(client):
    """Mark the test client's session as an authenticated admin user."""
    admin_profile = {'displayName': 'Admin', 'employeeNo': 'A001'}
    with client.session_transaction() as sess:
        sess['admin'] = admin_profile
class TestRejectHistoryRoutesBase(unittest.TestCase):
    """Shared setup: fresh app (clean engine cache) plus a test client."""

    def setUp(self):
        # Reset the cached engine so each test builds the app from scratch.
        db._ENGINE = None
        self.app = create_app('testing')
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()
class TestRejectHistoryPageRoute(unittest.TestCase):
    """Fallback rendering of /reject-history when the SPA shell is disabled."""

    @patch.dict(os.environ, {'PORTAL_SPA_ENABLED': 'false'})
    @patch('mes_dashboard.app.os.path.exists', return_value=False)
    def test_reject_history_page_fallback_contains_vite_entry(self, _mock_exists):
        # With the SPA disabled and no built asset on disk, the legacy page
        # must still reference the Vite bundle entry point.
        db._ENGINE = None
        app = create_app('testing')
        app.config['TESTING'] = True
        client = app.test_client()
        _login_as_admin(client)
        response = client.get('/reject-history', follow_redirects=False)
        self.assertEqual(response.status_code, 200)
        html = response.data.decode('utf-8')
        self.assertIn('/static/dist/reject-history.js', html)
class TestRejectHistoryApiRoutes(TestRejectHistoryRoutesBase):
    """Contract tests for /api/reject-history/* with the service layer mocked."""

    def test_summary_missing_dates_returns_400(self):
        # start_date/end_date are mandatory query parameters.
        response = self.client.get('/api/reject-history/summary')
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 400)
        self.assertFalse(payload['success'])

    def test_summary_invalid_include_excluded_scrap_returns_400(self):
        # include_excluded_scrap must parse as a boolean flag.
        response = self.client.get(
            '/api/reject-history/summary?start_date=2026-02-01&end_date=2026-02-07'
            '&include_excluded_scrap=invalid'
        )
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 400)
        self.assertFalse(payload['success'])

    def test_summary_invalid_exclude_material_scrap_returns_400(self):
        # exclude_material_scrap must parse as a boolean flag.
        response = self.client.get(
            '/api/reject-history/summary?start_date=2026-02-01&end_date=2026-02-07'
            '&exclude_material_scrap=invalid'
        )
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 400)
        self.assertFalse(payload['success'])

    @patch('mes_dashboard.routes.reject_history_routes.query_summary')
    def test_summary_passes_filters_and_meta(self, mock_summary):
        """Query-string filters are forwarded verbatim to the service layer."""
        mock_summary.return_value = {
            'MOVEIN_QTY': 100,
            'REJECT_TOTAL_QTY': 10,
            'DEFECT_QTY': 5,
            'REJECT_RATE_PCT': 10,
            'DEFECT_RATE_PCT': 5,
            'REJECT_SHARE_PCT': 66.7,
            'AFFECTED_LOT_COUNT': 8,
            'AFFECTED_WORKORDER_COUNT': 4,
            'meta': {
                'include_excluded_scrap': False,
                'exclusion_applied': True,
                'excluded_reason_count': 2,
            },
        }
        response = self.client.get(
            '/api/reject-history/summary?start_date=2026-02-01&end_date=2026-02-07'
            '&workcenter_groups=WB&packages=PKG-A&reasons=R1&reasons=R2'
        )
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(payload['success'])
        self.assertEqual(payload['meta']['include_excluded_scrap'], False)
        # Verify the keyword arguments the route handed to query_summary.
        _, kwargs = mock_summary.call_args
        self.assertEqual(kwargs['workcenter_groups'], ['WB'])
        self.assertEqual(kwargs['packages'], ['PKG-A'])
        self.assertEqual(kwargs['reasons'], ['R1', 'R2'])
        self.assertIs(kwargs['include_excluded_scrap'], False)
        self.assertIs(kwargs['exclude_material_scrap'], True)

    @patch('mes_dashboard.routes.reject_history_routes.query_trend')
    def test_trend_invalid_granularity_returns_400(self, mock_trend):
        # A service-layer ValueError surfaces as a 400 response.
        mock_trend.side_effect = ValueError('Invalid granularity. Use day, week, or month')
        response = self.client.get(
            '/api/reject-history/trend?start_date=2026-02-01&end_date=2026-02-07&granularity=hour'
        )
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 400)
        self.assertFalse(payload['success'])

    @patch('mes_dashboard.routes.reject_history_routes.query_reason_pareto')
    def test_reason_pareto_defaults_top80(self, mock_pareto):
        # Without explicit params the route defaults to top80 / reject_total.
        mock_pareto.return_value = {'items': [], 'metric_mode': 'reject_total', 'pareto_scope': 'top80', 'meta': {}}
        response = self.client.get('/api/reject-history/reason-pareto?start_date=2026-02-01&end_date=2026-02-07')
        self.assertEqual(response.status_code, 200)
        _, kwargs = mock_pareto.call_args
        self.assertEqual(kwargs['pareto_scope'], 'top80')
        self.assertEqual(kwargs['metric_mode'], 'reject_total')

    @patch('mes_dashboard.routes.reject_history_routes.query_list')
    @patch('mes_dashboard.core.rate_limit.check_and_record', return_value=(True, 6))
    def test_list_rate_limited_returns_429(self, _mock_limit, mock_list):
        # Rate limiter short-circuits before the service is ever called.
        response = self.client.get('/api/reject-history/list?start_date=2026-02-01&end_date=2026-02-07')
        payload = json.loads(response.data)
        self.assertEqual(response.status_code, 429)
        self.assertEqual(payload['error']['code'], 'TOO_MANY_REQUESTS')
        self.assertEqual(response.headers.get('Retry-After'), '6')
        mock_list.assert_not_called()

    @patch('mes_dashboard.routes.reject_history_routes.export_csv')
    def test_export_returns_csv_response(self, mock_export):
        # Export streams CSV with a date-stamped attachment filename.
        mock_export.return_value = iter(['A,B\n', '1,2\n'])
        response = self.client.get('/api/reject-history/export?start_date=2026-02-01&end_date=2026-02-07')
        self.assertEqual(response.status_code, 200)
        self.assertIn('attachment; filename=reject_history_2026-02-01_to_2026-02-07.csv', response.headers.get('Content-Disposition', ''))
        self.assertIn('text/csv', response.headers.get('Content-Type', ''))
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()

View File

@@ -0,0 +1,360 @@
# -*- coding: utf-8 -*-
"""Unit tests for reject_history_service."""
from __future__ import annotations
import pandas as pd
import pytest
from mes_dashboard.services import reject_history_service as svc
def test_query_summary_returns_metrics_and_policy_meta(monkeypatch):
    """Summary returns KPI fields plus exclusion-policy metadata and SQL binds."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: {"358"})
    captured = {}

    def _fake_read_sql_df(_sql, params=None):
        # Capture bind parameters so they can be asserted after the call.
        captured["params"] = dict(params or {})
        return pd.DataFrame(
            [
                {
                    "MOVEIN_QTY": 1000,
                    "REJECT_TOTAL_QTY": 25,
                    "DEFECT_QTY": 10,
                    "REJECT_RATE_PCT": 2.5,
                    "DEFECT_RATE_PCT": 1.0,
                    "REJECT_SHARE_PCT": 71.4286,
                    "AFFECTED_LOT_COUNT": 12,
                    "AFFECTED_WORKORDER_COUNT": 7,
                }
            ]
        )

    monkeypatch.setattr(svc, "read_sql_df", _fake_read_sql_df)
    result = svc.query_summary(
        start_date="2026-02-01",
        end_date="2026-02-07",
        include_excluded_scrap=False,
    )
    assert result["MOVEIN_QTY"] == 1000
    assert result["REJECT_TOTAL_QTY"] == 25
    assert result["DEFECT_QTY"] == 10
    assert result["AFFECTED_LOT_COUNT"] == 12
    assert result["meta"]["include_excluded_scrap"] is False
    assert result["meta"]["exclusion_applied"] is True
    assert result["meta"]["excluded_reason_count"] == 1
    # Date range and the excluded reason "358" must reach the SQL binds.
    assert captured["params"]["start_date"] == "2026-02-01"
    assert captured["params"]["end_date"] == "2026-02-07"
    assert "358" in captured["params"].values()
def test_query_summary_include_override_skips_exclusion_filter(monkeypatch):
    """include_excluded_scrap=True disables the exclusion filter and its binds."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: {"358", "REASON_X"})
    captured = {}

    def _fake_read_sql_df(_sql, params=None):
        # Capture bind parameters so they can be asserted after the call.
        captured["params"] = dict(params or {})
        return pd.DataFrame(
            [
                {
                    "MOVEIN_QTY": 1000,
                    "REJECT_TOTAL_QTY": 25,
                    "DEFECT_QTY": 10,
                    "REJECT_RATE_PCT": 2.5,
                    "DEFECT_RATE_PCT": 1.0,
                    "REJECT_SHARE_PCT": 71.4286,
                    "AFFECTED_LOT_COUNT": 12,
                    "AFFECTED_WORKORDER_COUNT": 7,
                }
            ]
        )

    monkeypatch.setattr(svc, "read_sql_df", _fake_read_sql_df)
    result = svc.query_summary(
        start_date="2026-02-01",
        end_date="2026-02-07",
        include_excluded_scrap=True,
    )
    assert result["meta"]["include_excluded_scrap"] is True
    assert result["meta"]["exclusion_applied"] is False
    assert result["meta"]["excluded_reason_count"] == 0
    # No excluded-reason values should be bound when the override is on.
    assert "358" not in captured["params"].values()
    assert "REASON_X" not in captured["params"].values()
def test_build_where_clause_applies_reason_prefix_policy_by_default(monkeypatch):
    """Default mode keeps the numeric reason-name prefix policy active."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    clause, _bind_params, meta = svc._build_where_clause(include_excluded_scrap=False)
    expected_fragments = (
        "REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^[0-9]{3}_')",
        "NOT REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^(XXX|ZZZ)_')",
    )
    for fragment in expected_fragments:
        assert fragment in clause
    assert meta["reason_name_prefix_policy_applied"] is True
    assert meta["exclusion_applied"] is True
def test_build_where_clause_include_override_skips_reason_prefix_policy(monkeypatch):
    """include_excluded_scrap=True drops prefix policy and exclusion binds."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: {"358"})
    clause, binds, meta = svc._build_where_clause(
        include_excluded_scrap=True,
        packages=["PKG-A"],
    )
    banned_fragments = (
        "REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^[0-9]{3}_')",
        "NOT REGEXP_LIKE(UPPER(NVL(TRIM(b.LOSSREASONNAME), '')), '^(XXX|ZZZ)_')",
    )
    for fragment in banned_fragments:
        assert fragment not in clause
    assert meta["reason_name_prefix_policy_applied"] is False
    assert meta["exclusion_applied"] is False
    assert meta["package_filter_count"] == 1
    assert "358" not in binds.values()
def test_get_filter_options_includes_packages(monkeypatch):
    """Filter options include reasons, packages, and ordered workcenter groups."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    monkeypatch.setattr(
        svc,
        "get_workcenter_groups",
        lambda: [
            {"name": "WB", "sequence": 1},
            {"name": "FA", "sequence": 2},
        ],
    )

    def _fake_read_sql_df(sql, _params=None):
        # Dispatch on a marker column alias to emulate each options query.
        if "AS REASON" in sql:
            return pd.DataFrame([{"REASON": "R1"}, {"REASON": "R2"}])
        if "AS PACKAGE" in sql:
            return pd.DataFrame([{"PACKAGE": "PKG-A"}, {"PACKAGE": "PKG-B"}])
        return pd.DataFrame()

    monkeypatch.setattr(svc, "read_sql_df", _fake_read_sql_df)
    result = svc.get_filter_options(
        start_date="2026-02-01",
        end_date="2026-02-07",
        include_excluded_scrap=False,
    )
    assert result["reasons"] == ["R1", "R2"]
    assert result["packages"] == ["PKG-A", "PKG-B"]
    assert result["workcenter_groups"][0]["name"] == "WB"
def test_get_filter_options_appends_material_reason_option(monkeypatch):
    """The synthetic material option appears when material scrap exists."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    monkeypatch.setattr(svc, "get_workcenter_groups", lambda: [])

    def _fake_read_sql_df(sql, _params=None):
        # Dispatch on a marker column alias to emulate each options query.
        if "AS REASON" in sql:
            return pd.DataFrame([{"REASON": "001_TEST"}])
        if "AS PACKAGE" in sql:
            return pd.DataFrame([{"PACKAGE": "PKG-A"}])
        if "AS HAS_MATERIAL" in sql:
            return pd.DataFrame([{"HAS_MATERIAL": 1}])
        return pd.DataFrame()

    monkeypatch.setattr(svc, "read_sql_df", _fake_read_sql_df)
    result = svc.get_filter_options(start_date="2026-02-01", end_date="2026-02-07")
    assert svc.MATERIAL_REASON_OPTION in result["reasons"]
def test_build_where_clause_with_material_reason_adds_objecttype_condition(monkeypatch):
    """Selecting the synthetic material reason adds an OBJECTTYPE predicate."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    clause, _binds, meta = svc._build_where_clause(reasons=[svc.MATERIAL_REASON_OPTION])
    material_predicate = "UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) = 'MATERIAL'"
    assert material_predicate in clause
    assert meta["material_reason_selected"] is True
def test_build_where_clause_exclude_material_scrap_adds_not_material_condition(monkeypatch):
    """exclude_material_scrap=True filters MATERIAL-object scrap rows out."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    clause, _binds, meta = svc._build_where_clause(exclude_material_scrap=True)
    not_material_predicate = "UPPER(NVL(TRIM(b.SCRAP_OBJECTTYPE), '-')) <> 'MATERIAL'"
    assert not_material_predicate in clause
    assert meta["exclude_material_scrap"] is True
    assert meta["material_exclusion_applied"] is True
def test_sql_template_replacement_does_not_introduce_fake_bind_placeholders():
    """Rendered SQL must not contain leftover tokens that look like Oracle binds."""
    rendered = svc._prepare_sql(
        "summary",
        where_clause="WHERE 1=1",
        bucket_expr="TRUNC(b.TXN_DAY)",
        metric_column="b.REJECT_TOTAL_QTY",
    )
    for bogus_bind in (":BASE", ":WHERE", ":BUCKET", ":METRIC"):
        assert bogus_bind not in rendered
def test_base_with_cte_sql_flattens_nested_with(monkeypatch):
    """Nested WITH in the base SQL is flattened rather than wrapped again."""
    monkeypatch.setattr(
        svc,
        "_load_sql",
        lambda name: (
            "-- comment line\n"
            "WITH c1 AS (SELECT 1 AS X FROM DUAL),\n"
            "c2 AS (SELECT X FROM c1)\n"
            "SELECT X FROM c2"
        )
        if name == "performance_daily"
        else "",
    )
    rendered = svc._base_with_cte_sql("base")
    # Existing CTEs are hoisted; the final SELECT becomes the "base" CTE.
    assert rendered.startswith("WITH c1 AS")
    assert "base AS (\nSELECT X FROM c2\n)" in rendered
    # The illegal nested form "WITH base AS (WITH ...)" must never appear.
    assert "WITH base AS (\nWITH c1" not in rendered
def test_query_trend_invalid_granularity_raises():
    """Unsupported granularity values are rejected with a ValueError."""
    with pytest.raises(ValueError, match="Invalid granularity"):
        svc.query_trend(
            start_date="2026-02-01",
            end_date="2026-02-07",
            granularity="hour",
        )
def test_query_reason_pareto_top80_scope(monkeypatch):
    """top80 keeps rows while CUM_PCT stays within 80%; "all" keeps everything."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    monkeypatch.setattr(
        svc,
        "read_sql_df",
        lambda _sql, _params=None: pd.DataFrame(
            [
                {"REASON": "R1", "CATEGORY": "C1", "METRIC_VALUE": 50, "MOVEIN_QTY": 100, "REJECT_TOTAL_QTY": 50, "DEFECT_QTY": 0, "AFFECTED_LOT_COUNT": 10, "PCT": 50, "CUM_PCT": 50},
                {"REASON": "R2", "CATEGORY": "C1", "METRIC_VALUE": 29, "MOVEIN_QTY": 100, "REJECT_TOTAL_QTY": 29, "DEFECT_QTY": 0, "AFFECTED_LOT_COUNT": 8, "PCT": 29, "CUM_PCT": 79},
                {"REASON": "R3", "CATEGORY": "C2", "METRIC_VALUE": 13, "MOVEIN_QTY": 100, "REJECT_TOTAL_QTY": 13, "DEFECT_QTY": 0, "AFFECTED_LOT_COUNT": 6, "PCT": 13, "CUM_PCT": 92},
                {"REASON": "R4", "CATEGORY": "C3", "METRIC_VALUE": 8, "MOVEIN_QTY": 100, "REJECT_TOTAL_QTY": 8, "DEFECT_QTY": 0, "AFFECTED_LOT_COUNT": 5, "PCT": 8, "CUM_PCT": 100},
            ]
        ),
    )
    top80 = svc.query_reason_pareto(
        start_date="2026-02-01",
        end_date="2026-02-07",
        metric_mode="reject_total",
        pareto_scope="top80",
    )
    # R1 (cum 50) and R2 (cum 79) are within 80%; R3 (cum 92) is cut off.
    assert len(top80["items"]) == 2
    assert top80["items"][-1]["reason"] == "R2"
    # The raw CATEGORY column is not surfaced on top80 items.
    assert "category" not in top80["items"][0]
    all_items = svc.query_reason_pareto(
        start_date="2026-02-01",
        end_date="2026-02-07",
        metric_mode="reject_total",
        pareto_scope="all",
    )
    assert len(all_items["items"]) == 4
def test_query_list_pagination_and_caps(monkeypatch):
    """per_page is capped at 200 and offset/limit binds follow the cap."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    captured = {}

    def _fake_read_sql_df(_sql, params=None):
        # Capture bind parameters so pagination binds can be asserted.
        captured["params"] = dict(params or {})
        return pd.DataFrame(
            [
                {
                    "TXN_DAY": "2026-02-03",
                    "TXN_MONTH": "2026-02",
                    "WORKCENTER_GROUP": "WB",
                    "WORKCENTERNAME": "WB01",
                    "SPECNAME": "S1",
                    "PRODUCTLINENAME": "P1",
                    "PJ_TYPE": "TYPE1",
                    "LOSSREASONNAME": "R1",
                    "LOSSREASON_CODE": "001",
                    "REJECTCATEGORYNAME": "CAT",
                    "MOVEIN_QTY": 100,
                    "REJECT_QTY": 3,
                    "STANDBY_QTY": 1,
                    "QTYTOPROCESS_QTY": 1,
                    "INPROCESS_QTY": 1,
                    "PROCESSED_QTY": 1,
                    "REJECT_TOTAL_QTY": 7,
                    "DEFECT_QTY": 2,
                    "REJECT_RATE_PCT": 7,
                    "DEFECT_RATE_PCT": 2,
                    "REJECT_SHARE_PCT": 77.777,
                    "AFFECTED_LOT_COUNT": 3,
                    "AFFECTED_WORKORDER_COUNT": 2,
                    "TOTAL_COUNT": 12,
                }
            ]
        )

    monkeypatch.setattr(svc, "read_sql_df", _fake_read_sql_df)
    result = svc.query_list(
        start_date="2026-02-01",
        end_date="2026-02-07",
        page=2,
        per_page=500,
        packages=["PKG1"],
    )
    # per_page=500 is clamped to 200; page 2 therefore starts at offset 200.
    assert result["pagination"]["page"] == 2
    assert result["pagination"]["perPage"] == 200
    assert result["pagination"]["total"] == 12
    assert result["pagination"]["totalPages"] == 1
    assert captured["params"]["offset"] == 200
    assert captured["params"]["limit"] == 200
    assert "PKG1" in captured["params"].values()
def test_export_csv_contains_semantic_headers(monkeypatch):
    """Exported CSV stream contains the metric headers and row data."""
    monkeypatch.setattr(svc, "get_excluded_reasons", lambda force_refresh=False: set())
    monkeypatch.setattr(
        svc,
        "read_sql_df",
        lambda _sql, _params=None: pd.DataFrame(
            [
                {
                    "TXN_DAY": "2026-02-03",
                    "TXN_MONTH": "2026-02",
                    "WORKCENTER_GROUP": "WB",
                    "WORKCENTERNAME": "WB01",
                    "SPECNAME": "S1",
                    "PRODUCTLINENAME": "P1",
                    "PJ_TYPE": "TYPE1",
                    "LOSSREASONNAME": "R1",
                    "LOSSREASON_CODE": "001",
                    "REJECTCATEGORYNAME": "CAT",
                    "MOVEIN_QTY": 100,
                    "REJECT_QTY": 3,
                    "STANDBY_QTY": 1,
                    "QTYTOPROCESS_QTY": 1,
                    "INPROCESS_QTY": 1,
                    "PROCESSED_QTY": 1,
                    "REJECT_TOTAL_QTY": 7,
                    "DEFECT_QTY": 2,
                    "REJECT_RATE_PCT": 7,
                    "DEFECT_RATE_PCT": 2,
                    "REJECT_SHARE_PCT": 77.777,
                    "AFFECTED_LOT_COUNT": 3,
                    "AFFECTED_WORKORDER_COUNT": 2,
                }
            ]
        ),
    )
    # export_csv yields the CSV incrementally; join the chunks to inspect.
    chunks = list(
        svc.export_csv(
            start_date="2026-02-01",
            end_date="2026-02-07",
        )
    )
    payload = "".join(chunks)
    assert "REJECT_TOTAL_QTY" in payload
    assert "DEFECT_QTY" in payload
    assert "2026-02-03" in payload

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
"""Governance coverage tests for reject-history shell integration."""
from __future__ import annotations
import json
from pathlib import Path
# Repository-relative paths to the governance artifacts verified below.
ROOT = Path(__file__).resolve().parents[1]
ROUTE_CONTRACTS_FILE = ROOT / 'frontend' / 'src' / 'portal-shell' / 'routeContracts.js'
NATIVE_REGISTRY_FILE = ROOT / 'frontend' / 'src' / 'portal-shell' / 'nativeModuleRegistry.js'
PAGE_STATUS_FILE = ROOT / 'data' / 'page_status.json'
def test_reject_history_route_contract_entry_exists():
    """routeContracts.js must declare the reject-history shell route."""
    contract_source = ROUTE_CONTRACTS_FILE.read_text(encoding='utf-8')
    required_markers = (
        "'/reject-history'",
        "routeId: 'reject-history'",
        "title: '報廢歷史查詢'",
    )
    for marker in required_markers:
        assert marker in contract_source
def test_reject_history_native_loader_entry_exists():
    """nativeModuleRegistry.js must lazy-load the reject-history Vue app."""
    registry_source = NATIVE_REGISTRY_FILE.read_text(encoding='utf-8')
    for marker in ("'/reject-history'", "import('../reject-history/App.vue')"):
        assert marker in registry_source
def test_reject_history_page_status_entry_exists():
    """page_status.json must register /reject-history with drawer and order."""
    payload = json.loads(PAGE_STATUS_FILE.read_text(encoding='utf-8'))
    entry = None
    for item in payload.get('pages', []):
        if item.get('route') == '/reject-history':
            entry = item
            break
    assert entry is not None
    assert entry.get('drawer_id')
    assert isinstance(entry.get('order'), int)

View File

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""Tests for scrap_reason_exclusion_cache service."""
from __future__ import annotations
import json
from unittest.mock import MagicMock
import pandas as pd
from mes_dashboard.services import scrap_reason_exclusion_cache as cache
def _reset_cache_state():
    """Return the module-level cache to its pristine (unloaded) shape."""
    with cache._CACHE_LOCK:
        cache._CACHE.update(
            reasons=set(),
            updated_at=None,
            loaded=False,
            source=None,
        )
def test_refresh_cache_loads_enabled_reason_codes(monkeypatch):
    """Oracle rows are normalized: trimmed and upper-cased reason codes."""
    _reset_cache_state()
    monkeypatch.setattr(cache, "try_acquire_lock", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(cache, "release_lock", lambda *_args, **_kwargs: None)
    # No Redis: the refresh must come straight from the Oracle query.
    monkeypatch.setattr(cache, "get_redis_client", lambda: None)
    monkeypatch.setattr(
        cache,
        "read_sql_df",
        lambda _sql: pd.DataFrame({"REASON_NAME": ["358", " 160 ", "bonus_adjust"]}),
    )
    assert cache.refresh_cache(force=True) is True
    assert cache.get_excluded_reasons() == {"358", "160", "BONUS_ADJUST"}
def test_refresh_cache_falls_back_to_redis_when_oracle_fails(monkeypatch):
    """When Oracle raises, refresh_cache should rebuild state from Redis."""
    _reset_cache_state()
    redis_client = MagicMock()
    # First get() returns the serialized reason list, second the timestamp.
    redis_client.get.side_effect = [json.dumps(["A01", "b02"]), "2026-02-13T00:00:00"]
    monkeypatch.setattr(cache, "try_acquire_lock", lambda *_a, **_k: True)
    monkeypatch.setattr(cache, "release_lock", lambda *_a, **_k: None)
    monkeypatch.setattr(cache, "get_redis_client", lambda: redis_client)

    def _oracle_down(_sql):
        raise RuntimeError("oracle unavailable")

    monkeypatch.setattr(cache, "read_sql_df", _oracle_down)

    assert cache.refresh_cache(force=True) is True
    assert cache.get_excluded_reasons() == {"A01", "B02"}
def test_get_excluded_reasons_uses_redis_for_lazy_bootstrap(monkeypatch):
    """A cold cache should lazily hydrate itself from the Redis snapshot."""
    _reset_cache_state()
    redis_client = MagicMock()
    # First get() yields the reason list payload, second the stored timestamp.
    redis_client.get.side_effect = [json.dumps(["X1", "x2"]), "2026-02-13T12:00:00"]
    monkeypatch.setattr(cache, "get_redis_client", lambda: redis_client)
    # Stub refresh so the test only observes the Redis bootstrap path.
    monkeypatch.setattr(cache, "refresh_cache", lambda force=False: True)

    assert cache.get_excluded_reasons(force_refresh=False) == {"X1", "X2"}

View File

@@ -373,6 +373,7 @@ class TestViteModuleIntegration(unittest.TestCase):
('/tables', 'tables.js'), ('/tables', 'tables.js'),
('/resource', 'resource-status.js'), ('/resource', 'resource-status.js'),
('/resource-history', 'resource-history.js'), ('/resource-history', 'resource-history.js'),
('/reject-history', 'reject-history.js'),
('/job-query', 'job-query.js'), ('/job-query', 'job-query.js'),
('/excel-query', 'excel-query.js'), ('/excel-query', 'excel-query.js'),
('/query-tool', 'query-tool.js'), ('/query-tool', 'query-tool.js'),
@@ -396,13 +397,18 @@ class TestViteModuleIntegration(unittest.TestCase):
response = self.client.get(endpoint, follow_redirects=False) response = self.client.get(endpoint, follow_redirects=False)
if endpoint in canonical_routes: if endpoint in canonical_routes:
self.assertEqual(response.status_code, 302) if response.status_code == 302:
self.assertTrue(response.location.endswith(canonical_routes[endpoint])) self.assertTrue(response.location.endswith(canonical_routes[endpoint]))
follow = self.client.get(response.location) follow = self.client.get(response.location)
self.assertEqual(follow.status_code, 200) self.assertEqual(follow.status_code, 200)
html = follow.data.decode('utf-8') html = follow.data.decode('utf-8')
self.assertIn('/static/dist/portal-shell.js', html) self.assertIn('/static/dist/portal-shell.js', html)
self.assertIn('type="module"', html) self.assertIn('type="module"', html)
else:
self.assertEqual(response.status_code, 200)
html = response.data.decode('utf-8')
self.assertIn(f'/static/dist/{asset}', html)
self.assertIn('type="module"', html)
else: else:
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
html = response.data.decode('utf-8') html = response.data.decode('utf-8')