feat: dimension pareto cache-based computation, filter propagation, and MSD events cache isolation

Reject History:
- Compute dimension pareto (package/type/workflow/workcenter/equipment) from
  cached DataFrame instead of re-querying Oracle per dimension change
- Propagate supplementary filters and trend date selection to dimension pareto
- Add staleness tracking to prevent race conditions on rapid dimension switches
- Add WORKFLOWNAME to detail and export outputs
- Fix button hover visibility with CSS specificity

MSD (製程不良追溯分析):
- Separate raw events caching from aggregation computation so changing
  loss_reasons uses EventFetcher per-domain cache (fast) and recomputes
  aggregation with current filters instead of returning stale cached results
- Exclude loss_reasons from MSD seed cache key since seed resolution does
  not use it, avoiding unnecessary Oracle re-queries
- Add suspect context panel, analysis summary, upstream station/spec filters
- Add machine bar click drill-down and filtered attribution charts

Query Tool:
- Support batch container_ids in lot CSV export (history/materials/rejects/holds)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
egg
2026-02-25 09:02:39 +08:00
parent 983737ca1a
commit 86984cfeb1
28 changed files with 1768 additions and 86 deletions

View File

@@ -10,7 +10,9 @@ import KpiCards from './components/KpiCards.vue';
import MultiSelect from './components/MultiSelect.vue';
import ParetoChart from './components/ParetoChart.vue';
import TrendChart from './components/TrendChart.vue';
import AnalysisSummary from './components/AnalysisSummary.vue';
import DetailTable from './components/DetailTable.vue';
import SuspectContextPanel from './components/SuspectContextPanel.vue';
ensureMesApiAvailable();
@@ -170,6 +172,12 @@ const filteredByMachineData = computed(() => {
return filtered.length > 0 ? buildMachineChartFromAttribution(filtered) : [];
});
// Machine names currently shown in the (filtered) attribution chart,
// excluding the aggregated "其他" bucket; drives suspect-hit highlighting
// in the detail table.
const suspectMachineNames = computed(() => {
  const rows = filteredByMachineData.value;
  if (!Array.isArray(rows)) return [];
  const names = [];
  for (const row of rows) {
    if (row.name && row.name !== '其他') names.push(row.name);
  }
  return names;
});
const isForward = computed(() => committedFilters.value.direction === 'forward');
const committedStation = computed(() => {
const key = committedFilters.value.station || '測試';
@@ -198,7 +206,25 @@ const eventsAggregation = computed(() => trace.stage_results.events?.aggregation
const showAnalysisSkeleton = computed(() => hasQueried.value && loading.querying && !eventsAggregation.value);
const showAnalysisCharts = computed(() => hasQueried.value && (Boolean(eventsAggregation.value) || restoredFromCache.value));
const skeletonChartCount = computed(() => (isForward.value ? 4 : 6));
const skeletonChartCount = computed(() => (isForward.value ? 4 : 5));
const totalAncestorCount = computed(() => trace.stage_results.lineage?.total_ancestor_count || analysisData.value?.total_ancestor_count || 0);
// Snapshot of the committed query parameters for the analysis summary panel.
// In container mode the seed-resolution counts are attached as well.
const summaryQueryParams = computed(() => {
  const snap = committedFilters.value;
  const params = {
    queryMode: snap.queryMode || 'date_range',
    startDate: snap.startDate,
    endDate: snap.endDate,
    lossReasons: snap.lossReasons || [],
  };
  if (snap.queryMode === 'container') {
    const info = resolutionInfo.value;
    params.containerInputType = snap.containerInputType || 'lot';
    params.resolvedCount = info?.resolved_count || 0;
    params.notFoundCount = info?.not_found?.length || 0;
  }
  return params;
});
function emptyAnalysisData() {
return {
@@ -364,6 +390,7 @@ async function loadAnalysis() {
analysisData.value = {
...analysisData.value,
...eventsAggregation.value,
total_ancestor_count: trace.stage_results.lineage?.total_ancestor_count || 0,
};
}
@@ -429,6 +456,21 @@ function exportCsv() {
document.body.removeChild(link);
}
// Suspect context panel state: the attribution record of the clicked
// machine, or null when the panel is closed.
const suspectPanelMachine = ref(null);

// Toggle the suspect panel for the clicked bar; the aggregated "其他"
// bucket is not clickable.
function handleMachineBarClick({ name, dataIndex }) {
  if (!name || name === '其他') return;
  const attribution = analysisData.value?.attribution;
  if (!Array.isArray(attribution)) return;
  const match = attribution.find((rec) => rec.EQUIPMENT_NAME === name);
  if (!match) return;
  // Clicking the already-open machine closes the panel.
  const isSameMachine = suspectPanelMachine.value?.EQUIPMENT_NAME === name;
  suspectPanelMachine.value = isSameMachine ? null : match;
}
const _abortControllers = new Map();
function createAbortSignal(key = 'default') {
const prev = _abortControllers.get(key);
@@ -542,6 +584,14 @@ void initPage();
<transition name="trace-fade">
<div v-if="showAnalysisCharts">
<AnalysisSummary
v-if="!isForward"
:query-params="summaryQueryParams"
:kpi="analysisData.kpi"
:total-ancestor-count="totalAncestorCount"
:station-label="committedStation"
/>
<KpiCards
:kpi="analysisData.kpi"
:loading="false"
@@ -552,7 +602,8 @@ void initPage();
<div class="charts-section">
<template v-if="!isForward">
<div class="charts-row">
<ParetoChart title="依上游機台歸因" :data="filteredByMachineData">
<div class="chart-with-panel">
<ParetoChart title="依上游機台歸因" :data="filteredByMachineData" enable-click @bar-click="handleMachineBarClick">
<template #header-extra>
<div class="chart-inline-filters">
<MultiSelect
@@ -572,15 +623,19 @@ void initPage();
</div>
</template>
</ParetoChart>
<SuspectContextPanel
:machine="suspectPanelMachine"
@close="suspectPanelMachine = null"
/>
</div>
<ParetoChart title="依原物料歸因" :data="analysisData.charts?.by_material" />
</div>
<div class="charts-row">
<ParetoChart title="依源頭批次歸因" :data="analysisData.charts?.by_wafer_root" />
<ParetoChart title="依不良原因" :data="analysisData.charts?.by_loss_reason" />
</div>
<div class="charts-row">
<ParetoChart title="依偵測機台" :data="analysisData.charts?.by_detection_machine" />
<ParetoChart title="依製程 (WORKFLOW)" :data="analysisData.charts?.by_workflow" />
</div>
<div class="charts-row">
<ParetoChart title="依封裝 (PACKAGE)" :data="analysisData.charts?.by_package" />
<ParetoChart title="依 TYPE" :data="analysisData.charts?.by_pj_type" />
</div>
</template>
<template v-else>
@@ -606,6 +661,7 @@ void initPage();
:loading="detailLoading"
:pagination="detailPagination"
:direction="committedFilters.direction"
:suspect-machines="suspectMachineNames"
@export-csv="exportCsv"
@prev-page="prevPage"
@next-page="nextPage"

View File

@@ -0,0 +1,163 @@
<script setup>
import { ref, watch } from 'vue';
const STORAGE_KEY = 'msd:summary-collapsed';
const props = defineProps({
queryParams: {
type: Object,
default: () => ({}),
},
kpi: {
type: Object,
default: () => ({}),
},
totalAncestorCount: {
type: Number,
default: 0,
},
stationLabel: {
type: String,
default: '測試',
},
});
// Collapsed state, persisted per browser session so the user's preference
// survives reloads within the same tab.
const collapsed = ref(false);
try {
  // sessionStorage may be unavailable (privacy mode); default to expanded.
  collapsed.value = sessionStorage.getItem(STORAGE_KEY) === 'true';
} catch { /* storage unavailable */ }
watch(collapsed, (val) => {
  try {
    sessionStorage.setItem(STORAGE_KEY, String(val));
  } catch { /* quota or privacy mode */ }
});
// Flip the collapsed state (header click handler).
function toggle() {
  collapsed.value = !collapsed.value;
}
/**
 * Format a KPI value with locale thousand separators.
 * Nullish, zero and non-numeric values all render as '0' so the panel
 * never displays "NaN" for missing or malformed KPI fields.
 */
function formatNumber(v) {
  const n = Number(v);
  if (v == null || !Number.isFinite(n) || n === 0) return '0';
  return n.toLocaleString();
}
</script>
<template>
<section class="summary-panel">
<div class="summary-header" @click="toggle">
<h3 class="summary-title">分析摘要</h3>
<span class="summary-toggle">{{ collapsed ? '▸ 展開' : '▾ 收起' }}</span>
</div>
<div v-show="!collapsed" class="summary-body">
<div class="summary-grid">
<!-- Query context -->
<div class="summary-block">
<h4 class="block-title">查詢條件</h4>
<ul class="block-list">
<li>偵測站{{ stationLabel }}</li>
<template v-if="queryParams.queryMode === 'container'">
<li>輸入方式{{ queryParams.containerInputType === 'lot' ? 'LOT ID' : queryParams.containerInputType }}</li>
<li v-if="queryParams.resolvedCount != null">解析數量{{ queryParams.resolvedCount }} </li>
<li v-if="queryParams.notFoundCount > 0">未找到{{ queryParams.notFoundCount }} </li>
</template>
<template v-else>
<li>日期範圍{{ queryParams.startDate }} ~ {{ queryParams.endDate }}</li>
</template>
<li>不良原因{{ queryParams.lossReasons?.length ? queryParams.lossReasons.join(', ') : '全部' }}</li>
</ul>
</div>
<!-- Data scope -->
<div class="summary-block">
<h4 class="block-title">數據範圍</h4>
<ul class="block-list">
<li>偵測站 LOT 總數{{ formatNumber(kpi.lot_count) }}</li>
<li>總投入{{ formatNumber(kpi.total_input) }} pcs</li>
<li>報廢 LOT {{ formatNumber(kpi.defective_lot_count) }}</li>
<li>報廢總數{{ formatNumber(kpi.total_defect_qty) }} pcs</li>
<li>血緣追溯涵蓋上游 LOT{{ formatNumber(totalAncestorCount) }}</li>
</ul>
</div>
<!-- Methodology -->
<div class="summary-block summary-block-wide">
<h4 class="block-title">歸因方法說明</h4>
<p class="block-text">
分析涵蓋所有經過偵測站的 LOT包含無不良者針對每筆 LOT 回溯血緣split / merge chain找到關聯的上游因子
歸因不良率 = 關聯 LOT 的報廢合計 / 關聯 LOT 的投入合計 × 100%
同一筆不良可歸因於多個上游因子非互斥
柏拉圖柱高 = 歸因不良數含重疊橙線 = 歸因不良率
</p>
</div>
</div>
</div>
</section>
</template>
<style scoped>
.summary-panel {
border: 1px solid var(--border-color, #e5e7eb);
border-radius: 8px;
background: var(--bg-secondary, #f9fafb);
margin-bottom: 16px;
}
.summary-header {
display: flex;
align-items: center;
justify-content: space-between;
padding: 10px 16px;
cursor: pointer;
user-select: none;
}
.summary-title {
font-size: 14px;
font-weight: 600;
margin: 0;
color: var(--text-primary, #1f2937);
}
.summary-toggle {
font-size: 12px;
color: var(--text-tertiary, #9ca3af);
}
.summary-body {
padding: 0 16px 14px;
}
.summary-grid {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 12px 24px;
}
.summary-block {
min-width: 0;
}
.summary-block-wide {
grid-column: 1 / -1;
}
.block-title {
font-size: 12px;
font-weight: 600;
color: var(--text-secondary, #6b7280);
margin: 0 0 6px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.block-list {
list-style: none;
margin: 0;
padding: 0;
font-size: 13px;
color: var(--text-primary, #374151);
line-height: 1.7;
}
.block-text {
font-size: 12px;
color: var(--text-secondary, #6b7280);
line-height: 1.6;
margin: 0;
}
</style>

View File

@@ -20,6 +20,10 @@ const props = defineProps({
type: String,
default: 'backward',
},
suspectMachines: {
type: Array,
default: () => [],
},
});
const emit = defineEmits(['export-csv', 'prev-page', 'next-page']);
@@ -38,7 +42,8 @@ const COLUMNS_BACKWARD = [
{ key: 'DEFECT_QTY', label: '不良數', width: '70px', numeric: true },
{ key: 'DEFECT_RATE', label: '不良率(%)', width: '90px', numeric: true },
{ key: 'ANCESTOR_COUNT', label: '上游LOT數', width: '80px', numeric: true },
{ key: 'UPSTREAM_MACHINES', label: '上游台', width: '200px' },
{ key: 'UPSTREAM_MACHINE_COUNT', label: '上游台', width: '80px', numeric: true },
{ key: 'SUSPECT_HITS', label: '嫌疑命中', width: '200px', custom: true },
];
const COLUMNS_FORWARD = [
@@ -103,6 +108,27 @@ function formatCell(value, col) {
if (col.numeric) return Number(value).toLocaleString();
return value;
}
/**
 * Compute suspect-machine hits for a detail row.
 * Returns null when there is nothing to highlight; otherwise an object
 * with the matched names, hit/total counts and whether every upstream
 * machine of the row is a suspect.
 */
function getSuspectHits(row) {
  const upstream = row.UPSTREAM_MACHINES;
  if (!Array.isArray(upstream) || upstream.length === 0) return null;
  const suspects = props.suspectMachines;
  if (!suspects || suspects.length === 0) return null;
  const suspectSet = new Set(suspects);
  // Entries may be objects ({ machine }) or plain strings; dedupe names.
  const uniqueNames = [...new Set(upstream.map((m) => m.machine || m))];
  const hitNames = uniqueNames.filter((name) => suspectSet.has(name));
  if (hitNames.length === 0) return null;
  return {
    hitNames,
    hitCount: hitNames.length,
    totalCount: uniqueNames.length,
    fullMatch: hitNames.length === uniqueNames.length,
  };
}
</script>
<template>
@@ -136,7 +162,14 @@ function formatCell(value, col) {
<tbody>
<tr v-for="(row, idx) in sortedData" :key="idx">
<td v-for="col in activeColumns" :key="col.key" :class="{ numeric: col.numeric }">
{{ formatCell(row[col.key], col) }}
<template v-if="col.key === 'SUSPECT_HITS'">
<span v-if="getSuspectHits(row)" :class="{ 'hit-full': getSuspectHits(row).fullMatch }" class="suspect-cell">
{{ getSuspectHits(row).hitNames.join(', ') }}
<span class="hit-ratio">({{ getSuspectHits(row).hitCount }}/{{ getSuspectHits(row).totalCount }})</span>
</span>
<span v-else class="no-hit">-</span>
</template>
<template v-else>{{ formatCell(row[col.key], col) }}</template>
</td>
</tr>
<tr v-if="!sortedData.length">
@@ -156,3 +189,21 @@ function formatCell(value, col) {
</div>
</section>
</template>
<style scoped>
.suspect-cell {
font-size: 12px;
color: var(--text-primary, #374151);
}
.hit-ratio {
color: var(--text-tertiary, #9ca3af);
margin-left: 4px;
}
.hit-full {
color: #059669;
font-weight: 600;
}
.no-hit {
color: var(--text-tertiary, #9ca3af);
}
</style>

View File

@@ -1,5 +1,5 @@
<script setup>
import { computed } from 'vue';
import { computed, ref } from 'vue';
import VChart from 'vue-echarts';
import { use } from 'echarts/core';
import { CanvasRenderer } from 'echarts/renderers';
@@ -7,10 +7,11 @@ import { BarChart, LineChart } from 'echarts/charts';
import {
GridComponent,
LegendComponent,
MarkLineComponent,
TooltipComponent,
} from 'echarts/components';
use([CanvasRenderer, BarChart, LineChart, GridComponent, LegendComponent, TooltipComponent]);
use([CanvasRenderer, BarChart, LineChart, GridComponent, LegendComponent, MarkLineComponent, TooltipComponent]);
const props = defineProps({
title: {
@@ -21,15 +22,51 @@ const props = defineProps({
type: Array,
default: () => [],
},
enableClick: {
type: Boolean,
default: false,
},
});
const emit = defineEmits(['bar-click']);
// Sort toggle: 'qty' (default, server Pareto order) or 'rate' (client re-sort).
const sortMode = ref('qty');

// Flip between quantity and rate ordering.
function toggleSort() {
  sortMode.value = sortMode.value === 'rate' ? 'qty' : 'rate';
}

// Chart data in the currently selected order. In 'qty' mode the incoming
// order (already sorted by defect quantity) is used as-is; in 'rate' mode
// items are re-sorted by defect_rate and the cumulative percentage is
// recomputed over the new order.
const sortedData = computed(() => {
  const source = props.data;
  if (!source?.length) return [];
  if (sortMode.value === 'qty') return source;
  const byRate = [...source].sort((a, b) => (b.defect_rate || 0) - (a.defect_rate || 0));
  const total = byRate.reduce((sum, item) => sum + (item.defect_qty || 0), 0);
  let running = 0;
  return byRate.map((item) => {
    running += item.defect_qty || 0;
    // Two-decimal cumulative percent; 0 when there are no defects at all.
    const pct = total > 0 ? Math.round((running / total) * 1e4) / 100 : 0;
    return { ...item, cumulative_pct: pct };
  });
});
// Sum of lot_count across all items (tooltip percentage denominator).
const totalLotCount = computed(() => {
  const items = props.data;
  if (!items?.length) return 0;
  let sum = 0;
  for (const item of items) sum += item.lot_count || 0;
  return sum;
});
const chartOption = computed(() => {
if (!props.data || !props.data.length) return null;
const data = sortedData.value;
if (!data.length) return null;
const names = props.data.map((d) => d.name);
const defectQty = props.data.map((d) => d.defect_qty);
const cumulativePct = props.data.map((d) => d.cumulative_pct);
const defectRate = props.data.map((d) => d.defect_rate);
const names = data.map((d) => d.name);
const defectQty = data.map((d) => d.defect_qty);
const cumulativePct = data.map((d) => d.cumulative_pct);
const defectRate = data.map((d) => d.defect_rate);
return {
animationDuration: 350,
@@ -39,11 +76,16 @@ const chartOption = computed(() => {
formatter(params) {
const idx = params[0]?.dataIndex;
if (idx == null) return '';
const item = props.data[idx];
const item = data[idx];
const total = totalLotCount.value;
let html = `<b>${item.name}</b><br/>`;
html += `不良數: ${(item.defect_qty || 0).toLocaleString()}<br/>`;
html += `投入數: ${(item.input_qty || 0).toLocaleString()}<br/>`;
html += `不良率: ${(item.defect_rate || 0).toFixed(2)}%<br/>`;
if (item.lot_count != null) {
const pct = total > 0 ? ((item.lot_count / total) * 100).toFixed(1) : '0.0';
html += `關聯 LOT 數: ${item.lot_count} (${pct}%)<br/>`;
}
html += `累計占比: ${(item.cumulative_pct || 0).toFixed(1)}%`;
return html;
},
@@ -112,16 +154,38 @@ const chartOption = computed(() => {
symbolSize: 6,
lineStyle: { color: '#ef4444', width: 2, type: 'dashed' },
itemStyle: { color: '#ef4444' },
markLine: {
silent: true,
symbol: 'none',
label: { show: true, position: 'insideEndTop', formatter: '80%', fontSize: 10, color: '#94a3b8' },
lineStyle: { color: '#94a3b8', type: 'dotted', width: 1 },
data: [{ yAxis: 80 }],
},
},
],
};
});
// Forward bar clicks to the parent when click interaction is enabled.
function handleChartClick(params) {
  if (!props.enableClick) return;
  const isBarSeries = params.componentType === 'series' && params.seriesType === 'bar';
  if (isBarSeries) {
    emit('bar-click', { name: params.name, dataIndex: params.dataIndex });
  }
}
</script>
<template>
<div class="chart-card">
<div class="chart-header">
<h3 class="chart-title">{{ title }}</h3>
<button
type="button"
class="sort-toggle"
:title="sortMode === 'qty' ? '切換為依不良率排序' : '切換為依不良數排序'"
@click="toggleSort"
>
{{ sortMode === 'qty' ? '依數量' : '依比率' }}
</button>
<slot name="header-extra" />
</div>
<VChart
@@ -129,7 +193,25 @@ const chartOption = computed(() => {
class="chart-canvas"
:option="chartOption"
autoresize
@click="handleChartClick"
/>
<div v-else class="chart-empty">暫無資料</div>
</div>
</template>
<style scoped>
.sort-toggle {
font-size: 11px;
padding: 1px 6px;
border: 1px solid var(--border-color, #d1d5db);
border-radius: 4px;
background: var(--bg-secondary, #f9fafb);
color: var(--text-secondary, #6b7280);
cursor: pointer;
margin-left: 6px;
white-space: nowrap;
}
.sort-toggle:hover {
background: var(--bg-tertiary, #f3f4f6);
}
</style>

View File

@@ -0,0 +1,214 @@
<script setup>
import { ref, watch, onMounted, onBeforeUnmount } from 'vue';
import { apiGet } from '../../core/api.js';
const props = defineProps({
machine: {
type: Object,
default: null,
},
});
const emit = defineEmits(['close']);
const jobsLoading = ref(false);
const jobs = ref([]);
const jobsError = ref('');
watch(() => props.machine, async (val) => {
jobs.value = [];
jobsError.value = '';
if (!val?.EQUIPMENT_ID) return;
jobsLoading.value = true;
try {
const result = await apiGet(`/api/query-tool/equipment-recent-jobs/${encodeURIComponent(val.EQUIPMENT_ID)}`);
if (Array.isArray(result?.data)) {
jobs.value = result.data;
}
} catch (err) {
jobsError.value = err.message || '載入維修紀錄失敗';
} finally {
jobsLoading.value = false;
}
}, { immediate: true });
// Close the panel when the user clicks anywhere outside of it.
function handleOutsideClick(e) {
  // e.target may not be an Element (optional-chain the lookup defensively).
  if (!e.target?.closest?.('.suspect-panel')) emit('close');
}
// Timer for the deferred listener registration (see onMounted).
let outsideClickTimer = null;
onMounted(() => {
  // Defer one tick so the click that opened the panel does not
  // immediately close it.
  outsideClickTimer = setTimeout(() => document.addEventListener('click', handleOutsideClick), 0);
});
onBeforeUnmount(() => {
  // Cancel the pending registration: if unmount happens within the same
  // tick as mount, the timer would otherwise attach the listener AFTER
  // removeEventListener ran, leaking a document-level handler.
  clearTimeout(outsideClickTimer);
  document.removeEventListener('click', handleOutsideClick);
});
// Render an ISO-ish timestamp as "YYYY-MM-DD HH:MM"; falsy values become "-".
function formatDate(v) {
  if (!v) return '-';
  const head = String(v).slice(0, 16);
  return head.replace('T', ' ');
}
// Format a count with locale thousand separators; null/undefined render as "0".
function formatNumber(v) {
  return v == null ? '0' : Number(v).toLocaleString();
}
</script>
<template>
<div v-if="machine" class="suspect-panel" @click.stop>
<div class="panel-header">
<h4 class="panel-title">{{ machine.EQUIPMENT_NAME }}</h4>
<button type="button" class="panel-close" @click="emit('close')">&times;</button>
</div>
<div class="panel-section">
<h5 class="section-label">歸因摘要</h5>
<table class="attr-table">
<tbody>
<tr><td class="attr-key">站點</td><td>{{ machine.WORKCENTER_GROUP || '-' }}</td></tr>
<tr><td class="attr-key">機型</td><td>{{ machine.RESOURCEFAMILYNAME || '-' }}</td></tr>
<tr><td class="attr-key">歸因不良率</td><td>{{ machine.DEFECT_RATE != null ? Number(machine.DEFECT_RATE).toFixed(2) + '%' : '-' }}</td></tr>
<tr><td class="attr-key">歸因不良數</td><td>{{ formatNumber(machine.DEFECT_QTY) }}</td></tr>
<tr><td class="attr-key">歸因投入數</td><td>{{ formatNumber(machine.INPUT_QTY) }}</td></tr>
<tr><td class="attr-key">關聯 LOT </td><td>{{ formatNumber(machine.DETECTION_LOT_COUNT) }}</td></tr>
</tbody>
</table>
</div>
<div class="panel-section">
<h5 class="section-label">近期維修紀錄</h5>
<div v-if="jobsLoading" class="jobs-loading">載入中...</div>
<div v-else-if="jobsError" class="jobs-error">{{ jobsError }}</div>
<div v-else-if="jobs.length === 0" class="jobs-empty"> 30 天無維修紀錄</div>
<table v-else class="jobs-table">
<thead>
<tr>
<th>JOB ID</th>
<th>狀態</th>
<th>型號</th>
<th>維修區間</th>
</tr>
</thead>
<tbody>
<tr v-for="job in jobs" :key="job.JOBID">
<td>{{ job.JOBID || '-' }}</td>
<td>{{ job.JOBSTATUS || '-' }}</td>
<td>{{ job.JOBMODELNAME || '-' }}</td>
<td class="job-interval">
<span>{{ formatDate(job.CREATEDATE) }}</span>
<span v-if="job.COMPLETEDATE" class="interval-sep"></span>
<span v-if="job.COMPLETEDATE">{{ formatDate(job.COMPLETEDATE) }}</span>
<span v-else class="interval-ongoing">進行中</span>
</td>
</tr>
</tbody>
</table>
</div>
</div>
</template>
<style scoped>
.suspect-panel {
position: absolute;
top: 0;
right: -320px;
width: 300px;
background: var(--bg-primary, #fff);
border: 1px solid var(--border-color, #e5e7eb);
border-radius: 8px;
box-shadow: 0 4px 16px rgba(0, 0, 0, 0.12);
z-index: 100;
font-size: 13px;
}
.panel-header {
display: flex;
align-items: center;
justify-content: space-between;
padding: 10px 14px;
border-bottom: 1px solid var(--border-color, #e5e7eb);
}
.panel-title {
margin: 0;
font-size: 14px;
font-weight: 600;
}
.panel-close {
background: none;
border: none;
font-size: 18px;
cursor: pointer;
color: var(--text-tertiary, #9ca3af);
padding: 0 4px;
line-height: 1;
}
.panel-section {
padding: 10px 14px;
}
.panel-section + .panel-section {
border-top: 1px solid var(--border-color, #f3f4f6);
}
.section-label {
font-size: 11px;
font-weight: 600;
color: var(--text-secondary, #6b7280);
text-transform: uppercase;
letter-spacing: 0.5px;
margin: 0 0 8px;
}
.attr-table {
width: 100%;
border-collapse: collapse;
}
.attr-table td {
padding: 3px 0;
line-height: 1.4;
}
.attr-key {
color: var(--text-secondary, #6b7280);
width: 90px;
white-space: nowrap;
}
.jobs-table {
width: 100%;
border-collapse: collapse;
font-size: 12px;
}
.jobs-table th {
text-align: left;
font-weight: 600;
color: var(--text-secondary, #6b7280);
padding: 4px 6px 4px 0;
border-bottom: 1px solid var(--border-color, #e5e7eb);
font-size: 11px;
}
.jobs-table td {
padding: 4px 6px 4px 0;
border-bottom: 1px solid var(--border-color, #f3f4f6);
}
.job-interval {
font-size: 11px;
line-height: 1.5;
}
.interval-sep {
margin: 0 2px;
color: var(--text-tertiary, #9ca3af);
}
.interval-ongoing {
color: #f59e0b;
font-size: 10px;
font-weight: 500;
}
.jobs-loading,
.jobs-empty {
color: var(--text-tertiary, #9ca3af);
font-size: 12px;
padding: 4px 0;
}
.jobs-error {
color: #ef4444;
font-size: 12px;
padding: 4px 0;
}
</style>

View File

@@ -463,6 +463,10 @@ body {
grid-template-columns: 1fr;
}
.chart-with-panel {
position: relative;
}
.chart-card {
background: var(--msd-card-bg);
border-radius: 10px;

View File

@@ -62,6 +62,9 @@ const page = ref(1);
const detailReason = ref('');
const selectedTrendDates = ref([]);
const trendLegendSelected = ref({ '扣帳報廢量': true, '不扣帳報廢量': true });
const paretoDimension = ref('reason');
const dimensionParetoItems = ref([]);
const dimensionParetoLoading = ref(false);
// ---- Data state ----
const summary = ref({
@@ -197,6 +200,8 @@ async function executePrimaryQuery() {
page.value = 1;
detailReason.value = '';
selectedTrendDates.value = [];
paretoDimension.value = 'reason';
dimensionParetoItems.value = [];
// Apply initial data
analyticsRawItems.value = Array.isArray(result.analytics_raw)
@@ -301,6 +306,7 @@ function onTrendDateClick(dateStr) {
}
page.value = 1;
void refreshView();
refreshDimensionParetoIfActive();
}
function onTrendLegendChange(selected) {
@@ -308,6 +314,7 @@ function onTrendLegendChange(selected) {
page.value = 1;
updateUrlState();
void refreshView();
refreshDimensionParetoIfActive();
}
function onParetoClick(reason) {
@@ -323,6 +330,59 @@ function handleParetoScopeToggle(checked) {
updateUrlState();
}
// Monotonic request token: only the most recent in-flight request is
// allowed to write results, guarding against out-of-order responses on
// rapid dimension switches.
let activeDimRequestId = 0;

// Fetch the Pareto breakdown for a non-"reason" dimension from the
// server-side cached dataset, carrying the committed primary filters,
// supplementary filters and any trend-date selection.
// NOTE(review): assumes the API helper drops `undefined` params from the
// query string — confirm against apiGet's implementation.
async function fetchDimensionPareto(dim) {
  // 'reason' is computed client-side; without a query_id there is no
  // cached dataset to slice.
  if (dim === 'reason' || !queryId.value) return;
  activeDimRequestId += 1;
  const myId = activeDimRequestId;
  dimensionParetoLoading.value = true;
  try {
    const params = {
      query_id: queryId.value,
      start_date: committedPrimary.startDate,
      end_date: committedPrimary.endDate,
      dimension: dim,
      metric_mode: paretoMetricMode.value === 'defect' ? 'defect' : 'reject_total',
      pareto_scope: committedPrimary.paretoTop80 ? 'top80' : 'all',
      include_excluded_scrap: committedPrimary.includeExcludedScrap,
      exclude_material_scrap: committedPrimary.excludeMaterialScrap,
      exclude_pb_diode: committedPrimary.excludePbDiode,
      packages: supplementaryFilters.packages.length > 0 ? supplementaryFilters.packages : undefined,
      workcenter_groups: supplementaryFilters.workcenterGroups.length > 0 ? supplementaryFilters.workcenterGroups : undefined,
      reason: supplementaryFilters.reason || undefined,
      trend_dates: selectedTrendDates.value.length > 0 ? selectedTrendDates.value : undefined,
    };
    const resp = await apiGet('/api/reject-history/reason-pareto', { params, timeout: API_TIMEOUT });
    // A newer request superseded this one while we were awaiting: drop it.
    if (myId !== activeDimRequestId) return;
    const result = unwrapApiResult(resp, '查詢維度 Pareto 失敗');
    dimensionParetoItems.value = result.data?.items || [];
  } catch (err) {
    // Stale failures are ignored too — only the latest request may
    // clear the items or surface an error message.
    if (myId !== activeDimRequestId) return;
    dimensionParetoItems.value = [];
    errorMessage.value = err.message || '查詢維度 Pareto 失敗';
  } finally {
    // Only the latest request may turn off the loading indicator.
    if (myId === activeDimRequestId) {
      dimensionParetoLoading.value = false;
    }
  }
}
// Re-fetch the dimension Pareto only when a non-default dimension is active;
// the 'reason' view is recomputed locally and needs no server round-trip.
function refreshDimensionParetoIfActive() {
  const dim = paretoDimension.value;
  if (dim === 'reason') return;
  void fetchDimensionPareto(dim);
}
// Switch the active Pareto dimension. 'reason' is served from the locally
// filtered items; any other dimension triggers a server fetch.
function onDimensionChange(dim) {
  paretoDimension.value = dim;
  if (dim === 'reason') {
    dimensionParetoItems.value = [];
    return;
  }
  void fetchDimensionPareto(dim);
}
function onSupplementaryChange(filters) {
supplementaryFilters.packages = filters.packages || [];
supplementaryFilters.workcenterGroups = filters.workcenterGroups || [];
@@ -331,6 +391,7 @@ function onSupplementaryChange(filters) {
detailReason.value = '';
selectedTrendDates.value = [];
void refreshView();
refreshDimensionParetoIfActive();
}
function removeFilterChip(chip) {
@@ -347,6 +408,7 @@ function removeFilterChip(chip) {
selectedTrendDates.value = [];
page.value = 1;
void refreshView();
refreshDimensionParetoIfActive();
return;
}
@@ -354,6 +416,7 @@ function removeFilterChip(chip) {
supplementaryFilters.reason = '';
page.value = 1;
void refreshView();
refreshDimensionParetoIfActive();
return;
}
@@ -363,6 +426,7 @@ function removeFilterChip(chip) {
);
page.value = 1;
void refreshView();
refreshDimensionParetoIfActive();
return;
}
@@ -372,6 +436,7 @@ function removeFilterChip(chip) {
);
page.value = 1;
void refreshView();
refreshDimensionParetoIfActive();
return;
}
}
@@ -556,6 +621,11 @@ const filteredParetoItems = computed(() => {
return items.slice(0, Math.max(top80Count, Math.min(5, items.length)));
});
const activeParetoItems = computed(() => {
if (paretoDimension.value !== 'reason') return dimensionParetoItems.value;
return filteredParetoItems.value;
});
const activeFilterChips = computed(() => {
const chips = [];
@@ -871,12 +941,15 @@ onMounted(() => {
/>
<ParetoSection
:items="filteredParetoItems"
:items="activeParetoItems"
:detail-reason="detailReason"
:selected-dates="selectedTrendDates"
:metric-label="paretoMetricLabel"
:loading="loading.querying"
:loading="loading.querying || dimensionParetoLoading"
:dimension="paretoDimension"
:show-dimension-selector="committedPrimary.mode === 'date_range'"
@reason-click="onParetoClick"
@dimension-change="onDimensionChange"
/>
<DetailTable

View File

@@ -41,6 +41,7 @@ function formatNumber(value) {
<th>Package</th>
<th>FUNCTION</th>
<th class="col-left">TYPE</th>
<th>WORKFLOW</th>
<th>PRODUCT</th>
<th>原因</th>
<th>EQUIPMENT</th>
@@ -66,6 +67,7 @@ function formatNumber(value) {
<td>{{ row.PRODUCTLINENAME }}</td>
<td>{{ row.PJ_FUNCTION || '' }}</td>
<td class="col-left">{{ row.PJ_TYPE }}</td>
<td>{{ row.WORKFLOWNAME || '' }}</td>
<td>{{ row.PRODUCTNAME || '' }}</td>
<td>{{ row.LOSSREASONNAME }}</td>
<td>{{ row.EQUIPMENTNAME || '' }}</td>
@@ -82,7 +84,7 @@ function formatNumber(value) {
<td class="cell-nowrap">{{ row.TXN_TIME || row.TXN_DAY }}</td>
</tr>
<tr v-if="!items || items.length === 0">
<td :colspan="showRejectBreakdown ? 17 : 12" class="placeholder">No data</td>
<td :colspan="showRejectBreakdown ? 18 : 13" class="placeholder">No data</td>
</tr>
</tbody>
</table>

View File

@@ -9,18 +9,34 @@ import VChart from 'vue-echarts';
use([CanvasRenderer, BarChart, LineChart, GridComponent, TooltipComponent, LegendComponent]);
const DIMENSION_OPTIONS = [
{ value: 'reason', label: '不良原因' },
{ value: 'package', label: 'PACKAGE' },
{ value: 'type', label: 'TYPE' },
{ value: 'workflow', label: 'WORKFLOW' },
{ value: 'workcenter', label: '站點' },
{ value: 'equipment', label: '機台' },
];
const props = defineProps({
items: { type: Array, default: () => [] },
detailReason: { type: String, default: '' },
selectedDates: { type: Array, default: () => [] },
metricLabel: { type: String, default: '報廢量' },
loading: { type: Boolean, default: false },
dimension: { type: String, default: 'reason' },
showDimensionSelector: { type: Boolean, default: false },
});
const emit = defineEmits(['reason-click']);
const emit = defineEmits(['reason-click', 'dimension-change']);
const hasData = computed(() => Array.isArray(props.items) && props.items.length > 0);
// Human-readable label of the active dimension; falls back to a generic
// label for unknown dimension values.
const dimensionLabel = computed(() => {
  const match = DIMENSION_OPTIONS.find((o) => o.value === props.dimension);
  return match ? match.label : '報廢原因';
});
// Format a metric value with zh-TW thousand separators; nullish or falsy
// values render as "0".
function formatNumber(value) {
  const n = Number(value || 0);
  return n.toLocaleString('zh-TW');
}
@@ -108,7 +124,7 @@ const chartOption = computed(() => {
});
function handleChartClick(params) {
if (params?.seriesType !== 'bar') {
if (params?.seriesType !== 'bar' || props.dimension !== 'reason') {
return;
}
const reason = props.items?.[params.dataIndex]?.reason;
@@ -122,9 +138,17 @@ function handleChartClick(params) {
<section class="card">
<div class="card-header pareto-header">
<div class="card-title">
{{ metricLabel }} vs 報廢原因Pareto
{{ metricLabel }} vs {{ dimensionLabel }}Pareto
<span v-for="d in selectedDates" :key="d" class="pareto-date-badge">{{ d }}</span>
</div>
<select
v-if="showDimensionSelector"
class="dimension-select"
:value="dimension"
@change="emit('dimension-change', $event.target.value)"
>
<option v-for="opt in DIMENSION_OPTIONS" :key="opt.value" :value="opt.value">{{ opt.label }}</option>
</select>
</div>
<div class="card-body pareto-layout">
<div class="pareto-chart-wrap">
@@ -135,7 +159,7 @@ function handleChartClick(params) {
<table class="detail-table pareto-table">
<thead>
<tr>
<th>原因</th>
<th>{{ dimensionLabel }}</th>
<th>{{ metricLabel }}</th>
<th>占比</th>
<th>累積</th>
@@ -148,9 +172,10 @@ function handleChartClick(params) {
:class="{ active: detailReason === item.reason }"
>
<td>
<button class="reason-link" type="button" @click="$emit('reason-click', item.reason)">
<button v-if="dimension === 'reason'" class="reason-link" type="button" @click="$emit('reason-click', item.reason)">
{{ item.reason }}
</button>
<span v-else>{{ item.reason }}</span>
</td>
<td>{{ formatNumber(item.metric_value) }}</td>
<td>{{ formatPct(item.pct) }}</td>

View File

@@ -228,16 +228,27 @@
line-height: 1;
}
.btn-export {
.btn.btn-primary:hover {
background: var(--primary-dark);
color: #fff;
}
.btn.btn-secondary:hover {
background: #5a6268;
color: #fff;
}
.btn.btn-export {
background: #0f766e;
color: #fff;
}
.btn-export:hover {
.btn.btn-export:hover {
background: #0b5e59;
color: #fff;
}
.btn-export:disabled {
.btn.btn-export:disabled {
opacity: 0.7;
cursor: not-allowed;
}
@@ -296,6 +307,17 @@
gap: 12px;
}
.dimension-select {
font-size: 12px;
padding: 3px 8px;
border: 1px solid var(--border-color, #d1d5db);
border-radius: 4px;
background: var(--bg-primary, #fff);
color: var(--text-primary, #374151);
cursor: pointer;
margin-left: auto;
}
.pareto-date-badge {
display: inline-block;
margin-left: 8px;

View File

@@ -7,7 +7,7 @@ ensureMesApiAvailable();
const DEFAULT_STAGE_TIMEOUT_MS = 60000;
const PROFILE_DOMAINS = Object.freeze({
query_tool: ['history', 'materials', 'rejects', 'holds', 'jobs'],
mid_section_defect: ['upstream_history'],
mid_section_defect: ['upstream_history', 'materials'],
mid_section_defect_forward: ['upstream_history', 'downstream_rejects'],
});
@@ -168,6 +168,7 @@ export function useTraceProgress({ profile } = {}) {
lineage: {
ancestors: lineagePayload?.ancestors || {},
children_map: lineagePayload?.children_map || {},
seed_roots: lineagePayload?.seed_roots || {},
},
},
{ timeout: DEFAULT_STAGE_TIMEOUT_MS, signal: controller.signal },

View File

@@ -596,6 +596,49 @@ def get_workcenter_groups_list():
return jsonify({'error': f'載入站點群組失敗: {str(exc)}'}), 500
# ============================================================
# Equipment Recent Jobs API (for suspect context panel)
# ============================================================
@query_tool_bp.route('/api/query-tool/equipment-recent-jobs/<equipment_id>', methods=['GET'])
def get_equipment_recent_jobs(equipment_id):
    """Get recent JOB records for a specific equipment (last 30 days, top 5).

    Used by the suspect machine context panel in mid-section-defect analysis.

    Args:
        equipment_id: Equipment identifier taken from the URL path.

    Returns:
        JSON ``{'data': [...], 'total': N}`` on success, or
        ``{'error': message}`` with HTTP 400 (missing id) / 500 (query failure).
    """
    from mes_dashboard.sql import SQLLoader
    from mes_dashboard.core.database import read_sql_df

    equipment_id = str(equipment_id or '').strip()
    if not equipment_id:
        return jsonify({'error': '請指定設備ID'}), 400

    # Columns exposed to the frontend; every value is stringified so the
    # payload never carries pandas NaN/Timestamp objects into JSON.
    columns = (
        'JOBID', 'JOBSTATUS', 'JOBMODELNAME', 'CREATEDATE',
        'COMPLETEDATE', 'CAUSECODENAME', 'REPAIRCODENAME', 'RESOURCENAME',
    )
    try:
        sql = SQLLoader.load("query_tool/equipment_recent_jobs")
        df = read_sql_df(sql, {'equipment_id': equipment_id})
        if df is None or df.empty:
            return jsonify({'data': [], 'total': 0})
        # to_dict('records') avoids the per-row Series construction of iterrows().
        data = [
            {col: str(rec.get(col) or '') for col in columns}
            for rec in df.to_dict('records')
        ]
        return jsonify({'data': data, 'total': len(data)})
    except Exception as exc:
        return jsonify({'error': f'載入維修紀錄失敗: {str(exc)}'}), 500
# ============================================================
# CSV Export API
# ============================================================

View File

@@ -13,6 +13,7 @@ from mes_dashboard.core.cache import cache_get, cache_set, make_cache_key
from mes_dashboard.core.rate_limit import configured_rate_limit
from mes_dashboard.services.reject_dataset_cache import (
apply_view,
compute_dimension_pareto,
execute_primary_query,
export_csv_from_cache,
)
@@ -22,6 +23,7 @@ from mes_dashboard.services.reject_history_service import (
get_filter_options,
query_analytics,
query_list,
query_dimension_pareto,
query_reason_pareto,
query_summary,
query_trend,
@@ -298,11 +300,30 @@ def api_reject_history_reason_pareto():
metric_mode = request.args.get("metric_mode", "reject_total").strip().lower() or "reject_total"
pareto_scope = request.args.get("pareto_scope", "top80").strip().lower() or "top80"
dimension = request.args.get("dimension", "reason").strip().lower() or "reason"
query_id = request.args.get("query_id", "").strip()
try:
result = query_reason_pareto(
# Prefer cache-based computation when query_id is available
if query_id:
result = compute_dimension_pareto(
query_id=query_id,
dimension=dimension,
metric_mode=metric_mode,
pareto_scope=pareto_scope,
packages=_parse_multi_param("packages") or None,
workcenter_groups=_parse_multi_param("workcenter_groups") or None,
reason=request.args.get("reason", "").strip() or None,
trend_dates=_parse_multi_param("trend_dates") or None,
)
if result is not None:
return jsonify({"success": True, "data": result, "meta": {}})
# Cache expired, fall through to Oracle query
result = query_dimension_pareto(
start_date=start_date,
end_date=end_date,
dimension=dimension,
metric_mode=metric_mode,
pareto_scope=pareto_scope,
workcenter_groups=_parse_multi_param("workcenter_groups") or None,

View File

@@ -121,6 +121,11 @@ def _hash_payload(payload: Any) -> str:
def _seed_cache_key(profile: str, params: Dict[str, Any]) -> str:
    """Build the Redis cache key for a seed-resolution request.

    For the MSD profile, ``loss_reasons`` is dropped from the hashed payload:
    seed resolution does not depend on it, so changing the reason filter
    should hit the cache instead of re-querying Oracle.
    """
    payload = params
    if profile == PROFILE_MID_SECTION_DEFECT:
        payload = {key: value for key, value in params.items() if key != "loss_reasons"}
    return f"trace:seed:{profile}:{_hash_payload(payload)}"
@@ -305,6 +310,7 @@ def _build_lineage_response(
merge_edges: Optional[Dict[str, List[str]]] = None,
typed_nodes: Optional[Dict[str, Dict[str, Any]]] = None,
typed_edges: Optional[List[Dict[str, Any]]] = None,
seed_roots: Optional[Dict[str, str]] = None,
) -> Dict[str, Any]:
normalized_ancestors: Dict[str, List[str]] = {}
all_nodes = set(container_ids)
@@ -319,12 +325,20 @@ def _build_lineage_response(
normalized_ancestors[seed] = normalized_list
all_nodes.update(normalized_list)
# Count unique ancestor CIDs excluding seeds themselves
seed_set = set(container_ids)
ancestor_only = all_nodes - seed_set
total_ancestor_count = len(ancestor_only)
response: Dict[str, Any] = {
"stage": "lineage",
"ancestors": normalized_ancestors,
"merges": {},
"total_nodes": len(all_nodes),
"total_ancestor_count": total_ancestor_count,
}
if seed_roots:
response["seed_roots"] = seed_roots
if cid_to_name:
response["names"] = {
cid: name for cid, name in cid_to_name.items()
@@ -386,6 +400,19 @@ def _parse_lineage_payload(payload: Dict[str, Any]) -> Optional[Dict[str, Any]]:
return None
def _parse_lineage_roots(payload: Dict[str, Any]) -> Optional[Dict[str, str]]:
"""Extract seed_roots mapping from lineage payload."""
lineage = payload.get("lineage")
if isinstance(lineage, dict):
roots = lineage.get("seed_roots")
if isinstance(roots, dict):
return roots
direct_roots = payload.get("seed_roots")
if isinstance(direct_roots, dict):
return direct_roots
return None
def _build_msd_aggregation(
payload: Dict[str, Any],
domain_results: Dict[str, Dict[str, List[Dict[str, Any]]]],
@@ -407,11 +434,13 @@ def _build_msd_aggregation(
loss_reasons = parse_loss_reasons_param(raw_loss_reasons)
lineage_ancestors = _parse_lineage_payload(payload)
lineage_roots = _parse_lineage_roots(payload)
seed_container_ids = _normalize_strings(payload.get("seed_container_ids", []))
if not seed_container_ids and isinstance(lineage_ancestors, dict):
seed_container_ids = _normalize_strings(list(lineage_ancestors.keys()))
upstream_events = domain_results.get("upstream_history", {})
materials_events = domain_results.get("materials", {})
downstream_events = domain_results.get("downstream_rejects", {})
station = str(params.get("station") or "測試").strip()
direction = str(params.get("direction") or "backward").strip()
@@ -422,7 +451,9 @@ def _build_msd_aggregation(
loss_reasons=loss_reasons,
seed_container_ids=seed_container_ids,
lineage_ancestors=lineage_ancestors,
lineage_roots=lineage_roots,
upstream_events_by_cid=upstream_events,
materials_events_by_cid=materials_events,
downstream_events_by_cid=downstream_events,
station=station,
direction=direction,
@@ -534,6 +565,7 @@ def lineage():
merge_edges=reverse_graph.get("merge_edges"),
typed_nodes=reverse_graph.get("nodes"),
typed_edges=reverse_graph.get("edges"),
seed_roots=reverse_graph.get("seed_roots"),
)
response["roots"] = list(container_ids)
else:
@@ -589,7 +621,13 @@ def events():
400,
)
# For MSD profile, skip the events-level cache so that aggregation is
# always recomputed with the current loss_reasons. EventFetcher still
# provides per-domain Redis caching, so raw Oracle queries are avoided.
is_msd = (profile == PROFILE_MID_SECTION_DEFECT)
events_cache_key = _events_cache_key(profile, domains, container_ids)
if not is_msd:
cached = cache_get(events_cache_key)
if cached is not None:
return jsonify(cached)
@@ -645,5 +683,6 @@ def events():
response["code"] = "EVENTS_PARTIAL_FAILURE"
response["failed_domains"] = sorted(failed_domains)
if not is_msd:
cache_set(events_cache_key, response, ttl=TRACE_CACHE_TTL_SECONDS)
return jsonify(response)

View File

@@ -801,6 +801,7 @@ class LineageEngine:
all_nodes.add(child)
recomputed_ancestors: Dict[str, Set[str]] = {}
recomputed_roots: Dict[str, str] = {}
for seed in seed_cids:
visited: Set[str] = set()
stack = list(pm.get(seed, []))
@@ -816,6 +817,15 @@ class LineageEngine:
if gp and gp not in visited:
stack.append(gp)
recomputed_ancestors[seed] = visited
# Root = ancestor with no further parents; if no ancestors, seed is its own root
if visited:
root_cid = next(
(cid for cid in visited if not pm.get(cid)),
next(iter(visited)),
)
else:
root_cid = seed
recomputed_roots[seed] = cid_to_name.get(root_cid, root_cid)
typed_nodes = LineageEngine._build_nodes_payload(all_nodes, snapshots, cid_to_name, wafer_ids)
typed_edges = _to_edge_payload(split_edges + merge_payload_edges + semantic_edges)
@@ -825,6 +835,7 @@ class LineageEngine:
"cid_to_name": cid_to_name,
"parent_map": pm,
"merge_edges": me,
"seed_roots": recomputed_roots,
"nodes": typed_nodes,
"edges": typed_edges,
}

View File

@@ -72,12 +72,9 @@ ANALYSIS_LOCK_POLL_INTERVAL_SECONDS = 0.5
TOP_N = 10
# Dimension column mapping for backward attribution charts
# by_material and by_wafer_root are handled separately (different attribution source)
DIMENSION_MAP = {
'by_station': 'WORKCENTER_GROUP',
'by_machine': 'EQUIPMENT_NAME',
'by_workflow': 'WORKFLOW',
'by_package': 'PRODUCTLINENAME',
'by_pj_type': 'PJ_TYPE',
'by_detection_machine': 'DETECTION_EQUIPMENTNAME',
}
@@ -102,7 +99,10 @@ CSV_COLUMNS_BACKWARD = [
('DEFECT_QTY', '不良數'),
('DEFECT_RATE', '不良率(%)'),
('ANCESTOR_COUNT', '上游LOT數'),
('UPSTREAM_MACHINE_COUNT', '上游台數'),
('UPSTREAM_MACHINES', '上游機台'),
('UPSTREAM_MATERIALS', '上游原物料'),
('WAFER_ROOT', '源頭批次'),
]
# CSV export column config (forward)
@@ -296,7 +296,9 @@ def build_trace_aggregation_from_events(
loss_reasons: Optional[List[str]] = None,
seed_container_ids: Optional[List[str]] = None,
lineage_ancestors: Optional[Dict[str, Any]] = None,
lineage_roots: Optional[Dict[str, str]] = None,
upstream_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
materials_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
downstream_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
station: str = '測試',
direction: str = 'backward',
@@ -308,7 +310,9 @@ def build_trace_aggregation_from_events(
loss_reasons=loss_reasons,
seed_container_ids=seed_container_ids,
lineage_ancestors=lineage_ancestors,
lineage_roots=lineage_roots,
upstream_events_by_cid=upstream_events_by_cid,
materials_events_by_cid=materials_events_by_cid,
downstream_events_by_cid=downstream_events_by_cid,
station=station,
direction=direction,
@@ -393,6 +397,7 @@ def build_trace_aggregation_from_events(
fallback_seed_ids=list(detection_data.keys()),
)
normalized_upstream = _normalize_upstream_event_records(upstream_events_by_cid or {})
normalized_materials = _normalize_materials_event_records(materials_events_by_cid or {})
attribution = _attribute_defects(
detection_data,
@@ -400,11 +405,25 @@ def build_trace_aggregation_from_events(
normalized_upstream,
normalized_loss_reasons,
)
detail = _build_detail_table(filtered_df, normalized_ancestors, normalized_upstream)
mat_attribution = _attribute_materials(
detection_data, normalized_ancestors, normalized_materials, normalized_loss_reasons,
)
root_attribution = _attribute_wafer_roots(
detection_data, lineage_roots or {}, normalized_loss_reasons,
)
detail = _build_detail_table(
filtered_df, normalized_ancestors, normalized_upstream,
materials_by_cid=normalized_materials,
roots=lineage_roots,
)
return {
'kpi': _build_kpi(filtered_df, attribution, normalized_loss_reasons),
'charts': _build_all_charts(attribution, detection_data),
'charts': _build_all_charts(
attribution, detection_data,
materials_attribution=mat_attribution,
wafer_root_attribution=root_attribution,
),
'daily_trend': _build_daily_trend(filtered_df, normalized_loss_reasons),
'available_loss_reasons': available_loss_reasons,
'genealogy_status': genealogy_status,
@@ -418,7 +437,9 @@ def _build_trace_aggregation_container_mode(
loss_reasons: Optional[List[str]] = None,
seed_container_ids: Optional[List[str]] = None,
lineage_ancestors: Optional[Dict[str, Any]] = None,
lineage_roots: Optional[Dict[str, str]] = None,
upstream_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
materials_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
downstream_events_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
station: str = '測試',
direction: str = 'backward',
@@ -512,6 +533,7 @@ def _build_trace_aggregation_container_mode(
fallback_seed_ids=list(detection_data.keys()),
)
normalized_upstream = _normalize_upstream_event_records(upstream_events_by_cid or {})
normalized_materials = _normalize_materials_event_records(materials_events_by_cid or {})
attribution = _attribute_defects(
detection_data,
@@ -519,11 +541,25 @@ def _build_trace_aggregation_container_mode(
normalized_upstream,
normalized_loss_reasons,
)
detail = _build_detail_table(filtered_df, normalized_ancestors, normalized_upstream)
mat_attribution = _attribute_materials(
detection_data, normalized_ancestors, normalized_materials, normalized_loss_reasons,
)
root_attribution = _attribute_wafer_roots(
detection_data, lineage_roots or {}, normalized_loss_reasons,
)
detail = _build_detail_table(
filtered_df, normalized_ancestors, normalized_upstream,
materials_by_cid=normalized_materials,
roots=lineage_roots,
)
return {
'kpi': _build_kpi(filtered_df, attribution, normalized_loss_reasons),
'charts': _build_all_charts(attribution, detection_data),
'charts': _build_all_charts(
attribution, detection_data,
materials_attribution=mat_attribution,
wafer_root_attribution=root_attribution,
),
'daily_trend': [],
'available_loss_reasons': available_loss_reasons,
'genealogy_status': genealogy_status,
@@ -635,7 +671,20 @@ def export_csv(
return
for row in result.get('detail', []):
writer.writerow([row.get(col, '') for col, _ in columns])
csv_row = []
for col, _ in columns:
value = row.get(col, '')
# Flatten structured list fields for CSV
if col == 'UPSTREAM_MACHINES' and isinstance(value, list):
value = ', '.join(
f"{m.get('station', '')}/{m.get('machine', '')}" for m in value
)
elif col == 'UPSTREAM_MATERIALS' and isinstance(value, list):
value = ', '.join(
f"{m.get('part', '')}/{m.get('lot', '')}" for m in value
)
csv_row.append(value)
writer.writerow(csv_row)
yield output.getvalue()
output.seek(0)
output.truncate(0)
@@ -988,7 +1037,9 @@ def _run_backward_pipeline(
'available_loss_reasons': available_loss_reasons,
'charts': _build_all_charts(attribution, detection_data),
'daily_trend': _build_daily_trend(filtered_df, loss_reasons),
'detail': _build_detail_table(filtered_df, ancestors, upstream_by_cid),
'detail': _build_detail_table(
filtered_df, ancestors, upstream_by_cid,
),
'genealogy_status': genealogy_status,
'attribution': attribution,
}
@@ -1263,6 +1314,29 @@ def _normalize_upstream_event_records(
return dict(result)
def _normalize_materials_event_records(
    events_by_cid: Dict[str, List[Dict[str, Any]]],
) -> Dict[str, List[Dict[str, Any]]]:
    """Normalize EventFetcher materials payload into attribution-ready records.

    Skips container ids that normalize to empty and events without a
    ``MATERIALPARTNAME``; only containers with at least one qualifying event
    appear in the returned mapping.
    """
    normalized: Dict[str, List[Dict[str, Any]]] = {}
    for raw_cid, events in events_by_cid.items():
        cid = _safe_str(raw_cid)
        if not cid:
            continue
        for event in events:
            part = _safe_str(event.get('MATERIALPARTNAME'))
            if not part:
                continue
            normalized.setdefault(cid, []).append({
                'MATERIALPARTNAME': part,
                'MATERIALLOTNAME': _safe_str(event.get('MATERIALLOTNAME')),
                'QTYCONSUMED': _safe_int(event.get('QTYCONSUMED')),
                'WORKCENTERNAME': _safe_str(event.get('WORKCENTERNAME')),
                'EQUIPMENTNAME': _safe_str(event.get('EQUIPMENTNAME')),
            })
    return normalized
def _normalize_downstream_event_records(
events_by_cid: Dict[str, List[Dict[str, Any]]],
) -> Dict[str, List[Dict[str, Any]]]:
@@ -1441,6 +1515,119 @@ def _attribute_defects(
return attribution
def _attribute_materials(
    detection_data: Dict[str, Dict[str, Any]],
    ancestors: Dict[str, Set[str]],
    materials_by_cid: Dict[str, List[Dict[str, Any]]],
    loss_reasons: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
    """Attribute detection station defects to upstream materials.

    Symmetric to ``_attribute_defects`` but keyed by
    ``(MATERIALPARTNAME, MATERIALLOTNAME)``. Results are sorted by defect
    rate, highest first.
    """
    # Map each (part, lot) material key to the set of detection lots that
    # consumed it anywhere in their ancestry (the seed lot itself included).
    material_to_detection: Dict[Tuple[str, str], Set[str]] = defaultdict(set)
    for det_cid in detection_data:
        for anc_cid in ancestors.get(det_cid, set()) | {det_cid}:
            for record in materials_by_cid.get(anc_cid, []):
                part = _safe_str(record.get('MATERIALPARTNAME') or record.get('material_part_name'))
                lot = _safe_str(record.get('MATERIALLOTNAME') or record.get('material_lot_name'))
                if not part:
                    continue
                material_to_detection[(part, lot or '')].add(det_cid)

    attribution: List[Dict[str, Any]] = []
    for (part_name, lot_name), det_lot_set in material_to_detection.items():
        known_cids = [cid for cid in det_lot_set if cid in detection_data]
        total_trackinqty = sum(detection_data[cid]['trackinqty'] for cid in known_cids)
        total_rejectqty = 0
        for cid in known_cids:
            by_reason = detection_data[cid]['rejectqty_by_reason']
            if loss_reasons:
                total_rejectqty += sum(by_reason.get(reason, 0) for reason in loss_reasons)
            else:
                total_rejectqty += sum(by_reason.values())
        rate = round(total_rejectqty / total_trackinqty * 100, 4) if total_trackinqty else 0.0
        display_name = f"{part_name} ({lot_name})" if lot_name else part_name
        attribution.append({
            'MATERIAL_KEY': display_name,
            'MATERIAL_PART_NAME': part_name,
            'MATERIAL_LOT_NAME': lot_name,
            'DETECTION_LOT_COUNT': len(det_lot_set),
            'INPUT_QTY': total_trackinqty,
            'DEFECT_QTY': total_rejectqty,
            'DEFECT_RATE': rate,
        })
    attribution.sort(key=lambda item: item['DEFECT_RATE'], reverse=True)
    return attribution
def _attribute_wafer_roots(
detection_data: Dict[str, Dict[str, Any]],
roots: Dict[str, str],
loss_reasons: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
"""Attribute detection station defects to wafer root ancestors.
``roots`` maps seed container_id → root container_name.
"""
root_to_detection: Dict[str, Set[str]] = defaultdict(set)
for det_cid in detection_data:
root_name = roots.get(det_cid)
if not root_name:
# Self-root: use the lot's own container name
root_name = detection_data[det_cid].get('containername', det_cid)
root_to_detection[root_name].add(det_cid)
attribution = []
for root_name, det_lot_set in root_to_detection.items():
total_trackinqty = sum(
detection_data[cid]['trackinqty'] for cid in det_lot_set
if cid in detection_data
)
total_rejectqty = 0
for cid in det_lot_set:
if cid not in detection_data:
continue
by_reason = detection_data[cid]['rejectqty_by_reason']
if loss_reasons:
for reason in loss_reasons:
total_rejectqty += by_reason.get(reason, 0)
else:
total_rejectqty += sum(by_reason.values())
rate = round(total_rejectqty / total_trackinqty * 100, 4) if total_trackinqty else 0.0
attribution.append({
'ROOT_CONTAINER_NAME': root_name,
'DETECTION_LOT_COUNT': len(det_lot_set),
'INPUT_QTY': total_trackinqty,
'DEFECT_QTY': total_rejectqty,
'DEFECT_RATE': rate,
})
attribution.sort(key=lambda x: x['DEFECT_RATE'], reverse=True)
return attribution
# ============================================================
# Forward Defect Attribution Engine
# ============================================================
@@ -1563,9 +1750,13 @@ def _build_kpi(
affected_machines = sum(1 for a in attribution if a['DEFECT_QTY'] > 0)
# Count LOTs that have at least one defect matching selected reasons
defective_lot_count = int(defect_rows['CONTAINERID'].nunique()) if not defect_rows.empty else 0
return {
'total_input': total_input,
'lot_count': lot_count,
'defective_lot_count': defective_lot_count,
'total_defect_qty': total_defect_qty,
'total_defect_rate': total_defect_rate,
'top_loss_reason': top_reason,
@@ -1707,12 +1898,27 @@ def _build_loss_reason_chart(
def _build_all_charts(
attribution: List[Dict[str, Any]],
detection_data: Dict[str, Dict[str, Any]],
*,
materials_attribution: Optional[List[Dict[str, Any]]] = None,
wafer_root_attribution: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, List[Dict]]:
"""Build chart data for all backward dimensions."""
charts = {}
for key, dim_col in DIMENSION_MAP.items():
charts[key] = _build_chart_data(attribution, dim_col)
# Materials attribution chart (keyed by MATERIAL_KEY)
if materials_attribution:
charts['by_material'] = _build_chart_data(materials_attribution, 'MATERIAL_KEY')
else:
charts['by_material'] = []
# Wafer root attribution chart (keyed by ROOT_CONTAINER_NAME)
if wafer_root_attribution:
charts['by_wafer_root'] = _build_chart_data(wafer_root_attribution, 'ROOT_CONTAINER_NAME')
else:
charts['by_wafer_root'] = []
loss_rows = []
for cid, data in detection_data.items():
trackinqty = data['trackinqty']
@@ -1913,8 +2119,11 @@ def _build_detail_table(
df: pd.DataFrame,
ancestors: Dict[str, Set[str]],
upstream_by_cid: Dict[str, List[Dict[str, Any]]],
*,
materials_by_cid: Optional[Dict[str, List[Dict[str, Any]]]] = None,
roots: Optional[Dict[str, str]] = None,
) -> List[Dict[str, Any]]:
"""Build LOT-level detail table with upstream machine info (backward)."""
"""Build LOT-level detail table with structured upstream info (backward)."""
if df.empty:
return []
@@ -1935,6 +2144,9 @@ def _build_detail_table(
if reason and qty > 0:
lot_defects[cid][reason] += qty
materials_by_cid = materials_by_cid or {}
roots = roots or {}
result = []
for _, row in lots.iterrows():
cid = row['CONTAINERID']
@@ -1942,43 +2154,65 @@ def _build_detail_table(
ancestor_set = ancestors.get(cid, set())
all_cids = ancestor_set | {cid}
upstream_machines = set()
# Structured upstream machines
seen_machines: Set[Tuple[str, str]] = set()
upstream_machines_list: List[Dict[str, str]] = []
for anc_cid in all_cids:
for rec in upstream_by_cid.get(anc_cid, []):
upstream_machines.add(f"{rec['workcenter_group']}/{rec['equipment_name']}")
pair = (rec['workcenter_group'], rec['equipment_name'])
if pair not in seen_machines:
seen_machines.add(pair)
upstream_machines_list.append({
'station': rec['workcenter_group'],
'machine': rec['equipment_name'],
})
# Structured upstream materials
seen_materials: Set[Tuple[str, str]] = set()
upstream_materials_list: List[Dict[str, str]] = []
for anc_cid in all_cids:
for rec in materials_by_cid.get(anc_cid, []):
part = rec.get('MATERIALPARTNAME', '')
lot_name = rec.get('MATERIALLOTNAME', '')
pair = (part, lot_name)
if pair not in seen_materials and part:
seen_materials.add(pair)
upstream_materials_list.append({'part': part, 'lot': lot_name})
# Wafer root
wafer_root = roots.get(cid, '')
base = {
'CONTAINERNAME': _safe_str(row.get('CONTAINERNAME')),
'PJ_TYPE': _safe_str(row.get('PJ_TYPE')),
'PRODUCTLINENAME': _safe_str(row.get('PRODUCTLINENAME')),
'WORKFLOW': _safe_str(row.get('WORKFLOW')),
'FINISHEDRUNCARD': _safe_str(row.get('FINISHEDRUNCARD')),
'DETECTION_EQUIPMENTNAME': _safe_str(row.get('DETECTION_EQUIPMENTNAME')),
'INPUT_QTY': input_qty,
'ANCESTOR_COUNT': len(ancestor_set),
'UPSTREAM_MACHINES': upstream_machines_list,
'UPSTREAM_MACHINE_COUNT': len(seen_machines),
'UPSTREAM_MATERIALS': upstream_materials_list,
'WAFER_ROOT': wafer_root,
}
reasons = lot_defects.get(cid, {})
if reasons:
for reason, qty in sorted(reasons.items()):
rate = round(qty / input_qty * 100, 4) if input_qty else 0.0
result.append({
'CONTAINERNAME': _safe_str(row.get('CONTAINERNAME')),
'PJ_TYPE': _safe_str(row.get('PJ_TYPE')),
'PRODUCTLINENAME': _safe_str(row.get('PRODUCTLINENAME')),
'WORKFLOW': _safe_str(row.get('WORKFLOW')),
'FINISHEDRUNCARD': _safe_str(row.get('FINISHEDRUNCARD')),
'DETECTION_EQUIPMENTNAME': _safe_str(row.get('DETECTION_EQUIPMENTNAME')),
'INPUT_QTY': input_qty,
**base,
'LOSS_REASON': reason,
'DEFECT_QTY': qty,
'DEFECT_RATE': rate,
'ANCESTOR_COUNT': len(ancestor_set),
'UPSTREAM_MACHINES': ', '.join(sorted(upstream_machines)),
})
else:
result.append({
'CONTAINERNAME': _safe_str(row.get('CONTAINERNAME')),
'PJ_TYPE': _safe_str(row.get('PJ_TYPE')),
'PRODUCTLINENAME': _safe_str(row.get('PRODUCTLINENAME')),
'WORKFLOW': _safe_str(row.get('WORKFLOW')),
'FINISHEDRUNCARD': _safe_str(row.get('FINISHEDRUNCARD')),
'DETECTION_EQUIPMENTNAME': _safe_str(row.get('DETECTION_EQUIPMENTNAME')),
'INPUT_QTY': input_qty,
**base,
'LOSS_REASON': '',
'DEFECT_QTY': 0,
'DEFECT_RATE': 0.0,
'ANCESTOR_COUNT': len(ancestor_set),
'UPSTREAM_MACHINES': ', '.join(sorted(upstream_machines)),
})
return result

View File

@@ -570,6 +570,7 @@ def _paginate_detail(
"WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
"WORKCENTERNAME": _normalize_text(row.get("WORKCENTERNAME")),
"SPECNAME": _normalize_text(row.get("SPECNAME")),
"WORKFLOWNAME": _normalize_text(row.get("WORKFLOWNAME")),
"EQUIPMENTNAME": _normalize_text(row.get("EQUIPMENTNAME")),
"PRODUCTLINENAME": _normalize_text(row.get("PRODUCTLINENAME")),
"PJ_TYPE": _normalize_text(row.get("PJ_TYPE")),
@@ -622,6 +623,128 @@ def _extract_available_filters(df: pd.DataFrame) -> dict:
}
# ============================================================
# Dimension Pareto from cache
# ============================================================
# Dimension → DF column mapping (matches _DIMENSION_COLUMN_MAP in reject_history_service)
# NOTE(review): the key set must stay in sync with the `dimension` values the
# reason-pareto API route accepts and with _DIMENSION_COLUMN_MAP — confirm
# both when adding a dimension.
_DIM_TO_DF_COLUMN = {
    "reason": "LOSSREASONNAME",
    "package": "PRODUCTLINENAME",
    "type": "PJ_TYPE",
    "workflow": "WORKFLOWNAME",
    "workcenter": "WORKCENTER_GROUP",
    "equipment": "PRIMARY_EQUIPMENTNAME",
}
def compute_dimension_pareto(
    *,
    query_id: str,
    dimension: str = "reason",
    metric_mode: str = "reject_total",
    pareto_scope: str = "top80",
    packages: Optional[List[str]] = None,
    workcenter_groups: Optional[List[str]] = None,
    reason: Optional[str] = None,
    trend_dates: Optional[List[str]] = None,
) -> Optional[Dict[str, Any]]:
    """Compute a dimension pareto from the cached primary-query DataFrame.

    Avoids an Oracle round-trip by re-aggregating the DataFrame cached under
    ``query_id``. Returns ``None`` when the cache entry has expired so the
    caller can fall back to a direct query; otherwise returns
    ``{"items": [...], "dimension": ..., "metric_mode": ...}`` (items may be
    empty when filters eliminate all rows or required columns are missing).
    """
    df = _get_cached_df(query_id)
    if df is None:
        # Cache miss/expiry: signal the route to fall back to Oracle.
        return None

    # Unknown dimensions silently fall back to the reason column.
    dim_col = _DIM_TO_DF_COLUMN.get(dimension, "LOSSREASONNAME")
    if dim_col not in df.columns:
        return {"items": [], "dimension": dimension, "metric_mode": metric_mode}

    # Apply supplementary filters (packages / workcenter groups / reason).
    filtered = _apply_supplementary_filters(
        df,
        packages=packages,
        workcenter_groups=workcenter_groups,
        reason=reason,
    )
    if filtered is None or filtered.empty:
        return {"items": [], "dimension": dimension, "metric_mode": metric_mode}

    # Restrict to the dates selected on the trend chart, when provided.
    if trend_dates and "TXN_DAY" in filtered.columns:
        date_set = set(trend_dates)
        filtered = filtered[
            filtered["TXN_DAY"].apply(lambda d: _to_date_str(d) in date_set)
        ]
        if filtered.empty:
            return {"items": [], "dimension": dimension, "metric_mode": metric_mode}

    # Determine metric column; anything other than "defect" means reject total.
    if metric_mode == "defect":
        metric_col = "DEFECT_QTY"
    else:
        metric_col = "REJECT_TOTAL_QTY"
    if metric_col not in filtered.columns:
        return {"items": [], "dimension": dimension, "metric_mode": metric_mode}

    # Group by the dimension column, summing whichever qty columns exist.
    agg_dict = {}
    for col in ["MOVEIN_QTY", "REJECT_TOTAL_QTY", "DEFECT_QTY"]:
        if col in filtered.columns:
            agg_dict[col] = (col, "sum")
    grouped = filtered.groupby(dim_col, sort=False).agg(**agg_dict).reset_index()

    # Count distinct lots per dimension value (0 when CONTAINERID is absent).
    if "CONTAINERID" in filtered.columns:
        lot_counts = (
            filtered.groupby(dim_col)["CONTAINERID"]
            .nunique()
            .reset_index()
            .rename(columns={"CONTAINERID": "AFFECTED_LOT_COUNT"})
        )
        grouped = grouped.merge(lot_counts, on=dim_col, how="left")
    else:
        grouped["AFFECTED_LOT_COUNT"] = 0

    # Keep only positive-metric rows, sorted descending for the pareto curve.
    grouped["METRIC_VALUE"] = grouped[metric_col].fillna(0)
    grouped = grouped[grouped["METRIC_VALUE"] > 0].sort_values(
        "METRIC_VALUE", ascending=False
    )
    if grouped.empty:
        return {"items": [], "dimension": dimension, "metric_mode": metric_mode}
    total_metric = grouped["METRIC_VALUE"].sum()
    grouped["PCT"] = (grouped["METRIC_VALUE"] / total_metric * 100).round(4)
    grouped["CUM_PCT"] = grouped["PCT"].cumsum().round(4)

    all_items = []
    for _, row in grouped.iterrows():
        all_items.append({
            # Key is named "reason" for every dimension — the frontend table
            # reads item.reason regardless of which dimension is selected.
            "reason": _normalize_text(row.get(dim_col)) or "(未知)",
            "metric_value": _as_float(row.get("METRIC_VALUE")),
            "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
            "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
            "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
            "count": _as_int(row.get("AFFECTED_LOT_COUNT")),
            "pct": round(_as_float(row.get("PCT")), 4),
            "cumPct": round(_as_float(row.get("CUM_PCT")), 4),
        })

    # top80 scope keeps items up to 80% cumulative share, but never fewer
    # than one item (the largest contributor).
    items = list(all_items)
    if pareto_scope == "top80" and items:
        top_items = [item for item in items if _as_float(item.get("cumPct")) <= 80.0]
        if not top_items:
            top_items = [items[0]]
        items = top_items

    return {
        "items": items,
        "dimension": dimension,
        "metric_mode": metric_mode,
    }
# ============================================================
# CSV export from cache
# ============================================================
@@ -670,6 +793,7 @@ def export_csv_from_cache(
"Package": _normalize_text(row.get("PRODUCTLINENAME")),
"FUNCTION": _normalize_text(row.get("PJ_FUNCTION")),
"TYPE": _normalize_text(row.get("PJ_TYPE")),
"WORKFLOW": _normalize_text(row.get("WORKFLOWNAME")),
"PRODUCT": _normalize_text(row.get("PRODUCTNAME")),
"原因": _normalize_text(row.get("LOSSREASONNAME")),
"EQUIPMENT": _normalize_text(row.get("EQUIPMENTNAME")),

View File

@@ -289,6 +289,7 @@ def _prepare_sql(
metric_column: str = "",
base_variant: str = "",
base_where: str = "",
dimension_column: str = "",
) -> str:
sql = _load_sql(name)
sql = sql.replace("{{ BASE_QUERY }}", _base_query_sql(base_variant))
@@ -297,6 +298,7 @@ def _prepare_sql(
sql = sql.replace("{{ WHERE_CLAUSE }}", where_clause or "")
sql = sql.replace("{{ BUCKET_EXPR }}", bucket_expr or "TRUNC(b.TXN_DAY)")
sql = sql.replace("{{ METRIC_COLUMN }}", metric_column or "b.REJECT_TOTAL_QTY")
sql = sql.replace("{{ DIMENSION_COLUMN }}", dimension_column or "b.LOSSREASONNAME")
return sql
@@ -579,6 +581,111 @@ def query_reason_pareto(
}
# Allowed dimension → SQL column mapping for dimension_pareto
# NOTE(review): column expressions use alias `b` from the base-query CTE; the
# key set must stay in sync with _DIM_TO_DF_COLUMN in reject_dataset_cache —
# confirm both when adding a dimension.
_DIMENSION_COLUMN_MAP = {
    "reason": "b.LOSSREASONNAME",
    "package": "b.PRODUCTLINENAME",
    "type": "b.PJ_TYPE",
    "workflow": "b.WORKFLOWNAME",
    "workcenter": "b.WORKCENTER_GROUP",
    "equipment": "b.PRIMARY_EQUIPMENTNAME",
}
def query_dimension_pareto(
    *,
    start_date: str,
    end_date: str,
    dimension: str = "reason",
    metric_mode: str = "reject_total",
    pareto_scope: str = "top80",
    workcenter_groups: Optional[list[str]] = None,
    packages: Optional[list[str]] = None,
    reasons: Optional[list[str]] = None,
    categories: Optional[list[str]] = None,
    include_excluded_scrap: bool = False,
    exclude_material_scrap: bool = True,
    exclude_pb_diode: bool = True,
) -> dict[str, Any]:
    """Pareto chart grouped by an arbitrary dimension (reason, package, type, workcenter, equipment).

    Validates the date range and the dimension/metric/scope arguments, then
    runs the ``dimension_pareto`` SQL template with the dimension column
    substituted in. Raises ``ValueError`` on any invalid argument.
    """
    _validate_range(start_date, end_date)
    normalized_dim = _normalize_text(dimension).lower() or "reason"
    if normalized_dim not in _DIMENSION_COLUMN_MAP:
        raise ValueError(f"Invalid dimension '{dimension}'. Use: {', '.join(_DIMENSION_COLUMN_MAP)}")

    # For reason dimension, delegate to existing optimized function
    # NOTE(review): the delegated result's shape presumably matches this
    # function's ("items"/"dimension"/"metric_mode"/...) — confirm against
    # query_reason_pareto before relying on meta fields.
    if normalized_dim == "reason":
        return query_reason_pareto(
            start_date=start_date, end_date=end_date,
            metric_mode=metric_mode, pareto_scope=pareto_scope,
            workcenter_groups=workcenter_groups, packages=packages,
            reasons=reasons, categories=categories,
            include_excluded_scrap=include_excluded_scrap,
            exclude_material_scrap=exclude_material_scrap,
            exclude_pb_diode=exclude_pb_diode,
        )

    normalized_metric = _normalize_text(metric_mode).lower() or "reject_total"
    if normalized_metric not in VALID_METRIC_MODE:
        raise ValueError("Invalid metric_mode. Use reject_total or defect")
    normalized_scope = _normalize_text(pareto_scope).lower() or "top80"
    if normalized_scope not in {"top80", "all"}:
        raise ValueError("Invalid pareto_scope. Use top80 or all")

    # Build the WHERE clause from the supplementary filters and substitute
    # the metric/dimension expressions into the SQL template.
    dim_col = _DIMENSION_COLUMN_MAP[normalized_dim]
    where_clause, params, meta = _build_where_clause(
        workcenter_groups=workcenter_groups,
        packages=packages,
        reasons=reasons,
        categories=categories,
        include_excluded_scrap=include_excluded_scrap,
        exclude_material_scrap=exclude_material_scrap,
        exclude_pb_diode=exclude_pb_diode,
    )
    sql = _prepare_sql(
        "dimension_pareto",
        where_clause=where_clause,
        metric_column=_metric_column(normalized_metric),
        dimension_column=dim_col,
    )
    df = read_sql_df(sql, _common_params(start_date, end_date, params))

    all_items = []
    if df is not None and not df.empty:
        for _, row in df.iterrows():
            all_items.append({
                # Key is named "reason" for every dimension — the frontend
                # reads item.reason regardless of the selected dimension.
                "reason": _normalize_text(row.get("DIMENSION_VALUE")) or "(未知)",
                "metric_value": _as_float(row.get("METRIC_VALUE")),
                "MOVEIN_QTY": _as_int(row.get("MOVEIN_QTY")),
                "REJECT_TOTAL_QTY": _as_int(row.get("REJECT_TOTAL_QTY")),
                "DEFECT_QTY": _as_int(row.get("DEFECT_QTY")),
                "count": _as_int(row.get("AFFECTED_LOT_COUNT")),
                "pct": round(_as_float(row.get("PCT")), 4),
                "cumPct": round(_as_float(row.get("CUM_PCT")), 4),
            })

    # top80 scope keeps items up to 80% cumulative share, but never fewer
    # than one item (the largest contributor).
    items = list(all_items)
    if normalized_scope == "top80" and items:
        top_items = [item for item in items if _as_float(item.get("cumPct")) <= 80.0]
        if not top_items:
            top_items = [items[0]]
        items = top_items

    return {
        "items": items,
        "dimension": normalized_dim,
        "metric_mode": normalized_metric,
        "pareto_scope": normalized_scope,
        "meta": {
            **meta,
            "total_items_after_filter": len(all_items),
            "displayed_items": len(items),
        },
    }
def _apply_metric_filter(where_clause: str, metric_filter: str) -> str:
"""Append metric-type filter (reject / defect) to an existing WHERE clause."""
if metric_filter == "reject":
@@ -648,6 +755,7 @@ def query_list(
"WORKCENTER_GROUP": _normalize_text(row.get("WORKCENTER_GROUP")),
"WORKCENTERNAME": _normalize_text(row.get("WORKCENTERNAME")),
"SPECNAME": _normalize_text(row.get("SPECNAME")),
"WORKFLOWNAME": _normalize_text(row.get("WORKFLOWNAME")),
"EQUIPMENTNAME": _normalize_text(row.get("EQUIPMENTNAME")),
"PRODUCTLINENAME": _normalize_text(row.get("PRODUCTLINENAME")),
"PJ_TYPE": _normalize_text(row.get("PJ_TYPE")),
@@ -724,6 +832,7 @@ def export_csv(
"Package": _normalize_text(row.get("PRODUCTLINENAME")),
"FUNCTION": _normalize_text(row.get("PJ_FUNCTION")),
"TYPE": _normalize_text(row.get("PJ_TYPE")),
"WORKFLOW": _normalize_text(row.get("WORKFLOWNAME")),
"PRODUCT": _normalize_text(row.get("PRODUCTNAME")),
"原因": _normalize_text(row.get("LOSSREASONNAME")),
"EQUIPMENT": _normalize_text(row.get("EQUIPMENTNAME")),

View File

@@ -0,0 +1,19 @@
-- Recent JOB records for a specific equipment (last 30 days)
--
-- Returns the newest JOB rows for the given equipment, capped at 5,
-- restricted to jobs created within a rolling 30-day window.
-- CAUSECODENAME / REPAIRCODENAME are included for display alongside the
-- job status — presumably repair/maintenance context; verify against UI.
--
-- Parameters:
--   :equipment_id - Equipment ID (RESOURCEID)
SELECT
    j.JOBID,
    j.JOBSTATUS,
    j.JOBMODELNAME,
    j.CREATEDATE,
    j.COMPLETEDATE,
    j.CAUSECODENAME,
    j.REPAIRCODENAME,
    j.RESOURCENAME
FROM DWH.DW_MES_JOB j
WHERE j.RESOURCEID = :equipment_id
  -- Rolling 30-day window relative to DB server time.
  AND j.CREATEDATE >= SYSDATE - 30
ORDER BY j.CREATEDATE DESC
-- Cap the result set; ORDER BY is applied before the row limit.
FETCH FIRST 5 ROWS ONLY

View File

@@ -0,0 +1,43 @@
-- Reject History Dimension Pareto (generic dimension grouping)
-- Template slots:
--   BASE_WITH_CTE    (base reject-history daily dataset SQL wrapped as CTE "base")
--   METRIC_COLUMN    (metric expression: b.REJECT_TOTAL_QTY or b.DEFECT_QTY)
--   WHERE_CLAUSE     (QueryBuilder-generated WHERE clause against alias b)
--   DIMENSION_COLUMN (column to group by, e.g. b.PJ_TYPE, b.PRODUCTLINENAME)
-- NOTE: the grouped value is always exposed under the fixed alias
-- DIMENSION_VALUE; there is no DIMENSION_ALIAS slot in the template body.
{{ BASE_WITH_CTE }},
dim_agg AS (
    -- Aggregate the base daily rows once per dimension value.
    SELECT
        {{ DIMENSION_COLUMN }} AS DIMENSION_VALUE,
        SUM({{ METRIC_COLUMN }}) AS METRIC_VALUE,
        SUM(b.MOVEIN_QTY) AS MOVEIN_QTY,
        SUM(b.REJECT_TOTAL_QTY) AS REJECT_TOTAL_QTY,
        SUM(b.DEFECT_QTY) AS DEFECT_QTY,
        SUM(b.AFFECTED_LOT_COUNT) AS AFFECTED_LOT_COUNT
    FROM base b
    {{ WHERE_CLAUSE }}
    GROUP BY
        {{ DIMENSION_COLUMN }}
    -- Dimensions whose metric sums to zero never appear in the Pareto.
    HAVING SUM({{ METRIC_COLUMN }}) > 0
)
SELECT
    DIMENSION_VALUE,
    METRIC_VALUE,
    MOVEIN_QTY,
    REJECT_TOTAL_QTY,
    DEFECT_QTY,
    AFFECTED_LOT_COUNT,
    -- Share of the grand total, percent with 4 decimal places.
    ROUND(
        METRIC_VALUE * 100 / NULLIF(SUM(METRIC_VALUE) OVER (), 0),
        4
    ) AS PCT,
    -- Cumulative percent in Pareto order (metric desc); DIMENSION_VALUE is
    -- the tie-breaker so the running total is deterministic.
    ROUND(
        SUM(METRIC_VALUE) OVER (
            ORDER BY METRIC_VALUE DESC, DIMENSION_VALUE
            ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
        ) * 100 / NULLIF(SUM(METRIC_VALUE) OVER (), 0),
        4
    ) AS CUM_PCT
FROM dim_agg
ORDER BY METRIC_VALUE DESC, DIMENSION_VALUE

View File

@@ -11,6 +11,7 @@ SELECT
b.WORKCENTER_GROUP,
b.WORKCENTERNAME,
b.SPECNAME,
b.WORKFLOWNAME,
b.EQUIPMENTNAME,
b.PRODUCTLINENAME,
b.PJ_FUNCTION,

View File

@@ -30,6 +30,7 @@ SELECT
p.WORKCENTERSEQUENCE_GROUP,
p.WORKCENTERNAME,
p.SPECNAME,
p.WORKFLOWNAME,
p.EQUIPMENTNAME,
p.PRIMARY_EQUIPMENTNAME,
p.PRODUCTLINENAME,

View File

@@ -30,6 +30,17 @@ WITH spec_map AS (
WHERE SPEC IS NOT NULL
GROUP BY SPEC
),
workflow_lookup AS (
SELECT /*+ MATERIALIZE */ DISTINCT w.CONTAINERID, w.WORKFLOWNAME
FROM DWH.DW_MES_WIP w
WHERE w.PRODUCTLINENAME <> '點測'
AND w.CONTAINERID IN (
SELECT DISTINCT r0.CONTAINERID
FROM DWH.DW_MES_LOTREJECTHISTORY r0
WHERE r0.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
AND r0.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
)
),
reject_raw AS (
SELECT
TRUNC(r.TXNDATE) AS TXN_DAY,
@@ -48,6 +59,7 @@ reject_raw AS (
TRIM(REGEXP_SUBSTR(r.EQUIPMENTNAME, '[^,]+', 1, 1)),
NVL(TRIM(r.EQUIPMENTNAME), '(NA)')
) AS PRIMARY_EQUIPMENTNAME,
NVL(TRIM(wf.WORKFLOWNAME), NVL(TRIM(r.SPECNAME), '(NA)')) AS WORKFLOWNAME,
NVL(TRIM(r.LOSSREASONNAME), '(未填寫)') AS LOSSREASONNAME,
NVL(
TRIM(REGEXP_SUBSTR(NVL(TRIM(r.LOSSREASONNAME), '(未填寫)'), '^[^_[:space:]-]+')),
@@ -78,6 +90,8 @@ reject_raw AS (
ON c.CONTAINERID = r.CONTAINERID
LEFT JOIN spec_map sm
ON sm.SPEC = TRIM(r.SPECNAME)
LEFT JOIN workflow_lookup wf
ON wf.CONTAINERID = r.CONTAINERID
WHERE {{ BASE_WHERE }}
),
daily_agg AS (
@@ -88,6 +102,7 @@ daily_agg AS (
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,
@@ -115,6 +130,7 @@ daily_agg AS (
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,
@@ -131,6 +147,7 @@ SELECT
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,

View File

@@ -19,6 +19,17 @@ WITH spec_map AS (
WHERE SPEC IS NOT NULL
GROUP BY SPEC
),
workflow_lookup AS (
SELECT /*+ MATERIALIZE */ DISTINCT w.CONTAINERID, w.WORKFLOWNAME
FROM DWH.DW_MES_WIP w
WHERE w.PRODUCTLINENAME <> '點測'
AND w.CONTAINERID IN (
SELECT DISTINCT r0.CONTAINERID
FROM DWH.DW_MES_LOTREJECTHISTORY r0
WHERE r0.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
AND r0.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
)
),
reject_raw AS (
SELECT
r.TXNDATE,
@@ -41,6 +52,7 @@ reject_raw AS (
TRIM(REGEXP_SUBSTR(r.EQUIPMENTNAME, '[^,]+', 1, 1)),
NVL(TRIM(r.EQUIPMENTNAME), '(NA)')
) AS PRIMARY_EQUIPMENTNAME,
NVL(TRIM(wf.WORKFLOWNAME), NVL(TRIM(r.SPECNAME), '(NA)')) AS WORKFLOWNAME,
NVL(TRIM(r.LOSSREASONNAME), '(未填寫)') AS LOSSREASONNAME,
NVL(
TRIM(REGEXP_SUBSTR(NVL(TRIM(r.LOSSREASONNAME), '(未填寫)'), '^[^_[:space:]-]+')),
@@ -72,6 +84,8 @@ reject_raw AS (
ON c.CONTAINERID = r.CONTAINERID
LEFT JOIN spec_map sm
ON sm.SPEC = TRIM(r.SPECNAME)
LEFT JOIN workflow_lookup wf
ON wf.CONTAINERID = r.CONTAINERID
WHERE {{ BASE_WHERE }}
),
daily_agg AS (
@@ -86,6 +100,7 @@ daily_agg AS (
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,
@@ -118,6 +133,7 @@ daily_agg AS (
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,
@@ -139,6 +155,7 @@ SELECT
WORKCENTERSEQUENCE_GROUP,
WORKCENTERNAME,
SPECNAME,
WORKFLOWNAME,
EQUIPMENTNAME,
PRIMARY_EQUIPMENTNAME,
PRODUCTLINENAME,

View File

@@ -8,6 +8,8 @@ from unittest.mock import patch
import pandas as pd
from mes_dashboard.services.mid_section_defect_service import (
_attribute_materials,
_attribute_wafer_roots,
build_trace_aggregation_from_events,
query_analysis,
query_analysis_detail,
@@ -251,3 +253,112 @@ def test_query_station_options_returns_ordered_list():
assert result[0]['order'] == 0
assert result[-1]['name'] == '測試'
assert result[-1]['order'] == 11
# --- _attribute_materials tests ---
def _make_detection_data(entries):
"""Helper: build detection_data dict from simplified entries."""
data = {}
for e in entries:
data[e['cid']] = {
'containername': e.get('name', e['cid']),
'trackinqty': e['trackinqty'],
'rejectqty_by_reason': e.get('reasons', {}),
}
return data
def test_attribute_materials_basic_rate_calculation():
    """Two detection lots sharing one ancestor aggregate into a single row."""
    detection_data = _make_detection_data([
        {'cid': 'C1', 'trackinqty': 100, 'reasons': {'R1': 5}},
        {'cid': 'C2', 'trackinqty': 200, 'reasons': {'R1': 10}},
    ])
    shared_ancestors = {'C1': {'A1'}, 'C2': {'A1'}}
    materials = {
        'A1': [{'MATERIALPARTNAME': 'PART-A', 'MATERIALLOTNAME': 'LOT-X'}],
    }

    rows = _attribute_materials(detection_data, shared_ancestors, materials)

    assert len(rows) == 1
    row = rows[0]
    assert row['MATERIAL_KEY'] == 'PART-A (LOT-X)'
    assert row['INPUT_QTY'] == 300
    assert row['DEFECT_QTY'] == 15
    # 15 defects over 300 input -> 5% rate, compared with a small tolerance.
    assert abs(row['DEFECT_RATE'] - 5.0) < 0.01
    assert row['DETECTION_LOT_COUNT'] == 2
def test_attribute_materials_null_lot_name():
    """A None MATERIALLOTNAME collapses the key to the part name alone."""
    rows = _attribute_materials(
        _make_detection_data([
            {'cid': 'C1', 'trackinqty': 100, 'reasons': {'R1': 3}},
        ]),
        {'C1': {'A1'}},
        {'A1': [{'MATERIALPARTNAME': 'PART-B', 'MATERIALLOTNAME': None}]},
    )

    assert len(rows) == 1
    assert rows[0]['MATERIAL_KEY'] == 'PART-B'
    assert rows[0]['MATERIAL_LOT_NAME'] == ''
def test_attribute_materials_with_loss_reason_filter():
    """Only reasons listed in loss_reasons contribute to DEFECT_QTY."""
    detection = _make_detection_data([
        {'cid': 'C1', 'trackinqty': 100, 'reasons': {'R1': 5, 'R2': 3}},
    ])

    rows = _attribute_materials(
        detection,
        {'C1': {'A1'}},
        {'A1': [{'MATERIALPARTNAME': 'P', 'MATERIALLOTNAME': 'L'}]},
        loss_reasons=['R1'],
    )

    # R2's 3 units are excluded by the filter; only R1's 5 remain.
    assert rows[0]['DEFECT_QTY'] == 5
# --- _attribute_wafer_roots tests ---
def test_attribute_wafer_roots_basic():
    """Lots mapped to the same root aggregate into one row."""
    detection = _make_detection_data([
        {'cid': 'C1', 'name': 'LOT-1', 'trackinqty': 100, 'reasons': {'R1': 5}},
        {'cid': 'C2', 'name': 'LOT-2', 'trackinqty': 200, 'reasons': {'R1': 10}},
    ])

    rows = _attribute_wafer_roots(detection, {'C1': 'ROOT-A', 'C2': 'ROOT-A'})

    assert len(rows) == 1
    row = rows[0]
    assert row['ROOT_CONTAINER_NAME'] == 'ROOT-A'
    assert row['INPUT_QTY'] == 300
    assert row['DEFECT_QTY'] == 15
def test_attribute_wafer_roots_self_root():
    """LOTs with no root mapping should use their own container name."""
    detection = _make_detection_data([
        {'cid': 'C1', 'name': 'LOT-SELF', 'trackinqty': 100, 'reasons': {'R1': 2}},
    ])

    # Empty mapping: C1 has no recorded root, so it becomes its own root.
    rows = _attribute_wafer_roots(detection, {})

    assert len(rows) == 1
    assert rows[0]['ROOT_CONTAINER_NAME'] == 'LOT-SELF'
def test_attribute_wafer_roots_multiple_roots():
    """Distinct roots yield distinct rows, ordered by DEFECT_RATE desc."""
    detection = _make_detection_data([
        {'cid': 'C1', 'name': 'L1', 'trackinqty': 100, 'reasons': {'R1': 5}},
        {'cid': 'C2', 'name': 'L2', 'trackinqty': 200, 'reasons': {'R1': 20}},
    ])

    rows = _attribute_wafer_roots(detection, {'C1': 'ROOT-A', 'C2': 'ROOT-B'})

    assert len(rows) == 2
    # ROOT-B (20/200 = 10%) outranks ROOT-A (5/100 = 5%).
    assert [r['ROOT_CONTAINER_NAME'] for r in rows] == ['ROOT-B', 'ROOT-A']

View File

@@ -1152,6 +1152,62 @@ class TestWorkcenterGroupsEndpoint:
assert 'error' in data
class TestEquipmentRecentJobsEndpoint:
    """Tests for /api/query-tool/equipment-recent-jobs/<equipment_id> endpoint.

    Both SQLLoader.load and read_sql_df are patched, so only the route's
    request handling and response shaping are exercised — no Oracle access.
    unittest.mock applies stacked @patch decorators bottom-up, so the
    innermost decorator (SQLLoader.load) binds to the first mock parameter
    (_mock_sql).  `client` is presumably a pytest Flask test-client
    fixture — confirm against conftest.
    """

    @patch('mes_dashboard.core.database.read_sql_df')
    @patch('mes_dashboard.sql.SQLLoader.load', return_value='SELECT 1')
    def test_returns_recent_jobs(self, _mock_sql, mock_read_sql, client):
        """Should return recent JOB records for given equipment."""
        import pandas as pd
        # One fake JOB row shaped like the columns the recent-jobs SQL selects.
        mock_read_sql.return_value = pd.DataFrame([
            {
                'JOBID': 'JOB-001',
                'JOBSTATUS': 'Complete',
                'JOBMODELNAME': 'MODEL-A',
                'CREATEDATE': '2026-02-01 10:00:00',
                'COMPLETEDATE': '2026-02-01 12:00:00',
                'CAUSECODENAME': 'CAUSE-1',
                'REPAIRCODENAME': 'REPAIR-1',
                'RESOURCENAME': 'EQ-001',
            },
        ])
        response = client.get('/api/query-tool/equipment-recent-jobs/EQ001')
        assert response.status_code == 200
        data = json.loads(response.data)
        assert len(data['data']) == 1
        assert data['data'][0]['JOBID'] == 'JOB-001'
        assert data['total'] == 1

    @patch('mes_dashboard.core.database.read_sql_df')
    @patch('mes_dashboard.sql.SQLLoader.load', return_value='SELECT 1')
    def test_returns_empty_when_no_jobs(self, _mock_sql, mock_read_sql, client):
        """Should return empty list when no jobs found."""
        import pandas as pd
        # Empty DataFrame -> route must normalize to an empty list, not error.
        mock_read_sql.return_value = pd.DataFrame()
        response = client.get('/api/query-tool/equipment-recent-jobs/EQ002')
        assert response.status_code == 200
        data = json.loads(response.data)
        assert data['data'] == []
        assert data['total'] == 0

    @patch('mes_dashboard.core.database.read_sql_df')
    @patch('mes_dashboard.sql.SQLLoader.load', return_value='SELECT 1')
    def test_handles_db_error(self, _mock_sql, mock_read_sql, client):
        """Should return 500 on database error."""
        # Simulate a failure surfacing from the DB reader.
        mock_read_sql.side_effect = Exception('DB connection failed')
        response = client.get('/api/query-tool/equipment-recent-jobs/EQ003')
        assert response.status_code == 500
        data = json.loads(response.data)
        assert 'error' in data
class TestLotHistoryWithWorkcenterFilter:
"""Tests for /api/query-tool/lot-history with workcenter filter."""

View File

@@ -166,7 +166,7 @@ class TestRejectHistoryApiRoutes(TestRejectHistoryRoutesBase):
self.assertEqual(response.status_code, 400)
self.assertFalse(payload['success'])
@patch('mes_dashboard.routes.reject_history_routes.query_reason_pareto')
@patch('mes_dashboard.routes.reject_history_routes.query_dimension_pareto')
def test_reason_pareto_defaults_top80(self, mock_pareto):
mock_pareto.return_value = {'items': [], 'metric_mode': 'reject_total', 'pareto_scope': 'top80', 'meta': {}}
@@ -176,6 +176,47 @@ class TestRejectHistoryApiRoutes(TestRejectHistoryRoutesBase):
_, kwargs = mock_pareto.call_args
self.assertEqual(kwargs['pareto_scope'], 'top80')
self.assertEqual(kwargs['metric_mode'], 'reject_total')
self.assertEqual(kwargs['dimension'], 'reason')
@patch('mes_dashboard.routes.reject_history_routes.query_dimension_pareto')
def test_dimension_pareto_accepts_package(self, mock_pareto):
    """dimension=package must be forwarded to query_dimension_pareto."""
    mock_pareto.return_value = {
        'items': [{'reason': 'PKG-A', 'metric_value': 100, 'pct': 50, 'cumPct': 50}],
        'dimension': 'package',
        'metric_mode': 'reject_total',
        'pareto_scope': 'all',
        'meta': {},
    }

    response = self.client.get(
        '/api/reject-history/reason-pareto?start_date=2026-02-01&end_date=2026-02-07&dimension=package&pareto_scope=all'
    )
    body = json.loads(response.data)

    self.assertEqual(response.status_code, 200)
    self.assertTrue(body['success'])
    # Inspect the keyword arguments the route passed to the service layer.
    _, forwarded = mock_pareto.call_args
    self.assertEqual(forwarded['dimension'], 'package')
@patch('mes_dashboard.routes.reject_history_routes.query_dimension_pareto')
def test_dimension_pareto_accepts_equipment(self, mock_pareto):
    """dimension=equipment must be forwarded to query_dimension_pareto."""
    mock_pareto.return_value = {
        'items': [{'reason': 'EQ-01', 'metric_value': 50, 'pct': 100, 'cumPct': 100}],
        'dimension': 'equipment',
        'metric_mode': 'reject_total',
        'pareto_scope': 'top80',
        'meta': {},
    }

    response = self.client.get(
        '/api/reject-history/reason-pareto?start_date=2026-02-01&end_date=2026-02-07&dimension=equipment'
    )
    body = json.loads(response.data)

    self.assertEqual(response.status_code, 200)
    self.assertTrue(body['success'])
    # The route should hand the dimension through to the service call.
    _, forwarded = mock_pareto.call_args
    self.assertEqual(forwarded['dimension'], 'equipment')
@patch('mes_dashboard.routes.reject_history_routes.query_list')
@patch('mes_dashboard.core.rate_limit.check_and_record', return_value=(True, 6))

View File

@@ -9,7 +9,7 @@ import mes_dashboard.core.database as db
from mes_dashboard.app import create_app
from mes_dashboard.core.cache import NoOpCache
from mes_dashboard.core.rate_limit import reset_rate_limits_for_tests
from mes_dashboard.routes.trace_routes import _lineage_cache_key
from mes_dashboard.routes.trace_routes import _lineage_cache_key, _seed_cache_key
def _client():
@@ -380,3 +380,105 @@ def test_events_rate_limited_returns_429(_mock_rate_limit):
assert response.headers.get('Retry-After') == '5'
payload = response.get_json()
assert payload['error']['code'] == 'TOO_MANY_REQUESTS'
# ---- MSD cache isolation tests ----
def test_msd_seed_cache_key_ignores_loss_reasons():
    """Changing loss_reasons should not change the seed cache key for MSD."""
    base_params = {
        'start_date': '2025-01-01',
        'end_date': '2025-01-31',
        'station': '測試',
        'direction': 'backward',
    }
    variants = [
        dict(base_params, loss_reasons=['A', 'B', 'C']),
        dict(base_params, loss_reasons=['A']),
        base_params,
    ]

    keys = {_seed_cache_key('mid_section_defect', params) for params in variants}

    # All three parameter sets must collapse to one identical cache key.
    assert len(keys) == 1
def test_non_msd_seed_cache_key_includes_all_params():
    """For non-MSD profiles the seed cache key should still hash all params."""
    common = {'resolve_type': 'lot_id', 'values': ['LOT-001']}

    key_x = _seed_cache_key('query_tool', dict(common, extra='x'))
    key_y = _seed_cache_key('query_tool', dict(common, extra='y'))

    # Any differing parameter must produce a different key.
    assert key_x != key_y
@patch('mes_dashboard.routes.trace_routes.build_trace_aggregation_from_events')
@patch('mes_dashboard.routes.trace_routes.EventFetcher.fetch_events')
def test_msd_events_recomputes_aggregation_on_each_call(
    mock_fetch_events,
    mock_build_aggregation,
):
    """MSD events should NOT use events-level cache, so aggregation is always fresh.

    Patch decorators apply bottom-up: the innermost one
    (EventFetcher.fetch_events) binds to the first parameter.
    """
    # Raw per-container events as EventFetcher would return them.
    mock_fetch_events.return_value = {
        'CID-001': [{'CONTAINERID': 'CID-001', 'WORKCENTER_GROUP': '測試'}]
    }
    # Minimal aggregation payload shape the route re-emits to the client.
    mock_build_aggregation.return_value = {
        'kpi': {'total_input': 100},
        'charts': {},
        'daily_trend': [],
        'available_loss_reasons': [],
        'genealogy_status': 'ready',
        'detail_total_count': 0,
    }
    client = _client()
    body = {
        'profile': 'mid_section_defect',
        'container_ids': ['CID-001'],
        'domains': ['upstream_history'],
        'params': {
            'start_date': '2025-01-01',
            'end_date': '2025-01-31',
            'loss_reasons': ['Reason-A'],
        },
        'lineage': {'ancestors': {'CID-001': ['CID-A']}},
        'seed_container_ids': ['CID-001'],
    }
    # First call
    resp1 = client.post('/api/trace/events', json=body)
    assert resp1.status_code == 200
    # Second call with different loss_reasons — aggregation must be re-invoked
    body['params']['loss_reasons'] = ['Reason-B']
    resp2 = client.post('/api/trace/events', json=body)
    assert resp2.status_code == 200
    # Two calls -> two aggregation computations: the route did not serve a
    # stale cached aggregation for the changed filter.
    assert mock_build_aggregation.call_count == 2
@patch('mes_dashboard.routes.trace_routes.EventFetcher.fetch_events')
@patch('mes_dashboard.routes.trace_routes.cache_get')
@patch('mes_dashboard.routes.trace_routes.cache_set')
def test_non_msd_events_cache_unchanged(mock_cache_set, mock_cache_get, mock_fetch_events):
    """Non-MSD profiles should still use events-level cache as before.

    Patch decorators apply bottom-up: cache_set (innermost) binds to the
    first parameter, fetch_events (outermost) to the last.
    """
    # Pre-seed the events-level cache so the route can short-circuit.
    cached_response = {
        'stage': 'events',
        'results': {'history': {'data': [], 'count': 0}},
        'aggregation': None,
    }
    mock_cache_get.return_value = cached_response
    client = _client()
    response = client.post(
        '/api/trace/events',
        json={
            'profile': 'query_tool',
            'container_ids': ['CID-001'],
            'domains': ['history'],
        },
    )
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['stage'] == 'events'
    # EventFetcher should NOT have been called — served from cache
    mock_fetch_events.assert_not_called()