feat(query-tool): align lineage model and tighten timeline mapping

This commit is contained in:
egg
2026-02-22 17:36:47 +08:00
parent 6016c31e4d
commit 9890586191
29 changed files with 2090 additions and 299 deletions

View File

@@ -20,8 +20,8 @@ const TAB_EQUIPMENT = 'equipment';
const VALID_TABS = new Set([TAB_LOT, TAB_REVERSE, TAB_EQUIPMENT]);
const tabItems = Object.freeze([
{ key: TAB_LOT, label: '批次追蹤(正向)', subtitle: '由批次展開下游血緣與明細' },
{ key: TAB_REVERSE, label: '流水批反查(反向)', subtitle: '由成品流水號回溯上游批次' },
{ key: TAB_LOT, label: '批次追蹤(正向)', subtitle: '由 Wafer LOT / GA-GC 工單展開下游血緣與明細' },
{ key: TAB_REVERSE, label: '流水批反查(反向)', subtitle: '由成品流水號 / GD 工單 / GD LOT 回溯上游批次' },
{ key: TAB_EQUIPMENT, label: '設備生產批次追蹤', subtitle: '設備紀錄與時序視圖' },
]);
@@ -48,6 +48,7 @@ function readStateFromUrl() {
lotSubTab: normalizeText(params.get('lot_sub_tab')) || 'history',
lotWorkcenterGroups: parseArrayParam(params, 'workcenter_groups'),
reverseInputType: normalizeText(params.get('reverse_input_type')) || (tab === TAB_REVERSE ? legacyInputType : '') || 'serial_number',
reverseInputText: parseArrayParam(params, 'reverse_values').join('\n') || (tab === TAB_REVERSE ? legacyInputText : ''),
reverseSelectedContainerId: normalizeText(params.get('reverse_container_id')) || (tab === TAB_REVERSE ? legacySelectedContainerId : ''),
reverseSubTab: normalizeText(params.get('reverse_sub_tab')) || (tab === TAB_REVERSE ? legacyLotSubTab : 'history'),
@@ -68,13 +69,13 @@ const activeTab = ref(initialState.tab);
const lotResolve = useLotResolve({
inputType: initialState.lotInputType,
inputText: initialState.lotInputText,
allowedTypes: ['lot_id', 'work_order'],
allowedTypes: ['wafer_lot', 'lot_id', 'work_order'],
});
const reverseResolve = useLotResolve({
inputType: 'serial_number',
inputType: initialState.reverseInputType,
inputText: initialState.reverseInputText,
allowedTypes: ['serial_number'],
allowedTypes: ['serial_number', 'gd_work_order', 'gd_lot_id'],
});
const lotLineage = useLotLineage({
@@ -151,6 +152,7 @@ function buildUrlState() {
parseInputValues(reverseResolve.inputText.value).forEach((value) => {
params.append('reverse_values', value);
});
params.set('reverse_input_type', reverseResolve.inputType.value);
if (lotDetail.selectedContainerId.value) {
params.set('lot_container_id', lotDetail.selectedContainerId.value);
@@ -202,7 +204,7 @@ function buildUrlState() {
params.set('container_id', lotDetail.selectedContainerId.value);
}
} else if (activeTab.value === TAB_REVERSE) {
params.set('input_type', 'serial_number');
params.set('input_type', reverseResolve.inputType.value);
parseInputValues(reverseResolve.inputText.value).forEach((value) => {
params.append('values', value);
});
@@ -242,7 +244,7 @@ async function applyStateFromUrl() {
lotResolve.setInputType(state.lotInputType);
lotResolve.setInputText(state.lotInputText);
reverseResolve.setInputType('serial_number');
reverseResolve.setInputType(state.reverseInputType);
reverseResolve.setInputText(state.reverseInputText);
lotDetail.activeSubTab.value = state.lotSubTab;
@@ -395,6 +397,7 @@ watch(
lotDetail.selectedWorkcenterGroups,
reverseResolve.inputText,
reverseResolve.inputType,
reverseDetail.selectedContainerId,
reverseDetail.activeSubTab,
reverseDetail.selectedWorkcenterGroups,
@@ -476,6 +479,8 @@ watch(
:not-found="lotResolve.notFound.value"
:lineage-map="lotLineage.lineageMap"
:name-map="lotLineage.nameMap"
:node-meta-map="lotLineage.nodeMetaMap"
:edge-type-map="lotLineage.edgeTypeMap"
:leaf-serials="lotLineage.leafSerials"
:lineage-loading="lotLineage.lineageLoading.value"
:selected-container-ids="lotLineage.selectedContainerIds.value"
@@ -513,6 +518,8 @@ watch(
:not-found="reverseResolve.notFound.value"
:lineage-map="reverseLineage.lineageMap"
:name-map="reverseLineage.nameMap"
:node-meta-map="reverseLineage.nodeMetaMap"
:edge-type-map="reverseLineage.edgeTypeMap"
:leaf-serials="reverseLineage.leafSerials"
:lineage-loading="reverseLineage.lineageLoading.value"
:selected-container-ids="reverseLineage.selectedContainerIds.value"

View File

@@ -12,12 +12,30 @@ import { normalizeText } from '../utils/values.js';
use([CanvasRenderer, TreeChart, TooltipComponent]);
// Node fill colors keyed by semantic node type. wafer/gc/ga/gd come from
// backend node metadata (node_type); root/branch/leaf are structural
// fallbacks and serial marks finished-goods serial pseudo-nodes.
const NODE_COLORS = {
  wafer: '#2563EB',
  gc: '#06B6D4',
  ga: '#10B981',
  gd: '#EF4444',
  root: '#3B82F6',
  branch: '#10B981',
  leaf: '#F59E0B',
  serial: '#94A3B8',
};

// Link styles per lineage edge_type enum (split_from / merge_source /
// wafer_origin / gd_rework_source); `default` covers unknown edge types.
const EDGE_STYLES = Object.freeze({
  split_from: { color: '#CBD5E1', type: 'solid', width: 1.5 },
  merge_source: { color: '#F59E0B', type: 'dashed', width: 1.8 },
  wafer_origin: { color: '#2563EB', type: 'dotted', width: 1.8 },
  gd_rework_source: { color: '#EF4444', type: 'dashed', width: 1.8 },
  default: { color: '#CBD5E1', type: 'solid', width: 1.5 },
});

// Shared label background so node labels stay readable when drawn over edges.
const LABEL_BASE_STYLE = Object.freeze({
  backgroundColor: 'rgba(255,255,255,0.92)',
  borderRadius: 3,
  padding: [1, 4],
});
const props = defineProps({
treeRoots: {
type: Array,
@@ -31,6 +49,14 @@ const props = defineProps({
type: Object,
default: () => new Map(),
},
nodeMetaMap: {
type: Object,
default: () => new Map(),
},
edgeTypeMap: {
type: Object,
default: () => new Map(),
},
leafSerials: {
type: Object,
default: () => new Map(),
@@ -84,6 +110,20 @@ const allSerialNames = computed(() => {
});
function detectNodeType(cid, entry, serials) {
const explicitType = normalizeText(props.nodeMetaMap?.get?.(cid)?.node_type).toUpperCase();
if (explicitType === 'WAFER') {
return 'wafer';
}
if (explicitType === 'GC') {
return 'gc';
}
if (explicitType === 'GA') {
return 'ga';
}
if (explicitType === 'GD') {
return 'gd';
}
if (rootsSet.value.has(cid)) {
return 'root';
}
@@ -97,7 +137,20 @@ function detectNodeType(cid, entry, serials) {
return 'branch';
}
function buildNode(cid, visited) {
/**
 * Look up the semantic edge type between a parent and child container.
 * Tries the forward key "parent->child" first, then the reversed key,
 * since the edge map may encode the relationship in either direction.
 * Returns '' when neither direction is present or an id is blank.
 */
function lookupEdgeType(parentCid, childCid) {
  const from = normalizeText(parentCid);
  const to = normalizeText(childCid);
  if (!from || !to) {
    return '';
  }
  const forward = normalizeText(props.edgeTypeMap?.get?.(`${from}->${to}`));
  return forward || normalizeText(props.edgeTypeMap?.get?.(`${to}->${from}`));
}
function buildNode(cid, visited, parentCid = '') {
const id = normalizeText(cid);
if (!id || visited.has(id)) {
return null;
@@ -112,7 +165,7 @@ function buildNode(cid, visited) {
const isSelected = selectedSet.value.has(id);
const children = childIds
.map((childId) => buildNode(childId, visited))
.map((childId) => buildNode(childId, visited, id))
.filter(Boolean);
if (children.length === 0 && serials.length > 0) {
@@ -141,10 +194,12 @@ function buildNode(cid, visited) {
&& allSerialNames.value.has(name);
const effectiveType = isSerialLike ? 'serial' : nodeType;
const color = NODE_COLORS[effectiveType] || NODE_COLORS.branch;
const incomingEdgeType = lookupEdgeType(parentCid, id);
const incomingEdgeStyle = EDGE_STYLES[incomingEdgeType] || EDGE_STYLES.default;
return {
name,
value: { cid: id, type: effectiveType },
value: { cid: id, type: effectiveType, edgeType: incomingEdgeType || '' },
children,
itemStyle: {
color,
@@ -152,12 +207,16 @@ function buildNode(cid, visited) {
borderWidth: isSelected ? 3 : 1,
},
label: {
...LABEL_BASE_STYLE,
position: children.length > 0 ? 'top' : 'right',
distance: children.length > 0 ? 8 : 6,
fontWeight: isSelected ? 'bold' : 'normal',
fontSize: isSerialLike ? 10 : 11,
color: isSelected ? '#1E3A8A' : (isSerialLike ? '#64748B' : '#334155'),
},
symbol: isSerialLike ? 'diamond' : (nodeType === 'root' ? 'roundRect' : 'circle'),
symbolSize: isSerialLike ? 6 : (nodeType === 'root' ? 14 : 10),
lineStyle: incomingEdgeStyle,
};
}
@@ -200,11 +259,13 @@ const TREE_SERIES_DEFAULTS = Object.freeze({
label: {
show: true,
position: 'right',
distance: 6,
fontSize: 11,
color: '#334155',
overflow: 'truncate',
ellipsis: '…',
width: 160,
...LABEL_BASE_STYLE,
},
lineStyle: {
width: 1.5,
@@ -238,6 +299,14 @@ const chartOption = computed(() => {
const lines = [`<b>${data.name}</b>`];
if (val.type === 'serial') {
lines.push('<span style="color:#64748B">成品序列號</span>');
} else if (val.type === 'wafer') {
lines.push('<span style="color:#2563EB">Wafer LOT</span>');
} else if (val.type === 'gc') {
lines.push('<span style="color:#06B6D4">GC LOT</span>');
} else if (val.type === 'ga') {
lines.push('<span style="color:#10B981">GA LOT</span>');
} else if (val.type === 'gd') {
lines.push('<span style="color:#EF4444">GD LOT重工</span>');
} else if (val.type === 'root') {
lines.push('<span style="color:#3B82F6">根節點(晶批)</span>');
} else if (val.type === 'leaf') {
@@ -245,6 +314,9 @@ const chartOption = computed(() => {
} else if (val.type === 'branch') {
lines.push('<span style="color:#10B981">中間節點</span>');
}
if (val.edgeType) {
lines.push(`<span style="color:#94A3B8;font-size:11px">關係: ${val.edgeType}</span>`);
}
if (val.cid && val.cid !== data.name) {
lines.push(`<span style="color:#94A3B8;font-size:11px">CID: ${val.cid}</span>`);
}
@@ -325,21 +397,45 @@ function handleNodeClick(params) {
<div class="flex items-center gap-3">
<div class="flex items-center gap-2 text-[10px] text-slate-500">
<span class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rounded-sm" :style="{ background: NODE_COLORS.root }" />
晶批
<span class="inline-block size-2.5 rounded-sm" :style="{ background: NODE_COLORS.wafer }" />
Wafer
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rounded-full" :style="{ background: NODE_COLORS.branch }" />
中間
<span class="inline-block size-2.5 rounded-full" :style="{ background: NODE_COLORS.gc }" />
GC
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rounded-full" :style="{ background: NODE_COLORS.ga }" />
GA
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rounded-full" :style="{ background: NODE_COLORS.gd }" />
GD
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rounded-full" :style="{ background: NODE_COLORS.leaf }" />
末端
其他 LOT
</span>
<span v-if="showSerialLegend" class="inline-flex items-center gap-1">
<span class="inline-block size-2.5 rotate-45" :style="{ background: NODE_COLORS.serial, width: '8px', height: '8px' }" />
序列號
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block h-0.5 w-3 bg-slate-300" />
split
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block h-0.5 w-3 border-t-2 border-dashed border-amber-500" />
merge
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block h-0.5 w-3 border-t-2 border-dotted border-blue-600" />
wafer
</span>
<span class="inline-flex items-center gap-1">
<span class="inline-block h-0.5 w-3 border-t-2 border-dashed border-red-500" />
gd-rework
</span>
</div>
</div>
</div>

View File

@@ -24,6 +24,10 @@ function safeDate(value) {
return parsed ? parsed : null;
}
// Trimmed, uppercased key for case-insensitive map lookups.
function normalizedKey(value) {
  const text = normalizeText(value);
  return text.toUpperCase();
}
// ── Tracks: group by (WORKCENTER_GROUP × LOT ID × Equipment) ──
const tracks = computed(() => {
const grouped = new Map();
@@ -44,7 +48,13 @@ const tracks = computed(() => {
}
if (!grouped.has(trackKey)) {
grouped.set(trackKey, { groupName, lotId, equipment, bars: [] });
grouped.set(trackKey, {
groupName,
lotId,
equipment,
containerId: normalizeText(row?.CONTAINERID),
bars: [],
});
}
grouped.get(trackKey).bars.push({
@@ -56,9 +66,10 @@ const tracks = computed(() => {
});
});
return [...grouped.entries()].map(([trackKey, { groupName, lotId, equipment, bars }]) => ({
return [...grouped.entries()].map(([trackKey, { groupName, lotId, equipment, containerId, bars }]) => ({
id: trackKey,
group: groupName,
containerId,
label: groupName,
sublabels: [
lotId ? `LOT ID: ${lotId}` : '',
@@ -74,20 +85,128 @@ const tracks = computed(() => {
}));
});
// ── Events: resolve trackId to compound key via group matching ──
// ── Events: resolve event-to-track mapping ──
const groupToFirstTrackId = computed(() => {
const map = new Map();
tracks.value.forEach((track) => {
if (!map.has(track.group)) {
map.set(track.group, track.id);
const key = normalizedKey(track.group);
if (key && !map.has(key)) {
map.set(key, track.id);
}
});
return map;
});
function resolveEventTrackId(row) {
const group = normalizeText(row?.WORKCENTER_GROUP) || normalizeText(row?.WORKCENTERNAME) || '';
return groupToFirstTrackId.value.get(group) || group;
// Index of normalized CONTAINERID -> list of track ids owned by that
// container. Tracks without a container id are omitted.
const containerToTrackIds = computed(() => {
  const byContainer = new Map();
  for (const track of tracks.value) {
    const cid = normalizedKey(track.containerId);
    if (!cid) {
      continue;
    }
    const bucket = byContainer.get(cid);
    if (bucket) {
      bucket.push(track.id);
    } else {
      byContainer.set(cid, [track.id]);
    }
  }
  return byContainer;
});
// Activity windows keyed by `CONTAINERID||SPECNAME`. Each entry lists the
// tracks (with start/end in epoch ms) during which that container ran that
// spec; used to place material events on the closest matching track.
// Bars missing a spec key or a valid Date range are skipped.
const containerSpecWindows = computed(() => {
  const windows = new Map();
  for (const track of tracks.value) {
    const containerKey = normalizedKey(track.containerId);
    if (!containerKey) {
      continue;
    }
    for (const layer of track.layers || []) {
      for (const bar of layer.bars || []) {
        const specKey = normalizedKey(bar?.label || bar?.type);
        const startMs = bar?.start instanceof Date ? bar.start.getTime() : null;
        const endMs = bar?.end instanceof Date ? bar.end.getTime() : null;
        if (!specKey || !Number.isFinite(startMs) || !Number.isFinite(endMs)) {
          continue;
        }
        const key = `${containerKey}||${specKey}`;
        const bucket = windows.get(key) || [];
        if (bucket.length === 0) {
          windows.set(key, bucket);
        }
        bucket.push({
          trackId: track.id,
          startMs,
          // Guard against inverted ranges: clamp end to start.
          endMs: endMs > startMs ? endMs : startMs,
        });
      }
    }
  }
  return windows;
});
/**
 * Pick the track whose time window is closest to `timeMs`.
 * A window containing the timestamp has distance 0; otherwise the
 * distance is the gap to the nearer window edge. Ties keep the earlier
 * window in the list. Falls back to the first window's track when
 * `timeMs` is not a finite number, and returns '' for no windows.
 */
function pickClosestTrack(windows, timeMs) {
  if (!Array.isArray(windows) || windows.length === 0) {
    return '';
  }
  if (!Number.isFinite(timeMs)) {
    return windows[0]?.trackId || '';
  }
  let bestTrackId = '';
  let bestDistance = Number.POSITIVE_INFINITY;
  for (const win of windows) {
    if (!win?.trackId) {
      continue;
    }
    let distance;
    if (timeMs >= win.startMs && timeMs <= win.endMs) {
      distance = 0;
    } else if (timeMs < win.startMs) {
      distance = win.startMs - timeMs;
    } else {
      distance = timeMs - win.endMs;
    }
    if (distance < bestDistance) {
      bestTrackId = win.trackId;
      bestDistance = distance;
    }
  }
  return bestTrackId;
}
/**
 * Pick the timeline track a HOLD event row should be rendered on.
 * Prefers a workcenter-group match, then falls back to the first track
 * sharing the row's CONTAINERID. Returns '' when nothing matches
 * (callers drop such rows).
 */
function resolveHoldTrackId(row) {
  const groupKey = normalizedKey(row?.WORKCENTER_GROUP) || normalizedKey(row?.WORKCENTERNAME);
  const byGroup = groupKey ? groupToFirstTrackId.value.get(groupKey) : '';
  if (byGroup) {
    return byGroup;
  }
  const containerKey = normalizedKey(row?.CONTAINERID);
  if (!containerKey) {
    return '';
  }
  const candidates = containerToTrackIds.value.get(containerKey) || [];
  return candidates.length > 0 ? candidates[0] : '';
}
/**
 * Resolve the track for a MATERIAL transaction row by matching its
 * CONTAINERID + SPECNAME against known activity windows, then choosing
 * the window closest in time. Returns '' when the row lacks either key
 * or no window exists for that pair.
 */
function resolveMaterialTrackId(row, time) {
  const containerKey = normalizedKey(row?.CONTAINERID);
  const specKey = normalizedKey(row?.SPECNAME);
  if (!containerKey || !specKey) {
    return '';
  }
  const candidateWindows = containerSpecWindows.value.get(`${containerKey}||${specKey}`) || [];
  const eventMs = time instanceof Date ? time.getTime() : null;
  return pickClosestTrack(candidateWindows, eventMs);
}
const events = computed(() => {
@@ -98,10 +217,14 @@ const events = computed(() => {
if (!time) {
return;
}
const trackId = resolveHoldTrackId(row);
if (!trackId) {
return;
}
markers.push({
id: `hold-${index}`,
trackId: resolveEventTrackId(row),
trackId,
time,
type: 'HOLD',
shape: 'diamond',
@@ -115,10 +238,14 @@ const events = computed(() => {
if (!time) {
return;
}
const trackId = resolveMaterialTrackId(row, time);
if (!trackId) {
return;
}
markers.push({
id: `material-${index}`,
trackId: resolveEventTrackId(row),
trackId,
time,
type: 'MATERIAL',
shape: 'triangle',
@@ -130,6 +257,28 @@ const events = computed(() => {
return markers;
});
// Coverage stats for material events: how many rows with a parsable
// TXNDATE could be matched to a timeline track. Rows without a valid
// timestamp are excluded from `total` entirely.
const materialMappingStats = computed(() => {
  let total = 0;
  let mapped = 0;
  for (const row of props.materialRows) {
    const txnTime = safeDate(row?.TXNDATE);
    if (!txnTime) {
      continue;
    }
    total += 1;
    if (resolveMaterialTrackId(row, txnTime)) {
      mapped += 1;
    }
  }
  return { total, mapped, unmapped: Math.max(0, total - mapped) };
});
const colorMap = computed(() => {
const colors = {
HOLD: '#f59e0b',
@@ -182,6 +331,12 @@ const timeRange = computed(() => {
<div class="flex items-center gap-3 text-xs text-slate-500">
<span v-if="timeRange">{{ formatDateTime(timeRange.start) }} {{ formatDateTime(timeRange.end) }}</span>
<span>Hold / Material 事件已覆蓋標記</span>
<span v-if="materialMappingStats.total > 0">
扣料對應 {{ materialMappingStats.mapped }} / {{ materialMappingStats.total }}
<template v-if="materialMappingStats.unmapped > 0">
未對應 {{ materialMappingStats.unmapped }}
</template>
</span>
</div>
</div>

View File

@@ -48,6 +48,14 @@ const props = defineProps({
type: Object,
default: () => new Map(),
},
nodeMetaMap: {
type: Object,
default: () => new Map(),
},
edgeTypeMap: {
type: Object,
default: () => new Map(),
},
leafSerials: {
type: Object,
default: () => new Map(),
@@ -144,6 +152,8 @@ const emit = defineEmits([
:not-found="notFound"
:lineage-map="lineageMap"
:name-map="nameMap"
:node-meta-map="nodeMetaMap"
:edge-type-map="edgeTypeMap"
:leaf-serials="leafSerials"
:selected-container-ids="selectedContainerIds"
:loading="lineageLoading"

View File

@@ -40,6 +40,11 @@ const inputCount = computed(() => {
.length;
});
// Label of the currently selected input-type option, used in the
// textarea placeholder; falls back to a generic wording when the
// current inputType has no matching option.
const inputTypeLabel = computed(() => {
  const options = props.inputTypeOptions || [];
  const match = options.find((option) => option?.value === props.inputType);
  if (match?.label) {
    return match.label;
  }
  return '查詢條件';
});
// Bubble the resolve request up to the parent component via the
// 'resolve' event; this component performs no resolving itself.
function handleResolve() {
  emit('resolve');
}
@@ -82,10 +87,14 @@ function handleResolve() {
<textarea
:value="inputText"
class="min-h-28 w-full rounded-card border border-stroke-soft bg-surface-muted/40 px-3 py-2 text-sm text-slate-700 outline-none transition focus:border-brand-500"
:placeholder="`輸入多筆(換行或逗號分隔),最多 ${inputLimit} 筆`"
:placeholder="`輸入 ${inputTypeLabel}(換行或逗號分隔),最多 ${inputLimit} 筆`"
:disabled="resolving"
@input="emit('update:inputText', $event.target.value)"
/>
<p class="mt-2 text-xs text-slate-500">
支援萬用字元<code>%</code>任意長度<code>_</code>單一字元也可用 <code>*</code> 代表 <code>%</code>
例如<code>GA25%01</code><code>GA25%</code><code>GMSN-1173%</code>
</p>
<div class="mt-2 flex items-center justify-between text-xs">
<p class="text-slate-500">已輸入 {{ inputCount }} / {{ inputLimit }}</p>
<p v-if="errorMessage" class="text-state-danger">{{ errorMessage }}</p>

View File

@@ -48,6 +48,14 @@ const props = defineProps({
type: Object,
default: () => new Map(),
},
nodeMetaMap: {
type: Object,
default: () => new Map(),
},
edgeTypeMap: {
type: Object,
default: () => new Map(),
},
leafSerials: {
type: Object,
default: () => new Map(),
@@ -144,6 +152,8 @@ const emit = defineEmits([
:not-found="notFound"
:lineage-map="lineageMap"
:name-map="nameMap"
:node-meta-map="nodeMetaMap"
:edge-type-map="edgeTypeMap"
:leaf-serials="leafSerials"
:selected-container-ids="selectedContainerIds"
:loading="lineageLoading"

View File

@@ -62,11 +62,22 @@ function sleep(ms) {
return new Promise((resolve) => window.setTimeout(resolve, ms));
}
/**
 * Build the canonical directed-edge key ("from->to") used to store edge
 * types. Returns '' when either endpoint is blank after normalization.
 */
function edgeKey(fromCid, toCid) {
  const source = normalizeText(fromCid);
  const target = normalizeText(toCid);
  return source && target ? `${source}->${target}` : '';
}
export function useLotLineage(initial = {}) {
ensureMesApiAvailable();
const lineageMap = reactive(new Map());
const nameMap = reactive(new Map());
const nodeMetaMap = reactive(new Map());
const edgeTypeMap = reactive(new Map());
const leafSerials = reactive(new Map());
const expandedNodes = ref(new Set());
const selectedContainerId = ref(normalizeText(initial.selectedContainerId));
@@ -217,6 +228,8 @@ export function useLotLineage(initial = {}) {
const rootsList = payload?.roots || [];
const serialsData = payload?.leaf_serials || {};
const names = payload?.names;
const typedNodes = payload?.nodes;
const typedEdges = payload?.edges;
// Merge name mapping
if (names && typeof names === 'object') {
@@ -227,6 +240,34 @@ export function useLotLineage(initial = {}) {
});
}
if (typedNodes && typeof typedNodes === 'object') {
Object.entries(typedNodes).forEach(([cid, node]) => {
const normalizedCid = normalizeText(cid);
if (!normalizedCid || !node || typeof node !== 'object') {
return;
}
nodeMetaMap.set(normalizedCid, node);
const displayName = normalizeText(node.container_name);
if (displayName) {
nameMap.set(normalizedCid, displayName);
}
});
}
edgeTypeMap.clear();
if (Array.isArray(typedEdges)) {
typedEdges.forEach((edge) => {
if (!edge || typeof edge !== 'object') {
return;
}
const key = edgeKey(edge.from_cid, edge.to_cid);
const type = normalizeText(edge.edge_type);
if (key && type) {
edgeTypeMap.set(key, type);
}
});
}
// Store leaf serial numbers
Object.entries(serialsData).forEach(([cid, serials]) => {
const id = normalizeText(cid);
@@ -420,6 +461,8 @@ export function useLotLineage(initial = {}) {
inFlight.clear();
lineageMap.clear();
nameMap.clear();
nodeMetaMap.clear();
edgeTypeMap.clear();
leafSerials.clear();
expandedNodes.value = new Set();
selectedContainerIds.value = [];
@@ -463,6 +506,8 @@ export function useLotLineage(initial = {}) {
return {
lineageMap,
nameMap,
nodeMetaMap,
edgeTypeMap,
leafSerials,
expandedNodes,
selectedContainerId,

View File

@@ -4,15 +4,21 @@ import { apiPost, ensureMesApiAvailable } from '../../core/api.js';
import { parseInputValues } from '../utils/values.js';
// All selectable seed-resolve input types. Callers narrow the visible
// subset via allowedTypes (e.g. forward trace vs. reverse trace tabs).
const INPUT_TYPE_OPTIONS = Object.freeze([
  { value: 'wafer_lot', label: 'Wafer LOT' },
  { value: 'lot_id', label: 'LOT ID' },
  { value: 'serial_number', label: '流水號' },
  { value: 'work_order', label: '工單' },
  { value: 'gd_work_order', label: 'GD 工單' },
  { value: 'gd_lot_id', label: 'GD LOT ID' },
]);

// Maximum number of input values accepted per resolve call, per type;
// work-order types are tighter since each expands into many lots.
const INPUT_LIMITS = Object.freeze({
  wafer_lot: 50,
  lot_id: 50,
  serial_number: 50,
  work_order: 10,
  gd_work_order: 10,
  gd_lot_id: 50,
});
function normalizeInputType(value) {
@@ -29,7 +35,7 @@ function normalizeAllowedTypes(input) {
: [];
const filtered = values.filter((value) => Boolean(INPUT_LIMITS[value]));
if (filtered.length === 0) {
return ['lot_id', 'serial_number', 'work_order'];
return ['wafer_lot', 'lot_id', 'serial_number', 'work_order', 'gd_work_order', 'gd_lot_id'];
}
return filtered;
}

View File

@@ -62,11 +62,22 @@ function sleep(ms) {
return new Promise((resolve) => window.setTimeout(resolve, ms));
}
// Canonical directed-edge key ("from->to") for the edge type map;
// empty string when either container id normalizes to blank.
function edgeKey(fromCid, toCid) {
  const head = normalizeText(fromCid);
  const tail = normalizeText(toCid);
  if (!head) {
    return '';
  }
  if (!tail) {
    return '';
  }
  return `${head}->${tail}`;
}
export function useReverseLineage(initial = {}) {
ensureMesApiAvailable();
const lineageMap = reactive(new Map());
const nameMap = reactive(new Map());
const nodeMetaMap = reactive(new Map());
const edgeTypeMap = reactive(new Map());
const leafSerials = reactive(new Map());
const selectedContainerId = ref(normalizeText(initial.selectedContainerId));
const selectedContainerIds = ref(
@@ -219,6 +230,8 @@ export function useReverseLineage(initial = {}) {
function populateReverseTree(payload, requestedRoots = []) {
const parentMap = normalizeParentMap(payload);
const names = payload?.names;
const typedNodes = payload?.nodes;
const typedEdges = payload?.edges;
if (names && typeof names === 'object') {
Object.entries(names).forEach(([cid, name]) => {
@@ -228,6 +241,34 @@ export function useReverseLineage(initial = {}) {
});
}
if (typedNodes && typeof typedNodes === 'object') {
Object.entries(typedNodes).forEach(([cid, node]) => {
const normalizedCid = normalizeText(cid);
if (!normalizedCid || !node || typeof node !== 'object') {
return;
}
nodeMetaMap.set(normalizedCid, node);
const displayName = normalizeText(node.container_name);
if (displayName) {
nameMap.set(normalizedCid, displayName);
}
});
}
edgeTypeMap.clear();
if (Array.isArray(typedEdges)) {
typedEdges.forEach((edge) => {
if (!edge || typeof edge !== 'object') {
return;
}
const key = edgeKey(edge.from_cid, edge.to_cid);
const type = normalizeText(edge.edge_type);
if (key && type) {
edgeTypeMap.set(key, type);
}
});
}
Object.entries(parentMap).forEach(([childId, parentIds]) => {
patchEntry(childId, {
children: uniqueValues(parentIds || []),
@@ -349,6 +390,8 @@ export function useReverseLineage(initial = {}) {
semaphore.clear();
lineageMap.clear();
nameMap.clear();
nodeMetaMap.clear();
edgeTypeMap.clear();
leafSerials.clear();
rootRows.value = [];
rootContainerIds.value = [];
@@ -371,6 +414,8 @@ export function useReverseLineage(initial = {}) {
return {
lineageMap,
nameMap,
nodeMetaMap,
edgeTypeMap,
leafSerials,
selectedContainerId,
selectedContainerIds,

View File

@@ -0,0 +1,2 @@
schema: spec-driven
created: 2026-02-22

View File

@@ -0,0 +1,152 @@
## Context
「批次追蹤工具」目前已拆成三個頁籤(正向/反向/設備),但 lineage 核心仍以 `SPLITFROMID` 與 `DW_MES_PJ_COMBINEDASSYLOTS` 為主,資料語意不足以完整表達現場流程:
- GC 並非 GA 的必經節點,且非 1:1部分批次只有 GC 抽點,部分完全不經 GC。
- Wafer LOT`DW_MES_CONTAINER.FIRSTNAME`)是 GA/GC 共同上游錨點,應獨立建模。
- GD 重工追溯主鏈在 `DW_MES_CONTAINER`(`ORIGINALCONTAINERID` + `FIRSTNAME` + `SPLITFROMID`),僅靠 COMBINED 表無法表達完整重工來源。
已驗證資料特徵(實查):
- GD lot 可由 `DW_MES_PJ_COMBINEDASSYLOTS.FINISHEDNAME` 反解至 `GDxxxx-Axx`
- 該 GD lot 在 `DW_MES_CONTAINER` 中可取得 `MFGORDERNAME=GD...``ORIGINALCONTAINERID``FIRSTNAME`
- `ORIGINALCONTAINERID` 對應來源 lot 可回接 Wafer LOT`FIRSTNAME`)。
約束條件:
- 需沿用現有 `/api/query-tool/*``/api/trace/*` 路由,不做破壞式移除。
- 需保留 staged trace 的快取與 rate limit 行為。
- 需維持查詢效能,避免以 Wafer LOT 為起點時產生不可控 fan-out。
## Goals / Non-Goals
**Goals:**
- 以「語意化節點/邊」重建 query-tool 的追溯模型,明確區分 split、merge、wafer-origin、gd-rework。
- 明確支持兩種入口集合:
- 正向Wafer LOT / GA-GC 工單 / GA-GC LOT
- 反向:成品流水號 / GD 工單 / GD LOT ID
- 前端樹圖可視化要能辨識「GA 無 GC」與「GD 重工分支」。
- 將 GD 追溯落在 lot/workorder 層級保證可追,並保留 serial 層級可得資訊。
**Non-Goals:**
- 不承諾舊成品流水號與新成品流水號 1:1 映射。
- 不調整設備頁籤功能。
- 不在本變更導入新資料來源(僅使用既有 DWH 表)。
## Decisions
### D1. 建立 Typed Lineage Graph節點/邊雙語意)
後端 lineage 輸出新增語意欄位,與現有欄位並存(過渡期兼容):
- `nodes`: 依 `container_id` 聚合節點屬性(`node_type`, `container_name`, `mfgorder_name`, `wafer_lot`
- `edges`: 邊列表(`from_cid`, `to_cid`, `edge_type`
- `edge_type` 固定枚舉:
- `split_from`
- `merge_source`
- `wafer_origin`
- `gd_rework_source`
`node_type` 判定優先順序:
1. `MFGORDERNAME LIKE 'GD%'` 或 `CONTAINERNAME LIKE 'GD%'` → `GD`
2. `MFGORDERNAME LIKE 'GC%'` 或 `CONTAINERNAME LIKE 'GC%'` → `GC`
3. `MFGORDERNAME LIKE 'GA%'` 或 `CONTAINERNAME LIKE 'GA%'` → `GA`
4. `OBJECTTYPE='LOT'` 且為 Wafer 錨點節點 → `WAFER`
5. COMBINED `FINISHEDNAME` 的虛擬節點 → `SERIAL`
保留現有 `children_map` / `parent_map` 等欄位,前端逐步切換到 typed graph。
### D2. 以 Profile 區分 seed-resolve 輸入語意
`/api/trace/seed-resolve` 改為 profile-aware 的 resolve type 規則:
- `query_tool`(正向)允許:`wafer_lot`, `lot_id`, `work_order`
- `query_tool_reverse`(反向)允許:`serial_number`, `gd_work_order`, `gd_lot_id`
其中:
- `wafer_lot`: 以 `DW_MES_CONTAINER.FIRSTNAME` 解析種子 lot 集合
- `gd_work_order`: 僅允許 `GD%` 前綴,對 `DW_MES_CONTAINER.MFGORDERNAME` 解析
- `gd_lot_id`: 以 `DW_MES_CONTAINER.CONTAINERNAME` 解析,且需同時符合 GD 規則(`CONTAINERNAME LIKE 'GD%'``MFGORDERNAME LIKE 'GD%'`
- `work_order`(正向)限定 GA/GC非 GD
此設計避免正反向模式語意混用,且可在 API 層即早回饋錯誤。
### D3. GD 反向追溯採「Container 主鏈 + Combined 輔鏈」
GD 反向演算法(三種起點共用):
1. 種子為 serial 時,先由 `DW_MES_PJ_COMBINEDASSYLOTS.FINISHEDNAME` 找到 lot常為 `GDxxxx-Axx`);種子為 `gd_lot_id` 時直接命中該 lot種子為 `gd_work_order` 時直接展開該工單 lot 群。
2. 對 serial 或 `gd_lot_id` 起點,讀取 lot 的 `MFGORDERNAME` 以展開同 GD 工單 lot 群。
3. 對每個 GD lot 取來源:
- 主來源:`ORIGINALCONTAINERID`
- 回退來源:`SPLITFROMID`(當 ORIGINAL 為空或無效)
4. 來源 lot 再透過 `FIRSTNAME` 接回 Wafer LOT 錨點。
5. COMBINED 僅負責「lot -> 成品流水號」映射,不作為 GD 來源主依據。
這可涵蓋「成品流水號 -> GD -> 來源 lot -> wafer」與「GD 工單 -> lot 群 -> 來源 lot」兩條路徑。
### D4. 前端改為語意化樹圖且保持明細過濾邊界
`LineageTreeChart` 調整為語意視覺:
- 節點顏色/形狀區分 `WAFER/GC/GA/GD/SERIAL`
- 邊樣式區分 `split/merge/wafer-origin/gd-rework`
- 無 GC 時強制顯示 `WAFER -> GA` 直接鏈路,不用「缺失」呈現
互動邊界:
- 點擊節點僅更新 detail panel 的 container scope
- 不重新過濾/改寫樹本身(避免「點樹即變樹」)
### D5. 效能策略:分段查詢 + 批次 + 快取
- lineage 查詢維持分段與批次IN clause batching策略。
- Wafer LOT 展開加入結果上限與分頁/裁切策略(避免單一查詢過大)。
- GD 關係查詢以 Redis/L2 做短期快取(可由 env 配置 TTL
- 監控新增 typed-edge 命中統計,觀察 `wafer_origin``gd_rework_source` 的覆蓋率。
### D6. 向後相容與漸進切換
- API contract 採「新增欄位」方式,不先移除舊欄位。
- 前端先讀新欄位,保留舊欄位 fallback 一個版本週期。
- 若生產異常可切回舊渲染路徑feature flag 或 runtime config
## Risks / Trade-offs
- [Risk] Wafer LOT fan-out 過大導致查詢壓力
Mitigation: 設定種子展開上限、分段查詢、UI 提示「僅顯示前 N 筆」。
- [Risk] `FIRSTNAME` 同名造成跨流程誤連
Mitigation: 邊生成時加上 `OBJECTTYPE='LOT'` 與工單/時間窗交叉約束;疑似多義連線以低信任度標記。
- [Risk] GD 舊/新 serial 無法 1:1 對映引發期待落差
Mitigation: 在規格與 UI 說明明確宣告 serial 層級的限制,保證 lot/workorder 層級完整可追。
- [Risk] 新舊欄位並存造成前後端邏輯複雜
Mitigation: 設定移除時程,待新前端穩定後再移除舊欄位讀取。
## Migration Plan
1. 後端先落地 typed lineage不改前端確認 API 回傳兼容。
2. 前端切換至 typed graph 視覺與新 resolve 類型。
3. 啟用 GD reverse 路徑與 GC-optional 顯示規則。
4. 以實例資料驗證三種主流程:
- WAFER -> GA無 GC
- WAFER -> GC -> GA
- SERIAL -> GD -> SOURCE LOT -> WAFER
5. 穩定後移除舊渲染相依欄位(若決議移除)。
Rollback
- 關閉 typed graph 功能開關,前端退回舊欄位渲染。
- 保留新 SQL/欄位但不被前端使用,避免熱修回滾需 DB 變更。
## Open Questions
- Wafer LOT 輸入值格式是否需要強制前綴或正則,以降低同名誤連?
- 正向 `work_order` 是否嚴格限制 GA/GC或允許 GD 但提示「請用反向頁籤」?
- `WAFER -> GA` 直接鏈路在視覺上要以虛線還是實線呈現(避免與 split 混淆)?

View File

@@ -0,0 +1,77 @@
## Why
目前「批次追蹤工具」雖已拆成正向/反向/設備三個頁籤,但追溯模型仍以 `SPLITFROMID + COMBINEDASSYLOTS` 為主,與實際 GA/GC/GD/WAFER LOT 關係不完全一致。已完成的資料探索也顯示GC→GA 常透過共同 `FIRSTNAME`Wafer LOT而非 split 直接可見GD 重工鏈也主要落在 `DW_MES_CONTAINER``ORIGINALCONTAINERID` / `FIRSTNAME` / `SPLITFROMID`),若不補齊模型,前端樹圖會持續出現「可顯示但語意不正確」的問題。
## What Changes
- 釐清並統一「批次追蹤」資料語意,將追溯關係分成可辨識的邊類型,而不只是一般 parent/child
- `split_from`(拆批)
- `merge_source`(併批)
- `wafer_origin``FIRSTNAME` 對應 Wafer LOT
- `gd_rework_source`GD 重工來源,依 `ORIGINALCONTAINERID`/`FIRSTNAME`
- 明確納入 GC 非必經站規則:
- GC 與 GA 非 1:1也不是必經關係可能僅抽點也可能完全不經 GC
- 追溯主錨點改為 Wafer LOTGC 視為「可選節點」,不存在時不視為斷鏈
- 前端需顯示 `WAFER -> GA` 直接鏈路(無 GC 時),讓使用者可視覺辨識「跳過 GC」情境
- 調整查詢入口,對齊你定義的使用情境:
- 正向頁籤支援Wafer LOT、GA/GC 工單、GA/GC LOT 作為起點
- 反向頁籤支援成品流水號、GD 工單、GD LOT ID 作為起點
- 讓正反向追溯輸出採同一份「語意化關係圖」資料結構,只在起點與展開方向不同,避免結果解讀不一致。
- 補齊 GA 無 GC 時的可視化語意:若無 GC 節點,仍須明確顯示 Wafer LOT 補充鏈路,不可隱性省略。
- 前端樹圖改為「節點類型 + 關係類型」雙重視覺表達(非僅 root/branch/leaf
- 節點至少區分WAFER、GC、GA、GD、SERIAL
- 關係邊樣式區分split、merge、wafer-origin、gd-rework
- 保留點選節點只過濾下方明細,不回頭過濾樹本身。
- 增加查詢效能與風險控制策略:
- 先做 seed resolve再按需分段展開關係避免一次全量 fan-out
- 對 GD 關係查詢加入快取策略(可配置 TTL預設使用既有 Redis 快取層)
- 補上追溯鏈路命中統計與慢查監控欄位,便於驗證模型是否正確覆蓋。
### GD 追溯策略(補充)
- 反向起點為「成品流水號」時:
1. 先用 `DW_MES_PJ_COMBINEDASSYLOTS.FINISHEDNAME` 解析到 GD lot例如 `GDxxxx-A01`
2. 取得 GD lot 對應 `MFGORDERNAME=GD...`
3.`DW_MES_CONTAINER` 展開同 GD 工單全部 lot
4. 每一個 GD lot 以 `ORIGINALCONTAINERID`(主)與 `FIRSTNAME`(輔)回溯來源 lot
5. 來源 lot 再透過 `FIRSTNAME` 連到 Wafer LOT 錨點
- 反向起點為「GD 工單」時:
- 直接從 `DW_MES_CONTAINER` 取 GD lot 群,後續同上回溯來源 lot 與 Wafer LOT
- 反向起點為「GD LOT ID」時
-`DW_MES_CONTAINER.CONTAINERNAME` 精準命中 GD lot需符合 GD 規則),再沿用同一條回溯鏈
- 適用「已知單顆/單批 GD lot未知整張 GD 工單」的快速反查情境
- 正向時,若查到來源 lot 存在 GD 再製分支,需額外顯示 `gd_rework_source` 邊,形成「原 lot -> GD lot -> 新成品」分支。
- 限制聲明:
- 目前資料可穩定追出「來源 lot 與 GD lot 關係」;
- 舊成品流水號與新成品流水號不保證存在 1:1 可直接映射,提案先保證 lot/workorder 層級完整可追。
### 現況/需求/整合比較
| 面向 | 目前實作 | 新需求 | 本提案整合方向 |
|---|---|---|---|
| 正向入口 | `lot_id` / `work_order` | Wafer LOT + GA/GC 工單 + GA/GC LOT | 擴充 resolve type 與正向查詢入口 |
| 反向入口 | 僅成品流水號 | 成品流水號 + GD 工單 + GD LOT ID | 反向 QueryBar 增加 GD 工單/GD LOT 模式 |
| GD 關聯 | 主要倚賴 COMBINED 映射 | 需追出重工來源與重測後新結果 | 改以 `DW_MES_CONTAINER` 欄位為 GD 主鏈COMBINED 僅作輔助 |
| GC 缺失情境 | 樹上不易看出補線來源 | GA 無 GC 時仍要看見 WAFER LOT | 新增 `wafer_origin` 邊與視覺標示 |
| 前端語意 | 泛化 root/branch/leaf | 要看得出流程語意 | 改成節點/邊語意化圖例與樣式 |
## Capabilities
### New Capabilities
- _(none)_
### Modified Capabilities
- `query-tool-lot-trace`: 查詢入口、正反向頁籤語意、樹圖互動與可視化規則更新。
- `lineage-engine-core`: 從單一 split/merge 模型擴充為可輸出 wafer/GD 關係的語意化關係圖。
- `trace-staged-api`: seed resolve 與 lineage response contract 擴充(新 resolve type、typed edges、節點分類欄位
- `progressive-trace-ux`: 正反向追溯在同一 UX 規則下顯示,並保持分段載入與快取策略一致。
## Impact
- **前端**`frontend/src/query-tool/App.vue``frontend/src/query-tool/components/QueryBar.vue``frontend/src/query-tool/components/LineageTreeChart.vue`、相關 composables`useLotResolve.js``useLotLineage.js``useReverseLineage.js`
- **後端 API**`src/mes_dashboard/routes/query_tool_routes.py``src/mes_dashboard/routes/trace_routes.py`
- **服務層**`src/mes_dashboard/services/query_tool_service.py``src/mes_dashboard/services/lineage_engine.py`
- **SQL/資料來源**`src/mes_dashboard/sql/lineage/*.sql``src/mes_dashboard/sql/query_tool/*resolve*.sql`(含 `DW_MES_CONTAINER` 欄位關聯補強)
- **快取/監控**:沿用既有 Redis/L2 cache 與 slow-query logger新增追溯關係命中統計欄位

View File

@@ -0,0 +1,42 @@
## MODIFIED Requirements
### Requirement: LineageEngine SHALL provide combined genealogy resolution
`LineageEngine.resolve_full_genealogy()` SHALL produce a semantic lineage graph that includes split, merge, wafer-origin, and GD-rework relationships.
#### Scenario: Combined genealogy includes typed edges
- **WHEN** `resolve_full_genealogy()` is called with seed container IDs
- **THEN** the response SHALL include lineage relationships with explicit edge types
- **THEN** split, merge, wafer-origin, and gd-rework edges SHALL be distinguishable
#### Scenario: GA without GC remains traceable by wafer origin
- **WHEN** seed lots have GA lineage without GC nodes
- **THEN** the engine SHALL still link GA lineage to wafer origin via `FIRSTNAME`
- **THEN** lineage output SHALL remain connected without synthetic GC nodes
#### Scenario: Backward compatibility fields preserved during migration
- **WHEN** callers still depend on legacy ancestry maps
- **THEN** the engine SHALL continue returning legacy-compatible fields during migration window
- **THEN** typed graph fields SHALL be additive, not replacing legacy fields immediately
## ADDED Requirements
### Requirement: LineageEngine SHALL resolve wafer-origin relationships from container data
The engine SHALL derive wafer-origin links using `DW_MES_CONTAINER.FIRSTNAME` and valid LOT nodes.
#### Scenario: Wafer-origin edge creation
- **WHEN** a lot node has a non-empty `FIRSTNAME` that maps to a wafer lot node
- **THEN** the engine SHALL create a `wafer_origin` edge between the lot and wafer nodes
- **THEN** wafer-origin resolution SHALL avoid duplicate edges per node pair
### Requirement: LineageEngine SHALL resolve GD rework source relationships from container data
The engine SHALL derive GD rework source links primarily from `ORIGINALCONTAINERID`, with `SPLITFROMID` as fallback.
#### Scenario: GD source via ORIGINALCONTAINERID
- **WHEN** a GD lot has a valid `ORIGINALCONTAINERID`
- **THEN** the engine SHALL create a `gd_rework_source` edge from source lot to GD lot
- **THEN** this edge SHALL be included in reverse and forward lineage outputs where applicable
#### Scenario: GD source fallback to SPLITFROMID
- **WHEN** `ORIGINALCONTAINERID` is null or invalid and `SPLITFROMID` is available
- **THEN** the engine SHALL fallback to `SPLITFROMID` for gd-rework source linkage
- **THEN** the fallback linkage SHALL be marked with edge type `gd_rework_source`

View File

@@ -0,0 +1,24 @@
## MODIFIED Requirements
### Requirement: query-tool lineage tab SHALL load on-demand
The query-tool lineage experience SHALL keep progressive loading behavior while supporting forward and reverse tracing semantics with independent caches.
#### Scenario: Forward resolve auto-fires lineage progressively
- **WHEN** forward seed resolution completes with N lots
- **THEN** lineage requests SHALL auto-fire with concurrency control
- **THEN** the tree SHALL progressively render as responses arrive
#### Scenario: Reverse resolve supports serial, GD work-order, and GD lot-id modes
- **WHEN** reverse tab resolves seeds using `serial_number`, `gd_work_order`, or `gd_lot_id`
- **THEN** lineage SHALL render upstream graph from resolved roots
- **THEN** reverse tab behavior SHALL not depend on forward tab state
#### Scenario: Cache isolation per tab context
- **WHEN** lineage data is fetched in forward tab
- **THEN** forward cache SHALL be reusable within forward context
- **THEN** reverse tab lineage cache SHALL be isolated from forward cache state
#### Scenario: Tree interaction does not mutate graph scope
- **WHEN** user clicks nodes to inspect details
- **THEN** detail panel scope SHALL update immediately
- **THEN** lineage graph visibility SHALL remain unchanged unless a new resolve is executed

View File

@@ -0,0 +1,65 @@
## MODIFIED Requirements
### Requirement: Query-tool page SHALL use tab-based layout separating LOT tracing from equipment queries
The query-tool page SHALL present three top-level tabs with independent state: `批次追蹤(正向)`, `流水批反查(反向)`, and `設備生產批次追蹤`.
#### Scenario: Tab switching preserves independent state
- **WHEN** the user switches between forward, reverse, and equipment tabs
- **THEN** each tab SHALL retain its own input values, resolved seeds, selected nodes, and detail sub-tab state
- **THEN** switching tabs SHALL NOT clear another tab's query context
#### Scenario: URL state reflects active tab and tab-local inputs
- **WHEN** the user is on a specific tab
- **THEN** the URL SHALL include `tab` and corresponding tab-local query parameters
- **THEN** reloading the page SHALL restore the active tab and its tab-local state
### Requirement: QueryBar SHALL resolve LOT/Serial/WorkOrder inputs
The query bar SHALL support profile-specific input types. Forward tracing SHALL support wafer/lot/work-order inputs, and reverse tracing SHALL support serial, GD work-order, and GD lot-id inputs.
#### Scenario: Forward query supports wafer-lot seeds
- **WHEN** the user selects `wafer_lot` in forward tab and submits values
- **THEN** the system SHALL call resolve API with `input_type=wafer_lot`
- **THEN** resolved lots under the wafer origin SHALL appear as forward tree roots
#### Scenario: Reverse query supports GD work-order seeds
- **WHEN** the user selects `gd_work_order` in reverse tab and submits `GD%` work orders
- **THEN** the system SHALL call resolve API with `input_type=gd_work_order`
- **THEN** resolved GD lots SHALL appear as reverse tree roots
#### Scenario: Reverse query supports GD lot-id seeds
- **WHEN** the user selects `gd_lot_id` in reverse tab and submits GD lot IDs
- **THEN** the system SHALL call resolve API with `input_type=gd_lot_id`
- **THEN** resolved GD lot roots SHALL be used for reverse lineage expansion
#### Scenario: Invalid GD work-order input is rejected
- **WHEN** reverse tab input type is `gd_work_order` and a value does not match `GD%`
- **THEN** the system SHALL return validation error without issuing lineage query
- **THEN** the UI SHALL keep user input and display actionable error text
#### Scenario: Invalid GD lot-id input is rejected
- **WHEN** reverse tab input type is `gd_lot_id` and a value does not match GD lot rules
- **THEN** the system SHALL return validation error without issuing lineage query
- **THEN** invalid values SHALL be reported in the UI without clearing user input
### Requirement: LineageTree SHALL display as a decomposition tree with progressive growth animation
The lineage tree SHALL render semantic node/edge relationships and SHALL preserve progressive loading behavior.
#### Scenario: GC is optional and wafer linkage remains visible
- **WHEN** a GA lot has no GC node in its upstream chain
- **THEN** the tree SHALL still render a direct `WAFER -> GA` relationship
- **THEN** this SHALL NOT be treated as a broken lineage
#### Scenario: GD rework branch is explicitly rendered
- **WHEN** lineage includes GD rework data
- **THEN** the tree SHALL render `source lot -> GD lot -> new serial/lot` using GD-specific node/edge style
- **THEN** users SHALL be able to distinguish GD rework edges from split/merge edges
#### Scenario: Auto-fire lineage after forward resolve
- **WHEN** forward lot resolution completes with N resolved lots
- **THEN** lineage SHALL be fetched automatically with concurrency-limited requests
- **THEN** the tree SHALL progressively grow as lineage responses arrive
#### Scenario: Node click only scopes detail panel
- **WHEN** the user clicks one or more nodes in the tree
- **THEN** only the detail panel query scope SHALL change
- **THEN** the tree structure and node visibility SHALL remain unchanged

View File

@@ -0,0 +1,42 @@
## MODIFIED Requirements
### Requirement: Staged trace API SHALL expose seed-resolve endpoint
`POST /api/trace/seed-resolve` SHALL resolve seed lots based on profile-specific resolve types.
#### Scenario: Forward profile resolve types
- **WHEN** request body contains `{ "profile": "query_tool", "params": { "resolve_type": "<type>", "values": [...] } }`
- **THEN** `<type>` SHALL be one of `wafer_lot`, `lot_id`, or `work_order`
- **THEN** non-supported types for this profile SHALL return HTTP 400 with `INVALID_PARAMS`
#### Scenario: Reverse profile resolve types
- **WHEN** request body contains `{ "profile": "query_tool_reverse", "params": { "resolve_type": "<type>", "values": [...] } }`
- **THEN** `<type>` SHALL be one of `serial_number`, `gd_work_order`, or `gd_lot_id`
- **THEN** invalid `gd_work_order` values not matching `GD%` SHALL return HTTP 400
#### Scenario: GD lot-id validation
- **WHEN** reverse profile uses `resolve_type=gd_lot_id`
- **THEN** each value SHALL be validated against GD lot rules before resolution
- **THEN** invalid values SHALL return HTTP 400 with `INVALID_PARAMS`
#### Scenario: Seed response payload compatibility
- **WHEN** seed resolution succeeds
- **THEN** response SHALL include `stage`, `seeds`, `seed_count`, and `cache_key`
- **THEN** each seed SHALL include `container_id` and displayable lot/container name fields
### Requirement: Staged trace API SHALL expose lineage endpoint
`POST /api/trace/lineage` SHALL return semantic lineage graph fields while preserving legacy-compatible fields during migration.
#### Scenario: Lineage response contains typed graph fields
- **WHEN** lineage is resolved for `query_tool` or `query_tool_reverse`
- **THEN** response SHALL include typed lineage fields (`nodes` and typed `edges`)
- **THEN** each edge SHALL declare edge type sufficient to distinguish split/merge/wafer/gd-rework
#### Scenario: Legacy compatibility during frontend migration
- **WHEN** existing clients still consume legacy lineage fields
- **THEN** lineage response SHALL continue to include existing compatibility fields for a migration period
- **THEN** typed fields SHALL be additive and not break current clients
#### Scenario: Profile-aware cache keys
- **WHEN** lineage requests have same container IDs but different profiles
- **THEN** cache keys SHALL remain profile-aware to prevent cross-profile response mixing
- **THEN** repeated requests with same profile and same sorted IDs SHALL hit cache

View File

@@ -0,0 +1,31 @@
## 1. Backend lineage model (typed graph)
- [x] 1.1 Extend `LineageEngine` output to include typed `nodes` and `edges` while keeping legacy-compatible fields
- [x] 1.2 Implement edge builders for `wafer_origin` (via `DW_MES_CONTAINER.FIRSTNAME`) and `gd_rework_source` (via `ORIGINALCONTAINERID`, fallback `SPLITFROMID`)
- [x] 1.3 Add node classification helper for `WAFER/GC/GA/GD/SERIAL` and ensure deterministic priority rules
- [x] 1.4 Add/adjust SQL fragments needed for wafer-origin and GD-source resolution with bind-safe `QueryBuilder` usage
## 2. Trace API and resolve contract updates
- [x] 2.1 Extend resolve service to support `wafer_lot`, `gd_work_order`, and `gd_lot_id` input types with profile-aware validation
- [x] 2.2 Update `/api/trace/seed-resolve` to enforce profile-specific resolve-type allowlists (`query_tool` vs `query_tool_reverse`)
- [x] 2.3 Update `/api/trace/lineage` response contract to return typed graph payload additively (no immediate legacy break)
- [x] 2.4 Verify lineage cache behavior remains profile-safe and does not mix forward/reverse responses
## 3. Query-tool frontend integration
- [x] 3.1 Update query bars and tab logic to expose forward types (`wafer_lot/lot_id/work_order`) and reverse types (`serial_number/gd_work_order/gd_lot_id`)
- [x] 3.2 Refactor lineage composables to consume typed graph fields and map them into rendering data structures
- [x] 3.3 Update `LineageTreeChart` to render semantic node styles and edge semantics for split/merge/wafer/gd-rework
- [x] 3.4 Implement explicit UI handling for GC-optional flow (`WAFER -> GA` visible when GC is absent)
- [x] 3.5 Ensure node click only updates detail scope and does not mutate tree visibility
## 4. Validation, regression, and documentation
- [x] 4.1 Add backend tests for resolve-type validation (`gd_work_order` + `gd_lot_id`), wafer-origin edges, and GD-source linkage
- [x] 4.2 Add API contract tests for typed lineage fields and backward-compatible fields
- [x] 4.3 Run manual data validation on representative scenarios:
- [x] 4.4 Validate `WAFER -> GA` path without GC
- [x] 4.5 Validate `WAFER -> GC -> GA` path
- [x] 4.6 Validate `SERIAL -> GD -> source lot -> WAFER` reverse path
- [x] 4.7 Update user-facing documentation/help text for new query modes and GD/GC interpretation rules

View File

@@ -110,7 +110,7 @@ def resolve_lot_input():
Expects JSON body:
{
"input_type": "lot_id" | "serial_number" | "work_order",
"input_type": "lot_id" | "wafer_lot" | "serial_number" | "work_order" | "gd_work_order" | "gd_lot_id",
"values": ["value1", "value2", ...]
}
@@ -131,7 +131,7 @@ def resolve_lot_input():
values = data.get('values', [])
# Validate input type
valid_types = ['lot_id', 'serial_number', 'work_order']
valid_types = ['lot_id', 'wafer_lot', 'serial_number', 'work_order', 'gd_work_order', 'gd_lot_id']
if input_type not in valid_types:
return jsonify({'error': f'不支援的查詢類型: {input_type}'}), 400
@@ -268,7 +268,7 @@ def query_lot_associations():
container_id = request.args.get('container_id')
assoc_type = request.args.get('type')
valid_types = ['materials', 'rejects', 'holds', 'jobs']
valid_types = ['materials', 'rejects', 'holds', 'splits', 'jobs']
if assoc_type not in valid_types:
return jsonify({'error': f'不支援的關聯類型: {assoc_type}'}), 400
@@ -289,6 +289,9 @@ def query_lot_associations():
result = get_lot_rejects(container_id)
elif assoc_type == 'holds':
result = get_lot_holds(container_id)
elif assoc_type == 'splits':
full_history = str(request.args.get('full_history', '')).strip().lower() in {'1', 'true', 'yes'}
result = get_lot_splits(container_id, full_history=full_history)
elif assoc_type == 'jobs':
equipment_id = request.args.get('equipment_id')
time_start = request.args.get('time_start')

View File

@@ -46,7 +46,10 @@ SUPPORTED_PROFILES = {
PROFILE_MID_SECTION_DEFECT,
}
QUERY_TOOL_RESOLVE_TYPES = {"lot_id", "serial_number", "work_order"}
QUERY_TOOL_RESOLVE_TYPES_BY_PROFILE = {
PROFILE_QUERY_TOOL: {"wafer_lot", "lot_id", "work_order"},
PROFILE_QUERY_TOOL_REVERSE: {"serial_number", "gd_work_order", "gd_lot_id"},
}
SUPPORTED_EVENT_DOMAINS = {
"history",
"materials",
@@ -172,10 +175,18 @@ def _extract_date_range(params: Dict[str, Any]) -> tuple[Optional[str], Optional
return None, None
def _seed_resolve_query_tool(params: Dict[str, Any]) -> tuple[Optional[Dict[str, Any]], Optional[tuple[str, str, int]]]:
def _seed_resolve_query_tool(
profile: str,
params: Dict[str, Any],
) -> tuple[Optional[Dict[str, Any]], Optional[tuple[str, str, int]]]:
resolve_type = str(params.get("resolve_type") or params.get("input_type") or "").strip()
if resolve_type not in QUERY_TOOL_RESOLVE_TYPES:
return None, ("INVALID_PARAMS", "resolve_type must be lot_id/serial_number/work_order", 400)
allowed_types = QUERY_TOOL_RESOLVE_TYPES_BY_PROFILE.get(profile, set())
if resolve_type not in allowed_types:
return None, (
"INVALID_PARAMS",
f"resolve_type must be one of: {','.join(sorted(allowed_types))}",
400,
)
values = _normalize_strings(params.get("values", []))
if not values:
@@ -232,6 +243,8 @@ def _build_lineage_response(
cid_to_name: Optional[Dict[str, str]] = None,
parent_map: Optional[Dict[str, List[str]]] = None,
merge_edges: Optional[Dict[str, List[str]]] = None,
typed_nodes: Optional[Dict[str, Dict[str, Any]]] = None,
typed_edges: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
normalized_ancestors: Dict[str, List[str]] = {}
all_nodes = set(container_ids)
@@ -267,6 +280,26 @@ def _build_lineage_response(
child: sources for child, sources in merge_edges.items()
if child in all_nodes
}
if typed_nodes:
response["nodes"] = {
cid: node for cid, node in typed_nodes.items()
if cid in all_nodes or cid in container_ids
}
if typed_edges:
normalized_edges = []
for edge in typed_edges:
if not isinstance(edge, dict):
continue
from_cid = str(edge.get("from_cid") or "").strip()
to_cid = str(edge.get("to_cid") or "").strip()
if not from_cid or not to_cid:
continue
if from_cid in all_nodes or to_cid in all_nodes:
normalized_edges.append(edge)
all_nodes.add(from_cid)
all_nodes.add(to_cid)
response["edges"] = normalized_edges
response["total_nodes"] = len(all_nodes)
return response
@@ -359,7 +392,7 @@ def seed_resolve():
started = time.monotonic()
if profile in {PROFILE_QUERY_TOOL, PROFILE_QUERY_TOOL_REVERSE}:
resolved, route_error = _seed_resolve_query_tool(params)
resolved, route_error = _seed_resolve_query_tool(profile, params)
else:
resolved, route_error = _seed_resolve_mid_section_defect(params)
@@ -418,6 +451,8 @@ def lineage():
cid_to_name=reverse_graph.get("cid_to_name"),
parent_map=reverse_graph.get("parent_map"),
merge_edges=reverse_graph.get("merge_edges"),
typed_nodes=reverse_graph.get("nodes"),
typed_edges=reverse_graph.get("edges"),
)
response["roots"] = list(container_ids)
else:
@@ -430,6 +465,8 @@ def lineage():
"leaf_serials": forward_tree.get("leaf_serials", {}),
"names": {cid: name for cid, name in cid_to_name.items() if name},
"total_nodes": forward_tree.get("total_nodes", 0),
"nodes": forward_tree.get("nodes", {}),
"edges": forward_tree.get("edges", []),
}
except Exception as exc:
if _is_timeout_exception(exc):

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
import logging
from collections import defaultdict
from typing import Any, Dict, List, Optional, Set
from typing import Any, Dict, List, Optional, Set, Tuple
from mes_dashboard.core.database import read_sql_df
from mes_dashboard.sql import QueryBuilder, SQLLoader
@@ -15,6 +15,18 @@ logger = logging.getLogger("mes_dashboard.lineage_engine")
ORACLE_IN_BATCH_SIZE = 1000
MAX_SPLIT_DEPTH = 20
NODE_TYPE_WAFER = "WAFER"
NODE_TYPE_GC = "GC"
NODE_TYPE_GA = "GA"
NODE_TYPE_GD = "GD"
NODE_TYPE_LOT = "LOT"
NODE_TYPE_UNKNOWN = "UNKNOWN"
EDGE_TYPE_SPLIT = "split_from"
EDGE_TYPE_MERGE = "merge_source"
EDGE_TYPE_WAFER = "wafer_origin"
EDGE_TYPE_GD_REWORK = "gd_rework_source"
def _normalize_list(values: List[str]) -> List[str]:
"""Normalize string list while preserving input order."""
@@ -41,6 +53,39 @@ def _safe_str(value: Any) -> Optional[str]:
return value if value else None
def _upper_prefix_match(value: Optional[str], prefix: str) -> bool:
    """Case-insensitively test whether *value* starts with *prefix*.

    Returns False when the normalized value is empty or None.
    """
    normalized = _safe_str(value)
    return bool(normalized) and normalized.upper().startswith(prefix.upper())
def _append_unique(values: List[str], item: str) -> None:
if item and item not in values:
values.append(item)
def _to_edge_payload(edges: List[Tuple[str, str, str]]) -> List[Dict[str, str]]:
    """Convert (from, to, type) triples into edge dicts for the API payload.

    Triples with any blank component are dropped; duplicates are removed
    while preserving the order of first occurrence.
    """
    payload: List[Dict[str, str]] = []
    seen: Set[Tuple[str, str, str]] = set()
    for raw_from, raw_to, raw_type in edges:
        triple = (_safe_str(raw_from), _safe_str(raw_to), _safe_str(raw_type))
        if not all(triple):
            continue
        if triple in seen:
            continue
        seen.add(triple)
        from_id, to_id, edge_type = triple
        payload.append({
            "from_cid": from_id,
            "to_cid": to_id,
            "edge_type": edge_type,
        })
    return payload
def _build_parent_map(
child_to_parent: Dict[str, str],
merge_child_to_parent: Dict[str, str],
@@ -82,6 +127,174 @@ def _build_parent_map(
class LineageEngine:
"""Unified split/merge genealogy resolver."""
@staticmethod
def _resolve_container_snapshot(
    container_ids: List[str],
) -> Dict[str, Dict[str, Optional[str]]]:
    """Fetch container metadata for the given IDs in Oracle-safe batches.

    Returns a mapping of CONTAINERID -> snapshot of lineage-relevant
    columns; IDs that return no row are simply absent from the result.
    """
    cids = _normalize_list(container_ids)
    if not cids:
        return {}
    # Columns copied from each row besides the ID itself.
    fields = (
        "CONTAINERNAME",
        "MFGORDERNAME",
        "OBJECTTYPE",
        "FIRSTNAME",
        "ORIGINALCONTAINERID",
        "SPLITFROMID",
    )
    snapshots: Dict[str, Dict[str, Optional[str]]] = {}
    # Batch IN-lists to stay under Oracle's bind/in-list limits.
    for start in range(0, len(cids), ORACLE_IN_BATCH_SIZE):
        builder = QueryBuilder()
        builder.add_in_condition("c.CONTAINERID", cids[start:start + ORACLE_IN_BATCH_SIZE])
        sql = SQLLoader.load_with_params(
            "lineage/container_snapshot",
            CID_FILTER=builder.get_conditions_sql(),
        )
        df = read_sql_df(sql, builder.params)
        if df is None or df.empty:
            continue
        for _, row in df.iterrows():
            cid = _safe_str(row.get("CONTAINERID"))
            if not cid:
                continue
            record: Dict[str, Optional[str]] = {"CONTAINERID": cid}
            for field in fields:
                record[field] = _safe_str(row.get(field))
            snapshots[cid] = record
    return snapshots
@staticmethod
def _resolve_lot_ids_by_name(names: List[str]) -> Dict[str, str]:
    """Map container names to container IDs via batched name lookups.

    When several containers share a name, the first resolved ID wins;
    names with no match are omitted from the mapping.
    """
    lookup_names = _normalize_list(names)
    if not lookup_names:
        return {}
    name_to_cid: Dict[str, str] = {}
    # Batch IN-lists to stay under Oracle's bind/in-list limits.
    for start in range(0, len(lookup_names), ORACLE_IN_BATCH_SIZE):
        batch = lookup_names[start:start + ORACLE_IN_BATCH_SIZE]
        builder = QueryBuilder()
        builder.add_in_condition("c.CONTAINERNAME", batch)
        sql = SQLLoader.load_with_params(
            "lineage/lot_ids_by_name",
            NAME_FILTER=builder.get_conditions_sql(),
        )
        df = read_sql_df(sql, builder.params)
        if df is None or df.empty:
            continue
        for _, row in df.iterrows():
            cid = _safe_str(row.get("CONTAINERID"))
            name = _safe_str(row.get("CONTAINERNAME"))
            if not cid or not name:
                continue
            # setdefault keeps the first-seen ID for a duplicated name.
            name_to_cid.setdefault(name, cid)
    return name_to_cid
@staticmethod
def _is_gd_snapshot(snapshot: Optional[Dict[str, Optional[str]]]) -> bool:
    """Return True when a container snapshot is classified as a GD (rework) lot.

    A container counts as GD when either its MFG order name or its
    container name starts with "GD" (case-insensitive).
    """
    if not snapshot:
        return False
    return any(
        _upper_prefix_match(snapshot.get(field), "GD")
        for field in ("MFGORDERNAME", "CONTAINERNAME")
    )
@staticmethod
def _classify_node_type(
    cid: str,
    snapshot: Optional[Dict[str, Optional[str]]],
    wafer_ids: Set[str],
) -> str:
    """Classify a lineage node for the typed `nodes` payload.

    The priority order is deterministic: wafer-ID membership first, then
    GD, GC, and GA prefixes (checked on MFG order name and container
    name), then generic LOT by OBJECTTYPE, with UNKNOWN as fallback.
    """
    if cid in wafer_ids:
        return NODE_TYPE_WAFER
    if LineageEngine._is_gd_snapshot(snapshot):
        return NODE_TYPE_GD
    if snapshot:
        for prefix, node_type in (("GC", NODE_TYPE_GC), ("GA", NODE_TYPE_GA)):
            if _upper_prefix_match(snapshot.get("MFGORDERNAME"), prefix):
                return node_type
            if _upper_prefix_match(snapshot.get("CONTAINERNAME"), prefix):
                return node_type
        if _safe_str(snapshot.get("OBJECTTYPE")) == "LOT":
            return NODE_TYPE_LOT
    return NODE_TYPE_UNKNOWN
@staticmethod
def _build_semantic_links(
    base_node_ids: Set[str],
    snapshots: Dict[str, Dict[str, Optional[str]]],
) -> Tuple[Dict[str, Dict[str, Optional[str]]], List[Tuple[str, str, str]], Set[str]]:
    """Build wafer-origin and GD rework edges from container snapshots.

    Phase 1 resolves wafer container IDs from the distinct FIRSTNAME values
    present in the snapshots. Phase 2 fetches snapshots for any referenced
    containers not yet loaded (wafer lots and GD rework sources). Phase 3
    emits `wafer_origin` edges (wafer -> lot) and `gd_rework_source` edges
    (source -> GD lot).

    Returns:
        (snapshots, semantic_edges, wafer_ids)
        - snapshots: input snapshots augmented with any extra containers fetched
        - semantic_edges: (from_cid, to_cid, edge_type) triples
        - wafer_ids: container IDs classified as wafer lots
    """
    if not base_node_ids:
        return snapshots, [], set()
    all_snapshots = dict(snapshots)
    # Distinct, sorted FIRSTNAME values — candidate wafer-lot names.
    first_names = sorted({
        first_name
        for row in all_snapshots.values()
        for first_name in [_safe_str(row.get("FIRSTNAME"))]
        if first_name
    })
    wafer_by_name = LineageEngine._resolve_lot_ids_by_name(first_names)
    extra_ids: Set[str] = set()
    # Wafer containers not already in the snapshot set must be fetched too.
    for cid in wafer_by_name.values():
        if cid not in all_snapshots:
            extra_ids.add(cid)
    # GD lots reference their rework source via ORIGINALCONTAINERID,
    # falling back to SPLITFROMID; collect unseen sources for fetching.
    for row in all_snapshots.values():
        if not LineageEngine._is_gd_snapshot(row):
            continue
        source = _safe_str(row.get("ORIGINALCONTAINERID")) or _safe_str(row.get("SPLITFROMID"))
        if source and source not in all_snapshots:
            extra_ids.add(source)
    if extra_ids:
        all_snapshots.update(LineageEngine._resolve_container_snapshot(sorted(extra_ids)))
    semantic_edges: List[Tuple[str, str, str]] = []
    wafer_ids: Set[str] = set()
    for cid, row in all_snapshots.items():
        first_name = _safe_str(row.get("FIRSTNAME"))
        wafer_cid = wafer_by_name.get(first_name or "")
        # Self-loops are skipped (a wafer lot's FIRSTNAME may name itself).
        if wafer_cid and wafer_cid != cid:
            semantic_edges.append((wafer_cid, cid, EDGE_TYPE_WAFER))
            wafer_ids.add(wafer_cid)
        if LineageEngine._is_gd_snapshot(row):
            source = _safe_str(row.get("ORIGINALCONTAINERID")) or _safe_str(row.get("SPLITFROMID"))
            if source and source != cid:
                semantic_edges.append((source, cid, EDGE_TYPE_GD_REWORK))
    return all_snapshots, semantic_edges, wafer_ids
@staticmethod
def _build_nodes_payload(
    node_ids: Set[str],
    snapshots: Dict[str, Dict[str, Optional[str]]],
    cid_to_name: Dict[str, str],
    wafer_ids: Set[str],
) -> Dict[str, Dict[str, Optional[str]]]:
    """Build the typed `nodes` payload for lineage responses.

    Nodes are emitted in sorted container-ID order with blank IDs dropped.
    The display name prefers the snapshot's CONTAINERNAME, then the legacy
    cid_to_name map, then the raw container ID itself.
    """
    valid_ids = sorted(cid for cid in set(node_ids) if _safe_str(cid))
    payload: Dict[str, Dict[str, Optional[str]]] = {}
    for cid in valid_ids:
        snapshot = snapshots.get(cid, {})
        display_name = (
            _safe_str(snapshot.get("CONTAINERNAME"))
            or _safe_str(cid_to_name.get(cid))
            or cid
        )
        payload[cid] = {
            "container_id": cid,
            "container_name": display_name,
            "mfgorder_name": _safe_str(snapshot.get("MFGORDERNAME")),
            "wafer_lot": _safe_str(snapshot.get("FIRSTNAME")),
            "node_type": LineageEngine._classify_node_type(cid, snapshot, wafer_ids),
        }
    return payload
@staticmethod
def resolve_split_ancestors(
container_ids: List[str],
@@ -341,9 +554,18 @@ class LineageEngine:
# Step 2: Trace DOWN from roots to get full tree
desc_result = LineageEngine.resolve_split_descendants(roots)
children_map = desc_result["children_map"]
split_children_map = desc_result["children_map"]
children_map: Dict[str, List[str]] = {
parent: list(children)
for parent, children in split_children_map.items()
}
cid_to_name.update(desc_result["cid_to_name"])
split_edges: List[Tuple[str, str, str]] = []
for parent, children in split_children_map.items():
for child in children:
split_edges.append((parent, child, EDGE_TYPE_SPLIT))
# Collect all nodes in the tree
all_nodes: Set[str] = set(roots)
for parent, children in children_map.items():
@@ -356,13 +578,52 @@ class LineageEngine:
# Step 4: Query serial numbers for leaf nodes
leaf_serials = LineageEngine.resolve_leaf_serials(leaf_cids) if leaf_cids else {}
# Step 5: Build semantic links (wafer origin / GD rework) and augment tree.
snapshots: Dict[str, Dict[str, Optional[str]]] = {}
semantic_edges: List[Tuple[str, str, str]] = []
wafer_ids: Set[str] = set()
try:
snapshots = LineageEngine._resolve_container_snapshot(list(all_nodes))
for cid, row in snapshots.items():
name = _safe_str(row.get("CONTAINERNAME"))
if name:
cid_to_name[cid] = name
snapshots, semantic_edges, wafer_ids = LineageEngine._build_semantic_links(all_nodes, snapshots)
for cid, row in snapshots.items():
name = _safe_str(row.get("CONTAINERNAME"))
if name:
cid_to_name[cid] = name
except Exception as exc:
logger.warning("Forward semantic enrichment skipped due to snapshot error: %s", exc)
for from_cid, to_cid, _edge_type in semantic_edges:
if from_cid not in children_map:
children_map[from_cid] = []
_append_unique(children_map[from_cid], to_cid)
all_nodes.add(from_cid)
all_nodes.add(to_cid)
# Recompute roots after semantic edge augmentation.
incoming: Set[str] = set()
for parent, children in children_map.items():
all_nodes.add(parent)
for child in children:
incoming.add(child)
all_nodes.add(child)
roots = sorted([cid for cid in all_nodes if cid not in incoming])
typed_nodes = LineageEngine._build_nodes_payload(all_nodes, snapshots, cid_to_name, wafer_ids)
typed_edges = _to_edge_payload(split_edges + semantic_edges)
logger.info(
"Forward tree resolution completed: seeds=%s, roots=%s, nodes=%s, leaves=%s, serials=%s",
"Forward tree resolution completed: seeds=%s, roots=%s, nodes=%s, leaves=%s, serials=%s, semantic_edges=%s",
len(seed_cids),
len(roots),
len(all_nodes),
len(leaf_cids),
len(leaf_serials),
len(semantic_edges),
)
return {
@@ -371,6 +632,8 @@ class LineageEngine:
"leaf_serials": leaf_serials,
"cid_to_name": cid_to_name,
"total_nodes": len(all_nodes),
"nodes": typed_nodes,
"edges": typed_edges,
}
@staticmethod
@@ -390,7 +653,14 @@ class LineageEngine:
"""
seed_cids = _normalize_list(container_ids)
if not seed_cids:
return {"ancestors": {}, "cid_to_name": {}, "parent_map": {}, "merge_edges": {}}
return {
"ancestors": {},
"cid_to_name": {},
"parent_map": {},
"merge_edges": {},
"nodes": {},
"edges": [],
}
split_result = LineageEngine.resolve_split_ancestors(seed_cids, initial_names)
child_to_parent = split_result["child_to_parent"]
@@ -410,13 +680,17 @@ class LineageEngine:
current = parent
ancestors[seed] = visited
split_edges: List[Tuple[str, str, str]] = [
(parent, child, EDGE_TYPE_SPLIT)
for child, parent in child_to_parent.items()
if _safe_str(parent) and _safe_str(child)
]
all_names = [name for name in cid_to_name.values() if _safe_str(name)]
merge_source_map = LineageEngine.resolve_merge_sources(all_names)
if not merge_source_map:
pm, me = _build_parent_map(child_to_parent, {}, {}, cid_to_name)
return {"ancestors": ancestors, "cid_to_name": cid_to_name, "parent_map": pm, "merge_edges": me}
merge_child_to_parent: Dict[str, str] = {}
merge_source_cids_all: Set[str] = set()
if merge_source_map:
for seed in seed_cids:
self_and_ancestors = ancestors[seed] | {seed}
for cid in list(self_and_ancestors):
@@ -431,10 +705,7 @@ class LineageEngine:
seen = set(seed_cids) | set(child_to_parent.keys()) | set(child_to_parent.values())
new_merge_cids = list(merge_source_cids_all - seen)
if not new_merge_cids:
pm, me = _build_parent_map(child_to_parent, {}, merge_source_map, cid_to_name)
return {"ancestors": ancestors, "cid_to_name": cid_to_name, "parent_map": pm, "merge_edges": me}
if new_merge_cids:
merge_split_result = LineageEngine.resolve_split_ancestors(new_merge_cids)
merge_child_to_parent = merge_split_result["child_to_parent"]
cid_to_name.update(merge_split_result["cid_to_name"])
@@ -452,4 +723,76 @@ class LineageEngine:
current = parent
pm, me = _build_parent_map(child_to_parent, merge_child_to_parent, merge_source_map, cid_to_name)
return {"ancestors": ancestors, "cid_to_name": cid_to_name, "parent_map": pm, "merge_edges": me}
for child, parent in merge_child_to_parent.items():
if _safe_str(parent) and _safe_str(child):
split_edges.append((parent, child, EDGE_TYPE_SPLIT))
merge_payload_edges: List[Tuple[str, str, str]] = []
for child, sources in me.items():
for source in sources:
merge_payload_edges.append((source, child, EDGE_TYPE_MERGE))
all_nodes: Set[str] = set(seed_cids)
for values in ancestors.values():
all_nodes.update(values)
for child, parents in pm.items():
all_nodes.add(child)
all_nodes.update(parents)
snapshots: Dict[str, Dict[str, Optional[str]]] = {}
semantic_edges: List[Tuple[str, str, str]] = []
wafer_ids: Set[str] = set()
try:
snapshots = LineageEngine._resolve_container_snapshot(list(all_nodes))
for cid, row in snapshots.items():
name = _safe_str(row.get("CONTAINERNAME"))
if name:
cid_to_name[cid] = name
snapshots, semantic_edges, wafer_ids = LineageEngine._build_semantic_links(all_nodes, snapshots)
for cid, row in snapshots.items():
name = _safe_str(row.get("CONTAINERNAME"))
if name:
cid_to_name[cid] = name
except Exception as exc:
logger.warning("Reverse semantic enrichment skipped due to snapshot error: %s", exc)
for parent, child, _edge_type in semantic_edges:
parent = _safe_str(parent)
child = _safe_str(child)
if not parent or not child:
continue
parents = pm.setdefault(child, [])
_append_unique(parents, parent)
all_nodes.add(parent)
all_nodes.add(child)
recomputed_ancestors: Dict[str, Set[str]] = {}
for seed in seed_cids:
visited: Set[str] = set()
stack = list(pm.get(seed, []))
depth = 0
while stack and depth < MAX_SPLIT_DEPTH * 10:
depth += 1
parent = _safe_str(stack.pop())
if not parent or parent in visited:
continue
visited.add(parent)
for grand_parent in pm.get(parent, []):
gp = _safe_str(grand_parent)
if gp and gp not in visited:
stack.append(gp)
recomputed_ancestors[seed] = visited
typed_nodes = LineageEngine._build_nodes_payload(all_nodes, snapshots, cid_to_name, wafer_ids)
typed_edges = _to_edge_payload(split_edges + merge_payload_edges + semantic_edges)
return {
"ancestors": recomputed_ancestors,
"cid_to_name": cid_to_name,
"parent_map": pm,
"merge_edges": me,
"nodes": typed_nodes,
"edges": typed_edges,
}

View File

@@ -18,9 +18,10 @@ Architecture:
import csv
import io
import logging
import re
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Any, Dict, List, Optional, Generator
from typing import Any, Dict, List, Optional, Generator, Iterable, Tuple
import pandas as pd
@@ -83,7 +84,7 @@ def validate_lot_input(input_type: str, values: List[str]) -> Optional[str]:
"""Validate LOT input based on type.
Args:
input_type: Type of input ('lot_id', 'serial_number', 'work_order')
input_type: Type of input
values: List of input values
Returns:
@@ -94,8 +95,11 @@ def validate_lot_input(input_type: str, values: List[str]) -> Optional[str]:
limits = {
'lot_id': MAX_LOT_IDS,
'wafer_lot': MAX_LOT_IDS,
'gd_lot_id': MAX_LOT_IDS,
'serial_number': MAX_SERIAL_NUMBERS,
'work_order': MAX_WORK_ORDERS,
'gd_work_order': MAX_WORK_ORDERS,
}
limit = limits.get(input_type, MAX_LOT_IDS)
@@ -155,6 +159,157 @@ def _df_to_records(df: pd.DataFrame) -> List[Dict[str, Any]]:
return data
def _normalize_search_tokens(values: Iterable[str]) -> List[str]:
"""Normalize user-provided search tokens while preserving order."""
normalized: List[str] = []
seen = set()
for raw in values or []:
token = str(raw or '').strip()
if not token or token in seen:
continue
seen.add(token)
normalized.append(token)
return normalized
def _normalize_wildcard_token(value: str) -> str:
"""Normalize user wildcard syntax.
Supports both SQL wildcard (`%`) and shell-style wildcard (`*`).
"""
return str(value or '').replace('*', '%')
def _is_pattern_token(value: str) -> bool:
token = _normalize_wildcard_token(value)
return '%' in token or '_' in token
def _to_like_regex(pattern: str, *, case_insensitive: bool = False) -> re.Pattern:
"""Convert SQL LIKE pattern (`%`, `_`, `\\` escape) to Python regex."""
token = _normalize_wildcard_token(pattern)
parts: List[str] = ['^']
i = 0
while i < len(token):
ch = token[i]
if ch == '\\':
# Keep Oracle ESCAPE semantics: \% or \_ means literal.
if i + 1 < len(token):
i += 1
parts.append(re.escape(token[i]))
else:
parts.append(re.escape(ch))
elif ch == '%':
parts.append('.*')
elif ch == '_':
parts.append('.')
else:
parts.append(re.escape(ch))
i += 1
parts.append('$')
flags = re.IGNORECASE if case_insensitive else 0
return re.compile(''.join(parts), flags)
def _add_exact_or_pattern_condition(
    builder: QueryBuilder,
    column: str,
    values: List[str],
    *,
    case_insensitive: bool = False,
) -> None:
    """Attach one OR-group filter on ``column`` mixing exact and wildcard tokens.

    Exact tokens collapse into a single IN (...) clause; each wildcard token
    becomes a LIKE clause with Oracle ESCAPE '\\\\'. Nothing is added when the
    value list normalizes to empty. When ``case_insensitive`` is set, the
    column is wrapped in UPPER() and bind values are uppercased to match.
    """
    tokens = _normalize_search_tokens(values)
    if not tokens:
        return
    column_expr = f"UPPER(NVL({column}, ''))" if case_insensitive else f"NVL({column}, '')"

    def bind(value: str) -> str:
        # Register one bind parameter on the builder and return its name.
        name = builder._next_param()
        builder.params[name] = value.upper() if case_insensitive else value
        return name

    exact = [token for token in tokens if not _is_pattern_token(token)]
    patterns = [token for token in tokens if _is_pattern_token(token)]
    clauses: List[str] = []
    if exact:
        in_list = ', '.join(f":{bind(token)}" for token in exact)
        clauses.append(f"{column_expr} IN ({in_list})")
    for token in patterns:
        param = bind(_normalize_wildcard_token(token))
        clauses.append(f"{column_expr} LIKE :{param} ESCAPE '\\'")
    if clauses:
        builder.add_condition(f"({' OR '.join(clauses)})")
def _match_rows_by_tokens(
    tokens: List[str],
    rows: List[Dict[str, Any]],
    *,
    row_key: str,
    case_insensitive: bool = False,
) -> Tuple[List[Dict[str, Any]], List[str], Dict[str, int]]:
    """Match each search token against ``rows`` by ``row_key``.

    Wildcard tokens are matched with LIKE-equivalent regexes; plain tokens
    use an exact (optionally case-folded) index.

    Returns:
        ``(matches, not_found, expansion_info)`` where ``matches`` are row
        copies annotated with the originating ``input_value``, ``not_found``
        lists tokens with zero hits, and ``expansion_info`` maps each matched
        token to its hit count. A given (token, container id) pair is emitted
        at most once.
    """
    query_tokens = _normalize_search_tokens(tokens)
    if not query_tokens:
        return [], [], {}

    def canonical(value: Any) -> str:
        # Trim and optionally case-fold for comparison.
        text = str(value or '').strip()
        return text.upper() if case_insensitive else text

    keyed_rows: List[Tuple[str, Dict[str, Any]]] = []
    by_exact_key: Dict[str, List[Dict[str, Any]]] = {}
    for row in rows:
        key = canonical(row.get(row_key))
        if not key:
            continue  # rows with a blank key can never match anything
        keyed_rows.append((key, row))
        by_exact_key.setdefault(key, []).append(row)

    matches: List[Dict[str, Any]] = []
    not_found: List[str] = []
    expansion_info: Dict[str, int] = {}
    emitted = set()
    for token in query_tokens:
        if _is_pattern_token(token):
            regex = _to_like_regex(token, case_insensitive=case_insensitive)
            hits = [row for key, row in keyed_rows if regex.fullmatch(key)]
        else:
            hits = by_exact_key.get(canonical(token), [])
        if not hits:
            not_found.append(token)
            continue
        expansion_info[token] = len(hits)
        for row in hits:
            cid = str(row.get('CONTAINERID') or row.get('container_id') or '').strip()
            if (token, cid) in emitted:
                continue
            emitted.add((token, cid))
            annotated = dict(row)
            annotated['input_value'] = token
            matches.append(annotated)
    return matches, not_found, expansion_info
# ============================================================
# LOT Resolution Functions
# ============================================================
@@ -167,7 +322,7 @@ def resolve_lots(input_type: str, values: List[str]) -> Dict[str, Any]:
This function converts user input to CONTAINERID for subsequent queries.
Args:
input_type: Type of input ('lot_id', 'serial_number', 'work_order')
input_type: Type of input
values: List of input values
Returns:
@@ -187,10 +342,16 @@ def resolve_lots(input_type: str, values: List[str]) -> Dict[str, Any]:
try:
if input_type == 'lot_id':
return _resolve_by_lot_id(cleaned)
elif input_type == 'wafer_lot':
return _resolve_by_wafer_lot(cleaned)
elif input_type == 'gd_lot_id':
return _resolve_by_gd_lot_id(cleaned)
elif input_type == 'serial_number':
return _resolve_by_serial_number(cleaned)
elif input_type == 'work_order':
return _resolve_by_work_order(cleaned)
elif input_type == 'gd_work_order':
return _resolve_by_gd_work_order(cleaned)
else:
return {'error': f'不支援的輸入類型: {input_type}'}
@@ -209,7 +370,7 @@ def _resolve_by_lot_id(lot_ids: List[str]) -> Dict[str, Any]:
Resolution result dict.
"""
builder = QueryBuilder()
builder.add_in_condition("CONTAINERNAME", lot_ids)
_add_exact_or_pattern_condition(builder, "CONTAINERNAME", lot_ids)
sql = SQLLoader.load_with_params(
"query_tool/lot_resolve_id",
CONTAINER_FILTER=builder.get_conditions_sql(),
@@ -217,23 +378,21 @@ def _resolve_by_lot_id(lot_ids: List[str]) -> Dict[str, Any]:
df = read_sql_df(sql, builder.params)
data = _df_to_records(df)
matched, not_found, expansion_info = _match_rows_by_tokens(
lot_ids,
data,
row_key='CONTAINERNAME',
)
# Map results
found = {r['CONTAINERNAME']: r for r in data}
results = []
not_found = []
for lot_id in lot_ids:
if lot_id in found:
for row in matched:
results.append({
'container_id': found[lot_id]['CONTAINERID'],
'lot_id': found[lot_id]['CONTAINERNAME'], # LOT ID for display
'input_value': lot_id,
'spec_name': found[lot_id].get('SPECNAME'),
'qty': found[lot_id].get('QTY'),
'container_id': row.get('CONTAINERID'),
'lot_id': row.get('CONTAINERNAME'),
'input_value': row.get('input_value'),
'spec_name': row.get('SPECNAME'),
'qty': row.get('QTY'),
})
else:
not_found.append(lot_id)
logger.info(f"LOT ID resolution: {len(results)} found, {len(not_found)} not found")
@@ -242,6 +401,104 @@ def _resolve_by_lot_id(lot_ids: List[str]) -> Dict[str, Any]:
'total': len(results),
'input_count': len(lot_ids),
'not_found': not_found,
'expansion_info': expansion_info,
}
def _resolve_by_wafer_lot(wafer_lots: List[str]) -> Dict[str, Any]:
    """Resolve wafer lot values (FIRSTNAME) to CONTAINERID.

    Only containers with OBJECTTYPE = 'LOT' are considered; wildcard tokens
    are supported via the shared exact-or-pattern filter.
    """
    builder = QueryBuilder()
    _add_exact_or_pattern_condition(builder, "FIRSTNAME", wafer_lots)
    builder.add_condition("OBJECTTYPE = 'LOT'")
    sql = SQLLoader.load_with_params(
        "query_tool/lot_resolve_wafer_lot",
        WAFER_FILTER=builder.get_conditions_sql(),
    )
    records = _df_to_records(read_sql_df(sql, builder.params))
    matched, not_found, expansion_info = _match_rows_by_tokens(
        wafer_lots,
        records,
        row_key='FIRSTNAME',
    )
    # Rows lacking a container id are unusable downstream; drop them.
    results = [
        {
            'container_id': row.get('CONTAINERID'),
            'lot_id': row.get('CONTAINERNAME'),
            'input_value': row.get('input_value'),
            'spec_name': row.get('SPECNAME'),
            'qty': row.get('QTY'),
        }
        for row in matched
        if row.get('CONTAINERID')
    ]
    logger.info(f"Wafer lot resolution: {len(results)} containers from {len(wafer_lots)} wafer lots")
    return {
        'data': results,
        'total': len(results),
        'input_count': len(wafer_lots),
        'not_found': not_found,
        'expansion_info': expansion_info,
    }
def _is_gd_like(value: str) -> bool:
text = str(value or '').strip().upper()
return text.startswith('GD')
def _literal_prefix_before_wildcard(value: str) -> str:
    """Return the literal prefix of a LIKE token up to the first unescaped wildcard.

    Honors the same Oracle ESCAPE semantics as ``_to_like_regex``: a backslash
    makes the following character literal, so escaped wildcards (``\\%`` /
    ``\\_``) do not terminate the prefix; the literal character is kept and
    the escape itself is dropped. Used for GD/GA-prefix validation of
    wildcarded user input.
    """
    token = _normalize_wildcard_token(value)
    prefix: List[str] = []
    i = 0
    while i < len(token):
        ch = token[i]
        if ch == '\\' and i + 1 < len(token):
            # Escaped character is literal: keep it, skip the escape.
            prefix.append(token[i + 1])
            i += 2
            continue
        if ch in ('%', '_'):
            break
        prefix.append(ch)
        i += 1
    return ''.join(prefix)
def _resolve_by_gd_lot_id(gd_lot_ids: List[str]) -> Dict[str, Any]:
    """Resolve GD lot IDs to CONTAINERID with strict GD validation.

    Tokens must begin with 'GD' (validated on the literal prefix so wildcard
    tokens like 'GD25%' pass too); matching is case-insensitive and supports
    '%'/'_'/'*' wildcards.

    Returns:
        Resolution result dict with data/total/input_count/not_found/
        expansion_info, or {'error': ...} on invalid input.
    """
    invalid = [value for value in gd_lot_ids if not _is_gd_like(_literal_prefix_before_wildcard(value))]
    if invalid:
        return {'error': f'GD LOT ID 格式錯誤: {", ".join(invalid)}'}
    builder = QueryBuilder()
    _add_exact_or_pattern_condition(builder, "CONTAINERNAME", gd_lot_ids, case_insensitive=True)
    # Defense in depth: even broad wildcard tokens can only reach GD-scoped rows.
    builder.add_condition("(UPPER(NVL(CONTAINERNAME, '')) LIKE 'GD%' OR UPPER(NVL(MFGORDERNAME, '')) LIKE 'GD%')")
    sql = SQLLoader.load_with_params(
        "query_tool/lot_resolve_id",
        CONTAINER_FILTER=builder.get_conditions_sql(),
    )
    df = read_sql_df(sql, builder.params)
    data = _df_to_records(df)
    matched, not_found, expansion_info = _match_rows_by_tokens(
        gd_lot_ids,
        data,
        row_key='CONTAINERNAME',
        case_insensitive=True,
    )
    results = []
    for row in matched:
        cid = row.get('CONTAINERID')
        if not cid:
            # Keep parity with the wafer-lot / GD work-order resolvers:
            # rows without a container id are unusable downstream.
            continue
        results.append({
            'container_id': cid,
            'lot_id': row.get('CONTAINERNAME'),
            'input_value': row.get('input_value'),
            'spec_name': row.get('SPECNAME'),
            'qty': row.get('QTY'),
        })
    logger.info(f"GD lot resolution: {len(results)} found, {len(not_found)} not found")
    return {
        'data': results,
        'total': len(results),
        'input_count': len(gd_lot_ids),
        'not_found': not_found,
        'expansion_info': expansion_info,
    }
@@ -257,7 +514,7 @@ def _resolve_by_serial_number(serial_numbers: List[str]) -> Dict[str, Any]:
Resolution result dict.
"""
builder = QueryBuilder()
builder.add_in_condition("p.FINISHEDNAME", serial_numbers)
_add_exact_or_pattern_condition(builder, "p.FINISHEDNAME", serial_numbers)
sql = SQLLoader.load_with_params(
"query_tool/lot_resolve_serial",
SERIAL_FILTER=builder.get_conditions_sql(),
@@ -265,33 +522,20 @@ def _resolve_by_serial_number(serial_numbers: List[str]) -> Dict[str, Any]:
df = read_sql_df(sql, builder.params)
data = _df_to_records(df)
# Group by serial number
sn_to_containers = {}
for r in data:
sn = r['FINISHEDNAME']
if sn not in sn_to_containers:
sn_to_containers[sn] = []
sn_to_containers[sn].append({
'container_id': r['CONTAINERID'],
'lot_id': r.get('CONTAINERNAME'),
'spec_name': r.get('SPECNAME'),
})
matched, not_found, expansion_info = _match_rows_by_tokens(
serial_numbers,
data,
row_key='FINISHEDNAME',
)
results = []
not_found = []
for sn in serial_numbers:
if sn in sn_to_containers:
for item in sn_to_containers[sn]:
for row in matched:
results.append({
'container_id': item['container_id'],
'lot_id': item['lot_id'],
'input_value': sn,
'spec_name': item.get('spec_name'),
'container_id': row.get('CONTAINERID'),
'lot_id': row.get('CONTAINERNAME'),
'input_value': row.get('input_value'),
'spec_name': row.get('SPECNAME'),
})
else:
not_found.append(sn)
logger.info(f"Serial number resolution: {len(results)} containers from {len(serial_numbers)} inputs")
@@ -300,6 +544,7 @@ def _resolve_by_serial_number(serial_numbers: List[str]) -> Dict[str, Any]:
'total': len(results),
'input_count': len(serial_numbers),
'not_found': not_found,
'expansion_info': expansion_info,
}
@@ -314,8 +559,13 @@ def _resolve_by_work_order(work_orders: List[str]) -> Dict[str, Any]:
Returns:
Resolution result dict.
"""
invalid = [value for value in work_orders if _is_gd_like(_literal_prefix_before_wildcard(value))]
if invalid:
return {'error': f'正向工單僅支援 GA/GC請改用反向 GD 工單查詢: {", ".join(invalid)}'}
builder = QueryBuilder()
builder.add_in_condition("MFGORDERNAME", work_orders)
_add_exact_or_pattern_condition(builder, "MFGORDERNAME", work_orders, case_insensitive=True)
builder.add_condition("(UPPER(NVL(MFGORDERNAME, '')) LIKE 'GA%' OR UPPER(NVL(MFGORDERNAME, '')) LIKE 'GC%')")
sql = SQLLoader.load_with_params(
"query_tool/lot_resolve_work_order",
WORK_ORDER_FILTER=builder.get_conditions_sql(),
@@ -323,35 +573,21 @@ def _resolve_by_work_order(work_orders: List[str]) -> Dict[str, Any]:
df = read_sql_df(sql, builder.params)
data = _df_to_records(df)
# Group by work order
wo_to_containers = {}
for r in data:
wo = r['MFGORDERNAME']
if wo not in wo_to_containers:
wo_to_containers[wo] = []
wo_to_containers[wo].append({
'container_id': r['CONTAINERID'],
'lot_id': r.get('CONTAINERNAME'),
'spec_name': r.get('SPECNAME'),
})
matched, not_found, expansion_info = _match_rows_by_tokens(
work_orders,
data,
row_key='MFGORDERNAME',
case_insensitive=True,
)
results = []
not_found = []
expansion_info = {}
for wo in work_orders:
if wo in wo_to_containers:
expansion_info[wo] = len(wo_to_containers[wo])
for item in wo_to_containers[wo]:
for row in matched:
results.append({
'container_id': item['container_id'],
'lot_id': item['lot_id'],
'input_value': wo,
'spec_name': item.get('spec_name'),
'container_id': row.get('CONTAINERID'),
'lot_id': row.get('CONTAINERNAME'),
'input_value': row.get('input_value'),
'spec_name': row.get('SPECNAME'),
})
else:
not_found.append(wo)
logger.info(f"Work order resolution: {len(results)} containers from {len(work_orders)} orders")
@@ -364,6 +600,51 @@ def _resolve_by_work_order(work_orders: List[str]) -> Dict[str, Any]:
}
def _resolve_by_gd_work_order(work_orders: List[str]) -> Dict[str, Any]:
    """Resolve GD work orders to CONTAINERID.

    Tokens must start with 'GD' (validated on the literal prefix so wildcard
    tokens validate too); matching is case-insensitive with wildcard support.
    """
    bad_tokens = [wo for wo in work_orders if not _is_gd_like(_literal_prefix_before_wildcard(wo))]
    if bad_tokens:
        return {'error': f'GD 工單格式錯誤: {", ".join(bad_tokens)}'}
    builder = QueryBuilder()
    _add_exact_or_pattern_condition(builder, "MFGORDERNAME", work_orders, case_insensitive=True)
    # Hard scope to GD orders regardless of how broad the user pattern is.
    builder.add_condition("UPPER(NVL(MFGORDERNAME, '')) LIKE 'GD%'")
    sql = SQLLoader.load_with_params(
        "query_tool/lot_resolve_work_order",
        WORK_ORDER_FILTER=builder.get_conditions_sql(),
    )
    records = _df_to_records(read_sql_df(sql, builder.params))
    matched, not_found, expansion_info = _match_rows_by_tokens(
        work_orders,
        records,
        row_key='MFGORDERNAME',
        case_insensitive=True,
    )
    # Drop rows without a container id; they cannot seed downstream queries.
    results = [
        {
            'container_id': row.get('CONTAINERID'),
            'lot_id': row.get('CONTAINERNAME'),
            'input_value': row.get('input_value'),
            'spec_name': row.get('SPECNAME'),
        }
        for row in matched
        if row.get('CONTAINERID')
    ]
    logger.info(f"GD work order resolution: {len(results)} containers from {len(work_orders)} orders")
    return {
        'data': results,
        'total': len(results),
        'input_count': len(work_orders),
        'not_found': not_found,
        'expansion_info': expansion_info,
    }
# ============================================================
# LOT History Functions
# ============================================================
@@ -584,6 +865,11 @@ def get_lot_associations_batch(
for cid in container_ids:
rows.extend(events_by_cid.get(cid, []))
# Keep timeline grouping consistent with history rows.
# Especially for materials, workcenter names like "焊_DB_料" need to map
# to the same WORKCENTER_GROUP used by LOT history tracks.
_enrich_workcenter_group(rows)
data = _df_to_records(pd.DataFrame(rows))
logger.debug(
@@ -620,7 +906,9 @@ def get_lot_materials(container_id: str) -> Dict[str, Any]:
try:
events_by_cid = EventFetcher.fetch_events([container_id], "materials")
data = _df_to_records(pd.DataFrame(events_by_cid.get(container_id, [])))
rows = list(events_by_cid.get(container_id, []))
_enrich_workcenter_group(rows)
data = _df_to_records(pd.DataFrame(rows))
logger.debug(f"LOT materials: {len(data)} records for {container_id}")

View File

@@ -0,0 +1,16 @@
-- Unified LineageEngine - Container Snapshot
-- Fetches key container attributes for semantic lineage classification.
--
-- Parameters:
--   CID_FILTER - QueryBuilder-generated condition on c.CONTAINERID
--
SELECT
    c.CONTAINERID,
    c.CONTAINERNAME,
    c.MFGORDERNAME,         -- work order name (GD-prefixed orders get special handling)
    c.OBJECTTYPE,           -- e.g. 'LOT'
    c.FIRSTNAME,            -- wafer lot of origin; used for wafer_origin links
    c.ORIGINALCONTAINERID,  -- lineage pointers consumed by semantic link building
    c.SPLITFROMID
FROM DWH.DW_MES_CONTAINER c
WHERE {{ CID_FILTER }}

View File

@@ -0,0 +1,12 @@
-- Unified LineageEngine - LOT IDs by Container Name
-- Resolves container IDs by LOT names for wafer-origin joins.
--
-- Parameters:
--   NAME_FILTER - QueryBuilder-generated condition on c.CONTAINERNAME
--
SELECT
    c.CONTAINERID,
    c.CONTAINERNAME
FROM DWH.DW_MES_CONTAINER c
WHERE c.OBJECTTYPE = 'LOT'  -- restrict to LOT containers only
  AND {{ NAME_FILTER }}

View File

@@ -14,6 +14,7 @@ SELECT
MATERIALLOTNAME,
QTYCONSUMED,
WORKCENTERNAME,
SPECNAME,
EQUIPMENTNAME,
TXNDATE
FROM DWH.DW_MES_LOTMATERIALSHISTORY

View File

@@ -0,0 +1,15 @@
-- Wafer LOT (FIRSTNAME) to CONTAINERID Resolution
-- Expands wafer lot values to matching LOT containers.
--
-- Parameters:
--   WAFER_FILTER - QueryBuilder filter on FIRSTNAME + object constraints
--                  (caller also adds OBJECTTYPE = 'LOT')
--
SELECT
    CONTAINERID,
    CONTAINERNAME,
    MFGORDERNAME,
    SPECNAME,
    QTY,
    FIRSTNAME  -- wafer lot value the caller matched tokens against
FROM DWH.DW_MES_CONTAINER
WHERE {{ WAFER_FILTER }}

View File

@@ -242,3 +242,70 @@ def test_split_ancestors_matches_legacy_bfs_for_five_known_lots(mock_read_sql_df
assert connect_by_result["child_to_parent"] == legacy_child_to_parent
assert connect_by_result["cid_to_name"] == legacy_cid_to_name
@patch("mes_dashboard.services.lineage_engine.LineageEngine._build_semantic_links")
@patch("mes_dashboard.services.lineage_engine.LineageEngine._resolve_container_snapshot")
@patch("mes_dashboard.services.lineage_engine.LineageEngine.resolve_merge_sources")
@patch("mes_dashboard.services.lineage_engine.LineageEngine.resolve_split_ancestors")
def test_resolve_full_genealogy_includes_semantic_edges(
    mock_resolve_split_ancestors,
    mock_resolve_merge_sources,
    mock_resolve_container_snapshot,
    mock_build_semantic_links,
):
    """Semantic edges from _build_semantic_links must be merged into both the
    parent_map and the typed edges payload of resolve_full_genealogy."""
    # Split lineage: GD-LOT descends from SRC-LOT.
    mock_resolve_split_ancestors.return_value = {
        "child_to_parent": {"GD-LOT": "SRC-LOT"},
        "cid_to_name": {
            "GD-LOT": "GD25060502-A11",
            "SRC-LOT": "56014S00T-5K07R",
        },
    }
    mock_resolve_merge_sources.return_value = {}
    # Container snapshots feed semantic classification. WAFER-LOT carries the
    # same FIRSTNAME (wafer lot) as the GD container, enabling a wafer link.
    snapshots = {
        "GD-LOT": {
            "CONTAINERID": "GD-LOT",
            "CONTAINERNAME": "GD25060502-A11",
            "MFGORDERNAME": "GD25060502",
            "OBJECTTYPE": "LOT",
            "FIRSTNAME": "56014S00T-5K07R",
            "ORIGINALCONTAINERID": "SRC-LOT",
            "SPLITFROMID": "SRC-LOT",
        },
        "SRC-LOT": {
            "CONTAINERID": "SRC-LOT",
            "CONTAINERNAME": "56014S00T-5K07R",
            "MFGORDERNAME": None,
            "OBJECTTYPE": "LOT",
            "FIRSTNAME": "56014S00T-5K07R",
            "ORIGINALCONTAINERID": None,
            "SPLITFROMID": None,
        },
        "WAFER-LOT": {
            "CONTAINERID": "WAFER-LOT",
            "CONTAINERNAME": "56014S00T-5K07R",
            "MFGORDERNAME": None,
            "OBJECTTYPE": "LOT",
            "FIRSTNAME": "56014S00T-5K07R",
            "ORIGINALCONTAINERID": None,
            "SPLITFROMID": None,
        },
    }
    mock_resolve_container_snapshot.return_value = snapshots
    # Two semantic edge kinds are injected: wafer origin and GD rework source.
    mock_build_semantic_links.return_value = (
        snapshots,
        [
            ("WAFER-LOT", "GD-LOT", "wafer_origin"),
            ("SRC-LOT", "GD-LOT", "gd_rework_source"),
        ],
        {"WAFER-LOT"},
    )
    result = LineageEngine.resolve_full_genealogy(["GD-LOT"], {"GD-LOT": "GD25060502-A11"})
    # Both the split parent and the semantic wafer parent must appear.
    assert "GD-LOT" in result["parent_map"]
    assert "SRC-LOT" in result["parent_map"]["GD-LOT"]
    assert "WAFER-LOT" in result["parent_map"]["GD-LOT"]
    # The typed edge payload must carry both semantic edge types.
    edge_types = {edge["edge_type"] for edge in result["edges"]}
    assert "wafer_origin" in edge_types
    assert "gd_rework_source" in edge_types

View File

@@ -139,6 +139,34 @@ class TestResolveEndpoint:
assert data['total'] == 1
assert data['data'][0]['lot_id'] == 'GA23100020-A00-001'
    @patch('mes_dashboard.routes.query_tool_routes.resolve_lots')
    def test_resolve_supports_gd_lot_id(self, mock_resolve, client):
        """POST /resolve with input_type 'gd_lot_id' should pass resolver data through."""
        mock_resolve.return_value = {
            'data': [
                {
                    'container_id': '4881038000260b21',
                    'lot_id': 'GD25060502-A11',
                    'input_value': 'GD25060502-A11',
                }
            ],
            'total': 1,
            'input_count': 1,
            'not_found': [],
        }
        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'gd_lot_id',
                'values': ['GD25060502-A11'],
            }
        )
        assert response.status_code == 200
        payload = response.get_json()
        assert payload['total'] == 1
        assert payload['data'][0]['lot_id'] == 'GD25060502-A11'
@patch('mes_dashboard.routes.query_tool_routes.resolve_lots')
def test_resolve_not_found(self, mock_resolve, client):
"""Should return not_found list for missing LOT IDs."""

View File

@@ -13,6 +13,7 @@ from mes_dashboard.services.query_tool_service import (
validate_lot_input,
validate_equipment_input,
_resolve_by_lot_id,
_resolve_by_wafer_lot,
_resolve_by_serial_number,
_resolve_by_work_order,
get_lot_split_merge_history,
@@ -215,6 +216,70 @@ class TestResolveQueriesUseBindParams:
_, query_params = mock_read.call_args.args
assert query_params == {'p0': 'LOT-1'}
    def test_resolve_by_lot_id_supports_wildcard_pattern(self):
        """Wildcard LOT tokens should emit a LIKE filter and match only in-pattern rows."""
        from unittest.mock import patch
        import pandas as pd
        with patch('mes_dashboard.services.query_tool_service.SQLLoader.load_with_params') as mock_load:
            with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
                mock_load.return_value = "SELECT * FROM DUAL"
                # Two candidate rows; only GA25123401 matches 'GA25%01'.
                mock_read.return_value = pd.DataFrame([
                    {
                        'CONTAINERID': 'CID-1',
                        'CONTAINERNAME': 'GA25123401',
                        'SPECNAME': 'SPEC-1',
                        'QTY': 100,
                    },
                    {
                        'CONTAINERID': 'CID-2',
                        'CONTAINERNAME': 'GA24123401',
                        'SPECNAME': 'SPEC-2',
                        'QTY': 200,
                    },
                ])
                result = _resolve_by_lot_id(['GA25%01'])
                assert result['total'] == 1
                assert result['data'][0]['lot_id'] == 'GA25123401'
                # The original raw token is echoed back as input_value.
                assert result['data'][0]['input_value'] == 'GA25%01'
                sql_params = mock_load.call_args.kwargs
                assert "LIKE" in sql_params['CONTAINER_FILTER']
                # Pattern is passed as a bind parameter, never inlined into SQL.
                _, query_params = mock_read.call_args.args
                assert query_params == {'p0': 'GA25%01'}
    def test_resolve_by_wafer_lot_supports_wildcard_pattern(self):
        """Wafer-lot wildcard tokens filter on FIRSTNAME and keep the LOT-only scope."""
        from unittest.mock import patch
        import pandas as pd
        with patch('mes_dashboard.services.query_tool_service.SQLLoader.load_with_params') as mock_load:
            with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
                mock_load.return_value = "SELECT * FROM DUAL"
                # Two candidate rows; only the GMSN-1173 wafer lot matches.
                mock_read.return_value = pd.DataFrame([
                    {
                        'CONTAINERID': 'CID-1',
                        'CONTAINERNAME': 'GA25123401-A00-001',
                        'SPECNAME': 'SPEC-1',
                        'QTY': 100,
                        'FIRSTNAME': 'GMSN-1173#A',
                    },
                    {
                        'CONTAINERID': 'CID-2',
                        'CONTAINERNAME': 'GA25123402-A00-001',
                        'SPECNAME': 'SPEC-2',
                        'QTY': 100,
                        'FIRSTNAME': 'GMSN-9999#B',
                    },
                ])
                result = _resolve_by_wafer_lot(['GMSN-1173%'])
                assert result['total'] == 1
                assert result['data'][0]['input_value'] == 'GMSN-1173%'
                sql_params = mock_load.call_args.kwargs
                assert "LIKE" in sql_params['WAFER_FILTER']
                # The LOT-only object constraint must survive alongside the pattern.
                assert "OBJECTTYPE = 'LOT'" in sql_params['WAFER_FILTER']
def test_resolve_by_serial_number_uses_query_builder_params(self):
from unittest.mock import patch
import pandas as pd
@@ -263,6 +328,39 @@ class TestResolveQueriesUseBindParams:
_, query_params = mock_read.call_args.args
assert query_params == {'p0': 'WO-1'}
    def test_resolve_by_work_order_supports_wildcard_pattern(self):
        """Wildcard work orders match case-insensitively and bind uppercased params."""
        from unittest.mock import patch
        import pandas as pd
        with patch('mes_dashboard.services.query_tool_service.SQLLoader.load_with_params') as mock_load:
            with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
                mock_load.return_value = "SELECT * FROM DUAL"
                # Two candidate rows; only the GA25 order matches 'ga25%'.
                mock_read.return_value = pd.DataFrame([
                    {
                        'CONTAINERID': 'CID-1',
                        'MFGORDERNAME': 'GA25120018',
                        'CONTAINERNAME': 'GA25120018-A00-001',
                        'SPECNAME': 'SPEC-1',
                    },
                    {
                        'CONTAINERID': 'CID-2',
                        'MFGORDERNAME': 'GA24120018',
                        'CONTAINERNAME': 'GA24120018-A00-001',
                        'SPECNAME': 'SPEC-2',
                    },
                ])
                result = _resolve_by_work_order(['ga25%'])
                # Lower-case input still matches; input_value echoes the raw token.
                assert result['total'] == 1
                assert result['data'][0]['input_value'] == 'ga25%'
                assert result['data'][0]['lot_id'] == 'GA25120018-A00-001'
                sql_params = mock_load.call_args.kwargs
                assert "LIKE" in sql_params['WORK_ORDER_FILTER']
                assert "UPPER(NVL(MFGORDERNAME, ''))" in sql_params['WORK_ORDER_FILTER']
                # Bind param is uppercased for the case-insensitive comparison.
                _, query_params = mock_read.call_args.args
                assert query_params == {'p0': 'GA25%'}
class TestSplitMergeHistoryMode:
"""Fast mode should use read_sql_df, full mode should use read_sql_df_slow."""

View File

@@ -9,6 +9,7 @@ import mes_dashboard.core.database as db
from mes_dashboard.app import create_app
from mes_dashboard.core.cache import NoOpCache
from mes_dashboard.core.rate_limit import reset_rate_limits_for_tests
from mes_dashboard.routes.trace_routes import _lineage_cache_key
def _client():
@@ -27,6 +28,14 @@ def teardown_function():
reset_rate_limits_for_tests()
def test_lineage_cache_key_is_profile_aware():
    """Forward and reverse profiles must never share a lineage cache entry,
    even for identical container id lists."""
    key_forward = _lineage_cache_key("query_tool", ["CID-001", "CID-002"])
    key_reverse = _lineage_cache_key("query_tool_reverse", ["CID-001", "CID-002"])
    assert key_forward != key_reverse
    assert key_forward.startswith("trace:lineage:query_tool:")
    assert key_reverse.startswith("trace:lineage:query_tool_reverse:")
@patch('mes_dashboard.routes.trace_routes.resolve_lots')
def test_seed_resolve_query_tool_success(mock_resolve_lots):
mock_resolve_lots.return_value = {
@@ -91,6 +100,53 @@ def test_seed_resolve_query_tool_reverse_success(mock_resolve_lots):
assert payload['cache_key'].startswith('trace:seed:query_tool_reverse:')
@patch('mes_dashboard.routes.trace_routes.resolve_lots')
def test_seed_resolve_query_tool_reverse_gd_lot_id_success(mock_resolve_lots):
    """Reverse-profile seed-resolve should accept 'gd_lot_id' and echo the
    resolved container name as a seed."""
    mock_resolve_lots.return_value = {
        'data': [
            {
                'container_id': 'CID-GD',
                'lot_id': 'GD25060502-A11',
            }
        ]
    }
    client = _client()
    response = client.post(
        '/api/trace/seed-resolve',
        json={
            'profile': 'query_tool_reverse',
            'params': {
                'resolve_type': 'gd_lot_id',
                'values': ['GD25060502-A11'],
            },
        },
    )
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['seed_count'] == 1
    assert payload['seeds'][0]['container_name'] == 'GD25060502-A11'
def test_seed_resolve_query_tool_rejects_reverse_only_type():
    """The forward profile must reject reverse-only resolve types
    (serial_number) with an INVALID_PARAMS 400 response."""
    client = _client()
    response = client.post(
        '/api/trace/seed-resolve',
        json={
            'profile': 'query_tool',
            'params': {
                'resolve_type': 'serial_number',
                'values': ['SN-001'],
            },
        },
    )
    assert response.status_code == 400
    payload = response.get_json()
    assert payload['error']['code'] == 'INVALID_PARAMS'
def test_seed_resolve_invalid_profile_returns_400():
client = _client()
response = client.post(
@@ -165,6 +221,13 @@ def test_lineage_reverse_profile_returns_ancestors(mock_resolve_genealogy):
},
'parent_map': {'CID-SN': ['CID-A'], 'CID-A': ['CID-B']},
'merge_edges': {'CID-SN': ['CID-A']},
'nodes': {
'CID-SN': {'container_id': 'CID-SN', 'node_type': 'GD'},
'CID-A': {'container_id': 'CID-A', 'node_type': 'GA'},
},
'edges': [
{'from_cid': 'CID-A', 'to_cid': 'CID-SN', 'edge_type': 'gd_rework_source'},
],
}
client = _client()
@@ -184,6 +247,8 @@ def test_lineage_reverse_profile_returns_ancestors(mock_resolve_genealogy):
assert payload['parent_map']['CID-SN'] == ['CID-A']
assert payload['merge_edges']['CID-SN'] == ['CID-A']
assert payload['names']['CID-A'] == 'LOT-A'
assert payload['nodes']['CID-SN']['node_type'] == 'GD'
assert payload['edges'][0]['edge_type'] == 'gd_rework_source'
@patch(