feat: complete dashboard-vite parity and fix portal health/csp regressions
.env.example · 23 lines changed
@@ -103,6 +103,17 @@ RESOURCE_CACHE_ENABLED=true
 # The cache will check for updates at this interval using MAX(LASTCHANGEDATE)
 RESOURCE_SYNC_INTERVAL=14400
+
+# Resource index version check interval in seconds (default: 5)
+RESOURCE_INDEX_VERSION_CHECK_INTERVAL=5
+
+# Realtime equipment status cache toggle and sync interval
+REALTIME_EQUIPMENT_CACHE_ENABLED=true
+EQUIPMENT_STATUS_SYNC_INTERVAL=300
+
+# Filter cache SQL view overrides
+FILTER_CACHE_WIP_VIEW=DWH.DW_MES_LOT_V
+FILTER_CACHE_SPEC_WORKCENTER_VIEW=DWH.DW_MES_SPEC_WORKCENTER_V
 
 # ============================================================
 # Circuit Breaker Configuration
 # ============================================================

@@ -128,6 +139,9 @@ CIRCUIT_BREAKER_WINDOW_SIZE=10
 # Note: real-time Oracle views may take 2-5s per query; set the threshold accordingly
 SLOW_QUERY_THRESHOLD=5.0
+
+# In-memory query metrics sliding window size
+METRICS_WINDOW_SIZE=1000
 
 # ============================================================
 # SQLite Log Store Configuration
 # ============================================================

@@ -164,6 +178,12 @@ WATCHDOG_RESTART_HISTORY_MAX=50
 # Cooldown period between restart requests in seconds (default: 60)
 WORKER_RESTART_COOLDOWN=60
+
+# Watchdog loop check interval in seconds
+WATCHDOG_CHECK_INTERVAL=5
+
+# Runtime contract strict validation toggle
+RUNTIME_CONTRACT_ENFORCE=false
 
 # ============================================================
 # Runtime Resilience Diagnostics Thresholds
 # ============================================================

@@ -185,3 +205,6 @@ RESILIENCE_RESTART_CHURN_THRESHOLD=3
 # Example: https://example.com,https://app.example.com
 # Set to * for development (not recommended for production)
 CORS_ALLOWED_ORIGINS=
+
+# Health endpoint memo cache TTL in seconds
+HEALTH_MEMO_TTL_SECONDS=5
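These interval-style keys are consumed by the backend at startup. A minimal sketch of how such keys can be parsed with safe fallbacks; the helper name `env_int` and the wiring are illustrative assumptions, not the project's actual loader:

```python
import os

def env_int(name: str, default: int) -> int:
    """Read an integer environment variable, falling back on missing or bad values."""
    raw = os.getenv(name)
    try:
        return int(raw) if raw is not None else default
    except ValueError:
        return default

# Hypothetical usage mirroring the keys added above.
RESOURCE_SYNC_INTERVAL = env_int("RESOURCE_SYNC_INTERVAL", 14400)
EQUIPMENT_STATUS_SYNC_INTERVAL = env_int("EQUIPMENT_STATUS_SYNC_INTERVAL", 300)
HEALTH_MEMO_TTL_SECONDS = env_int("HEALTH_MEMO_TTL_SECONDS", 5)
```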
@@ -44,6 +44,16 @@
       "route": "/job-query",
       "name": "設備維修查詢",
       "status": "released"
     },
+    {
+      "route": "/query-tool",
+      "name": "批次追蹤工具",
+      "status": "released"
+    },
+    {
+      "route": "/tmtt-defect",
+      "name": "TMTT印字腳型不良分析",
+      "status": "dev"
+    }
   ],
   "api_public": true,
||||
docs/env_sync_report.md · 26 lines · new file
@@ -0,0 +1,26 @@
# Environment Sync Report

- Source: `/home/egg/Project/DashBoard/.env`
- Target: `/home/egg/Project/DashBoard_vite/.env`
- Example baseline: `.env.example`

- Keys in source env: 39
- Keys in vite .env.example: 54
- Missing keys auto-added to target: 15

## Auto-Added Keys
- `DB_POOL_TIMEOUT`
- `DB_POOL_RECYCLE`
- `DB_TCP_CONNECT_TIMEOUT`
- `DB_CONNECT_RETRY_COUNT`
- `DB_CONNECT_RETRY_DELAY`
- `DB_CALL_TIMEOUT_MS`
- `WIP_CACHE_TTL_SECONDS`
- `WATCHDOG_RUNTIME_DIR`
- `WATCHDOG_PID_FILE`
- `WATCHDOG_RESTART_HISTORY_MAX`
- `RESILIENCE_DEGRADED_ALERT_SECONDS`
- `RESILIENCE_POOL_SATURATION_WARNING`
- `RESILIENCE_POOL_SATURATION_CRITICAL`
- `RESILIENCE_RESTART_CHURN_WINDOW_SECONDS`
- `RESILIENCE_RESTART_CHURN_THRESHOLD`
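The report reads like the output of a small key-diff pass over the two env files. A hedged sketch of that kind of sync; the function names and the append-to-target behavior are assumptions, not the actual tool that produced the report:

```python
from pathlib import Path

def parse_keys(path: Path) -> dict[str, str]:
    """Collect KEY=VALUE pairs, skipping blank lines and comments."""
    keys = {}
    for line in path.read_text().splitlines():
        line = line.strip()
        if line and not line.startswith("#") and "=" in line:
            key, value = line.split("=", 1)
            keys[key.strip()] = value
    return keys

def sync_env_keys(source: Path, target: Path) -> list[str]:
    """Append keys present in source but missing from target; return the added names."""
    src, dst = parse_keys(source), parse_keys(target)
    missing = sorted(set(src) - set(dst))
    with target.open("a") as fh:
        for key in missing:
            fh.write(f"{key}={src[key]}\n")
    return missing
```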
docs/env_usage_gap_report.md · 9 lines · new file
@@ -0,0 +1,9 @@
# Environment Usage Gap Report

- Parsed env keys used in code/tests: 37
- Keys present in `.env`: 63
- Missing keys: 2

## Missing Keys
- `CONDA_DEFAULT_ENV`
- `PYTEST_CURRENT_TEST`
@@ -26,6 +26,7 @@ Result:
 - PASS
 - `routes 71`
 - Redis/Oracle warnings observed in this local environment; app factory and route registration still completed.
+- Note: the current tree includes additional routes (query-tool / tmtt-defect / hardening paths), so a fresh smoke run now reports `routes 83`.
 
 ## Focused Test Gate (root project)
@@ -41,7 +41,7 @@
 
 ### App import smoke
 - `PYTHONPATH=src python -c "from mes_dashboard.app import create_app; app=create_app('testing'); print(app.url_map)"`
-- Verified route initialization count (`routes 71`) in root-only execution context.
+- Verified route initialization count (`routes 83`) in root-only execution context.
 
 ### HTTP smoke (Flask test client)
 - Verify page renders and module asset tags resolve/fallback:
@@ -177,9 +177,8 @@ import './portal.css';
 
 window.openTool = openTool;
 window.toggleHealthPopup = toggleHealthPopup;
-if (healthStatus) {
-  healthStatus.addEventListener('click', toggleHealthPopup);
-}
+// The click handler is wired via an inline onclick in the template for fallback compatibility.
+// Avoid a duplicate binding here; otherwise a single click toggles twice.
 document.addEventListener('click', (e) => {
   if (!e.target.closest('#healthStatus') && !e.target.closest('#healthPopup') && healthPopup) {
     healthPopup.classList.remove('show');
frontend/src/query-tool/main.js · 3139 lines · new file
(File diff suppressed because it is too large.)
frontend/src/tmtt-defect/main.js · 363 lines · new file
@@ -0,0 +1,363 @@
|
||||
import { ensureMesApiAvailable } from '../core/api.js';
|
||||
|
||||
ensureMesApiAvailable();
|
||||
|
||||
(function() {
|
||||
// ============================================================
|
||||
// State
|
||||
// ============================================================
|
||||
let analysisData = null;
|
||||
let activeFilter = null; // { dimension: 'by_workflow', field: 'WORKFLOW', value: 'xxx' }
|
||||
let sortState = { column: null, asc: true };
|
||||
const charts = {};
|
||||
|
||||
const CHART_CONFIG = [
|
||||
{ id: 'chartWorkflow', key: 'by_workflow', field: 'WORKFLOW', title: 'WORKFLOW' },
|
||||
{ id: 'chartPackage', key: 'by_package', field: 'PRODUCTLINENAME', title: 'PACKAGE' },
|
||||
{ id: 'chartType', key: 'by_type', field: 'PJ_TYPE', title: 'TYPE' },
|
||||
{ id: 'chartTmtt', key: 'by_tmtt_machine', field: 'TMTT_EQUIPMENTNAME', title: 'TMTT機台' },
|
||||
{ id: 'chartMold', key: 'by_mold_machine', field: 'MOLD_EQUIPMENTNAME', title: 'MOLD機台' },
|
||||
];
|
||||
|
||||
// ============================================================
|
||||
// Query
|
||||
// ============================================================
|
||||
window.executeQuery = async function() {
|
||||
const startDate = document.getElementById('startDate').value;
|
||||
const endDate = document.getElementById('endDate').value;
|
||||
|
||||
if (!startDate || !endDate) {
|
||||
Toast.warning('請選擇起始和結束日期');
|
||||
return;
|
||||
}
|
||||
|
||||
const btn = document.getElementById('btnQuery');
|
||||
btn.disabled = true;
|
||||
const loadingId = Toast.loading('查詢中...');
|
||||
|
||||
try {
|
||||
const result = await MesApi.get('/api/tmtt-defect/analysis', {
|
||||
params: { start_date: startDate, end_date: endDate },
|
||||
timeout: 120000,
|
||||
});
|
||||
|
||||
Toast.dismiss(loadingId);
|
||||
|
||||
if (!result || !result.success) {
|
||||
Toast.error(result?.error || '查詢失敗');
|
||||
return;
|
||||
}
|
||||
|
||||
analysisData = result.data;
|
||||
activeFilter = null;
|
||||
sortState = { column: null, asc: true };
|
||||
|
||||
renderAll();
|
||||
Toast.success('查詢完成');
|
||||
} catch (err) {
|
||||
Toast.dismiss(loadingId);
|
||||
Toast.error('查詢失敗: ' + (err.message || '未知錯誤'));
|
||||
} finally {
|
||||
btn.disabled = false;
|
||||
}
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// Render
|
||||
// ============================================================
|
||||
function renderAll() {
|
||||
if (!analysisData) return;
|
||||
|
||||
document.getElementById('emptyState').style.display = 'none';
|
||||
document.getElementById('kpiRow').style.display = '';
|
||||
document.getElementById('chartGrid').style.display = '';
|
||||
document.getElementById('detailSection').style.display = '';
|
||||
|
||||
renderKpi(analysisData.kpi);
|
||||
renderCharts(analysisData.charts);
|
||||
renderDailyTrend(analysisData.daily_trend || []);
|
||||
renderDetailTable();
|
||||
}
|
||||
|
||||
function renderKpi(kpi) {
|
||||
document.getElementById('kpiInput').textContent = kpi.total_input.toLocaleString('zh-TW');
|
||||
document.getElementById('kpiLots').textContent = kpi.lot_count.toLocaleString('zh-TW');
|
||||
document.getElementById('kpiPrintQty').textContent = kpi.print_defect_qty.toLocaleString('zh-TW');
|
||||
document.getElementById('kpiPrintRate').innerHTML = kpi.print_defect_rate.toFixed(4) + '<span class="kpi-unit">%</span>';
|
||||
document.getElementById('kpiLeadQty').textContent = kpi.lead_defect_qty.toLocaleString('zh-TW');
|
||||
document.getElementById('kpiLeadRate').innerHTML = kpi.lead_defect_rate.toFixed(4) + '<span class="kpi-unit">%</span>';
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
// Charts
|
||||
// ============================================================
|
||||
function renderCharts(chartsData) {
|
||||
CHART_CONFIG.forEach(cfg => {
|
||||
const data = chartsData[cfg.key] || [];
|
||||
renderParetoChart(cfg.id, data, cfg.key, cfg.field, cfg.title);
|
||||
});
|
||||
}
|
||||
|
||||
function renderParetoChart(containerId, data, chartKey, filterField, title) {
|
||||
if (!charts[containerId]) {
|
||||
charts[containerId] = echarts.init(document.getElementById(containerId));
|
||||
}
|
||||
const chart = charts[containerId];
|
||||
|
||||
if (!data || data.length === 0) {
|
||||
chart.setOption({
|
||||
title: { text: '無資料', left: 'center', top: 'center', textStyle: { color: '#999', fontSize: 14 } },
|
||||
xAxis: { show: false }, yAxis: { show: false }, series: []
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const names = data.map(d => d.name);
|
||||
const printRates = data.map(d => d.print_defect_rate);
|
||||
const leadRates = data.map(d => d.lead_defect_rate);
|
||||
const cumPct = data.map(d => d.cumulative_pct);
|
||||
|
||||
const option = {
|
||||
tooltip: {
|
||||
trigger: 'axis',
|
||||
axisPointer: { type: 'shadow' },
|
||||
formatter: function(params) {
|
||||
const name = params[0].name;
|
||||
const item = data.find(d => d.name === name);
|
||||
if (!item) return name;
|
||||
return `<b>${name}</b><br/>` +
|
||||
`投入數: ${item.input_qty.toLocaleString()}<br/>` +
|
||||
`<span style="color:${getComputedStyle(document.documentElement).getPropertyValue('--print-color')}">●</span> 印字不良: ${item.print_defect_qty} (${item.print_defect_rate.toFixed(4)}%)<br/>` +
|
||||
`<span style="color:${getComputedStyle(document.documentElement).getPropertyValue('--lead-color')}">●</span> 腳型不良: ${item.lead_defect_qty} (${item.lead_defect_rate.toFixed(4)}%)<br/>` +
|
||||
`累積: ${item.cumulative_pct.toFixed(1)}%`;
|
||||
}
|
||||
},
|
||||
legend: { data: ['印字不良率', '腳型不良率', '累積%'], bottom: 0, textStyle: { fontSize: 11 } },
|
||||
grid: { left: 60, right: 60, top: 30, bottom: names.length > 8 ? 100 : 60 },
|
||||
xAxis: {
|
||||
type: 'category', data: names,
|
||||
axisLabel: {
|
||||
rotate: names.length > 8 ? 35 : 0,
|
||||
fontSize: 11,
|
||||
interval: 0,
|
||||
formatter: v => v.length > 16 ? v.slice(0, 16) + '...' : v
|
||||
}
|
||||
},
|
||||
yAxis: [
|
||||
{ type: 'value', name: '不良率(%)', axisLabel: { fontSize: 10 }, splitLine: { lineStyle: { type: 'dashed' } } },
|
||||
{ type: 'value', name: '累積%', max: 100, axisLabel: { fontSize: 10 } }
|
||||
],
|
||||
series: [
|
||||
{
|
||||
name: '印字不良率', type: 'bar', stack: 'defect',
|
||||
data: printRates,
|
||||
itemStyle: { color: '#ef4444' },
|
||||
barMaxWidth: 40,
|
||||
},
|
||||
{
|
||||
name: '腳型不良率', type: 'bar', stack: 'defect',
|
||||
data: leadRates,
|
||||
itemStyle: { color: '#f59e0b' },
|
||||
barMaxWidth: 40,
|
||||
},
|
||||
{
|
||||
name: '累積%', type: 'line', yAxisIndex: 1,
|
||||
data: cumPct,
|
||||
itemStyle: { color: '#6366f1' },
|
||||
lineStyle: { width: 2 },
|
||||
symbol: 'circle', symbolSize: 6,
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
chart.setOption(option, true);
|
||||
|
||||
// Drill-down click handler
|
||||
chart.off('click');
|
||||
chart.on('click', function(params) {
|
||||
if (params.componentType === 'series' && params.name) {
|
||||
setFilter(chartKey, filterField, params.name);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
// Daily Trend Charts
|
||||
// ============================================================
|
||||
function renderDailyTrend(trendData) {
|
||||
renderTrendChart('chartPrintTrend', trendData, 'print_defect_rate', '印字不良率', '#ef4444');
|
||||
renderTrendChart('chartLeadTrend', trendData, 'lead_defect_rate', '腳型不良率', '#f59e0b');
|
||||
}
|
||||
|
||||
function renderTrendChart(containerId, data, rateKey, label, color) {
|
||||
if (!charts[containerId]) {
|
||||
charts[containerId] = echarts.init(document.getElementById(containerId));
|
||||
}
|
||||
const chart = charts[containerId];
|
||||
|
||||
if (!data || data.length === 0) {
|
||||
chart.setOption({
|
||||
title: { text: '無資料', left: 'center', top: 'center', textStyle: { color: '#999', fontSize: 14 } },
|
||||
xAxis: { show: false }, yAxis: { show: false }, series: []
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const dates = data.map(d => d.date);
|
||||
const rates = data.map(d => d[rateKey]);
|
||||
const qtys = data.map(d => d[rateKey === 'print_defect_rate' ? 'print_defect_qty' : 'lead_defect_qty']);
|
||||
const inputs = data.map(d => d.input_qty);
|
||||
|
||||
const option = {
|
||||
tooltip: {
|
||||
trigger: 'axis',
|
||||
formatter: function(params) {
|
||||
const idx = params[0].dataIndex;
|
||||
const d = data[idx];
|
||||
return `<b>${d.date}</b><br/>` +
|
||||
`投入數: ${d.input_qty.toLocaleString()}<br/>` +
|
||||
`<span style="color:${color}">●</span> ${label}: ${d[rateKey].toFixed(4)}%<br/>` +
|
||||
`不良數: ${qtys[idx].toLocaleString()}`;
|
||||
}
|
||||
},
|
||||
legend: { data: [label, '投入數'], bottom: 0, textStyle: { fontSize: 11 } },
|
||||
grid: { left: 60, right: 60, top: 30, bottom: 50 },
|
||||
xAxis: {
|
||||
type: 'category', data: dates,
|
||||
axisLabel: { fontSize: 11, rotate: dates.length > 15 ? 35 : 0 }
|
||||
},
|
||||
yAxis: [
|
||||
{ type: 'value', name: '不良率(%)', axisLabel: { fontSize: 10 }, splitLine: { lineStyle: { type: 'dashed' } } },
|
||||
{ type: 'value', name: '投入數', axisLabel: { fontSize: 10 } }
|
||||
],
|
||||
series: [
|
||||
{
|
||||
name: label, type: 'line', data: rates,
|
||||
itemStyle: { color: color },
|
||||
lineStyle: { width: 2 },
|
||||
symbol: 'circle', symbolSize: 4,
|
||||
areaStyle: { color: { type: 'linear', x: 0, y: 0, x2: 0, y2: 1, colorStops: [{ offset: 0, color: color + '33' }, { offset: 1, color: color + '05' }] } },
|
||||
},
|
||||
{
|
||||
name: '投入數', type: 'bar', yAxisIndex: 1,
|
||||
data: inputs,
|
||||
itemStyle: { color: '#e0e7ff' },
|
||||
barMaxWidth: 20,
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
chart.setOption(option, true);
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
// Filter / Drill-down
|
||||
// ============================================================
|
||||
function setFilter(chartKey, field, value) {
|
||||
activeFilter = { dimension: chartKey, field: field, value: value };
|
||||
renderDetailTable();
|
||||
}
|
||||
|
||||
window.clearFilter = function() {
|
||||
activeFilter = null;
|
||||
renderDetailTable();
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// Detail Table
|
||||
// ============================================================
|
||||
function renderDetailTable() {
|
||||
if (!analysisData) return;
|
||||
|
||||
let rows = analysisData.detail;
|
||||
|
||||
// Apply filter
|
||||
const filterTag = document.getElementById('filterTag');
|
||||
const btnClear = document.getElementById('btnClear');
|
||||
|
||||
if (activeFilter) {
|
||||
rows = rows.filter(r => (r[activeFilter.field] || '') === activeFilter.value);
|
||||
document.getElementById('filterLabel').textContent =
|
||||
`${activeFilter.field}: ${activeFilter.value}`;
|
||||
filterTag.style.display = '';
|
||||
btnClear.style.display = '';
|
||||
} else {
|
||||
filterTag.style.display = 'none';
|
||||
btnClear.style.display = 'none';
|
||||
}
|
||||
|
||||
// Apply sort
|
||||
if (sortState.column) {
|
||||
const col = sortState.column;
|
||||
const asc = sortState.asc;
|
||||
rows = [...rows].sort((a, b) => {
|
||||
const va = a[col] ?? '';
|
||||
const vb = b[col] ?? '';
|
||||
if (typeof va === 'number' && typeof vb === 'number') {
|
||||
return asc ? va - vb : vb - va;
|
||||
}
|
||||
return asc ? String(va).localeCompare(String(vb)) : String(vb).localeCompare(String(va));
|
||||
});
|
||||
}
|
||||
|
||||
document.getElementById('detailCount').textContent = `(${rows.length} 筆)`;
|
||||
|
||||
const tbody = document.getElementById('detailBody');
|
||||
if (rows.length === 0) {
|
||||
tbody.innerHTML = '<tr><td colspan="12" style="text-align:center;padding:20px;color:#999;">無資料</td></tr>';
|
||||
return;
|
||||
}
|
||||
|
||||
tbody.innerHTML = rows.map(r => `<tr>
|
||||
<td>${r.CONTAINERNAME || ''}</td>
|
||||
<td>${r.PJ_TYPE || ''}</td>
|
||||
<td>${r.PRODUCTLINENAME || ''}</td>
|
||||
<td>${r.WORKFLOW || ''}</td>
|
||||
<td>${r.FINISHEDRUNCARD || ''}</td>
|
||||
<td>${r.TMTT_EQUIPMENTNAME || ''}</td>
|
||||
<td>${r.MOLD_EQUIPMENTNAME || ''}</td>
|
||||
<td style="text-align:right">${(r.INPUT_QTY || 0).toLocaleString()}</td>
|
||||
<td style="text-align:right;color:var(--print-color)">${r.PRINT_DEFECT_QTY || 0}</td>
|
||||
<td style="text-align:right;color:var(--print-color)">${(r.PRINT_DEFECT_RATE || 0).toFixed(4)}</td>
|
||||
<td style="text-align:right;color:var(--lead-color)">${r.LEAD_DEFECT_QTY || 0}</td>
|
||||
<td style="text-align:right;color:var(--lead-color)">${(r.LEAD_DEFECT_RATE || 0).toFixed(4)}</td>
|
||||
</tr>`).join('');
|
||||
|
||||
// Update sort indicators
|
||||
document.querySelectorAll('.sort-indicator').forEach(el => el.textContent = '');
|
||||
if (sortState.column) {
|
||||
const ind = document.getElementById('sort_' + sortState.column);
|
||||
if (ind) ind.textContent = sortState.asc ? '▲' : '▼';
|
||||
}
|
||||
}
|
||||
|
||||
window.sortTable = function(column) {
|
||||
if (sortState.column === column) {
|
||||
sortState.asc = !sortState.asc;
|
||||
} else {
|
||||
sortState.column = column;
|
||||
sortState.asc = true;
|
||||
}
|
||||
renderDetailTable();
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// CSV Export
|
||||
// ============================================================
|
||||
window.exportCsv = function() {
|
||||
const startDate = document.getElementById('startDate').value;
|
||||
const endDate = document.getElementById('endDate').value;
|
||||
if (!startDate || !endDate) {
|
||||
Toast.warning('請先查詢資料');
|
||||
return;
|
||||
}
|
||||
window.open(`/api/tmtt-defect/export?start_date=${startDate}&end_date=${endDate}`, '_blank');
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// Resize
|
||||
// ============================================================
|
||||
window.addEventListener('resize', function() {
|
||||
Object.values(charts).forEach(c => c.resize());
|
||||
});
|
||||
})();
|
||||
@@ -17,7 +17,9 @@ export default defineConfig(({ mode }) => ({
       'resource-history': resolve(__dirname, 'src/resource-history/main.js'),
       'job-query': resolve(__dirname, 'src/job-query/main.js'),
       'excel-query': resolve(__dirname, 'src/excel-query/main.js'),
-      tables: resolve(__dirname, 'src/tables/main.js')
+      tables: resolve(__dirname, 'src/tables/main.js'),
+      'query-tool': resolve(__dirname, 'src/query-tool/main.js'),
+      'tmtt-defect': resolve(__dirname, 'src/tmtt-defect/main.js')
     },
     output: {
       entryFileNames: '[name].js',
@@ -104,11 +104,12 @@ def _build_security_headers(production: bool) -> dict[str, str]:
             "img-src 'self' data: blob:; "
             "font-src 'self' data:; "
             "connect-src 'self'; "
-            "frame-ancestors 'none'; "
+            # Portal embeds same-origin report pages via iframe.
+            "frame-ancestors 'self'; "
             "base-uri 'self'; "
             "form-action 'self'"
         ),
-        "X-Frame-Options": "DENY",
+        "X-Frame-Options": "SAMEORIGIN",
         "X-Content-Type-Options": "nosniff",
         "Referrer-Policy": "strict-origin-when-cross-origin",
     }
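Relaxing `frame-ancestors 'none'` / `X-Frame-Options: DENY` to `'self'` / `SAMEORIGIN` is what lets the portal iframe its own report pages while still blocking third-party framing. A minimal sketch of how headers built this way are typically attached to every response; the `after_request` wiring shown here is an assumption, only `_build_security_headers` comes from the diff:

```python
from flask import Flask

app = Flask(__name__)

@app.after_request
def apply_security_headers(response):
    # production flag hard-coded for illustration only
    for name, value in _build_security_headers(production=True).items():
        # setdefault lets an individual route override a header deliberately
        response.headers.setdefault(name, value)
    return response
```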
@@ -233,11 +234,14 @@ def create_app(config_name: str | None = None) -> Flask:
 
     # Initialize database teardown and pool
     init_db(app)
+    running_pytest = bool(os.getenv("PYTEST_CURRENT_TEST"))
+    is_testing_runtime = bool(app.config.get("TESTING")) or app.testing or running_pytest
     with app.app_context():
-        get_engine()
-        start_keepalive()  # Keep database connections alive
-        start_cache_updater()  # Start Redis cache updater
-        init_realtime_equipment_cache(app)  # Start realtime equipment status cache
+        if not is_testing_runtime:
+            get_engine()
+            start_keepalive()  # Keep database connections alive
+            start_cache_updater()  # Start Redis cache updater
+            init_realtime_equipment_cache(app)  # Start realtime equipment status cache
     _register_shutdown_hooks(app)
 
     # Register API routes
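The pattern here keeps the app factory importable under pytest without spawning background workers or opening real connections. A self-contained sketch of the same guard; `start_background_services` is an illustrative stand-in for the engine/keepalive/cache-updater calls above:

```python
import os
from flask import Flask

def start_background_services(app: Flask) -> None:
    # Placeholder for side-effectful startup (DB engine, keepalive, cache updaters).
    app.logger.info("background services started")

def create_app(testing: bool = False) -> Flask:
    app = Flask(__name__)
    app.config["TESTING"] = testing

    # PYTEST_CURRENT_TEST is set by pytest for the duration of each test.
    running_pytest = bool(os.getenv("PYTEST_CURRENT_TEST"))
    if not (app.config["TESTING"] or running_pytest):
        start_background_services(app)
    return app
```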
@@ -343,13 +347,10 @@
         return admin
 
     def frontend_asset(filename: str) -> str | None:
-        """Resolve built frontend asset from static/dist if available."""
+        """Resolve frontend asset path served from static/dist."""
         if not filename:
             return None
-        dist_path = os.path.join(app.static_folder or "", "dist", filename)
-        if os.path.exists(dist_path):
-            return url_for("static", filename=f"dist/{filename}")
-        return None
+        return url_for("static", filename=f"dist/{filename}")
 
     return {
         "is_admin": admin,
@@ -368,6 +369,11 @@
         """Portal home with tabs."""
         return render_template('portal.html')
 
+    @app.route('/favicon.ico')
+    def favicon():
+        """Serve favicon without 404 noise."""
+        return redirect(url_for('static', filename='favicon.svg'), code=302)
+
     @app.route('/tables')
     def tables_page():
         """Table viewer page."""
@@ -398,6 +404,11 @@
         """Resource history analysis page."""
         return render_template('resource_history.html')
 
+    @app.route('/tmtt-defect')
+    def tmtt_defect_page():
+        """TMTT printing & lead form defect analysis page."""
+        return render_template('tmtt_defect.html')
+
     # ========================================================
     # Table Query APIs (for table_data_viewer)
     # ========================================================
@@ -6,6 +6,7 @@ Loads credentials from environment variables (.env file).
 """
 
 import os
+import sys
 from pathlib import Path
 from urllib.parse import quote_plus

@@ -13,9 +14,14 @@ from urllib.parse import quote_plus
 try:
     from dotenv import load_dotenv
 
-    # Find .env file in project root
-    env_path = Path(__file__).resolve().parents[3] / '.env'
-    load_dotenv(env_path)
+    # Find .env file in project root.
+    # Pytest runs should not auto-load runtime secrets/config from .env.
+    running_pytest = bool(os.getenv("PYTEST_CURRENT_TEST")) or any(
+        "pytest" in arg for arg in sys.argv
+    )
+    if not running_pytest:
+        env_path = Path(__file__).resolve().parents[3] / '.env'
+        load_dotenv(env_path)
 except ImportError:
     pass  # python-dotenv not installed; rely on system environment variables
@@ -13,6 +13,8 @@ from .auth_routes import auth_bp
 from .admin_routes import admin_bp
 from .resource_history_routes import resource_history_bp
 from .job_query_routes import job_query_bp
+from .query_tool_routes import query_tool_bp
+from .tmtt_defect_routes import tmtt_defect_bp
 
 
 def register_routes(app) -> None:

@@ -24,6 +26,8 @@ def register_routes(app) -> None:
     app.register_blueprint(hold_bp)
     app.register_blueprint(resource_history_bp)
     app.register_blueprint(job_query_bp)
+    app.register_blueprint(query_tool_bp)
+    app.register_blueprint(tmtt_defect_bp)
 
 __all__ = [
     'wip_bp',

@@ -35,5 +39,7 @@ __all__ = [
     'admin_bp',
     'resource_history_bp',
     'job_query_bp',
+    'query_tool_bp',
+    'tmtt_defect_bp',
     'register_routes',
 ]
src/mes_dashboard/routes/query_tool_routes.py · 509 lines · new file
@@ -0,0 +1,509 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Query Tool API routes.
|
||||
|
||||
Contains Flask Blueprint for batch tracing and equipment period query endpoints:
|
||||
- LOT resolution (LOT ID / Serial Number / Work Order → CONTAINERID)
|
||||
- LOT production history and adjacent lots
|
||||
- LOT associations (materials, rejects, holds, jobs)
|
||||
- Equipment period queries (status hours, lots, materials, rejects, jobs)
|
||||
- CSV export functionality
|
||||
"""
|
||||
|
||||
from flask import Blueprint, jsonify, request, Response, render_template
|
||||
|
||||
from mes_dashboard.services.query_tool_service import (
|
||||
resolve_lots,
|
||||
get_lot_history,
|
||||
get_adjacent_lots,
|
||||
get_lot_materials,
|
||||
get_lot_rejects,
|
||||
get_lot_holds,
|
||||
get_lot_splits,
|
||||
get_lot_jobs,
|
||||
get_equipment_status_hours,
|
||||
get_equipment_lots,
|
||||
get_equipment_materials,
|
||||
get_equipment_rejects,
|
||||
get_equipment_jobs,
|
||||
export_to_csv,
|
||||
generate_csv_stream,
|
||||
validate_date_range,
|
||||
validate_lot_input,
|
||||
validate_equipment_input,
|
||||
)
|
||||
|
||||
# Create Blueprint
|
||||
query_tool_bp = Blueprint('query_tool', __name__)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Page Route
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/query-tool')
|
||||
def query_tool_page():
|
||||
"""Render the query tool page."""
|
||||
return render_template('query_tool.html')
|
||||
|
||||
|
||||
# ============================================================
|
||||
# LOT Resolution API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/resolve', methods=['POST'])
|
||||
def resolve_lot_input():
|
||||
"""Resolve user input to CONTAINERID list.
|
||||
|
||||
Expects JSON body:
|
||||
{
|
||||
"input_type": "lot_id" | "serial_number" | "work_order",
|
||||
"values": ["value1", "value2", ...]
|
||||
}
|
||||
|
||||
Returns:
|
||||
{
|
||||
"data": [{"container_id": "...", "input_value": "..."}, ...],
|
||||
"total": 10,
|
||||
"input_count": 5,
|
||||
"not_found": ["value3"]
|
||||
}
|
||||
"""
|
||||
data = request.get_json()
|
||||
|
||||
if not data:
|
||||
return jsonify({'error': '請求內容不可為空'}), 400
|
||||
|
||||
input_type = data.get('input_type')
|
||||
values = data.get('values', [])
|
||||
|
||||
# Validate input type
|
||||
valid_types = ['lot_id', 'serial_number', 'work_order']
|
||||
if input_type not in valid_types:
|
||||
return jsonify({'error': f'不支援的查詢類型: {input_type}'}), 400
|
||||
|
||||
# Validate values
|
||||
validation_error = validate_lot_input(input_type, values)
|
||||
if validation_error:
|
||||
return jsonify({'error': validation_error}), 400
|
||||
|
||||
result = resolve_lots(input_type, values)
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify(result), 400
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# LOT History API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/lot-history', methods=['GET'])
|
||||
def query_lot_history():
|
||||
"""Query production history for a LOT.
|
||||
|
||||
Query params:
|
||||
container_id: CONTAINERID (16-char hex)
|
||||
workcenter_groups: Optional comma-separated list of WORKCENTER_GROUP names
|
||||
|
||||
Returns production history records.
|
||||
"""
|
||||
container_id = request.args.get('container_id')
|
||||
workcenter_groups_param = request.args.get('workcenter_groups')
|
||||
|
||||
if not container_id:
|
||||
return jsonify({'error': '請指定 CONTAINERID'}), 400
|
||||
|
||||
# Parse workcenter_groups if provided
|
||||
workcenter_groups = None
|
||||
if workcenter_groups_param:
|
||||
workcenter_groups = [
|
||||
g.strip() for g in workcenter_groups_param.split(',') if g.strip()
|
||||
]
|
||||
|
||||
result = get_lot_history(container_id, workcenter_groups=workcenter_groups)
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify(result), 400
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Adjacent Lots API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/adjacent-lots', methods=['GET'])
|
||||
def query_adjacent_lots():
|
||||
"""Query adjacent lots (前後批) for a specific equipment.
|
||||
|
||||
Finds lots before/after target on same equipment until different PJ_TYPE,
|
||||
with minimum 3 lots in each direction.
|
||||
|
||||
Query params:
|
||||
equipment_id: Equipment ID
|
||||
target_time: Target lot's TRACKINTIMESTAMP (ISO format)
|
||||
time_window: Time window in hours (optional, default 24)
|
||||
|
||||
Returns adjacent lots with relative position.
|
||||
"""
|
||||
equipment_id = request.args.get('equipment_id')
|
||||
target_time = request.args.get('target_time')
|
||||
time_window = request.args.get('time_window', 24, type=int)
|
||||
|
||||
if not all([equipment_id, target_time]):
|
||||
return jsonify({'error': '請指定設備和目標時間'}), 400
|
||||
|
||||
result = get_adjacent_lots(equipment_id, target_time, time_window)
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify(result), 400
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# LOT Associations API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/lot-associations', methods=['GET'])
|
||||
def query_lot_associations():
|
||||
"""Query association data for a LOT.
|
||||
|
||||
Query params:
|
||||
container_id: CONTAINERID (16-char hex)
|
||||
type: Association type ('materials', 'rejects', 'holds', 'jobs')
|
||||
equipment_id: Equipment ID (required for 'jobs' type)
|
||||
time_start: Start time (required for 'jobs' type)
|
||||
time_end: End time (required for 'jobs' type)
|
||||
|
||||
Returns association records based on type.
|
||||
"""
|
||||
container_id = request.args.get('container_id')
|
||||
assoc_type = request.args.get('type')
|
||||
|
||||
if not container_id:
|
||||
return jsonify({'error': '請指定 CONTAINERID'}), 400
|
||||
|
||||
valid_types = ['materials', 'rejects', 'holds', 'splits', 'jobs']
|
||||
if assoc_type not in valid_types:
|
||||
return jsonify({'error': f'不支援的關聯類型: {assoc_type}'}), 400
|
||||
|
||||
if assoc_type == 'materials':
|
||||
result = get_lot_materials(container_id)
|
||||
elif assoc_type == 'rejects':
|
||||
result = get_lot_rejects(container_id)
|
||||
elif assoc_type == 'holds':
|
||||
result = get_lot_holds(container_id)
|
||||
elif assoc_type == 'splits':
|
||||
result = get_lot_splits(container_id)
|
||||
elif assoc_type == 'jobs':
|
||||
equipment_id = request.args.get('equipment_id')
|
||||
time_start = request.args.get('time_start')
|
||||
time_end = request.args.get('time_end')
|
||||
|
||||
if not all([equipment_id, time_start, time_end]):
|
||||
return jsonify({'error': '查詢 JOB 需指定設備和時間範圍'}), 400
|
||||
|
||||
result = get_lot_jobs(equipment_id, time_start, time_end)
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify(result), 400
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Equipment Period Query API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/equipment-period', methods=['POST'])
|
||||
def query_equipment_period():
|
||||
"""Query equipment data for a time period.
|
||||
|
||||
Expects JSON body:
|
||||
{
|
||||
"equipment_ids": ["id1", "id2", ...],
|
||||
"equipment_names": ["name1", "name2", ...],
|
||||
"start_date": "2024-01-01",
|
||||
"end_date": "2024-01-31",
|
||||
"query_type": "status_hours" | "lots" | "materials" | "rejects" | "jobs"
|
||||
}
|
||||
|
||||
Returns data based on query_type.
|
||||
"""
|
||||
data = request.get_json()
|
||||
|
||||
if not data:
|
||||
return jsonify({'error': '請求內容不可為空'}), 400
|
||||
|
||||
equipment_ids = data.get('equipment_ids', [])
|
||||
equipment_names = data.get('equipment_names', [])
|
||||
start_date = data.get('start_date')
|
||||
end_date = data.get('end_date')
|
||||
query_type = data.get('query_type')
|
||||
|
||||
# Validate date range
|
||||
if not start_date or not end_date:
|
||||
return jsonify({'error': '請指定日期範圍'}), 400
|
||||
|
||||
validation_error = validate_date_range(start_date, end_date)
|
||||
if validation_error:
|
||||
return jsonify({'error': validation_error}), 400
|
||||
|
||||
# Validate query type
|
||||
valid_types = ['status_hours', 'lots', 'materials', 'rejects', 'jobs']
|
||||
if query_type not in valid_types:
|
||||
return jsonify({'error': f'不支援的查詢類型: {query_type}'}), 400
|
||||
|
||||
# Execute query based on type
|
||||
if query_type == 'status_hours':
|
||||
if not equipment_ids:
|
||||
return jsonify({'error': '請選擇至少一台設備'}), 400
|
||||
result = get_equipment_status_hours(equipment_ids, start_date, end_date)
|
||||
|
||||
elif query_type == 'lots':
|
||||
if not equipment_ids:
|
||||
return jsonify({'error': '請選擇至少一台設備'}), 400
|
||||
result = get_equipment_lots(equipment_ids, start_date, end_date)
|
||||
|
||||
elif query_type == 'materials':
|
||||
if not equipment_names:
|
||||
return jsonify({'error': '請選擇至少一台設備'}), 400
|
||||
result = get_equipment_materials(equipment_names, start_date, end_date)
|
||||
|
||||
elif query_type == 'rejects':
|
||||
if not equipment_names:
|
||||
return jsonify({'error': '請選擇至少一台設備'}), 400
|
||||
result = get_equipment_rejects(equipment_names, start_date, end_date)
|
||||
|
||||
elif query_type == 'jobs':
|
||||
if not equipment_ids:
|
||||
return jsonify({'error': '請選擇至少一台設備'}), 400
|
||||
result = get_equipment_jobs(equipment_ids, start_date, end_date)
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify(result), 400
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Equipment List API (for selection UI)
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/equipment-list', methods=['GET'])
|
||||
def get_equipment_list():
|
||||
"""Get available equipment for selection.
|
||||
|
||||
Returns equipment from cache for equipment selection UI.
|
||||
"""
|
||||
from mes_dashboard.services.resource_cache import get_all_resources
|
||||
|
||||
try:
|
||||
resources = get_all_resources()
|
||||
if not resources:
|
||||
return jsonify({'error': '無法載入設備資料'}), 500
|
||||
|
||||
# Return minimal data for selection UI
|
||||
data = []
|
||||
for r in resources:
|
||||
data.append({
|
||||
'RESOURCEID': r.get('RESOURCEID'),
|
||||
'RESOURCENAME': r.get('RESOURCENAME'),
|
||||
'WORKCENTERNAME': r.get('WORKCENTERNAME'),
|
||||
'RESOURCEFAMILYNAME': r.get('RESOURCEFAMILYNAME'),
|
||||
})
|
||||
|
||||
# Sort by WORKCENTERNAME, then RESOURCENAME
|
||||
data.sort(key=lambda x: (x.get('WORKCENTERNAME', ''), x.get('RESOURCENAME', '')))
|
||||
|
||||
return jsonify({
|
||||
'data': data,
|
||||
'total': len(data)
|
||||
})
|
||||
|
||||
except Exception as exc:
|
||||
return jsonify({'error': f'載入設備資料失敗: {str(exc)}'}), 500
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Workcenter Groups API (for filtering)
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/workcenter-groups', methods=['GET'])
|
||||
def get_workcenter_groups_list():
|
||||
"""Get available workcenter groups for filtering.
|
||||
|
||||
Returns workcenter groups list sorted by sequence.
|
||||
Used for production history filtering UI.
|
||||
"""
|
||||
from mes_dashboard.services.filter_cache import get_workcenter_groups
|
||||
|
||||
try:
|
||||
groups = get_workcenter_groups()
|
||||
if groups is None:
|
||||
return jsonify({'error': '無法載入站點群組資料'}), 500
|
||||
|
||||
return jsonify({
|
||||
'data': groups,
|
||||
'total': len(groups)
|
||||
})
|
||||
|
||||
except Exception as exc:
|
||||
return jsonify({'error': f'載入站點群組失敗: {str(exc)}'}), 500
|
||||
|
||||
|
||||
# ============================================================
|
||||
# CSV Export API
|
||||
# ============================================================
|
||||
|
||||
@query_tool_bp.route('/api/query-tool/export-csv', methods=['POST'])
|
||||
def export_csv():
|
||||
"""Export query results as CSV.
|
||||
|
||||
Expects JSON body:
|
||||
{
|
||||
"export_type": "lot_history" | "adjacent_lots" | "lot_materials" |
|
||||
"lot_rejects" | "lot_holds" | "lot_jobs" |
|
||||
"equipment_status_hours" | "equipment_lots" |
|
||||
"equipment_materials" | "equipment_rejects" | "equipment_jobs",
|
||||
"params": { ... query parameters ... }
|
||||
}
|
||||
|
||||
Returns streaming CSV response.
|
||||
"""
|
||||
data = request.get_json()
|
||||
|
||||
if not data:
|
||||
return jsonify({'error': '請求內容不可為空'}), 400
|
||||
|
||||
export_type = data.get('export_type')
|
||||
params = data.get('params', {})
|
||||
|
||||
# Get data based on export type
|
||||
result = None
|
||||
filename = 'export.csv'
|
||||
|
||||
try:
|
||||
if export_type == 'lot_history':
|
||||
container_id = params.get('container_id')
|
||||
if not container_id:
|
||||
return jsonify({'error': '請指定 CONTAINERID'}), 400
|
||||
result = get_lot_history(container_id)
|
||||
filename = f'lot_history_{container_id}.csv'
|
||||
|
||||
elif export_type == 'adjacent_lots':
|
||||
result = get_adjacent_lots(
|
||||
params.get('equipment_id'),
|
||||
params.get('target_time'),
|
||||
params.get('time_window', 24)
|
||||
)
|
||||
filename = 'adjacent_lots.csv'
|
||||
|
||||
elif export_type == 'lot_materials':
|
||||
container_id = params.get('container_id')
|
||||
result = get_lot_materials(container_id)
|
||||
filename = f'lot_materials_{container_id}.csv'
|
||||
|
||||
elif export_type == 'lot_rejects':
|
||||
container_id = params.get('container_id')
|
||||
result = get_lot_rejects(container_id)
|
||||
filename = f'lot_rejects_{container_id}.csv'
|
||||
|
||||
elif export_type == 'lot_holds':
|
||||
container_id = params.get('container_id')
|
||||
result = get_lot_holds(container_id)
|
||||
filename = f'lot_holds_{container_id}.csv'
|
||||
|
||||
elif export_type == 'lot_splits':
|
||||
container_id = params.get('container_id')
|
||||
result = get_lot_splits(container_id)
|
||||
# Flatten nested structure for CSV
|
||||
if result and 'data' in result:
|
||||
flat_data = []
|
||||
for item in result['data']:
|
||||
serial_number = item.get('serial_number', '')
|
||||
txn_date = item.get('txn_date', '')
|
||||
for lot in item.get('lots', []):
|
||||
flat_data.append({
|
||||
'成品流水號': serial_number,
|
||||
'LOT ID': lot.get('lot_id', ''),
|
||||
'規格': lot.get('spec_name', ''),
|
||||
'數量': lot.get('qty', ''),
|
||||
'合併序號': lot.get('combine_seq', ''),
|
||||
'交易時間': txn_date,
|
||||
})
|
||||
result['data'] = flat_data
|
||||
filename = f'lot_splits_{container_id}.csv'
|
||||
|
||||
elif export_type == 'lot_jobs':
|
||||
result = get_lot_jobs(
|
||||
params.get('equipment_id'),
|
||||
params.get('time_start'),
|
||||
params.get('time_end')
|
||||
)
|
||||
filename = 'lot_jobs.csv'
|
||||
|
||||
elif export_type == 'equipment_status_hours':
|
||||
result = get_equipment_status_hours(
|
||||
params.get('equipment_ids', []),
|
||||
params.get('start_date'),
|
||||
params.get('end_date')
|
||||
)
|
||||
filename = 'equipment_status_hours.csv'
|
||||
|
||||
elif export_type == 'equipment_lots':
|
||||
result = get_equipment_lots(
|
||||
params.get('equipment_ids', []),
|
||||
params.get('start_date'),
|
||||
params.get('end_date')
|
||||
)
|
||||
filename = 'equipment_lots.csv'
|
||||
|
||||
elif export_type == 'equipment_materials':
|
||||
result = get_equipment_materials(
|
||||
params.get('equipment_names', []),
|
||||
params.get('start_date'),
|
||||
params.get('end_date')
|
||||
)
|
||||
filename = 'equipment_materials.csv'
|
||||
|
||||
elif export_type == 'equipment_rejects':
|
||||
result = get_equipment_rejects(
|
||||
params.get('equipment_names', []),
|
||||
params.get('start_date'),
|
||||
params.get('end_date')
|
||||
)
|
||||
filename = 'equipment_rejects.csv'
|
||||
|
||||
elif export_type == 'equipment_jobs':
|
||||
result = get_equipment_jobs(
|
||||
params.get('equipment_ids', []),
|
||||
params.get('start_date'),
|
||||
params.get('end_date')
|
||||
)
|
||||
filename = 'equipment_jobs.csv'
|
||||
|
||||
else:
|
||||
return jsonify({'error': f'不支援的匯出類型: {export_type}'}), 400
|
||||
|
||||
if result is None or 'error' in result:
|
||||
error_msg = result.get('error', '查詢失敗') if result else '查詢失敗'
|
||||
return jsonify({'error': error_msg}), 400
|
||||
|
||||
export_data = result.get('data', [])
|
||||
if not export_data:
|
||||
return jsonify({'error': '查無資料'}), 404
|
||||
|
||||
# Stream CSV response
|
||||
return Response(
|
||||
generate_csv_stream(export_data),
|
||||
mimetype='text/csv; charset=utf-8',
|
||||
headers={
|
||||
'Content-Disposition': f'attachment; filename={filename}'
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as exc:
|
||||
return jsonify({'error': f'匯出失敗: {str(exc)}'}), 500
|
||||
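A hedged usage sketch for the resolve endpoint from a Python client. The host/port and the example LOT values are assumptions; the route and payload shape come from the blueprint above:

```python
import requests

resp = requests.post(
    "http://localhost:5000/api/query-tool/resolve",  # assumed dev host/port
    json={"input_type": "lot_id", "values": ["LOT0001", "LOT0002"]},  # illustrative values
    timeout=30,
)
resp.raise_for_status()
payload = resp.json()
print(payload["total"], "containers resolved; not found:", payload.get("not_found", []))
```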
src/mes_dashboard/routes/tmtt_defect_routes.py · 82 lines · new file
@@ -0,0 +1,82 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""TMTT Defect Analysis API routes.
|
||||
|
||||
Contains Flask Blueprint for TMTT printing & lead form defect analysis endpoints.
|
||||
"""
|
||||
|
||||
from flask import Blueprint, jsonify, request, Response
|
||||
|
||||
from mes_dashboard.services.tmtt_defect_service import (
|
||||
query_tmtt_defect_analysis,
|
||||
export_csv,
|
||||
)
|
||||
|
||||
# Create Blueprint
|
||||
tmtt_defect_bp = Blueprint(
|
||||
'tmtt_defect',
|
||||
__name__,
|
||||
url_prefix='/api/tmtt-defect'
|
||||
)
|
||||
|
||||
|
||||
@tmtt_defect_bp.route('/analysis', methods=['GET'])
|
||||
def api_tmtt_defect_analysis():
|
||||
"""API: Get TMTT defect analysis data (KPI + charts + detail).
|
||||
|
||||
Query Parameters:
|
||||
start_date: Start date (YYYY-MM-DD), required
|
||||
end_date: End date (YYYY-MM-DD), required
|
||||
|
||||
Returns:
|
||||
JSON with kpi, charts, detail sections.
|
||||
"""
|
||||
start_date = request.args.get('start_date')
|
||||
end_date = request.args.get('end_date')
|
||||
|
||||
if not start_date or not end_date:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': '必須提供 start_date 和 end_date 參數'
|
||||
}), 400
|
||||
|
||||
result = query_tmtt_defect_analysis(start_date, end_date)
|
||||
|
||||
if result is None:
|
||||
return jsonify({'success': False, 'error': '查詢失敗,請稍後再試'}), 500
|
||||
|
||||
if 'error' in result:
|
||||
return jsonify({'success': False, 'error': result['error']}), 400
|
||||
|
||||
return jsonify({'success': True, 'data': result})
|
||||
|
||||
|
||||
@tmtt_defect_bp.route('/export', methods=['GET'])
|
||||
def api_tmtt_defect_export():
|
||||
"""API: Export TMTT defect detail data as CSV.
|
||||
|
||||
Query Parameters:
|
||||
start_date: Start date (YYYY-MM-DD), required
|
||||
end_date: End date (YYYY-MM-DD), required
|
||||
|
||||
Returns:
|
||||
CSV file download.
|
||||
"""
|
||||
start_date = request.args.get('start_date')
|
||||
end_date = request.args.get('end_date')
|
||||
|
||||
if not start_date or not end_date:
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': '必須提供 start_date 和 end_date 參數'
|
||||
}), 400
|
||||
|
||||
filename = f"tmtt_defect_{start_date}_to_{end_date}.csv"
|
||||
|
||||
return Response(
|
||||
export_csv(start_date, end_date),
|
||||
mimetype='text/csv',
|
||||
headers={
|
||||
'Content-Disposition': f'attachment; filename={filename}',
|
||||
'Content-Type': 'text/csv; charset=utf-8-sig'
|
||||
}
|
||||
)
|
||||
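A minimal sketch of exercising the analysis endpoint through Flask's test client. It assumes the app factory accepts a `'testing'` config name, as the smoke notes earlier in this commit suggest:

```python
from mes_dashboard.app import create_app

app = create_app('testing')
with app.test_client() as client:
    resp = client.get(
        '/api/tmtt-defect/analysis',
        query_string={'start_date': '2024-01-01', 'end_date': '2024-01-31'},
    )
    body = resp.get_json()
    # Expect success=True with data, or success=False with a validation error.
    print(resp.status_code, body.get('success'), body.get('error'))
```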
src/mes_dashboard/services/query_tool_service.py · 1329 lines · new file
(File diff suppressed because it is too large.)
src/mes_dashboard/services/tmtt_defect_service.py · 529 lines · new file
@@ -0,0 +1,529 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""TMTT Defect Analysis Service.
|
||||
|
||||
Provides functions for analyzing printing (印字) and lead form (腳型) defects
|
||||
at TMTT stations, with MOLD equipment correlation and multi-dimension Pareto analysis.
|
||||
|
||||
Defect rates are calculated separately by LOSSREASONNAME:
|
||||
- Print defect rate = 277_印字不良 / TMTT INPUT
|
||||
- Lead defect rate = 276_腳型不良 / TMTT INPUT
|
||||
"""
|
||||
|
||||
import csv
|
||||
import io
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional, Dict, List, Any, Generator
|
||||
|
||||
import math
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from mes_dashboard.core.database import read_sql_df
|
||||
from mes_dashboard.core.cache import cache_get, cache_set, make_cache_key
|
||||
from mes_dashboard.sql import SQLLoader
|
||||
|
||||
logger = logging.getLogger('mes_dashboard.tmtt_defect')
|
||||
|
||||
# Constants
|
||||
MAX_QUERY_DAYS = 180
|
||||
CACHE_TTL = 300 # 5 minutes
|
||||
|
||||
PRINT_DEFECT = '277_印字不良'
|
||||
LEAD_DEFECT = '276_腳型不良'
|
||||
|
||||
# Dimension column mapping for chart aggregation
|
||||
DIMENSION_MAP = {
|
||||
'by_workflow': 'WORKFLOW',
|
||||
'by_package': 'PRODUCTLINENAME',
|
||||
'by_type': 'PJ_TYPE',
|
||||
'by_tmtt_machine': 'TMTT_EQUIPMENTNAME',
|
||||
'by_mold_machine': 'MOLD_EQUIPMENTNAME',
|
||||
}
|
||||
|
||||
# CSV export column config
|
||||
CSV_COLUMNS = [
|
||||
('CONTAINERNAME', 'LOT ID'),
|
||||
('PJ_TYPE', 'TYPE'),
|
||||
('PRODUCTLINENAME', 'PACKAGE'),
|
||||
('WORKFLOW', 'WORKFLOW'),
|
||||
('FINISHEDRUNCARD', '完工流水碼'),
|
||||
('TMTT_EQUIPMENTNAME', 'TMTT設備'),
|
||||
('MOLD_EQUIPMENTNAME', 'MOLD設備'),
|
||||
('INPUT_QTY', '投入數'),
|
||||
('PRINT_DEFECT_QTY', '印字不良數'),
|
||||
('PRINT_DEFECT_RATE', '印字不良率(%)'),
|
||||
('LEAD_DEFECT_QTY', '腳型不良數'),
|
||||
('LEAD_DEFECT_RATE', '腳型不良率(%)'),
|
||||
]
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Public API
|
||||
# ============================================================
|
||||
|
||||
def query_tmtt_defect_analysis(
|
||||
start_date: str,
|
||||
end_date: str,
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Main entry point for TMTT defect analysis.
|
||||
|
||||
Args:
|
||||
start_date: Start date (YYYY-MM-DD)
|
||||
end_date: End date (YYYY-MM-DD)
|
||||
|
||||
Returns:
|
||||
Dict with kpi, charts, detail sections, or dict with 'error' key.
|
||||
"""
|
||||
# Validate dates
|
||||
error = _validate_date_range(start_date, end_date)
|
||||
if error:
|
||||
return {'error': error}
|
||||
|
||||
# Check cache
|
||||
cache_key = make_cache_key(
|
||||
"tmtt_defect_analysis",
|
||||
filters={'start_date': start_date, 'end_date': end_date},
|
||||
)
|
||||
cached = cache_get(cache_key)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
# Fetch data
|
||||
df = _fetch_base_data(start_date, end_date)
|
||||
if df is None:
|
||||
return None
|
||||
|
||||
# Build response
|
||||
result = {
|
||||
'kpi': _build_kpi(df),
|
||||
'charts': _build_all_charts(df),
|
||||
'daily_trend': _build_daily_trend(df),
|
||||
'detail': _build_detail_table(df),
|
||||
}
|
||||
|
||||
cache_set(cache_key, result, ttl=CACHE_TTL)
|
||||
return result
|
||||
|
||||
|
||||
def export_csv(
|
||||
start_date: str,
|
||||
end_date: str,
|
||||
) -> Generator[str, None, None]:
|
||||
"""Stream CSV export of detail data.
|
||||
|
||||
Args:
|
||||
start_date: Start date (YYYY-MM-DD)
|
||||
end_date: End date (YYYY-MM-DD)
|
||||
|
||||
Yields:
|
||||
CSV lines as strings.
|
||||
"""
|
||||
df = _fetch_base_data(start_date, end_date)
|
||||
|
||||
# BOM for Excel UTF-8 compatibility
|
||||
yield '\ufeff'
|
||||
|
||||
output = io.StringIO()
|
||||
writer = csv.writer(output)
|
||||
|
||||
# Header row
|
||||
writer.writerow([label for _, label in CSV_COLUMNS])
|
||||
yield output.getvalue()
|
||||
output.seek(0)
|
||||
output.truncate(0)
|
||||
|
||||
if df is None or df.empty:
|
||||
return
|
||||
|
||||
detail = _build_detail_table(df)
|
||||
for row in detail:
|
||||
writer.writerow([row.get(col, '') for col, _ in CSV_COLUMNS])
|
||||
yield output.getvalue()
|
||||
output.seek(0)
|
||||
output.truncate(0)
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Helpers
|
||||
# ============================================================
|
||||
|
||||
def _safe_str(v, default=''):
|
||||
"""Return a JSON-safe string. Converts NaN/None to default."""
|
||||
if v is None or (isinstance(v, float) and math.isnan(v)):
|
||||
return default
|
||||
try:
|
||||
if pd.isna(v):
|
||||
return default
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
return str(v)
|
||||
|
||||
|
||||
def _safe_float(v, default=0.0):
|
||||
"""Return a JSON-safe float. Converts NaN/None to default."""
|
||||
if v is None:
|
||||
return default
|
||||
try:
|
||||
f = float(v)
|
||||
if math.isnan(f) or math.isinf(f):
|
||||
return default
|
||||
return f
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _safe_int(v, default=0):
|
||||
"""Return a JSON-safe int. Converts NaN/None to default."""
|
||||
return int(_safe_float(v, float(default)))
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Internal Functions
|
||||
# ============================================================
|
||||
|
||||
def _validate_date_range(start_date: str, end_date: str) -> Optional[str]:
|
||||
"""Validate date range parameters.
|
||||
|
||||
Returns:
|
||||
Error message string, or None if valid.
|
||||
"""
|
||||
try:
|
||||
start = datetime.strptime(start_date, '%Y-%m-%d')
|
||||
end = datetime.strptime(end_date, '%Y-%m-%d')
|
||||
except (ValueError, TypeError):
|
||||
return '日期格式無效,請使用 YYYY-MM-DD'
|
||||
|
||||
if start > end:
|
||||
return '起始日期不能晚於結束日期'
|
||||
|
||||
if (end - start).days > MAX_QUERY_DAYS:
|
||||
return f'查詢範圍不能超過 {MAX_QUERY_DAYS} 天'
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _fetch_base_data(start_date: str, end_date: str) -> Optional[pd.DataFrame]:
|
||||
"""Execute base_data.sql and return raw DataFrame.
|
||||
|
||||
Args:
|
||||
start_date: Start date (YYYY-MM-DD)
|
||||
end_date: End date (YYYY-MM-DD)
|
||||
|
||||
Returns:
|
||||
DataFrame or None on error.
|
||||
"""
|
||||
try:
|
||||
sql = SQLLoader.load("tmtt_defect/base_data")
|
||||
params = {
|
||||
'start_date': start_date,
|
||||
'end_date': end_date,
|
||||
}
|
||||
df = read_sql_df(sql, params)
|
||||
if df is None:
|
||||
logger.error("TMTT defect base query returned None")
|
||||
return None
|
||||
logger.info(
|
||||
f"TMTT defect query: {len(df)} rows, "
|
||||
f"{df['CONTAINERID'].nunique() if not df.empty else 0} unique lots"
|
||||
)
|
||||
return df
|
||||
except Exception as exc:
|
||||
logger.error(f"TMTT defect query failed: {exc}", exc_info=True)
|
||||
return None
|
||||
|
||||
|
||||
def _build_kpi(df: pd.DataFrame) -> Dict[str, Any]:
|
||||
"""Build KPI summary from base data.
|
||||
|
||||
Defect rates are calculated separately by LOSSREASONNAME.
|
||||
INPUT is deduplicated by CONTAINERID (a LOT may have multiple defect rows).
|
||||
|
||||
Args:
|
||||
df: Base data DataFrame.
|
||||
|
||||
Returns:
|
||||
KPI dict with total_input, lot_count, print/lead defect qty and rate.
|
||||
"""
|
||||
if df.empty:
|
||||
return {
|
||||
'total_input': 0,
|
||||
'lot_count': 0,
|
||||
'print_defect_qty': 0,
|
||||
'print_defect_rate': 0.0,
|
||||
'lead_defect_qty': 0,
|
||||
'lead_defect_rate': 0.0,
|
||||
}
|
||||
|
||||
# Deduplicate for INPUT: one TRACKINQTY per unique CONTAINERID
|
||||
unique_lots = df.drop_duplicates(subset=['CONTAINERID'])
|
||||
total_input = int(unique_lots['TRACKINQTY'].sum())
|
||||
lot_count = len(unique_lots)
|
||||
|
||||
# Defect totals by type
|
||||
defect_rows = df[df['REJECTQTY'] > 0]
|
||||
print_qty = int(
|
||||
defect_rows.loc[
|
||||
defect_rows['LOSSREASONNAME'] == PRINT_DEFECT, 'REJECTQTY'
|
||||
].sum()
|
||||
)
|
||||
lead_qty = int(
|
||||
defect_rows.loc[
|
||||
defect_rows['LOSSREASONNAME'] == LEAD_DEFECT, 'REJECTQTY'
|
||||
].sum()
|
||||
)
|
||||
|
||||
return {
|
||||
'total_input': total_input,
|
||||
'lot_count': lot_count,
|
||||
'print_defect_qty': print_qty,
|
||||
'print_defect_rate': round(print_qty / total_input * 100, 4) if total_input else 0.0,
|
||||
'lead_defect_qty': lead_qty,
|
||||
'lead_defect_rate': round(lead_qty / total_input * 100, 4) if total_input else 0.0,
|
||||
}
|
||||
|
||||
|
||||
def _build_chart_data(
|
||||
df: pd.DataFrame,
|
||||
dimension: str,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Build Pareto chart data for a given dimension.
|
||||
|
||||
Each item includes separate print and lead defect quantities/rates.
|
||||
|
||||
Args:
|
||||
df: Base data DataFrame.
|
||||
dimension: Column name to group by.
|
||||
|
||||
Returns:
|
||||
List of dicts sorted by total defect qty DESC, with cumulative_pct.
|
||||
"""
|
||||
if df.empty:
|
||||
return []
|
||||
|
||||
# Fill NaN dimension values
|
||||
work_df = df.copy()
|
||||
work_df[dimension] = work_df[dimension].fillna('(未知)')
|
||||
|
||||
# INPUT per dimension (deduplicated by CONTAINERID within each group)
|
||||
input_by_dim = (
|
||||
work_df.drop_duplicates(subset=['CONTAINERID', dimension])
|
||||
.groupby(dimension)['TRACKINQTY']
|
||||
.sum()
|
||||
)
|
||||
|
||||
# Defect qty per dimension per type
|
||||
defect_rows = work_df[work_df['REJECTQTY'] > 0]
|
||||
|
||||
print_by_dim = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == PRINT_DEFECT]
|
||||
.groupby(dimension)['REJECTQTY']
|
||||
.sum()
|
||||
)
|
||||
lead_by_dim = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == LEAD_DEFECT]
|
||||
.groupby(dimension)['REJECTQTY']
|
||||
.sum()
|
||||
)
|
||||
|
||||
# Combine
|
||||
combined = pd.DataFrame({
|
||||
'input_qty': input_by_dim,
|
||||
'print_defect_qty': print_by_dim,
|
||||
'lead_defect_qty': lead_by_dim,
|
||||
}).fillna(0).astype({'print_defect_qty': int, 'lead_defect_qty': int, 'input_qty': int})
|
||||
|
||||
combined['total_defect_qty'] = combined['print_defect_qty'] + combined['lead_defect_qty']
|
||||
combined = combined.sort_values('total_defect_qty', ascending=False)
|
||||
|
||||
# Cumulative percentage
|
||||
total_defects = combined['total_defect_qty'].sum()
|
||||
if total_defects > 0:
|
||||
combined['cumulative_pct'] = (
|
||||
combined['total_defect_qty'].cumsum() / total_defects * 100
|
||||
).round(2)
|
||||
else:
|
||||
combined['cumulative_pct'] = 0.0
|
||||
|
||||
# Defect rates
|
||||
combined['print_defect_rate'] = (
|
||||
combined['print_defect_qty'] / combined['input_qty'] * 100
|
||||
).round(4).where(combined['input_qty'] > 0, 0.0)
|
||||
combined['lead_defect_rate'] = (
|
||||
combined['lead_defect_qty'] / combined['input_qty'] * 100
|
||||
).round(4).where(combined['input_qty'] > 0, 0.0)
|
||||
|
||||
result = []
|
||||
for name, row in combined.iterrows():
|
||||
result.append({
|
||||
'name': _safe_str(name),
|
||||
'input_qty': _safe_int(row['input_qty']),
|
||||
'print_defect_qty': _safe_int(row['print_defect_qty']),
|
||||
'print_defect_rate': _safe_float(row['print_defect_rate']),
|
||||
'lead_defect_qty': _safe_int(row['lead_defect_qty']),
|
||||
'lead_defect_rate': _safe_float(row['lead_defect_rate']),
|
||||
'total_defect_qty': _safe_int(row['total_defect_qty']),
|
||||
'cumulative_pct': _safe_float(row['cumulative_pct']),
|
||||
})
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _build_all_charts(df: pd.DataFrame) -> Dict[str, List[Dict]]:
|
||||
"""Build chart data for all 5 dimensions.
|
||||
|
||||
Args:
|
||||
df: Base data DataFrame.
|
||||
|
||||
Returns:
|
||||
Dict mapping chart key to Pareto data list.
|
||||
"""
|
||||
return {
|
||||
key: _build_chart_data(df, col)
|
||||
for key, col in DIMENSION_MAP.items()
|
||||
}
|
||||
|
||||
|
||||
def _build_daily_trend(df: pd.DataFrame) -> List[Dict[str, Any]]:
|
||||
"""Build daily defect rate trend data.
|
||||
|
||||
Groups by TRACKINTIMESTAMP date, calculates daily print/lead defect rates.
|
||||
|
||||
Args:
|
||||
df: Base data DataFrame.
|
||||
|
||||
Returns:
|
||||
List of dicts sorted by date ASC, each with date, input_qty,
|
||||
print/lead defect qty and rate.
|
||||
"""
|
||||
if df.empty:
|
||||
return []
|
||||
|
||||
work_df = df.copy()
|
||||
work_df['DATE'] = pd.to_datetime(work_df['TRACKINTIMESTAMP']).dt.strftime('%Y-%m-%d')
|
||||
|
||||
# Daily INPUT (deduplicated by CONTAINERID per date)
|
||||
daily_input = (
|
||||
work_df.drop_duplicates(subset=['CONTAINERID', 'DATE'])
|
||||
.groupby('DATE')['TRACKINQTY']
|
||||
.sum()
|
||||
)
|
||||
|
||||
# Daily defects by type
|
||||
defect_rows = work_df[work_df['REJECTQTY'] > 0]
|
||||
|
||||
daily_print = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == PRINT_DEFECT]
|
||||
.groupby('DATE')['REJECTQTY']
|
||||
.sum()
|
||||
)
|
||||
daily_lead = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == LEAD_DEFECT]
|
||||
.groupby('DATE')['REJECTQTY']
|
||||
.sum()
|
||||
)
|
||||
|
||||
combined = pd.DataFrame({
|
||||
'input_qty': daily_input,
|
||||
'print_defect_qty': daily_print,
|
||||
'lead_defect_qty': daily_lead,
|
||||
}).fillna(0).astype({'print_defect_qty': int, 'lead_defect_qty': int, 'input_qty': int})
|
||||
|
||||
combined['print_defect_rate'] = (
|
||||
combined['print_defect_qty'] / combined['input_qty'] * 100
|
||||
).round(4).where(combined['input_qty'] > 0, 0.0)
|
||||
combined['lead_defect_rate'] = (
|
||||
combined['lead_defect_qty'] / combined['input_qty'] * 100
|
||||
).round(4).where(combined['input_qty'] > 0, 0.0)
|
||||
|
||||
combined = combined.sort_index()
|
||||
|
||||
result = []
|
||||
for date, row in combined.iterrows():
|
||||
result.append({
|
||||
'date': str(date),
|
||||
'input_qty': _safe_int(row['input_qty']),
|
||||
'print_defect_qty': _safe_int(row['print_defect_qty']),
|
||||
'print_defect_rate': _safe_float(row['print_defect_rate']),
|
||||
'lead_defect_qty': _safe_int(row['lead_defect_qty']),
|
||||
'lead_defect_rate': _safe_float(row['lead_defect_rate']),
|
||||
})
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _build_detail_table(df: pd.DataFrame) -> List[Dict[str, Any]]:
|
||||
"""Build detail table rows, one per LOT.
|
||||
|
||||
Aggregates defect quantities per LOT across defect types.
|
||||
|
||||
Args:
|
||||
df: Base data DataFrame.
|
||||
|
||||
Returns:
|
||||
List of dicts, one per LOT.
|
||||
"""
|
||||
if df.empty:
|
||||
return []
|
||||
|
||||
# Pivot defects per LOT
|
||||
lot_group_cols = [
|
||||
'CONTAINERID', 'CONTAINERNAME', 'PJ_TYPE', 'PRODUCTLINENAME',
|
||||
'WORKFLOW', 'FINISHEDRUNCARD', 'TMTT_EQUIPMENTNAME',
|
||||
'MOLD_EQUIPMENTNAME', 'TRACKINQTY',
|
||||
]
|
||||
|
||||
# Get unique LOT info (first occurrence)
|
||||
lots = df.drop_duplicates(subset=['CONTAINERID'])[lot_group_cols].copy()
|
||||
|
||||
# Aggregate defects per LOT per type
|
||||
defect_rows = df[df['REJECTQTY'] > 0]
|
||||
|
||||
print_defects = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == PRINT_DEFECT]
|
||||
.groupby('CONTAINERID')['REJECTQTY']
|
||||
.sum()
|
||||
.rename('PRINT_DEFECT_QTY')
|
||||
)
|
||||
lead_defects = (
|
||||
defect_rows[defect_rows['LOSSREASONNAME'] == LEAD_DEFECT]
|
||||
.groupby('CONTAINERID')['REJECTQTY']
|
||||
.sum()
|
||||
.rename('LEAD_DEFECT_QTY')
|
||||
)
|
||||
|
||||
lots = lots.set_index('CONTAINERID')
|
||||
lots = lots.join(print_defects, how='left')
|
||||
lots = lots.join(lead_defects, how='left')
|
||||
lots['PRINT_DEFECT_QTY'] = lots['PRINT_DEFECT_QTY'].fillna(0).astype(int)
|
||||
lots['LEAD_DEFECT_QTY'] = lots['LEAD_DEFECT_QTY'].fillna(0).astype(int)
|
||||
|
||||
# Calculate rates
|
||||
lots['INPUT_QTY'] = lots['TRACKINQTY'].astype(int)
|
||||
lots['PRINT_DEFECT_RATE'] = (
|
||||
lots['PRINT_DEFECT_QTY'] / lots['INPUT_QTY'] * 100
|
||||
).round(4).where(lots['INPUT_QTY'] > 0, 0.0)
|
||||
lots['LEAD_DEFECT_RATE'] = (
|
||||
lots['LEAD_DEFECT_QTY'] / lots['INPUT_QTY'] * 100
|
||||
).round(4).where(lots['INPUT_QTY'] > 0, 0.0)
|
||||
|
||||
# Convert to list of dicts
|
||||
lots = lots.reset_index()
|
||||
result = []
|
||||
for _, row in lots.iterrows():
|
||||
result.append({
|
||||
'CONTAINERNAME': _safe_str(row.get('CONTAINERNAME')),
|
||||
'PJ_TYPE': _safe_str(row.get('PJ_TYPE')),
|
||||
'PRODUCTLINENAME': _safe_str(row.get('PRODUCTLINENAME')),
|
||||
'WORKFLOW': _safe_str(row.get('WORKFLOW')),
|
||||
'FINISHEDRUNCARD': _safe_str(row.get('FINISHEDRUNCARD')),
|
||||
'TMTT_EQUIPMENTNAME': _safe_str(row.get('TMTT_EQUIPMENTNAME')),
|
||||
'MOLD_EQUIPMENTNAME': _safe_str(row.get('MOLD_EQUIPMENTNAME')),
|
||||
'INPUT_QTY': _safe_int(row.get('INPUT_QTY')),
|
||||
'PRINT_DEFECT_QTY': _safe_int(row.get('PRINT_DEFECT_QTY')),
|
||||
'PRINT_DEFECT_RATE': _safe_float(row.get('PRINT_DEFECT_RATE')),
|
||||
'LEAD_DEFECT_QTY': _safe_int(row.get('LEAD_DEFECT_QTY')),
|
||||
'LEAD_DEFECT_RATE': _safe_float(row.get('LEAD_DEFECT_RATE')),
|
||||
})
|
||||
|
||||
return result
|
||||
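Taken together, these helpers turn one base-data DataFrame into the full payload the TMTT defect page consumes. A minimal sketch of the likely composition — the aggregator function and the `_calculate_kpi` name are assumptions for illustration (the KPI helper's `def` line sits above this excerpt):

```python
import pandas as pd

def build_tmtt_defect_payload(df: pd.DataFrame) -> dict:
    """Hypothetical aggregator over the helpers defined in this module."""
    return {
        'kpi': _calculate_kpi(df),             # assumed name; defined above this excerpt
        'charts': _build_all_charts(df),       # Pareto data per dimension
        'daily_trend': _build_daily_trend(df),
        'detail': _build_detail_table(df),
    }
```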
130  src/mes_dashboard/sql/query_tool/adjacent_lots.sql  Normal file
@@ -0,0 +1,130 @@
-- Adjacent Lots Query (前後批查詢)
-- Finds lots processed before and after a target lot on the same equipment
-- Searches until finding a different PJ_TYPE, with minimum 3 lots in each direction
--
-- Parameters:
--   :equipment_id        - Target equipment ID
--   :target_trackin_time - Target lot's TRACKINTIMESTAMP
--   :time_window_hours   - Time window in hours (default 24)
--
-- Output columns:
--   PJ_TYPE      - Product type (from DW_MES_CONTAINER)
--   PJ_BOP       - BOP code (from DW_MES_CONTAINER)
--   WAFER_LOT_ID - Wafer lot ID, mapped from FIRSTNAME (from DW_MES_CONTAINER)
--
-- Logic:
--   1. Only filter by EQUIPMENTID (no SPECNAME restriction)
--   2. Search forward/backward until finding a different PJ_TYPE
--   3. Minimum 3 lots in each direction (even if different PJ_TYPE found earlier)
--   4. Stop at first different PJ_TYPE if found beyond 3 lots
--
-- Note: Deduplicates multiple track-out records for same track-in (takes latest track-out)

WITH time_bounds AS (
    SELECT
        :target_trackin_time - INTERVAL '1' HOUR * :time_window_hours AS time_start,
        :target_trackin_time + INTERVAL '1' HOUR * :time_window_hours AS time_end
    FROM DUAL
),
-- Step 1: Get all records and deduplicate
-- Multiple track-out records for same track-in -> take the latest track-out time
raw_lots AS (
    SELECT
        h.CONTAINERID,
        h.EQUIPMENTID,
        h.EQUIPMENTNAME,
        h.SPECNAME,
        h.TRACKINTIMESTAMP,
        h.TRACKOUTTIMESTAMP,
        h.TRACKINQTY,
        h.TRACKOUTQTY,
        h.FINISHEDRUNCARD,
        h.PJ_WORKORDER,
        c.CONTAINERNAME,
        c.PJ_TYPE,
        c.PJ_BOP,
        c.FIRSTNAME AS WAFER_LOT_ID,
        ROW_NUMBER() OVER (
            PARTITION BY h.CONTAINERID, h.EQUIPMENTID, h.TRACKINTIMESTAMP
            ORDER BY h.TRACKOUTTIMESTAMP DESC NULLS LAST
        ) AS dedup_rn
    FROM DWH.DW_MES_LOTWIPHISTORY h
    LEFT JOIN DWH.DW_MES_CONTAINER c ON h.CONTAINERID = c.CONTAINERID
    CROSS JOIN time_bounds tb
    WHERE h.EQUIPMENTID = :equipment_id
      AND h.TRACKINTIMESTAMP BETWEEN tb.time_start AND tb.time_end
),
-- Step 2: Keep only deduplicated records
deduped_lots AS (
    SELECT *
    FROM raw_lots
    WHERE dedup_rn = 1
),
-- Step 3: Rank by track-in time (partitioned by EQUIPMENTID only)
ranked_lots AS (
    SELECT
        d.*,
        ROW_NUMBER() OVER (
            PARTITION BY d.EQUIPMENTID
            ORDER BY d.TRACKINTIMESTAMP
        ) AS rn
    FROM deduped_lots d
),
-- Step 4: Find target lot position and PJ_TYPE
target_lot AS (
    SELECT rn AS target_rn, PJ_TYPE AS target_pj_type
    FROM ranked_lots
    WHERE TRACKINTIMESTAMP = :target_trackin_time
),
-- Step 5: Find first lot BEFORE target with different PJ_TYPE
-- (highest rn that is less than target_rn and has different PJ_TYPE)
first_diff_before AS (
    SELECT MAX(r.rn) AS rn
    FROM ranked_lots r
    CROSS JOIN target_lot t
    WHERE r.rn < t.target_rn
      AND (
        (r.PJ_TYPE IS NULL AND t.target_pj_type IS NOT NULL)
        OR (r.PJ_TYPE IS NOT NULL AND t.target_pj_type IS NULL)
        OR (r.PJ_TYPE != t.target_pj_type)
      )
),
-- Step 6: Find first lot AFTER target with different PJ_TYPE
-- (lowest rn that is greater than target_rn and has different PJ_TYPE)
first_diff_after AS (
    SELECT MIN(r.rn) AS rn
    FROM ranked_lots r
    CROSS JOIN target_lot t
    WHERE r.rn > t.target_rn
      AND (
        (r.PJ_TYPE IS NULL AND t.target_pj_type IS NOT NULL)
        OR (r.PJ_TYPE IS NOT NULL AND t.target_pj_type IS NULL)
        OR (r.PJ_TYPE != t.target_pj_type)
      )
)
-- Step 7: Select lots within calculated range
-- Before: MIN(first_diff_before, target - 3) to ensure minimum 3 and stop at different PJ_TYPE
-- After: MAX(first_diff_after, target + 3) to ensure minimum 3 and stop at different PJ_TYPE
SELECT
    r.CONTAINERID,
    r.EQUIPMENTID,
    r.EQUIPMENTNAME,
    r.SPECNAME,
    r.TRACKINTIMESTAMP,
    r.TRACKOUTTIMESTAMP,
    r.TRACKINQTY,
    r.TRACKOUTQTY,
    r.FINISHEDRUNCARD,
    r.PJ_WORKORDER,
    r.CONTAINERNAME,
    r.PJ_TYPE,
    r.PJ_BOP,
    r.WAFER_LOT_ID,
    r.rn - t.target_rn AS RELATIVE_POSITION
FROM ranked_lots r
CROSS JOIN target_lot t
CROSS JOIN first_diff_before b
CROSS JOIN first_diff_after a
WHERE r.rn >= LEAST(NVL(b.rn, t.target_rn - 3), t.target_rn - 3)
  AND r.rn <= GREATEST(NVL(a.rn, t.target_rn + 3), t.target_rn + 3)
ORDER BY r.rn
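A hedged sketch of how the service layer might execute this query; `conn` is assumed to be a DB-API connection (e.g. python-oracledb) and the function name is illustrative:

```python
from datetime import datetime

def fetch_adjacent_lots(conn, sql_text: str, equipment_id: str,
                        target_trackin_time: datetime,
                        time_window_hours: int = 24) -> list[dict]:
    """Bind the three named parameters declared in adjacent_lots.sql."""
    with conn.cursor() as cur:
        cur.execute(sql_text, {
            'equipment_id': equipment_id,
            'target_trackin_time': target_trackin_time,
            'time_window_hours': time_window_hours,
        })
        cols = [d[0] for d in cur.description]
        # RELATIVE_POSITION < 0 -> lots before the target; > 0 -> after
        return [dict(zip(cols, row)) for row in cur.fetchall()]
```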
33  src/mes_dashboard/sql/query_tool/equipment_jobs.sql  Normal file
@@ -0,0 +1,33 @@
-- Equipment JOB Records Query
-- Retrieves JOB records for equipment in a time period
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Dynamic placeholders:
--   EQUIPMENT_FILTER - Equipment filter condition (on RESOURCEID)
--
-- Note: DW_MES_JOB uses RESOURCEID/RESOURCENAME
--       EQUIPMENTID = RESOURCEID (same ID system)
--       Uses CREATEDATE for date filtering

SELECT
    JOBID,
    RESOURCEID,
    RESOURCENAME,
    JOBSTATUS,
    JOBMODELNAME,
    JOBORDERNAME,
    CREATEDATE,
    COMPLETEDATE,
    CAUSECODENAME,
    REPAIRCODENAME,
    SYMPTOMCODENAME,
    CONTAINERIDS,
    CONTAINERNAMES
FROM DWH.DW_MES_JOB
WHERE CREATEDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
  AND CREATEDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
  AND {{ EQUIPMENT_FILTER }}
ORDER BY RESOURCENAME, CREATEDATE DESC
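`{{ EQUIPMENT_FILTER }}` is substituted by the service before execution. One safe way to expand it is to generate bind placeholders instead of interpolating raw values; a sketch under that assumption (helper and variable names are invented):

```python
def render_equipment_filter(column: str, names: list[str]) -> tuple[str, dict]:
    """Expand {{ EQUIPMENT_FILTER }} into 'COL IN (:eq_0, :eq_1, ...)' plus binds."""
    if not names:
        return '1 = 1', {}  # degenerate filter: match everything
    placeholders = ', '.join(f':eq_{i}' for i in range(len(names)))
    return f'{column} IN ({placeholders})', {f'eq_{i}': n for i, n in enumerate(names)}

# Usage sketch against this file's template (path illustrative):
sql_template = open('equipment_jobs.sql').read()
cond, binds = render_equipment_filter('RESOURCEID', ['EQP-001', 'EQP-002'])
sql = sql_template.replace('{{ EQUIPMENT_FILTER }}', cond)
params = {'start_date': '2025-01-01', 'end_date': '2025-01-31', **binds}
```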
64  src/mes_dashboard/sql/query_tool/equipment_lots.sql  Normal file
@@ -0,0 +1,64 @@
-- Equipment Lots List Query
-- Retrieves all lots processed by equipment in a time period
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Dynamic placeholders:
--   EQUIPMENT_FILTER - Equipment filter condition (on EQUIPMENTID)
--
-- Note: Uses EQUIPMENTID/EQUIPMENTNAME (NOT RESOURCEID/RESOURCENAME)
--       JOIN CONTAINER to get CONTAINERNAME, PJ_TYPE, PJ_BOP, WAFER_LOT_ID
--       Partial track-out: Same LOT may have multiple records with same track-in
--       but different track-out times. We take the latest track-out time.
--       Only includes records with actual equipment (excludes checkpoint stations)

WITH ranked_lots AS (
    SELECT
        h.CONTAINERID,
        h.WORKCENTERNAME,
        h.EQUIPMENTID,
        h.EQUIPMENTNAME,
        h.SPECNAME,
        h.TRACKINTIMESTAMP,
        h.TRACKOUTTIMESTAMP,
        h.TRACKINQTY,
        h.TRACKOUTQTY,
        h.FINISHEDRUNCARD,
        h.PJ_WORKORDER,
        c.CONTAINERNAME,
        c.PJ_TYPE,
        c.PJ_BOP,
        c.FIRSTNAME AS WAFER_LOT_ID,
        ROW_NUMBER() OVER (
            PARTITION BY h.CONTAINERID, h.EQUIPMENTID, h.SPECNAME, h.TRACKINTIMESTAMP
            ORDER BY h.TRACKOUTTIMESTAMP DESC NULLS LAST
        ) AS rn
    FROM DWH.DW_MES_LOTWIPHISTORY h
    LEFT JOIN DWH.DW_MES_CONTAINER c ON h.CONTAINERID = c.CONTAINERID
    WHERE h.TRACKINTIMESTAMP >= TO_DATE(:start_date, 'YYYY-MM-DD')
      AND h.TRACKINTIMESTAMP < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
      AND h.EQUIPMENTID IS NOT NULL
      AND h.TRACKINTIMESTAMP IS NOT NULL
      AND {{ EQUIPMENT_FILTER }}
)
SELECT
    CONTAINERID,
    WORKCENTERNAME,
    EQUIPMENTID,
    EQUIPMENTNAME,
    SPECNAME,
    TRACKINTIMESTAMP,
    TRACKOUTTIMESTAMP,
    TRACKINQTY,
    TRACKOUTQTY,
    FINISHEDRUNCARD,
    PJ_WORKORDER,
    CONTAINERNAME,
    PJ_TYPE,
    PJ_BOP,
    WAFER_LOT_ID
FROM ranked_lots
WHERE rn = 1
ORDER BY EQUIPMENTNAME, TRACKINTIMESTAMP
25  src/mes_dashboard/sql/query_tool/equipment_materials.sql  Normal file
@@ -0,0 +1,25 @@
-- Equipment Materials Consumption Summary
-- Aggregates material consumption by equipment for a time period
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Dynamic placeholders:
--   EQUIPMENT_FILTER - Equipment filter condition (on EQUIPMENTNAME)
--
-- Note: Uses MATERIALPARTNAME (NOT MATERIALNAME)
--       Uses QTYCONSUMED (NOT CONSUMEQTY)
--       Uses TXNDATE (NOT TXNDATETIME)

SELECT
    EQUIPMENTNAME,
    MATERIALPARTNAME,
    SUM(QTYCONSUMED) AS TOTAL_CONSUMED,
    COUNT(DISTINCT CONTAINERID) AS LOT_COUNT
FROM DWH.DW_MES_LOTMATERIALSHISTORY
WHERE TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
  AND TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
  AND {{ EQUIPMENT_FILTER }}
GROUP BY EQUIPMENTNAME, MATERIALPARTNAME
ORDER BY EQUIPMENTNAME, TOTAL_CONSUMED DESC
29  src/mes_dashboard/sql/query_tool/equipment_rejects.sql  Normal file
@@ -0,0 +1,29 @@
-- Equipment Reject Statistics
-- Aggregates reject statistics by equipment for a time period
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Dynamic placeholders:
--   EQUIPMENT_FILTER - Equipment filter condition (on EQUIPMENTNAME)
--
-- Note: LOTREJECTHISTORY only has EQUIPMENTNAME, NO EQUIPMENTID
--       To filter by EQUIPMENTID, a JOIN with LOTWIPHISTORY is required
--       Uses LOSSREASONNAME (NOT REJECTREASONNAME)
--       Uses TXNDATE (NOT TXNDATETIME)
--       DEFECTQTY = SUM of REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY

SELECT
    EQUIPMENTNAME,
    LOSSREASONNAME,
    SUM(REJECTQTY) AS TOTAL_REJECT_QTY,
    SUM(NVL(REJECTQTY, 0) + NVL(STANDBYQTY, 0) + NVL(QTYTOPROCESS, 0)
        + NVL(INPROCESSQTY, 0) + NVL(PROCESSEDQTY, 0)) AS TOTAL_DEFECT_QTY,
    COUNT(DISTINCT CONTAINERID) AS AFFECTED_LOT_COUNT
FROM DWH.DW_MES_LOTREJECTHISTORY
WHERE TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
  AND TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
  AND {{ EQUIPMENT_FILTER }}
GROUP BY EQUIPMENTNAME, LOSSREASONNAME
ORDER BY EQUIPMENTNAME, TOTAL_DEFECT_QTY DESC
41  src/mes_dashboard/sql/query_tool/equipment_status_hours.sql  Normal file
@@ -0,0 +1,41 @@
-- Equipment Status Hours Query
-- Aggregates status hours by equipment for a time period
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Dynamic placeholders:
--   EQUIPMENT_FILTER - Equipment filter condition
--
-- Note: RESOURCESTATUS_SHIFT requires JOIN with RESOURCE to get RESOURCENAME
--       Uses HISTORYID = RESOURCEID relationship
--       Uses OLDSTATUSNAME/NEWSTATUSNAME (NOT STATUSNAME)
--       Uses TXNDATE (NOT SHIFTDATE)
--       Hours field: HOURS

SELECT
    r.RESOURCEID,
    r.RESOURCENAME,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'PRD' THEN s.HOURS ELSE 0 END) AS PRD_HOURS,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'SBY' THEN s.HOURS ELSE 0 END) AS SBY_HOURS,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'UDT' THEN s.HOURS ELSE 0 END) AS UDT_HOURS,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'SDT' THEN s.HOURS ELSE 0 END) AS SDT_HOURS,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'EGT' THEN s.HOURS ELSE 0 END) AS EGT_HOURS,
    SUM(CASE WHEN s.NEWSTATUSNAME = 'NST' THEN s.HOURS ELSE 0 END) AS NST_HOURS,
    SUM(s.HOURS) AS TOTAL_HOURS,
    ROUND(
        SUM(CASE WHEN s.NEWSTATUSNAME = 'PRD' THEN s.HOURS ELSE 0 END) * 100.0 /
        NULLIF(
            SUM(CASE WHEN s.NEWSTATUSNAME IN ('PRD', 'SBY', 'UDT') THEN s.HOURS ELSE 0 END),
            0
        ),
        2
    ) AS OU_PERCENT
FROM DWH.DW_MES_RESOURCESTATUS_SHIFT s
JOIN DWH.DW_MES_RESOURCE r ON s.HISTORYID = r.RESOURCEID
WHERE s.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
  AND s.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
  AND {{ EQUIPMENT_FILTER }}
GROUP BY r.RESOURCEID, r.RESOURCENAME
ORDER BY r.RESOURCENAME
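OU_PERCENT here is production hours over (PRD + SBY + UDT), with NULLIF guarding the zero-denominator case. A tiny Python mirror of that arithmetic, handy for sanity-checking expected values (the sample hours are invented):

```python
def ou_percent(prd: float, sby: float, udt: float):
    """PRD * 100 / NULLIF(PRD + SBY + UDT, 0), rounded to 2 decimals."""
    denom = prd + sby + udt
    if denom == 0:
        return None  # NULLIF(..., 0) yields NULL in SQL
    return round(prd * 100.0 / denom, 2)

assert ou_percent(18.0, 4.0, 2.0) == 75.0  # 18 of 24 counted hours
assert ou_percent(0.0, 0.0, 0.0) is None
```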
65  src/mes_dashboard/sql/query_tool/lot_history.sql  Normal file
@@ -0,0 +1,65 @@
-- LOT Production History Query
-- Retrieves complete production history for a LOT
--
-- Parameters:
--   :container_id - CONTAINERID to query (16-char hex)
--   {{ WORKCENTER_FILTER }} - Optional workcenter name filter (replaced by service)
--
-- Output columns:
--   PJ_TYPE - Product type (from DW_MES_CONTAINER)
--   PJ_BOP - BOP code (from DW_MES_CONTAINER)
--   WAFER_LOT_ID - Wafer lot ID, mapped from FIRSTNAME (from DW_MES_CONTAINER)
--
-- Note: Uses EQUIPMENTID/EQUIPMENTNAME (NOT RESOURCEID/RESOURCENAME)
--       Time fields: TRACKINTIMESTAMP/TRACKOUTTIMESTAMP (NOT TXNDATETIME)
--       Partial track-out: Same LOT may have multiple records with same track-in
--       but different track-out times. We take the latest track-out time.
--       Only includes records with actual equipment (excludes checkpoint stations)

WITH ranked_history AS (
    SELECT
        h.CONTAINERID,
        h.WORKCENTERNAME,
        h.EQUIPMENTID,
        h.EQUIPMENTNAME,
        h.SPECNAME,
        h.TRACKINTIMESTAMP,
        h.TRACKOUTTIMESTAMP,
        h.TRACKINQTY,
        h.TRACKOUTQTY,
        h.FINISHEDRUNCARD,
        h.PJ_WORKORDER,
        c.CONTAINERNAME,
        c.PJ_TYPE,
        c.PJ_BOP,
        c.FIRSTNAME AS WAFER_LOT_ID,
        ROW_NUMBER() OVER (
            PARTITION BY h.CONTAINERID, h.EQUIPMENTID, h.SPECNAME, h.TRACKINTIMESTAMP
            ORDER BY h.TRACKOUTTIMESTAMP DESC NULLS LAST
        ) AS rn
    FROM DWH.DW_MES_LOTWIPHISTORY h
    LEFT JOIN DWH.DW_MES_CONTAINER c ON h.CONTAINERID = c.CONTAINERID
    WHERE h.CONTAINERID = :container_id
      AND h.EQUIPMENTID IS NOT NULL
      AND h.TRACKINTIMESTAMP IS NOT NULL
      {{ WORKCENTER_FILTER }}
)
SELECT
    CONTAINERID,
    WORKCENTERNAME,
    EQUIPMENTID,
    EQUIPMENTNAME,
    SPECNAME,
    TRACKINTIMESTAMP,
    TRACKOUTTIMESTAMP,
    TRACKINQTY,
    TRACKOUTQTY,
    FINISHEDRUNCARD,
    PJ_WORKORDER,
    CONTAINERNAME,
    PJ_TYPE,
    PJ_BOP,
    WAFER_LOT_ID
FROM ranked_history
WHERE rn = 1
ORDER BY TRACKINTIMESTAMP
34  src/mes_dashboard/sql/query_tool/lot_holds.sql  Normal file
@@ -0,0 +1,34 @@
-- LOT HOLD Records Query
-- Retrieves HOLD/RELEASE history for a LOT
--
-- Parameters:
--   :container_id - CONTAINERID to query (16-char hex)
--
-- Note: Uses HOLDTXNDATE/RELEASETXNDATE (NOT TXNDATETIME)
--       NULL RELEASETXNDATE means currently HOLD

SELECT
    CONTAINERID,
    WORKCENTERNAME,
    HOLDTXNDATE,
    HOLDEMP,
    HOLDEMPDEPTNAME,
    HOLDREASONNAME,
    HOLDCOMMENTS,
    RELEASETXNDATE,
    RELEASEEMP,
    RELEASECOMMENTS,
    NCRID,
    CASE
        WHEN RELEASETXNDATE IS NULL THEN 'HOLD'
        ELSE 'RELEASED'
    END AS HOLD_STATUS,
    CASE
        WHEN RELEASETXNDATE IS NULL THEN
            ROUND((SYSDATE - HOLDTXNDATE) * 24, 2)
        ELSE
            ROUND((RELEASETXNDATE - HOLDTXNDATE) * 24, 2)
    END AS HOLD_HOURS
FROM DWH.DW_MES_HOLDRELEASEHISTORY
WHERE CONTAINERID = :container_id
ORDER BY HOLDTXNDATE DESC
35  src/mes_dashboard/sql/query_tool/lot_jobs.sql  Normal file
@@ -0,0 +1,35 @@
-- LOT Related JOB Records Query
-- Retrieves JOB records for equipment during LOT processing
--
-- Parameters:
--   :equipment_id - Equipment ID (EQUIPMENTID = RESOURCEID in same ID system)
--   :time_start - Start time of LOT processing
--   :time_end - End time of LOT processing
--
-- Note: DW_MES_JOB uses RESOURCEID/RESOURCENAME
--       LOTWIPHISTORY uses EQUIPMENTID/EQUIPMENTNAME
--       EQUIPMENTID = RESOURCEID (same ID system, can JOIN directly)
--       CONTAINERIDS/CONTAINERNAMES are comma-separated strings

SELECT
    j.JOBID,
    j.RESOURCEID,
    j.RESOURCENAME,
    j.JOBSTATUS,
    j.JOBMODELNAME,
    j.JOBORDERNAME,
    j.CREATEDATE,
    j.COMPLETEDATE,
    j.CAUSECODENAME,
    j.REPAIRCODENAME,
    j.SYMPTOMCODENAME,
    j.CONTAINERIDS,
    j.CONTAINERNAMES
FROM DWH.DW_MES_JOB j
WHERE j.RESOURCEID = :equipment_id
  AND (
    (j.CREATEDATE BETWEEN :time_start AND :time_end)
    OR (j.COMPLETEDATE BETWEEN :time_start AND :time_end)
    OR (j.CREATEDATE <= :time_start AND (j.COMPLETEDATE IS NULL OR j.COMPLETEDATE >= :time_end))
  )
ORDER BY j.CREATEDATE
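The three OR branches test whether a JOB's lifetime touches the LOT's processing window: created inside it, completed inside it, or spanning it entirely. A small Python mirror of the same predicate, useful for unit-testing boundary cases (names are illustrative):

```python
from datetime import datetime
from typing import Optional

def job_touches_window(create: datetime, complete: Optional[datetime],
                       time_start: datetime, time_end: datetime) -> bool:
    """Mirrors the WHERE clause of lot_jobs.sql, branch for branch."""
    created_in = time_start <= create <= time_end
    completed_in = complete is not None and time_start <= complete <= time_end
    spans = create <= time_start and (complete is None or complete >= time_end)
    return created_in or completed_in or spans
```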
21  src/mes_dashboard/sql/query_tool/lot_materials.sql  Normal file
@@ -0,0 +1,21 @@
-- LOT Materials Consumption Query
-- Retrieves material consumption records for a LOT
--
-- Parameters:
--   :container_id - CONTAINERID to query (16-char hex)
--
-- Note: Uses MATERIALPARTNAME (NOT MATERIALNAME)
--       Uses QTYCONSUMED (NOT CONSUMEQTY)
--       Uses TXNDATE (NOT TXNDATETIME)

SELECT
    CONTAINERID,
    MATERIALPARTNAME,
    MATERIALLOTNAME,
    QTYCONSUMED,
    WORKCENTERNAME,
    EQUIPMENTNAME,
    TXNDATE
FROM DWH.DW_MES_LOTMATERIALSHISTORY
WHERE CONTAINERID = :container_id
ORDER BY TXNDATE
26  src/mes_dashboard/sql/query_tool/lot_rejects.sql  Normal file
@@ -0,0 +1,26 @@
-- LOT Reject Records Query
-- Retrieves reject (defect) records for a LOT
--
-- Parameters:
--   :container_id - CONTAINERID to query (16-char hex)
--
-- Note: Uses LOSSREASONNAME (NOT REJECTREASONNAME)
--       Uses TXNDATE (NOT TXNDATETIME)
--       Only has EQUIPMENTNAME, NO EQUIPMENTID field
--       DEFECTQTY = SUM of REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY

SELECT
    CONTAINERID,
    LOSSREASONNAME,
    REJECTQTY,
    NVL(REJECTQTY, 0) + NVL(STANDBYQTY, 0) + NVL(QTYTOPROCESS, 0)
        + NVL(INPROCESSQTY, 0) + NVL(PROCESSEDQTY, 0) AS DEFECTQTY,
    WORKCENTERNAME,
    EQUIPMENTNAME,
    TXNDATE,
    COMMENTS,
    REJECTCAUSE,
    REJECTCOMMENT
FROM DWH.DW_MES_LOTREJECTHISTORY
WHERE CONTAINERID = :container_id
ORDER BY TXNDATE
17  src/mes_dashboard/sql/query_tool/lot_resolve_id.sql  Normal file
@@ -0,0 +1,17 @@
-- LOT ID to CONTAINERID Resolution
-- Converts user-input LOT ID (CONTAINERNAME) to internal CONTAINERID
--
-- Parameters:
--   :container_names - List of CONTAINERNAME values (bind variable list)
--
-- Note: CONTAINERID is 16-char hex (e.g., '48810380001cba48')
--       CONTAINERNAME is user-visible LOT ID (e.g., 'GA23100020-A00-011')

SELECT
    CONTAINERID,
    CONTAINERNAME,
    MFGORDERNAME,
    SPECNAME,
    QTY
FROM DWH.DW_MES_CONTAINER
WHERE CONTAINERNAME IN ({{ CONTAINER_NAMES }})
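`{{ CONTAINER_NAMES }}` — like `{{ FINISHED_NAMES }}` and `{{ WORK_ORDERS }}` in the two resolver queries that follow — stands in for a generated list of bind variables, since Oracle cannot bind an entire IN list to a single placeholder. A sketch of the expansion (helper name is an assumption):

```python
def expand_in_list(prefix: str, values: list[str]) -> tuple[str, dict]:
    """Turn ['A', 'B'] into ':ct_0, :ct_1' plus the matching bind dict."""
    placeholders = ', '.join(f':{prefix}_{i}' for i in range(len(values)))
    return placeholders, {f'{prefix}_{i}': v for i, v in enumerate(values)}

# Usage sketch (path illustrative):
in_list, binds = expand_in_list('ct', ['GA23100020-A00-011', 'GA23100020-A00-012'])
sql = open('lot_resolve_id.sql').read().replace('{{ CONTAINER_NAMES }}', in_list)
```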
13  src/mes_dashboard/sql/query_tool/lot_resolve_sn.sql  Normal file
@@ -0,0 +1,13 @@
-- Serial Number (流水號) to CONTAINERID Resolution
-- Converts finished product serial number to CONTAINERID list
--
-- Parameters:
--   :finished_names - List of FINISHEDNAME values (bind variable list)
--
-- Note: One FINISHEDNAME may correspond to multiple CONTAINERIDs (2-5 typical)

SELECT DISTINCT
    CONTAINERID,
    FINISHEDNAME
FROM DWH.DW_MES_PJ_COMBINEDASSYLOTS
WHERE FINISHEDNAME IN ({{ FINISHED_NAMES }})
14  src/mes_dashboard/sql/query_tool/lot_resolve_wo.sql  Normal file
@@ -0,0 +1,14 @@
-- GA Work Order to CONTAINERID Resolution
-- Expands work order to all associated CONTAINERIDs
--
-- Parameters:
--   :work_orders - List of PJ_WORKORDER values (bind variable list)
--
-- Note: One work order may expand to many CONTAINERIDs (can be 100+)
--       Using LOTWIPHISTORY because PJ_WORKORDER has 100% completeness there

SELECT DISTINCT
    CONTAINERID,
    PJ_WORKORDER
FROM DWH.DW_MES_LOTWIPHISTORY
WHERE PJ_WORKORDER IN ({{ WORK_ORDERS }})
26  src/mes_dashboard/sql/query_tool/lot_split_merge_history.sql  Normal file
@@ -0,0 +1,26 @@
-- LOT Split/Merge History Query (拆併批歷史紀錄)
-- Query by CONTAINERID list from same work order
-- Check both TARGET (CONTAINERID) and SOURCE (FROMCONTAINERID) to find all related records

WITH work_order_lots AS (
    SELECT CONTAINERID
    FROM DWH.DW_MES_CONTAINER
    WHERE MFGORDERNAME = :work_order
)
SELECT
    h.HISTORYMAINLINEID,
    h.CDONAME AS OPERATION_TYPE,
    h.CONTAINERID AS TARGET_CONTAINERID,
    h.CONTAINERNAME AS TARGET_LOT,
    h.FROMCONTAINERID AS SOURCE_CONTAINERID,
    h.FROMCONTAINERNAME AS SOURCE_LOT,
    h.QTY AS TARGET_QTY,
    h.TXNDATE
FROM DWH.DW_MES_HM_LOTMOVEOUT h
WHERE (
    h.CONTAINERID IN (SELECT CONTAINERID FROM work_order_lots)
    OR h.FROMCONTAINERID IN (SELECT CONTAINERID FROM work_order_lots)
)
  AND h.FROMCONTAINERID IS NOT NULL
ORDER BY h.TXNDATE
FETCH FIRST 100 ROWS ONLY
30  src/mes_dashboard/sql/query_tool/lot_splits.sql  Normal file
@@ -0,0 +1,30 @@
-- LOT Split/Merge Records (拆併批紀錄)
-- Shows what serial numbers were produced from this LOT
-- and what other LOTs were combined together
--
-- Parameters:
--   :container_id - Target CONTAINERID (16-char hex)
--
-- Returns:
--   - FINISHEDNAME: Serial number produced
--   - Related LOTs that were combined to create each serial number
--   - PJ_COMBINEDRATIO: Contribution ratio (1.0 = 100%)
--   - PJ_GOODDIEQTY: Good die quantity contributed

SELECT
    p.FINISHEDNAME,
    p.CONTAINERID,
    p.CONTAINERNAME AS LOT_ID,
    p.PJ_WORKORDER,
    p.PJ_COMBINEDRATIO,
    p.PJ_GOODDIEQTY,
    p.PJ_ORIGINALGOODDIEQTY,
    p.ORIGINALSTARTDATE
FROM DWH.DW_MES_PJ_COMBINEDASSYLOTS p
WHERE p.FINISHEDNAME IN (
    -- Find all serial numbers that this LOT contributed to
    SELECT DISTINCT FINISHEDNAME
    FROM DWH.DW_MES_PJ_COMBINEDASSYLOTS
    WHERE CONTAINERID = :container_id
)
ORDER BY p.FINISHEDNAME, p.ORIGINALSTARTDATE, p.CONTAINERNAME
116  src/mes_dashboard/sql/tmtt_defect/base_data.sql  Normal file
@@ -0,0 +1,116 @@
-- TMTT Defect Analysis - Base Data Query
-- Returns LOT-level data with TMTT input, defects (印字/腳型), and MOLD equipment
--
-- Parameters:
--   :start_date - Start date (YYYY-MM-DD)
--   :end_date - End date (YYYY-MM-DD)
--
-- Tables used:
--   DWH.DW_MES_LOTWIPHISTORY (TMTT station records, MOLD station records)
--   DWH.DW_MES_LOTREJECTHISTORY (defect records)
--   DWH.DW_MES_CONTAINER (product info)
--   DWH.DW_MES_WIP (WORKFLOWNAME, filtered by PRODUCTLINENAME <> '點測')
--
-- Notes:
--   - LOSSREASONNAME: '276_腳型不良', '277_印字不良'
--   - TMTT station: WORKCENTERNAME matching 'TMTT' or '測試'
--   - MOLD station: WORKCENTERNAME matching '成型'
--   - Multiple MOLD equipment per LOT: take earliest TRACKINTIMESTAMP
--   - TMTT dedup: one row per CONTAINERID, take latest TRACKINTIMESTAMP
--   - LOTREJECTHISTORY only has EQUIPMENTNAME (no EQUIPMENTID)
--   - WORKFLOW: from DW_MES_WIP.WORKFLOWNAME (exclude PRODUCTLINENAME='點測')
--   - Defect qty = SUM(REJECTQTY + STANDBYQTY + QTYTOPROCESS + INPROCESSQTY + PROCESSEDQTY)

WITH tmtt_records AS (
    SELECT /*+ MATERIALIZE */
        h.CONTAINERID,
        h.EQUIPMENTID AS TMTT_EQUIPMENTID,
        h.EQUIPMENTNAME AS TMTT_EQUIPMENTNAME,
        h.TRACKINQTY,
        h.TRACKINTIMESTAMP,
        h.TRACKOUTTIMESTAMP,
        h.FINISHEDRUNCARD,
        h.SPECNAME,
        h.WORKCENTERNAME,
        ROW_NUMBER() OVER (
            PARTITION BY h.CONTAINERID
            ORDER BY h.TRACKINTIMESTAMP DESC, h.TRACKOUTTIMESTAMP DESC NULLS LAST
        ) AS rn
    FROM DWH.DW_MES_LOTWIPHISTORY h
    WHERE h.TRACKINTIMESTAMP >= TO_DATE(:start_date, 'YYYY-MM-DD')
      AND h.TRACKINTIMESTAMP < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
      AND (UPPER(h.WORKCENTERNAME) LIKE '%TMTT%' OR h.WORKCENTERNAME LIKE '%測試%')
      AND h.EQUIPMENTID IS NOT NULL
      AND h.TRACKINTIMESTAMP IS NOT NULL
),
tmtt_deduped AS (
    SELECT * FROM tmtt_records WHERE rn = 1
),
tmtt_rejects AS (
    SELECT /*+ MATERIALIZE */
        r.CONTAINERID,
        r.LOSSREASONNAME,
        SUM(NVL(r.REJECTQTY, 0) + NVL(r.STANDBYQTY, 0) + NVL(r.QTYTOPROCESS, 0)
            + NVL(r.INPROCESSQTY, 0) + NVL(r.PROCESSEDQTY, 0)) AS REJECTQTY
    FROM DWH.DW_MES_LOTREJECTHISTORY r
    WHERE r.TXNDATE >= TO_DATE(:start_date, 'YYYY-MM-DD')
      AND r.TXNDATE < TO_DATE(:end_date, 'YYYY-MM-DD') + 1
      AND (UPPER(r.WORKCENTERNAME) LIKE '%TMTT%' OR r.WORKCENTERNAME LIKE '%測試%')
      AND r.LOSSREASONNAME IN ('276_腳型不良', '277_印字不良')
    GROUP BY r.CONTAINERID, r.LOSSREASONNAME
),
mold_records AS (
    SELECT /*+ MATERIALIZE */
        m.CONTAINERID,
        m.EQUIPMENTID AS MOLD_EQUIPMENTID,
        m.EQUIPMENTNAME AS MOLD_EQUIPMENTNAME,
        ROW_NUMBER() OVER (
            PARTITION BY m.CONTAINERID
            ORDER BY m.TRACKINTIMESTAMP ASC
        ) AS mold_rn
    FROM DWH.DW_MES_LOTWIPHISTORY m
    WHERE m.CONTAINERID IN (SELECT CONTAINERID FROM tmtt_deduped)
      AND (m.WORKCENTERNAME LIKE '%成型%')
      AND m.EQUIPMENTID IS NOT NULL
),
mold_deduped AS (
    SELECT * FROM mold_records WHERE mold_rn = 1
),
product_info AS (
    SELECT /*+ MATERIALIZE */
        c.CONTAINERID,
        c.CONTAINERNAME,
        c.PJ_TYPE,
        c.PRODUCTLINENAME
    FROM DWH.DW_MES_CONTAINER c
    WHERE c.CONTAINERID IN (SELECT CONTAINERID FROM tmtt_deduped)
),
workflow_info AS (
    SELECT /*+ MATERIALIZE */ DISTINCT
        w.CONTAINERID,
        w.WORKFLOWNAME
    FROM DWH.DW_MES_WIP w
    WHERE w.CONTAINERID IN (SELECT CONTAINERID FROM tmtt_deduped)
      AND w.PRODUCTLINENAME <> '點測'
)
SELECT
    t.CONTAINERID,
    p.CONTAINERNAME,
    p.PJ_TYPE,
    p.PRODUCTLINENAME,
    NVL(wf.WORKFLOWNAME, t.SPECNAME) AS WORKFLOW,
    t.FINISHEDRUNCARD,
    t.TMTT_EQUIPMENTID,
    t.TMTT_EQUIPMENTNAME,
    t.TRACKINQTY,
    t.TRACKINTIMESTAMP,
    m.MOLD_EQUIPMENTID,
    m.MOLD_EQUIPMENTNAME,
    r.LOSSREASONNAME,
    NVL(r.REJECTQTY, 0) AS REJECTQTY
FROM tmtt_deduped t
LEFT JOIN product_info p ON t.CONTAINERID = p.CONTAINERID
LEFT JOIN workflow_info wf ON t.CONTAINERID = wf.CONTAINERID
LEFT JOIN mold_deduped m ON t.CONTAINERID = m.CONTAINERID
LEFT JOIN tmtt_rejects r ON t.CONTAINERID = r.CONTAINERID
ORDER BY t.TRACKINTIMESTAMP
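This query is the single data source for the pandas helpers earlier in this diff (`_build_all_charts`, `_build_daily_trend`, `_build_detail_table`). A sketch of the presumed wiring; the SQLAlchemy `engine` and the way `sql_text` is loaded are assumptions:

```python
import pandas as pd
from sqlalchemy import text

# One result row per CONTAINERID per defect type (LOSSREASONNAME), which is
# why the helpers deduplicate on CONTAINERID before summing TRACKINQTY.
with engine.connect() as conn:  # `engine`: assumed app-level SQLAlchemy engine
    df = pd.read_sql(
        text(sql_text),  # contents of base_data.sql (loading mechanism assumed)
        conn,
        params={'start_date': '2025-01-01', 'end_date': '2025-01-31'},
    )

charts = _build_all_charts(df)
trend = _build_daily_trend(df)
```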
12  src/mes_dashboard/static/favicon.svg  Normal file
@@ -0,0 +1,12 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
  <defs>
    <linearGradient id="g" x1="0" y1="0" x2="1" y2="1">
      <stop offset="0%" stop-color="#1d4ed8" />
      <stop offset="100%" stop-color="#0f766e" />
    </linearGradient>
  </defs>
  <rect width="64" height="64" rx="12" fill="url(#g)" />
  <path d="M16 20h12c10 0 14 5 14 12s-4 12-14 12H16V20zm10 18c6 0 8-2.1 8-6s-2-6-8-6h-4v12h4z"
        fill="#fff" />
  <rect x="44" y="20" width="4" height="24" rx="2" fill="#fff" />
</svg>
After: Size 500 B
20  src/mes_dashboard/static/js/chart.umd.min.js  vendored  Normal file
File diff suppressed because one or more lines are too long
3056  src/mes_dashboard/static/js/query-tool.js  Normal file
File diff suppressed because it is too large
@@ -4,6 +4,7 @@
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta name="csrf-token" content="{{ csrf_token() }}">
    <link rel="icon" type="image/svg+xml" href="{{ url_for('static', filename='favicon.svg') }}">
    <title>{% block title %}MES Dashboard{% endblock %}</title>

    <!-- Toast 樣式 -->
@@ -3,7 +3,7 @@
{% block title %}效能監控 - MES Dashboard{% endblock %}

{% block head_extra %}
<script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.1/dist/chart.umd.min.js"></script>
<script src="{{ url_for('static', filename='js/chart.umd.min.js') }}"></script>
<style>
    * {
        margin: 0;
@@ -391,6 +391,12 @@
      {% if can_view_page('/job-query') %}
      <button class="tab" data-target="jobQueryFrame">設備維修查詢</button>
      {% endif %}
      {% if can_view_page('/query-tool') %}
      <button class="tab" data-target="queryToolFrame">批次追蹤工具</button>
      {% endif %}
      {% if can_view_page('/tmtt-defect') %}
      <button class="tab" data-target="tmttDefectFrame">TMTT不良分析</button>
      {% endif %}
    </div>
  </details>
@@ -427,6 +433,12 @@
  {% if can_view_page('/job-query') %}
  <iframe id="jobQueryFrame" data-src="/job-query" title="設備維修查詢"></iframe>
  {% endif %}
  {% if can_view_page('/query-tool') %}
  <iframe id="queryToolFrame" data-src="/query-tool" title="批次追蹤工具"></iframe>
  {% endif %}
  {% if can_view_page('/tmtt-defect') %}
  <iframe id="tmttDefectFrame" data-src="/tmtt-defect" title="TMTT不良分析"></iframe>
  {% endif %}
  {% if is_admin %}
  <iframe id="toolFrame" title="開發工具"></iframe>
  {% endif %}
1267  src/mes_dashboard/templates/query_tool.html  Normal file
File diff suppressed because it is too large
271  src/mes_dashboard/templates/tmtt_defect.html  Normal file
@@ -0,0 +1,271 @@
{% extends "_base.html" %}

{% block title %}TMTT 印字腳型不良分析{% endblock %}

{% block head_extra %}
<script src="{{ url_for('static', filename='js/echarts.min.js') }}"></script>
<style>
  :root {
    --bg: #f0f2f5;
    --card-bg: #ffffff;
    --primary: #667eea;
    --primary-light: #818cf8;
    --success: #22c55e;
    --danger: #ef4444;
    --warning: #f59e0b;
    --text: #1f2937;
    --text-secondary: #6b7280;
    --border: #e5e7eb;
    --print-color: #ef4444;
    --lead-color: #f59e0b;
  }

  * { margin: 0; padding: 0; box-sizing: border-box; }
  body { background: var(--bg); font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif; color: var(--text); }

  .page-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white; padding: 20px 32px;
  }
  .page-header h1 { font-size: 22px; font-weight: 600; }
  .page-header .subtitle { font-size: 13px; opacity: 0.8; margin-top: 4px; }

  .container { max-width: 1600px; margin: 0 auto; padding: 20px 24px; }

  /* Filter Bar */
  .filter-bar {
    background: var(--card-bg); border-radius: 10px; padding: 16px 20px;
    display: flex; align-items: center; gap: 16px; margin-bottom: 20px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.08);
  }
  .filter-bar label { font-size: 13px; color: var(--text-secondary); font-weight: 500; }
  .filter-bar input[type="date"] {
    padding: 6px 10px; border: 1px solid var(--border); border-radius: 6px;
    font-size: 13px; color: var(--text);
  }
  .btn-query {
    padding: 8px 24px; background: var(--primary); color: white; border: none;
    border-radius: 6px; font-size: 13px; font-weight: 600; cursor: pointer;
    transition: background 0.2s;
  }
  .btn-query:hover { background: var(--primary-light); }
  .btn-query:disabled { opacity: 0.5; cursor: not-allowed; }

  /* KPI Cards */
  .kpi-row {
    display: grid; grid-template-columns: repeat(6, 1fr); gap: 12px; margin-bottom: 20px;
  }
  .kpi-card {
    background: var(--card-bg); border-radius: 10px; padding: 16px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.08); border-left: 4px solid var(--primary);
  }
  .kpi-card.print { border-left-color: var(--print-color); }
  .kpi-card.lead { border-left-color: var(--lead-color); }
  .kpi-card .kpi-label { font-size: 12px; color: var(--text-secondary); margin-bottom: 6px; }
  .kpi-card .kpi-value { font-size: 22px; font-weight: 700; }
  .kpi-card .kpi-unit { font-size: 12px; color: var(--text-secondary); margin-left: 4px; }

  /* Chart Grid */
  .chart-grid {
    display: grid; grid-template-columns: 1fr; gap: 16px; margin-bottom: 20px;
  }
  .chart-card {
    background: var(--card-bg); border-radius: 10px; padding: 16px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.08);
  }
  .chart-card h3 { font-size: 14px; font-weight: 600; margin-bottom: 12px; color: var(--text); }
  .chart-container { height: 380px; }

  /* Detail Section */
  .detail-section {
    background: var(--card-bg); border-radius: 10px; padding: 16px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.08);
  }
  .detail-header {
    display: flex; align-items: center; justify-content: space-between;
    margin-bottom: 12px;
  }
  .detail-header h3 { font-size: 14px; font-weight: 600; }
  .detail-actions { display: flex; gap: 8px; align-items: center; }

  .filter-tag {
    display: inline-flex; align-items: center; gap: 4px;
    background: #eff6ff; color: #2563eb; padding: 4px 10px;
    border-radius: 16px; font-size: 12px;
  }
  .filter-tag button {
    background: none; border: none; color: #2563eb; cursor: pointer;
    font-size: 14px; line-height: 1;
  }

  .btn-export {
    padding: 6px 14px; background: var(--success); color: white; border: none;
    border-radius: 6px; font-size: 12px; font-weight: 600; cursor: pointer;
  }
  .btn-export:hover { opacity: 0.9; }

  .btn-clear {
    padding: 6px 14px; background: #f3f4f6; color: var(--text-secondary); border: none;
    border-radius: 6px; font-size: 12px; cursor: pointer;
  }
  .btn-clear:hover { background: #e5e7eb; }

  /* Table */
  .detail-table-wrap { overflow-x: auto; max-height: 500px; overflow-y: auto; }
  .detail-table {
    width: 100%; border-collapse: collapse; font-size: 12px;
  }
  .detail-table th {
    background: #f8fafc; position: sticky; top: 0; z-index: 1;
    padding: 8px 10px; text-align: left; font-weight: 600;
    border-bottom: 2px solid var(--border); white-space: nowrap; cursor: pointer;
  }
  .detail-table th:hover { background: #f0f2f5; }
  .detail-table td {
    padding: 6px 10px; border-bottom: 1px solid #f3f4f6; white-space: nowrap;
  }
  .detail-table tr:hover td { background: #f8fafc; }

  .sort-indicator { font-size: 10px; margin-left: 4px; color: var(--text-secondary); }

  /* Empty state */
  .empty-state {
    text-align: center; padding: 60px 20px; color: var(--text-secondary);
  }
  .empty-state .icon { font-size: 48px; margin-bottom: 12px; }
  .empty-state p { font-size: 14px; }

  /* Responsive */
  @media (max-width: 1200px) { .kpi-row { grid-template-columns: repeat(3, 1fr); } }
  @media (max-width: 768px) {
    .kpi-row { grid-template-columns: repeat(2, 1fr); }
    .filter-bar { flex-wrap: wrap; }
  }
</style>
{% endblock %}

{% block content %}
<div class="page-header">
  <h1>TMTT 印字與腳型不良分析</h1>
  <div class="subtitle">分析 TMTT 站印字/腳型不良率,按 WORKFLOW、PACKAGE、TYPE、TMTT機台、MOLD機台 維度</div>
</div>

<div class="container">
  <!-- Filter Bar -->
  <div class="filter-bar">
    <label>起始日期</label>
    <input type="date" id="startDate">
    <label>結束日期</label>
    <input type="date" id="endDate">
    <button class="btn-query" id="btnQuery" onclick="executeQuery()">查詢</button>
  </div>

  <!-- KPI Cards -->
  <div class="kpi-row" id="kpiRow" style="display:none;">
    <div class="kpi-card">
      <div class="kpi-label">投入數</div>
      <div class="kpi-value" id="kpiInput">-</div>
    </div>
    <div class="kpi-card">
      <div class="kpi-label">LOT 數</div>
      <div class="kpi-value" id="kpiLots">-</div>
    </div>
    <div class="kpi-card print">
      <div class="kpi-label">印字不良數</div>
      <div class="kpi-value" id="kpiPrintQty">-</div>
    </div>
    <div class="kpi-card print">
      <div class="kpi-label">印字不良率</div>
      <div class="kpi-value" id="kpiPrintRate">-<span class="kpi-unit">%</span></div>
    </div>
    <div class="kpi-card lead">
      <div class="kpi-label">腳型不良數</div>
      <div class="kpi-value" id="kpiLeadQty">-</div>
    </div>
    <div class="kpi-card lead">
      <div class="kpi-label">腳型不良率</div>
      <div class="kpi-value" id="kpiLeadRate">-<span class="kpi-unit">%</span></div>
    </div>
  </div>

  <!-- Charts -->
  <div class="chart-grid" id="chartGrid" style="display:none;">
    <div class="chart-card">
      <h3>依 WORKFLOW</h3>
      <div class="chart-container" id="chartWorkflow"></div>
    </div>
    <div class="chart-card">
      <h3>依 PACKAGE</h3>
      <div class="chart-container" id="chartPackage"></div>
    </div>
    <div class="chart-card">
      <h3>依 TYPE</h3>
      <div class="chart-container" id="chartType"></div>
    </div>
    <div class="chart-card">
      <h3>依 TMTT 機台</h3>
      <div class="chart-container" id="chartTmtt"></div>
    </div>
    <div class="chart-card">
      <h3>依 MOLD 機台</h3>
      <div class="chart-container" id="chartMold"></div>
    </div>
    <div class="chart-card">
      <h3>每日印字不良率趨勢</h3>
      <div class="chart-container" id="chartPrintTrend"></div>
    </div>
    <div class="chart-card">
      <h3>每日腳型不良率趨勢</h3>
      <div class="chart-container" id="chartLeadTrend"></div>
    </div>
  </div>

  <!-- Detail Table -->
  <div class="detail-section" id="detailSection" style="display:none;">
    <div class="detail-header">
      <h3>明細清單 <span id="detailCount" style="font-weight:400; color:var(--text-secondary);"></span></h3>
      <div class="detail-actions">
        <span id="filterTag" style="display:none;" class="filter-tag">
          <span id="filterLabel"></span>
          <button onclick="clearFilter()">×</button>
        </span>
        <button class="btn-clear" onclick="clearFilter()" id="btnClear" style="display:none;">清除篩選</button>
        <button class="btn-export" onclick="exportCsv()">匯出 CSV</button>
      </div>
    </div>
    <div class="detail-table-wrap">
      <table class="detail-table" id="detailTable">
        <thead>
          <tr>
            <th onclick="sortTable('CONTAINERNAME')">LOT ID <span class="sort-indicator" id="sort_CONTAINERNAME"></span></th>
            <th onclick="sortTable('PJ_TYPE')">TYPE <span class="sort-indicator" id="sort_PJ_TYPE"></span></th>
            <th onclick="sortTable('PRODUCTLINENAME')">PACKAGE <span class="sort-indicator" id="sort_PRODUCTLINENAME"></span></th>
            <th onclick="sortTable('WORKFLOW')">WORKFLOW <span class="sort-indicator" id="sort_WORKFLOW"></span></th>
            <th onclick="sortTable('FINISHEDRUNCARD')">完工流水碼 <span class="sort-indicator" id="sort_FINISHEDRUNCARD"></span></th>
            <th onclick="sortTable('TMTT_EQUIPMENTNAME')">TMTT設備 <span class="sort-indicator" id="sort_TMTT_EQUIPMENTNAME"></span></th>
            <th onclick="sortTable('MOLD_EQUIPMENTNAME')">MOLD設備 <span class="sort-indicator" id="sort_MOLD_EQUIPMENTNAME"></span></th>
            <th onclick="sortTable('INPUT_QTY')">投入數 <span class="sort-indicator" id="sort_INPUT_QTY"></span></th>
            <th onclick="sortTable('PRINT_DEFECT_QTY')">印字不良 <span class="sort-indicator" id="sort_PRINT_DEFECT_QTY"></span></th>
            <th onclick="sortTable('PRINT_DEFECT_RATE')">印字不良率(%) <span class="sort-indicator" id="sort_PRINT_DEFECT_RATE"></span></th>
            <th onclick="sortTable('LEAD_DEFECT_QTY')">腳型不良 <span class="sort-indicator" id="sort_LEAD_DEFECT_QTY"></span></th>
            <th onclick="sortTable('LEAD_DEFECT_RATE')">腳型不良率(%) <span class="sort-indicator" id="sort_LEAD_DEFECT_RATE"></span></th>
          </tr>
        </thead>
        <tbody id="detailBody"></tbody>
      </table>
    </div>
  </div>

  <!-- Empty State -->
  <div class="empty-state" id="emptyState">
    <div class="icon">📊</div>
    <p>請選擇日期範圍後點擊「查詢」</p>
  </div>
</div>
{% endblock %}

{% block scripts %}
{% set tmtt_defect_js = frontend_asset('tmtt-defect.js') %}
<script type="module" src="{{ tmtt_defect_js }}"></script>
{% endblock %}
@@ -7,6 +7,18 @@ import os

# Add the src directory to Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
_PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
_TMP_DIR = os.path.join(_PROJECT_ROOT, 'tmp')

# Test baseline env: keep pytest isolated from local runtime/.env side effects.
os.environ.setdefault('FLASK_ENV', 'testing')
os.environ.setdefault('REDIS_ENABLED', 'false')
os.environ.setdefault('RUNTIME_CONTRACT_ENFORCE', 'false')
os.environ.setdefault('SLOW_QUERY_THRESHOLD', '1.0')
os.environ.setdefault('WATCHDOG_RUNTIME_DIR', _TMP_DIR)
os.environ.setdefault('WATCHDOG_RESTART_FLAG', os.path.join(_TMP_DIR, 'mes_dashboard_restart.flag'))
os.environ.setdefault('WATCHDOG_PID_FILE', os.path.join(_TMP_DIR, 'gunicorn.pid'))
os.environ.setdefault('WATCHDOG_STATE_FILE', os.path.join(_TMP_DIR, 'mes_dashboard_restart_state.json'))

import mes_dashboard.core.database as db
from mes_dashboard.app import create_app
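Because the baseline uses `os.environ.setdefault`, anything already exported by the shell or CI still wins; only unset keys pick up the test defaults. A quick illustration:

```python
import os

os.environ['SLOW_QUERY_THRESHOLD'] = '9.9'            # e.g. exported by CI
os.environ.pop('REDIS_ENABLED', None)                 # ensure unset for the demo
os.environ.setdefault('SLOW_QUERY_THRESHOLD', '1.0')  # no-op: key already set
os.environ.setdefault('REDIS_ENABLED', 'false')       # applied: key was missing
assert os.environ['SLOW_QUERY_THRESHOLD'] == '9.9'
assert os.environ['REDIS_ENABLED'] == 'false'
```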
@@ -7,9 +7,7 @@ Run with: pytest tests/e2e/test_admin_auth_e2e.py -v --run-integration
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import sys
|
||||
import os
|
||||
@@ -18,6 +16,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
|
||||
import mes_dashboard.core.database as db
|
||||
from mes_dashboard.app import create_app
|
||||
from mes_dashboard.services import page_registry
|
||||
from mes_dashboard.routes import auth_routes
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -51,7 +50,7 @@ def app(temp_page_status):
|
||||
|
||||
app = create_app('testing')
|
||||
app.config['TESTING'] = True
|
||||
app.config['WTF_CSRF_ENABLED'] = False
|
||||
app.config['CSRF_ENABLED'] = False
|
||||
|
||||
yield app
|
||||
|
||||
@@ -65,28 +64,31 @@ def client(app):
|
||||
return app.test_client()
|
||||
|
||||
|
||||
def mock_ldap_success(mail="ymirliu@panjit.com.tw"):
|
||||
"""Helper to create mock for successful LDAP auth."""
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"success": True,
|
||||
"user": {
|
||||
"username": "92367",
|
||||
"displayName": "Test Admin",
|
||||
"mail": mail,
|
||||
"department": "Test Department"
|
||||
}
|
||||
@pytest.fixture(autouse=True)
|
||||
def clear_login_rate_limit():
|
||||
"""Reset in-memory login attempts to avoid cross-test interference."""
|
||||
auth_routes._login_attempts.clear()
|
||||
yield
|
||||
auth_routes._login_attempts.clear()
|
||||
|
||||
|
||||
def _mock_admin_user(mail: str = "ymirliu@panjit.com.tw") -> dict:
|
||||
return {
|
||||
"username": "92367",
|
||||
"displayName": "Test Admin",
|
||||
"mail": mail,
|
||||
"department": "Test Department",
|
||||
}
|
||||
return mock_response
|
||||
|
||||
|
||||
class TestFullLoginLogoutFlow:
|
||||
"""E2E tests for complete login/logout flow."""
|
||||
|
||||
@patch('mes_dashboard.services.auth_service.requests.post')
|
||||
def test_complete_admin_login_workflow(self, mock_post, client):
|
||||
@patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
|
||||
@patch('mes_dashboard.routes.auth_routes.authenticate')
|
||||
def test_complete_admin_login_workflow(self, mock_auth, _mock_is_admin, client):
|
||||
"""Test complete admin login workflow."""
|
||||
mock_post.return_value = mock_ldap_success()
|
||||
mock_auth.return_value = _mock_admin_user()
|
||||
|
||||
# 1. Access portal - should see login link
|
||||
response = client.get("/")
|
||||
@@ -146,10 +148,11 @@ class TestPageAccessControlFlow:
|
||||
content = response.data.decode("utf-8")
|
||||
assert "開發中" in content or "403" in content
|
||||
|
||||
@patch('mes_dashboard.services.auth_service.requests.post')
|
||||
def test_admin_can_access_all_pages(self, mock_post, client, temp_page_status):
|
||||
@patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
|
||||
@patch('mes_dashboard.routes.auth_routes.authenticate')
|
||||
def test_admin_can_access_all_pages(self, mock_auth, _mock_is_admin, client, temp_page_status):
|
||||
"""Test admin users can access all pages."""
|
||||
mock_post.return_value = mock_ldap_success()
|
||||
mock_auth.return_value = _mock_admin_user()
|
||||
|
||||
# 1. Login as admin
|
||||
client.post("/admin/login", data={

@@ -169,10 +172,11 @@ class TestPageAccessControlFlow:

 class TestPageManagementFlow:
     """E2E tests for page management flow."""

-    @patch('mes_dashboard.services.auth_service.requests.post')
-    def test_admin_can_change_page_status(self, mock_post, client, temp_page_status):
+    @patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
+    @patch('mes_dashboard.routes.auth_routes.authenticate')
+    def test_admin_can_change_page_status(self, mock_auth, _mock_is_admin, client, temp_page_status):
         """Test admin can change page status via management interface."""
-        mock_post.return_value = mock_ldap_success()
+        mock_auth.return_value = _mock_admin_user()

         # 1. Login as admin
         client.post("/admin/login", data={

@@ -206,10 +210,11 @@ class TestPageManagementFlow:
         response = client.get("/wip-overview")
         assert response.status_code == 403

-    @patch('mes_dashboard.services.auth_service.requests.post')
-    def test_release_dev_page_makes_it_public(self, mock_post, client, temp_page_status):
+    @patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
+    @patch('mes_dashboard.routes.auth_routes.authenticate')
+    def test_release_dev_page_makes_it_public(self, mock_auth, _mock_is_admin, client, temp_page_status):
         """Test releasing a dev page makes it publicly accessible."""
-        mock_post.return_value = mock_ldap_success()
+        mock_auth.return_value = _mock_admin_user()

         # 1. Verify /tables is currently dev (403 for non-admin)
         response = client.get("/tables")

@@ -253,10 +258,11 @@ class TestPortalDynamicTabs:
         # Dev pages should NOT show (tables and resource are dev)
         # Note: This depends on the can_view_page implementation in portal.html

-    @patch('mes_dashboard.services.auth_service.requests.post')
-    def test_portal_shows_all_tabs_for_admin(self, mock_post, client, temp_page_status):
+    @patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
+    @patch('mes_dashboard.routes.auth_routes.authenticate')
+    def test_portal_shows_all_tabs_for_admin(self, mock_auth, _mock_is_admin, client, temp_page_status):
         """Test portal shows all tabs for admin users."""
-        mock_post.return_value = mock_ldap_success()
+        mock_auth.return_value = _mock_admin_user()

         # Login as admin
         client.post("/admin/login", data={

@@ -275,10 +281,11 @@ class TestPortalDynamicTabs:

 class TestSessionPersistence:
     """E2E tests for session persistence."""

-    @patch('mes_dashboard.services.auth_service.requests.post')
-    def test_session_persists_across_requests(self, mock_post, client):
+    @patch('mes_dashboard.routes.auth_routes.is_admin', return_value=True)
+    @patch('mes_dashboard.routes.auth_routes.authenticate')
+    def test_session_persists_across_requests(self, mock_auth, _mock_is_admin, client):
         """Test admin session persists across multiple requests."""
-        mock_post.return_value = mock_ldap_success()
+        mock_auth.return_value = _mock_admin_user()

         # Login
         client.post("/admin/login", data={

@@ -303,7 +310,7 @@ class TestSecurityScenarios:
         """Test admin APIs are protected."""
         # Try to get pages without login
         response = client.get("/admin/api/pages", follow_redirects=False)
-        assert response.status_code == 302
+        assert response.status_code in (302, 401)

         # Try to update page without login
         response = client.put(

@@ -312,23 +319,18 @@ class TestSecurityScenarios:
             content_type="application/json",
             follow_redirects=False
         )
-        assert response.status_code == 302
+        assert response.status_code in (302, 401)

-    @patch('mes_dashboard.services.auth_service.requests.post')
-    def test_non_admin_user_cannot_login(self, mock_post, client):
+    @patch('mes_dashboard.routes.auth_routes.is_admin', return_value=False)
+    @patch('mes_dashboard.routes.auth_routes.authenticate')
+    def test_non_admin_user_cannot_login(self, mock_auth, _mock_is_admin, client):
         """Test non-admin user cannot access admin features."""
-        # Mock LDAP success but with non-admin email
-        mock_response = MagicMock()
-        mock_response.json.return_value = {
-            "success": True,
-            "user": {
-                "username": "99999",
-                "displayName": "Regular User",
-                "mail": "regular@panjit.com.tw",
-                "department": "Test"
-            }
-        }
-        mock_post.return_value = mock_response
+        mock_auth.return_value = {
+            "username": "99999",
+            "displayName": "Regular User",
+            "mail": "regular@panjit.com.tw",
+            "department": "Test",
+        }

         # Try to login
         response = client.post("/admin/login", data={
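
The relaxed 302/401 assertions reflect that the vite-parity app may guard admin APIs either way. One plausible guard consistent with both outcomes, as a sketch only; the decorator name, session key, and login endpoint are assumptions, not the project's confirmed API:

from functools import wraps

from flask import jsonify, redirect, request, session, url_for

def admin_required(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        if session.get("is_admin"):
            return view(*args, **kwargs)
        # JSON/API clients get a bare 401; browser navigation gets a
        # 302 redirect to the login page.
        if request.path.startswith("/admin/api/"):
            return jsonify({"error": "unauthorized"}), 401
        return redirect(url_for("auth.login"))
    return wrapper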

@@ -198,7 +198,7 @@ class TestSearchEndpointsE2E:
         # Use a common pattern that should exist
         response = requests.get(
             f"{api_base_url}/wip/meta/search",
-            params={'type': 'workorder', 'q': 'WO', 'limit': 10},
+            params={'field': 'workorder', 'q': 'WO', 'limit': 10},
             timeout=30
         )

@@ -210,7 +210,7 @@ class TestSearchEndpointsE2E:
         """Test lot ID search returns results."""
         response = requests.get(
             f"{api_base_url}/wip/meta/search",
-            params={'type': 'lotid', 'q': 'LOT', 'limit': 10},
+            params={'field': 'lotid', 'q': 'LOT', 'limit': 10},
             timeout=30
         )

@@ -222,7 +222,7 @@ class TestSearchEndpointsE2E:
         """Test search with short query returns empty list."""
         response = requests.get(
             f"{api_base_url}/wip/meta/search",
-            params={'type': 'workorder', 'q': 'W'},  # Too short
+            params={'field': 'workorder', 'q': 'W'},  # Too short
             timeout=30
         )
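
All three hunks are the same rename: the search endpoint's discriminator parameter is now field instead of type. A quick usage sketch against the new contract; the base URL is an assumption for illustration:

import requests

resp = requests.get(
    "http://localhost:5000/api/wip/meta/search",  # assumed local base URL
    params={"field": "lotid", "q": "LOT", "limit": 10},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())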

@@ -27,21 +27,32 @@ class TestPortalPage:
         """Portal should have all navigation tabs."""
         page.goto(app_server)

-        # Check all tabs exist
+        # Check released tabs exist
         expect(page.locator('.tab:has-text("WIP 即時概況")')).to_be_visible()
         expect(page.locator('.tab:has-text("機台狀態報表")')).to_be_visible()
         expect(page.locator('.tab:has-text("數據表查詢工具")')).to_be_visible()
         expect(page.locator('.tab:has-text("Excel 批次查詢")')).to_be_visible()
         expect(page.locator('.tab:has-text("設備即時概況")')).to_be_visible()
         expect(page.locator('.tab:has-text("設備歷史績效")')).to_be_visible()
         expect(page.locator('.tab:has-text("設備維修查詢")')).to_be_visible()
+        expect(page.locator('.tab:has-text("批次追蹤工具")')).to_be_visible()

     def test_portal_tab_switching(self, page: Page, app_server: str):
         """Portal tabs should switch iframe content."""
         page.goto(app_server)

         # Click on a different tab
-        page.locator('.tab:has-text("機台狀態報表")').click()
+        page.locator('.tab:has-text("設備即時概況")').click()

         # Verify the tab is active
-        expect(page.locator('.tab:has-text("機台狀態報表")')).to_have_class(re.compile(r'active'))
+        expect(page.locator('.tab:has-text("設備即時概況")')).to_have_class(re.compile(r'active'))

+    def test_portal_health_popup_clickable(self, page: Page, app_server: str):
+        """Health status pill should toggle popup visibility on click."""
+        page.goto(app_server)
+
+        popup = page.locator('#healthPopup')
+        expect(popup).not_to_have_class(re.compile(r'show'))
+
+        page.locator('#healthStatus').click()
+        expect(popup).to_have_class(re.compile(r'show'))


 @pytest.mark.e2e
@@ -243,8 +254,13 @@ class TestTablesPage:
     def test_tables_page_loads(self, page: Page, app_server: str):
         """Tables page should load."""
         page.goto(f"{app_server}/tables")

-        expect(page.locator('h1')).to_contain_text('MES 數據表查詢工具')
+        header = page.locator('h1')
+        expect(header).to_be_visible()
+        text = header.inner_text()
+        assert (
+            'MES 數據表查詢工具' in text
+            or '頁面開發中' in text
+        )

     def test_tables_has_toast_system(self, page: Page, app_server: str):
         """Tables page should have Toast system loaded."""

@@ -7,9 +7,9 @@ Run with: pytest tests/e2e/test_resource_history_e2e.py -v --run-integration

 import json
 import pytest
-from unittest.mock import patch, MagicMock
+from unittest.mock import patch
 import pandas as pd
-from datetime import datetime, timedelta
+from datetime import datetime

 import sys
 import os
@@ -97,16 +97,17 @@ class TestResourceHistoryPageAccess:
 class TestResourceHistoryAPIWorkflow:
     """E2E tests for API workflows."""

-    @patch('mes_dashboard.services.filter_cache.get_workcenter_groups')
-    @patch('mes_dashboard.services.filter_cache.get_resource_families')
-    def test_filter_options_workflow(self, mock_families, mock_groups, client):
+    @patch('mes_dashboard.services.resource_history_service.get_filter_options')
+    def test_filter_options_workflow(self, mock_get_filter_options, client):
         """Filter options should be loadable."""
-        mock_groups.return_value = [
-            {'name': '焊接_DB', 'sequence': 1},
-            {'name': '焊接_WB', 'sequence': 2},
-            {'name': '成型', 'sequence': 4},
-        ]
-        mock_families.return_value = ['FAM001', 'FAM002']
+        mock_get_filter_options.return_value = {
+            'workcenter_groups': [
+                {'name': '焊接_DB', 'sequence': 1},
+                {'name': '焊接_WB', 'sequence': 2},
+                {'name': '成型', 'sequence': 4},
+            ],
+            'families': ['FAM001', 'FAM002'],
+        }

         response = client.get('/api/resource/history/options')

@@ -116,10 +117,26 @@ class TestResourceHistoryAPIWorkflow:
         assert 'workcenter_groups' in data['data']
         assert 'families' in data['data']

+    @patch('mes_dashboard.services.resource_history_service._get_filtered_resources')
     @patch('mes_dashboard.services.resource_history_service.read_sql_df')
-    def test_complete_query_workflow(self, mock_read_sql, client):
+    def test_complete_query_workflow(self, mock_read_sql, mock_resources, client):
         """Complete query workflow should return all data sections."""
-        # Mock responses for the 4 queries in query_summary
+        mock_resources.return_value = [
+            {
+                'RESOURCEID': 'RES001',
+                'WORKCENTERNAME': '焊接_DB',
+                'RESOURCEFAMILYNAME': 'FAM001',
+                'RESOURCENAME': 'RES001',
+            },
+            {
+                'RESOURCEID': 'RES002',
+                'WORKCENTERNAME': '成型',
+                'RESOURCEFAMILYNAME': 'FAM002',
+                'RESOURCENAME': 'RES002',
+            },
+        ]
+
+        # Mock responses for the 3 queries in query_summary
         kpi_df = pd.DataFrame([{
             'PRD_HOURS': 8000, 'SBY_HOURS': 1000, 'UDT_HOURS': 500,
             'SDT_HOURS': 300, 'EGT_HOURS': 200, 'NST_HOURS': 1000,
@@ -133,29 +150,20 @@ class TestResourceHistoryAPIWorkflow:
             'UDT_HOURS': 40, 'SDT_HOURS': 25, 'EGT_HOURS': 15, 'NST_HOURS': 100, 'MACHINE_COUNT': 100},
         ])

-        heatmap_df = pd.DataFrame([
-            {'WORKCENTERNAME': '焊接_DB', 'DATA_DATE': datetime(2024, 1, 1),
-             'PRD_HOURS': 400, 'SBY_HOURS': 50, 'UDT_HOURS': 25, 'SDT_HOURS': 15, 'EGT_HOURS': 10},
-            {'WORKCENTERNAME': '成型', 'DATA_DATE': datetime(2024, 1, 1),
-             'PRD_HOURS': 600, 'SBY_HOURS': 50, 'UDT_HOURS': 25, 'SDT_HOURS': 15, 'EGT_HOURS': 10},
-        ])

         comparison_df = pd.DataFrame([
             {'WORKCENTERNAME': '焊接_DB', 'PRD_HOURS': 4000, 'SBY_HOURS': 500,
              'UDT_HOURS': 250, 'SDT_HOURS': 150, 'EGT_HOURS': 100, 'MACHINE_COUNT': 50},
             {'WORKCENTERNAME': '成型', 'PRD_HOURS': 4000, 'SBY_HOURS': 500,
              'UDT_HOURS': 250, 'SDT_HOURS': 150, 'EGT_HOURS': 100, 'MACHINE_COUNT': 50},
         ])
+        heatmap_raw_df = pd.DataFrame([
+            {'HISTORYID': 'RES001', 'DATA_DATE': datetime(2024, 1, 1),
+             'PRD_HOURS': 400, 'SBY_HOURS': 50, 'UDT_HOURS': 25, 'SDT_HOURS': 15, 'EGT_HOURS': 10, 'NST_HOURS': 20},
+            {'HISTORYID': 'RES002', 'DATA_DATE': datetime(2024, 1, 1),
+             'PRD_HOURS': 600, 'SBY_HOURS': 50, 'UDT_HOURS': 25, 'SDT_HOURS': 15, 'EGT_HOURS': 10, 'NST_HOURS': 30},
+        ])

         # Use function-based side_effect for ThreadPoolExecutor parallel queries
-        def mock_sql(sql):
+        def mock_sql(sql, _params=None):
             sql_upper = sql.upper()
-            if 'DATA_DATE' in sql_upper and 'WORKCENTERNAME' in sql_upper:
-                return heatmap_df
+            if 'HISTORYID' in sql_upper and 'DATA_DATE' in sql_upper:
+                return heatmap_raw_df
             elif 'DATA_DATE' in sql_upper:
                 return trend_df
             elif 'WORKCENTERNAME' in sql_upper:
                 return comparison_df
             else:
                 return kpi_df
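
The function-based side_effect matters because query_summary issues its queries from a ThreadPoolExecutor, so completion order is nondeterministic: a list side_effect hands frames out in call order and can pair the wrong frame with a query, while a callable keyed on the SQL text is order-independent. A standalone illustration of the pattern (not part of the diff):

from concurrent.futures import ThreadPoolExecutor
from unittest.mock import MagicMock

# Dispatch on the argument instead of relying on call order.
mock_read_sql = MagicMock(side_effect=lambda sql, _params=None: sql.upper())

with ThreadPoolExecutor(max_workers=3) as pool:
    results = list(pool.map(mock_read_sql, ['kpi', 'trend', 'heatmap']))

assert results == ['KPI', 'TREND', 'HEATMAP']  # correct regardless of scheduling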

@@ -189,14 +197,30 @@ class TestResourceHistoryAPIWorkflow:
         # Verify comparison
         assert len(data['data']['workcenter_comparison']) == 2

+    @patch('mes_dashboard.services.resource_history_service._get_filtered_resources')
     @patch('mes_dashboard.services.resource_history_service.read_sql_df')
-    def test_detail_query_workflow(self, mock_read_sql, client):
+    def test_detail_query_workflow(self, mock_read_sql, mock_resources, client):
         """Detail query workflow should return hierarchical data."""
+        mock_resources.return_value = [
+            {
+                'RESOURCEID': 'RES001',
+                'WORKCENTERNAME': '焊接_DB',
+                'RESOURCEFAMILYNAME': 'FAM001',
+                'RESOURCENAME': 'RES001',
+            },
+            {
+                'RESOURCEID': 'RES002',
+                'WORKCENTERNAME': '焊接_DB',
+                'RESOURCEFAMILYNAME': 'FAM001',
+                'RESOURCENAME': 'RES002',
+            },
+        ]
+
         detail_df = pd.DataFrame([
-            {'WORKCENTERNAME': '焊接_DB', 'RESOURCEFAMILYNAME': 'FAM001', 'RESOURCENAME': 'RES001',
+            {'HISTORYID': 'RES001',
              'PRD_HOURS': 80, 'SBY_HOURS': 10, 'UDT_HOURS': 5, 'SDT_HOURS': 3, 'EGT_HOURS': 2,
              'NST_HOURS': 10, 'TOTAL_HOURS': 110},
-            {'WORKCENTERNAME': '焊接_DB', 'RESOURCEFAMILYNAME': 'FAM001', 'RESOURCENAME': 'RES002',
+            {'HISTORYID': 'RES002',
              'PRD_HOURS': 75, 'SBY_HOURS': 15, 'UDT_HOURS': 5, 'SDT_HOURS': 3, 'EGT_HOURS': 2,
              'NST_HOURS': 10, 'TOTAL_HOURS': 110},
         ])
@@ -226,11 +250,20 @@ class TestResourceHistoryAPIWorkflow:
         assert 'prd_hours' in first_row
         assert 'prd_pct' in first_row

+    @patch('mes_dashboard.services.resource_history_service._get_filtered_resources')
     @patch('mes_dashboard.services.resource_history_service.read_sql_df')
-    def test_export_workflow(self, mock_read_sql, client):
+    def test_export_workflow(self, mock_read_sql, mock_resources, client):
         """Export workflow should return valid CSV."""
+        mock_resources.return_value = [
+            {
+                'RESOURCEID': 'RES001',
+                'WORKCENTERNAME': '焊接_DB',
+                'RESOURCEFAMILYNAME': 'FAM001',
+                'RESOURCENAME': 'RES001',
+            }
+        ]
         mock_read_sql.return_value = pd.DataFrame([
-            {'WORKCENTERNAME': '焊接_DB', 'RESOURCEFAMILYNAME': 'FAM001', 'RESOURCENAME': 'RES001',
+            {'HISTORYID': 'RES001',
              'PRD_HOURS': 80, 'SBY_HOURS': 10, 'UDT_HOURS': 5, 'SDT_HOURS': 3, 'EGT_HOURS': 2,
              'NST_HOURS': 10, 'TOTAL_HOURS': 110},
         ])
@@ -281,17 +314,43 @@ class TestResourceHistoryValidation:
         data = json.loads(response.data)
         assert data['success'] is False

+    @patch('mes_dashboard.services.resource_history_service._get_filtered_resources')
     @patch('mes_dashboard.services.resource_history_service.read_sql_df')
-    def test_granularity_options(self, mock_read_sql, client):
+    def test_granularity_options(self, mock_read_sql, mock_resources, client):
         """Different granularity options should work."""
-        mock_df = pd.DataFrame([{
+        mock_resources.return_value = [{
+            'RESOURCEID': 'RES001',
+            'WORKCENTERNAME': '焊接_DB',
+            'RESOURCEFAMILYNAME': 'FAM001',
+            'RESOURCENAME': 'RES001',
+        }]
+        kpi_df = pd.DataFrame([{
             'PRD_HOURS': 100, 'SBY_HOURS': 10, 'UDT_HOURS': 5,
             'SDT_HOURS': 3, 'EGT_HOURS': 2, 'NST_HOURS': 10, 'MACHINE_COUNT': 5
         }])
-        mock_read_sql.return_value = mock_df
+        trend_df = pd.DataFrame([{
+            'DATA_DATE': datetime(2024, 1, 1),
+            'PRD_HOURS': 100, 'SBY_HOURS': 10, 'UDT_HOURS': 5,
+            'SDT_HOURS': 3, 'EGT_HOURS': 2, 'NST_HOURS': 10,
+            'MACHINE_COUNT': 5
+        }])
+        heatmap_raw_df = pd.DataFrame([{
+            'HISTORYID': 'RES001',
+            'DATA_DATE': datetime(2024, 1, 1),
+            'PRD_HOURS': 100, 'SBY_HOURS': 10, 'UDT_HOURS': 5,
+            'SDT_HOURS': 3, 'EGT_HOURS': 2, 'NST_HOURS': 10
+        }])

         for granularity in ['day', 'week', 'month', 'year']:
-            mock_read_sql.side_effect = [mock_df, pd.DataFrame(), pd.DataFrame(), pd.DataFrame()]
+            def mock_sql(sql, _params=None):
+                sql_upper = sql.upper()
+                if 'HISTORYID' in sql_upper and 'DATA_DATE' in sql_upper:
+                    return heatmap_raw_df
+                if 'DATA_DATE' in sql_upper:
+                    return trend_df
+                return kpi_df
+
+            mock_read_sql.side_effect = mock_sql

             response = client.get(
                 f'/api/resource/history/summary'

@@ -10,7 +10,7 @@ class AppFactoryTests(unittest.TestCase):
         db._ENGINE = None

     def test_create_app_default_config(self):
-        app = create_app()
+        app = create_app("development")
         self.assertTrue(app.config.get("DEBUG"))
         self.assertEqual(app.config.get("ENV"), "development")
         cache = app.extensions.get("cache")
@@ -20,8 +20,11 @@ class AppFactoryTests(unittest.TestCase):

     def test_create_app_production_config(self):
         old_secret = os.environ.get("SECRET_KEY")
+        old_conda_env_name = os.environ.get("CONDA_ENV_NAME")
         try:
             os.environ["SECRET_KEY"] = "test-production-secret-key"
+            # Keep runtime-contract strict validation aligned with active env.
+            os.environ["CONDA_ENV_NAME"] = os.environ.get("CONDA_DEFAULT_ENV", "base")
             app = create_app("production")
             self.assertFalse(app.config.get("DEBUG"))
             self.assertEqual(app.config.get("ENV"), "production")
@@ -30,15 +33,19 @@ class AppFactoryTests(unittest.TestCase):
                 os.environ.pop("SECRET_KEY", None)
             else:
                 os.environ["SECRET_KEY"] = old_secret
+            if old_conda_env_name is None:
+                os.environ.pop("CONDA_ENV_NAME", None)
+            else:
+                os.environ["CONDA_ENV_NAME"] = old_conda_env_name

     def test_create_app_independent_instances(self):
-        app1 = create_app()
+        app1 = create_app("development")
         db._ENGINE = None
-        app2 = create_app()
+        app2 = create_app("development")
         self.assertIsNot(app1, app2)

     def test_routes_registered(self):
-        app = create_app()
+        app = create_app("development")
         rules = {rule.rule for rule in app.url_map.iter_rules()}
         expected = {
             "/",
@@ -47,6 +54,8 @@ class AppFactoryTests(unittest.TestCase):
             "/wip-overview",
             "/wip-detail",
             "/excel-query",
+            "/query-tool",
+            "/tmtt-defect",
             "/api/wip/overview/summary",
             "/api/wip/overview/matrix",
             "/api/wip/overview/hold",
@@ -56,6 +65,8 @@ class AppFactoryTests(unittest.TestCase):
             "/api/resource/status/summary",
             "/api/dashboard/kpi",
             "/api/excel-query/upload",
+            "/api/query-tool/resolve",
+            "/api/tmtt-defect/analysis",
         }
         missing = expected - rules
         self.assertFalse(missing, f"Missing routes: {sorted(missing)}")

@@ -111,6 +111,7 @@ class TestWipApiWithCache:
         'WORKORDER': ['WO001', 'WO002', 'WO003'],
         'WORKCENTER_GROUP': ['WC1', 'WC1', 'WC2'],
         'WORKCENTERSEQUENCE_GROUP': [1, 1, 2],
         'PACKAGE_LEF': ['PKG1', 'PKG2', 'PKG1'],
+        'PRODUCTLINENAME': ['PKG1', 'PKG2', 'PKG1'],
         'EQUIPMENTCOUNT': [1, 0, 0],
         'CURRENTHOLDCOUNT': [0, 1, 0],

@@ -384,5 +384,8 @@ class TestPerformancePage:
         # Should be 200 for authenticated admin
         assert response.status_code == 200
         # Check for performance-related content
-        data_str = response.data.decode('utf-8', errors='ignore').lower()
+        html = response.data.decode('utf-8', errors='ignore')
+        data_str = html.lower()
         assert 'performance' in data_str or '效能' in data_str
+        assert '/static/js/chart.umd.min.js' in html
+        assert 'cdn.jsdelivr.net' not in html

tests/test_query_tool_routes.py (new file, 645 lines)
@@ -0,0 +1,645 @@
# -*- coding: utf-8 -*-
"""Integration tests for Query Tool API routes.

Tests the API endpoints with mocked service dependencies:
- Input validation (empty, over limit, invalid format)
- Success responses
- Error handling
"""

import pytest
import json
from unittest.mock import patch, MagicMock

from mes_dashboard import create_app


@pytest.fixture
def app():
    """Create test Flask application."""
    app = create_app()
    app.config['TESTING'] = True
    return app


@pytest.fixture
def client(app):
    """Create test client."""
    return app.test_client()


class TestQueryToolPage:
    """Tests for /query-tool page route."""

    def test_page_returns_html(self, client):
        """Should return the query tool page."""
        response = client.get('/query-tool')
        assert response.status_code == 200
        assert b'html' in response.data.lower()


class TestResolveEndpoint:
    """Tests for /api/query-tool/resolve endpoint."""

    def test_missing_input_type(self, client):
        """Should return error without input_type."""
        response = client.post(
            '/api/query-tool/resolve',
            json={
                'values': ['GA23100020-A00-001']
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_missing_values(self, client):
        """Should return error without values."""
        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'lot_id'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_empty_values(self, client):
        """Should return error for empty values list."""
        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'lot_id',
                'values': []
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_values_over_limit(self, client):
        """Should reject values exceeding limit."""
        # More than MAX_LOT_IDS (50)
        values = [f'GA{i:09d}' for i in range(51)]
        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'lot_id',
                'values': values
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '超過上限' in data['error'] or '50' in data['error']

    @patch('mes_dashboard.routes.query_tool_routes.resolve_lots')
    def test_resolve_success(self, mock_resolve, client):
        """Should return resolved LOT IDs on success."""
        mock_resolve.return_value = {
            'data': [
                {
                    'container_id': '488103800029578b',
                    'lot_id': 'GA23100020-A00-001',
                    'input_value': 'GA23100020-A00-001',
                    'spec_name': 'SPEC-001'
                }
            ],
            'total': 1,
            'input_count': 1,
            'not_found': []
        }

        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'lot_id',
                'values': ['GA23100020-A00-001']
            }
        )
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert data['total'] == 1
        assert data['data'][0]['lot_id'] == 'GA23100020-A00-001'

    @patch('mes_dashboard.routes.query_tool_routes.resolve_lots')
    def test_resolve_not_found(self, mock_resolve, client):
        """Should return not_found list for missing LOT IDs."""
        mock_resolve.return_value = {
            'data': [],
            'total': 0,
            'input_count': 1,
            'not_found': ['INVALID-LOT-ID']
        }

        response = client.post(
            '/api/query-tool/resolve',
            json={
                'input_type': 'lot_id',
                'values': ['INVALID-LOT-ID']
            }
        )
        assert response.status_code == 200
        data = json.loads(response.data)
        assert data['total'] == 0
        assert 'INVALID-LOT-ID' in data['not_found']
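
A minimal sketch of the validation order these cases pin down for the resolve route; the error strings and the resolve_lots call shape are assumptions, and the real handler lives in mes_dashboard.routes.query_tool_routes:

from flask import Blueprint, jsonify, request

# Assumes the route module imports these by name, which is why the
# tests can patch them at the routes module path.
from mes_dashboard.services.query_tool_service import resolve_lots, validate_lot_input

bp = Blueprint('query_tool', __name__)

@bp.route('/api/query-tool/resolve', methods=['POST'])
def resolve():
    payload = request.get_json(silent=True) or {}
    input_type = payload.get('input_type')
    values = payload.get('values')
    if not input_type:
        return jsonify({'error': '缺少 input_type'}), 400  # message is illustrative
    if not values:
        return jsonify({'error': '缺少查詢值'}), 400
    error = validate_lot_input(input_type, values)  # enforces the 50-item cap
    if error:
        return jsonify({'error': error}), 400
    return jsonify(resolve_lots(input_type, values))  # signature assumed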

class TestLotHistoryEndpoint:
    """Tests for /api/query-tool/lot-history endpoint."""

    def test_missing_container_id(self, client):
        """Should return error without container_id."""
        response = client.get('/api/query-tool/lot-history')
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_lot_history_success(self, mock_query, client):
        """Should return lot history on success."""
        mock_query.return_value = {
            'data': [
                {
                    'CONTAINERID': '488103800029578b',
                    'EQUIPMENTNAME': 'ASSY-01',
                    'SPECNAME': 'SPEC-001',
                    'TRACKINTIMESTAMP': '2024-01-15 10:30:00',
                    'TRACKOUTTIMESTAMP': '2024-01-15 11:00:00'
                }
            ],
            'total': 1
        }

        response = client.get('/api/query-tool/lot-history?container_id=488103800029578b')
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert data['total'] == 1

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_lot_history_service_error(self, mock_query, client):
        """Should return error from service."""
        mock_query.return_value = {'error': '查詢失敗'}

        response = client.get('/api/query-tool/lot-history?container_id=invalid')
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data


class TestAdjacentLotsEndpoint:
    """Tests for /api/query-tool/adjacent-lots endpoint."""

    def test_missing_equipment_id(self, client):
        """Should return error without equipment_id."""
        response = client.get(
            '/api/query-tool/adjacent-lots?'
            'target_time=2024-01-15T10:30:00'
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_missing_target_time(self, client):
        """Should return error without target_time."""
        response = client.get(
            '/api/query-tool/adjacent-lots?'
            'equipment_id=EQ001'
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_with_only_equipment_id(self, client):
        """Should return error with only equipment_id (no target_time)."""
        response = client.get(
            '/api/query-tool/adjacent-lots?'
            'equipment_id=EQ001'
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    @patch('mes_dashboard.routes.query_tool_routes.get_adjacent_lots')
    def test_adjacent_lots_success(self, mock_query, client):
        """Should return adjacent lots on success."""
        mock_query.return_value = {
            'data': [
                {
                    'CONTAINERID': '488103800029578a',
                    'CONTAINERNAME': 'GA23100020-A00-000',
                    'relative_position': -1
                },
                {
                    'CONTAINERID': '488103800029578b',
                    'CONTAINERNAME': 'GA23100020-A00-001',
                    'relative_position': 0
                },
                {
                    'CONTAINERID': '488103800029578c',
                    'CONTAINERNAME': 'GA23100020-A00-002',
                    'relative_position': 1
                }
            ],
            'total': 3
        }

        response = client.get(
            '/api/query-tool/adjacent-lots?'
            'equipment_id=EQ001&target_time=2024-01-15T10:30:00'
        )
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert data['total'] == 3
        # Verify service was called without spec_name
        mock_query.assert_called_once()
        call_args = mock_query.call_args
        assert call_args[0][0] == 'EQ001'  # equipment_id
        assert '2024-01-15' in call_args[0][1]  # target_time


class TestLotAssociationsEndpoint:
    """Tests for /api/query-tool/lot-associations endpoint."""

    def test_missing_container_id(self, client):
        """Should return error without container_id."""
        response = client.get('/api/query-tool/lot-associations?type=materials')
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_missing_type(self, client):
        """Should return error without type."""
        response = client.get('/api/query-tool/lot-associations?container_id=488103800029578b')
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_invalid_type(self, client):
        """Should return error for invalid association type."""
        response = client.get(
            '/api/query-tool/lot-associations?container_id=488103800029578b&type=invalid'
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '不支援' in data['error'] or 'type' in data['error'].lower()

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_materials')
    def test_lot_materials_success(self, mock_query, client):
        """Should return lot materials on success."""
        mock_query.return_value = {
            'data': [
                {
                    'MATERIALTYPE': 'TypeA',
                    'MATERIALNAME': 'Material-001',
                    'QTY': 100
                }
            ],
            'total': 1
        }

        response = client.get(
            '/api/query-tool/lot-associations?container_id=488103800029578b&type=materials'
        )
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert data['total'] == 1


class TestEquipmentPeriodEndpoint:
    """Tests for /api/query-tool/equipment-period endpoint."""

    def test_missing_query_type(self, client):
        """Should return error without query_type."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-01-01',
                'end_date': '2024-01-31'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '查詢類型' in data['error'] or 'type' in data['error'].lower()

    def test_empty_equipment_ids(self, client):
        """Should return error for empty equipment_ids."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': [],
                'start_date': '2024-01-01',
                'end_date': '2024-01-31',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_missing_start_date(self, client):
        """Should return error without start_date."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'end_date': '2024-01-31',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_missing_end_date(self, client):
        """Should return error without end_date."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-01-01',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_invalid_date_range(self, client):
        """Should return error for end date before start date."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-12-31',
                'end_date': '2024-01-01',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '結束日期' in data['error'] or '早於' in data['error']

    def test_date_range_exceeds_limit(self, client):
        """Should reject date range > 90 days."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-01-01',
                'end_date': '2024-06-01',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '90' in data['error']

    def test_invalid_query_type(self, client):
        """Should reject invalid query_type."""
        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-01-01',
                'end_date': '2024-01-31',
                'query_type': 'invalid_type'
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '查詢類型' in data['error'] or 'type' in data['error'].lower()

    @patch('mes_dashboard.routes.query_tool_routes.get_equipment_status_hours')
    def test_equipment_status_hours_success(self, mock_status, client):
        """Should return equipment status hours on success."""
        mock_status.return_value = {'data': [], 'total': 0}

        response = client.post(
            '/api/query-tool/equipment-period',
            json={
                'equipment_ids': ['EQ001'],
                'start_date': '2024-01-01',
                'end_date': '2024-01-31',
                'query_type': 'status_hours'
            }
        )
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data


class TestExportCsvEndpoint:
    """Tests for /api/query-tool/export-csv endpoint."""

    def test_missing_export_type(self, client):
        """Should return error without export_type."""
        response = client.post(
            '/api/query-tool/export-csv',
            json={
                'params': {'container_id': '488103800029578b'}
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data

    def test_invalid_export_type(self, client):
        """Should return error for invalid export_type."""
        response = client.post(
            '/api/query-tool/export-csv',
            json={
                'export_type': 'invalid_type',
                'params': {}
            }
        )
        assert response.status_code == 400
        data = json.loads(response.data)
        assert 'error' in data
        assert '不支援' in data['error'] or 'type' in data['error'].lower()

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_export_lot_history_success(self, mock_get_history, client):
        """Should return CSV for lot history."""
        mock_get_history.return_value = {
            'data': [
                {
                    'EQUIPMENTNAME': 'ASSY-01',
                    'SPECNAME': 'SPEC-001',
                    'TRACKINTIMESTAMP': '2024-01-15 10:00:00'
                }
            ],
            'total': 1
        }

        response = client.post(
            '/api/query-tool/export-csv',
            json={
                'export_type': 'lot_history',
                'params': {'container_id': '488103800029578b'}
            }
        )
        assert response.status_code == 200
        assert 'text/csv' in response.content_type
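
The CSV assertion only pins the content type; a minimal sketch of how such an export can be produced with pandas and a plain Flask Response. The BOM handling and filename convention here are assumptions, not the project's confirmed behaviour:

import io

import pandas as pd
from flask import Response

def _csv_response(rows, filename):
    # UTF-8 BOM keeps Chinese column values readable when opened in Excel.
    buf = io.StringIO()
    pd.DataFrame(rows).to_csv(buf, index=False)
    return Response(
        '\ufeff' + buf.getvalue(),
        mimetype='text/csv',
        headers={'Content-Disposition': f'attachment; filename={filename}'},
    )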

class TestEquipmentListEndpoint:
    """Tests for /api/query-tool/equipment-list endpoint."""

    @patch('mes_dashboard.services.resource_cache.get_all_resources')
    def test_get_equipment_list_success(self, mock_get_resources, client):
        """Should return equipment list."""
        mock_get_resources.return_value = [
            {
                'RESOURCEID': 'EQ001',
                'RESOURCENAME': 'ASSY-01',
                'WORKCENTERNAME': 'WC-A',
                'RESOURCEFAMILYNAME': 'FAM-01'
            },
            {
                'RESOURCEID': 'EQ002',
                'RESOURCENAME': 'ASSY-02',
                'WORKCENTERNAME': 'WC-B',
                'RESOURCEFAMILYNAME': 'FAM-02'
            }
        ]

        response = client.get('/api/query-tool/equipment-list')
        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert 'total' in data
        assert data['total'] == 2

    @patch('mes_dashboard.services.resource_cache.get_all_resources')
    def test_get_equipment_list_empty(self, mock_get_resources, client):
        """Should return error when no equipment available."""
        mock_get_resources.return_value = []

        response = client.get('/api/query-tool/equipment-list')
        assert response.status_code == 500
        data = json.loads(response.data)
        assert 'error' in data

    @patch('mes_dashboard.services.resource_cache.get_all_resources')
    def test_get_equipment_list_exception(self, mock_get_resources, client):
        """Should handle exception gracefully."""
        mock_get_resources.side_effect = Exception('Database error')

        response = client.get('/api/query-tool/equipment-list')
        assert response.status_code == 500
        data = json.loads(response.data)
        assert 'error' in data


class TestWorkcenterGroupsEndpoint:
    """Tests for /api/query-tool/workcenter-groups endpoint."""

    @patch('mes_dashboard.services.filter_cache.get_workcenter_groups')
    def test_returns_groups_list(self, mock_get_groups, client):
        """Should return workcenter groups list."""
        mock_get_groups.return_value = [
            {'name': 'DB', 'sequence': 1},
            {'name': 'WB', 'sequence': 2},
        ]

        response = client.get('/api/query-tool/workcenter-groups')

        assert response.status_code == 200
        data = json.loads(response.data)
        assert 'data' in data
        assert len(data['data']) == 2
        assert data['total'] == 2

    @patch('mes_dashboard.services.filter_cache.get_workcenter_groups')
    def test_handles_cache_failure(self, mock_get_groups, client):
        """Should return 500 when cache fails."""
        mock_get_groups.return_value = None

        response = client.get('/api/query-tool/workcenter-groups')

        assert response.status_code == 500
        data = json.loads(response.data)
        assert 'error' in data

    @patch('mes_dashboard.services.filter_cache.get_workcenter_groups')
    def test_handles_exception(self, mock_get_groups, client):
        """Should handle exception gracefully."""
        mock_get_groups.side_effect = Exception('Cache error')

        response = client.get('/api/query-tool/workcenter-groups')

        assert response.status_code == 500
        data = json.loads(response.data)
        assert 'error' in data


class TestLotHistoryWithWorkcenterFilter:
    """Tests for /api/query-tool/lot-history with workcenter filter."""

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_accepts_workcenter_groups_param(self, mock_query, client):
        """Should pass workcenter_groups parameter to service."""
        mock_query.return_value = {
            'data': [],
            'total': 0,
            'filtered_by_groups': ['DB', 'WB']
        }

        response = client.get(
            '/api/query-tool/lot-history?'
            'container_id=abc123&workcenter_groups=DB,WB'
        )

        assert response.status_code == 200
        # Verify the service was called with workcenter_groups
        call_args = mock_query.call_args
        assert call_args[1].get('workcenter_groups') == ['DB', 'WB']

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_empty_workcenter_groups_ignored(self, mock_query, client):
        """Should ignore empty workcenter_groups parameter."""
        mock_query.return_value = {
            'data': [],
            'total': 0,
            'filtered_by_groups': []
        }

        response = client.get(
            '/api/query-tool/lot-history?'
            'container_id=abc123&workcenter_groups='
        )

        assert response.status_code == 200
        # Verify workcenter_groups is None (not empty list)
        call_args = mock_query.call_args
        assert call_args[1].get('workcenter_groups') is None

    @patch('mes_dashboard.routes.query_tool_routes.get_lot_history')
    def test_returns_filtered_by_groups_in_response(self, mock_query, client):
        """Should include filtered_by_groups in response."""
        mock_query.return_value = {
            'data': [{'CONTAINERID': 'abc123'}],
            'total': 1,
            'filtered_by_groups': ['DB']
        }

        response = client.get(
            '/api/query-tool/lot-history?'
            'container_id=abc123&workcenter_groups=DB'
        )

        assert response.status_code == 200
        data = json.loads(response.data)
        assert data.get('filtered_by_groups') == ['DB']
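
The last two cases imply a specific normalization of the query string: a comma-separated workcenter_groups parameter becomes a list, and an empty or absent value becomes None rather than an empty list. A sketch of that parsing; the helper name is hypothetical:

from flask import request

def _parse_workcenter_groups():
    raw = request.args.get('workcenter_groups', '')
    groups = [g.strip() for g in raw.split(',') if g.strip()]
    return groups or None  # '' -> None, matching the tests above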

tests/test_query_tool_service.py (new file, 420 lines)
@@ -0,0 +1,420 @@
# -*- coding: utf-8 -*-
"""Unit tests for Query Tool service functions.

Tests the core service functions without database dependencies:
- Input validation (LOT, equipment, date range)
- IN clause building helpers
- Constants validation
"""

import pytest
from mes_dashboard.services.query_tool_service import (
    validate_date_range,
    validate_lot_input,
    validate_equipment_input,
    _build_in_clause,
    _build_in_filter,
    BATCH_SIZE,
    MAX_LOT_IDS,
    MAX_SERIAL_NUMBERS,
    MAX_WORK_ORDERS,
    MAX_EQUIPMENTS,
    MAX_DATE_RANGE_DAYS,
)


class TestValidateDateRange:
    """Tests for validate_date_range function."""

    def test_valid_range(self):
        """Should return None for valid date range."""
        result = validate_date_range('2024-01-01', '2024-01-31')
        assert result is None

    def test_same_day(self):
        """Should allow same day as start and end."""
        result = validate_date_range('2024-01-01', '2024-01-01')
        assert result is None

    def test_end_before_start(self):
        """Should reject end date before start date."""
        result = validate_date_range('2024-12-31', '2024-01-01')
        assert result is not None
        assert '結束日期' in result or '早於' in result

    def test_exceeds_max_range(self):
        """Should reject date range exceeding limit."""
        result = validate_date_range('2023-01-01', '2024-12-31')
        assert result is not None
        assert str(MAX_DATE_RANGE_DAYS) in result

    def test_exactly_max_range(self):
        """Should allow exactly max range days."""
        # 90 days from 2024-01-01 is 2024-03-31
        result = validate_date_range('2024-01-01', '2024-03-31')
        assert result is None

    def test_one_day_over_max_range(self):
        """Should reject a range just over the max."""
        # 92 days (2024-01-01 to 2024-04-02), over the 90-day limit
        result = validate_date_range('2024-01-01', '2024-04-02')
        assert result is not None
        assert str(MAX_DATE_RANGE_DAYS) in result

    def test_invalid_date_format(self):
        """Should reject invalid date format."""
        result = validate_date_range('01-01-2024', '12-31-2024')
        assert result is not None
        assert '格式' in result or 'format' in result.lower()

    def test_invalid_start_date(self):
        """Should reject invalid start date."""
        result = validate_date_range('2024-13-01', '2024-12-31')
        assert result is not None
        assert '格式' in result or 'format' in result.lower()

    def test_invalid_end_date(self):
        """Should reject invalid end date."""
        result = validate_date_range('2024-01-01', '2024-02-30')
        assert result is not None
        assert '格式' in result or 'format' in result.lower()

    def test_non_date_string(self):
        """Should reject non-date strings."""
        result = validate_date_range('abc', 'def')
        assert result is not None
        assert '格式' in result or 'format' in result.lower()
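
A sketch of validate_date_range consistent with every case above, assuming the limit is an exclusive day difference (end minus start) capped at MAX_DATE_RANGE_DAYS; the exact error strings are assumptions beyond the substrings the tests check:

from datetime import datetime

MAX_DATE_RANGE_DAYS = 90  # mirrors the service constant

def validate_date_range(start_date, end_date):
    """Return None when valid, otherwise a Chinese error message."""
    try:
        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')
    except (TypeError, ValueError):
        return '日期格式錯誤，請使用 YYYY-MM-DD 格式'
    if end < start:
        return '結束日期不可早於開始日期'
    if (end - start).days > MAX_DATE_RANGE_DAYS:
        return f'查詢區間不得超過 {MAX_DATE_RANGE_DAYS} 天'
    return None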

class TestValidateLotInput:
    """Tests for validate_lot_input function."""

    def test_valid_lot_ids(self):
        """Should accept valid LOT IDs within limit."""
        values = ['GA23100020-A00-001', 'GA23100020-A00-002']
        result = validate_lot_input('lot_id', values)
        assert result is None

    def test_valid_serial_numbers(self):
        """Should accept valid serial numbers within limit."""
        values = ['SN001', 'SN002', 'SN003']
        result = validate_lot_input('serial_number', values)
        assert result is None

    def test_valid_work_orders(self):
        """Should accept valid work orders within limit."""
        values = ['GA231000001']
        result = validate_lot_input('work_order', values)
        assert result is None

    def test_empty_values(self):
        """Should reject empty values list."""
        result = validate_lot_input('lot_id', [])
        assert result is not None
        assert '至少一個' in result

    def test_exceeds_lot_id_limit(self):
        """Should reject LOT IDs exceeding limit."""
        values = [f'GA{i:09d}' for i in range(MAX_LOT_IDS + 1)]
        result = validate_lot_input('lot_id', values)
        assert result is not None
        assert '超過上限' in result
        assert str(MAX_LOT_IDS) in result

    def test_exceeds_serial_number_limit(self):
        """Should reject serial numbers exceeding limit."""
        values = [f'SN{i:06d}' for i in range(MAX_SERIAL_NUMBERS + 1)]
        result = validate_lot_input('serial_number', values)
        assert result is not None
        assert '超過上限' in result
        assert str(MAX_SERIAL_NUMBERS) in result

    def test_exceeds_work_order_limit(self):
        """Should reject work orders exceeding limit."""
        values = [f'WO{i:06d}' for i in range(MAX_WORK_ORDERS + 1)]
        result = validate_lot_input('work_order', values)
        assert result is not None
        assert '超過上限' in result
        assert str(MAX_WORK_ORDERS) in result

    def test_exactly_at_limit(self):
        """Should accept values exactly at limit."""
        values = [f'GA{i:09d}' for i in range(MAX_LOT_IDS)]
        result = validate_lot_input('lot_id', values)
        assert result is None

    def test_unknown_input_type_uses_default_limit(self):
        """Should use default limit for unknown input types."""
        values = [f'X{i}' for i in range(MAX_LOT_IDS)]
        result = validate_lot_input('unknown_type', values)
        assert result is None

        values_over = [f'X{i}' for i in range(MAX_LOT_IDS + 1)]
        result = validate_lot_input('unknown_type', values_over)
        assert result is not None


class TestValidateEquipmentInput:
    """Tests for validate_equipment_input function."""

    def test_valid_equipment_ids(self):
        """Should accept valid equipment IDs within limit."""
        values = ['EQ001', 'EQ002', 'EQ003']
        result = validate_equipment_input(values)
        assert result is None

    def test_empty_equipment_ids(self):
        """Should reject empty equipment list."""
        result = validate_equipment_input([])
        assert result is not None
        assert '至少一台' in result

    def test_exceeds_equipment_limit(self):
        """Should reject equipment IDs exceeding limit."""
        values = [f'EQ{i:05d}' for i in range(MAX_EQUIPMENTS + 1)]
        result = validate_equipment_input(values)
        assert result is not None
        assert '不得超過' in result
        assert str(MAX_EQUIPMENTS) in result

    def test_exactly_at_limit(self):
        """Should accept equipment IDs exactly at limit."""
        values = [f'EQ{i:05d}' for i in range(MAX_EQUIPMENTS)]
        result = validate_equipment_input(values)
        assert result is None


class TestBuildInClause:
    """Tests for _build_in_clause function."""

    def test_empty_list(self):
        """Should return empty list for empty input."""
        result = _build_in_clause([])
        assert result == []

    def test_single_value(self):
        """Should return single chunk for single value."""
        result = _build_in_clause(['VAL001'])
        assert len(result) == 1
        assert result[0] == "'VAL001'"

    def test_multiple_values(self):
        """Should join multiple values with comma."""
        result = _build_in_clause(['VAL001', 'VAL002', 'VAL003'])
        assert len(result) == 1
        assert "'VAL001'" in result[0]
        assert "'VAL002'" in result[0]
        assert "'VAL003'" in result[0]
        assert result[0] == "'VAL001', 'VAL002', 'VAL003'"

    def test_chunking(self):
        """Should chunk when exceeding batch size."""
        # Create more than BATCH_SIZE values
        values = [f'VAL{i:06d}' for i in range(BATCH_SIZE + 10)]
        result = _build_in_clause(values)
        assert len(result) == 2
        # First chunk should have BATCH_SIZE items
        assert result[0].count("'") == BATCH_SIZE * 2  # 2 quotes per value

    def test_escape_single_quotes(self):
        """Should escape single quotes in values."""
        result = _build_in_clause(["VAL'001"])
        assert len(result) == 1
        assert "VAL''001" in result[0]  # Escaped

    def test_custom_chunk_size(self):
        """Should respect custom chunk size."""
        values = ['V1', 'V2', 'V3', 'V4', 'V5']
        result = _build_in_clause(values, max_chunk_size=2)
        assert len(result) == 3  # 2+2+1
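
Taken together, these cases fully specify the helper: quote each value, double embedded single quotes, join with a comma and space, and split into chunks no larger than the Oracle IN-list limit. A minimal sketch that satisfies every assertion above (the real implementation may differ in detail):

BATCH_SIZE = 1000  # mirrors the service constant

def _build_in_clause(values, max_chunk_size=BATCH_SIZE):
    """Quote, escape, and chunk values for Oracle IN lists."""
    quoted = ["'%s'" % v.replace("'", "''") for v in values]
    return [
        ", ".join(quoted[i:i + max_chunk_size])
        for i in range(0, len(quoted), max_chunk_size)
    ]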

class TestBuildInFilter:
    """Tests for _build_in_filter function."""

    def test_empty_list(self):
        """Should return 1=0 for empty input (no results)."""
        result = _build_in_filter([], 'COL')
        assert result == "1=0"

    def test_single_value(self):
        """Should build simple IN clause for single value."""
        result = _build_in_filter(['VAL001'], 'COL')
        assert "COL IN" in result
        assert "'VAL001'" in result

    def test_multiple_values(self):
        """Should build IN clause with multiple values."""
        result = _build_in_filter(['VAL001', 'VAL002'], 'COL')
        assert "COL IN" in result
        assert "'VAL001'" in result
        assert "'VAL002'" in result

    def test_custom_column(self):
        """Should use custom column name."""
        result = _build_in_filter(['VAL001'], 't.MYCOL')
        assert "t.MYCOL IN" in result

    def test_large_list_uses_or(self):
        """Should use OR for chunked results."""
        # Create more than BATCH_SIZE values
        values = [f'VAL{i:06d}' for i in range(BATCH_SIZE + 10)]
        result = _build_in_filter(values, 'COL')
        assert " OR " in result
        # Should have parentheses wrapping the OR conditions
        assert result.startswith("(")
        assert result.endswith(")")
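
Continuing the sketch above, _build_in_filter composes the chunks into a WHERE fragment: a single IN clause when one chunk suffices, an OR-joined parenthesized group when the list exceeds BATCH_SIZE, and the always-false 1=0 for an empty input so the query matches nothing:

def _build_in_filter(values, column):
    """Render a WHERE fragment from chunked IN lists (sketch only)."""
    chunks = _build_in_clause(values)  # reuses the sketch above
    if not chunks:
        return "1=0"  # no values -> match no rows
    parts = ["%s IN (%s)" % (column, chunk) for chunk in chunks]
    if len(parts) == 1:
        return parts[0]
    return "(" + " OR ".join(parts) + ")"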

class TestServiceConstants:
    """Tests for service constants."""

    def test_batch_size_is_reasonable(self):
        """Batch size should be <= 1000 (Oracle limit)."""
        assert BATCH_SIZE <= 1000

    def test_max_date_range_is_reasonable(self):
        """Max date range should be 90 days."""
        assert MAX_DATE_RANGE_DAYS == 90

    def test_max_lot_ids_is_reasonable(self):
        """Max LOT IDs should be sensible."""
        assert 10 <= MAX_LOT_IDS <= 100

    def test_max_serial_numbers_is_reasonable(self):
        """Max serial numbers should be sensible."""
        assert 10 <= MAX_SERIAL_NUMBERS <= 100

    def test_max_work_orders_is_reasonable(self):
        """Max work orders should be low due to expansion."""
        assert MAX_WORK_ORDERS <= 20  # Work orders can expand to many LOTs

    def test_max_equipments_is_reasonable(self):
        """Max equipments should be sensible."""
        assert 5 <= MAX_EQUIPMENTS <= 50


class TestGetWorkcenterForGroups:
    """Tests for _get_workcenters_for_groups helper function."""

    def test_calls_filter_cache(self):
        """Should call filter_cache.get_workcenters_for_groups."""
        from unittest.mock import patch

        with patch('mes_dashboard.services.filter_cache.get_workcenters_for_groups') as mock_get:
            from mes_dashboard.services.query_tool_service import _get_workcenters_for_groups
            mock_get.return_value = ['DB_1', 'DB_2']

            result = _get_workcenters_for_groups(['DB'])

            mock_get.assert_called_once_with(['DB'])
            assert result == ['DB_1', 'DB_2']

    def test_returns_empty_list_for_unknown_group(self):
        """Should return empty list for unknown group."""
        from unittest.mock import patch

        with patch('mes_dashboard.services.filter_cache.get_workcenters_for_groups') as mock_get:
            from mes_dashboard.services.query_tool_service import _get_workcenters_for_groups
            mock_get.return_value = []

            result = _get_workcenters_for_groups(['UNKNOWN'])

            assert result == []


class TestGetLotHistoryWithWorkcenterFilter:
    """Tests for get_lot_history with workcenter_groups filter."""

    def test_no_filter_returns_all(self):
        """When no workcenter_groups, should not add filter to SQL."""
        from unittest.mock import patch, MagicMock
        import pandas as pd

        with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
            with patch('mes_dashboard.services.query_tool_service.SQLLoader') as mock_loader:
                from mes_dashboard.services.query_tool_service import get_lot_history

                mock_loader.load.return_value = 'SELECT * FROM t WHERE c = :container_id {{ WORKCENTER_FILTER }}'
                mock_read.return_value = pd.DataFrame({
                    'CONTAINERID': ['abc123'],
                    'WORKCENTERNAME': ['DB_1'],
                })

                result = get_lot_history('abc123', workcenter_groups=None)

                assert 'error' not in result
                assert result['filtered_by_groups'] == []
                # Verify SQL does not contain WORKCENTERNAME IN
                sql_called = mock_read.call_args[0][0]
                assert 'WORKCENTERNAME IN' not in sql_called
                assert '{{ WORKCENTER_FILTER }}' not in sql_called

    def test_with_filter_adds_condition(self):
        """When workcenter_groups provided, should filter by workcenters."""
        from unittest.mock import patch
        import pandas as pd

        with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
            with patch('mes_dashboard.services.query_tool_service.SQLLoader') as mock_loader:
                with patch('mes_dashboard.services.filter_cache.get_workcenters_for_groups') as mock_get_wc:
                    from mes_dashboard.services.query_tool_service import get_lot_history

                    mock_loader.load.return_value = 'SELECT * FROM t WHERE c = :container_id {{ WORKCENTER_FILTER }}'
                    mock_get_wc.return_value = ['DB_1', 'DB_2']
                    mock_read.return_value = pd.DataFrame({
                        'CONTAINERID': ['abc123'],
                        'WORKCENTERNAME': ['DB_1'],
                    })

                    result = get_lot_history('abc123', workcenter_groups=['DB'])

                    mock_get_wc.assert_called_once_with(['DB'])
                    assert result['filtered_by_groups'] == ['DB']
                    # Verify SQL contains filter
                    sql_called = mock_read.call_args[0][0]
                    assert 'WORKCENTERNAME' in sql_called

    def test_empty_groups_list_no_filter(self):
        """Empty groups list should return all (no filter)."""
        from unittest.mock import patch
        import pandas as pd

        with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
            with patch('mes_dashboard.services.query_tool_service.SQLLoader') as mock_loader:
                from mes_dashboard.services.query_tool_service import get_lot_history

                mock_loader.load.return_value = 'SELECT * FROM t WHERE c = :container_id {{ WORKCENTER_FILTER }}'
                mock_read.return_value = pd.DataFrame({
                    'CONTAINERID': ['abc123'],
                    'WORKCENTERNAME': ['DB_1'],
                })

                result = get_lot_history('abc123', workcenter_groups=[])

                assert result['filtered_by_groups'] == []
                # Verify SQL does not contain WORKCENTERNAME IN
                sql_called = mock_read.call_args[0][0]
                assert 'WORKCENTERNAME IN' not in sql_called

    def test_filter_with_empty_workcenters_result(self):
        """When group has no workcenters, should not add filter."""
        from unittest.mock import patch
        import pandas as pd

        with patch('mes_dashboard.services.query_tool_service.read_sql_df') as mock_read:
            with patch('mes_dashboard.services.query_tool_service.SQLLoader') as mock_loader:
                with patch('mes_dashboard.services.filter_cache.get_workcenters_for_groups') as mock_get_wc:
                    from mes_dashboard.services.query_tool_service import get_lot_history

                    mock_loader.load.return_value = 'SELECT * FROM t WHERE c = :container_id {{ WORKCENTER_FILTER }}'
                    mock_get_wc.return_value = []  # No workcenters for this group
                    mock_read.return_value = pd.DataFrame({
                        'CONTAINERID': ['abc123'],
                        'WORKCENTERNAME': ['DB_1'],
                    })

                    result = get_lot_history('abc123', workcenter_groups=['UNKNOWN'])

                    # Should still succeed, just no filter applied
                    assert 'error' not in result
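
These four cases pin down the {{ WORKCENTER_FILTER }} placeholder behaviour: it is always stripped from the SQL, and only replaced with a WHERE fragment when the groups actually resolve to workcenters. A sketch of that substitution, assuming the fragment is built with the _build_in_filter helper tested above; the function name is hypothetical:

def _apply_workcenter_filter(sql, workcenter_groups):
    # Sketch only; the real code path lives in query_tool_service.
    workcenters = []
    if workcenter_groups:
        workcenters = _get_workcenters_for_groups(workcenter_groups)
    if workcenters:
        fragment = 'AND ' + _build_in_filter(workcenters, 'WORKCENTERNAME')
    else:
        fragment = ''  # absent or unknown groups -> no filter at all
    return sql.replace('{{ WORKCENTER_FILTER }}', fragment)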

@@ -15,7 +15,11 @@ from mes_dashboard.routes.health_routes import check_database
 @pytest.fixture
 def testing_app_factory(monkeypatch):
     def _factory(*, csrf_enabled: bool = False):
+        from mes_dashboard.routes import auth_routes
+
+        monkeypatch.setenv("REALTIME_EQUIPMENT_CACHE_ENABLED", "false")
+        with auth_routes._rate_limit_lock:
+            auth_routes._login_attempts.clear()
         db._ENGINE = None
+        db._HEALTH_ENGINE = None
         app = create_app("testing")

@@ -154,7 +158,8 @@ def test_security_headers_applied_globally(testing_app_factory):

     assert response.status_code == 200
     assert "Content-Security-Policy" in response.headers
-    assert response.headers["X-Frame-Options"] == "DENY"
+    assert "frame-ancestors 'self'" in response.headers["Content-Security-Policy"]
+    assert response.headers["X-Frame-Options"] == "SAMEORIGIN"
     assert response.headers["X-Content-Type-Options"] == "nosniff"
     assert "Referrer-Policy" in response.headers
|
||||
|
||||
|
||||
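The hunk replaces the old DENY expectation with SAMEORIGIN plus a matching CSP frame-ancestors 'self' directive, so the two framing controls no longer contradict each other. That header set is what a global Flask after_request hook typically produces; a hedged sketch, not the portal's actual hook:

from flask import Flask

app = Flask(__name__)

@app.after_request
def apply_security_headers(response):
    # frame-ancestors 'self' and X-Frame-Options SAMEORIGIN express the
    # same policy for modern and legacy user agents respectively.
    response.headers['Content-Security-Policy'] = (
        "default-src 'self'; frame-ancestors 'self'"
    )
    response.headers['X-Frame-Options'] = 'SAMEORIGIN'
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'
    return response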
@@ -81,6 +81,24 @@ class TestTemplateIntegration(unittest.TestCase):
        self.assertIn('mes-api.js', html)
        self.assertIn('mes-toast-container', html)

    def test_query_tool_page_includes_base_scripts(self):
        response = self.client.get('/query-tool')
        self.assertEqual(response.status_code, 200)
        html = response.data.decode('utf-8')

        self.assertIn('toast.js', html)
        self.assertIn('mes-api.js', html)
        self.assertIn('mes-toast-container', html)

    def test_tmtt_defect_page_includes_base_scripts(self):
        response = self.client.get('/tmtt-defect')
        self.assertEqual(response.status_code, 200)
        html = response.data.decode('utf-8')

        self.assertIn('toast.js', html)
        self.assertIn('mes-api.js', html)
        self.assertIn('mes-toast-container', html)


class TestToastCSSIntegration(unittest.TestCase):
    """Test that Toast CSS styles are included in pages."""
@@ -148,11 +166,29 @@ class TestMesApiUsageInTemplates(unittest.TestCase):
        response = self.client.get('/resource')
        html = response.data.decode('utf-8')

        self.assertTrue('MesApi.post' in html or '/static/dist/resource-status.js' in html)
        self.assertTrue(
            'MesApi.post' in html or
            'MesApi.get' in html or
            '/static/dist/resource-status.js' in html
        )

    def test_query_tool_page_uses_vite_module(self):
        response = self.client.get('/query-tool')
        html = response.data.decode('utf-8')

        self.assertIn('/static/dist/query-tool.js', html)
        self.assertIn('type="module"', html)

    def test_tmtt_defect_page_uses_vite_module(self):
        response = self.client.get('/tmtt-defect')
        html = response.data.decode('utf-8')

        self.assertIn('/static/dist/tmtt-defect.js', html)
        self.assertIn('type="module"', html)


class TestViteModuleFallbackIntegration(unittest.TestCase):
    """Ensure page templates support Vite module assets with inline fallback."""
class TestViteModuleIntegration(unittest.TestCase):
    """Ensure page templates render Vite module assets."""

    def setUp(self):
        db._ENGINE = None
@@ -161,25 +197,7 @@ class TestViteModuleFallbackIntegration(unittest.TestCase):
        self.client = self.app.test_client()
        _login_as_admin(self.client)

    def test_pages_render_inline_fallback_when_asset_missing(self):
        endpoints_and_markers = [
            ('/wip-overview', 'function applyFilters'),
            ('/wip-detail', 'function init'),
            ('/hold-detail?reason=test-reason', 'function loadAllData'),
            ('/tables', 'function loadTableData'),
            ('/resource', 'function loadData'),
            ('/resource-history', 'function executeQuery'),
            ('/job-query', 'function queryJobs'),
            ('/excel-query', 'function uploadExcel'),
        ]
        for endpoint, marker in endpoints_and_markers:
            with patch('mes_dashboard.app.os.path.exists', return_value=False):
                response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)
            html = response.data.decode('utf-8')
            self.assertIn(marker, html)

    def test_pages_render_vite_module_when_asset_exists(self):
    def test_pages_render_vite_module_reference(self):
        endpoints_and_assets = [
            ('/wip-overview', 'wip-overview.js'),
            ('/wip-detail', 'wip-detail.js'),
@@ -189,9 +207,11 @@ class TestViteModuleFallbackIntegration(unittest.TestCase):
            ('/resource-history', 'resource-history.js'),
            ('/job-query', 'job-query.js'),
            ('/excel-query', 'excel-query.js'),
            ('/query-tool', 'query-tool.js'),
            ('/tmtt-defect', 'tmtt-defect.js'),
        ]
        for endpoint, asset in endpoints_and_assets:
            with patch('mes_dashboard.app.os.path.exists', return_value=True):
            with patch('mes_dashboard.app.os.path.exists', return_value=False):
                response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)
            html = response.data.decode('utf-8')
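With the inline-fallback test deleted and the remaining test now patching os.path.exists to False, the templates are expected to emit the built bundle reference unconditionally, whether or not the file exists on disk. A minimal sketch of such a template helper (name and signature are assumptions), used as vite_module_tag('query-tool.js'):

def vite_module_tag(asset_name: str) -> str:
    """Render a <script type="module"> tag for a bundle under /static/dist/."""
    return f'<script type="module" src="/static/dist/{asset_name}"></script>'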
146 tests/test_tmtt_defect_routes.py Normal file
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
"""Integration tests for TMTT Defect Analysis API routes."""

import unittest
from unittest.mock import patch

import pandas as pd


class TestTmttDefectAnalysisEndpoint(unittest.TestCase):
    """Test GET /api/tmtt-defect/analysis endpoint."""

    def setUp(self):
        from mes_dashboard.core import database as db
        db._ENGINE = None

        from mes_dashboard.app import create_app
        self.app = create_app()
        self.client = self.app.test_client()

    def test_missing_start_date(self):
        resp = self.client.get('/api/tmtt-defect/analysis?end_date=2025-01-31')
        self.assertEqual(resp.status_code, 400)
        data = resp.get_json()
        self.assertFalse(data['success'])

    def test_missing_end_date(self):
        resp = self.client.get('/api/tmtt-defect/analysis?start_date=2025-01-01')
        self.assertEqual(resp.status_code, 400)
        data = resp.get_json()
        self.assertFalse(data['success'])

    def test_missing_both_dates(self):
        resp = self.client.get('/api/tmtt-defect/analysis')
        self.assertEqual(resp.status_code, 400)

    @patch('mes_dashboard.routes.tmtt_defect_routes.query_tmtt_defect_analysis')
    def test_invalid_date_format(self, mock_query):
        mock_query.return_value = {'error': '日期格式無效,請使用 YYYY-MM-DD'}
        resp = self.client.get(
            '/api/tmtt-defect/analysis?start_date=invalid&end_date=2025-01-31'
        )
        self.assertEqual(resp.status_code, 400)
        data = resp.get_json()
        self.assertFalse(data['success'])
        self.assertIn('格式', data['error'])

    @patch('mes_dashboard.routes.tmtt_defect_routes.query_tmtt_defect_analysis')
    def test_exceeds_180_days(self, mock_query):
        mock_query.return_value = {'error': '查詢範圍不能超過 180 天'}
        resp = self.client.get(
            '/api/tmtt-defect/analysis?start_date=2025-01-01&end_date=2025-12-31'
        )
        self.assertEqual(resp.status_code, 400)
        data = resp.get_json()
        self.assertIn('180', data['error'])

    @patch('mes_dashboard.routes.tmtt_defect_routes.query_tmtt_defect_analysis')
    def test_successful_query(self, mock_query):
        mock_query.return_value = {
            'kpi': {
                'total_input': 1000, 'lot_count': 10,
                'print_defect_qty': 5, 'print_defect_rate': 0.5,
                'lead_defect_qty': 3, 'lead_defect_rate': 0.3,
            },
            'charts': {
                'by_workflow': [], 'by_package': [], 'by_type': [],
                'by_tmtt_machine': [], 'by_mold_machine': [],
            },
            'detail': [],
        }

        resp = self.client.get(
            '/api/tmtt-defect/analysis?start_date=2025-01-01&end_date=2025-01-31'
        )
        self.assertEqual(resp.status_code, 200)
        data = resp.get_json()
        self.assertTrue(data['success'])
        self.assertIn('kpi', data['data'])
        self.assertIn('charts', data['data'])
        self.assertIn('detail', data['data'])

        # Verify separate defect rates
        kpi = data['data']['kpi']
        self.assertEqual(kpi['print_defect_qty'], 5)
        self.assertEqual(kpi['lead_defect_qty'], 3)

    @patch('mes_dashboard.routes.tmtt_defect_routes.query_tmtt_defect_analysis')
    def test_query_failure_returns_500(self, mock_query):
        mock_query.return_value = None
        resp = self.client.get(
            '/api/tmtt-defect/analysis?start_date=2025-01-01&end_date=2025-01-31'
        )
        self.assertEqual(resp.status_code, 500)
class TestTmttDefectExportEndpoint(unittest.TestCase):
    """Test GET /api/tmtt-defect/export endpoint."""

    def setUp(self):
        from mes_dashboard.core import database as db
        db._ENGINE = None

        from mes_dashboard.app import create_app
        self.app = create_app()
        self.client = self.app.test_client()

    def test_missing_dates(self):
        resp = self.client.get('/api/tmtt-defect/export')
        self.assertEqual(resp.status_code, 400)

    @patch('mes_dashboard.routes.tmtt_defect_routes.export_csv')
    def test_export_csv(self, mock_export):
        mock_export.return_value = iter([
            '\ufeff',
            'LOT ID,TYPE,PACKAGE,WORKFLOW,完工流水碼,TMTT設備,MOLD設備,'
            '投入數,印字不良數,印字不良率(%),腳型不良數,腳型不良率(%)\r\n',
        ])
        resp = self.client.get(
            '/api/tmtt-defect/export?start_date=2025-01-01&end_date=2025-01-31'
        )
        self.assertEqual(resp.status_code, 200)
        self.assertIn('text/csv', resp.content_type)
        self.assertIn('attachment', resp.headers.get('Content-Disposition', ''))
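The first mocked chunk is '\ufeff', a UTF-8 BOM, which is what lets Excel detect the encoding and render the Chinese column headers correctly. A hedged sketch of a streaming generator in that shape (the committed export_csv signature is not shown in this diff, so rows-in is an assumption; the column list is taken from the test above):

import csv
import io

HEADER = ['LOT ID', 'TYPE', 'PACKAGE', 'WORKFLOW', '完工流水碼', 'TMTT設備',
          'MOLD設備', '投入數', '印字不良數', '印字不良率(%)',
          '腳型不良數', '腳型不良率(%)']

def stream_csv(rows):
    """Yield a UTF-8 BOM, then header and data lines, for a text/csv response."""
    yield '\ufeff'
    buf = io.StringIO()
    writer = csv.writer(buf)  # csv defaults to \r\n line endings, as asserted
    for record in [HEADER, *rows]:
        buf.seek(0)
        buf.truncate(0)
        writer.writerow(record)
        yield buf.getvalue()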
class TestTmttDefectPageRoute(unittest.TestCase):
    """Test the page route."""

    def setUp(self):
        from mes_dashboard.core import database as db
        db._ENGINE = None

        from mes_dashboard.app import create_app
        self.app = create_app()
        self.client = self.app.test_client()

    def test_page_requires_auth_when_dev(self):
        """A page in 'dev' status is admin-gated for unauthenticated users."""
        resp = self.client.get('/tmtt-defect')
        # 403 when page_status is 'dev' and the user is not admin;
        # 200 if the test app happens to grant access
        self.assertIn(resp.status_code, [200, 403])


if __name__ == '__main__':
    unittest.main()
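Taken together, the three classes fix the route's status contract: missing or invalid parameters map to 400, a None service result maps to 500, and success is a {success, data} envelope. A hedged sketch of a handler satisfying those tests (the committed route body is not part of this diff):

from flask import Blueprint, jsonify, request

from mes_dashboard.services.tmtt_defect_service import query_tmtt_defect_analysis

bp = Blueprint('tmtt_defect', __name__)

@bp.route('/api/tmtt-defect/analysis')
def tmtt_defect_analysis():
    start_date = request.args.get('start_date')
    end_date = request.args.get('end_date')
    if not start_date or not end_date:
        return jsonify(success=False, error='start_date and end_date are required'), 400
    result = query_tmtt_defect_analysis(start_date, end_date)
    if result is None:
        return jsonify(success=False, error='query failed'), 500   # DB-level failure
    if 'error' in result:
        return jsonify(success=False, error=result['error']), 400  # validation message
    return jsonify(success=True, data=result), 200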
287 tests/test_tmtt_defect_service.py Normal file
@@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
"""Unit tests for TMTT Defect Analysis Service."""

import unittest
from unittest.mock import patch, MagicMock

import pandas as pd

from mes_dashboard.services.tmtt_defect_service import (
    _build_kpi,
    _build_chart_data,
    _build_all_charts,
    _build_detail_table,
    _validate_date_range,
    query_tmtt_defect_analysis,
    PRINT_DEFECT,
    LEAD_DEFECT,
)


def _make_df(rows):
    """Helper to create a test DataFrame from a list of dicts."""
    cols = [
        'CONTAINERID', 'CONTAINERNAME', 'PJ_TYPE', 'PRODUCTLINENAME',
        'WORKFLOW', 'FINISHEDRUNCARD', 'TMTT_EQUIPMENTID',
        'TMTT_EQUIPMENTNAME', 'TRACKINQTY', 'TRACKINTIMESTAMP',
        'MOLD_EQUIPMENTID', 'MOLD_EQUIPMENTNAME',
        'LOSSREASONNAME', 'REJECTQTY',
    ]
    if not rows:
        return pd.DataFrame(columns=cols)
    df = pd.DataFrame(rows)
    for c in cols:
        if c not in df.columns:
            df[c] = None
    return df


class TestValidateDateRange(unittest.TestCase):
    """Test date range validation."""

    def test_valid_range(self):
        self.assertIsNone(_validate_date_range('2025-01-01', '2025-01-31'))

    def test_invalid_format(self):
        result = _validate_date_range('2025/01/01', '2025-01-31')
        self.assertIn('格式', result)

    def test_start_after_end(self):
        result = _validate_date_range('2025-02-01', '2025-01-01')
        self.assertIn('不能晚於', result)

    def test_exceeds_max_days(self):
        result = _validate_date_range('2025-01-01', '2025-12-31')
        self.assertIn('180', result)

    def test_exactly_max_days(self):
        self.assertIsNone(_validate_date_range('2025-01-01', '2025-06-30'))
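The boundary case matters here: 2025-01-01 through 2025-06-30 is exactly a 180-day delta, so the limit check must compare the day difference, not an inclusive count. A sketch consistent with all five tests (error strings copied from the assertions; the committed implementation may differ):

from datetime import datetime

MAX_QUERY_DAYS = 180

def validate_date_range(start_date: str, end_date: str):
    """Return an error message string, or None when the range is acceptable."""
    try:
        start = datetime.strptime(start_date, '%Y-%m-%d')
        end = datetime.strptime(end_date, '%Y-%m-%d')
    except (TypeError, ValueError):
        return '日期格式無效,請使用 YYYY-MM-DD'
    if start > end:
        return '開始日期不能晚於結束日期'
    if (end - start).days > MAX_QUERY_DAYS:  # strict >, so 180 days passes
        return '查詢範圍不能超過 180 天'
    return None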
class TestBuildKpi(unittest.TestCase):
    """Test KPI calculation with separate defect rates."""

    def test_empty_dataframe(self):
        df = _make_df([])
        kpi = _build_kpi(df)
        self.assertEqual(kpi['total_input'], 0)
        self.assertEqual(kpi['lot_count'], 0)
        self.assertEqual(kpi['print_defect_qty'], 0)
        self.assertEqual(kpi['lead_defect_qty'], 0)
        self.assertEqual(kpi['print_defect_rate'], 0.0)
        self.assertEqual(kpi['lead_defect_rate'], 0.0)

    def test_single_lot_no_defects(self):
        df = _make_df([{
            'CONTAINERID': 'A001', 'TRACKINQTY': 100,
            'LOSSREASONNAME': None, 'REJECTQTY': 0,
        }])
        kpi = _build_kpi(df)
        self.assertEqual(kpi['total_input'], 100)
        self.assertEqual(kpi['lot_count'], 1)
        self.assertEqual(kpi['print_defect_qty'], 0)
        self.assertEqual(kpi['lead_defect_qty'], 0)

    def test_separate_defect_rates(self):
        """A LOT with both print and lead defects: rates are calculated separately."""
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 10000,
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 50},
            {'CONTAINERID': 'A001', 'TRACKINQTY': 10000,
             'LOSSREASONNAME': LEAD_DEFECT, 'REJECTQTY': 30},
        ])
        kpi = _build_kpi(df)
        # INPUT should be deduplicated (10000, not 20000)
        self.assertEqual(kpi['total_input'], 10000)
        self.assertEqual(kpi['lot_count'], 1)
        self.assertEqual(kpi['print_defect_qty'], 50)
        self.assertEqual(kpi['lead_defect_qty'], 30)
        self.assertAlmostEqual(kpi['print_defect_rate'], 0.5, places=4)
        self.assertAlmostEqual(kpi['lead_defect_rate'], 0.3, places=4)

    def test_multiple_lots(self):
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100,
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 2},
            {'CONTAINERID': 'A002', 'TRACKINQTY': 200,
             'LOSSREASONNAME': LEAD_DEFECT, 'REJECTQTY': 1},
            {'CONTAINERID': 'A003', 'TRACKINQTY': 300,
             'LOSSREASONNAME': None, 'REJECTQTY': 0},
        ])
        kpi = _build_kpi(df)
        self.assertEqual(kpi['total_input'], 600)
        self.assertEqual(kpi['lot_count'], 3)
        self.assertEqual(kpi['print_defect_qty'], 2)
        self.assertEqual(kpi['lead_defect_qty'], 1)
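The dedup test is the load-bearing one: TRACKINQTY repeats on every defect row of the same lot, so input quantity must be summed over unique CONTAINERIDs while reject quantities are summed per loss reason. A sketch of that arithmetic (PRINT_DEFECT / LEAD_DEFECT values below are placeholders; the service exports the real constants):

import pandas as pd

PRINT_DEFECT = '印字不良'  # placeholder value; the real constant is
LEAD_DEFECT = '腳型不良'   # imported from tmtt_defect_service

def build_kpi(df: pd.DataFrame) -> dict:
    """Compute totals with lot-level input dedup and per-reason defect sums."""
    if df.empty:
        return {'total_input': 0, 'lot_count': 0,
                'print_defect_qty': 0, 'lead_defect_qty': 0,
                'print_defect_rate': 0.0, 'lead_defect_rate': 0.0}
    per_lot = df.drop_duplicates(subset='CONTAINERID')  # one row per lot
    total_input = int(per_lot['TRACKINQTY'].sum())
    print_qty = int(df.loc[df['LOSSREASONNAME'] == PRINT_DEFECT, 'REJECTQTY'].sum())
    lead_qty = int(df.loc[df['LOSSREASONNAME'] == LEAD_DEFECT, 'REJECTQTY'].sum())

    def pct(qty):
        return round(qty / total_input * 100, 4) if total_input else 0.0

    return {'total_input': total_input,
            'lot_count': int(per_lot['CONTAINERID'].nunique()),
            'print_defect_qty': print_qty, 'lead_defect_qty': lead_qty,
            'print_defect_rate': pct(print_qty), 'lead_defect_rate': pct(lead_qty)}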
class TestBuildChartData(unittest.TestCase):
    """Test Pareto chart data aggregation."""

    def test_empty_dataframe(self):
        df = _make_df([])
        result = _build_chart_data(df, 'PJ_TYPE')
        self.assertEqual(result, [])

    def test_single_dimension_value(self):
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeA',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 5},
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeA',
             'LOSSREASONNAME': LEAD_DEFECT, 'REJECTQTY': 3},
        ])
        result = _build_chart_data(df, 'PJ_TYPE')
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['name'], 'TypeA')
        self.assertEqual(result[0]['print_defect_qty'], 5)
        self.assertEqual(result[0]['lead_defect_qty'], 3)
        self.assertEqual(result[0]['total_defect_qty'], 8)
        self.assertAlmostEqual(result[0]['cumulative_pct'], 100.0)

    def test_null_dimension_grouped_as_unknown(self):
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100, 'MOLD_EQUIPMENTNAME': None,
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 2},
        ])
        result = _build_chart_data(df, 'MOLD_EQUIPMENTNAME')
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['name'], '(未知)')

    def test_sorted_by_total_defect_desc(self):
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeA',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 1},
            {'CONTAINERID': 'A002', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeB',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 10},
        ])
        result = _build_chart_data(df, 'PJ_TYPE')
        self.assertEqual(result[0]['name'], 'TypeB')
        self.assertEqual(result[1]['name'], 'TypeA')

    def test_cumulative_percentage(self):
        df = _make_df([
            {'CONTAINERID': 'A001', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeA',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 6},
            {'CONTAINERID': 'A002', 'TRACKINQTY': 100, 'PJ_TYPE': 'TypeB',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 4},
        ])
        result = _build_chart_data(df, 'PJ_TYPE')
        # TypeA: 6/10 = 60%; TypeB: cumulative 10/10 = 100%
        self.assertAlmostEqual(result[0]['cumulative_pct'], 60.0)
        self.assertAlmostEqual(result[1]['cumulative_pct'], 100.0)
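The Pareto shape these assertions fix: null dimension values group under '(未知)', rows sort by combined defects descending, and cumulative_pct is a running share of the grand total. A sketch under the same placeholder constants as the KPI sketch above:

import pandas as pd

PRINT_DEFECT, LEAD_DEFECT = '印字不良', '腳型不良'  # placeholders, as above

def build_chart_data(df: pd.DataFrame, dim: str) -> list:
    """Aggregate defects by one dimension and attach Pareto cumulative %."""
    if df.empty:
        return []
    work = df.copy()
    work[dim] = work[dim].fillna('(未知)')  # null dimension -> "(unknown)" bucket
    rows = []
    for name, grp in work.groupby(dim):
        print_q = int(grp.loc[grp['LOSSREASONNAME'] == PRINT_DEFECT, 'REJECTQTY'].sum())
        lead_q = int(grp.loc[grp['LOSSREASONNAME'] == LEAD_DEFECT, 'REJECTQTY'].sum())
        rows.append({'name': name, 'print_defect_qty': print_q,
                     'lead_defect_qty': lead_q,
                     'total_defect_qty': print_q + lead_q})
    rows.sort(key=lambda r: r['total_defect_qty'], reverse=True)
    grand_total = sum(r['total_defect_qty'] for r in rows) or 1
    running = 0
    for r in rows:
        running += r['total_defect_qty']
        r['cumulative_pct'] = round(running / grand_total * 100, 4)
    return rows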
class TestBuildAllCharts(unittest.TestCase):
    """Test that all 5 chart dimensions are built."""

    def test_returns_all_dimensions(self):
        df = _make_df([{
            'CONTAINERID': 'A001', 'TRACKINQTY': 100,
            'WORKFLOW': 'WF1', 'PRODUCTLINENAME': 'PKG1',
            'PJ_TYPE': 'T1', 'TMTT_EQUIPMENTNAME': 'TMTT-1',
            'MOLD_EQUIPMENTNAME': 'MOLD-1',
            'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 1,
        }])
        charts = _build_all_charts(df)
        self.assertIn('by_workflow', charts)
        self.assertIn('by_package', charts)
        self.assertIn('by_type', charts)
        self.assertIn('by_tmtt_machine', charts)
        self.assertIn('by_mold_machine', charts)
class TestBuildDetailTable(unittest.TestCase):
    """Test detail table building."""

    def test_empty_dataframe(self):
        df = _make_df([])
        result = _build_detail_table(df)
        self.assertEqual(result, [])

    def test_single_lot_aggregated(self):
        """A LOT with both defect types should produce one row."""
        df = _make_df([
            {'CONTAINERID': 'A001', 'CONTAINERNAME': 'LOT-001',
             'TRACKINQTY': 100, 'PJ_TYPE': 'T1', 'PRODUCTLINENAME': 'P1',
             'WORKFLOW': 'WF1', 'FINISHEDRUNCARD': 'RC001',
             'TMTT_EQUIPMENTNAME': 'TMTT-1', 'MOLD_EQUIPMENTNAME': 'MOLD-1',
             'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 5},
            {'CONTAINERID': 'A001', 'CONTAINERNAME': 'LOT-001',
             'TRACKINQTY': 100, 'PJ_TYPE': 'T1', 'PRODUCTLINENAME': 'P1',
             'WORKFLOW': 'WF1', 'FINISHEDRUNCARD': 'RC001',
             'TMTT_EQUIPMENTNAME': 'TMTT-1', 'MOLD_EQUIPMENTNAME': 'MOLD-1',
             'LOSSREASONNAME': LEAD_DEFECT, 'REJECTQTY': 3},
        ])
        result = _build_detail_table(df)
        self.assertEqual(len(result), 1)
        row = result[0]
        self.assertEqual(row['CONTAINERNAME'], 'LOT-001')
        self.assertEqual(row['INPUT_QTY'], 100)
        self.assertEqual(row['PRINT_DEFECT_QTY'], 5)
        self.assertEqual(row['LEAD_DEFECT_QTY'], 3)
        self.assertAlmostEqual(row['PRINT_DEFECT_RATE'], 5.0, places=4)
        self.assertAlmostEqual(row['LEAD_DEFECT_RATE'], 3.0, places=4)

    def test_lot_with_no_defects(self):
        df = _make_df([{
            'CONTAINERID': 'A001', 'CONTAINERNAME': 'LOT-001',
            'TRACKINQTY': 100, 'PJ_TYPE': 'T1',
            'LOSSREASONNAME': None, 'REJECTQTY': 0,
        }])
        result = _build_detail_table(df)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['PRINT_DEFECT_QTY'], 0)
        self.assertEqual(result[0]['LEAD_DEFECT_QTY'], 0)
class TestQueryTmttDefectAnalysis(unittest.TestCase):
    """Test the main entry point function."""

    def setUp(self):
        from mes_dashboard.core import database as db
        db._ENGINE = None

    @patch('mes_dashboard.services.tmtt_defect_service.cache_get', return_value=None)
    @patch('mes_dashboard.services.tmtt_defect_service.cache_set')
    @patch('mes_dashboard.services.tmtt_defect_service._fetch_base_data')
    def test_valid_query(self, mock_fetch, mock_cache_set, mock_cache_get):
        mock_fetch.return_value = _make_df([{
            'CONTAINERID': 'A001', 'CONTAINERNAME': 'LOT-001',
            'TRACKINQTY': 100, 'PJ_TYPE': 'T1', 'PRODUCTLINENAME': 'P1',
            'WORKFLOW': 'WF1', 'FINISHEDRUNCARD': 'RC001',
            'TMTT_EQUIPMENTNAME': 'TMTT-1', 'MOLD_EQUIPMENTNAME': 'MOLD-1',
            'LOSSREASONNAME': PRINT_DEFECT, 'REJECTQTY': 2,
        }])

        result = query_tmtt_defect_analysis('2025-01-01', '2025-01-31')
        self.assertIn('kpi', result)
        self.assertIn('charts', result)
        self.assertIn('detail', result)
        self.assertNotIn('error', result)
        mock_cache_set.assert_called_once()

    def test_invalid_dates(self):
        result = query_tmtt_defect_analysis('invalid', '2025-01-31')
        self.assertIn('error', result)

    def test_exceeds_max_days(self):
        result = query_tmtt_defect_analysis('2025-01-01', '2025-12-31')
        self.assertIn('error', result)
        self.assertIn('180', result['error'])

    @patch('mes_dashboard.services.tmtt_defect_service.cache_get')
    def test_cache_hit(self, mock_cache_get):
        cached_data = {'kpi': {}, 'charts': {}, 'detail': []}
        mock_cache_get.return_value = cached_data
        result = query_tmtt_defect_analysis('2025-01-01', '2025-01-31')
        self.assertEqual(result, cached_data)

    @patch('mes_dashboard.services.tmtt_defect_service.cache_get', return_value=None)
    @patch('mes_dashboard.services.tmtt_defect_service._fetch_base_data', return_value=None)
    def test_query_failure(self, mock_fetch, mock_cache_get):
        result = query_tmtt_defect_analysis('2025-01-01', '2025-01-31')
        self.assertIsNone(result)


if __name__ == '__main__':
    unittest.main()
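This last class fixes the cache-aside order of operations: validate first (so bad input never touches the cache), return a hit verbatim, cache only successful builds, and let a None from _fetch_base_data propagate as None. A sketch of that flow, reusing the helper names the tests patch; the cache key format and the absence of an explicit TTL argument are assumptions:

def query_tmtt_defect_analysis(start_date, end_date):
    """Sketch of the cache-aside entry point the tests above describe."""
    error = _validate_date_range(start_date, end_date)
    if error:
        return {'error': error}          # invalid input never reaches the cache
    cache_key = f'tmtt_defect:{start_date}:{end_date}'  # assumed key format
    cached = cache_get(cache_key)
    if cached is not None:
        return cached                    # hit: returned verbatim
    df = _fetch_base_data(start_date, end_date)
    if df is None:
        return None                      # DB failure; the route maps this to 500
    result = {'kpi': _build_kpi(df),
              'charts': _build_all_charts(df),
              'detail': _build_detail_table(df)}
    cache_set(cache_key, result)         # only successful builds are cached
    return result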
@@ -26,6 +26,12 @@ def disable_cache(func):
    """Decorator to disable Redis cache for Oracle fallback tests."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        import mes_dashboard.services.wip_service as wip_service

        with wip_service._wip_search_index_lock:
            wip_service._wip_search_index_cache.clear()
        with wip_service._wip_snapshot_lock:
            wip_service._wip_snapshot_cache.clear()
        with patch('mes_dashboard.services.wip_service.get_cached_wip_data', return_value=None):
            with patch('mes_dashboard.services.wip_service.get_cached_sys_date', return_value=None):
                return func(*args, **kwargs)