fix(hold): dedup equipment cache, fix portal iframe, improve Hold dashboards

- Equipment cache: add freshness gate so only 1 Oracle query per 5-min cycle
  across 4 gunicorn workers; sync worker waits before first refresh
- Portal: add frame-busting to prevent recursive iframe nesting
- Hold Overview: remove redundant TreeMap, add Product & Future Hold Comment
  columns to LotTable
- Hold History: switch list.sql JOIN from DW_MES_LOT_V (WIP snapshot) to
  DW_MES_CONTAINER (historical master) for reliable Product data; add
  Future Hold Comment column; fix comment truncation with hover tooltip
- Page status: reorganize drawer groupings

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
egg
2026-02-11 09:01:02 +08:00
parent be22571421
commit e2ce75b004
18 changed files with 420 additions and 237 deletions

View File

@@ -89,14 +89,14 @@
"route": "/tmtt-defect", "route": "/tmtt-defect",
"name": "TMTT印字腳型不良分析", "name": "TMTT印字腳型不良分析",
"status": "released", "status": "released",
"drawer_id": "queries", "drawer_id": "dev-tools",
"order": 5 "order": 5
}, },
{ {
"route": "/mid-section-defect", "route": "/mid-section-defect",
"name": "中段製程不良追溯", "name": "中段製程不良追溯",
"status": "dev", "status": "dev",
"drawer_id": "queries", "drawer_id": "dev-tools",
"order": 6 "order": 6
}, },
{ {
@@ -124,21 +124,27 @@
"drawers": [ "drawers": [
{ {
"id": "reports", "id": "reports",
"name": "報表", "name": "即時報表",
"order": 1, "order": 1,
"admin_only": false "admin_only": false
}, },
{ {
"id": "queries", "id": "queries",
"name": "查詢類", "name": "查詢類",
"order": 2, "order": 3,
"admin_only": false "admin_only": false
}, },
{ {
"id": "dev-tools", "id": "dev-tools",
"name": "開發工具", "name": "開發工具",
"order": 3, "order": 4,
"admin_only": true "admin_only": true
},
{
"id": "drawer",
"name": "查詢工具",
"order": 2,
"admin_only": false
} }
] ]
} }

View File

@@ -1,5 +1,5 @@
<script setup> <script setup>
import { computed } from 'vue'; import { computed, reactive } from 'vue';
const props = defineProps({ const props = defineProps({
items: { items: {
@@ -52,6 +52,24 @@ function formatHours(value) {
} }
return Number(value).toFixed(2); return Number(value).toFixed(2);
} }
const tip = reactive({ visible: false, text: '', x: 0, y: 0 });
function showTip(event) {
const text = event.currentTarget.getAttribute('data-tip');
if (!text) {
return;
}
const rect = event.currentTarget.getBoundingClientRect();
tip.text = text;
tip.x = rect.left;
tip.y = rect.bottom + 4;
tip.visible = true;
}
function hideTip() {
tip.visible = false;
}
</script> </script>
<template> <template>
@@ -67,6 +85,7 @@ function formatHours(value) {
<tr> <tr>
<th>Lot ID</th> <th>Lot ID</th>
<th>WorkOrder</th> <th>WorkOrder</th>
<th>Product</th>
<th>站別</th> <th>站別</th>
<th>Hold Reason</th> <th>Hold Reason</th>
<th>數量</th> <th>數量</th>
@@ -78,32 +97,35 @@ function formatHours(value) {
<th>Release Comment</th> <th>Release Comment</th>
<th>時長(hr)</th> <th>時長(hr)</th>
<th>NCR</th> <th>NCR</th>
<th>Future Hold Comment</th>
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
<tr v-if="loading"> <tr v-if="loading">
<td colspan="13" class="placeholder">Loading...</td> <td colspan="15" class="placeholder">Loading...</td>
</tr> </tr>
<tr v-else-if="errorMessage"> <tr v-else-if="errorMessage">
<td colspan="13" class="placeholder">{{ errorMessage }}</td> <td colspan="15" class="placeholder">{{ errorMessage }}</td>
</tr> </tr>
<tr v-else-if="items.length === 0"> <tr v-else-if="items.length === 0">
<td colspan="13" class="placeholder">No data</td> <td colspan="15" class="placeholder">No data</td>
</tr> </tr>
<tr v-for="item in items" v-else :key="`${item.lotId}-${item.holdDate}-${item.releaseDate}`"> <tr v-for="item in items" v-else :key="`${item.lotId}-${item.holdDate}-${item.releaseDate}`">
<td>{{ item.lotId || '-' }}</td> <td>{{ item.lotId || '-' }}</td>
<td>{{ item.workorder || '-' }}</td> <td>{{ item.workorder || '-' }}</td>
<td>{{ item.product || '-' }}</td>
<td>{{ item.workcenter || '-' }}</td> <td>{{ item.workcenter || '-' }}</td>
<td>{{ item.holdReason || '-' }}</td> <td>{{ item.holdReason || '-' }}</td>
<td>{{ formatNumber(item.qty) }}</td> <td>{{ formatNumber(item.qty) }}</td>
<td>{{ item.holdDate || '-' }}</td> <td>{{ item.holdDate || '-' }}</td>
<td>{{ item.holdEmp || '-' }}</td> <td>{{ item.holdEmp || '-' }}</td>
<td class="cell-comment">{{ item.holdComment || '-' }}</td> <td class="cell-comment" :data-tip="item.holdComment || ''" @mouseenter="showTip" @mouseleave="hideTip">{{ item.holdComment || '-' }}</td>
<td>{{ item.releaseDate || '仍在 Hold' }}</td> <td>{{ item.releaseDate || '仍在 Hold' }}</td>
<td>{{ item.releaseEmp || '-' }}</td> <td>{{ item.releaseEmp || '-' }}</td>
<td class="cell-comment">{{ item.releaseComment || '-' }}</td> <td class="cell-comment" :data-tip="item.releaseComment || ''" @mouseenter="showTip" @mouseleave="hideTip">{{ item.releaseComment || '-' }}</td>
<td>{{ formatHours(item.holdHours) }}</td> <td>{{ formatHours(item.holdHours) }}</td>
<td>{{ item.ncr || '-' }}</td> <td>{{ item.ncr || '-' }}</td>
<td class="cell-comment" :data-tip="item.futureHoldComment || ''" @mouseenter="showTip" @mouseleave="hideTip">{{ item.futureHoldComment || '-' }}</td>
</tr> </tr>
</tbody> </tbody>
</table> </table>
@@ -115,4 +137,10 @@ function formatHours(value) {
<button type="button" :disabled="!canNext" @click="emit('next-page')">Next</button> <button type="button" :disabled="!canNext" @click="emit('next-page')">Next</button>
</div> </div>
</section> </section>
<Teleport to="body">
<div v-if="tip.visible" class="cell-tip" :style="{ left: tip.x + 'px', top: tip.y + 'px' }">
{{ tip.text }}
</div>
</Teleport>
</template> </template>

View File

@@ -224,8 +224,8 @@
.dept-table td:nth-child(3), .dept-table td:nth-child(3),
.dept-table td:nth-child(4), .dept-table td:nth-child(4),
.dept-table td:nth-child(5), .dept-table td:nth-child(5),
.detail-table td:nth-child(11), .detail-table td:nth-child(6),
.detail-table td:nth-child(12) { .detail-table td:nth-child(13) {
text-align: right; text-align: right;
} }
@@ -268,6 +268,24 @@
max-width: 220px; max-width: 220px;
overflow: hidden; overflow: hidden;
text-overflow: ellipsis; text-overflow: ellipsis;
cursor: default;
}
.cell-tip {
position: fixed;
z-index: 9999;
background: #1e293b;
color: #f8fafc;
padding: 8px 12px;
border-radius: 6px;
font-size: 12px;
line-height: 1.5;
white-space: normal;
word-break: break-word;
max-width: 400px;
min-width: 180px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.18);
pointer-events: none;
} }
@media (max-width: 1440px) { @media (max-width: 1440px) {

View File

@@ -8,7 +8,6 @@ import SummaryCards from '../hold-detail/components/SummaryCards.vue';
import FilterBar from './components/FilterBar.vue'; import FilterBar from './components/FilterBar.vue';
import FilterIndicator from './components/FilterIndicator.vue'; import FilterIndicator from './components/FilterIndicator.vue';
import HoldMatrix from './components/HoldMatrix.vue'; import HoldMatrix from './components/HoldMatrix.vue';
import HoldTreeMap from './components/HoldTreeMap.vue';
import LotTable from './components/LotTable.vue'; import LotTable from './components/LotTable.vue';
const API_TIMEOUT = 60000; const API_TIMEOUT = 60000;
@@ -16,7 +15,6 @@ const DEFAULT_PER_PAGE = 50;
const summary = ref(null); const summary = ref(null);
const matrix = ref(null); const matrix = ref(null);
const treemapItems = ref([]);
const lots = ref([]); const lots = ref([]);
const filterBar = reactive({ const filterBar = reactive({
@@ -25,8 +23,6 @@ const filterBar = reactive({
}); });
const matrixFilter = ref(null); const matrixFilter = ref(null);
const treemapFilter = ref(null);
const reasonOptions = ref([]);
const pagination = ref({ const pagination = ref({
page: 1, page: 1,
@@ -56,7 +52,7 @@ const holdTypeLabel = computed(() => {
return '品質異常'; return '品質異常';
}); });
const hasCascadeFilters = computed(() => Boolean(matrixFilter.value || treemapFilter.value)); const hasCascadeFilters = computed(() => Boolean(matrixFilter.value));
const lotFilterText = computed(() => { const lotFilterText = computed(() => {
const parts = []; const parts = [];
@@ -66,9 +62,6 @@ const lotFilterText = computed(() => {
if (matrixFilter.value?.package) { if (matrixFilter.value?.package) {
parts.push(`Package=${matrixFilter.value.package}`); parts.push(`Package=${matrixFilter.value.package}`);
} }
if (treemapFilter.value?.reason) {
parts.push(`TreeMap Reason=${treemapFilter.value.reason}`);
}
return parts.join(', '); return parts.join(', ');
}); });
@@ -122,27 +115,13 @@ function buildMatrixFilterParams() {
return params; return params;
} }
function buildTreemapParams() {
return {
...buildFilterBarParams(),
...buildMatrixFilterParams(),
};
}
function buildLotsParams() { function buildLotsParams() {
const params = { return {
...buildFilterBarParams(), ...buildFilterBarParams(),
...buildMatrixFilterParams(), ...buildMatrixFilterParams(),
page: page.value, page: page.value,
per_page: Number(pagination.value?.perPage || DEFAULT_PER_PAGE), per_page: Number(pagination.value?.perPage || DEFAULT_PER_PAGE),
}; };
if (treemapFilter.value?.workcenter) {
params.workcenter = treemapFilter.value.workcenter;
}
if (treemapFilter.value?.reason) {
params.treemap_reason = treemapFilter.value.reason;
}
return params;
} }
async function fetchSummary(signal) { async function fetchSummary(signal) {
@@ -163,15 +142,6 @@ async function fetchMatrix(signal) {
return unwrapApiResult(result, 'Failed to fetch hold matrix'); return unwrapApiResult(result, 'Failed to fetch hold matrix');
} }
async function fetchTreemap(signal) {
const result = await apiGet('/api/hold-overview/treemap', {
params: buildTreemapParams(),
timeout: API_TIMEOUT,
signal,
});
return unwrapApiResult(result, 'Failed to fetch hold treemap');
}
async function fetchLots(signal) { async function fetchLots(signal) {
const result = await apiGet('/api/hold-overview/lots', { const result = await apiGet('/api/hold-overview/lots', {
params: buildLotsParams(), params: buildLotsParams(),
@@ -192,23 +162,6 @@ function updateLotsState(payload) {
page.value = pagination.value.page; page.value = pagination.value.page;
} }
function updateReasonOptions(items) {
const unique = new Set();
const nextReasons = [];
(items || []).forEach((item) => {
const reason = String(item?.reason || '').trim();
if (!reason || unique.has(reason)) {
return;
}
unique.add(reason);
nextReasons.push(reason);
});
reasonOptions.value = nextReasons.sort((a, b) => a.localeCompare(b, 'zh-Hant'));
if (filterBar.reason && !unique.has(filterBar.reason)) {
filterBar.reason = '';
}
}
function showRefreshSuccess() { function showRefreshSuccess() {
refreshSuccess.value = true; refreshSuccess.value = true;
window.setTimeout(() => { window.setTimeout(() => {
@@ -223,7 +176,6 @@ const { createAbortSignal, clearAbortController, triggerRefresh } = useAutoRefre
async function loadAllData(showOverlay = true) { async function loadAllData(showOverlay = true) {
const requestId = nextRequestId(); const requestId = nextRequestId();
clearAbortController('hold-overview-treemap-lots');
clearAbortController('hold-overview-lots'); clearAbortController('hold-overview-lots');
const signal = createAbortSignal('hold-overview-all'); const signal = createAbortSignal('hold-overview-all');
@@ -237,10 +189,9 @@ async function loadAllData(showOverlay = true) {
lotsError.value = ''; lotsError.value = '';
try { try {
const [summaryData, matrixData, treemapData, lotsData] = await Promise.all([ const [summaryData, matrixData, lotsData] = await Promise.all([
fetchSummary(signal), fetchSummary(signal),
fetchMatrix(signal), fetchMatrix(signal),
fetchTreemap(signal),
fetchLots(signal), fetchLots(signal),
]); ]);
if (isStaleRequest(requestId)) { if (isStaleRequest(requestId)) {
@@ -249,11 +200,7 @@ async function loadAllData(showOverlay = true) {
summary.value = summaryData; summary.value = summaryData;
matrix.value = matrixData; matrix.value = matrixData;
treemapItems.value = Array.isArray(treemapData?.items) ? treemapData.items : [];
updateLotsState(lotsData); updateLotsState(lotsData);
if (!matrixFilter.value) {
updateReasonOptions(treemapItems.value);
}
showRefreshSuccess(); showRefreshSuccess();
} catch (error) { } catch (error) {
if (error?.name === 'AbortError' || isStaleRequest(requestId)) { if (error?.name === 'AbortError' || isStaleRequest(requestId)) {
@@ -273,51 +220,9 @@ async function loadAllData(showOverlay = true) {
} }
} }
async function loadTreemapAndLots() {
const requestId = nextRequestId();
clearAbortController('hold-overview-all');
clearAbortController('hold-overview-lots');
const signal = createAbortSignal('hold-overview-treemap-lots');
refreshing.value = true;
lotsLoading.value = true;
refreshError.value = false;
errorMessage.value = '';
lotsError.value = '';
try {
const [treemapData, lotsData] = await Promise.all([
fetchTreemap(signal),
fetchLots(signal),
]);
if (isStaleRequest(requestId)) {
return;
}
treemapItems.value = Array.isArray(treemapData?.items) ? treemapData.items : [];
updateLotsState(lotsData);
showRefreshSuccess();
} catch (error) {
if (error?.name === 'AbortError' || isStaleRequest(requestId)) {
return;
}
refreshError.value = true;
const message = error?.message || '載入 TreeMap/Lot 資料失敗';
errorMessage.value = message;
lotsError.value = message;
} finally {
if (isStaleRequest(requestId)) {
return;
}
refreshing.value = false;
lotsLoading.value = false;
}
}
async function loadLots() { async function loadLots() {
const requestId = nextRequestId(); const requestId = nextRequestId();
clearAbortController('hold-overview-all'); clearAbortController('hold-overview-all');
clearAbortController('hold-overview-treemap-lots');
const signal = createAbortSignal('hold-overview-lots'); const signal = createAbortSignal('hold-overview-lots');
refreshing.value = true; refreshing.value = true;
@@ -360,20 +265,12 @@ function handleFilterChange(next) {
filterBar.holdType = nextHoldType; filterBar.holdType = nextHoldType;
filterBar.reason = nextReason; filterBar.reason = nextReason;
matrixFilter.value = null; matrixFilter.value = null;
treemapFilter.value = null;
page.value = 1; page.value = 1;
void loadAllData(false); void loadAllData(false);
} }
function handleMatrixSelect(nextFilter) { function handleMatrixSelect(nextFilter) {
matrixFilter.value = nextFilter; matrixFilter.value = nextFilter;
treemapFilter.value = null;
page.value = 1;
void loadTreemapAndLots();
}
function handleTreemapSelect(nextFilter) {
treemapFilter.value = nextFilter;
page.value = 1; page.value = 1;
void loadLots(); void loadLots();
} }
@@ -383,30 +280,10 @@ function clearMatrixFilter() {
return; return;
} }
matrixFilter.value = null; matrixFilter.value = null;
treemapFilter.value = null;
page.value = 1;
void loadTreemapAndLots();
}
function clearTreemapFilter() {
if (!treemapFilter.value) {
return;
}
treemapFilter.value = null;
page.value = 1; page.value = 1;
void loadLots(); void loadLots();
} }
function clearAllFilters() {
if (!hasCascadeFilters.value) {
return;
}
matrixFilter.value = null;
treemapFilter.value = null;
page.value = 1;
void loadTreemapAndLots();
}
function prevPage() { function prevPage() {
if (page.value <= 1) { if (page.value <= 1) {
return; return;
@@ -475,29 +352,7 @@ onMounted(() => {
:matrix-filter="matrixFilter" :matrix-filter="matrixFilter"
:show-clear-all="true" :show-clear-all="true"
@clear-matrix="clearMatrixFilter" @clear-matrix="clearMatrixFilter"
@clear-treemap="clearTreemapFilter" @clear-all="clearMatrixFilter"
@clear-all="clearAllFilters"
/>
<section class="card">
<div class="card-header">
<div class="card-title">Workcenter Hold Reason TreeMap</div>
</div>
<div class="card-body treemap-body">
<HoldTreeMap
:items="treemapItems"
:active-filter="treemapFilter"
@select="handleTreemapSelect"
/>
</div>
</section>
<FilterIndicator
:treemap-filter="treemapFilter"
:show-clear-all="true"
@clear-matrix="clearMatrixFilter"
@clear-treemap="clearTreemapFilter"
@clear-all="clearAllFilters"
/> />
<LotTable <LotTable
@@ -507,7 +362,7 @@ onMounted(() => {
:error-message="lotsError" :error-message="lotsError"
:has-active-filters="hasLotFilterText" :has-active-filters="hasLotFilterText"
:filter-text="lotFilterText" :filter-text="lotFilterText"
@clear-filters="clearAllFilters" @clear-filters="clearMatrixFilter"
@prev-page="prevPage" @prev-page="prevPage"
@next-page="nextPage" @next-page="nextPage"
/> />

View File

@@ -132,7 +132,7 @@ const chartOption = computed(() => ({
name: 'Hold TreeMap', name: 'Hold TreeMap',
type: 'treemap', type: 'treemap',
roam: false, roam: false,
nodeClick: false, nodeClick: 'link',
breadcrumb: { show: false }, breadcrumb: { show: false },
leafDepth: 1, leafDepth: 1,
visualDimension: 1, visualDimension: 1,

View File

@@ -85,6 +85,7 @@ const pageInfo = computed(() => {
<th>LOTID</th> <th>LOTID</th>
<th>WORKORDER</th> <th>WORKORDER</th>
<th>QTY</th> <th>QTY</th>
<th>Product</th>
<th>Package</th> <th>Package</th>
<th>Workcenter</th> <th>Workcenter</th>
<th>Hold Reason</th> <th>Hold Reason</th>
@@ -92,22 +93,24 @@ const pageInfo = computed(() => {
<th>Hold By</th> <th>Hold By</th>
<th>Dept</th> <th>Dept</th>
<th>Hold Comment</th> <th>Hold Comment</th>
<th>Future Hold Comment</th>
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
<tr v-if="loading"> <tr v-if="loading">
<td colspan="10" class="placeholder">Loading...</td> <td colspan="12" class="placeholder">Loading...</td>
</tr> </tr>
<tr v-else-if="errorMessage"> <tr v-else-if="errorMessage">
<td colspan="10" class="placeholder">{{ errorMessage }}</td> <td colspan="12" class="placeholder">{{ errorMessage }}</td>
</tr> </tr>
<tr v-else-if="lots.length === 0"> <tr v-else-if="lots.length === 0">
<td colspan="10" class="placeholder">No data</td> <td colspan="12" class="placeholder">No data</td>
</tr> </tr>
<tr v-for="lot in lots" v-else :key="lot.lotId"> <tr v-for="lot in lots" v-else :key="lot.lotId">
<td>{{ lot.lotId || '-' }}</td> <td>{{ lot.lotId || '-' }}</td>
<td>{{ lot.workorder || '-' }}</td> <td>{{ lot.workorder || '-' }}</td>
<td>{{ formatNumber(lot.qty) }}</td> <td>{{ formatNumber(lot.qty) }}</td>
<td>{{ lot.product || '-' }}</td>
<td>{{ lot.package || '-' }}</td> <td>{{ lot.package || '-' }}</td>
<td>{{ lot.workcenter || '-' }}</td> <td>{{ lot.workcenter || '-' }}</td>
<td>{{ lot.holdReason || '-' }}</td> <td>{{ lot.holdReason || '-' }}</td>
@@ -115,6 +118,7 @@ const pageInfo = computed(() => {
<td>{{ lot.holdBy || '-' }}</td> <td>{{ lot.holdBy || '-' }}</td>
<td>{{ lot.dept || '-' }}</td> <td>{{ lot.dept || '-' }}</td>
<td>{{ lot.holdComment || '-' }}</td> <td>{{ lot.holdComment || '-' }}</td>
<td>{{ lot.futureHoldComment || '-' }}</td>
</tr> </tr>
</tbody> </tbody>
</table> </table>

View File

@@ -0,0 +1,2 @@
schema: spec-driven
created: 2026-02-10

View File

@@ -0,0 +1,54 @@
## Context
`GUNICORN_WORKERS=4` 啟動 4 個 worker process,每個 worker 在 `init_realtime_equipment_cache()` 中各啟動一個 equipment sync daemon thread。現有分散式鎖(`try_acquire_lock`)只做序列化——worker A 釋放鎖後,worker B 取得鎖仍會查 Oracle,即使 worker A 剛寫入完全相同的資料。
現行 `_sync_worker()` 迴圈為 `refresh → wait(interval)`,首次進入即立刻 `refresh()`,與 init 的 `refresh_equipment_status_cache()` 形成 double-call。
**唯一修改檔案**`src/mes_dashboard/services/realtime_equipment_cache.py`
## Goals / Non-Goals
**Goals:**
- 每個 5 分鐘同步週期只產生 1 次 Oracle 查詢(目前 4 次)
- 消除 init + sync thread 的 double-call
- 保留 `force=True` 繞過去重的能力
**Non-Goals:**
- 改變快取對外 API 行為或資料格式
- 改變 process-level cache(L1)的 TTL 或容量策略
- 單一 worker 架構(仍維持多 worker 架構)
- 更改分散式鎖本身的實作
## Decisions
### Decision 1: Freshness gate取得鎖後檢查 Redis timestamp
**方案 A(選用)**:取得分散式鎖後,讀取 Redis `equipment_status:meta:updated`。若 age < `_SYNC_INTERVAL // 2`,判定為 fresh,釋放鎖並跳過。
**方案 B(捨棄)**:取得鎖前先檢查 timestamp。問題:TOCTOU——檢查後鎖被另一個 worker 拿走並完成更新,本 worker 不知情。
**方案 C(捨棄)**:用 Redis SETNX 做 "sync epoch" marker 取代 timestamp 比較。增加額外 key 管理複雜度,沒有實際優勢。
**理由**:方案 A 最簡單,取得鎖後再檢查保證無 TOCTOU。threshold 設為 `interval / 2` 提供安全邊界——即使時鐘微漂移或 refresh 執行時間較長,也不會誤判。
### Decision 2: Wait-first sync worker loop
現行:`while not stop: refresh(); wait(interval)` → sync thread 啟動即 refresh(double-call)
改為:`while not _STOP_EVENT.wait(timeout=interval): refresh()` → sync thread 先等 interval 再首次 refresh
**理由**:`init_realtime_equipment_cache()` 已做首次同步,sync thread 不需要重複。`_STOP_EVENT.wait(timeout)` 返回 False 表示 timeout(繼續迴圈),返回 True 表示 stop signal(跳出)——語意清晰且是 Python threading 慣用模式。
### Decision 3: 模組級 `_SYNC_INTERVAL` 變數
`refresh_equipment_status_cache()` 需要知道 sync interval 來計算 freshness threshold。由 `init_realtime_equipment_cache()` 設定模組級變數 `_SYNC_INTERVAL`(default 300)。
**理由**:避免在 refresh 函數中重新讀取 Flask config(refresh 可能在 app context 外被呼叫)。模組級變數是此 codebase 已有的慣例(如 `_STOP_EVENT`、`_SYNC_THREAD`)。
## Risks / Trade-offs
| 風險 | 緩解 |
|------|------|
| Freshness gate 過於激進導致整個週期無 worker 更新 | Threshold 為 `interval / 2`(150s),遠小於完整 interval(300s)。只要 1 個 worker 成功更新,其餘 worker 看到 age < 150s 就會跳過;若連 1 個 worker 都沒成功,150s 後下一個取得鎖的 worker 會正常更新 |
| `_SYNC_INTERVAL` 在 init 前被 refresh 讀取 | Default 300 確保安全;只有透過 init 才會啟動 sync thread,所以正常流程下 init 一定先於週期性 refresh |
| Wait-first loop 延遲首次週期性 refresh 5 分鐘 | 這是期望行為——init 已完成首次同步,sync thread 5 分鐘後才需要下一次 |

View File

@@ -0,0 +1,28 @@
## Why
`.env` 設定 `GUNICORN_WORKERS=4`,每個 worker 各自有獨立的 equipment sync thread(共 4 個)。每 5 分鐘週期,4 個 sync thread 輪流取得分散式鎖後都去查 Oracle,產生 4 次完全相同的 `SELECT ... FROM DW_MES_EQUIPMENTSTATUS_WIP_V`(~2700 rows),其中 3 次是多餘的。分散式鎖只做序列化(serialize),沒有去重(deduplicate)。另外 `init_realtime_equipment_cache()` 存在 double-call 問題:init 先呼叫一次 `refresh_equipment_status_cache()`,再啟動 sync thread 立即又呼叫一次。
## What Changes
- **Freshness gate**`refresh_equipment_status_cache()` 取得分散式鎖後、查 Oracle 前,檢查 Redis `equipment_status:meta:updated` 時間戳。若距上次更新不到 `sync_interval / 2` 秒,跳過 Oracle 查詢並釋放鎖。`force=True` 繞過此檢查。
- **Wait-first sync worker**:`_sync_worker()` 改為先等 interval 再開始查詢(`_STOP_EVENT.wait(timeout=interval)` loop),避免與 init 的首次 refresh 重複。
- **模組級 `_SYNC_INTERVAL` 變數**:由 `init_realtime_equipment_cache()` 設定,供 freshness gate 使用。
## Capabilities
### New Capabilities
(無新增 capability。此為既有 equipment cache sync 機制的去重優化。)
### Modified Capabilities
(無 spec-level requirement 變更。改動純屬實作層最佳化,不影響快取對外行為、資料即時性或 API 契約。)
## Impact
- **檔案**`src/mes_dashboard/services/realtime_equipment_cache.py`(唯一修改檔案)
- **Oracle 負載**:每 5 分鐘週期從 4 次查詢降至 1 次
- **資料即時性**:無影響,每週期仍保證至少 1 次更新
- **`force=True` 調用**:無影響,繞過 freshness gate
- **Process-level cache**:無影響,`_save_to_redis()` 已呼叫 `invalidate()`,其他 worker 的 L1 cache 在 30s TTL 內自然過期
- **Worker 重啟(gunicorn max_requests)**:改善——新 worker 的 init refresh 會被 freshness gate 擋住

View File

@@ -0,0 +1,27 @@
## ADDED Requirements
### Requirement: Equipment Sync Refresh SHALL Skip Redundant Oracle Queries Within Same Cycle
When multiple workers attempt to refresh the equipment status cache within the same sync cycle, only the first successful refresh SHALL query Oracle. Subsequent workers that acquire the distributed lock MUST check the freshness of the existing cache and skip the Oracle query if the cache was recently updated.
#### Scenario: Another worker already refreshed within current cycle
- **WHEN** a worker acquires the distributed lock and the `equipment_status:meta:updated` timestamp is less than half the sync interval old
- **THEN** the worker MUST release the lock without querying Oracle and return False
#### Scenario: No recent refresh exists
- **WHEN** a worker acquires the distributed lock and the `equipment_status:meta:updated` timestamp is older than half the sync interval (or missing)
- **THEN** the worker MUST proceed with the full Oracle query and cache update
#### Scenario: Force refresh bypasses freshness gate
- **WHEN** `refresh_equipment_status_cache(force=True)` is called
- **THEN** the freshness gate MUST be skipped and the Oracle query MUST proceed regardless of `meta:updated` age
### Requirement: Sync Worker SHALL Not Duplicate Init Refresh
The background sync worker thread MUST wait for one full sync interval before its first refresh attempt, since `init_realtime_equipment_cache()` already performs an initial refresh at startup.
#### Scenario: Sync worker startup after init
- **WHEN** the sync worker thread starts after `init_realtime_equipment_cache()` completes the initial refresh
- **THEN** the worker MUST wait for the configured interval before attempting its first refresh
#### Scenario: Stop signal during wait
- **WHEN** a stop signal is received while the sync worker is waiting
- **THEN** the worker MUST exit without performing a refresh

View File

@@ -0,0 +1,18 @@
## 1. Freshness Gate
- [x] 1.1 Add module-level `_SYNC_INTERVAL: int = 300` variable in `realtime_equipment_cache.py`
- [x] 1.2 In `init_realtime_equipment_cache()`, set `_SYNC_INTERVAL` from `config.get('EQUIPMENT_STATUS_SYNC_INTERVAL', 300)` before starting sync worker
- [x] 1.3 In `refresh_equipment_status_cache()`, after acquiring distributed lock and before Oracle query: if `force` is False, read Redis `equipment_status:meta:updated`, compute age, skip if age < `_SYNC_INTERVAL // 2`
## 2. Wait-First Sync Worker
- [x] 2.1 Rewrite `_sync_worker()` loop from `while not stop: refresh(); wait()` to `while not _STOP_EVENT.wait(timeout=interval): refresh()` so sync thread waits one full interval before first refresh
## 3. Tests
- [x] 3.1 Add test: `test_refresh_skips_when_recently_updated` — mock `meta:updated` as 10s ago, verify Oracle not called
- [x] 3.2 Add test: `test_refresh_proceeds_when_stale` — mock `meta:updated` as 200s ago, verify Oracle called
- [x] 3.3 Add test: `test_refresh_proceeds_when_force` — set `meta:updated` as 10s ago with `force=True`, verify Oracle called
- [x] 3.4 Add test: `test_sync_worker_waits_before_first_refresh` — verify sync worker does not call refresh immediately on start
- [x] 3.5 Run `python -m pytest tests/test_realtime_equipment_cache.py -x -q` — existing + new tests pass
- [x] 3.6 Run `python -m pytest tests/ -x -q` — full test suite pass

View File

@@ -0,0 +1,32 @@
# equipment-sync-dedup Specification
## Purpose
Ensure multi-worker equipment status cache sync performs at most one Oracle query per sync cycle, preventing redundant identical queries across gunicorn workers.
## Requirements
### Requirement: Equipment Sync Refresh SHALL Skip Redundant Oracle Queries Within Same Cycle
When multiple workers attempt to refresh the equipment status cache within the same sync cycle, only the first successful refresh SHALL query Oracle. Subsequent workers that acquire the distributed lock MUST check the freshness of the existing cache and skip the Oracle query if the cache was recently updated.
#### Scenario: Another worker already refreshed within current cycle
- **WHEN** a worker acquires the distributed lock and the `equipment_status:meta:updated` timestamp is less than half the sync interval old
- **THEN** the worker MUST release the lock without querying Oracle and return False
#### Scenario: No recent refresh exists
- **WHEN** a worker acquires the distributed lock and the `equipment_status:meta:updated` timestamp is older than half the sync interval (or missing)
- **THEN** the worker MUST proceed with the full Oracle query and cache update
#### Scenario: Force refresh bypasses freshness gate
- **WHEN** `refresh_equipment_status_cache(force=True)` is called
- **THEN** the freshness gate MUST be skipped and the Oracle query MUST proceed regardless of `meta:updated` age
### Requirement: Sync Worker SHALL Not Duplicate Init Refresh
The background sync worker thread MUST wait for one full sync interval before its first refresh attempt, since `init_realtime_equipment_cache()` already performs an initial refresh at startup.
#### Scenario: Sync worker startup after init
- **WHEN** the sync worker thread starts after `init_realtime_equipment_cache()` completes the initial refresh
- **THEN** the worker MUST wait for the configured interval before attempting its first refresh
#### Scenario: Stop signal during wait
- **WHEN** a stop signal is received while the sync worker is waiting
- **THEN** the worker MUST exit without performing a refresh

View File

@@ -476,6 +476,7 @@ def get_hold_history_list(
items.append({ items.append({
'lotId': _clean_text(row.get('LOT_ID')), 'lotId': _clean_text(row.get('LOT_ID')),
'workorder': _clean_text(row.get('WORKORDER')), 'workorder': _clean_text(row.get('WORKORDER')),
'product': _clean_text(row.get('PRODUCT')),
'workcenter': wc_group or wc_name, 'workcenter': wc_group or wc_name,
'holdReason': _clean_text(row.get('HOLD_REASON')), 'holdReason': _clean_text(row.get('HOLD_REASON')),
'qty': _safe_int(row.get('QTY')), 'qty': _safe_int(row.get('QTY')),
@@ -487,6 +488,7 @@ def get_hold_history_list(
'releaseComment': _clean_text(row.get('RELEASE_COMMENT')), 'releaseComment': _clean_text(row.get('RELEASE_COMMENT')),
'holdHours': round(_safe_float(row.get('HOLD_HOURS')), 2), 'holdHours': round(_safe_float(row.get('HOLD_HOURS')), 2),
'ncr': _clean_text(row.get('NCR_ID')), 'ncr': _clean_text(row.get('NCR_ID')),
'futureHoldComment': _clean_text(row.get('FUTURE_HOLD_COMMENT')),
}) })
total_pages = (total + per_page - 1) // per_page if total > 0 else 1 total_pages = (total + per_page - 1) // per_page if total > 0 else 1

View File

@@ -127,6 +127,7 @@ LOOKUP_TTL_SECONDS = DEFAULT_LOOKUP_TTL_SECONDS
_SYNC_THREAD: threading.Thread | None = None _SYNC_THREAD: threading.Thread | None = None
_STOP_EVENT = threading.Event() _STOP_EVENT = threading.Event()
_SYNC_LOCK = threading.Lock() _SYNC_LOCK = threading.Lock()
_SYNC_INTERVAL: int = 300
# ============================================================ # ============================================================
@@ -662,6 +663,29 @@ def refresh_equipment_status_cache(force: bool = False) -> bool:
try: try:
with _SYNC_LOCK: with _SYNC_LOCK:
if not force:
redis_client = get_redis_client()
if redis_client is not None:
try:
prefix = get_key_prefix()
updated_key = f"{prefix}:{EQUIPMENT_STATUS_META_UPDATED_KEY}"
updated_at = redis_client.get(updated_key)
if updated_at:
normalized = updated_at.replace("Z", "+00:00")
parsed = datetime.fromisoformat(normalized)
now = datetime.now(tz=parsed.tzinfo) if parsed.tzinfo else datetime.now()
age_seconds = max((now - parsed).total_seconds(), 0.0)
freshness_threshold = max(int(_SYNC_INTERVAL // 2), 1)
if age_seconds < freshness_threshold:
logger.info(
"Equipment status cache is fresh (age=%.2fs < threshold=%ds), skipping refresh",
age_seconds,
freshness_threshold,
)
return False
except Exception as exc:
logger.warning("Failed to evaluate equipment cache freshness: %s", exc)
logger.info("Refreshing equipment status cache...") logger.info("Refreshing equipment status cache...")
start_time = time.time() start_time = time.time()
@@ -696,15 +720,12 @@ def _sync_worker(interval: int):
""" """
logger.info(f"Equipment status sync worker started (interval: {interval}s)") logger.info(f"Equipment status sync worker started (interval: {interval}s)")
while not _STOP_EVENT.is_set(): while not _STOP_EVENT.wait(timeout=interval):
try: try:
refresh_equipment_status_cache() refresh_equipment_status_cache()
except Exception as exc: except Exception as exc:
logger.error(f"Equipment status sync error: {exc}") logger.error(f"Equipment status sync error: {exc}")
# Wait for next sync or stop signal
_STOP_EVENT.wait(timeout=interval)
logger.info("Equipment status sync worker stopped") logger.info("Equipment status sync worker stopped")
@@ -764,7 +785,9 @@ def init_realtime_equipment_cache(app=None):
logger.info("Realtime equipment cache is disabled") logger.info("Realtime equipment cache is disabled")
return return
global _SYNC_INTERVAL
interval = config.get('EQUIPMENT_STATUS_SYNC_INTERVAL', 300) interval = config.get('EQUIPMENT_STATUS_SYNC_INTERVAL', 300)
_SYNC_INTERVAL = int(interval)
logger.info("Initializing realtime equipment cache...") logger.info("Initializing realtime equipment cache...")
@@ -772,4 +795,4 @@ def init_realtime_equipment_cache(app=None):
refresh_equipment_status_cache() refresh_equipment_status_cache()
# Start background worker # Start background worker
_start_equipment_status_sync_worker(interval) _start_equipment_status_sync_worker(_SYNC_INTERVAL)

View File

@@ -16,6 +16,7 @@ WITH history_base AS (
h.RELEASEEMP, h.RELEASEEMP,
h.RELEASECOMMENTS, h.RELEASECOMMENTS,
h.NCRID, h.NCRID,
h.FUTUREHOLDCOMMENTS,
CASE CASE
WHEN h.HOLDREASONNAME IN ({{ NON_QUALITY_REASONS }}) THEN 'non-quality' WHEN h.HOLDREASONNAME IN ({{ NON_QUALITY_REASONS }}) THEN 'non-quality'
ELSE 'quality' ELSE 'quality'
@@ -56,8 +57,9 @@ filtered AS (
), ),
ranked AS ( ranked AS (
SELECT SELECT
NVL(l.LOTID, TRIM(f.CONTAINERID)) AS lot_id, NVL(c.CONTAINERNAME, TRIM(f.CONTAINERID)) AS lot_id,
f.PJ_WORKORDER AS workorder, f.PJ_WORKORDER AS workorder,
c.PRODUCTNAME AS product,
f.WORKCENTERNAME AS workcenter, f.WORKCENTERNAME AS workcenter,
f.HOLDREASONNAME AS hold_reason, f.HOLDREASONNAME AS hold_reason,
f.QTY AS qty, f.QTY AS qty,
@@ -69,14 +71,16 @@ ranked AS (
f.RELEASECOMMENTS AS release_comment, f.RELEASECOMMENTS AS release_comment,
f.hold_hours, f.hold_hours,
f.NCRID AS ncr_id, f.NCRID AS ncr_id,
f.FUTUREHOLDCOMMENTS AS future_hold_comment,
ROW_NUMBER() OVER (ORDER BY f.HOLDTXNDATE DESC, f.CONTAINERID) AS rn, ROW_NUMBER() OVER (ORDER BY f.HOLDTXNDATE DESC, f.CONTAINERID) AS rn,
COUNT(*) OVER () AS total_count COUNT(*) OVER () AS total_count
FROM filtered f FROM filtered f
LEFT JOIN DWH.DW_MES_LOT_V l ON l.CONTAINERID = f.CONTAINERID LEFT JOIN DWH.DW_MES_CONTAINER c ON c.CONTAINERID = f.CONTAINERID
) )
SELECT SELECT
lot_id, lot_id,
workorder, workorder,
product,
workcenter, workcenter,
hold_reason, hold_reason,
qty, qty,
@@ -88,6 +92,7 @@ SELECT
release_comment, release_comment,
hold_hours, hold_hours,
ncr_id, ncr_id,
future_hold_comment,
total_count total_count
FROM ranked FROM ranked
WHERE rn > :offset WHERE rn > :offset

View File

@@ -3,6 +3,7 @@
{% block title %}MES 報表入口{% endblock %} {% block title %}MES 報表入口{% endblock %}
{% block head_extra %} {% block head_extra %}
<script>if (window.self !== window.top) { window.top.location.href = window.self.location.href; }</script>
{% set portal_css = frontend_asset('portal.css') %} {% set portal_css = frontend_asset('portal.css') %}
{% if portal_css %} {% if portal_css %}
<link rel="stylesheet" href="{{ portal_css }}"> <link rel="stylesheet" href="{{ portal_css }}">

View File

@@ -260,58 +260,49 @@ class TestMesApiStress:
@pytest.mark.stress @pytest.mark.stress
class TestPageNavigationStress: class TestPageNavigationStress:
"""Stress tests for rapid page navigation.""" """Stress tests for rapid page navigation."""
def test_rapid_tab_switching(self, page: Page, app_server: str): def test_rapid_tab_switching(self, page: Page, app_server: str):
"""Test rapid tab switching in portal.""" """Test rapid tab switching in portal."""
page.goto(app_server, wait_until='domcontentloaded', timeout=30000) page.goto(app_server, wait_until='domcontentloaded', timeout=30000)
page.wait_for_timeout(500) sidebar_items = page.locator('.sidebar-item[data-target]')
expect(sidebar_items.first).to_be_visible()
# Only use released pages that are visible without admin login item_count = sidebar_items.count()
tabs = [ assert item_count >= 1, "No portal sidebar pages available for navigation stress test"
'.tab:has-text("WIP 即時概況")',
'.tab:has-text("設備即時概況")', start_time = time.time()
'.tab:has-text("設備歷史績效")',
'.tab:has-text("設備維修查詢")', # Rapidly switch pages 20 times
] for i in range(20):
item = sidebar_items.nth(i % item_count)
start_time = time.time() item.click()
page.wait_for_timeout(50)
# Rapidly switch tabs 20 times
for i in range(20): switch_time = time.time() - start_time
tab = tabs[i % len(tabs)] print(f"\n 20 sidebar switches in {switch_time:.3f}s")
page.locator(tab).click()
page.wait_for_timeout(50) # Page should still be responsive
expect(page.locator('h1')).to_contain_text('MES 報表入口')
switch_time = time.time() - start_time print(" Portal remained stable")
print(f"\n 20 tab switches in {switch_time:.3f}s")
def test_portal_iframe_stress(self, page: Page, app_server: str):
# Page should still be responsive """Test portal remains responsive with iframe loading."""
expect(page.locator('h1')).to_contain_text('MES 報表入口') page.goto(app_server, wait_until='domcontentloaded', timeout=30000)
print(" Portal remained stable") sidebar_items = page.locator('.sidebar-item[data-target]')
expect(sidebar_items.first).to_be_visible()
def test_portal_iframe_stress(self, page: Page, app_server: str): item_count = sidebar_items.count()
"""Test portal remains responsive with iframe loading.""" assert item_count >= 1, "No portal sidebar pages available for iframe stress test"
page.goto(app_server, wait_until='domcontentloaded', timeout=30000)
page.wait_for_timeout(500) checked = min(item_count, 4)
for idx in range(checked):
# Switch through released tabs (dev tabs hidden without admin login) item = sidebar_items.nth(idx)
tabs = [ item.click()
'WIP 即時概況', page.wait_for_timeout(200)
'設備即時概況',
'設備歷史績效', # Verify clicked item is active
'設備維修查詢', expect(item).to_have_class(re.compile(r'active'))
]
print(f"\n All {checked} sidebar pages clickable and responsive")
for tab_name in tabs:
page.locator(f'.tab:has-text("{tab_name}")').click()
page.wait_for_timeout(200)
# Verify tab is active
tab = page.locator(f'.tab:has-text("{tab_name}")')
expect(tab).to_have_class(re.compile(r'active'))
print(f"\n All {len(tabs)} tabs clickable and responsive")
@pytest.mark.stress @pytest.mark.stress

View File

@@ -4,10 +4,11 @@
Tests aggregation, status classification, and cache query functionality. Tests aggregation, status classification, and cache query functionality.
""" """
import pytest import pytest
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
import json import json
import pandas as pd from datetime import datetime, timedelta
import pandas as pd
class TestClassifyStatus: class TestClassifyStatus:
@@ -516,7 +517,7 @@ class TestGetEquipmentStatusCacheStatus:
assert result['count'] == 1000 assert result['count'] == 1000
class TestEquipmentProcessLevelCache: class TestEquipmentProcessLevelCache:
"""Test bounded process-level cache behavior for equipment status.""" """Test bounded process-level cache behavior for equipment status."""
def test_lru_eviction_prefers_recent_keys(self): def test_lru_eviction_prefers_recent_keys(self):
@@ -535,11 +536,99 @@ class TestEquipmentProcessLevelCache:
def test_global_equipment_cache_uses_bounded_config(self): def test_global_equipment_cache_uses_bounded_config(self):
import mes_dashboard.services.realtime_equipment_cache as eq import mes_dashboard.services.realtime_equipment_cache as eq
assert eq.EQUIPMENT_PROCESS_CACHE_MAX_SIZE >= 1 assert eq.EQUIPMENT_PROCESS_CACHE_MAX_SIZE >= 1
assert eq._equipment_status_cache.max_size == eq.EQUIPMENT_PROCESS_CACHE_MAX_SIZE assert eq._equipment_status_cache.max_size == eq.EQUIPMENT_PROCESS_CACHE_MAX_SIZE
class TestSharedQueryFragments: class TestEquipmentRefreshDedup:
"""Test refresh de-dup behavior and sync worker startup timing."""
def test_refresh_skips_when_recently_updated(self):
"""Should skip Oracle query when cache is fresh and force=False."""
import mes_dashboard.services.realtime_equipment_cache as eq
recent_updated = (datetime.now() - timedelta(seconds=10)).isoformat()
mock_client = MagicMock()
mock_client.get.return_value = recent_updated
with patch.object(eq, "_SYNC_INTERVAL", 300):
with patch.object(eq, "try_acquire_lock", return_value=True):
with patch.object(eq, "release_lock") as mock_release_lock:
with patch.object(eq, "get_redis_client", return_value=mock_client):
with patch.object(eq, "get_key_prefix", return_value="mes_wip"):
with patch.object(eq, "_load_equipment_status_from_oracle") as mock_oracle:
with patch.object(eq, "_save_to_redis", return_value=True) as mock_save:
result = eq.refresh_equipment_status_cache(force=False)
assert result is False
mock_oracle.assert_not_called()
mock_save.assert_not_called()
mock_client.get.assert_called_once_with("mes_wip:equipment_status:meta:updated")
mock_release_lock.assert_called_once_with("equipment_status_cache_update")
def test_refresh_proceeds_when_stale(self):
"""Should proceed with Oracle query when cache is stale."""
import mes_dashboard.services.realtime_equipment_cache as eq
stale_updated = (datetime.now() - timedelta(seconds=200)).isoformat()
mock_client = MagicMock()
mock_client.get.return_value = stale_updated
with patch.object(eq, "_SYNC_INTERVAL", 300):
with patch.object(eq, "try_acquire_lock", return_value=True):
with patch.object(eq, "release_lock"):
with patch.object(eq, "get_redis_client", return_value=mock_client):
with patch.object(eq, "get_key_prefix", return_value="mes_wip"):
with patch.object(eq, "_load_equipment_status_from_oracle", return_value=[{"RESOURCEID": "R001"}]) as mock_oracle:
with patch.object(eq, "_aggregate_by_resourceid", return_value=[{"RESOURCEID": "R001"}]):
with patch.object(eq, "_save_to_redis", return_value=True) as mock_save:
result = eq.refresh_equipment_status_cache(force=False)
assert result is True
mock_oracle.assert_called_once()
mock_save.assert_called_once()
def test_refresh_proceeds_when_force(self):
"""Should bypass freshness gate when force=True."""
import mes_dashboard.services.realtime_equipment_cache as eq
with patch.object(eq, "_SYNC_INTERVAL", 300):
with patch.object(eq, "try_acquire_lock", return_value=True):
with patch.object(eq, "release_lock"):
with patch.object(eq, "get_redis_client") as mock_get_redis_client:
with patch.object(eq, "_load_equipment_status_from_oracle", return_value=[{"RESOURCEID": "R001"}]) as mock_oracle:
with patch.object(eq, "_aggregate_by_resourceid", return_value=[{"RESOURCEID": "R001"}]):
with patch.object(eq, "_save_to_redis", return_value=True) as mock_save:
result = eq.refresh_equipment_status_cache(force=True)
assert result is True
mock_oracle.assert_called_once()
mock_save.assert_called_once()
mock_get_redis_client.assert_not_called()
def test_sync_worker_waits_before_first_refresh(self):
"""Sync worker should not refresh immediately on startup."""
import mes_dashboard.services.realtime_equipment_cache as eq
class StopImmediatelyEvent:
def __init__(self):
self.timeouts = []
def wait(self, timeout=None):
self.timeouts.append(timeout)
return True
fake_stop_event = StopImmediatelyEvent()
with patch.object(eq, "_STOP_EVENT", fake_stop_event):
with patch.object(eq, "refresh_equipment_status_cache") as mock_refresh:
eq._sync_worker(interval=300)
mock_refresh.assert_not_called()
assert fake_stop_event.timeouts == [300]
class TestSharedQueryFragments:
"""Test shared SQL fragment governance for equipment cache.""" """Test shared SQL fragment governance for equipment cache."""
def test_equipment_load_uses_shared_sql_fragment(self): def test_equipment_load_uses_shared_sql_fragment(self):