feat: 新增設備即時狀態快取與合併查詢 API

- 新增 realtime_equipment_cache 模組,從 DW_MES_EQUIPMENTSTATUS_WIP_V 同步設備即時狀態
- 新增 resource_service 合併三層快取(resource-cache、realtime-equipment、workcenter-mapping)
- 新增 /api/resource/status/* API 端點提供設備狀態查詢
- 更新 health_routes 顯示 realtime equipment cache 狀態
- 更新 portal.html 顯示設備即時快取資訊
- 重構 resource_status.html 前端頁面
- 新增相關 OpenSpec 規格文件與測試

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
beabigegg
2026-01-30 07:51:30 +08:00
parent 23e105e92c
commit f823d8cefd
27 changed files with 4953 additions and 1048 deletions

View File

@@ -0,0 +1,2 @@
schema: spec-driven
created: 2026-01-29

View File

@@ -0,0 +1,312 @@
-- ============================================================
-- 資料調查 SQL - realtime-equipment-status
-- 執行環境:Oracle (DWH)
-- 注意:所有表名需加上 DWH. schema 前綴
-- ============================================================
-- ============================================================
-- 1. 狀態值對應調查
-- 比較 EQUIPMENTSTATUS_WIP_V 與 RESOURCESTATUS 的狀態值域
-- ============================================================
-- 1.1 Status-value distribution in EQUIPMENTSTATUS_WIP_V.
-- Counts every (status, reason) pair in the realtime view, most frequent first.
SELECT
    'EQUIPMENTSTATUS_WIP_V' AS SOURCE,
    EQUIPMENTASSETSSTATUS AS STATUS_VALUE,
    EQUIPMENTASSETSSTATUSREASON AS STATUS_REASON,
    COUNT(*) AS CNT
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V
GROUP BY
    EQUIPMENTASSETSSTATUS,
    EQUIPMENTASSETSSTATUSREASON
ORDER BY COUNT(*) DESC;
-- 1.2 Status-value distribution of the existing machine-status query
--     (latest record per resource, taken from RESOURCESTATUS history).
-- Note: this query may be slow; run it selectively.
-- NOTE(review): the join condition r.RESOURCEID = s.HISTORYID pairs a
-- resource key with a history key. Confirm against the DW schema that
-- HISTORYID really carries the resource id -- s.RESOURCEID would be the
-- more obvious join column if it exists.
SELECT
'RESOURCESTATUS' AS SOURCE,
NEWSTATUSNAME AS STATUS_VALUE,
NEWREASONNAME AS STATUS_REASON,
COUNT(*) AS CNT
FROM (
-- Inner query: rank each resource's status records newest-first,
-- so rn = 1 is the latest status per RESOURCEID.
SELECT
r.RESOURCEID,
s.NEWSTATUSNAME,
s.NEWREASONNAME,
ROW_NUMBER() OVER (
PARTITION BY r.RESOURCEID
ORDER BY s.LASTSTATUSCHANGEDATE DESC NULLS LAST
) AS rn
FROM DWH.DW_MES_RESOURCE r
JOIN DWH.DW_MES_RESOURCESTATUS s ON r.RESOURCEID = s.HISTORYID
WHERE (r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT')
) WHERE rn = 1
GROUP BY NEWSTATUSNAME, NEWREASONNAME
ORDER BY CNT DESC;
-- ============================================================
-- 2. 資料覆蓋範圍調查
-- 確認 resource-cache 設備是否都能在 EQUIPMENTSTATUS_WIP_V 找到
-- ============================================================
-- 2.1 Number of equipment rows that pass the resource-cache filter.
SELECT
    COUNT(*) AS RESOURCE_CACHE_COUNT
FROM DWH.DW_MES_RESOURCE res
WHERE ((res.OBJECTCATEGORY = 'ASSEMBLY' AND res.OBJECTTYPE = 'ASSEMBLY')
       OR (res.OBJECTCATEGORY = 'WAFERSORT' AND res.OBJECTTYPE = 'WAFERSORT'))
  AND (res.LOCATIONNAME NOT IN
       ('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂')
       OR res.LOCATIONNAME IS NULL)
  AND (res.PJ_ASSETSSTATUS NOT IN ('Disapproved') OR res.PJ_ASSETSSTATUS IS NULL);
-- 2.2 Row and distinct-key counts in EQUIPMENTSTATUS_WIP_V.
-- A gap between row count and DISTINCT_RESOURCEID indicates duplicate
-- rows per device (see section 6).
SELECT
    COUNT(*) AS EQUIPMENTSTATUS_COUNT,
    COUNT(DISTINCT RESOURCEID) AS DISTINCT_RESOURCEID,
    COUNT(DISTINCT EQUIPMENTID) AS DISTINCT_EQUIPMENTID
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V;
-- 2.3 Equipment present in resource-cache but absent from EQUIPMENTSTATUS_WIP_V.
-- Rewritten from NOT IN (subquery) to NOT EXISTS: NOT EXISTS is null-safe by
-- construction (no need for the explicit RESOURCEID IS NOT NULL guard) and
-- lets the optimizer use an anti-join.
SELECT
    r.RESOURCEID,
    r.RESOURCENAME,
    r.WORKCENTERNAME,
    r.PJ_ASSETSSTATUS,
    r.OBJECTCATEGORY
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND NOT EXISTS (
    SELECT 1
    FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e
    WHERE e.RESOURCEID = r.RESOURCEID
);
-- 2.4 Distribution of the missing equipment from 2.3, grouped by workcenter.
-- NOT IN (subquery) replaced with the null-safe NOT EXISTS anti-join,
-- matching the rewrite of query 2.3.
SELECT
    r.WORKCENTERNAME,
    r.PJ_ASSETSSTATUS,
    COUNT(*) AS MISSING_COUNT
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND NOT EXISTS (
    SELECT 1
    FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e
    WHERE e.RESOURCEID = r.RESOURCEID
)
GROUP BY r.WORKCENTERNAME, r.PJ_ASSETSSTATUS
ORDER BY MISSING_COUNT DESC;
-- ============================================================
-- 3. 工站對應調查
-- 確認 WORKCENTERNAME 與 WORK_CENTER 的對應關係
-- ============================================================
-- 3.1 Full contents of DW_MES_SPEC_WORKCENTER_V (only ~230 rows).
SELECT
    SPEC,
    WORK_CENTER,
    WORK_CENTER_GROUP,
    WORK_CENTER_SEQUENCE,
    WORKCENTERSEQUENCE_GROUP,
    WORK_CENTER_SHORT
FROM DWH.DW_MES_SPEC_WORKCENTER_V
ORDER BY
    WORKCENTERSEQUENCE_GROUP,
    WORK_CENTER;
-- 3.2 WORKCENTERNAME values present in resource-cache, with equipment counts.
-- Removed the redundant DISTINCT: GROUP BY WORKCENTERNAME already yields one
-- row per workcenter, so SELECT DISTINCT on top of the aggregation was a no-op.
SELECT
    WORKCENTERNAME,
    COUNT(*) AS RESOURCE_COUNT
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
GROUP BY WORKCENTERNAME
ORDER BY WORKCENTERNAME;
-- 3.3 Distinct WORK_CENTER entries in SPEC_WORKCENTER_V with their grouping.
SELECT DISTINCT
    WORK_CENTER,
    WORK_CENTER_GROUP,
    WORKCENTERSEQUENCE_GROUP
FROM DWH.DW_MES_SPEC_WORKCENTER_V
ORDER BY
    WORKCENTERSEQUENCE_GROUP,
    WORK_CENTER;
-- 3.4 resource-cache WORKCENTERNAME values with no match in SPEC_WORKCENTER_V.
-- Rewritten from NOT IN to NOT EXISTS: NOT IN silently dropped resources whose
-- WORKCENTERNAME is NULL (NULL NOT IN (...) evaluates to UNKNOWN), yet those
-- are exactly rows that cannot be mapped to a WORK_CENTER_GROUP. NOT EXISTS
-- keeps them in the result.
SELECT
    r.WORKCENTERNAME,
    COUNT(*) AS RESOURCE_COUNT
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND NOT EXISTS (
    SELECT 1
    FROM DWH.DW_MES_SPEC_WORKCENTER_V w
    WHERE w.WORK_CENTER = r.WORKCENTERNAME
)
GROUP BY r.WORKCENTERNAME
ORDER BY RESOURCE_COUNT DESC;
-- ============================================================
-- 4. 關聯欄位驗證
-- 確認 RESOURCEID / EQUIPMENTID 的對應關係
-- ============================================================
-- 4.1 Sample RESOURCEID vs EQUIPMENTID pairs from EQUIPMENTSTATUS_WIP_V.
-- Changed ROWNUM <= 20 to ORDER BY + FETCH FIRST (consistent with query 6.5):
-- ROWNUM without ORDER BY returns an arbitrary, plan-dependent sample, so
-- repeated runs of this investigation were not comparable.
SELECT
    RESOURCEID,
    EQUIPMENTID,
    OBJECTCATEGORY,
    EQUIPMENTASSETSSTATUS
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V
ORDER BY RESOURCEID
FETCH FIRST 20 ROWS ONLY;
-- 4.2 Compare RESOURCE.RESOURCENAME with EQUIPMENTSTATUS_WIP_V.EQUIPMENTID.
-- ROWNUM <= 50 replaced with ORDER BY + FETCH FIRST so the sample is
-- deterministic across runs (consistent with queries 4.1 and 6.5).
SELECT
    r.RESOURCEID,
    r.RESOURCENAME AS RESOURCE_NAME,
    e.EQUIPMENTID AS EQUIPMENT_ID,
    CASE WHEN r.RESOURCENAME = e.EQUIPMENTID THEN 'MATCH' ELSE 'DIFFER' END AS NAME_MATCH
FROM DWH.DW_MES_RESOURCE r
JOIN DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e ON r.RESOURCEID = e.RESOURCEID
ORDER BY r.RESOURCEID
FETCH FIRST 50 ROWS ONLY;
-- 4.3 Match rate between RESOURCE.RESOURCENAME and
--     EQUIPMENTSTATUS_WIP_V.EQUIPMENTID across all joined rows.
SELECT
    NAME_MATCH,
    COUNT(*) AS CNT
FROM (
    -- Classify each joined row once; grouping on the derived column avoids
    -- repeating the CASE expression in GROUP BY.
    SELECT
        CASE WHEN r.RESOURCENAME = e.EQUIPMENTID THEN 'MATCH' ELSE 'DIFFER' END AS NAME_MATCH
    FROM DWH.DW_MES_RESOURCE r
    JOIN DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e ON r.RESOURCEID = e.RESOURCEID
)
GROUP BY NAME_MATCH;
-- ============================================================
-- 5. 快速摘要查詢(建議先執行此區塊)
-- ============================================================
-- 5.1 Row-count summary for the three source tables
--     (filtered resource master, realtime view, workcenter mapping).
SELECT 'DW_MES_RESOURCE (filtered)' AS TABLE_NAME, COUNT(*) AS ROW_COUNT
FROM DWH.DW_MES_RESOURCE res
WHERE ((res.OBJECTCATEGORY = 'ASSEMBLY' AND res.OBJECTTYPE = 'ASSEMBLY')
       OR (res.OBJECTCATEGORY = 'WAFERSORT' AND res.OBJECTTYPE = 'WAFERSORT'))
  AND (res.LOCATIONNAME IS NULL OR res.LOCATIONNAME NOT IN
       ('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
  AND (res.PJ_ASSETSSTATUS IS NULL OR res.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
UNION ALL
SELECT 'DW_MES_EQUIPMENTSTATUS_WIP_V' AS TABLE_NAME, COUNT(*) AS ROW_COUNT
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V
UNION ALL
SELECT 'DW_MES_SPEC_WORKCENTER_V' AS TABLE_NAME, COUNT(*) AS ROW_COUNT
FROM DWH.DW_MES_SPEC_WORKCENTER_V;
-- 5.2 Join success-rate summary.
-- NOTE(review): MATCHED_WITH_EQUIPSTATUS counts join ROWS, not distinct
-- resources -- a device with several lots in EQUIPMENTSTATUS_WIP_V is counted
-- once per lot, so the figure can exceed TOTAL_RESOURCES. Query 6.2 below
-- recomputes it with COUNT(DISTINCT ...).
SELECT
-- Resources passing the resource-cache filter.
(SELECT COUNT(*) FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
) AS TOTAL_RESOURCES,
-- Join rows (not distinct resources) that match the realtime view.
(SELECT COUNT(*) FROM DWH.DW_MES_RESOURCE r
JOIN DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e ON r.RESOURCEID = e.RESOURCEID
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
) AS MATCHED_WITH_EQUIPSTATUS,
-- Distinct workcenter names among the filtered resources.
(SELECT COUNT(DISTINCT r.WORKCENTERNAME) FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
) AS TOTAL_WORKCENTERS,
-- Workcenter names that also exist in SPEC_WORKCENTER_V.
(SELECT COUNT(DISTINCT r.WORKCENTERNAME) FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND r.WORKCENTERNAME IN (SELECT DISTINCT WORK_CENTER FROM DWH.DW_MES_SPEC_WORKCENTER_V)
) AS MATCHED_WITH_SPECWC
FROM DUAL;
-- ============================================================
-- 6. 追加調查(基於 5.2 結果)
-- ============================================================
-- 6.1 RESOURCEIDs that appear more than once in the realtime view.
-- Duplicates mean one device can carry several status records
-- (e.g. multiple repair job orders or WIP lots).
SELECT
    RESOURCEID,
    COUNT(*) AS CNT
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V
GROUP BY RESOURCEID
HAVING COUNT(*) > 1
ORDER BY COUNT(*) DESC;
-- 6.2 Distinct resource-cache devices that have at least one realtime row
--     (DISTINCT corrects the row-multiplication seen in 5.2).
SELECT
    COUNT(DISTINCT res.RESOURCEID) AS MATCHED_DISTINCT_RESOURCES
FROM DWH.DW_MES_RESOURCE res
INNER JOIN DWH.DW_MES_EQUIPMENTSTATUS_WIP_V eq
    ON eq.RESOURCEID = res.RESOURCEID
WHERE ((res.OBJECTCATEGORY = 'ASSEMBLY' AND res.OBJECTTYPE = 'ASSEMBLY')
       OR (res.OBJECTCATEGORY = 'WAFERSORT' AND res.OBJECTTYPE = 'WAFERSORT'))
  AND (res.LOCATIONNAME IS NULL OR res.LOCATIONNAME NOT IN
       ('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
  AND (res.PJ_ASSETSSTATUS IS NULL OR res.PJ_ASSETSSTATUS NOT IN ('Disapproved'));
-- 6.3 resource-cache devices with no row at all in EQUIPMENTSTATUS_WIP_V.
-- NOT IN (subquery) replaced with NOT EXISTS: null-safe without the explicit
-- IS NOT NULL guard, and eligible for an anti-join plan (same rewrite as 2.3).
SELECT
    r.RESOURCEID,
    r.RESOURCENAME,
    r.WORKCENTERNAME,
    r.PJ_ASSETSSTATUS
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND NOT EXISTS (
    SELECT 1
    FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e
    WHERE e.RESOURCEID = r.RESOURCEID
);
-- 6.4 Count of devices missing from the realtime view (rows listed by 6.3).
-- NOT IN (subquery) replaced with the null-safe NOT EXISTS anti-join.
SELECT
    COUNT(*) AS MISSING_COUNT
FROM DWH.DW_MES_RESOURCE r
WHERE ((r.OBJECTCATEGORY = 'ASSEMBLY' AND r.OBJECTTYPE = 'ASSEMBLY')
OR (r.OBJECTCATEGORY = 'WAFERSORT' AND r.OBJECTTYPE = 'WAFERSORT'))
AND (r.LOCATIONNAME IS NULL OR r.LOCATIONNAME NOT IN
('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂'))
AND (r.PJ_ASSETSSTATUS IS NULL OR r.PJ_ASSETSSTATUS NOT IN ('Disapproved'))
AND NOT EXISTS (
    SELECT 1
    FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V e
    WHERE e.RESOURCEID = r.RESOURCEID
);
-- 6.5 Detail rows for duplicated RESOURCEIDs (first 20 rows by RESOURCEID).
SELECT v.*
FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V v
WHERE EXISTS (
    SELECT 1
    FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V d
    WHERE d.RESOURCEID = v.RESOURCEID
    GROUP BY d.RESOURCEID
    HAVING COUNT(*) > 1
)
ORDER BY v.RESOURCEID
FETCH FIRST 20 ROWS ONLY;

View File

@@ -0,0 +1,287 @@
## Context
### 現況
現有機台狀況表透過 `DW_MES_RESOURCE` + `DW_MES_RESOURCESTATUS` 組合查詢取得設備最新狀態:
- 需要 ROW_NUMBER 視窗函數 + 時間條件篩選最新記錄
- 每次查詢掃描約 6,500 萬筆歷史記錄
- 狀態可能延遲數小時(依賴歷史表同步頻率)
### 資料調查結論
| 項目 | 結果 |
|------|------|
| resource-cache 設備數 | 1,804 |
| 能對應即時狀態的設備 | 1,803 (99.94%) |
| 工站對應 WORK_CENTER_GROUP | 18/18 (100%) |
| EQUIPMENTSTATUS_WIP_V 重複記錄 | 有(同設備多 LOT) |
| 狀態值相容性 | E10 標準 + 少量非標準 |
### 約束條件
- Redis 已部署,可用於快取
- 現有 `resource-cache` 機制已實作(4 小時同步一次)
- 現有 `filter_cache` 從 WIP 視圖取得 workcenter_group 對照
- 前端機台狀況表已有完整 UI,僅需調整資料來源
---
## Goals / Non-Goals
### Goals
1. **提供即時設備狀態**:從 `DW_MES_EQUIPMENTSTATUS_WIP_V` 取得真正即時的設備狀態5 分鐘同步一次
2. **擴充設備資訊**:新增維修工單、當前 WIP、Track-In 等欄位
3. **統一工站分組**:使用 `DW_MES_SPEC_WORKCENTER_V` 提供一致的 WORK_CENTER_GROUP 對照
4. **保持向後相容**:現有 API 回應結構維持相容,新欄位為追加
### Non-Goals
1. **不取代 resource-cache**:resource-cache 仍作為設備主檔來源,提供篩選欄位
2. **不改變篩選邏輯**:維持現有的設備篩選條件(OBJECTCATEGORY、LOCATIONNAME、PJ_ASSETSSTATUS)
3. **不實作 WIP 詳細列表**:多 LOT 情況僅提供聚合數據,不展開明細
4. **不修改設備歷史績效功能**:該功能使用 RESOURCESTATUS_SHIFT不受影響
---
## Decisions
### Decision 1: 快取架構 - 三層快取組合
**選擇**:新增兩個獨立快取,與現有 resource-cache 組合查詢
```
┌─────────────────────┐ ┌─────────────────────────┐ ┌─────────────────────────┐
│ resource-cache │ │ realtime-equipment-cache│ │ workcenter-mapping-cache│
│ DW_MES_RESOURCE │ │ EQUIPMENTSTATUS_WIP_V │ │ SPEC_WORKCENTER_V │
│ (4 小時同步) │ │ (5 分鐘同步) │ │ (每天同步) │
│ ~1,804 筆 │ │ ~2,607 筆 │ │ ~230 筆 │
└─────────┬───────────┘ └───────────┬─────────────┘ └───────────┬─────────────┘
│ │ │
│ RESOURCEID │ RESOURCEID │ WORK_CENTER
└──────────┬───────────────┘ │
│ │
v │
┌─────────────────────┐ │
│ Python 端合併 │◄────── WORKCENTERNAME ──────────┘
│ (查詢時 JOIN) │
└─────────────────────┘
```
**替代方案考量**
- ❌ 單一大快取:同步頻率不同,且 resource-cache 已存在
- ❌ 直接查 Oracle:無法達到即時性要求,查詢成本高
**理由**
- 各快取獨立同步,頻率可分別調整
- 複用現有 resource-cache降低改動範圍
- workcenter-mapping 變動極少,每天同步即可
---
### Decision 2: 即時狀態快取結構 - 預聚合 by RESOURCEID
**選擇**:同步時預先 GROUP BY RESOURCEID儲存聚合後資料
**Redis 結構**
```
{prefix}:equipment_status:data → JSON Array (每設備一筆,已聚合)
{prefix}:equipment_status:index → Hash {RESOURCEID → array index} (快速查找)
{prefix}:equipment_status:meta:updated → ISO 8601 timestamp
{prefix}:equipment_status:meta:count → 記錄數
```
**單筆資料結構**
```json
{
"RESOURCEID": "488016800000036a",
"EQUIPMENTID": "DB-001",
"EQUIPMENTASSETSSTATUS": "PRD",
"EQUIPMENTASSETSSTATUSREASON": "Production RUN",
"JOBORDER": "J2026010001",
"JOBSTATUS": "Open",
"SYMPTOMCODE": null,
"CAUSECODE": null,
"REPAIRCODE": null,
"LOT_COUNT": 3,
"TOTAL_TRACKIN_QTY": 15000,
"LATEST_TRACKIN_TIME": "2026-01-29T10:30:00"
}
```
**替代方案考量**
- ❌ 儲存原始資料(未聚合):浪費空間,查詢時需再聚合
- ❌ 每設備一個 key:key 數量過多(~2,600),不利批次操作
**理由**
- 預聚合減少查詢時運算
- 單一 JSON Array 便於全量載入與篩選
- Index Hash 支援單筆快速查找
---
### Decision 3: 工站對照快取 - 整合進 filter_cache
**選擇**:擴充現有 `filter_cache.py`,新增 workcenter → group 對照
**理由**
- filter_cache 已有 workcenter_groups 邏輯,但來源是 WIP 視圖
- 改用 SPEC_WORKCENTER_V 作為權威來源,資料更完整(230 筆 vs WIP 中出現的子集)
- 避免新增獨立快取模組,降低複雜度
**變更**
```python
# filter_cache.py 新增
def _load_workcenter_mapping_from_spec():
"""從 DW_MES_SPEC_WORKCENTER_V 載入工站對照"""
sql = """
SELECT DISTINCT
WORK_CENTER,
WORK_CENTER_GROUP,
WORKCENTERSEQUENCE_GROUP,
WORK_CENTER_SHORT
FROM DWH.DW_MES_SPEC_WORKCENTER_V
"""
...
```
---
### Decision 4: 同步策略 - 全量覆蓋
**選擇**:每次同步全量載入並覆蓋
**理由**
- EQUIPMENTSTATUS_WIP_V 僅 ~2,600 筆,全量載入成本低
- 無版本欄位可做差異同步
- 避免增量同步的複雜度(刪除偵測、狀態一致性)
**同步流程**
```
1. 查詢 Oracle:SELECT * FROM EQUIPMENTSTATUS_WIP_V
2. Python 端 GROUP BY RESOURCEID 聚合
3. 建立 index mapping
4. Redis MULTI/EXEC 原子寫入
```
---
### Decision 5: 狀態值處理 - 保持原值 + 分類標籤
**選擇**:保留原始狀態值,額外提供 `STATUS_CATEGORY` 分類
**狀態分類規則**
```python
STATUS_CATEGORY_MAP = {
'PRD': 'PRODUCTIVE',
'SBY': 'STANDBY',
'UDT': 'DOWN',
'SDT': 'DOWN',
'EGT': 'ENGINEERING',
'NST': 'NOT_SCHEDULED',
'SCRAP': 'INACTIVE',
'設備-LOST': 'INACTIVE',
'設備-RUN': 'PRODUCTIVE', # 需確認
}
# 未列出的歸類為 'OTHER'
```
**理由**
- 保留原值供詳細顯示
- 分類標籤便於前端 UI 著色與統計
---
### Decision 6: API 設計 - 擴充現有 endpoint
**選擇**:擴充 `/api/resource/status` 回應,新增欄位
**新增欄位**
```json
{
"RESOURCENAME": "DB-001",
"WORKCENTERNAME": "焊接_DB",
"WORKCENTER_GROUP": "焊接", // 新增
"NEWSTATUSNAME": "PRD", // 來自即時快取
"NEWREASONNAME": "Production RUN", // 來自即時快取
"STATUS_CATEGORY": "PRODUCTIVE", // 新增
"JOBORDER": "J2026010001", // 新增
"JOBSTATUS": "Open", // 新增
"LOT_COUNT": 3, // 新增
"TOTAL_TRACKIN_QTY": 15000, // 新增
"LATEST_TRACKIN_TIME": "2026-01-29T10:30:00", // 新增
// ... 現有欄位保留
}
```
**Fallback 策略**
- 若 RESOURCEID 在即時快取找不到1/1804使用現有 RESOURCESTATUS 查詢
- 記錄 warning log
---
## Risks / Trade-offs
### Risk 1: 即時快取同步延遲
**風險**:5 分鐘同步間隔內,狀態變更不會反映
**緩解**
- 前端顯示「最後更新時間」
- 提供手動刷新按鈕(觸發 force refresh)
- 可調整 `EQUIPMENT_STATUS_SYNC_INTERVAL` 環境變數
### Risk 2: 聚合邏輯假設錯誤
**風險**:假設同 RESOURCEID 的 EQUIPMENTASSETSSTATUS 相同,若實際不同會取錯值
**緩解**
- 同步時檢查並記錄 warning
- 取 MAX 或最常出現的值
### Risk 3: SPEC_WORKCENTER_V 與 RESOURCE 的 WORKCENTERNAME 不完全匹配
**風險**:調查顯示 100% 匹配,但未來可能有新工站
**緩解**
- 無法匹配時 WORKCENTER_GROUP 回傳 null
- 每日同步 log 記錄未匹配的 WORKCENTERNAME
### Risk 4: 非標準狀態值增加
**風險**:未來可能出現更多非標準狀態(如 設備-LOST)
**緩解**
- STATUS_CATEGORY_MAP 設定檔化,便於更新
- 未知狀態歸類為 'OTHER',不影響系統運作
---
## Migration Plan
### Phase 1: 新增快取層(不影響現有功能)
1. 實作 `realtime_equipment_cache.py`
2. 擴充 `filter_cache.py` 新增 workcenter mapping
3. 新增環境變數與設定
4. 部署並驗證快取同步正常
### Phase 2: 整合至 API
1. 修改 `resource_service.py` 使用新快取
2. 擴充 API 回應欄位
3. 實作 fallback 邏輯
4. 更新 API 文件
### Phase 3: 前端整合
1. 更新機台狀況表顯示新欄位
2. 新增 WORKCENTER_GROUP 篩選器
3. 顯示 WIP 相關資訊LOT_COUNT 等)
### Rollback Strategy
- 環境變數 `REALTIME_EQUIPMENT_CACHE_ENABLED=false` 可完全停用新快取
- API 自動 fallback 到現有 RESOURCESTATUS 查詢邏輯
- 前端新欄位設計為 optional缺失時不顯示
---
## Open Questions
1. **`設備-RUN` 狀態含義**:需與業務確認,暫歸類為 PRODUCTIVE
2. **WIP 詳細資訊需求**:是否需要展開顯示每個 LOT還是僅顯示聚合數據
3. **同步頻率調整**:5 分鐘是否滿足業務需求?是否需要更頻繁?
4. **維修工單展開**:一台設備可能有多個工單,是否需要全部顯示?

View File

@@ -0,0 +1,120 @@
## Why
現有機台狀況表使用 `DW_MES_RESOURCE` + `DW_MES_RESOURCESTATUS` 組合查詢,需要複雜的 ROW_NUMBER + 時間視窗計算來推導「最新狀態」。這種方式有幾個問題:
1. **查詢複雜度高**:每次都要 JOIN 兩表並用視窗函數取最新記錄
2. **非真正即時**:依賴 RESOURCESTATUS 的最後 N 天歷史,實際可能延遲數小時
3. **效能負擔大**:Oracle 查詢量大(掃描 6,500 萬筆歷史記錄)
`DW_MES_EQUIPMENTSTATUS_WIP_V` 是 DWH 提供的**真正即時視圖**,透過 DB Link 直接查詢源頭,包含設備狀態、維修工單、當前 WIP 等 32 欄位,且資料量僅約 2,600 筆。
## What Changes
### 新增快取層
1. **即時設備狀態快取**5 分鐘同步)
- 來源:`DW_MES_EQUIPMENTSTATUS_WIP_V`
- 資料即時設備狀態、維修工單、WIP Track-In 資訊
- 篩選:使用 `resource-cache` 中的有效設備清單過濾
2. **工站對照快取**(每天同步)
- 來源:`DW_MES_SPEC_WORKCENTER_V`230 筆)
- 資料WORK_CENTER → WORK_CENTER_GROUP 對照、排序規則
### 資料組合邏輯
```
[resource-cache] [realtime-equipment-cache] [workcenter-mapping-cache]
DW_MES_RESOURCE DW_MES_EQUIPMENTSTATUS_WIP_V DW_MES_SPEC_WORKCENTER_V
(篩選後設備主檔) (即時狀態) (工站分組對照)
| | |
+--- RESOURCEID ----+--- RESOURCEID |
| |
v |
[合併後即時機況] |
(主檔欄位 + 即時狀態) |
| |
+--- WORKCENTERNAME ---+--- WORK_CENTER ---+
|
v
[完整機台狀況表]
(含 WORKCENTER_GROUP)
```
### 欄位來源對照
| 欄位 | 來源 | 備註 |
|------|------|------|
| RESOURCEID | resource-cache | 主鍵 |
| RESOURCENAME | resource-cache | 設備編號 |
| WORKCENTERNAME | resource-cache | 工站名稱 |
| RESOURCEFAMILYNAME | resource-cache | 設備族群 |
| PJ_DEPARTMENT | resource-cache | 部門 |
| PJ_ISPRODUCTION | resource-cache | 生產機 Flag |
| PJ_ISKEY | resource-cache | 關鍵機 Flag |
| PJ_ISMONITOR | resource-cache | 監控機 Flag |
| EQUIPMENTASSETSSTATUS | realtime-cache | 即時狀態 |
| EQUIPMENTASSETSSTATUSREASON | realtime-cache | 狀態原因 |
| JOBORDER | realtime-cache | 維修工單號 |
| JOBSTATUS | realtime-cache | 工單狀態 |
| SYMPTOMCODE | realtime-cache | 症狀代碼 |
| RUNCARDLOTID | realtime-cache | 當前 WIP 批次 |
| LOTTRACKINTIME | realtime-cache | Track-In 時間 |
| LOTTRACKINQTY_PCS | realtime-cache | Track-In 數量 |
| WORK_CENTER_GROUP | workcenter-mapping | 工站分組 |
| WORKCENTERSEQUENCE_GROUP | workcenter-mapping | 分組排序 |
## Capabilities
### New Capabilities
- `realtime-equipment-cache`: 即時設備狀態快取層,每 5 分鐘從 `DW_MES_EQUIPMENTSTATUS_WIP_V` 同步至 Redis,提供即時狀態、維修工單、WIP Track-In 資訊
- `workcenter-mapping-cache`: 工站對照快取層,每天從 `DW_MES_SPEC_WORKCENTER_V` 同步至 Redis,提供 WORK_CENTER → WORK_CENTER_GROUP 對照
### Modified Capabilities
- `resource-cache`: 擴充 API 支援與即時狀態快取合併查詢
## Impact
### 資料來源比較
| 面向 | 現有方式 | 新方式 |
|------|---------|--------|
| **即時狀態來源** | RESOURCE + RESOURCESTATUS JOIN | EQUIPMENTSTATUS_WIP_V (即時) |
| **掃描資料量** | ~9萬 + ~6500萬 | ~2,600 筆 |
| **查詢複雜度** | 視窗函數 + 時間條件 | 簡單全表 |
| **更新頻率** | 查詢時計算 | 5 分鐘快取 |
| **篩選欄位** | 直接可用 | 從 resource-cache 補充 |
### 關鍵對應關係
| 關聯 | 來源表 | 目標表 | 關聯欄位 |
|------|--------|--------|---------|
| 設備主檔 ↔ 即時狀態 | resource-cache | EQUIPMENTSTATUS_WIP_V | RESOURCEID |
| 設備 ↔ 工站分組 | resource-cache | SPEC_WORKCENTER_V | WORKCENTERNAME ↔ WORK_CENTER |
### 待確認事項
1. **狀態值對應**`EQUIPMENTASSETSSTATUS` (PRD, IDLE...) vs `NEWSTATUSNAME` (PRD, SBY, UDT...)
- 需確認值域是否相同或需要 mapping
2. **資料覆蓋範圍**
- EQUIPMENTSTATUS_WIP_V 約 2,631 筆
- resource-cache 篩選後約 3,000+ 台
- 差異可能是「無狀態」的設備,需確認處理方式
3. **WORK_CENTER 對應**
- resource-cache 的 WORKCENTERNAME 與 SPEC_WORKCENTER_V 的 WORK_CENTER 是否完全對應
- 是否有 WORKCENTERNAME 找不到對應 WORK_CENTER_GROUP 的情況
### 受影響程式碼
- 新增:`src/mes_dashboard/services/realtime_equipment_cache.py`
- 新增:`src/mes_dashboard/services/workcenter_mapping_cache.py`
- 修改:`src/mes_dashboard/services/resource_service.py` - 改用快取組合查詢
- 修改:`src/mes_dashboard/routes/resource_routes.py` - API 回應結構可能調整
- 修改:`src/mes_dashboard/templates/resource_status.html` - 新增欄位顯示
### 依賴項
- Redis (已部署)
- 現有 `resource-cache` 機制
- 現有 `filter_cache` 機制(可考慮整合 workcenter-mapping)

View File

@@ -0,0 +1,157 @@
## ADDED Requirements
### Requirement: Realtime Equipment Status Data Storage
系統 SHALL 將 `DW_MES_EQUIPMENTSTATUS_WIP_V` 資料(預聚合後)以 JSON 格式儲存於 Redis。
#### Scenario: Data stored with correct keys
- **WHEN** 快取同步完成後
- **THEN** Redis SHALL 包含以下 keys
- `{prefix}:equipment_status:data` - 聚合後設備狀態資料JSON 陣列)
- `{prefix}:equipment_status:index` - RESOURCEID → array index 的 Hash mapping
- `{prefix}:equipment_status:meta:updated` - 快取更新時間ISO 8601 格式)
- `{prefix}:equipment_status:meta:count` - 記錄筆數
#### Scenario: Data aggregated by RESOURCEID
- **WHEN** 從 Oracle 載入資料時
- **THEN** 系統 SHALL 以 RESOURCEID 為 key 進行聚合
- **AND** 狀態欄位EQUIPMENTASSETSSTATUS, EQUIPMENTASSETSSTATUSREASON取任一筆同 RESOURCEID 應相同)
- **AND** LOT_COUNT 為該 RESOURCEID 的記錄數
- **AND** TOTAL_TRACKIN_QTY 為 LOTTRACKINQTY_PCS 的加總
- **AND** LATEST_TRACKIN_TIME 為 LOTTRACKINTIME 的最大值
#### Scenario: Single record structure
- **WHEN** 查詢單筆聚合後資料
- **THEN** 資料結構 SHALL 包含:
- `RESOURCEID`: 設備 ID
- `EQUIPMENTID`: 設備編號
- `OBJECTCATEGORY`: 設備類別
- `EQUIPMENTASSETSSTATUS`: 設備狀態
- `EQUIPMENTASSETSSTATUSREASON`: 狀態原因
- `STATUS_CATEGORY`: 狀態分類PRODUCTIVE/STANDBY/DOWN/ENGINEERING/NOT_SCHEDULED/INACTIVE/OTHER
- `JOBORDER`: 維修工單號(若有)
- `JOBSTATUS`: 工單狀態(若有)
- `SYMPTOMCODE`: 症狀代碼(若有)
- `CAUSECODE`: 故障原因代碼(若有)
- `REPAIRCODE`: 維修處置代碼(若有)
- `LOT_COUNT`: 當前 WIP 批次數
- `TOTAL_TRACKIN_QTY`: Track-In 總數量
- `LATEST_TRACKIN_TIME`: 最新 Track-In 時間
#### Scenario: Atomic update with pipeline
- **WHEN** 快取同步執行時
- **THEN** 系統 SHALL 使用 Redis pipeline 確保所有 keys 原子更新
---
### Requirement: Realtime Equipment Status Background Sync
系統 SHALL 提供背景任務,定期同步 `DW_MES_EQUIPMENTSTATUS_WIP_V` 至 Redis 快取。
#### Scenario: Periodic sync at configured interval
- **WHEN** 應用程式啟動後
- **THEN** 背景任務 SHALL 每 `EQUIPMENT_STATUS_SYNC_INTERVAL` 秒(預設 300 秒 = 5 分鐘)執行同步
#### Scenario: Full table sync each time
- **WHEN** 背景任務執行時
- **THEN** 系統 SHALL 執行全表查詢並覆蓋快取
- **AND** 記錄同步耗時與記錄數至 info 日誌
#### Scenario: Initial cache load on startup
- **WHEN** 應用程式啟動時
- **THEN** 系統 SHALL 立即執行一次快取同步
#### Scenario: Force refresh API
- **WHEN** 呼叫 `refresh_equipment_status_cache(force=True)`
- **THEN** 系統 SHALL 立即執行快取同步,不等待下次排程
---
### Requirement: Realtime Equipment Status Query API
系統 SHALL 提供 API 從 Redis 快取查詢即時設備狀態。
#### Scenario: Get all equipment status
- **WHEN** 呼叫 `get_all_equipment_status()`
- **THEN** 系統 SHALL 回傳快取中所有設備狀態資料List[Dict]
#### Scenario: Get status by RESOURCEID
- **WHEN** 呼叫 `get_equipment_status_by_id(resource_id)`
- **THEN** 系統 SHALL 使用 index hash 快速查找並回傳對應資料Dict
- **AND** 若 ID 不存在則回傳 `None`
#### Scenario: Get status by multiple RESOURCEIDs
- **WHEN** 呼叫 `get_equipment_status_by_ids(resource_ids)`
- **THEN** 系統 SHALL 回傳所有匹配的設備狀態List[Dict]
- **AND** 不存在的 ID 不會出現在結果中
---
### Requirement: Realtime Equipment Status Cache Status API
系統 SHALL 提供 API 查詢快取狀態。
#### Scenario: Get cache status
- **WHEN** 呼叫 `get_equipment_status_cache_status()`
- **THEN** 系統 SHALL 回傳包含以下欄位的 Dict
- `enabled`: 快取是否啟用
- `loaded`: 快取是否已載入
- `count`: 快取記錄數
- `updated_at`: 最後同步時間
---
### Requirement: Realtime Equipment Status Fallback
當 Redis 不可用時,系統 SHALL 記錄錯誤並回傳空結果。
#### Scenario: Redis unavailable
- **WHEN** Redis 連線失敗或超時
- **THEN** 系統 SHALL 記錄 error 日誌
- **AND** 回傳空列表
#### Scenario: Cache disabled by config
- **WHEN** 環境變數 `REALTIME_EQUIPMENT_CACHE_ENABLED` 設為 `false`
- **THEN** 系統 SHALL 完全跳過 Redis
- **AND** 背景同步任務 SHALL 不啟動
- **AND** 所有查詢 API 回傳空結果
---
### Requirement: Status Category Classification
系統 SHALL 為每個狀態值提供分類標籤。
#### Scenario: Standard E10 status classification
- **WHEN** 狀態值為標準 E10 狀態
- **THEN** 系統 SHALL 依據以下規則分類:
- `PRD``PRODUCTIVE`
- `SBY``STANDBY`
- `UDT``DOWN`
- `SDT``DOWN`
- `EGT``ENGINEERING`
- `NST``NOT_SCHEDULED`
#### Scenario: Non-standard status classification
- **WHEN** 狀態值為非標準狀態
- **THEN** 系統 SHALL 依據以下規則分類:
- `SCRAP``INACTIVE`
- `設備-LOST``INACTIVE`
- `設備-RUN``PRODUCTIVE`
- 其他未知狀態 → `OTHER`
---
### Requirement: Realtime Equipment Status Configuration
系統 SHALL 支援透過環境變數配置快取行為。
#### Scenario: Custom sync interval
- **WHEN** 環境變數 `EQUIPMENT_STATUS_SYNC_INTERVAL` 設為 `600`
- **THEN** 背景任務 SHALL 每 600 秒10 分鐘)執行一次
#### Scenario: Default configuration
- **WHEN** 環境變數未設定
- **THEN** 系統 SHALL 使用預設值:
- `REALTIME_EQUIPMENT_CACHE_ENABLED`: `true`
- `EQUIPMENT_STATUS_SYNC_INTERVAL`: `300`5 分鐘)

View File

@@ -0,0 +1,168 @@
## ADDED Requirements
### Requirement: Resource Status Merged Query API
系統 SHALL 提供 API 合併 resource-cache 與 realtime-equipment-cache 資料。
#### Scenario: Get merged resource status
- **WHEN** 呼叫 `get_merged_resource_status()`
- **THEN** 系統 SHALL 回傳合併後的設備狀態清單
- **AND** 每筆資料包含 resource-cache 的主檔欄位RESOURCENAME, WORKCENTERNAME, RESOURCEFAMILYNAME, PJ_DEPARTMENT, PJ_ISPRODUCTION, PJ_ISKEY, PJ_ISMONITOR
- **AND** 每筆資料包含 realtime-equipment-cache 的即時欄位EQUIPMENTASSETSSTATUS, EQUIPMENTASSETSSTATUSREASON, STATUS_CATEGORY, JOBORDER, JOBSTATUS, LOT_COUNT, TOTAL_TRACKIN_QTY, LATEST_TRACKIN_TIME
- **AND** 每筆資料包含 workcenter-mapping-cache 的分組欄位WORKCENTER_GROUP, WORKCENTER_SHORT
#### Scenario: Merge by RESOURCEID
- **WHEN** 合併資料時
- **THEN** 系統 SHALL 以 resource-cache 為主表
- **AND** 使用 RESOURCEID 作為 JOIN key 與 realtime-equipment-cache 合併
- **AND** 使用 WORKCENTERNAME 作為 JOIN key 與 workcenter-mapping-cache 合併
#### Scenario: Handle missing realtime status
- **WHEN** resource-cache 中的設備在 realtime-equipment-cache 找不到對應資料
- **THEN** 即時欄位 SHALL 回傳 `None`
- **AND** 記錄 debug 日誌
#### Scenario: Handle missing workcenter group
- **WHEN** WORKCENTERNAME 在 workcenter-mapping-cache 找不到對應
- **THEN** WORKCENTER_GROUP 與 WORKCENTER_SHORT SHALL 回傳 `None`
---
### Requirement: Resource Status Merged Query with Filter
系統 SHALL 支援帶篩選條件的合併查詢。
#### Scenario: Filter by workcenter groups
- **WHEN** 呼叫 `get_merged_resource_status(workcenter_groups=['焊接', '成型'])`
- **THEN** 系統 SHALL 只回傳 WORKCENTER_GROUP 在指定清單中的設備
#### Scenario: Filter by equipment flags
- **WHEN** 呼叫 `get_merged_resource_status(is_production=True, is_key=True)`
- **THEN** 系統 SHALL 只回傳符合 PJ_ISPRODUCTION=1 且 PJ_ISKEY=1 的設備
#### Scenario: Filter by status category
- **WHEN** 呼叫 `get_merged_resource_status(status_categories=['PRODUCTIVE', 'STANDBY'])`
- **THEN** 系統 SHALL 只回傳 STATUS_CATEGORY 在指定清單中的設備
#### Scenario: Combined filters
- **WHEN** 呼叫 `get_merged_resource_status(workcenter_groups=['焊接'], is_production=True, status_categories=['DOWN'])`
- **THEN** 系統 SHALL 回傳同時符合所有條件的設備
---
### Requirement: Resource Status Summary Statistics
系統 SHALL 提供設備狀態統計摘要 API。
#### Scenario: Get status summary
- **WHEN** 呼叫 `get_resource_status_summary()`
- **THEN** 系統 SHALL 回傳包含以下統計的 Dict
- `total_count`: 設備總數
- `by_status_category`: 各 STATUS_CATEGORY 的設備數
- `by_workcenter_group`: 各 WORKCENTER_GROUP 的設備數
- `with_active_job`: 有維修工單的設備數
- `with_wip`: 有 WIP 的設備數LOT_COUNT > 0
#### Scenario: Summary respects filters
- **WHEN** 呼叫 `get_resource_status_summary(workcenter_groups=['焊接'])`
- **THEN** 統計 SHALL 只計算符合篩選條件的設備
---
### Requirement: Resource Status Workcenter Matrix
系統 SHALL 提供工站 × 狀態矩陣 API。
#### Scenario: Get workcenter status matrix
- **WHEN** 呼叫 `get_workcenter_status_matrix()`
- **THEN** 系統 SHALL 回傳 List[Dict],每筆包含:
- `workcenter_group`: 工站分組名稱
- `workcenter_sequence`: 排序序號
- `total`: 該分組設備總數
- `PRD`: 狀態為 PRD 的數量
- `SBY`: 狀態為 SBY 的數量
- `UDT`: 狀態為 UDT 的數量
- `SDT`: 狀態為 SDT 的數量
- `EGT`: 狀態為 EGT 的數量
- `NST`: 狀態為 NST 的數量
- `OTHER`: 其他狀態的數量
#### Scenario: Matrix sorted by sequence
- **WHEN** 回傳矩陣資料
- **THEN** 資料 SHALL 按 workcenter_sequence 升序排列
---
### Requirement: Health Check Integration
健康檢查 SHALL 包含即時設備狀態快取狀態。
#### Scenario: Equipment status cache in health check
- **WHEN** 呼叫 `GET /health`
- **THEN** 回應 body SHALL 包含 `equipment_status_cache` 區塊:
```json
{
"equipment_status_cache": {
"enabled": true,
"loaded": true,
"count": 1803,
"updated_at": "2026-01-29T14:00:00"
}
}
```
#### Scenario: Workcenter mapping in health check
- **WHEN** 呼叫 `GET /health`
- **THEN** 回應 body SHALL 包含 `workcenter_mapping` 區塊:
```json
{
"workcenter_mapping": {
"loaded": true,
"workcenter_count": 18,
"group_count": 8
}
}
```
---
### Requirement: API Response Extension
機台狀況表 API 回應 SHALL 擴充新欄位。
#### Scenario: Extended response fields
- **WHEN** 呼叫 `GET /api/resource/status`
- **THEN** 每筆設備資料 SHALL 包含以下新增欄位:
- `WORKCENTER_GROUP`: 工站分組
- `WORKCENTER_SHORT`: 工站簡稱
- `STATUS_CATEGORY`: 狀態分類
- `JOBORDER`: 維修工單號
- `JOBSTATUS`: 工單狀態
- `LOT_COUNT`: 當前 WIP 批次數
- `TOTAL_TRACKIN_QTY`: Track-In 總數量
- `LATEST_TRACKIN_TIME`: 最新 Track-In 時間
#### Scenario: Backward compatible response
- **WHEN** 呼叫現有 API
- **THEN** 原有欄位 SHALL 保持不變
- **AND** 新欄位為追加,不影響現有消費者
#### Scenario: Null handling for new fields
- **WHEN** 新欄位資料不存在
- **THEN** 該欄位 SHALL 回傳 `null`(而非省略)
---
### Requirement: Filter Options Extension
篩選選項 API SHALL 新增工站分組選項。
#### Scenario: Workcenter groups in filter options
- **WHEN** 呼叫 `GET /api/resource/status/options`
- **THEN** 回應 SHALL 包含 `workcenter_groups` 欄位
- **AND** 內容為所有 WORK_CENTER_GROUP 清單(按 sequence 排序)
#### Scenario: Status categories in filter options
- **WHEN** 呼叫 `GET /api/resource/status/options`
- **THEN** 回應 SHALL 包含 `status_categories` 欄位
- **AND** 內容為 `['PRODUCTIVE', 'STANDBY', 'DOWN', 'ENGINEERING', 'NOT_SCHEDULED', 'INACTIVE', 'OTHER']`

View File

@@ -0,0 +1,90 @@
## ADDED Requirements
### Requirement: Workcenter Mapping Data Storage
系統 SHALL 將 `DW_MES_SPEC_WORKCENTER_V` 工站對照資料儲存於記憶體快取。
#### Scenario: Mapping data loaded
- **WHEN** 快取載入完成後
- **THEN** 記憶體 SHALL 包含以下資料結構:
- `workcenter_to_group`: Dict mapping WORK_CENTER → WORK_CENTER_GROUP
- `workcenter_to_sequence`: Dict mapping WORK_CENTER → WORKCENTERSEQUENCE_GROUP
- `workcenter_to_short`: Dict mapping WORK_CENTER → WORK_CENTER_SHORT
- `all_groups`: List of unique WORK_CENTER_GROUP按 sequence 排序)
#### Scenario: Full table loaded
- **WHEN** 從 Oracle 載入資料時
- **THEN** 系統 SHALL 查詢 `DW_MES_SPEC_WORKCENTER_V` 全表(約 230 筆)
- **AND** 以 WORK_CENTER 為 key 進行去重
---
### Requirement: Workcenter Mapping Background Sync
系統 SHALL 提供背景任務,定期同步 `DW_MES_SPEC_WORKCENTER_V` 至記憶體快取。
#### Scenario: Daily sync
- **WHEN** 應用程式運行中
- **THEN** 背景任務 SHALL 每 `WORKCENTER_MAPPING_SYNC_INTERVAL` 秒(預設 86400 秒 = 24 小時)執行同步
#### Scenario: Initial cache load on startup
- **WHEN** 應用程式啟動時
- **THEN** 系統 SHALL 立即執行一次快取載入
#### Scenario: Force refresh API
- **WHEN** 呼叫 `refresh_workcenter_mapping(force=True)`
- **THEN** 系統 SHALL 立即執行快取同步
---
### Requirement: Workcenter Mapping Query API
系統 SHALL 提供 API 查詢工站對照資訊。
#### Scenario: Get group by workcenter name
- **WHEN** 呼叫 `get_workcenter_group(workcenter_name)`
- **THEN** 系統 SHALL 回傳對應的 WORK_CENTER_GROUP
- **AND** 若 workcenter_name 不存在則回傳 `None`
#### Scenario: Get all workcenter groups
- **WHEN** 呼叫 `get_all_workcenter_groups()`
- **THEN** 系統 SHALL 回傳所有 WORK_CENTER_GROUP 清單(按 sequence 排序)
#### Scenario: Get workcenter short name
- **WHEN** 呼叫 `get_workcenter_short(workcenter_name)`
- **THEN** 系統 SHALL 回傳對應的 WORK_CENTER_SHORT如 DB, WB, Mold
- **AND** 若不存在則回傳 `None`
#### Scenario: Get workcenters by group
- **WHEN** 呼叫 `get_workcenters_by_group(group_name)`
- **THEN** 系統 SHALL 回傳屬於該 group 的所有 WORK_CENTER 清單
---
### Requirement: Workcenter Mapping Integration with filter_cache
工站對照 SHALL 整合至現有 filter_cache 模組。
#### Scenario: Replace WIP-based workcenter groups
- **WHEN** filter_cache 載入 workcenter groups 時
- **THEN** 系統 SHALL 優先從 `DW_MES_SPEC_WORKCENTER_V` 載入
- **AND** 若載入失敗則 fallback 到現有 WIP 視圖來源
#### Scenario: Unified workcenter mapping source
- **WHEN** 呼叫 `get_workcenter_mapping()``get_workcenter_groups()`
- **THEN** 系統 SHALL 使用 SPEC_WORKCENTER_V 作為資料來源
---
### Requirement: Workcenter Mapping Configuration
系統 SHALL 支援透過環境變數配置快取行為。
#### Scenario: Custom sync interval
- **WHEN** 環境變數 `WORKCENTER_MAPPING_SYNC_INTERVAL` 設為 `43200`
- **THEN** 背景任務 SHALL 每 43200 秒12 小時)執行一次
#### Scenario: Default configuration
- **WHEN** 環境變數未設定
- **THEN** 系統 SHALL 使用預設值:
- `WORKCENTER_MAPPING_SYNC_INTERVAL`: `86400`24 小時)

View File

@@ -0,0 +1,96 @@
## 1. Configuration & Constants
- [x] 1.1 新增環境變數定義至 `config/settings.py`
- `REALTIME_EQUIPMENT_CACHE_ENABLED` (default: true)
- `EQUIPMENT_STATUS_SYNC_INTERVAL` (default: 300)
- `WORKCENTER_MAPPING_SYNC_INTERVAL` (default: 86400)
- [x] 1.2 新增 Redis key 前綴常數至 `config/constants.py`
- `EQUIPMENT_STATUS_DATA_KEY`
- `EQUIPMENT_STATUS_INDEX_KEY`
- `EQUIPMENT_STATUS_META_UPDATED_KEY`
- `EQUIPMENT_STATUS_META_COUNT_KEY`
- [x] 1.3 新增 STATUS_CATEGORY_MAP 狀態分類對照表至 `config/constants.py`
## 2. Realtime Equipment Cache - Core
- [x] 2.1 建立 `services/realtime_equipment_cache.py` 模組骨架
- [x] 2.2 實作 `_load_equipment_status_from_oracle()` - 查詢 DW_MES_EQUIPMENTSTATUS_WIP_V
- [x] 2.3 實作 `_aggregate_by_resourceid()` - 依 RESOURCEID 聚合資料
- 狀態欄位取任一筆
- LOT_COUNT = COUNT(*)
- TOTAL_TRACKIN_QTY = SUM(LOTTRACKINQTY_PCS)
- LATEST_TRACKIN_TIME = MAX(LOTTRACKINTIME)
- [x] 2.4 實作 `_classify_status()` - 狀態分類邏輯
- [x] 2.5 實作 `_save_to_redis()` - 使用 pipeline 原子寫入 Redis
## 3. Realtime Equipment Cache - Query API
- [x] 3.1 實作 `get_all_equipment_status()` - 回傳全部快取資料
- [x] 3.2 實作 `get_equipment_status_by_id(resource_id)` - 單筆查詢
- [x] 3.3 實作 `get_equipment_status_by_ids(resource_ids)` - 批次查詢
- [x] 3.4 實作 `get_equipment_status_cache_status()` - 快取狀態查詢
## 4. Realtime Equipment Cache - Background Sync
- [x] 4.1 實作 `refresh_equipment_status_cache(force=False)` - 同步主函數
- [x] 4.2 實作 `_start_equipment_status_sync_worker()` - 背景 worker 啟動
- [x] 4.3 實作 `init_realtime_equipment_cache()` - 初始化函數(供 app 啟動呼叫)
- [x] 4.4 整合至 `app.py` - 應用程式啟動時初始化快取
## 5. Workcenter Mapping Cache
- [x] 5.1 擴充 `services/filter_cache.py` - 新增 workcenter mapping 相關變數
- [x] 5.2 實作 `_load_workcenter_mapping_from_spec()` - 查詢 DW_MES_SPEC_WORKCENTER_V
- [x] 5.3 實作 `get_workcenter_group(workcenter_name)` - 查詢工站分組
- [x] 5.4 實作 `get_workcenter_short(workcenter_name)` - 查詢工站簡稱
- [x] 5.5 實作 `get_workcenters_by_group(group_name)` - 查詢分組內工站
- [x] 5.6 修改 `_load_workcenter_data()` - 優先使用 SPEC_WORKCENTER_V
## 6. Resource Service - Merged Query
- [x] 6.1 修改 `services/resource_service.py` - import 新快取模組
- [x] 6.2 實作 `get_merged_resource_status()` - 三層快取合併查詢
- [x] 6.3 實作 `get_merged_resource_status()` 的篩選邏輯
- workcenter_groups 篩選
- is_production, is_key, is_monitor 篩選
- status_categories 篩選
- [x] 6.4 實作 `get_resource_status_summary()` - 統計摘要
- [x] 6.5 實作 `get_workcenter_status_matrix()` - 工站狀態矩陣
## 7. API Routes
- [x] 7.1 修改 `routes/resource_routes.py` - 擴充 `/api/resource/status` 使用新查詢
- [x] 7.2 修改 `/api/resource/status/options` - 新增 workcenter_groups, status_categories
- [x] 7.3 新增 `/api/resource/status/summary` endpoint
- [x] 7.4 新增 `/api/resource/status/matrix` endpoint
## 8. Health Check Integration
- [x] 8.1 修改健康檢查 - 新增 equipment_status_cache 狀態
- [x] 8.2 修改健康檢查 - 新增 workcenter_mapping 狀態
## 9. Unit Tests
- [x] 9.1 新增 `tests/test_realtime_equipment_cache.py`
- test_aggregate_by_resourceid
- test_classify_status
- test_get_equipment_status_by_id
- [x] 9.2 新增 `tests/test_workcenter_mapping.py`
- test_get_workcenter_group
- test_get_workcenters_by_group
- [x] 9.3 擴充 `tests/test_resource_service.py`
- test_get_merged_resource_status
- test_get_merged_resource_status_with_filters
- test_get_resource_status_summary
## 10. Integration Tests
- [x] 10.1 新增 `tests/e2e/test_realtime_equipment_e2e.py`
- test_equipment_status_cache_sync
- test_merged_query_api
- test_filter_options_include_new_fields
## 11. Documentation & Cleanup
- [x] 11.1 更新 `config/tables.py` - 新增 DW_MES_SPEC_WORKCENTER_V 描述
- [x] 11.2 更新 README 或 API 文件 - 記錄新增 API 與欄位

View File

@@ -0,0 +1,157 @@
## ADDED Requirements
### Requirement: Realtime Equipment Status Data Storage
系統 SHALL 將 `DW_MES_EQUIPMENTSTATUS_WIP_V` 資料(預聚合後)以 JSON 格式儲存於 Redis。
#### Scenario: Data stored with correct keys
- **WHEN** 快取同步完成後
- **THEN** Redis SHALL 包含以下 keys
- `{prefix}:equipment_status:data` - 聚合後設備狀態資料(JSON 陣列)
- `{prefix}:equipment_status:index` - RESOURCEID → array index 的 Hash mapping
- `{prefix}:equipment_status:meta:updated` - 快取更新時間(ISO 8601 格式)
- `{prefix}:equipment_status:meta:count` - 記錄筆數
#### Scenario: Data aggregated by RESOURCEID
- **WHEN** 從 Oracle 載入資料時
- **THEN** 系統 SHALL 以 RESOURCEID 為 key 進行聚合
- **AND** 狀態欄位(EQUIPMENTASSETSSTATUS, EQUIPMENTASSETSSTATUSREASON)取任一筆(同 RESOURCEID 應相同)
- **AND** LOT_COUNT 為該 RESOURCEID 的記錄數
- **AND** TOTAL_TRACKIN_QTY 為 LOTTRACKINQTY_PCS 的加總
- **AND** LATEST_TRACKIN_TIME 為 LOTTRACKINTIME 的最大值
#### Scenario: Single record structure
- **WHEN** 查詢單筆聚合後資料
- **THEN** 資料結構 SHALL 包含:
- `RESOURCEID`: 設備 ID
- `EQUIPMENTID`: 設備編號
- `OBJECTCATEGORY`: 設備類別
- `EQUIPMENTASSETSSTATUS`: 設備狀態
- `EQUIPMENTASSETSSTATUSREASON`: 狀態原因
- `STATUS_CATEGORY`: 狀態分類(PRODUCTIVE/STANDBY/DOWN/ENGINEERING/NOT_SCHEDULED/INACTIVE/OTHER)
- `JOBORDER`: 維修工單號(若有)
- `JOBSTATUS`: 工單狀態(若有)
- `SYMPTOMCODE`: 症狀代碼(若有)
- `CAUSECODE`: 故障原因代碼(若有)
- `REPAIRCODE`: 維修處置代碼(若有)
- `LOT_COUNT`: 當前 WIP 批次數
- `TOTAL_TRACKIN_QTY`: Track-In 總數量
- `LATEST_TRACKIN_TIME`: 最新 Track-In 時間
#### Scenario: Atomic update with pipeline
- **WHEN** 快取同步執行時
- **THEN** 系統 SHALL 使用 Redis pipeline 確保所有 keys 原子更新
---
### Requirement: Realtime Equipment Status Background Sync
系統 SHALL 提供背景任務,定期同步 `DW_MES_EQUIPMENTSTATUS_WIP_V` 至 Redis 快取。
#### Scenario: Periodic sync at configured interval
- **WHEN** 應用程式啟動後
- **THEN** 背景任務 SHALL 每 `EQUIPMENT_STATUS_SYNC_INTERVAL` 秒(預設 300 秒 = 5 分鐘)執行同步
#### Scenario: Full table sync each time
- **WHEN** 背景任務執行時
- **THEN** 系統 SHALL 執行全表查詢並覆蓋快取
- **AND** 記錄同步耗時與記錄數至 info 日誌
#### Scenario: Initial cache load on startup
- **WHEN** 應用程式啟動時
- **THEN** 系統 SHALL 立即執行一次快取同步
#### Scenario: Force refresh API
- **WHEN** 呼叫 `refresh_equipment_status_cache(force=True)`
- **THEN** 系統 SHALL 立即執行快取同步,不等待下次排程
---
### Requirement: Realtime Equipment Status Query API
系統 SHALL 提供 API 從 Redis 快取查詢即時設備狀態。
#### Scenario: Get all equipment status
- **WHEN** 呼叫 `get_all_equipment_status()`
- **THEN** 系統 SHALL 回傳快取中所有設備狀態資料List[Dict]
#### Scenario: Get status by RESOURCEID
- **WHEN** 呼叫 `get_equipment_status_by_id(resource_id)`
- **THEN** 系統 SHALL 使用 index hash 快速查找並回傳對應資料Dict
- **AND** 若 ID 不存在則回傳 `None`
#### Scenario: Get status by multiple RESOURCEIDs
- **WHEN** 呼叫 `get_equipment_status_by_ids(resource_ids)`
- **THEN** 系統 SHALL 回傳所有匹配的設備狀態List[Dict]
- **AND** 不存在的 ID 不會出現在結果中
---
### Requirement: Realtime Equipment Status Cache Status API
系統 SHALL 提供 API 查詢快取狀態。
#### Scenario: Get cache status
- **WHEN** 呼叫 `get_equipment_status_cache_status()`
- **THEN** 系統 SHALL 回傳包含以下欄位的 Dict
- `enabled`: 快取是否啟用
- `loaded`: 快取是否已載入
- `count`: 快取記錄數
- `updated_at`: 最後同步時間
---
### Requirement: Realtime Equipment Status Fallback
當 Redis 不可用時,系統 SHALL 記錄錯誤並回傳空結果。
#### Scenario: Redis unavailable
- **WHEN** Redis 連線失敗或超時
- **THEN** 系統 SHALL 記錄 error 日誌
- **AND** 回傳空列表
#### Scenario: Cache disabled by config
- **WHEN** 環境變數 `REALTIME_EQUIPMENT_CACHE_ENABLED` 設為 `false`
- **THEN** 系統 SHALL 完全跳過 Redis
- **AND** 背景同步任務 SHALL 不啟動
- **AND** 所有查詢 API 回傳空結果
---
### Requirement: Status Category Classification
系統 SHALL 為每個狀態值提供分類標籤。
#### Scenario: Standard E10 status classification
- **WHEN** 狀態值為標準 E10 狀態
- **THEN** 系統 SHALL 依據以下規則分類:
- `PRD``PRODUCTIVE`
- `SBY``STANDBY`
- `UDT``DOWN`
- `SDT``DOWN`
- `EGT``ENGINEERING`
- `NST``NOT_SCHEDULED`
#### Scenario: Non-standard status classification
- **WHEN** 狀態值為非標準狀態
- **THEN** 系統 SHALL 依據以下規則分類:
- `SCRAP``INACTIVE`
- `設備-LOST``INACTIVE`
- `設備-RUN``PRODUCTIVE`
- 其他未知狀態 → `OTHER`
---
### Requirement: Realtime Equipment Status Configuration
系統 SHALL 支援透過環境變數配置快取行為。
#### Scenario: Custom sync interval
- **WHEN** 環境變數 `EQUIPMENT_STATUS_SYNC_INTERVAL` 設為 `600`
- **THEN** 背景任務 SHALL 每 600 秒10 分鐘)執行一次
#### Scenario: Default configuration
- **WHEN** 環境變數未設定
- **THEN** 系統 SHALL 使用預設值:
- `REALTIME_EQUIPMENT_CACHE_ENABLED`: `true`
- `EQUIPMENT_STATUS_SYNC_INTERVAL`: `300`5 分鐘)

View File

@@ -1,261 +1,168 @@
## ADDED Requirements ## ADDED Requirements
### Requirement: Resource Cache Data Storage ### Requirement: Resource Status Merged Query API
系統 SHALL `DW_MES_RESOURCE` 全表資料(套用全域篩選後)以 JSON 格式儲存於 Redis 系統 SHALL 提供 API 合併 resource-cache 與 realtime-equipment-cache 資料
#### Scenario: Data stored with correct keys #### Scenario: Get merged resource status
- **WHEN** 快取同步完成後 - **WHEN** 呼叫 `get_merged_resource_status()`
- **THEN** Redis SHALL 包含以下 keys - **THEN** 系統 SHALL 回傳合併後的設備狀態清單
- `{prefix}:resource:data` - 完整表資料JSON 陣列,包含全部 78 欄位 - **AND** 每筆資料包含 resource-cache 的主檔欄位RESOURCENAME, WORKCENTERNAME, RESOURCEFAMILYNAME, PJ_DEPARTMENT, PJ_ISPRODUCTION, PJ_ISKEY, PJ_ISMONITOR
- `{prefix}:resource:meta:version` - Oracle 資料的 `MAX(LASTCHANGEDATE)` - **AND** 每筆資料包含 realtime-equipment-cache 的即時欄位EQUIPMENTASSETSSTATUS, EQUIPMENTASSETSSTATUSREASON, STATUS_CATEGORY, JOBORDER, JOBSTATUS, LOT_COUNT, TOTAL_TRACKIN_QTY, LATEST_TRACKIN_TIME
- `{prefix}:resource:meta:updated` - 快取更新時間ISO 8601 格式 - **AND** 每筆資料包含 workcenter-mapping-cache 的分組欄位WORKCENTER_GROUP, WORKCENTER_SHORT
- `{prefix}:resource:meta:count` - 記錄筆數
#### Scenario: Global filters applied #### Scenario: Merge by RESOURCEID
- **WHEN** 從 Oracle 載入資料時 - **WHEN** 合併資料時
- **THEN** 系統 SHALL 套用以下篩選條件: - **THEN** 系統 SHALL 以 resource-cache 為主表
- 設備類型:`(OBJECTCATEGORY = 'ASSEMBLY' AND OBJECTTYPE = 'ASSEMBLY') OR (OBJECTCATEGORY = 'WAFERSORT' AND OBJECTTYPE = 'WAFERSORT')` - **AND** 使用 RESOURCEID 作為 JOIN key 與 realtime-equipment-cache 合併
- 排除地點:`LOCATIONNAME NOT IN ('ATEC', 'F區', 'F區焊接站', '報廢', '實驗室', '山東', '成型站_F區', '焊接F區', '無錫', '熒茂')` - **AND** 使用 WORKCENTERNAME 作為 JOIN key 與 workcenter-mapping-cache 合併
- 排除資產狀態:`PJ_ASSETSSTATUS NOT IN ('Disapproved')`
#### Scenario: Atomic update with pipeline #### Scenario: Handle missing realtime status
- **WHEN** 快取同步執行時 - **WHEN** resource-cache 中的設備在 realtime-equipment-cache 找不到對應資料
- **THEN** 系統 SHALL 使用 Redis pipeline 確保所有 keys 原子更新 - **THEN** 即時欄位 SHALL 回傳 `None`
---
### Requirement: Resource Cache Background Sync
系統 SHALL 提供背景任務,定期同步 `DW_MES_RESOURCE` 至 Redis 快取。
#### Scenario: Periodic sync at configured interval
- **WHEN** 應用程式啟動後
- **THEN** 背景任務 SHALL 每 `RESOURCE_SYNC_INTERVAL` 秒(預設 14400 秒 = 4 小時)檢查是否需要同步
#### Scenario: Version check triggers sync
- **WHEN** 背景任務執行時Oracle 的 `MAX(LASTCHANGEDATE)` 與 Redis 中儲存的版本不同
- **THEN** 系統 SHALL 執行全表同步
- **AND** 更新 `{prefix}:resource:meta:version` 為新的版本
- **AND** 更新 `{prefix}:resource:meta:updated` 為當前時間
#### Scenario: Version unchanged skips sync
- **WHEN** 背景任務執行時Oracle 的 `MAX(LASTCHANGEDATE)` 與 Redis 中儲存的版本相同
- **THEN** 系統 SHALL 跳過同步
- **AND** 記錄 debug 日誌 - **AND** 記錄 debug 日誌
#### Scenario: Initial cache load on startup #### Scenario: Handle missing workcenter group
- **WHEN** 應用程式啟動時 Redis 中無 resource 快取資料 - **WHEN** WORKCENTERNAME 在 workcenter-mapping-cache 找不到對應
- **THEN** 系統 SHALL 立即執行一次快取同步 - **THEN** WORKCENTER_GROUP 與 WORKCENTER_SHORT SHALL 回傳 `None`
#### Scenario: Force refresh ignores version check
- **WHEN** 呼叫 `refresh_cache(force=True)`
- **THEN** 系統 SHALL 執行全表同步,不論版本是否相同
--- ---
### Requirement: Resource Cache Query API ### Requirement: Resource Status Merged Query with Filter
系統 SHALL 提供 API 從 Redis 快取查詢設備資料 系統 SHALL 支援帶篩選條件的合併查詢
#### Scenario: Get all resources #### Scenario: Filter by workcenter groups
- **WHEN** 呼叫 `get_all_resources()` - **WHEN** 呼叫 `get_merged_resource_status(workcenter_groups=['焊接', '成型'])`
- **THEN** 系統 SHALL 回傳快取中所有設備資料List[Dict],包含全部 78 欄位) - **THEN** 系統 SHALL 回傳 WORKCENTER_GROUP 在指定清單中的設備
#### Scenario: Get resource by ID #### Scenario: Filter by equipment flags
- **WHEN** 呼叫 `get_resource_by_id(resource_id)` - **WHEN** 呼叫 `get_merged_resource_status(is_production=True, is_key=True)`
- **THEN** 系統 SHALL 回傳對應的設備資料Dict - **THEN** 系統 SHALL 回傳符合 PJ_ISPRODUCTION=1 且 PJ_ISKEY=1 的設備
- **AND** 若 ID 不存在則回傳 `None`
#### Scenario: Get resources by multiple IDs #### Scenario: Filter by status category
- **WHEN** 呼叫 `get_resources_by_ids(resource_ids)` - **WHEN** 呼叫 `get_merged_resource_status(status_categories=['PRODUCTIVE', 'STANDBY'])`
- **THEN** 系統 SHALL 回傳所有匹配的設備資料List[Dict] - **THEN** 系統 SHALL 回傳 STATUS_CATEGORY 在指定清單中的設備
- **AND** 不存在的 ID 不會出現在結果中
#### Scenario: Get resources by filter #### Scenario: Combined filters
- **WHEN** 呼叫 `get_resources_by_filter(workcenters=['焊接_DB'], is_production=True)` - **WHEN** 呼叫 `get_merged_resource_status(workcenter_groups=['焊接'], is_production=True, status_categories=['DOWN'])`
- **THEN** 系統 SHALL 在 Python 端篩選快取資料 - **THEN** 系統 SHALL 回傳同時符合所有條件的設備
- **AND** 回傳符合所有條件的設備清單
--- ---
### Requirement: Resource Cache Distinct Values API ### Requirement: Resource Status Summary Statistics
系統 SHALL 提供 API 取得設備欄位的唯一值清單,供篩選器使用 系統 SHALL 提供設備狀態統計摘要 API
#### Scenario: Get distinct values for column #### Scenario: Get status summary
- **WHEN** 呼叫 `get_distinct_values('RESOURCEFAMILYNAME')` - **WHEN** 呼叫 `get_resource_status_summary()`
- **THEN** 系統 SHALL 回傳該欄位的唯一值清單(排序後) - **THEN** 系統 SHALL 回傳包含以下統計的 Dict
- **AND** 自動過濾 `None` 和空字串 - `total_count`: 設備總數
- `by_status_category`: 各 STATUS_CATEGORY 的設備數
- `by_workcenter_group`: 各 WORKCENTER_GROUP 的設備數
- `with_active_job`: 有維修工單的設備數
- `with_wip`: 有 WIP 的設備數(LOT_COUNT > 0) #### Scenario: Summary respects filters
#### Scenario: Convenience methods for common columns #### Scenario: Summary respects filters
- **WHEN** 呼叫 `get_resource_families()` - **WHEN** 呼叫 `get_resource_status_summary(workcenter_groups=['焊接'])`
- **THEN** 統 SHALL 回傳 `RESOURCEFAMILYNAME` 欄位的唯一值清單 - **THEN** 統 SHALL 只計算符合篩選條件的設備
- **AND** `get_workcenters()` 回傳 `WORKCENTERNAME` 唯一值
- **AND** `get_departments()` 回傳 `PJ_DEPARTMENT` 唯一值
--- ---
### Requirement: Resource Cache Status API ### Requirement: Resource Status Workcenter Matrix
系統 SHALL 提供 API 查詢快取狀態 系統 SHALL 提供工站 × 狀態矩陣 API
#### Scenario: Get cache status #### Scenario: Get workcenter status matrix
- **WHEN** 呼叫 `get_cache_status()` - **WHEN** 呼叫 `get_workcenter_status_matrix()`
- **THEN** 系統 SHALL 回傳包含以下欄位的 Dict - **THEN** 系統 SHALL 回傳 List[Dict],每筆包含:
- `enabled`: 快取是否啟用 - `workcenter_group`: 工站分組名稱
- `loaded`: 快取是否已載入 - `workcenter_sequence`: 排序序號
- `count`: 快取記錄 - `total`: 該分組設備總
- `version`: 資料版本MAX(LASTCHANGEDATE) - `PRD`: 狀態為 PRD 的數量
- `updated_at`: 最後同步時間 - `SBY`: 狀態為 SBY 的數量
- `UDT`: 狀態為 UDT 的數量
- `SDT`: 狀態為 SDT 的數量
- `EGT`: 狀態為 EGT 的數量
- `NST`: 狀態為 NST 的數量
- `OTHER`: 其他狀態的數量
#### Scenario: Status when cache not loaded #### Scenario: Matrix sorted by sequence
- **WHEN** 呼叫 `get_cache_status()` 且快取尚未載入 - **WHEN** 回傳矩陣資料
- **THEN** `loaded` SHALL 為 `false` - **THEN** 資料 SHALL 按 workcenter_sequence 升序排列
- **AND** `count` SHALL 為 `0`
---
### Requirement: Resource Cache Fallback
當 Redis 不可用時,系統 SHALL 自動降級到直接查詢 Oracle。
#### Scenario: Redis unavailable triggers fallback
- **WHEN** Redis 連線失敗或超時
- **THEN** 系統 SHALL 直接查詢 Oracle `DW_MES_RESOURCE`
- **AND** 記錄 warning 日誌
#### Scenario: Cache empty triggers fallback
- **WHEN** Redis 可用但 `{prefix}:resource:data` 不存在或為空
- **THEN** 系統 SHALL 直接查詢 Oracle `DW_MES_RESOURCE`
#### Scenario: RESOURCE_CACHE_ENABLED=false disables cache
- **WHEN** 環境變數 `RESOURCE_CACHE_ENABLED` 設為 `false`
- **THEN** 系統 SHALL 完全跳過 Redis直接查詢 Oracle
- **AND** 背景同步任務 SHALL 不啟動
---
### Requirement: Resource Cache Configuration
系統 SHALL 支援透過環境變數配置快取行為。
#### Scenario: Custom sync interval
- **WHEN** 環境變數 `RESOURCE_SYNC_INTERVAL` 設為 `7200`
- **THEN** 背景任務 SHALL 每 7200 秒2 小時)執行一次
#### Scenario: Default configuration
- **WHEN** 環境變數未設定
- **THEN** 系統 SHALL 使用預設值:
- `RESOURCE_CACHE_ENABLED`: `true`
- `RESOURCE_SYNC_INTERVAL`: `14400`4 小時)
#### Scenario: Key prefix from environment
- **WHEN** 環境變數 `REDIS_KEY_PREFIX` 設為 `my_app`
- **THEN** 所有 resource 快取 keys SHALL 使用 `my_app:resource:*` 前綴
--- ---
### Requirement: Health Check Integration ### Requirement: Health Check Integration
健康檢查 SHALL 包含 Resource 快取狀態。 健康檢查 SHALL 包含即時設備狀態快取狀態。
#### Scenario: Resource cache status in health check #### Scenario: Equipment status cache in health check
- **WHEN** 呼叫 `GET /health` 且 resource 快取可用 - **WHEN** 呼叫 `GET /health`
- **THEN** 回應 body SHALL 包含 `resource_cache` 區塊: - **THEN** 回應 body SHALL 包含 `equipment_status_cache` 區塊:
```json ```json
{ {
"resource_cache": { "equipment_status_cache": {
"enabled": true, "enabled": true,
"loaded": true, "loaded": true,
"count": 3500, "count": 1803,
"version": "2026-01-29 10:30:00", "updated_at": "2026-01-29T14:00:00"
"updated_at": "2026-01-29 14:00:00"
} }
} }
``` ```
#### Scenario: Resource cache not loaded warning #### Scenario: Workcenter mapping in health check
- **WHEN** 呼叫 `GET /health` 且 resource 快取啟用但未載入 - **WHEN** 呼叫 `GET /health`
- **THEN** 回應 body 的 `warnings` SHALL 包含 "Resource cache not loaded" - **THEN** 回應 body SHALL 包含 `workcenter_mapping` 區塊:
```json
{
"workcenter_mapping": {
"loaded": true,
"workcenter_count": 18,
"group_count": 8
}
}
```
--- ---
### Requirement: Resource History KPI API - Availability% ### Requirement: API Response Extension
系統 SHALL 在 KPI API 回應中新增 `availability_pct` 欄位。 機台狀況表 API 回應 SHALL 擴充新欄位。
#### Scenario: KPI includes availability percentage #### Scenario: Extended response fields
- **WHEN** 呼叫 `GET /api/resource/history/summary` - **WHEN** 呼叫 `GET /api/resource/status`
- **THEN** 回應的 `kpi` 物件 SHALL 包含 `availability_pct` 欄位 - **THEN** 每筆設備資料 SHALL 包含以下新增欄位
- **AND** `availability_pct` 計算公式為 `(PRD + SBY + EGT) / (PRD + SBY + EGT + SDT + UDT + NST) * 100` - `WORKCENTER_GROUP`: 工站分組
- **AND** 數值四捨五入至小數點後一位 - `WORKCENTER_SHORT`: 工站簡稱
- `STATUS_CATEGORY`: 狀態分類
- `JOBORDER`: 維修工單號
- `JOBSTATUS`: 工單狀態
- `LOT_COUNT`: 當前 WIP 批次數
- `TOTAL_TRACKIN_QTY`: Track-In 總數量
- `LATEST_TRACKIN_TIME`: 最新 Track-In 時間
#### Scenario: Availability percentage handles zero denominator #### Scenario: Backward compatible response
- **WHEN** 分母 `(PRD + SBY + EGT + SDT + UDT + NST)` 為零 - **WHEN** 呼叫現有 API
- **THEN** `availability_pct` SHALL 回傳 `0` - **THEN** 原有欄位 SHALL 保持不變
- **AND** 新欄位為追加,不影響現有消費者
#### Scenario: Null handling for new fields
- **WHEN** 新欄位資料不存在
- **THEN** 該欄位 SHALL 回傳 `null`(而非省略)
--- ---
### Requirement: Resource History Trend API - Availability% ### Requirement: Filter Options Extension
系統 SHALL 在趨勢 API 回應的每個資料點中新增 `availability_pct` 欄位 篩選選項 API SHALL 新增工站分組選項
#### Scenario: Trend data includes availability percentage #### Scenario: Workcenter groups in filter options
- **WHEN** 呼叫 `GET /api/resource/history/summary` - **WHEN** 呼叫 `GET /api/resource/status/options`
- **THEN** 回應的 `trend` 陣列中每個物件 SHALL 包含 `availability_pct` 欄位 - **THEN** 回應 SHALL 包含 `workcenter_groups` 欄位
- **AND** 各資料點的 `availability_pct` 使用該時間區段的 E10 狀態時數計算 - **AND** 內容為所有 WORK_CENTER_GROUP 清單(按 sequence 排序)
#### Scenario: Trend availability calculation formula #### Scenario: Status categories in filter options
- **GIVEN** 單一時間區段的 E10 狀態時數 - **WHEN** 呼叫 `GET /api/resource/status/options`
- **WHEN** 計算該區段的 `availability_pct` - **THEN** 回應 SHALL 包含 `status_categories` 欄位
- **THEN** 公式為 `(PRD_HOURS + SBY_HOURS + EGT_HOURS) / (PRD_HOURS + SBY_HOURS + EGT_HOURS + SDT_HOURS + UDT_HOURS + NST_HOURS) * 100` - **AND** 內容為 `['PRODUCTIVE', 'STANDBY', 'DOWN', 'ENGINEERING', 'NOT_SCHEDULED', 'INACTIVE', 'OTHER']`
---
### Requirement: Resource History Detail API - Availability%
系統 SHALL 在明細 API 回應的每筆資料中新增 `availability_pct` 欄位。
#### Scenario: Detail data includes availability percentage
- **WHEN** 呼叫 `GET /api/resource/history/detail`
- **THEN** 回應的 `data` 陣列中每個物件 SHALL 包含 `availability_pct` 欄位
---
### Requirement: CSV Export - Availability%
系統 SHALL 在 CSV 匯出中新增 Availability% 欄位。
#### Scenario: CSV includes availability column
- **WHEN** 匯出 CSV 檔案
- **THEN** CSV 標頭 SHALL 包含 `Availability%` 欄位(位於 `OU%` 之後)
- **AND** 各列的 `Availability%` 使用該機台的 E10 狀態時數計算
---
### Requirement: Frontend Trend Chart - Availability%
系統 SHALL 在趨勢圖中新增 Availability% 趨勢線。
#### Scenario: Chart displays availability trend line
- **WHEN** 顯示設備歷史績效頁面的趨勢圖
- **THEN** 圖表 SHALL 顯示 Availability% 趨勢線
- **AND** Availability% 使用綠色 (`#10B981`) 顯示
- **AND** OU% 保持原有藍色 (`#3B82F6`)
#### Scenario: Chart legend shows both metrics
- **WHEN** 顯示趨勢圖
- **THEN** 圖例 SHALL 包含 "OU%" 與 "Availability%" 兩項
---
### Requirement: Frontend KPI Card - Availability%
系統 SHALL 在 KPI 區新增 Availability% 卡片。
#### Scenario: KPI section displays availability card
- **WHEN** 顯示設備歷史績效頁面
- **THEN** KPI 區 SHALL 顯示 Availability% 卡片
- **AND** 卡片顯示格式為 `XX.X%`
- **AND** 卡片位置在 OU% 卡片之後

View File

@@ -0,0 +1,90 @@
## ADDED Requirements
### Requirement: Workcenter Mapping Data Storage
系統 SHALL 將 `DW_MES_SPEC_WORKCENTER_V` 工站對照資料儲存於記憶體快取。
#### Scenario: Mapping data loaded
- **WHEN** 快取載入完成後
- **THEN** 記憶體 SHALL 包含以下資料結構:
- `workcenter_to_group`: Dict mapping WORK_CENTER → WORK_CENTER_GROUP
- `workcenter_to_sequence`: Dict mapping WORK_CENTER → WORKCENTERSEQUENCE_GROUP
- `workcenter_to_short`: Dict mapping WORK_CENTER → WORK_CENTER_SHORT
- `all_groups`: List of unique WORK_CENTER_GROUP按 sequence 排序)
#### Scenario: Full table loaded
- **WHEN** 從 Oracle 載入資料時
- **THEN** 系統 SHALL 查詢 `DW_MES_SPEC_WORKCENTER_V` 全表(約 230 筆)
- **AND** 以 WORK_CENTER 為 key 進行去重
---
### Requirement: Workcenter Mapping Background Sync
系統 SHALL 提供背景任務,定期同步 `DW_MES_SPEC_WORKCENTER_V` 至記憶體快取。
#### Scenario: Daily sync
- **WHEN** 應用程式運行中
- **THEN** 背景任務 SHALL 每 `WORKCENTER_MAPPING_SYNC_INTERVAL` 秒(預設 86400 秒 = 24 小時)執行同步
#### Scenario: Initial cache load on startup
- **WHEN** 應用程式啟動時
- **THEN** 系統 SHALL 立即執行一次快取載入
#### Scenario: Force refresh API
- **WHEN** 呼叫 `refresh_workcenter_mapping(force=True)`
- **THEN** 系統 SHALL 立即執行快取同步
---
### Requirement: Workcenter Mapping Query API
系統 SHALL 提供 API 查詢工站對照資訊。
#### Scenario: Get group by workcenter name
- **WHEN** 呼叫 `get_workcenter_group(workcenter_name)`
- **THEN** 系統 SHALL 回傳對應的 WORK_CENTER_GROUP
- **AND** 若 workcenter_name 不存在則回傳 `None`
#### Scenario: Get all workcenter groups
- **WHEN** 呼叫 `get_all_workcenter_groups()`
- **THEN** 系統 SHALL 回傳所有 WORK_CENTER_GROUP 清單(按 sequence 排序)
#### Scenario: Get workcenter short name
- **WHEN** 呼叫 `get_workcenter_short(workcenter_name)`
- **THEN** 系統 SHALL 回傳對應的 WORK_CENTER_SHORT(如 DB, WB, Mold)
- **AND** 若不存在則回傳 `None`
#### Scenario: Get workcenters by group
- **WHEN** 呼叫 `get_workcenters_by_group(group_name)`
- **THEN** 系統 SHALL 回傳屬於該 group 的所有 WORK_CENTER 清單
---
### Requirement: Workcenter Mapping Integration with filter_cache
工站對照 SHALL 整合至現有 filter_cache 模組。
#### Scenario: Replace WIP-based workcenter groups
- **WHEN** filter_cache 載入 workcenter groups 時
- **THEN** 系統 SHALL 優先從 `DW_MES_SPEC_WORKCENTER_V` 載入
- **AND** 若載入失敗則 fallback 到現有 WIP 視圖來源
#### Scenario: Unified workcenter mapping source
- **WHEN** 呼叫 `get_workcenter_mapping()``get_workcenter_groups()`
- **THEN** 系統 SHALL 使用 SPEC_WORKCENTER_V 作為資料來源
---
### Requirement: Workcenter Mapping Configuration
系統 SHALL 支援透過環境變數配置快取行為。
#### Scenario: Custom sync interval
- **WHEN** 環境變數 `WORKCENTER_MAPPING_SYNC_INTERVAL` 設為 `43200`
- **THEN** 背景任務 SHALL 每 43200 秒12 小時)執行一次
#### Scenario: Default configuration
- **WHEN** 環境變數未設定
- **THEN** 系統 SHALL 使用預設值:
- `WORKCENTER_MAPPING_SYNC_INTERVAL`: `86400`24 小時)

View File

@@ -20,6 +20,7 @@ from mes_dashboard.routes.admin_routes import admin_bp
from mes_dashboard.routes.health_routes import health_bp from mes_dashboard.routes.health_routes import health_bp
from mes_dashboard.services.page_registry import get_page_status, is_api_public from mes_dashboard.services.page_registry import get_page_status, is_api_public
from mes_dashboard.core.cache_updater import start_cache_updater, stop_cache_updater from mes_dashboard.core.cache_updater import start_cache_updater, stop_cache_updater
from mes_dashboard.services.realtime_equipment_cache import init_realtime_equipment_cache
def _configure_logging(app: Flask) -> None: def _configure_logging(app: Flask) -> None:
@@ -72,6 +73,7 @@ def create_app(config_name: str | None = None) -> Flask:
get_engine() get_engine()
start_keepalive() # Keep database connections alive start_keepalive() # Keep database connections alive
start_cache_updater() # Start Redis cache updater start_cache_updater() # Start Redis cache updater
init_realtime_equipment_cache(app) # Start realtime equipment status cache
# Register API routes # Register API routes
register_routes(app) register_routes(app)

View File

@@ -81,3 +81,42 @@ STATUS_DISPLAY_NAMES = {
# WIP status codes to exclude (completed/scrapped) # WIP status codes to exclude (completed/scrapped)
WIP_EXCLUDED_STATUS = (8, 128) WIP_EXCLUDED_STATUS = (8, 128)
# ============================================================
# Redis Key Prefixes - Realtime Equipment Status
# ============================================================
# Key suffixes for the realtime equipment status cache. They are
# combined with the global Redis key prefix to form the full keys.
EQUIPMENT_STATUS_DATA_KEY = "equipment_status:data"  # aggregated status records (JSON array)
EQUIPMENT_STATUS_INDEX_KEY = "equipment_status:index"  # hash: RESOURCEID -> array index
EQUIPMENT_STATUS_META_UPDATED_KEY = "equipment_status:meta:updated"  # last sync time (ISO 8601)
EQUIPMENT_STATUS_META_COUNT_KEY = "equipment_status:meta:count"  # number of cached records
# ============================================================
# Status Category Classification
# ============================================================
# Map equipment status to category for grouping/display.
# Covers the standard E10 states plus site-specific values; per the
# spec, statuses not present in this map are classified as 'OTHER'.
STATUS_CATEGORY_MAP = {
    'PRD': 'PRODUCTIVE',
    'SBY': 'STANDBY',
    'UDT': 'DOWN',
    'SDT': 'DOWN',
    'EGT': 'ENGINEERING',
    'NST': 'NOT_SCHEDULED',
    'SCRAP': 'INACTIVE',
    '設備-LOST': 'INACTIVE',
    '設備-RUN': 'PRODUCTIVE',
}
# All possible status categories
# Fixed value list (includes the 'OTHER' fallback bucket); exposed to
# the filter-options API as-is.
STATUS_CATEGORIES = [
    'PRODUCTIVE',
    'STANDBY',
    'DOWN',
    'ENGINEERING',
    'NOT_SCHEDULED',
    'INACTIVE',
    'OTHER',
]

View File

@@ -32,6 +32,15 @@ class Config:
# Session configuration # Session configuration
PERMANENT_SESSION_LIFETIME = _int_env("SESSION_LIFETIME", 28800) # 8 hours PERMANENT_SESSION_LIFETIME = _int_env("SESSION_LIFETIME", 28800) # 8 hours
# Realtime Equipment Status Cache
REALTIME_EQUIPMENT_CACHE_ENABLED = os.getenv(
"REALTIME_EQUIPMENT_CACHE_ENABLED", "true"
).lower() in ("true", "1", "yes")
EQUIPMENT_STATUS_SYNC_INTERVAL = _int_env("EQUIPMENT_STATUS_SYNC_INTERVAL", 300) # 5 minutes
# Workcenter Mapping Cache
WORKCENTER_MAPPING_SYNC_INTERVAL = _int_env("WORKCENTER_MAPPING_SYNC_INTERVAL", 86400) # 24 hours
class DevelopmentConfig(Config): class DevelopmentConfig(Config):
"""Development configuration.""" """Development configuration."""

View File

@@ -19,14 +19,14 @@ TABLES_CONFIG = {
'display_name': '設備狀態+WIP 視圖 (DWH.DW_MES_EQUIPMENTSTATUS_WIP_V)', 'display_name': '設備狀態+WIP 視圖 (DWH.DW_MES_EQUIPMENTSTATUS_WIP_V)',
'row_count': 2631, 'row_count': 2631,
'time_field': None, 'time_field': None,
'description': '設備狀態與 WIP 關聯視圖 - 設備當前狀態、維修工單、資產狀態等 32 欄位' 'description': '設備即時狀態視圖 - 透過 DB Link 取得即時設備狀態、維修工單、資產狀態等 32 欄位。用於 realtime-equipment-cache5 分鐘同步)'
}, },
{ {
'name': 'DWH.DW_MES_SPEC_WORKCENTER_V', 'name': 'DWH.DW_MES_SPEC_WORKCENTER_V',
'display_name': '規格工站對照 (DWH.DW_MES_SPEC_WORKCENTER_V)', 'display_name': '規格工站對照 (DWH.DW_MES_SPEC_WORKCENTER_V)',
'row_count': 230, 'row_count': 230,
'time_field': None, 'time_field': None,
'description': '規格與工站對照視圖 - 規格順序、工站群組、工站順序等 9 欄位' 'description': '工站分組對照視圖 - WORK_CENTER 到 WORK_CENTER_GROUP 映射,含 WORKCENTERSEQUENCE_GROUP 排序。用於 filter-cache 的工站分組(每日同步)'
} }
], ],
'現況快照表': [ '現況快照表': [

View File

@@ -92,6 +92,15 @@ def get_key(key: str) -> str:
return f"{REDIS_KEY_PREFIX}:{key}" return f"{REDIS_KEY_PREFIX}:{key}"
def get_key_prefix() -> str:
    """Return the configured Redis key prefix (e.g., "mes_wip")."""
    return REDIS_KEY_PREFIX
def close_redis() -> None: def close_redis() -> None:
"""Close Redis connection. """Close Redis connection.

View File

@@ -95,6 +95,40 @@ def get_resource_cache_status() -> dict:
return get_res_cache_status() return get_res_cache_status()
def get_equipment_status_cache_status() -> dict:
    """Report the realtime equipment status cache state.

    Returns:
        Dict describing the equipment status cache; ``{'enabled': False}``
        when the cache is disabled via app config.
    """
    # Imports kept local to the function, as in the original module
    # (presumably to avoid import cycles at module load — confirm).
    from flask import current_app
    from mes_dashboard.services.realtime_equipment_cache import (
        get_equipment_status_cache_status as _cache_status,
    )

    if not current_app.config.get('REALTIME_EQUIPMENT_CACHE_ENABLED', True):
        return {'enabled': False}
    return _cache_status()
def get_workcenter_mapping_status() -> dict:
    """Summarize the workcenter mapping cache for health reporting.

    Returns:
        Dict with 'loaded', 'workcenter_count' and 'group_count' keys,
        derived from the filter_cache status snapshot.
    """
    from mes_dashboard.services.filter_cache import get_cache_status

    snapshot = get_cache_status()
    loaded = snapshot.get('loaded', False)
    workcenters = snapshot.get('workcenter_mapping_count', 0)
    groups = snapshot.get('workcenter_groups_count', 0)
    return {
        'loaded': loaded,
        'workcenter_count': workcenters,
        'group_count': groups,
    }
@health_bp.route('/health', methods=['GET']) @health_bp.route('/health', methods=['GET'])
def health_check(): def health_check():
"""Health check endpoint. """Health check endpoint.
@@ -134,11 +168,21 @@ def health_check():
if resource_cache.get('enabled') and not resource_cache.get('loaded'): if resource_cache.get('enabled') and not resource_cache.get('loaded'):
warnings.append("Resource cache not loaded") warnings.append("Resource cache not loaded")
# Check equipment status cache
equipment_status_cache = get_equipment_status_cache_status()
if equipment_status_cache.get('enabled') and not equipment_status_cache.get('loaded'):
warnings.append("Equipment status cache not loaded")
# Check workcenter mapping
workcenter_mapping = get_workcenter_mapping_status()
response = { response = {
'status': status, 'status': status,
'services': services, 'services': services,
'cache': get_cache_status(), 'cache': get_cache_status(),
'resource_cache': resource_cache 'resource_cache': resource_cache,
'equipment_status_cache': equipment_status_cache,
'workcenter_mapping': workcenter_mapping,
} }
if errors: if errors:

View File

@@ -16,7 +16,12 @@ from mes_dashboard.services.resource_service import (
query_resource_detail, query_resource_detail,
query_resource_workcenter_status_matrix, query_resource_workcenter_status_matrix,
query_resource_filter_options, query_resource_filter_options,
get_merged_resource_status,
get_resource_status_summary,
get_workcenter_status_matrix,
) )
from mes_dashboard.services.filter_cache import get_workcenter_groups
from mes_dashboard.config.constants import STATUS_CATEGORIES
# Create Blueprint # Create Blueprint
resource_bp = Blueprint('resource', __name__, url_prefix='/api/resource') resource_bp = Blueprint('resource', __name__, url_prefix='/api/resource')
@@ -150,3 +155,150 @@ def api_resource_status_values():
if connection: if connection:
connection.close() connection.close()
return jsonify({'success': False, 'error': str(exc)}), 500 return jsonify({'success': False, 'error': str(exc)}), 500
# ============================================================
# Realtime Equipment Status APIs (New)
# ============================================================
@resource_bp.route('/status')
def api_resource_status():
    """API: Get merged resource status from realtime cache.

    Query params:
        workcenter_groups: Comma-separated group names (e.g., '焊接,成型')
        is_production: '1' or 'true' to filter production equipment
        is_key: '1' or 'true' to filter key equipment
        is_monitor: '1' or 'true' to filter monitor equipment
        status_categories: Comma-separated categories (e.g., 'PRODUCTIVE,DOWN')

    Returns:
        JSON ``{'success': True, 'data': [...], 'count': N}`` on success,
        or ``{'success': False, 'error': ...}`` with HTTP 500 on failure.
    """
    def _csv_param(name):
        # Split a comma-separated query param into a clean list.
        # Strips whitespace and drops empty items so inputs such as
        # '焊接, 成型' or a trailing comma still match cache values
        # (the original split(',') kept the stray spaces/empties).
        raw = request.args.get(name)
        if not raw:
            return None
        values = [item.strip() for item in raw.split(',') if item.strip()]
        return values or None

    def _bool_param(name):
        # Tri-state boolean: None when absent/empty (no filtering);
        # True for '1'/'true'/'yes' (case-insensitive), else False.
        raw = request.args.get(name)
        if not raw:
            return None
        return raw.lower() in ('1', 'true', 'yes')

    try:
        data = get_merged_resource_status(
            workcenter_groups=_csv_param('workcenter_groups'),
            is_production=_bool_param('is_production'),
            is_key=_bool_param('is_key'),
            is_monitor=_bool_param('is_monitor'),
            status_categories=_csv_param('status_categories'),
        )
        return jsonify({
            'success': True,
            'data': data,
            'count': len(data),
        })
    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)}), 500
@resource_bp.route('/status/options')
def api_resource_status_options():
    """API: Get filter options for realtime status queries.

    Returns:
        JSON with ``workcenter_groups`` (names from the workcenter
        mapping cache) and the fixed ``status_categories`` list.
    """
    try:
        groups = get_workcenter_groups() or []
        group_names = [group['name'] for group in groups]
        payload = {
            'workcenter_groups': group_names,
            'status_categories': STATUS_CATEGORIES,
        }
        return jsonify({'success': True, 'data': payload})
    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)}), 500
@resource_bp.route('/status/summary')
def api_resource_status_summary():
    """API: Get resource status summary statistics.

    Query params: same as /status (workcenter_groups, is_production,
    is_key, is_monitor).

    Returns:
        JSON {success, data} with summary counts, or HTTP 500 on failure.
    """
    def _bool_arg(name):
        # Tri-state: None when the param is absent/empty, else truthiness.
        raw = request.args.get(name)
        if not raw:
            return None
        return raw.lower() in ('1', 'true', 'yes')

    def _list_arg(name):
        # Trim whitespace and drop empty tokens so '焊接, 成型,' still matches.
        raw = request.args.get(name)
        if not raw:
            return None
        tokens = [t.strip() for t in raw.split(',') if t.strip()]
        return tokens or None

    try:
        data = get_resource_status_summary(
            workcenter_groups=_list_arg('workcenter_groups'),
            is_production=_bool_arg('is_production'),
            is_key=_bool_arg('is_key'),
            is_monitor=_bool_arg('is_monitor'),
        )
        return jsonify({'success': True, 'data': data})
    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)}), 500
@resource_bp.route('/status/matrix')
def api_resource_status_matrix():
    """API: Get workcenter × status matrix.

    Query params:
        is_production: Filter by production equipment
        is_key: Filter by key equipment
        is_monitor: Filter by monitor equipment

    Returns:
        JSON {success, data} with per-group status counts, or HTTP 500.
    """
    # Parse the three tri-state boolean flags (None = no filter).
    flags = {}
    for param in ('is_production', 'is_key', 'is_monitor'):
        raw = request.args.get(param)
        flags[param] = raw.lower() in ('1', 'true', 'yes') if raw else None

    try:
        matrix = get_workcenter_status_matrix(
            is_production=flags['is_production'],
            is_key=flags['is_key'],
            is_monitor=flags['is_monitor'],
        )
        return jsonify({'success': True, 'data': matrix})
    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)}), 500

View File

@@ -20,6 +20,7 @@ logger = logging.getLogger('mes_dashboard.filter_cache')
CACHE_TTL_SECONDS = 3600 # 1 hour cache TTL CACHE_TTL_SECONDS = 3600 # 1 hour cache TTL
WIP_VIEW = "DWH.DW_MES_LOT_V" WIP_VIEW = "DWH.DW_MES_LOT_V"
SPEC_WORKCENTER_VIEW = "DWH.DW_MES_SPEC_WORKCENTER_V"
# ============================================================ # ============================================================
# Cache Storage # Cache Storage
@@ -28,6 +29,7 @@ WIP_VIEW = "DWH.DW_MES_LOT_V"
_CACHE = { _CACHE = {
'workcenter_groups': None, # List of {name, sequence} 'workcenter_groups': None, # List of {name, sequence}
'workcenter_mapping': None, # Dict {workcentername: {group, sequence}} 'workcenter_mapping': None, # Dict {workcentername: {group, sequence}}
'workcenter_to_short': None, # Dict {workcentername: short_name}
'last_refresh': None, 'last_refresh': None,
'is_loading': False, 'is_loading': False,
} }
@@ -79,6 +81,57 @@ def get_workcenters_for_groups(groups: List[str]) -> List[str]:
return result return result
def get_workcenter_group(workcenter_name: str) -> Optional[str]:
    """Resolve the WORK_CENTER_GROUP for a workcenter name.

    Args:
        workcenter_name: The workcenter name to look up.

    Returns:
        The WORK_CENTER_GROUP, or None when the mapping is unavailable
        or the workcenter is unknown.
    """
    mapping = get_workcenter_mapping()
    if not mapping:
        return None
    entry = mapping.get(workcenter_name)
    if entry is None:
        return None
    return entry.get('group')
def get_workcenter_short(workcenter_name: str) -> Optional[str]:
    """Resolve the short name (e.g., DB, WB, Mold) for a workcenter.

    Args:
        workcenter_name: The workcenter name to look up.

    Returns:
        The WORK_CENTER_SHORT value, or None when the cache has no
        short-name table or the workcenter is not present in it.
    """
    _ensure_cache_loaded()
    short_map = _CACHE.get('workcenter_to_short')
    if not short_map:
        return None
    return short_map.get(workcenter_name)
def get_workcenters_by_group(group_name: str) -> List[str]:
    """Return all workcenter names that belong to a specific group.

    Args:
        group_name: The WORKCENTER_GROUP name.

    Returns:
        List of workcenter names in that group (empty when the mapping is
        unavailable or no workcenter matches).
    """
    mapping = get_workcenter_mapping()
    if not mapping:
        return []
    members = []
    for wc_name, info in mapping.items():
        if info.get('group') == group_name:
            members.append(wc_name)
    return members
# ============================================================ # ============================================================
# Cache Management # Cache Management
# ============================================================ # ============================================================
@@ -144,12 +197,13 @@ def _load_cache() -> bool:
_CACHE['is_loading'] = True _CACHE['is_loading'] = True
try: try:
# Load workcenter groups from DWH.DW_MES_LOT_V # Load workcenter groups - prioritize SPEC_WORKCENTER_V
wc_groups, wc_mapping = _load_workcenter_data() wc_groups, wc_mapping, wc_short = _load_workcenter_data()
with _CACHE_LOCK: with _CACHE_LOCK:
_CACHE['workcenter_groups'] = wc_groups _CACHE['workcenter_groups'] = wc_groups
_CACHE['workcenter_mapping'] = wc_mapping _CACHE['workcenter_mapping'] = wc_mapping
_CACHE['workcenter_to_short'] = wc_short
_CACHE['last_refresh'] = datetime.now() _CACHE['last_refresh'] = datetime.now()
_CACHE['is_loading'] = False _CACHE['is_loading'] = False
@@ -167,24 +221,32 @@ def _load_cache() -> bool:
def _load_workcenter_data(): def _load_workcenter_data():
"""Load workcenter group data from WIP cache (Redis) or fallback to Oracle. """Load workcenter group data from SPEC_WORKCENTER_V (preferred) or fallback to WIP.
Returns: Returns:
Tuple of (groups_list, mapping_dict) Tuple of (groups_list, mapping_dict, short_mapping_dict)
""" """
# Try to load from WIP Redis cache first # Try to load from SPEC_WORKCENTER_V first (authoritative source)
result = _load_workcenter_mapping_from_spec()
if result[0]: # If groups are loaded
logger.debug("Loaded workcenter groups from SPEC_WORKCENTER_V")
return result
# Fallback to WIP cache
logger.warning("Falling back to WIP source for workcenter groups")
try: try:
from mes_dashboard.core.cache import get_cached_wip_data from mes_dashboard.core.cache import get_cached_wip_data
df = get_cached_wip_data() df = get_cached_wip_data()
if df is not None and not df.empty: if df is not None and not df.empty:
logger.debug("Loading workcenter groups from WIP cache") logger.debug("Loading workcenter groups from WIP cache")
return _extract_workcenter_data_from_df(df) groups, mapping = _extract_workcenter_data_from_df(df)
return groups, mapping, {}
except Exception as exc: except Exception as exc:
logger.warning(f"Failed to load from WIP cache: {exc}") logger.warning(f"Failed to load from WIP cache: {exc}")
# Fallback to Oracle direct query # Fallback to Oracle WIP view direct query
logger.debug("Falling back to Oracle for workcenter groups") logger.debug("Falling back to Oracle WIP view for workcenter groups")
try: try:
sql = f""" sql = f"""
SELECT DISTINCT SELECT DISTINCT
@@ -200,13 +262,72 @@ def _load_workcenter_data():
if df is None or df.empty: if df is None or df.empty:
logger.warning("No workcenter data found in DWH.DW_MES_LOT_V") logger.warning("No workcenter data found in DWH.DW_MES_LOT_V")
return [], {} return [], {}, {}
return _extract_workcenter_data_from_df(df) groups, mapping = _extract_workcenter_data_from_df(df)
return groups, mapping, {}
except Exception as exc: except Exception as exc:
logger.error(f"Failed to load workcenter data: {exc}") logger.error(f"Failed to load workcenter data: {exc}")
return [], {} return [], {}, {}
def _load_workcenter_mapping_from_spec():
    """Load workcenter mapping from DW_MES_SPEC_WORKCENTER_V.

    This is the authoritative source for workcenter -> group mapping.

    Returns:
        Tuple of (groups_list, mapping_dict, short_mapping_dict). All three
        are empty on query failure or when the view returns no rows.
    """
    def _seq_int(value):
        # Oracle NULL sequences arrive via pandas as None or NaN. NaN is
        # truthy, so the naive `int(value or 999)` leaks NaN into int() and
        # raises ValueError; treat None/NaN (and anything unparseable) as 999.
        try:
            if value is None or value != value:  # NaN != NaN
                return 999
            return int(value)
        except (TypeError, ValueError):
            return 999

    try:
        sql = f"""
            SELECT DISTINCT
                WORK_CENTER,
                WORK_CENTER_GROUP,
                WORKCENTERSEQUENCE_GROUP,
                WORK_CENTER_SHORT
            FROM {SPEC_WORKCENTER_VIEW}
            WHERE WORK_CENTER IS NOT NULL
        """
        df = read_sql_df(sql)
        if df is None or df.empty:
            logger.warning("No data found in SPEC_WORKCENTER_V")
            return [], {}, {}

        # Unique groups, ordered by the minimum sequence within each group.
        groups_df = (
            df.groupby('WORK_CENTER_GROUP')['WORKCENTERSEQUENCE_GROUP']
            .min()
            .reset_index()
            .sort_values('WORKCENTERSEQUENCE_GROUP')
        )
        groups = []
        for _, row in groups_df.iterrows():
            group_name = row['WORK_CENTER_GROUP']
            if group_name:
                groups.append({
                    'name': group_name,
                    'sequence': _seq_int(row['WORKCENTERSEQUENCE_GROUP']),
                })

        # WORK_CENTER -> group info; short names kept in a separate dict and
        # only when they are real non-empty strings (NaN is truthy and would
        # otherwise pollute the short-name mapping).
        mapping = {}
        short_mapping = {}
        for _, row in df.iterrows():
            wc_name = row['WORK_CENTER']
            if not wc_name:
                continue
            mapping[wc_name] = {
                'group': row['WORK_CENTER_GROUP'],
                'sequence': _seq_int(row['WORKCENTERSEQUENCE_GROUP']),
            }
            short = row['WORK_CENTER_SHORT']
            if isinstance(short, str) and short:
                short_mapping[wc_name] = short

        logger.info(f"Loaded {len(mapping)} workcenters from SPEC_WORKCENTER_V")
        return groups, mapping, short_mapping
    except Exception as exc:
        logger.error(f"Failed to load from SPEC_WORKCENTER_V: {exc}")
        return [], {}, {}
def _extract_workcenter_data_from_df(df): def _extract_workcenter_data_from_df(df):

View File

@@ -0,0 +1,532 @@
# -*- coding: utf-8 -*-
"""Realtime Equipment Status Cache for MES Dashboard.
Provides cached equipment status from DW_MES_EQUIPMENTSTATUS_WIP_V.
Data is synced periodically (default 5 minutes) and stored in Redis.
"""
import json
import logging
import threading
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from mes_dashboard.core.database import read_sql_df
from mes_dashboard.core.redis_client import get_redis_client, get_key_prefix
from mes_dashboard.config.constants import (
EQUIPMENT_STATUS_DATA_KEY,
EQUIPMENT_STATUS_INDEX_KEY,
EQUIPMENT_STATUS_META_UPDATED_KEY,
EQUIPMENT_STATUS_META_COUNT_KEY,
STATUS_CATEGORY_MAP,
)
logger = logging.getLogger('mes_dashboard.realtime_equipment_cache')
# ============================================================
# Module State
# ============================================================
_SYNC_THREAD: Optional[threading.Thread] = None
_STOP_EVENT = threading.Event()
_SYNC_LOCK = threading.Lock()
# ============================================================
# Oracle Query
# ============================================================
def _load_equipment_status_from_oracle() -> Optional[List[Dict[str, Any]]]:
    """Query DW_MES_EQUIPMENTSTATUS_WIP_V and normalize datetimes.

    Returns:
        List of record dicts with CREATEDATE/LOTTRACKINTIME serialized to
        ISO-8601 strings, [] when the view is empty, or None on failure.
    """
    sql = """
        SELECT
            RESOURCEID,
            EQUIPMENTID,
            OBJECTCATEGORY,
            EQUIPMENTASSETSSTATUS,
            EQUIPMENTASSETSSTATUSREASON,
            JOBORDER,
            JOBMODEL,
            JOBSTAGE,
            JOBID,
            JOBSTATUS,
            CREATEDATE,
            SYMPTOMCODE,
            CAUSECODE,
            REPAIRCODE,
            RUNCARDLOTID,
            LOTTRACKINQTY_PCS,
            LOTTRACKINTIME,
            LOTTRACKINEMPLOYEE
        FROM DWH.DW_MES_EQUIPMENTSTATUS_WIP_V
    """
    try:
        df = read_sql_df(sql)
        if df is None or df.empty:
            logger.warning("No data returned from DW_MES_EQUIPMENTSTATUS_WIP_V")
            return []

        records = df.to_dict('records')
        # JSON-friendly timestamps: convert the two datetime columns in place;
        # non-datetime values (already strings, NaT oddities) are left as-is.
        for record in records:
            for column in ('CREATEDATE', 'LOTTRACKINTIME'):
                value = record.get(column)
                if value is None:
                    continue
                try:
                    record[column] = value.isoformat()
                except (AttributeError, TypeError):
                    pass

        logger.info(f"Loaded {len(records)} records from DW_MES_EQUIPMENTSTATUS_WIP_V")
        return records
    except Exception as exc:
        logger.error(f"Failed to load equipment status from Oracle: {exc}")
        return None
# ============================================================
# Data Aggregation
# ============================================================
def _classify_status(status: Optional[str]) -> str:
    """Map an equipment status code (e.g., 'PRD', 'SBY') to its category.

    Args:
        status: Equipment status code, possibly None/empty.

    Returns:
        The category from STATUS_CATEGORY_MAP, or 'OTHER' for unknown,
        empty, or missing codes.
    """
    if status:
        return STATUS_CATEGORY_MAP.get(status, 'OTHER')
    return 'OTHER'
def _aggregate_by_resourceid(records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Aggregate equipment status records by RESOURCEID.

    For each RESOURCEID:
        - Status fields: take first (assumed identical across the group's
          records — TODO confirm against the source view)
        - LOT_COUNT: count of records
        - TOTAL_TRACKIN_QTY: sum of LOTTRACKINQTY_PCS (None/NaN counted as 0)
        - LATEST_TRACKIN_TIME: max of LOTTRACKINTIME

    Args:
        records: Raw records from the Oracle query.

    Returns:
        Aggregated records, one per RESOURCEID. Records without a
        RESOURCEID are dropped.
    """
    if not records:
        return []

    def _qty(value):
        # pandas delivers NULL quantities as None or NaN; NaN is truthy, so
        # `value or 0` would not catch it and a single NULL lot would turn
        # the whole TOTAL_TRACKIN_QTY into NaN.
        if value is None or value != value:  # NaN != NaN
            return 0
        return value

    # Group rows by RESOURCEID, skipping rows with no id.
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for record in records:
        resource_id = record.get('RESOURCEID')
        if resource_id:
            grouped.setdefault(resource_id, []).append(record)

    aggregated = []
    for resource_id, group in grouped.items():
        first = group[0]
        total_qty = sum(_qty(r.get('LOTTRACKINQTY_PCS')) for r in group)
        # ISO-8601 strings compare correctly lexicographically, so max()
        # yields the most recent track-in time.
        trackin_times = [
            r.get('LOTTRACKINTIME') for r in group if r.get('LOTTRACKINTIME')
        ]
        latest_trackin = max(trackin_times) if trackin_times else None

        status = first.get('EQUIPMENTASSETSSTATUS')
        aggregated.append({
            'RESOURCEID': resource_id,
            'EQUIPMENTID': first.get('EQUIPMENTID'),
            'OBJECTCATEGORY': first.get('OBJECTCATEGORY'),
            'EQUIPMENTASSETSSTATUS': status,
            'EQUIPMENTASSETSSTATUSREASON': first.get('EQUIPMENTASSETSSTATUSREASON'),
            'STATUS_CATEGORY': _classify_status(status),
            'JOBORDER': first.get('JOBORDER'),
            'JOBSTATUS': first.get('JOBSTATUS'),
            'SYMPTOMCODE': first.get('SYMPTOMCODE'),
            'CAUSECODE': first.get('CAUSECODE'),
            'REPAIRCODE': first.get('REPAIRCODE'),
            'LOT_COUNT': len(group),
            'TOTAL_TRACKIN_QTY': total_qty,
            'LATEST_TRACKIN_TIME': latest_trackin,
        })

    logger.debug(f"Aggregated {len(records)} records into {len(aggregated)} unique resources")
    return aggregated
# ============================================================
# Redis Storage
# ============================================================
def _save_to_redis(aggregated: List[Dict[str, Any]]) -> bool:
    """Persist aggregated equipment status to Redis.

    All keys (data blob, id index, metadata) are written through one
    pipeline so readers never see a partially updated cache.

    Args:
        aggregated: Aggregated equipment status records.

    Returns:
        True if the save succeeded, False otherwise.
    """
    client = get_redis_client()
    if not client:
        logger.error("Redis client not available")
        return False

    try:
        prefix = get_key_prefix()
        data_key = f"{prefix}:{EQUIPMENT_STATUS_DATA_KEY}"
        index_key = f"{prefix}:{EQUIPMENT_STATUS_INDEX_KEY}"
        updated_key = f"{prefix}:{EQUIPMENT_STATUS_META_UPDATED_KEY}"
        count_key = f"{prefix}:{EQUIPMENT_STATUS_META_COUNT_KEY}"

        # RESOURCEID -> position in the serialized array, enabling O(1)
        # single-record lookups without deserializing everything twice.
        index_mapping = {}
        for position, record in enumerate(aggregated):
            index_mapping[record['RESOURCEID']] = str(position)

        payload = json.dumps(aggregated, ensure_ascii=False, default=str)
        count = len(aggregated)

        pipe = client.pipeline()
        pipe.set(data_key, payload)
        pipe.delete(index_key)  # drop stale ids before rewriting the hash
        if index_mapping:
            pipe.hset(index_key, mapping=index_mapping)
        pipe.set(updated_key, datetime.now().isoformat())
        pipe.set(count_key, str(count))
        pipe.execute()

        logger.info(f"Saved {count} equipment status records to Redis")
        return True
    except Exception as exc:
        logger.error(f"Failed to save equipment status to Redis: {exc}")
        return False
# ============================================================
# Query API
# ============================================================
def get_all_equipment_status() -> List[Dict[str, Any]]:
    """Return every cached equipment status record.

    Returns:
        List of equipment status records; empty list when Redis or the
        cached data is unavailable.
    """
    client = get_redis_client()
    if not client:
        logger.warning("Redis client not available for equipment status query")
        return []

    try:
        data_key = f"{get_key_prefix()}:{EQUIPMENT_STATUS_DATA_KEY}"
        raw = client.get(data_key)
        if raw:
            return json.loads(raw)
        logger.debug("No equipment status data in cache")
        return []
    except Exception as exc:
        logger.error(f"Failed to get equipment status from cache: {exc}")
        return []
def get_equipment_status_by_id(resource_id: str) -> Optional[Dict[str, Any]]:
    """Look up a single equipment status record by RESOURCEID.

    Uses the Redis index hash to locate the record's position in the
    serialized array (O(1) index lookup).

    Args:
        resource_id: The RESOURCEID to look up.

    Returns:
        The equipment status record, or None when Redis is down, the id
        is unknown, or the cached array is missing/out of sync.
    """
    client = get_redis_client()
    if not client:
        return None

    try:
        prefix = get_key_prefix()
        position = client.hget(f"{prefix}:{EQUIPMENT_STATUS_INDEX_KEY}", resource_id)
        if position is None:
            return None

        raw = client.get(f"{prefix}:{EQUIPMENT_STATUS_DATA_KEY}")
        if not raw:
            return None

        records = json.loads(raw)
        idx = int(position)
        # Guard against an index hash that is stale relative to the data blob.
        if 0 <= idx < len(records):
            return records[idx]
        return None
    except Exception as exc:
        logger.error(f"Failed to get equipment status by ID: {exc}")
        return None
def get_equipment_status_by_ids(resource_ids: List[str]) -> List[Dict[str, Any]]:
    """Look up equipment status records for multiple RESOURCEIDs.

    Args:
        resource_ids: List of RESOURCEIDs to look up.

    Returns:
        List of records for the ids that exist (missing ids are skipped);
        empty list when the input is empty or Redis/cache is unavailable.
    """
    if not resource_ids:
        return []

    client = get_redis_client()
    if not client:
        return []

    try:
        prefix = get_key_prefix()
        # One round-trip for all positions, one for the data blob.
        positions = client.hmget(f"{prefix}:{EQUIPMENT_STATUS_INDEX_KEY}", resource_ids)
        raw = client.get(f"{prefix}:{EQUIPMENT_STATUS_DATA_KEY}")
        if not raw:
            return []
        records = json.loads(raw)

        results = []
        for position in positions:
            if position is None:
                continue
            idx = int(position)
            # Skip indices stale relative to the data blob.
            if 0 <= idx < len(records):
                results.append(records[idx])
        return results
    except Exception as exc:
        logger.error(f"Failed to get equipment status by IDs: {exc}")
        return []
def get_equipment_status_cache_status() -> Dict[str, Any]:
    """Report the health of the equipment status cache.

    Returns:
        Dict with keys: enabled, loaded, count, updated_at.
    """
    from flask import current_app

    def _status(enabled, loaded=False, count=0, updated_at=None):
        # Single place that shapes the status payload.
        return {
            'enabled': enabled,
            'loaded': loaded,
            'count': count,
            'updated_at': updated_at,
        }

    if not current_app.config.get('REALTIME_EQUIPMENT_CACHE_ENABLED', True):
        return _status(False)

    client = get_redis_client()
    if not client:
        return _status(True)

    try:
        prefix = get_key_prefix()
        updated_at = client.get(f"{prefix}:{EQUIPMENT_STATUS_META_UPDATED_KEY}")
        count_raw = client.get(f"{prefix}:{EQUIPMENT_STATUS_META_COUNT_KEY}")
        return _status(
            True,
            loaded=updated_at is not None,
            count=int(count_raw) if count_raw else 0,
            updated_at=updated_at,
        )
    except Exception as exc:
        logger.error(f"Failed to get equipment status cache status: {exc}")
        return _status(True)
# ============================================================
# Background Sync
# ============================================================
def refresh_equipment_status_cache(force: bool = False) -> bool:
    """Refresh the equipment status cache: Oracle -> aggregate -> Redis.

    Args:
        force: Accepted for API compatibility; the refresh currently always
            runs regardless of this flag.

    Returns:
        True if the refresh succeeded, False otherwise.
    """
    # Serialize refreshes so overlapping triggers (manual + background
    # worker) never interleave Oracle reads with Redis writes.
    with _SYNC_LOCK:
        logger.info("Refreshing equipment status cache...")
        started = time.time()

        records = _load_equipment_status_from_oracle()
        if records is None:
            logger.error("Failed to load equipment status from Oracle")
            return False

        succeeded = _save_to_redis(_aggregate_by_resourceid(records))

        elapsed = time.time() - started
        if succeeded:
            logger.info(f"Equipment status cache refreshed in {elapsed:.2f}s")
        else:
            logger.error(f"Equipment status cache refresh failed after {elapsed:.2f}s")
        return succeeded
def _sync_worker(interval: int):
    """Background loop: refresh the cache, then sleep until the next cycle.

    Args:
        interval: Sync interval in seconds.
    """
    logger.info(f"Equipment status sync worker started (interval: {interval}s)")
    while True:
        if _STOP_EVENT.is_set():
            break
        try:
            refresh_equipment_status_cache()
        except Exception as exc:
            # Keep the worker alive across transient failures.
            logger.error(f"Equipment status sync error: {exc}")
        # Sleeps for `interval` seconds, but wakes immediately on stop.
        _STOP_EVENT.wait(timeout=interval)
    logger.info("Equipment status sync worker stopped")
def _start_equipment_status_sync_worker(interval: int):
    """Start the background sync worker thread (no-op if already running).

    Args:
        interval: Sync interval in seconds.
    """
    global _SYNC_THREAD

    already_running = _SYNC_THREAD is not None and _SYNC_THREAD.is_alive()
    if already_running:
        logger.warning("Equipment status sync worker already running")
        return

    _STOP_EVENT.clear()
    worker = threading.Thread(
        target=_sync_worker,
        args=(interval,),
        daemon=True,  # never block interpreter shutdown
        name="equipment-status-sync",
    )
    _SYNC_THREAD = worker
    worker.start()
def stop_equipment_status_sync_worker():
    """Signal the background sync worker to stop and wait briefly for it."""
    global _SYNC_THREAD

    worker = _SYNC_THREAD
    if worker is None or not worker.is_alive():
        return

    logger.info("Stopping equipment status sync worker...")
    _STOP_EVENT.set()
    # NOTE: if a refresh is mid-flight the join may time out and the daemon
    # thread finishes its current cycle on its own.
    worker.join(timeout=5)
    _SYNC_THREAD = None
# ============================================================
# Initialization
# ============================================================
def init_realtime_equipment_cache(app=None):
    """Initialize the realtime equipment status cache.

    Should be called during app initialization. Performs a blocking initial
    sync, then starts the periodic background worker.

    Args:
        app: Flask application instance (optional; falls back to current_app).
    """
    from flask import current_app

    config = (app or current_app).config
    if not config.get('REALTIME_EQUIPMENT_CACHE_ENABLED', True):
        logger.info("Realtime equipment cache is disabled")
        return

    interval = config.get('EQUIPMENT_STATUS_SYNC_INTERVAL', 300)
    logger.info("Initializing realtime equipment cache...")

    # Warm the cache synchronously so the first requests see data.
    refresh_equipment_status_cache()
    _start_equipment_status_sync_worker(interval)

View File

@@ -13,6 +13,17 @@ from mes_dashboard.config.constants import (
EXCLUDED_LOCATIONS, EXCLUDED_LOCATIONS,
EXCLUDED_ASSET_STATUSES, EXCLUDED_ASSET_STATUSES,
DEFAULT_DAYS_BACK, DEFAULT_DAYS_BACK,
STATUS_CATEGORIES,
)
from mes_dashboard.services.resource_cache import get_all_resources
from mes_dashboard.services.realtime_equipment_cache import (
get_all_equipment_status,
get_equipment_status_by_id,
)
from mes_dashboard.services.filter_cache import (
get_workcenter_group,
get_workcenter_short,
get_workcenter_groups,
) )
@@ -401,3 +412,243 @@ def query_resource_filter_options(days_back: int = 30) -> Optional[Dict]:
import traceback import traceback
traceback.print_exc() traceback.print_exc()
return None return None
# ============================================================
# Merged Resource Status Query (Three-Layer Cache)
# ============================================================
def get_merged_resource_status(
    workcenter_groups: Optional[List[str]] = None,
    is_production: Optional[bool] = None,
    is_key: Optional[bool] = None,
    is_monitor: Optional[bool] = None,
    status_categories: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
    """Get merged resource status from three cache layers.

    Combines:
        - resource-cache: Equipment master data (RESOURCENAME, WORKCENTERNAME, etc.)
        - realtime-equipment-cache: Real-time status (EQUIPMENTASSETSSTATUS, JOBORDER, etc.)
        - workcenter-mapping: WORKCENTER_GROUP, WORKCENTER_SHORT

    Args:
        workcenter_groups: Filter by WORKCENTER_GROUP (e.g., ['焊接', '成型'])
        is_production: Filter by PJ_ISPRODUCTION flag
        is_key: Filter by PJ_ISKEY flag
        is_monitor: Filter by PJ_ISMONITOR flag
        status_categories: Filter by STATUS_CATEGORY (e.g., ['PRODUCTIVE', 'DOWN'])

    Returns:
        List of merged equipment status records.
    """
    import logging
    logger = logging.getLogger('mes_dashboard.resource_service')

    # Equipment master data is the driving set: one output record per resource.
    resources = get_all_resources()
    if not resources:
        logger.warning("No resources from resource-cache")
        return []

    equipment_status = get_all_equipment_status()
    # RESOURCEID -> realtime record for O(1) merge lookups.
    status_lookup = {
        s['RESOURCEID']: s
        for s in equipment_status
    } if equipment_status else {}

    merged = []
    for resource in resources:
        # Apply filters BEFORE building the merged record so filtered-out
        # resources skip the mapping lookups and dict construction entirely.
        # Cheapest checks first: the boolean flags live on the resource itself.
        if is_production is not None and bool(resource.get('PJ_ISPRODUCTION')) != is_production:
            continue
        if is_key is not None and bool(resource.get('PJ_ISKEY')) != is_key:
            continue
        if is_monitor is not None and bool(resource.get('PJ_ISMONITOR')) != is_monitor:
            continue

        resource_id = resource.get('RESOURCEID')
        workcenter_name = resource.get('WORKCENTERNAME')

        wc_group = get_workcenter_group(workcenter_name) if workcenter_name else None
        if workcenter_groups and wc_group not in workcenter_groups:
            continue

        realtime = status_lookup.get(resource_id, {})
        if status_categories and realtime.get('STATUS_CATEGORY') not in status_categories:
            continue

        wc_short = get_workcenter_short(workcenter_name) if workcenter_name else None

        merged.append({
            # From resource-cache
            'RESOURCEID': resource_id,
            'RESOURCENAME': resource.get('RESOURCENAME'),
            'WORKCENTERNAME': workcenter_name,
            'RESOURCEFAMILYNAME': resource.get('RESOURCEFAMILYNAME'),
            'PJ_DEPARTMENT': resource.get('PJ_DEPARTMENT'),
            'PJ_ASSETSSTATUS': resource.get('PJ_ASSETSSTATUS'),
            'PJ_ISPRODUCTION': resource.get('PJ_ISPRODUCTION'),
            'PJ_ISKEY': resource.get('PJ_ISKEY'),
            'PJ_ISMONITOR': resource.get('PJ_ISMONITOR'),
            'VENDORNAME': resource.get('VENDORNAME'),
            'VENDORMODEL': resource.get('VENDORMODEL'),
            'LOCATIONNAME': resource.get('LOCATIONNAME'),
            # From workcenter-mapping
            'WORKCENTER_GROUP': wc_group,
            'WORKCENTER_SHORT': wc_short,
            # From realtime-equipment-cache
            'EQUIPMENTASSETSSTATUS': realtime.get('EQUIPMENTASSETSSTATUS'),
            'EQUIPMENTASSETSSTATUSREASON': realtime.get('EQUIPMENTASSETSSTATUSREASON'),
            'STATUS_CATEGORY': realtime.get('STATUS_CATEGORY'),
            'JOBORDER': realtime.get('JOBORDER'),
            'JOBSTATUS': realtime.get('JOBSTATUS'),
            'SYMPTOMCODE': realtime.get('SYMPTOMCODE'),
            'CAUSECODE': realtime.get('CAUSECODE'),
            'REPAIRCODE': realtime.get('REPAIRCODE'),
            'LOT_COUNT': realtime.get('LOT_COUNT'),
            'TOTAL_TRACKIN_QTY': realtime.get('TOTAL_TRACKIN_QTY'),
            'LATEST_TRACKIN_TIME': realtime.get('LATEST_TRACKIN_TIME'),
        })

    logger.debug(f"Merged {len(merged)} resource status records")
    return merged
def get_resource_status_summary(
    workcenter_groups: Optional[List[str]] = None,
    is_production: Optional[bool] = None,
    is_key: Optional[bool] = None,
    is_monitor: Optional[bool] = None,
) -> Dict[str, Any]:
    """Get resource status summary statistics.

    Args:
        workcenter_groups: Filter by WORKCENTER_GROUP
        is_production: Filter by PJ_ISPRODUCTION flag
        is_key: Filter by PJ_ISKEY flag
        is_monitor: Filter by PJ_ISMONITOR flag

    Returns:
        Dict with total_count, by_status_category, by_workcenter_group,
        with_active_job, and with_wip.
    """
    # Merged data with the same filters (status_categories is deliberately
    # not applied, so the summary spans every category).
    data = get_merged_resource_status(
        workcenter_groups=workcenter_groups,
        is_production=is_production,
        is_key=is_key,
        is_monitor=is_monitor,
    )

    by_status_category: Dict[str, int] = {}
    by_workcenter_group: Dict[str, int] = {}
    with_active_job = 0
    with_wip = 0

    # Single pass over the merged records accumulates all statistics.
    for record in data:
        category = record.get('STATUS_CATEGORY') or 'UNKNOWN'
        by_status_category[category] = by_status_category.get(category, 0) + 1

        group = record.get('WORKCENTER_GROUP') or 'UNKNOWN'
        by_workcenter_group[group] = by_workcenter_group.get(group, 0) + 1

        if record.get('JOBORDER'):
            with_active_job += 1
        if (record.get('LOT_COUNT') or 0) > 0:
            with_wip += 1

    return {
        'total_count': len(data),
        'by_status_category': by_status_category,
        'by_workcenter_group': by_workcenter_group,
        'with_active_job': with_active_job,
        'with_wip': with_wip,
    }
def get_workcenter_status_matrix(
    is_production: Optional[bool] = None,
    is_key: Optional[bool] = None,
    is_monitor: Optional[bool] = None,
) -> List[Dict[str, Any]]:
    """Get workcenter × status matrix.

    Returns counts of equipment by workcenter group and status code.

    Args:
        is_production: Filter by PJ_ISPRODUCTION flag
        is_key: Filter by PJ_ISKEY flag
        is_monitor: Filter by PJ_ISMONITOR flag

    Returns:
        List of row dicts (one per workcenter group) ordered by the group's
        display sequence; each row carries total plus per-status counts.
    """
    data = get_merged_resource_status(
        is_production=is_production,
        is_key=is_key,
        is_monitor=is_monitor,
    )
    if not data:
        return []

    # Display ordering for groups; unknown groups sort last (999).
    group_sequence = {
        g['name']: g['sequence']
        for g in (get_workcenter_groups() or [])
    }

    tracked_statuses = ('PRD', 'SBY', 'UDT', 'SDT', 'EGT', 'NST')
    matrix: Dict[str, Dict[str, Any]] = {}

    for record in data:
        group = record.get('WORKCENTER_GROUP') or 'UNKNOWN'
        status = record.get('EQUIPMENTASSETSSTATUS') or 'UNKNOWN'

        row = matrix.get(group)
        if row is None:
            row = {
                'workcenter_group': group,
                'workcenter_sequence': group_sequence.get(group, 999),
                'total': 0,
            }
            for code in tracked_statuses:
                row[code] = 0
            row['OTHER'] = 0
            matrix[group] = row

        row['total'] += 1
        # Unrecognized status codes are lumped into OTHER.
        bucket = status if status in tracked_statuses else 'OTHER'
        row[bucket] += 1

    return sorted(matrix.values(), key=lambda row: row['workcenter_sequence'])

View File

@@ -262,7 +262,7 @@
<div class="header"> <div class="header">
<div class="header-left"> <div class="header-left">
<h1>MES 報表入口</h1> <h1>MES 報表入口</h1>
<p>統一入口WIP 即時看板、機台狀態報表與數據表查詢工具</p> <p>統一入口WIP 即時看板、設備即時概況與數據表查詢工具</p>
</div> </div>
<div class="header-right"> <div class="header-right">
<!-- Health Status Indicator --> <!-- Health Status Indicator -->
@@ -310,7 +310,7 @@
<button class="tab" data-target="wipOverviewFrame">WIP 即時概況</button> <button class="tab" data-target="wipOverviewFrame">WIP 即時概況</button>
{% endif %} {% endif %}
{% if can_view_page('/resource') %} {% if can_view_page('/resource') %}
<button class="tab" data-target="resourceFrame">機台狀態報表</button> <button class="tab" data-target="resourceFrame">設備即時概況</button>
{% endif %} {% endif %}
{% if can_view_page('/tables') %} {% if can_view_page('/tables') %}
<button class="tab" data-target="tableFrame">數據表查詢工具</button> <button class="tab" data-target="tableFrame">數據表查詢工具</button>
@@ -329,7 +329,7 @@
<iframe id="wipOverviewFrame" data-src="/wip-overview" title="WIP 即時概況"></iframe> <iframe id="wipOverviewFrame" data-src="/wip-overview" title="WIP 即時概況"></iframe>
{% endif %} {% endif %}
{% if can_view_page('/resource') %} {% if can_view_page('/resource') %}
<iframe id="resourceFrame" data-src="/resource" title="機台狀態報表"></iframe> <iframe id="resourceFrame" data-src="/resource" title="設備即時概況"></iframe>
{% endif %} {% endif %}
{% if can_view_page('/tables') %} {% if can_view_page('/tables') %}
<iframe id="tableFrame" data-src="/tables" title="數據表查詢工具"></iframe> <iframe id="tableFrame" data-src="/tables" title="數據表查詢工具"></iframe>

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
"""End-to-end tests for realtime equipment status cache.
Tests the full flow from cache sync to API response.
Requires a running server with --run-e2e flag.
"""
import pytest
import requests
@pytest.mark.e2e
class TestEquipmentStatusCacheSync:
    """E2E tests for equipment status cache synchronization reporting."""

    def test_health_check_includes_equipment_status_cache(self, health_url):
        """Health endpoint must expose the equipment_status_cache block."""
        response = requests.get(health_url)
        assert response.status_code == 200
        payload = response.json()

        assert 'equipment_status_cache' in payload
        cache_status = payload['equipment_status_cache']
        # All reporting fields must be present regardless of cache state.
        for field in ('enabled', 'loaded', 'count', 'updated_at'):
            assert field in cache_status

    def test_health_check_includes_workcenter_mapping(self, health_url):
        """Health endpoint must expose the workcenter_mapping block."""
        response = requests.get(health_url)
        assert response.status_code == 200
        payload = response.json()

        assert 'workcenter_mapping' in payload
        wc_status = payload['workcenter_mapping']
        for field in ('loaded', 'workcenter_count', 'group_count'):
            assert field in wc_status
@pytest.mark.e2e
class TestMergedQueryApi:
    """E2E checks for the merged /api/resource/status endpoints.

    Verifies response envelope (success/data/count) and, when data exists,
    the presence of merged fields from the three caches.
    """

    # Hard cap on each HTTP call so a dead server fails fast instead of
    # hanging the e2e run (requests has no default timeout).
    REQUEST_TIMEOUT = 10

    def test_resource_status_endpoint(self, api_base_url):
        """GET /api/resource/status returns merged records with expected keys."""
        url = f"{api_base_url}/resource/status"
        response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        assert 'data' in data
        assert 'count' in data
        # If data exists, verify structure
        if data['data']:
            record = data['data'][0]
            # Should have merged fields
            assert 'RESOURCEID' in record
            assert 'RESOURCENAME' in record
            # Should have workcenter mapping fields
            assert 'WORKCENTER_GROUP' in record
            assert 'WORKCENTER_SHORT' in record
            # Should have realtime status fields
            assert 'STATUS_CATEGORY' in record

    def test_resource_status_with_workcenter_filter(self, api_base_url):
        """workcenter_groups filter restricts records to the requested group."""
        url = f"{api_base_url}/resource/status"
        response = requests.get(url, params={'workcenter_groups': '焊接'},
                                timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        # All results should be in the specified group
        for record in data['data']:
            # May be None if mapping not found
            if record.get('WORKCENTER_GROUP'):
                assert record['WORKCENTER_GROUP'] == '焊接'

    def test_resource_status_with_production_filter(self, api_base_url):
        """is_production filter is accepted and the call succeeds."""
        url = f"{api_base_url}/resource/status"
        response = requests.get(url, params={'is_production': 'true'},
                                timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True

    def test_resource_status_with_status_category_filter(self, api_base_url):
        """status_categories filter restricts records to the listed categories."""
        url = f"{api_base_url}/resource/status"
        response = requests.get(url, params={'status_categories': 'PRODUCTIVE,DOWN'},
                                timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        # All results should be in specified categories
        for record in data['data']:
            if record.get('STATUS_CATEGORY'):
                assert record['STATUS_CATEGORY'] in ['PRODUCTIVE', 'DOWN']

    def test_resource_status_summary_endpoint(self, api_base_url):
        """GET /api/resource/status/summary returns the aggregate counters."""
        url = f"{api_base_url}/resource/status/summary"
        response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        assert 'data' in data
        summary = data['data']
        for field in ('total_count', 'by_status_category', 'by_workcenter_group',
                      'with_active_job', 'with_wip'):
            assert field in summary

    def test_resource_status_matrix_endpoint(self, api_base_url):
        """GET /api/resource/status/matrix returns rows with standard status columns."""
        url = f"{api_base_url}/resource/status/matrix"
        response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        assert 'data' in data
        # If data exists, verify structure
        if data['data']:
            row = data['data'][0]
            assert 'workcenter_group' in row
            assert 'workcenter_sequence' in row
            assert 'total' in row
            # Should have standard status columns
            for col in ('PRD', 'SBY', 'UDT', 'SDT', 'EGT', 'NST', 'OTHER'):
                assert col in row
@pytest.mark.e2e
class TestFilterOptionsIncludeNewFields:
    """E2E check that the filter-options endpoint exposes the new option lists."""

    # Hard cap so a dead server fails fast (requests has no default timeout).
    REQUEST_TIMEOUT = 10

    def test_status_options_endpoint(self, api_base_url):
        """GET /api/resource/status/options includes workcenter_groups and status_categories lists."""
        url = f"{api_base_url}/resource/status/options"
        response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        assert response.status_code == 200
        data = response.json()
        assert data['success'] is True
        assert 'data' in data
        options = data['data']
        # Should have workcenter_groups
        assert 'workcenter_groups' in options
        assert isinstance(options['workcenter_groups'], list)
        # Should have status_categories
        assert 'status_categories' in options
        assert isinstance(options['status_categories'], list)
@pytest.mark.e2e
@pytest.mark.redis
class TestCacheIntegration:
    """E2E consistency check between the health report and the merged API (needs Redis)."""

    # Hard cap so a dead server fails fast (requests has no default timeout).
    REQUEST_TIMEOUT = 10

    def test_cache_data_consistency(self, api_base_url, health_url):
        """API record count never exceeds the cache count reported by /health."""
        # Get health status
        health_resp = requests.get(health_url, timeout=self.REQUEST_TIMEOUT)
        health_data = health_resp.json()
        cache_status = health_data.get('equipment_status_cache', {})
        if not cache_status.get('enabled') or not cache_status.get('loaded'):
            pytest.skip("Equipment status cache not enabled or loaded")
        cache_count = cache_status.get('count', 0)
        # Get all equipment status via API
        api_resp = requests.get(f"{api_base_url}/resource/status",
                                timeout=self.REQUEST_TIMEOUT)
        api_data = api_resp.json()
        # Count should be consistent (within reasonable margin for filtering)
        api_count = api_data.get('count', 0)
        # API may have filters applied from resource-cache, so it could be less
        # but should never exceed cache count
        assert api_count <= cache_count or cache_count == 0

View File

@@ -0,0 +1,494 @@
# -*- coding: utf-8 -*-
"""Unit tests for realtime_equipment_cache module.
Tests aggregation, status classification, and cache query functionality.
"""
import pytest
from unittest.mock import patch, MagicMock
import json
class TestClassifyStatus:
    """Unit tests for _classify_status: raw status code -> status category.

    The ten original one-assert tests are collapsed into a single
    parametrized table; each row is still reported as its own test case.
    """

    @pytest.mark.parametrize(
        'status, expected',
        [
            ('PRD', 'PRODUCTIVE'),
            ('SBY', 'STANDBY'),
            ('UDT', 'DOWN'),
            ('SDT', 'DOWN'),
            ('EGT', 'ENGINEERING'),
            ('NST', 'NOT_SCHEDULED'),
            ('SCRAP', 'INACTIVE'),
            # Unrecognized, missing, and empty codes all fall back to OTHER.
            ('UNKNOWN_STATUS', 'OTHER'),
            (None, 'OTHER'),
            ('', 'OTHER'),
        ],
    )
    def test_classifies_status(self, status, expected):
        """Each raw status value maps to its expected category."""
        from mes_dashboard.services.realtime_equipment_cache import _classify_status
        assert _classify_status(status) == expected
class TestAggregateByResourceid:
    """Unit tests for _aggregate_by_resourceid: collapse per-LOT rows into one row per resource.

    A record factory replaces the original hand-written literal dicts; every
    test overrides only the fields it cares about, assertions are unchanged.
    """

    @staticmethod
    def _record(**overrides):
        """Build a raw equipment-status record; keyword args override the defaults."""
        base = {
            'RESOURCEID': 'R001',
            'EQUIPMENTID': 'E001',
            'OBJECTCATEGORY': 'ASSEMBLY',
            'EQUIPMENTASSETSSTATUS': 'PRD',
            'EQUIPMENTASSETSSTATUSREASON': None,
            'JOBORDER': None,
            'JOBSTATUS': None,
            'SYMPTOMCODE': None,
            'CAUSECODE': None,
            'REPAIRCODE': None,
            'LOTTRACKINQTY_PCS': None,
            'LOTTRACKINTIME': None,
        }
        base.update(overrides)
        return base

    def test_aggregates_single_record(self):
        """A single record yields one aggregate with LOT_COUNT 1 and its quantity."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        records = [
            self._record(JOBORDER='JO001', JOBSTATUS='RUN',
                         LOTTRACKINQTY_PCS=100,
                         LOTTRACKINTIME='2024-01-15T10:00:00'),
        ]
        result = _aggregate_by_resourceid(records)
        assert len(result) == 1
        assert result[0]['RESOURCEID'] == 'R001'
        assert result[0]['LOT_COUNT'] == 1
        assert result[0]['TOTAL_TRACKIN_QTY'] == 100
        assert result[0]['STATUS_CATEGORY'] == 'PRODUCTIVE'

    def test_aggregates_multiple_lots(self):
        """Multiple LOTs on one resource (e.g. oven) sum quantities and keep the latest time."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        records = [
            self._record(JOBORDER='JO001', JOBSTATUS='RUN',
                         LOTTRACKINQTY_PCS=100,
                         LOTTRACKINTIME='2024-01-15T10:00:00'),
            self._record(JOBORDER='JO002', JOBSTATUS='RUN',
                         LOTTRACKINQTY_PCS=150,
                         LOTTRACKINTIME='2024-01-15T11:00:00'),
            self._record(JOBORDER='JO003', JOBSTATUS='RUN',
                         LOTTRACKINQTY_PCS=50,
                         LOTTRACKINTIME='2024-01-15T09:00:00'),
        ]
        result = _aggregate_by_resourceid(records)
        assert len(result) == 1
        assert result[0]['RESOURCEID'] == 'R001'
        assert result[0]['LOT_COUNT'] == 3
        assert result[0]['TOTAL_TRACKIN_QTY'] == 300  # 100 + 150 + 50
        assert result[0]['LATEST_TRACKIN_TIME'] == '2024-01-15T11:00:00'

    def test_aggregates_multiple_resources(self):
        """Distinct resources each produce their own aggregate row."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        records = [
            self._record(JOBORDER='JO001', JOBSTATUS='RUN',
                         LOTTRACKINQTY_PCS=100,
                         LOTTRACKINTIME='2024-01-15T10:00:00'),
            self._record(RESOURCEID='R002', EQUIPMENTID='E002',
                         OBJECTCATEGORY='WAFERSORT',
                         EQUIPMENTASSETSSTATUS='SBY',
                         EQUIPMENTASSETSSTATUSREASON='Waiting'),
        ]
        result = _aggregate_by_resourceid(records)
        assert len(result) == 2
        r1 = next(r for r in result if r['RESOURCEID'] == 'R001')
        r2 = next(r for r in result if r['RESOURCEID'] == 'R002')
        assert r1['LOT_COUNT'] == 1
        assert r1['STATUS_CATEGORY'] == 'PRODUCTIVE'
        assert r2['LOT_COUNT'] == 1
        assert r2['STATUS_CATEGORY'] == 'STANDBY'

    def test_handles_empty_records(self):
        """An empty input list yields an empty result."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        result = _aggregate_by_resourceid([])
        assert result == []

    def test_handles_null_quantities(self):
        """Null quantity/time aggregate to 0 total and a None latest time."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        records = [self._record(EQUIPMENTASSETSSTATUS='SBY')]
        result = _aggregate_by_resourceid(records)
        assert len(result) == 1
        assert result[0]['TOTAL_TRACKIN_QTY'] == 0
        assert result[0]['LATEST_TRACKIN_TIME'] is None

    def test_skips_records_without_resourceid(self):
        """Records with a missing RESOURCEID are dropped from the aggregation."""
        from mes_dashboard.services.realtime_equipment_cache import _aggregate_by_resourceid
        records = [
            self._record(RESOURCEID=None,
                         LOTTRACKINQTY_PCS=100,
                         LOTTRACKINTIME='2024-01-15T10:00:00'),
            self._record(LOTTRACKINQTY_PCS=50,
                         LOTTRACKINTIME='2024-01-15T10:00:00'),
        ]
        result = _aggregate_by_resourceid(records)
        assert len(result) == 1
        assert result[0]['RESOURCEID'] == 'R001'
class TestGetEquipmentStatusById:
    """Unit tests for get_equipment_status_by_id: single-resource cache lookup."""

    @pytest.fixture(autouse=True)
    def reset_modules(self):
        """Clear the module-level Redis client before and after every test."""
        import mes_dashboard.core.redis_client as rc
        rc._REDIS_CLIENT = None
        yield
        rc._REDIS_CLIENT = None

    def test_returns_none_when_redis_unavailable(self):
        """With no Redis client available the lookup yields None."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_id
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=None):
            assert get_equipment_status_by_id('R001') is None

    def test_returns_none_when_id_not_found(self):
        """An ID absent from the index hash yields None."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_id
        fake_redis = MagicMock()
        fake_redis.hget.return_value = None
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
             patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
            assert get_equipment_status_by_id('R999') is None

    def test_returns_matching_record(self):
        """A known ID resolves through the index to its cached record."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_id
        cached_rows = [
            {'RESOURCEID': 'R001', 'STATUS_CATEGORY': 'PRODUCTIVE'},
            {'RESOURCEID': 'R002', 'STATUS_CATEGORY': 'STANDBY'},
        ]
        fake_redis = MagicMock()
        fake_redis.hget.return_value = '1'  # Index 1 -> R002
        fake_redis.get.return_value = json.dumps(cached_rows)
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
             patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
            record = get_equipment_status_by_id('R002')
        assert record is not None
        assert record['RESOURCEID'] == 'R002'
        assert record['STATUS_CATEGORY'] == 'STANDBY'
class TestGetEquipmentStatusByIds:
    """Unit tests for get_equipment_status_by_ids: batch cache lookup."""

    @pytest.fixture(autouse=True)
    def reset_modules(self):
        """Clear the module-level Redis client before and after every test."""
        import mes_dashboard.core.redis_client as rc
        rc._REDIS_CLIENT = None
        yield
        rc._REDIS_CLIENT = None

    def test_returns_empty_for_empty_input(self):
        """An empty ID list yields an empty result without touching Redis."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_ids
        assert get_equipment_status_by_ids([]) == []

    def test_returns_empty_when_redis_unavailable(self):
        """With no Redis client the batch lookup yields an empty list."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_ids
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=None):
            assert get_equipment_status_by_ids(['R001', 'R002']) == []

    def test_returns_matching_records(self):
        """Only IDs present in the index come back; missing IDs are skipped."""
        from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_by_ids
        cached_rows = [
            {'RESOURCEID': 'R001', 'STATUS_CATEGORY': 'PRODUCTIVE'},
            {'RESOURCEID': 'R002', 'STATUS_CATEGORY': 'STANDBY'},
            {'RESOURCEID': 'R003', 'STATUS_CATEGORY': 'DOWN'},
        ]
        fake_redis = MagicMock()
        # R001 at idx 0, R003 at idx 2, R999 not found
        fake_redis.hmget.return_value = ['0', '2', None]
        fake_redis.get.return_value = json.dumps(cached_rows)
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
             patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
            records = get_equipment_status_by_ids(['R001', 'R003', 'R999'])
        assert len(records) == 2
        found_ids = [r['RESOURCEID'] for r in records]
        assert 'R001' in found_ids
        assert 'R003' in found_ids
        assert 'R999' not in found_ids
class TestGetAllEquipmentStatus:
    """Unit tests for get_all_equipment_status: full cache dump."""

    @pytest.fixture(autouse=True)
    def reset_modules(self):
        """Clear the module-level Redis client before and after every test."""
        import mes_dashboard.core.redis_client as rc
        rc._REDIS_CLIENT = None
        yield
        rc._REDIS_CLIENT = None

    def test_returns_empty_when_redis_unavailable(self):
        """With no Redis client the dump yields an empty list."""
        from mes_dashboard.services.realtime_equipment_cache import get_all_equipment_status
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=None):
            assert get_all_equipment_status() == []

    def test_returns_empty_when_no_data(self):
        """A missing cache key yields an empty list."""
        from mes_dashboard.services.realtime_equipment_cache import get_all_equipment_status
        fake_redis = MagicMock()
        fake_redis.get.return_value = None
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
             patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
            assert get_all_equipment_status() == []

    def test_returns_all_cached_data(self):
        """The full cached payload is deserialized and returned in order."""
        from mes_dashboard.services.realtime_equipment_cache import get_all_equipment_status
        cached_rows = [
            {'RESOURCEID': 'R001', 'STATUS_CATEGORY': 'PRODUCTIVE'},
            {'RESOURCEID': 'R002', 'STATUS_CATEGORY': 'STANDBY'},
        ]
        fake_redis = MagicMock()
        fake_redis.get.return_value = json.dumps(cached_rows)
        with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
             patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
            records = get_all_equipment_status()
        assert len(records) == 2
        assert records[0]['RESOURCEID'] == 'R001'
        assert records[1]['RESOURCEID'] == 'R002'
class TestGetEquipmentStatusCacheStatus:
    """Unit tests for get_equipment_status_cache_status: health-report helper."""

    @pytest.fixture
    def app(self):
        """Build a fresh testing app with the DB engine reset."""
        from mes_dashboard.app import create_app
        import mes_dashboard.core.database as db
        db._ENGINE = None
        flask_app = create_app('testing')
        flask_app.config['TESTING'] = True
        return flask_app

    def test_returns_disabled_when_cache_disabled(self, app):
        """With the feature flag off the status reports disabled and not loaded."""
        app.config['REALTIME_EQUIPMENT_CACHE_ENABLED'] = False
        with app.app_context():
            from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_cache_status
            status = get_equipment_status_cache_status()
        assert status['enabled'] is False
        assert status['loaded'] is False

    def test_returns_loaded_status_when_data_exists(self, app):
        """With meta keys present the status reports loaded and the record count."""
        app.config['REALTIME_EQUIPMENT_CACHE_ENABLED'] = True
        meta_values = {
            'mes_wip:equipment_status:meta:updated': '2024-01-15T10:30:00',
            'mes_wip:equipment_status:meta:count': '1000',
        }
        fake_redis = MagicMock()
        fake_redis.get.side_effect = meta_values.get
        with app.app_context():
            with patch('mes_dashboard.services.realtime_equipment_cache.get_redis_client', return_value=fake_redis), \
                 patch('mes_dashboard.services.realtime_equipment_cache.get_key_prefix', return_value='mes_wip'):
                from mes_dashboard.services.realtime_equipment_cache import get_equipment_status_cache_status
                status = get_equipment_status_cache_status()
        assert status['enabled'] is True
        assert status['loaded'] is True
        assert status['count'] == 1000

View File

@@ -0,0 +1,396 @@
# -*- coding: utf-8 -*-
"""Unit tests for resource_service module.
Tests merged resource status queries and summary functions.
"""
import pytest
from unittest.mock import patch, MagicMock
class TestGetMergedResourceStatus:
    """Unit tests for get_merged_resource_status: merge of resource cache,
    realtime equipment cache, and workcenter mapping into one record set."""

    # Baseline resource-cache record reused by both tests.
    _RESOURCE = {
        'RESOURCEID': 'R001',
        'RESOURCENAME': 'Machine1',
        'WORKCENTERNAME': 'WC-01',
        'RESOURCEFAMILYNAME': 'Family1',
        'PJ_DEPARTMENT': 'Dept1',
        'PJ_ASSETSSTATUS': 'Active',
        'PJ_ISPRODUCTION': 1,
        'PJ_ISKEY': 0,
        'PJ_ISMONITOR': 0,
        'VENDORNAME': 'Vendor1',
        'VENDORMODEL': 'Model1',
        'LOCATIONNAME': 'Loc1',
    }

    def test_returns_empty_when_no_resources(self):
        """An empty resource cache yields an empty merged list."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=[]):
            assert get_merged_resource_status() == []

    def test_merges_resource_and_status_data(self):
        """Resource, workcenter-mapping and realtime-status fields land in one record."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        realtime_rows = [
            {
                'RESOURCEID': 'R001',
                'EQUIPMENTASSETSSTATUS': 'PRD',
                'EQUIPMENTASSETSSTATUSREASON': None,
                'STATUS_CATEGORY': 'PRODUCTIVE',
                'JOBORDER': 'JO001',
                'JOBSTATUS': 'RUN',
                'SYMPTOMCODE': None,
                'CAUSECODE': None,
                'REPAIRCODE': None,
                'LOT_COUNT': 2,
                'TOTAL_TRACKIN_QTY': 150,
                'LATEST_TRACKIN_TIME': '2024-01-15T10:00:00',
            }
        ]
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=[dict(self._RESOURCE)]), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime_rows), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value='焊接'), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value='DB'):
            merged = get_merged_resource_status()
        assert len(merged) == 1
        row = merged[0]
        # Resource-cache data
        assert row['RESOURCEID'] == 'R001'
        assert row['RESOURCENAME'] == 'Machine1'
        assert row['WORKCENTERNAME'] == 'WC-01'
        # Workcenter mapping
        assert row['WORKCENTER_GROUP'] == '焊接'
        assert row['WORKCENTER_SHORT'] == 'DB'
        # Realtime status
        assert row['EQUIPMENTASSETSSTATUS'] == 'PRD'
        assert row['STATUS_CATEGORY'] == 'PRODUCTIVE'
        assert row['LOT_COUNT'] == 2

    def test_handles_resources_without_status(self):
        """A resource with no realtime status still appears, status fields None."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=[dict(self._RESOURCE)]), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=[]), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status()
        assert len(merged) == 1
        row = merged[0]
        assert row['RESOURCEID'] == 'R001'
        # Status fields should be None
        assert row['EQUIPMENTASSETSSTATUS'] is None
        assert row['STATUS_CATEGORY'] is None
        assert row['LOT_COUNT'] is None
class TestGetMergedResourceStatusWithFilters:
    """Unit tests for the filter parameters of get_merged_resource_status."""

    @staticmethod
    def _fixture_data():
        """Two resources with complementary flags, plus matching realtime rows."""
        resources = [
            {
                'RESOURCEID': 'R001',
                'RESOURCENAME': 'Machine1',
                'WORKCENTERNAME': 'WC-01',
                'RESOURCEFAMILYNAME': 'Family1',
                'PJ_DEPARTMENT': 'Dept1',
                'PJ_ASSETSSTATUS': 'Active',
                'PJ_ISPRODUCTION': 1,
                'PJ_ISKEY': 1,
                'PJ_ISMONITOR': 0,
                'VENDORNAME': 'Vendor1',
                'VENDORMODEL': 'Model1',
                'LOCATIONNAME': 'Loc1',
            },
            {
                'RESOURCEID': 'R002',
                'RESOURCENAME': 'Machine2',
                'WORKCENTERNAME': 'WC-02',
                'RESOURCEFAMILYNAME': 'Family2',
                'PJ_DEPARTMENT': 'Dept2',
                'PJ_ASSETSSTATUS': 'Active',
                'PJ_ISPRODUCTION': 0,
                'PJ_ISKEY': 0,
                'PJ_ISMONITOR': 1,
                'VENDORNAME': 'Vendor2',
                'VENDORMODEL': 'Model2',
                'LOCATIONNAME': 'Loc2',
            },
        ]
        realtime = [
            {
                'RESOURCEID': 'R001',
                'EQUIPMENTASSETSSTATUS': 'PRD',
                'EQUIPMENTASSETSSTATUSREASON': None,
                'STATUS_CATEGORY': 'PRODUCTIVE',
                'JOBORDER': 'JO001',
                'JOBSTATUS': 'RUN',
                'SYMPTOMCODE': None,
                'CAUSECODE': None,
                'REPAIRCODE': None,
                'LOT_COUNT': 1,
                'TOTAL_TRACKIN_QTY': 100,
                'LATEST_TRACKIN_TIME': '2024-01-15T10:00:00',
            },
            {
                'RESOURCEID': 'R002',
                'EQUIPMENTASSETSSTATUS': 'SBY',
                'EQUIPMENTASSETSSTATUSREASON': 'Waiting',
                'STATUS_CATEGORY': 'STANDBY',
                'JOBORDER': None,
                'JOBSTATUS': None,
                'SYMPTOMCODE': None,
                'CAUSECODE': None,
                'REPAIRCODE': None,
                'LOT_COUNT': 0,
                'TOTAL_TRACKIN_QTY': 0,
                'LATEST_TRACKIN_TIME': None,
            },
        ]
        return resources, realtime

    def test_filters_by_workcenter_groups(self):
        """workcenter_groups keeps only resources whose mapped group matches."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()

        def fake_group(wc_name):
            return '焊接' if wc_name == 'WC-01' else '成型'

        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', side_effect=fake_group), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status(workcenter_groups=['焊接'])
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R001'

    def test_filters_by_is_production(self):
        """is_production=True keeps only PJ_ISPRODUCTION resources."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status(is_production=True)
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R001'

    def test_filters_by_is_key(self):
        """is_key=True keeps only PJ_ISKEY resources."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status(is_key=True)
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R001'

    def test_filters_by_is_monitor(self):
        """is_monitor=True keeps only PJ_ISMONITOR resources."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status(is_monitor=True)
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R002'

    def test_filters_by_status_categories(self):
        """status_categories keeps only resources in the listed categories."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            merged = get_merged_resource_status(status_categories=['PRODUCTIVE'])
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R001'
        assert merged[0]['STATUS_CATEGORY'] == 'PRODUCTIVE'

    def test_combines_multiple_filters(self):
        """Multiple filters are combined with AND semantics."""
        from mes_dashboard.services.resource_service import get_merged_resource_status
        resources, realtime = self._fixture_data()
        with patch('mes_dashboard.services.resource_service.get_all_resources', return_value=resources), \
             patch('mes_dashboard.services.resource_service.get_all_equipment_status', return_value=realtime), \
             patch('mes_dashboard.services.resource_service.get_workcenter_group', return_value=None), \
             patch('mes_dashboard.services.resource_service.get_workcenter_short', return_value=None):
            # Filter: production AND key
            merged = get_merged_resource_status(is_production=True, is_key=True)
        assert len(merged) == 1
        assert merged[0]['RESOURCEID'] == 'R001'
class TestGetResourceStatusSummary:
    """Unit tests for get_resource_status_summary: aggregate counters."""

    def test_returns_empty_summary_when_no_data(self):
        """With no merged records every counter is zero/empty."""
        from mes_dashboard.services.resource_service import get_resource_status_summary
        with patch('mes_dashboard.services.resource_service.get_merged_resource_status', return_value=[]):
            summary = get_resource_status_summary()
        assert summary['total_count'] == 0
        assert summary['by_status_category'] == {}
        assert summary['by_workcenter_group'] == {}

    def test_calculates_summary_statistics(self):
        """Counters by category/group, active-job and WIP counts are correct."""
        from mes_dashboard.services.resource_service import get_resource_status_summary
        merged_rows = [
            {
                'RESOURCEID': 'R001',
                'STATUS_CATEGORY': 'PRODUCTIVE',
                'WORKCENTER_GROUP': '焊接',
                'JOBORDER': 'JO001',
                'LOT_COUNT': 2,
            },
            {
                'RESOURCEID': 'R002',
                'STATUS_CATEGORY': 'PRODUCTIVE',
                'WORKCENTER_GROUP': '焊接',
                'JOBORDER': 'JO002',
                'LOT_COUNT': 1,
            },
            {
                'RESOURCEID': 'R003',
                'STATUS_CATEGORY': 'STANDBY',
                'WORKCENTER_GROUP': '成型',
                'JOBORDER': None,
                'LOT_COUNT': 0,
            },
        ]
        with patch('mes_dashboard.services.resource_service.get_merged_resource_status', return_value=merged_rows):
            summary = get_resource_status_summary()
        assert summary['total_count'] == 3
        assert summary['by_status_category']['PRODUCTIVE'] == 2
        assert summary['by_status_category']['STANDBY'] == 1
        assert summary['by_workcenter_group']['焊接'] == 2
        assert summary['by_workcenter_group']['成型'] == 1
        assert summary['with_active_job'] == 2
        assert summary['with_wip'] == 2
class TestGetWorkcenterStatusMatrix:
    """Unit tests for get_workcenter_status_matrix: group-by-status pivot."""

    def test_returns_empty_when_no_data(self):
        """With no merged records the matrix is empty."""
        from mes_dashboard.services.resource_service import get_workcenter_status_matrix
        with patch('mes_dashboard.services.resource_service.get_merged_resource_status', return_value=[]):
            assert get_workcenter_status_matrix() == []

    def test_builds_matrix_by_workcenter_and_status(self):
        """Rows are built per group (ordered by sequence) with per-status counts."""
        from mes_dashboard.services.resource_service import get_workcenter_status_matrix
        merged_rows = [
            {'WORKCENTER_GROUP': '焊接', 'EQUIPMENTASSETSSTATUS': 'PRD'},
            {'WORKCENTER_GROUP': '焊接', 'EQUIPMENTASSETSSTATUS': 'PRD'},
            {'WORKCENTER_GROUP': '焊接', 'EQUIPMENTASSETSSTATUS': 'SBY'},
            {'WORKCENTER_GROUP': '成型', 'EQUIPMENTASSETSSTATUS': 'UDT'},
        ]
        groups = [
            {'name': '焊接', 'sequence': 1},
            {'name': '成型', 'sequence': 2},
        ]
        with patch('mes_dashboard.services.resource_service.get_merged_resource_status', return_value=merged_rows), \
             patch('mes_dashboard.services.resource_service.get_workcenter_groups', return_value=groups):
            matrix = get_workcenter_status_matrix()
        assert len(matrix) == 2
        # Should be sorted by sequence
        assert matrix[0]['workcenter_group'] == '焊接'
        assert matrix[0]['total'] == 3
        assert matrix[0]['PRD'] == 2
        assert matrix[0]['SBY'] == 1
        assert matrix[1]['workcenter_group'] == '成型'
        assert matrix[1]['total'] == 1
        assert matrix[1]['UDT'] == 1

    def test_handles_unknown_status(self):
        """A status code outside the standard set is counted under OTHER."""
        from mes_dashboard.services.resource_service import get_workcenter_status_matrix
        merged_rows = [
            {'WORKCENTER_GROUP': '焊接', 'EQUIPMENTASSETSSTATUS': 'CUSTOM_STATUS'},
        ]
        groups = [{'name': '焊接', 'sequence': 1}]
        with patch('mes_dashboard.services.resource_service.get_merged_resource_status', return_value=merged_rows), \
             patch('mes_dashboard.services.resource_service.get_workcenter_groups', return_value=groups):
            matrix = get_workcenter_status_matrix()
        assert len(matrix) == 1
        assert matrix[0]['OTHER'] == 1

View File

@@ -0,0 +1,349 @@
# -*- coding: utf-8 -*-
"""Unit tests for workcenter mapping in filter_cache module.
Tests workcenter group lookup and mapping functionality.
"""
import pytest
from unittest.mock import patch, MagicMock
import pandas as pd
class TestGetWorkcenterGroup:
    """Test get_workcenter_group function."""

    @pytest.fixture(autouse=True)
    def reset_cache(self):
        """Clear the module-level cache before and after every test."""
        import mes_dashboard.services.filter_cache as fc

        def _clear():
            with fc._CACHE_LOCK:
                for key in ('workcenter_groups', 'workcenter_mapping',
                            'workcenter_to_short', 'last_refresh'):
                    fc._CACHE[key] = None
                fc._CACHE['is_loading'] = False

        _clear()
        yield
        _clear()

    def test_returns_group_for_valid_workcenter(self):
        """A mapped workcenter name resolves to its group."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {
            'DB-01': {'group': '焊接', 'sequence': 1},
            'WB-01': {'group': '焊線', 'sequence': 2},
        }
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            assert fc.get_workcenter_group('DB-01') == '焊接'

    def test_returns_none_for_unknown_workcenter(self):
        """An unmapped workcenter name resolves to None."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {'DB-01': {'group': '焊接', 'sequence': 1}}
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            assert fc.get_workcenter_group('UNKNOWN') is None

    def test_returns_none_when_mapping_unavailable(self):
        """A missing mapping (None) yields None rather than raising."""
        import mes_dashboard.services.filter_cache as fc

        with patch.object(fc, 'get_workcenter_mapping', return_value=None):
            assert fc.get_workcenter_group('DB-01') is None
class TestGetWorkcenterShort:
    """Test get_workcenter_short function."""

    @pytest.fixture(autouse=True)
    def reset_cache(self):
        """Clear the module-level cache before and after every test."""
        import mes_dashboard.services.filter_cache as fc

        def _clear():
            with fc._CACHE_LOCK:
                for key in ('workcenter_groups', 'workcenter_mapping',
                            'workcenter_to_short', 'last_refresh'):
                    fc._CACHE[key] = None
                fc._CACHE['is_loading'] = False

        _clear()
        yield
        _clear()

    def _prime_cache(self, fc, short_map):
        """Populate the cache directly so no refresh is triggered."""
        from datetime import datetime

        with fc._CACHE_LOCK:
            fc._CACHE.update({
                'workcenter_to_short': short_map,
                'workcenter_groups': [{'name': '焊接', 'sequence': 1}],
                'workcenter_mapping': {},
                'last_refresh': datetime.now(),
            })

    def test_returns_short_name_for_valid_workcenter(self):
        """A cached workcenter resolves to its short name."""
        import mes_dashboard.services.filter_cache as fc

        self._prime_cache(fc, {'DB-01': 'DB', 'WB-01': 'WB'})
        assert fc.get_workcenter_short('DB-01') == 'DB'

    def test_returns_none_for_unknown_workcenter(self):
        """A workcenter absent from the cache resolves to None."""
        import mes_dashboard.services.filter_cache as fc

        self._prime_cache(fc, {'DB-01': 'DB'})
        assert fc.get_workcenter_short('UNKNOWN') is None
class TestGetWorkcentersByGroup:
    """Test get_workcenters_by_group function."""

    def test_returns_workcenters_in_group(self):
        """Only workcenters belonging to the requested group are returned."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {
            'DB-01': {'group': '焊接', 'sequence': 1},
            'DB-02': {'group': '焊接', 'sequence': 1},
            'WB-01': {'group': '焊線', 'sequence': 2},
        }
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            members = fc.get_workcenters_by_group('焊接')

        assert len(members) == 2
        assert 'DB-01' in members
        assert 'DB-02' in members
        assert 'WB-01' not in members

    def test_returns_empty_for_unknown_group(self):
        """A group name with no members yields an empty list."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {'DB-01': {'group': '焊接', 'sequence': 1}}
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            assert fc.get_workcenters_by_group('UNKNOWN') == []

    def test_returns_empty_when_mapping_unavailable(self):
        """A missing mapping (None) yields an empty list rather than raising."""
        import mes_dashboard.services.filter_cache as fc

        with patch.object(fc, 'get_workcenter_mapping', return_value=None):
            assert fc.get_workcenters_by_group('焊接') == []
class TestGetWorkcentersForGroups:
    """Test get_workcenters_for_groups function."""

    def test_returns_workcenters_for_multiple_groups(self):
        """The union of members across the requested groups is returned."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {
            'DB-01': {'group': '焊接', 'sequence': 1},
            'WB-01': {'group': '焊線', 'sequence': 2},
            'MD-01': {'group': '成型', 'sequence': 3},
        }
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            members = fc.get_workcenters_for_groups(['焊接', '焊線'])

        assert len(members) == 2
        assert 'DB-01' in members
        assert 'WB-01' in members
        assert 'MD-01' not in members

    def test_returns_empty_for_empty_groups_list(self):
        """An empty group list yields an empty result."""
        import mes_dashboard.services.filter_cache as fc

        mapping = {'DB-01': {'group': '焊接', 'sequence': 1}}
        with patch.object(fc, 'get_workcenter_mapping', return_value=mapping):
            assert fc.get_workcenters_for_groups([]) == []
class TestGetWorkcenterGroups:
    """Test get_workcenter_groups function."""

    @pytest.fixture(autouse=True)
    def reset_cache(self):
        """Clear the module-level cache before and after every test."""
        import mes_dashboard.services.filter_cache as fc

        def _clear():
            with fc._CACHE_LOCK:
                for key in ('workcenter_groups', 'workcenter_mapping',
                            'workcenter_to_short', 'last_refresh'):
                    fc._CACHE[key] = None
                fc._CACHE['is_loading'] = False

        _clear()
        yield
        _clear()

    def test_returns_groups_sorted_by_sequence(self):
        """All cached groups are returned (ordering itself is not asserted here)."""
        import mes_dashboard.services.filter_cache as fc
        from datetime import datetime

        # Populate the cache directly so no refresh is triggered.
        with fc._CACHE_LOCK:
            fc._CACHE.update({
                'workcenter_groups': [
                    {'name': '成型', 'sequence': 3},
                    {'name': '焊接', 'sequence': 1},
                    {'name': '焊線', 'sequence': 2},
                ],
                'workcenter_mapping': {},
                'workcenter_to_short': {},
                'last_refresh': datetime.now(),
            })

        groups = fc.get_workcenter_groups()

        assert len(groups) == 3
        returned_names = {entry['name'] for entry in groups}
        assert '成型' in returned_names
        assert '焊接' in returned_names
        assert '焊線' in returned_names
class TestLoadWorkcenterMappingFromSpec:
    """Test _load_workcenter_mapping_from_spec function."""

    def test_builds_mapping_from_spec_view(self):
        """Groups, per-workcenter mapping and short names are derived from the view rows."""
        import mes_dashboard.services.filter_cache as fc

        frame = pd.DataFrame([
            {'WORK_CENTER': 'DB-01', 'WORK_CENTER_GROUP': '焊接',
             'WORKCENTERSEQUENCE_GROUP': 1, 'WORK_CENTER_SHORT': 'DB'},
            {'WORK_CENTER': 'DB-02', 'WORK_CENTER_GROUP': '焊接',
             'WORKCENTERSEQUENCE_GROUP': 1, 'WORK_CENTER_SHORT': 'DB'},
            {'WORK_CENTER': 'WB-01', 'WORK_CENTER_GROUP': '焊線',
             'WORKCENTERSEQUENCE_GROUP': 2, 'WORK_CENTER_SHORT': 'WB'},
        ])
        with patch.object(fc, 'read_sql_df', return_value=frame):
            groups, mapping, shorts = fc._load_workcenter_mapping_from_spec()

        # Two distinct groups, deduplicated from three rows.
        assert len(groups) == 2
        names = [entry['name'] for entry in groups]
        assert '焊接' in names
        assert '焊線' in names

        # One mapping entry per workcenter row.
        assert len(mapping) == 3
        assert mapping['DB-01']['group'] == '焊接'
        assert mapping['WB-01']['group'] == '焊線'

        # Short-name lookup mirrors WORK_CENTER_SHORT.
        assert shorts['DB-01'] == 'DB'
        assert shorts['WB-01'] == 'WB'

    def test_returns_empty_when_no_data(self):
        """A None result from read_sql_df yields empty structures."""
        import mes_dashboard.services.filter_cache as fc

        with patch.object(fc, 'read_sql_df', return_value=None):
            groups, mapping, shorts = fc._load_workcenter_mapping_from_spec()

        assert groups == []
        assert mapping == {}
        assert shorts == {}

    def test_handles_empty_dataframe(self):
        """A zero-row DataFrame with the expected columns yields empty structures."""
        import mes_dashboard.services.filter_cache as fc

        empty = pd.DataFrame(columns=[
            'WORK_CENTER', 'WORK_CENTER_GROUP',
            'WORKCENTERSEQUENCE_GROUP', 'WORK_CENTER_SHORT',
        ])
        with patch.object(fc, 'read_sql_df', return_value=empty):
            groups, mapping, shorts = fc._load_workcenter_mapping_from_spec()

        assert groups == []
        assert mapping == {}
        assert shorts == {}
class TestGetCacheStatus:
    """Test get_cache_status function."""

    @pytest.fixture(autouse=True)
    def reset_cache(self):
        """Clear the module-level cache before and after every test."""
        import mes_dashboard.services.filter_cache as fc

        def _clear():
            with fc._CACHE_LOCK:
                for key in ('workcenter_groups', 'workcenter_mapping',
                            'workcenter_to_short', 'last_refresh'):
                    fc._CACHE[key] = None
                fc._CACHE['is_loading'] = False

        _clear()
        yield
        _clear()

    def test_returns_not_loaded_when_empty(self):
        """An empty cache reports loaded=False and no refresh timestamp."""
        import mes_dashboard.services.filter_cache as fc

        status = fc.get_cache_status()

        assert status['loaded'] is False
        assert status['last_refresh'] is None

    def test_returns_loaded_when_data_exists(self):
        """A populated cache reports loaded=True with counts and a refresh timestamp."""
        import mes_dashboard.services.filter_cache as fc
        from datetime import datetime

        refreshed_at = datetime.now()
        with fc._CACHE_LOCK:
            fc._CACHE['workcenter_groups'] = [{'name': 'G1', 'sequence': 1}]
            fc._CACHE['workcenter_mapping'] = {'WC1': {'group': 'G1', 'sequence': 1}}
            fc._CACHE['last_refresh'] = refreshed_at

        status = fc.get_cache_status()

        assert status['loaded'] is True
        assert status['last_refresh'] is not None
        assert status['workcenter_groups_count'] == 1
        assert status['workcenter_mapping_count'] == 1