refactor: unify the equipment dimension data source on resource_cache

- Rework resource_history_service to use resource_cache as the equipment master data source
- Drop the Oracle JOIN against DW_MES_RESOURCE; filter SHIFT data with HISTORYID IN instead
- Add helper functions such as _get_filtered_resources and _build_resource_lookup
- Add a WORKCENTERNAME IS NOT NULL condition to the resource_cache filters
- Make the real-time equipment status matrix cells clickable filters
- Add _clean_nan_values to convert NaN/NaT values for JSON serialization

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: beabigegg
Date: 2026-02-02 13:04:56 +08:00
Parent: f823d8cefd
Commit: 2a1cda30bd

6 changed files with 802 additions and 342 deletions


@@ -4,10 +4,43 @@
 Contains Flask Blueprint for resource/equipment-related API endpoints.
 """
+import math
 from flask import Blueprint, jsonify, request
 from mes_dashboard.core.database import get_db_connection
 from mes_dashboard.core.cache import cache_get, cache_set, make_cache_key
+
+
+def _clean_nan_values(data):
+    """Convert NaN and NaT values to None for JSON serialization.
+
+    Args:
+        data: List of dicts or single dict.
+
+    Returns:
+        Cleaned data with NaN/NaT replaced by None.
+    """
+    if isinstance(data, list):
+        return [_clean_nan_values(item) for item in data]
+    elif isinstance(data, dict):
+        cleaned = {}
+        for key, value in data.items():
+            if isinstance(value, float) and math.isnan(value):
+                cleaned[key] = None
+            elif isinstance(value, str) and value == 'NaT':
+                cleaned[key] = None
+            elif value != value:  # NaN check (NaN != NaN)
+                cleaned[key] = None
+            elif isinstance(value, list):
+                # Recursively clean nested lists (e.g., LOT_DETAILS)
+                cleaned[key] = _clean_nan_values(value)
+            elif isinstance(value, dict):
+                # Recursively clean nested dicts
+                cleaned[key] = _clean_nan_values(value)
+            else:
+                cleaned[key] = value
+        return cleaned
+    return data
+
+
 from mes_dashboard.core.utils import get_days_back
 from mes_dashboard.services.resource_service import (
     query_resource_status_summary,
@@ -202,10 +235,12 @@ def api_resource_status():
             is_monitor=is_monitor,
             status_categories=status_categories,
         )
+        # Clean NaN/NaT values for valid JSON
+        cleaned_data = _clean_nan_values(data)
         return jsonify({
             'success': True,
-            'data': data,
-            'count': len(data),
+            'data': cleaned_data,
+            'count': len(cleaned_data),
         })
     except Exception as exc:
         return jsonify({'success': False, 'error': str(exc)}), 500
@@ -264,7 +299,9 @@ def api_resource_status_summary():
             is_key=is_key,
             is_monitor=is_monitor,
         )
-        return jsonify({'success': True, 'data': data})
+        # Clean NaN/NaT values for valid JSON
+        cleaned_data = _clean_nan_values(data)
+        return jsonify({'success': True, 'data': cleaned_data})
     except Exception as exc:
         return jsonify({'success': False, 'error': str(exc)}), 500
@@ -299,6 +336,8 @@ def api_resource_status_matrix():
             is_key=is_key,
             is_monitor=is_monitor,
         )
-        return jsonify({'success': True, 'data': data})
+        # Clean NaN/NaT values for valid JSON
+        cleaned_data = _clean_nan_values(data)
+        return jsonify({'success': True, 'data': cleaned_data})
     except Exception as exc:
         return jsonify({'success': False, 'error': str(exc)}), 500
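
Aside: a minimal sketch (not part of the diff) of why the routes now clean data before jsonify. json.dumps renders a float NaN as the bare token NaN, which JSON.parse in the browser rejects:

import json
import math

raw = [{'RESOURCEID': 'R1', 'TOTAL_TRACKIN_QTY': float('nan'), 'LATEST_TRACKIN_TIME': 'NaT'}]

def clean(data):
    # Same idea as _clean_nan_values, reduced to the two cases shown here.
    if isinstance(data, list):
        return [clean(item) for item in data]
    if isinstance(data, dict):
        return {k: None if (isinstance(v, float) and math.isnan(v)) or v == 'NaT' else v
                for k, v in data.items()}
    return data

print(json.dumps(raw))         # contains bare NaN, so JSON.parse() in the browser throws
print(json.dumps(clean(raw)))  # [{"RESOURCEID": "R1", "TOTAL_TRACKIN_QTY": null, "LATEST_TRACKIN_TIME": null}]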


@@ -109,12 +109,35 @@ def _classify_status(status: Optional[str]) -> str:
     return STATUS_CATEGORY_MAP.get(status, 'OTHER')
 
 
+def _is_valid_value(value) -> bool:
+    """Check if a value is valid (not None, not NaN, not empty string).
+
+    Args:
+        value: The value to check.
+
+    Returns:
+        True if valid, False otherwise.
+    """
+    if value is None:
+        return False
+    if isinstance(value, str) and (not value.strip() or value == 'NaT'):
+        return False
+    # Check for NaN (pandas NaN or float NaN)
+    try:
+        if value != value:  # NaN != NaN is True
+            return False
+    except (TypeError, ValueError):
+        pass
+    return True
+
+
 def _aggregate_by_resourceid(records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     """Aggregate equipment status records by RESOURCEID.
 
     For each RESOURCEID:
     - Status fields: take first (should be same for all records)
-    - LOT_COUNT: count of records
+    - LOT_COUNT: count of distinct RUNCARDLOTID values
+    - LOT_DETAILS: list of LOT information for tooltip display
     - TOTAL_TRACKIN_QTY: sum of LOTTRACKINQTY_PCS
     - LATEST_TRACKIN_TIME: max of LOTTRACKINTIME
@@ -141,12 +164,29 @@ def _aggregate_by_resourceid(records: List[Dict[str, Any]]) -> List[Dict[str, An
     for resource_id, group in grouped.items():
         first = group[0]
 
-        # Calculate aggregates
-        lot_count = len(group)
-        total_qty = sum(
-            r.get('LOTTRACKINQTY_PCS') or 0
-            for r in group
-        )
+        # Collect unique LOTs by RUNCARDLOTID
+        seen_lots = set()
+        lot_details = []
+        total_qty = 0
+
+        for r in group:
+            lot_id = r.get('RUNCARDLOTID')
+            qty = r.get('LOTTRACKINQTY_PCS')
+
+            # Sum only valid quantities
+            if _is_valid_value(qty):
+                total_qty += qty
+
+            # Only add unique LOTs with valid RUNCARDLOTID
+            if _is_valid_value(lot_id) and lot_id not in seen_lots:
+                seen_lots.add(lot_id)
+                trackin_time = r.get('LOTTRACKINTIME')
+                trackin_employee = r.get('LOTTRACKINEMPLOYEE')
+                lot_details.append({
+                    'RUNCARDLOTID': lot_id,
+                    'LOTTRACKINQTY_PCS': qty if _is_valid_value(qty) else None,
+                    'LOTTRACKINTIME': trackin_time if _is_valid_value(trackin_time) else None,
+                    'LOTTRACKINEMPLOYEE': trackin_employee if _is_valid_value(trackin_employee) else None,
+                })
 
         # Find latest trackin time
         trackin_times = [
@@ -170,7 +210,8 @@ def _aggregate_by_resourceid(records: List[Dict[str, Any]]) -> List[Dict[str, An
             'SYMPTOMCODE': first.get('SYMPTOMCODE'),
             'CAUSECODE': first.get('CAUSECODE'),
             'REPAIRCODE': first.get('REPAIRCODE'),
-            'LOT_COUNT': lot_count,
+            'LOT_COUNT': len(seen_lots),  # Count distinct RUNCARDLOTID
+            'LOT_DETAILS': lot_details,  # LOT details for tooltip
             'TOTAL_TRACKIN_QTY': total_qty,
             'LATEST_TRACKIN_TIME': latest_trackin,
         })
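
Aside: a toy illustration with hypothetical records (not from this diff) of what the distinct-LOT aggregation changes: duplicate RUNCARDLOTID rows no longer inflate LOT_COUNT, and NaN quantities are skipped in the total:

records = [
    {'RUNCARDLOTID': 'LOT-A', 'LOTTRACKINQTY_PCS': 100},
    {'RUNCARDLOTID': 'LOT-A', 'LOTTRACKINQTY_PCS': 100},   # duplicate lot id
    {'RUNCARDLOTID': 'LOT-B', 'LOTTRACKINQTY_PCS': float('nan')},
]

seen_lots, total_qty = set(), 0
for r in records:
    qty = r['LOTTRACKINQTY_PCS']
    if qty == qty:                 # NaN != NaN, so NaN rows are skipped
        total_qty += qty
    seen_lots.add(r['RUNCARDLOTID'])

print(len(seen_lots), total_qty)   # 2 200 (the old code would have reported LOT_COUNT = 3)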


@@ -52,6 +52,9 @@ def _build_filter_sql() -> str:
"""Build SQL WHERE clause for global filters.""" """Build SQL WHERE clause for global filters."""
conditions = [EQUIPMENT_TYPE_FILTER.strip()] conditions = [EQUIPMENT_TYPE_FILTER.strip()]
# Workcenter filter - exclude resources without WORKCENTERNAME
conditions.append("WORKCENTERNAME IS NOT NULL")
# Location filter # Location filter
if EXCLUDED_LOCATIONS: if EXCLUDED_LOCATIONS:
locations_list = ", ".join(f"'{loc}'" for loc in EXCLUDED_LOCATIONS) locations_list = ", ".join(f"'{loc}'" for loc in EXCLUDED_LOCATIONS)


@@ -6,24 +6,23 @@ Provides functions for querying historical equipment performance data including:
 - Summary data (KPI, trend, heatmap, workcenter comparison)
 - Hierarchical detail data (workcenter → family → resource)
 - CSV export with streaming
+
+Architecture:
+- Uses resource_cache as the single source of truth for equipment master data
+- Queries DW_MES_RESOURCESTATUS_SHIFT only for valid cached resource IDs
+- Merges dimension data (WORKCENTERNAME, RESOURCEFAMILYNAME, etc.) from cache
 """
 import io
 import csv
 import logging
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from datetime import datetime, timedelta
+from datetime import datetime
 from typing import Optional, Dict, List, Any, Generator
 
 import pandas as pd
 
 from mes_dashboard.core.database import read_sql_df
-from mes_dashboard.config.constants import (
-    EXCLUDED_LOCATIONS,
-    EXCLUDED_ASSET_STATUSES,
-    EQUIPMENT_TYPE_FILTER,
-    EQUIPMENT_FLAG_FILTERS,
-)
 
 logger = logging.getLogger('mes_dashboard.resource_history')
@@ -34,6 +33,139 @@ MAX_QUERY_DAYS = 730
 E10_STATUSES = ['PRD', 'SBY', 'UDT', 'SDT', 'EGT', 'NST']
 
+
+# ============================================================
+# Resource Cache Integration
+# ============================================================
+
+def _get_filtered_resources(
+    workcenter_groups: Optional[List[str]] = None,
+    families: Optional[List[str]] = None,
+    is_production: bool = False,
+    is_key: bool = False,
+    is_monitor: bool = False,
+) -> List[Dict[str, Any]]:
+    """Get filtered resources from resource_cache.
+
+    Applies additional filters on top of the cache's pre-applied global filters.
+
+    Args:
+        workcenter_groups: Optional list of WORKCENTER_GROUP names
+        families: Optional list of RESOURCEFAMILYNAME values
+        is_production: Filter by production flag
+        is_key: Filter by key equipment flag
+        is_monitor: Filter by monitor flag
+
+    Returns:
+        List of resource dicts matching the filters.
+    """
+    from mes_dashboard.services.resource_cache import get_all_resources
+    from mes_dashboard.services.filter_cache import get_workcenter_mapping
+
+    resources = get_all_resources()
+    if not resources:
+        logger.warning("No resources available from cache")
+        return []
+
+    # Get workcenter mapping for group filtering
+    wc_mapping = get_workcenter_mapping() or {}
+
+    # Build set of workcenters if filtering by groups
+    allowed_workcenters = None
+    if workcenter_groups:
+        allowed_workcenters = set()
+        for wc_name, info in wc_mapping.items():
+            if info.get('group') in workcenter_groups:
+                allowed_workcenters.add(wc_name)
+
+    # Apply filters
+    filtered = []
+    for r in resources:
+        # Workcenter group filter
+        if allowed_workcenters is not None:
+            if r.get('WORKCENTERNAME') not in allowed_workcenters:
+                continue
+        # Family filter
+        if families and r.get('RESOURCEFAMILYNAME') not in families:
+            continue
+        # Equipment flags filter
+        if is_production and r.get('PJ_ISPRODUCTION') != 1:
+            continue
+        if is_key and r.get('PJ_ISKEY') != 1:
+            continue
+        if is_monitor and r.get('PJ_ISMONITOR') != 1:
+            continue
+        filtered.append(r)
+
+    logger.debug(f"Filtered {len(resources)} resources to {len(filtered)}")
+    return filtered
+
+
+def _build_resource_lookup(resources: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
+    """Build a lookup dict from RESOURCEID to resource info.
+
+    Args:
+        resources: List of resource dicts from cache.
+
+    Returns:
+        Dict mapping RESOURCEID to resource dict.
+    """
+    return {r['RESOURCEID']: r for r in resources if r.get('RESOURCEID')}
+
+
+def _get_resource_ids_sql_list(resources: List[Dict[str, Any]], max_chunk_size: int = 1000) -> List[str]:
+    """Build SQL IN clause lists for resource IDs.
+
+    Oracle has a limit of ~1000 items per IN clause, so we chunk if needed.
+
+    Args:
+        resources: List of resource dicts.
+        max_chunk_size: Maximum items per IN clause.
+
+    Returns:
+        List of SQL IN clause strings (e.g., "'ID1', 'ID2', 'ID3'").
+    """
+    resource_ids = [r['RESOURCEID'] for r in resources if r.get('RESOURCEID')]
+    if not resource_ids:
+        return []
+
+    # Escape single quotes
+    escaped_ids = [rid.replace("'", "''") for rid in resource_ids]
+
+    # Chunk into groups
+    chunks = []
+    for i in range(0, len(escaped_ids), max_chunk_size):
+        chunk = escaped_ids[i:i + max_chunk_size]
+        chunks.append("'" + "', '".join(chunk) + "'")
+    return chunks
+
+
+def _build_historyid_filter(resources: List[Dict[str, Any]]) -> str:
+    """Build SQL WHERE clause for HISTORYID filtering.
+
+    Handles chunking for large resource lists.
+
+    Args:
+        resources: List of resource dicts.
+
+    Returns:
+        SQL condition string (e.g., "HISTORYID IN ('ID1', 'ID2') OR HISTORYID IN ('ID3', 'ID4')").
+    """
+    chunks = _get_resource_ids_sql_list(resources)
+    if not chunks:
+        return "1=0"  # No resources = no results
+
+    if len(chunks) == 1:
+        return f"HISTORYID IN ({chunks[0]})"
+
+    # Multiple chunks need OR
+    conditions = [f"HISTORYID IN ({chunk})" for chunk in chunks]
+    return "(" + " OR ".join(conditions) + ")"
+
+
 # ============================================================
 # Filter Options
 # ============================================================
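
Aside: a quick sketch with hypothetical IDs of the chunking behaviour _build_historyid_filter relies on. Oracle rejects IN lists with more than 1000 expressions (ORA-01795), so 2500 cached resource IDs become three OR-joined IN lists:

ids = [f'R{i}' for i in range(2500)]
chunks = [ids[i:i + 1000] for i in range(0, len(ids), 1000)]
conditions = ["HISTORYID IN (" + ", ".join(f"'{x}'" for x in chunk) + ")" for chunk in chunks]
historyid_filter = "(" + " OR ".join(conditions) + ")" if len(conditions) > 1 else conditions[0]

print(len(chunks))              # 3
print(historyid_filter[:36])    # (HISTORYID IN ('R0', 'R1', 'R2', 'R3
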
@@ -85,6 +217,9 @@ def query_summary(
 ) -> Optional[Dict[str, Any]]:
     """Query summary data including KPI, trend, heatmap, and workcenter comparison.
 
+    Uses resource_cache as the source for equipment master data.
+    Queries only DW_MES_RESOURCESTATUS_SHIFT for SHIFT data.
+
     Args:
         start_date: Start date in YYYY-MM-DD format
         end_date: End date in YYYY-MM-DD format
@@ -105,123 +240,96 @@ def query_summary(
         return {'error': validation}
 
     try:
+        # Get filtered resources from cache
+        resources = _get_filtered_resources(
+            workcenter_groups=workcenter_groups,
+            families=families,
+            is_production=is_production,
+            is_key=is_key,
+            is_monitor=is_monitor,
+        )
+
+        if not resources:
+            logger.warning("No resources match the filter criteria")
+            return {
+                'kpi': _build_kpi_from_df(pd.DataFrame()),
+                'trend': [],
+                'heatmap': [],
+                'workcenter_comparison': []
+            }
+
+        # Build resource lookup for dimension merging
+        resource_lookup = _build_resource_lookup(resources)
+        historyid_filter = _build_historyid_filter(resources)
+
         # Build SQL components
         date_trunc = _get_date_trunc(granularity)
-        location_filter = _build_location_filter('r')
-        asset_status_filter = _build_asset_status_filter('r')
-        equipment_filter = _build_equipment_flags_filter(is_production, is_key, is_monitor, 'r')
-        workcenter_filter = _build_workcenter_groups_filter(workcenter_groups, 'r')
-        family_filter = _build_families_filter(families, 'r')
 
-        # Common CTE with MATERIALIZE hint to force Oracle to materialize the subquery
-        # This prevents the optimizer from inlining the CTE multiple times
+        # Base CTE with resource filter
         base_cte = f"""
             WITH shift_data AS (
                 SELECT /*+ MATERIALIZE */ HISTORYID, TXNDATE, OLDSTATUSNAME, HOURS
                 FROM DWH.DW_MES_RESOURCESTATUS_SHIFT
                 WHERE TXNDATE >= TO_DATE('{start_date}', 'YYYY-MM-DD')
                   AND TXNDATE < TO_DATE('{end_date}', 'YYYY-MM-DD') + 1
+                  AND {historyid_filter}
             )
         """
 
-        # Common filter conditions
-        common_filters = f"""
-            WHERE {EQUIPMENT_TYPE_FILTER}
-            {location_filter}
-            {asset_status_filter}
-            {equipment_filter}
-            {workcenter_filter}
-            {family_filter}
-        """
-
-        # Build all 4 SQL queries
+        # KPI query - aggregate all
         kpi_sql = f"""
             {base_cte}
             SELECT
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'NST' THEN ss.HOURS ELSE 0 END) as NST_HOURS,
-                COUNT(DISTINCT ss.HISTORYID) as MACHINE_COUNT
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            {common_filters}
+                SUM(CASE WHEN OLDSTATUSNAME = 'PRD' THEN HOURS ELSE 0 END) as PRD_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SBY' THEN HOURS ELSE 0 END) as SBY_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'UDT' THEN HOURS ELSE 0 END) as UDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SDT' THEN HOURS ELSE 0 END) as SDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'EGT' THEN HOURS ELSE 0 END) as EGT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'NST' THEN HOURS ELSE 0 END) as NST_HOURS,
+                COUNT(DISTINCT HISTORYID) as MACHINE_COUNT
+            FROM shift_data
         """
 
+        # Trend query - group by date
         trend_sql = f"""
             {base_cte}
             SELECT
                 {date_trunc} as DATA_DATE,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'NST' THEN ss.HOURS ELSE 0 END) as NST_HOURS,
-                COUNT(DISTINCT ss.HISTORYID) as MACHINE_COUNT
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            {common_filters}
+                SUM(CASE WHEN OLDSTATUSNAME = 'PRD' THEN HOURS ELSE 0 END) as PRD_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SBY' THEN HOURS ELSE 0 END) as SBY_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'UDT' THEN HOURS ELSE 0 END) as UDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SDT' THEN HOURS ELSE 0 END) as SDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'EGT' THEN HOURS ELSE 0 END) as EGT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'NST' THEN HOURS ELSE 0 END) as NST_HOURS,
+                COUNT(DISTINCT HISTORYID) as MACHINE_COUNT
+            FROM shift_data
             GROUP BY {date_trunc}
             ORDER BY DATA_DATE
         """
 
-        heatmap_sql = f"""
+        # Heatmap/Comparison query - group by HISTORYID and date, merge dimension in Python
+        heatmap_raw_sql = f"""
             {base_cte}
             SELECT
-                r.WORKCENTERNAME,
+                HISTORYID,
                 {date_trunc} as DATA_DATE,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            WHERE r.WORKCENTERNAME IS NOT NULL
-              AND {EQUIPMENT_TYPE_FILTER}
-            {location_filter}
-            {asset_status_filter}
-            {equipment_filter}
-            {workcenter_filter}
-            {family_filter}
-            GROUP BY r.WORKCENTERNAME, {date_trunc}
-            ORDER BY r.WORKCENTERNAME, DATA_DATE
-        """
-
-        comparison_sql = f"""
-            {base_cte}
-            SELECT
-                r.WORKCENTERNAME,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS,
-                COUNT(DISTINCT ss.HISTORYID) as MACHINE_COUNT
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            WHERE r.WORKCENTERNAME IS NOT NULL
-              AND {EQUIPMENT_TYPE_FILTER}
-            {location_filter}
-            {asset_status_filter}
-            {equipment_filter}
-            {workcenter_filter}
-            {family_filter}
-            GROUP BY r.WORKCENTERNAME
-            ORDER BY PRD_HOURS DESC
+                SUM(CASE WHEN OLDSTATUSNAME = 'PRD' THEN HOURS ELSE 0 END) as PRD_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SBY' THEN HOURS ELSE 0 END) as SBY_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'UDT' THEN HOURS ELSE 0 END) as UDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SDT' THEN HOURS ELSE 0 END) as SDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'EGT' THEN HOURS ELSE 0 END) as EGT_HOURS
+            FROM shift_data
+            GROUP BY HISTORYID, {date_trunc}
+            ORDER BY HISTORYID, DATA_DATE
         """
 
-        # Execute all 4 queries in parallel using ThreadPoolExecutor
+        # Execute queries in parallel
         results = {}
-        with ThreadPoolExecutor(max_workers=4) as executor:
+        with ThreadPoolExecutor(max_workers=3) as executor:
             futures = {
                 executor.submit(read_sql_df, kpi_sql): 'kpi',
                 executor.submit(read_sql_df, trend_sql): 'trend',
-                executor.submit(read_sql_df, heatmap_sql): 'heatmap',
-                executor.submit(read_sql_df, comparison_sql): 'comparison',
+                executor.submit(read_sql_df, heatmap_raw_sql): 'heatmap_raw',
             }
             for future in as_completed(futures):
                 query_name = futures[future]
@@ -234,8 +342,11 @@ def query_summary(
         # Build response from results
         kpi = _build_kpi_from_df(results.get('kpi', pd.DataFrame()))
         trend = _build_trend_from_df(results.get('trend', pd.DataFrame()), granularity)
-        heatmap = _build_heatmap_from_df(results.get('heatmap', pd.DataFrame()), granularity)
-        workcenter_comparison = _build_comparison_from_df(results.get('comparison', pd.DataFrame()))
+
+        # Build heatmap and comparison from raw data with dimension merge
+        heatmap_raw_df = results.get('heatmap_raw', pd.DataFrame())
+        heatmap = _build_heatmap_from_raw_df(heatmap_raw_df, resource_lookup, granularity)
+        workcenter_comparison = _build_comparison_from_raw_df(heatmap_raw_df, resource_lookup)
 
         return {
             'kpi': kpi,
@@ -254,10 +365,6 @@ def query_summary(
 # Detail Query
 # ============================================================
 
-# Maximum records limit for detail query (disabled - no limit)
-# MAX_DETAIL_RECORDS = 5000
-
 def query_detail(
     start_date: str,
     end_date: str,
@@ -270,6 +377,7 @@ def query_detail(
 ) -> Optional[Dict[str, Any]]:
     """Query hierarchical detail data.
 
+    Uses resource_cache as the source for equipment master data.
     Returns flat data with workcenter, family, resource dimensions.
     Frontend handles hierarchy assembly.
@@ -293,58 +401,56 @@ def query_detail(
         return {'error': validation}
 
     try:
-        # Build SQL components
-        location_filter = _build_location_filter('r')
-        asset_status_filter = _build_asset_status_filter('r')
-        equipment_filter = _build_equipment_flags_filter(is_production, is_key, is_monitor, 'r')
-        workcenter_filter = _build_workcenter_groups_filter(workcenter_groups, 'r')
-        family_filter = _build_families_filter(families, 'r')
+        # Get filtered resources from cache
+        resources = _get_filtered_resources(
+            workcenter_groups=workcenter_groups,
+            families=families,
+            is_production=is_production,
+            is_key=is_key,
+            is_monitor=is_monitor,
+        )
 
-        # Common CTE with MATERIALIZE hint
-        base_cte = f"""
+        if not resources:
+            logger.warning("No resources match the filter criteria")
+            return {
+                'data': [],
+                'total': 0,
+                'truncated': False,
+                'max_records': None
+            }
+
+        # Build resource lookup for dimension merging
+        resource_lookup = _build_resource_lookup(resources)
+        historyid_filter = _build_historyid_filter(resources)
+
+        # Query SHIFT data grouped by HISTORYID
+        detail_sql = f"""
             WITH shift_data AS (
                 SELECT /*+ MATERIALIZE */ HISTORYID, OLDSTATUSNAME, HOURS
                 FROM DWH.DW_MES_RESOURCESTATUS_SHIFT
                 WHERE TXNDATE >= TO_DATE('{start_date}', 'YYYY-MM-DD')
                   AND TXNDATE < TO_DATE('{end_date}', 'YYYY-MM-DD') + 1
+                  AND {historyid_filter}
             )
-        """
-
-        # Common filter conditions
-        common_filters = f"""
-            WHERE {EQUIPMENT_TYPE_FILTER}
-            {location_filter}
-            {asset_status_filter}
-            {equipment_filter}
-            {workcenter_filter}
-            {family_filter}
-        """
-
-        # Query all detail data (no pagination)
-        detail_sql = f"""
-            {base_cte}
             SELECT
-                r.WORKCENTERNAME,
-                r.RESOURCEFAMILYNAME,
-                r.RESOURCENAME,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'NST' THEN ss.HOURS ELSE 0 END) as NST_HOURS,
-                SUM(ss.HOURS) as TOTAL_HOURS
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            {common_filters}
-            GROUP BY r.WORKCENTERNAME, r.RESOURCEFAMILYNAME, r.RESOURCENAME
-            ORDER BY r.WORKCENTERNAME, r.RESOURCEFAMILYNAME, r.RESOURCENAME
+                HISTORYID,
+                SUM(CASE WHEN OLDSTATUSNAME = 'PRD' THEN HOURS ELSE 0 END) as PRD_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SBY' THEN HOURS ELSE 0 END) as SBY_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'UDT' THEN HOURS ELSE 0 END) as UDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SDT' THEN HOURS ELSE 0 END) as SDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'EGT' THEN HOURS ELSE 0 END) as EGT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'NST' THEN HOURS ELSE 0 END) as NST_HOURS,
+                SUM(HOURS) as TOTAL_HOURS
+            FROM shift_data
+            GROUP BY HISTORYID
+            ORDER BY HISTORYID
         """
 
         detail_df = read_sql_df(detail_sql)
-        total = len(detail_df) if detail_df is not None else 0
 
-        data = _build_detail_from_df(detail_df)
+        # Build detail data with dimension merge from cache
+        data = _build_detail_from_raw_df(detail_df, resource_lookup)
+        total = len(data)
 
         return {
             'data': data,
return { return {
'data': data, 'data': data,
@@ -375,6 +481,7 @@ def export_csv(
 ) -> Generator[str, None, None]:
     """Generate CSV data as a stream for export.
 
+    Uses resource_cache as the source for equipment master data.
     Yields CSV rows one at a time to avoid memory issues with large datasets.
 
     Args:
@@ -397,49 +504,51 @@ def export_csv(
         return
 
     try:
-        # Build SQL components
-        location_filter = _build_location_filter('r')
-        asset_status_filter = _build_asset_status_filter('r')
-        equipment_filter = _build_equipment_flags_filter(is_production, is_key, is_monitor, 'r')
-        workcenter_filter = _build_workcenter_groups_filter(workcenter_groups, 'r')
-        family_filter = _build_families_filter(families, 'r')
+        # Get filtered resources from cache
+        resources = _get_filtered_resources(
+            workcenter_groups=workcenter_groups,
+            families=families,
+            is_production=is_production,
+            is_key=is_key,
+            is_monitor=is_monitor,
+        )
 
-        # Query all data with CTE and MATERIALIZE hint for performance optimization
+        if not resources:
+            yield "Error: No resources match the filter criteria\n"
+            return
+
+        # Build resource lookup for dimension merging
+        resource_lookup = _build_resource_lookup(resources)
+        historyid_filter = _build_historyid_filter(resources)
+
+        # Get workcenter mapping for WORKCENTER_GROUP
+        from mes_dashboard.services.filter_cache import get_workcenter_mapping
+        wc_mapping = get_workcenter_mapping() or {}
+
+        # Query SHIFT data grouped by HISTORYID
         sql = f"""
             WITH shift_data AS (
                 SELECT /*+ MATERIALIZE */ HISTORYID, OLDSTATUSNAME, HOURS
                 FROM DWH.DW_MES_RESOURCESTATUS_SHIFT
                 WHERE TXNDATE >= TO_DATE('{start_date}', 'YYYY-MM-DD')
                   AND TXNDATE < TO_DATE('{end_date}', 'YYYY-MM-DD') + 1
+                  AND {historyid_filter}
             )
             SELECT
-                r.WORKCENTERNAME,
-                r.RESOURCEFAMILYNAME,
-                r.RESOURCENAME,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'PRD' THEN ss.HOURS ELSE 0 END) as PRD_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SBY' THEN ss.HOURS ELSE 0 END) as SBY_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'UDT' THEN ss.HOURS ELSE 0 END) as UDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'SDT' THEN ss.HOURS ELSE 0 END) as SDT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'EGT' THEN ss.HOURS ELSE 0 END) as EGT_HOURS,
-                SUM(CASE WHEN ss.OLDSTATUSNAME = 'NST' THEN ss.HOURS ELSE 0 END) as NST_HOURS,
-                SUM(ss.HOURS) as TOTAL_HOURS
-            FROM shift_data ss
-            JOIN DWH.DW_MES_RESOURCE r ON ss.HISTORYID = r.RESOURCEID
-            WHERE {EQUIPMENT_TYPE_FILTER}
-            {location_filter}
-            {asset_status_filter}
-            {equipment_filter}
-            {workcenter_filter}
-            {family_filter}
-            GROUP BY r.WORKCENTERNAME, r.RESOURCEFAMILYNAME, r.RESOURCENAME
-            ORDER BY r.WORKCENTERNAME, r.RESOURCEFAMILYNAME, r.RESOURCENAME
+                HISTORYID,
+                SUM(CASE WHEN OLDSTATUSNAME = 'PRD' THEN HOURS ELSE 0 END) as PRD_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SBY' THEN HOURS ELSE 0 END) as SBY_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'UDT' THEN HOURS ELSE 0 END) as UDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'SDT' THEN HOURS ELSE 0 END) as SDT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'EGT' THEN HOURS ELSE 0 END) as EGT_HOURS,
+                SUM(CASE WHEN OLDSTATUSNAME = 'NST' THEN HOURS ELSE 0 END) as NST_HOURS,
+                SUM(HOURS) as TOTAL_HOURS
+            FROM shift_data
+            GROUP BY HISTORYID
+            ORDER BY HISTORYID
         """
 
         df = read_sql_df(sql)
 
-        # Get workcenter mapping to convert WORKCENTERNAME to WORKCENTER_GROUP
-        from mes_dashboard.services.filter_cache import get_workcenter_mapping
-        wc_mapping = get_workcenter_mapping() or {}
-
         # Write CSV header
         output = io.StringIO()
         writer = csv.writer(output)
@@ -455,47 +564,57 @@ def export_csv(
         output.seek(0)
 
         # Write data rows
-        for _, row in df.iterrows():
-            prd = float(row['PRD_HOURS'] or 0)
-            sby = float(row['SBY_HOURS'] or 0)
-            udt = float(row['UDT_HOURS'] or 0)
-            sdt = float(row['SDT_HOURS'] or 0)
-            egt = float(row['EGT_HOURS'] or 0)
-            nst = float(row['NST_HOURS'] or 0)
-            total = float(row['TOTAL_HOURS'] or 0)
-
-            # Map WORKCENTERNAME to WORKCENTER_GROUP
-            wc_name = row['WORKCENTERNAME']
-            wc_info = wc_mapping.get(wc_name, {})
-            wc_group = wc_info.get('group', wc_name)  # Fallback to workcentername if no mapping
-
-            # Calculate percentages
-            ou_pct = _calc_ou_pct(prd, sby, udt, sdt, egt)
-            availability_pct = _calc_availability_pct(prd, sby, udt, sdt, egt, nst)
-            prd_pct = round(prd / total * 100, 1) if total > 0 else 0
-            sby_pct = round(sby / total * 100, 1) if total > 0 else 0
-            udt_pct = round(udt / total * 100, 1) if total > 0 else 0
-            sdt_pct = round(sdt / total * 100, 1) if total > 0 else 0
-            egt_pct = round(egt / total * 100, 1) if total > 0 else 0
-            nst_pct = round(nst / total * 100, 1) if total > 0 else 0
-
-            csv_row = [
-                wc_group,
-                row['RESOURCEFAMILYNAME'],
-                row['RESOURCENAME'],
-                f"{ou_pct}%",
-                f"{availability_pct}%",
-                round(prd, 1), f"{prd_pct}%",
-                round(sby, 1), f"{sby_pct}%",
-                round(udt, 1), f"{udt_pct}%",
-                round(sdt, 1), f"{sdt_pct}%",
-                round(egt, 1), f"{egt_pct}%",
-                round(nst, 1), f"{nst_pct}%"
-            ]
-            writer.writerow(csv_row)
-            yield output.getvalue()
-            output.truncate(0)
-            output.seek(0)
+        if df is not None:
+            for _, row in df.iterrows():
+                historyid = row['HISTORYID']
+                resource_info = resource_lookup.get(historyid, {})
+
+                # Skip if no resource info found
+                if not resource_info:
+                    continue
+
+                prd = float(row['PRD_HOURS'] or 0)
+                sby = float(row['SBY_HOURS'] or 0)
+                udt = float(row['UDT_HOURS'] or 0)
+                sdt = float(row['SDT_HOURS'] or 0)
+                egt = float(row['EGT_HOURS'] or 0)
+                nst = float(row['NST_HOURS'] or 0)
+                total = float(row['TOTAL_HOURS'] or 0)
+
+                # Get dimension data from cache
+                wc_name = resource_info.get('WORKCENTERNAME', '')
+                wc_info = wc_mapping.get(wc_name, {})
+                wc_group = wc_info.get('group', wc_name)
+                family = resource_info.get('RESOURCEFAMILYNAME', '')
+                resource_name = resource_info.get('RESOURCENAME', '')
+
+                # Calculate percentages
+                ou_pct = _calc_ou_pct(prd, sby, udt, sdt, egt)
+                availability_pct = _calc_availability_pct(prd, sby, udt, sdt, egt, nst)
+                prd_pct = round(prd / total * 100, 1) if total > 0 else 0
+                sby_pct = round(sby / total * 100, 1) if total > 0 else 0
+                udt_pct = round(udt / total * 100, 1) if total > 0 else 0
+                sdt_pct = round(sdt / total * 100, 1) if total > 0 else 0
+                egt_pct = round(egt / total * 100, 1) if total > 0 else 0
+                nst_pct = round(nst / total * 100, 1) if total > 0 else 0
+
+                csv_row = [
+                    wc_group,
+                    family,
+                    resource_name,
+                    f"{ou_pct}%",
+                    f"{availability_pct}%",
+                    round(prd, 1), f"{prd_pct}%",
+                    round(sby, 1), f"{sby_pct}%",
+                    round(udt, 1), f"{udt_pct}%",
+                    round(sdt, 1), f"{sdt_pct}%",
+                    round(egt, 1), f"{egt_pct}%",
+                    round(nst, 1), f"{nst_pct}%"
+                ]
+                writer.writerow(csv_row)
+                yield output.getvalue()
+                output.truncate(0)
+                output.seek(0)
 
     except Exception as exc:
         logger.error(f"CSV export failed: {exc}")
@@ -523,93 +642,17 @@ def _validate_date_range(start_date: str, end_date: str) -> Optional[str]:
 def _get_date_trunc(granularity: str) -> str:
-    """Get Oracle TRUNC expression for date granularity."""
+    """Get Oracle TRUNC expression for date granularity.
+
+    Note: operates directly on the shift_data CTE columns (no table alias).
+    """
     trunc_map = {
-        'day': "TRUNC(ss.TXNDATE)",
-        'week': "TRUNC(ss.TXNDATE, 'IW')",
-        'month': "TRUNC(ss.TXNDATE, 'MM')",
-        'year': "TRUNC(ss.TXNDATE, 'YYYY')"
+        'day': "TRUNC(TXNDATE)",
+        'week': "TRUNC(TXNDATE, 'IW')",
+        'month': "TRUNC(TXNDATE, 'MM')",
+        'year': "TRUNC(TXNDATE, 'YYYY')"
     }
-    return trunc_map.get(granularity, "TRUNC(ss.TXNDATE)")
+    return trunc_map.get(granularity, "TRUNC(TXNDATE)")
 
 
-def _build_location_filter(alias: str) -> str:
-    """Build SQL filter for excluded locations."""
-    if not EXCLUDED_LOCATIONS:
-        return ""
-    excluded = "', '".join(EXCLUDED_LOCATIONS)
-    return f"AND ({alias}.LOCATIONNAME IS NULL OR {alias}.LOCATIONNAME NOT IN ('{excluded}'))"
-
-
-def _build_asset_status_filter(alias: str) -> str:
-    """Build SQL filter for excluded asset statuses."""
-    if not EXCLUDED_ASSET_STATUSES:
-        return ""
-    excluded = "', '".join(EXCLUDED_ASSET_STATUSES)
-    return f"AND ({alias}.PJ_ASSETSSTATUS IS NULL OR {alias}.PJ_ASSETSSTATUS NOT IN ('{excluded}'))"
-
-
-def _build_equipment_flags_filter(
-    is_production: bool,
-    is_key: bool,
-    is_monitor: bool,
-    alias: str
-) -> str:
-    """Build SQL filter for equipment flags."""
-    conditions = []
-    if is_production:
-        conditions.append(f"NVL({alias}.PJ_ISPRODUCTION, 0) = 1")
-    if is_key:
-        conditions.append(f"NVL({alias}.PJ_ISKEY, 0) = 1")
-    if is_monitor:
-        conditions.append(f"NVL({alias}.PJ_ISMONITOR, 0) = 1")
-    return "AND " + " AND ".join(conditions) if conditions else ""
-
-
-def _build_workcenter_groups_filter(groups: Optional[List[str]], alias: str) -> str:
-    """Build SQL filter for workcenter groups.
-
-    Uses filter_cache to get workcentername list for selected groups.
-
-    Args:
-        groups: List of WORKCENTER_GROUP names, or None for no filter
-        alias: Table alias for WORKCENTERNAME column
-
-    Returns:
-        SQL filter clause (empty string if no filter)
-    """
-    if not groups:
-        return ""
-
-    from mes_dashboard.services.filter_cache import get_workcenters_for_groups
-    workcenters = get_workcenters_for_groups(groups)
-    if not workcenters:
-        return ""
-
-    # Escape single quotes and build IN clause
-    escaped = [wc.replace("'", "''") for wc in workcenters]
-    in_list = "', '".join(escaped)
-    return f"AND {alias}.WORKCENTERNAME IN ('{in_list}')"
-
-
-def _build_families_filter(families: Optional[List[str]], alias: str) -> str:
-    """Build SQL filter for resource families.
-
-    Args:
-        families: List of RESOURCEFAMILYNAME values, or None for no filter
-        alias: Table alias for RESOURCEFAMILYNAME column
-
-    Returns:
-        SQL filter clause (empty string if no filter)
-    """
-    if not families:
-        return ""
-
-    # Escape single quotes and build IN clause
-    escaped = [f.replace("'", "''") for f in families]
-    in_list = "', '".join(escaped)
-    return f"AND {alias}.RESOURCEFAMILYNAME IN ('{in_list}')"
-
-
 def _safe_float(value, default=0.0) -> float:
@@ -713,8 +756,23 @@ def _build_trend_from_df(df: pd.DataFrame, granularity: str) -> List[Dict]:
     return result
 
 
-def _build_heatmap_from_df(df: pd.DataFrame, granularity: str) -> List[Dict]:
-    """Build heatmap data from query result DataFrame."""
+def _build_heatmap_from_raw_df(
+    df: pd.DataFrame,
+    resource_lookup: Dict[str, Dict[str, Any]],
+    granularity: str
+) -> List[Dict]:
+    """Build heatmap data from raw SHIFT query grouped by HISTORYID.
+
+    Merges dimension data from resource_lookup.
+
+    Args:
+        df: DataFrame with HISTORYID, DATA_DATE, and status hours.
+        resource_lookup: Dict mapping RESOURCEID to resource info.
+        granularity: Time granularity for date formatting.
+
+    Returns:
+        List of heatmap data dicts.
+    """
     if df is None or len(df) == 0:
         return []
@@ -725,10 +783,17 @@ def _build_heatmap_from_df(df: pd.DataFrame, granularity: str) -> List[Dict]:
     # Aggregate data by WORKCENTER_GROUP and date
     aggregated = {}
     for _, row in df.iterrows():
-        wc_name = row['WORKCENTERNAME']
-        # Skip rows with NaN workcenter name
-        if pd.isna(wc_name):
+        historyid = row['HISTORYID']
+        resource_info = resource_lookup.get(historyid, {})
+
+        # Skip if no resource info
+        if not resource_info:
             continue
+
+        wc_name = resource_info.get('WORKCENTERNAME', '')
+        if not wc_name:
+            continue
+
         wc_info = wc_mapping.get(wc_name, {})
         wc_group = wc_info.get('group', wc_name)
         date_str = _format_date(row['DATA_DATE'], granularity)
@@ -756,8 +821,21 @@ def _build_heatmap_from_df(df: pd.DataFrame, granularity: str) -> List[Dict]:
     return result
 
 
-def _build_comparison_from_df(df: pd.DataFrame) -> List[Dict]:
-    """Build workcenter comparison data from query result DataFrame."""
+def _build_comparison_from_raw_df(
+    df: pd.DataFrame,
+    resource_lookup: Dict[str, Dict[str, Any]]
+) -> List[Dict]:
+    """Build workcenter comparison data from raw SHIFT query grouped by HISTORYID.
+
+    Merges dimension data from resource_lookup.
+
+    Args:
+        df: DataFrame with HISTORYID and status hours (may have DATA_DATE if from heatmap query).
+        resource_lookup: Dict mapping RESOURCEID to resource info.
+
+    Returns:
+        List of comparison data dicts.
+    """
     if df is None or len(df) == 0:
         return []
@@ -765,25 +843,44 @@ def _build_comparison_from_df(df: pd.DataFrame) -> List[Dict]:
     from mes_dashboard.services.filter_cache import get_workcenter_mapping
     wc_mapping = get_workcenter_mapping() or {}
 
-    # Aggregate data by WORKCENTER_GROUP
-    aggregated = {}
+    # First aggregate by HISTORYID (in case df is by HISTORYID + date)
+    by_resource = {}
     for _, row in df.iterrows():
-        wc_name = row['WORKCENTERNAME']
-        # Skip rows with NaN workcenter name
-        if pd.isna(wc_name):
-            continue
+        historyid = row['HISTORYID']
+        if historyid not in by_resource:
+            by_resource[historyid] = {'prd': 0, 'sby': 0, 'udt': 0, 'sdt': 0, 'egt': 0}
+        by_resource[historyid]['prd'] += _safe_float(row['PRD_HOURS'])
+        by_resource[historyid]['sby'] += _safe_float(row['SBY_HOURS'])
+        by_resource[historyid]['udt'] += _safe_float(row['UDT_HOURS'])
+        by_resource[historyid]['sdt'] += _safe_float(row['SDT_HOURS'])
+        by_resource[historyid]['egt'] += _safe_float(row['EGT_HOURS'])
+
+    # Then aggregate by WORKCENTER_GROUP
+    aggregated = {}
+    for historyid, hours in by_resource.items():
+        resource_info = resource_lookup.get(historyid, {})
+
+        # Skip if no resource info
+        if not resource_info:
+            continue
+
+        wc_name = resource_info.get('WORKCENTERNAME', '')
+        if not wc_name:
+            continue
+
         wc_info = wc_mapping.get(wc_name, {})
         wc_group = wc_info.get('group', wc_name)
         if wc_group not in aggregated:
             aggregated[wc_group] = {'prd': 0, 'sby': 0, 'udt': 0, 'sdt': 0, 'egt': 0, 'machine_count': 0}
-        aggregated[wc_group]['prd'] += _safe_float(row['PRD_HOURS'])
-        aggregated[wc_group]['sby'] += _safe_float(row['SBY_HOURS'])
-        aggregated[wc_group]['udt'] += _safe_float(row['UDT_HOURS'])
-        aggregated[wc_group]['sdt'] += _safe_float(row['SDT_HOURS'])
-        aggregated[wc_group]['egt'] += _safe_float(row['EGT_HOURS'])
-        aggregated[wc_group]['machine_count'] += int(_safe_float(row['MACHINE_COUNT']))
+        aggregated[wc_group]['prd'] += hours['prd']
+        aggregated[wc_group]['sby'] += hours['sby']
+        aggregated[wc_group]['udt'] += hours['udt']
+        aggregated[wc_group]['sdt'] += hours['sdt']
+        aggregated[wc_group]['egt'] += hours['egt']
+        aggregated[wc_group]['machine_count'] += 1
 
     result = []
     for wc_group, data in aggregated.items():
@@ -799,8 +896,21 @@ def _build_comparison_from_df(df: pd.DataFrame) -> List[Dict]:
     return result
 
 
-def _build_detail_from_df(df: pd.DataFrame) -> List[Dict]:
-    """Build detail data from query result DataFrame."""
+def _build_detail_from_raw_df(
+    df: pd.DataFrame,
+    resource_lookup: Dict[str, Dict[str, Any]]
+) -> List[Dict]:
+    """Build detail data from raw SHIFT query grouped by HISTORYID.
+
+    Merges dimension data from resource_lookup.
+
+    Args:
+        df: DataFrame with HISTORYID and status hours.
+        resource_lookup: Dict mapping RESOURCEID to resource info.
+
+    Returns:
+        List of detail data dicts.
+    """
     if df is None or len(df) == 0:
         return []
@@ -810,9 +920,11 @@ def _build_detail_from_df(df: pd.DataFrame) -> List[Dict]:
     result = []
     for _, row in df.iterrows():
-        # Skip rows with NaN workcenter name
-        wc_name = row['WORKCENTERNAME']
-        if pd.isna(wc_name):
+        historyid = row['HISTORYID']
+        resource_info = resource_lookup.get(historyid, {})
+
+        # Skip if no resource info
+        if not resource_info:
             continue
 
         prd = _safe_float(row['PRD_HOURS'])
@@ -823,18 +935,17 @@ def _build_detail_from_df(df: pd.DataFrame) -> List[Dict]:
         nst = _safe_float(row['NST_HOURS'])
         total = _safe_float(row['TOTAL_HOURS'])
 
-        # Map WORKCENTERNAME to WORKCENTER_GROUP
+        # Get dimension data from cache
+        wc_name = resource_info.get('WORKCENTERNAME', '')
         wc_info = wc_mapping.get(wc_name, {})
         wc_group = wc_info.get('group', wc_name)  # Fallback to workcentername if no mapping
-
-        # Handle NaN in string fields
-        family = row['RESOURCEFAMILYNAME']
-        resource = row['RESOURCENAME']
+        family = resource_info.get('RESOURCEFAMILYNAME', '')
+        resource_name = resource_info.get('RESOURCENAME', '')
 
         result.append({
             'workcenter': wc_group,
-            'family': family if not pd.isna(family) else '',
-            'resource': resource if not pd.isna(resource) else '',
+            'family': family or '',
+            'resource': resource_name or '',
             'ou_pct': _calc_ou_pct(prd, sby, udt, sdt, egt),
             'availability_pct': _calc_availability_pct(prd, sby, udt, sdt, egt, nst),
             'prd_hours': round(prd, 1),
@@ -852,4 +963,6 @@ def _build_detail_from_df(df: pd.DataFrame) -> List[Dict]:
             'machine_count': 1
         })
 
+    # Sort by workcenter, family, resource
+    result.sort(key=lambda x: (x['workcenter'], x['family'], x['resource']))
+
     return result
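
Aside: a toy run with hypothetical numbers of the two-stage roll-up in _build_comparison_from_raw_df. Hours are summed per HISTORYID across dates first, so machine_count counts distinct resources instead of resource-date rows:

rows = [
    {'HISTORYID': 'R1', 'PRD_HOURS': 8.0},   # R1, day 1
    {'HISTORYID': 'R1', 'PRD_HOURS': 4.0},   # R1, day 2
    {'HISTORYID': 'R2', 'PRD_HOURS': 6.0},
]

by_resource = {}
for row in rows:
    by_resource[row['HISTORYID']] = by_resource.get(row['HISTORYID'], 0.0) + row['PRD_HOURS']

print(by_resource)   # {'R1': 12.0, 'R2': 6.0} -> machine_count = 2, not 3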


@@ -501,6 +501,7 @@ def get_merged_resource_status(
             'CAUSECODE': realtime.get('CAUSECODE'),
             'REPAIRCODE': realtime.get('REPAIRCODE'),
             'LOT_COUNT': realtime.get('LOT_COUNT'),
+            'LOT_DETAILS': realtime.get('LOT_DETAILS'),  # LOT details for tooltip
             'TOTAL_TRACKIN_QTY': realtime.get('TOTAL_TRACKIN_QTY'),
             'LATEST_TRACKIN_TIME': realtime.get('LATEST_TRACKIN_TIME'),
         }


@@ -283,6 +283,58 @@
 .matrix-table .zero { color: #d1d5db; }
 
+/* Clickable matrix cells */
+.matrix-table td.clickable {
+    cursor: pointer;
+    transition: background 0.15s;
+}
+.matrix-table td.clickable:hover {
+    background: #e0e7ff;
+}
+.matrix-table td.clickable.selected {
+    background: #dbeafe;
+    box-shadow: inset 0 0 0 2px var(--primary);
+}
+
+/* Matrix filter indicator */
+.matrix-filter-indicator {
+    display: none;
+    align-items: center;
+    gap: 8px;
+    padding: 8px 12px;
+    background: #dbeafe;
+    border-radius: 6px;
+    margin-top: 12px;
+    font-size: 13px;
+    color: var(--primary-dark);
+}
+.matrix-filter-indicator.active {
+    display: inline-flex;
+}
+.matrix-filter-indicator .filter-text {
+    font-weight: 500;
+}
+.btn-clear-filter {
+    background: transparent;
+    border: 1px solid var(--primary);
+    color: var(--primary);
+    padding: 4px 10px;
+    border-radius: 4px;
+    font-size: 12px;
+    cursor: pointer;
+    transition: all 0.2s;
+}
+.btn-clear-filter:hover {
+    background: var(--primary);
+    color: white;
+}
+
 .ou-badge {
     display: inline-block;
     padding: 2px 8px;
@@ -389,6 +441,86 @@
     color: var(--muted);
 }
 
+/* LOT Tooltip */
+.lot-info {
+    position: relative;
+    cursor: pointer;
+}
+.lot-info:hover .lot-tooltip {
+    display: block;
+}
+.lot-tooltip {
+    display: none;
+    position: absolute;
+    top: calc(100% + 8px);
+    left: 0;
+    background: #1e293b;
+    color: #fff;
+    padding: 12px;
+    border-radius: 8px;
+    font-size: 11px;
+    white-space: nowrap;
+    z-index: 1000;
+    box-shadow: 0 4px 12px rgba(0,0,0,0.3);
+    min-width: 280px;
+    max-width: 400px;
+    max-height: 300px;
+    overflow-y: auto;
+}
+.lot-tooltip::after {
+    content: '';
+    position: absolute;
+    bottom: 100%;
+    left: 20px;
+    border: 6px solid transparent;
+    border-bottom-color: #1e293b;
+}
+.lot-tooltip-title {
+    font-weight: 600;
+    margin-bottom: 8px;
+    padding-bottom: 6px;
+    border-bottom: 1px solid #475569;
+    color: #94a3b8;
+}
+.lot-item {
+    padding: 6px 0;
+    border-bottom: 1px solid #334155;
+}
+.lot-item:last-child {
+    border-bottom: none;
+}
+.lot-item-header {
+    font-weight: 600;
+    color: #60a5fa;
+    margin-bottom: 4px;
+}
+.lot-item-row {
+    display: flex;
+    gap: 12px;
+    flex-wrap: wrap;
+}
+.lot-item-field {
+    display: flex;
+    gap: 4px;
+}
+.lot-item-label {
+    color: #94a3b8;
+}
+.lot-item-value {
+    color: #e2e8f0;
+}
+
 /* Responsive */
 @media (max-width: 1200px) {
     .summary-grid { grid-template-columns: repeat(3, 1fr); }
@@ -498,6 +630,11 @@
<span class="spinner"></span> 載入中... <span class="spinner"></span> 載入中...
</div> </div>
</div> </div>
<div id="matrixFilterIndicator" class="matrix-filter-indicator">
<span>篩選中:</span>
<span class="filter-text" id="matrixFilterText"></span>
<button class="btn-clear-filter" onclick="clearMatrixFilter()">清除篩選</button>
</div>
</div> </div>
<!-- Equipment List --> <!-- Equipment List -->
@@ -516,6 +653,7 @@
 <script>
     let allEquipment = [];
     let workcenterGroups = [];
+    let matrixFilter = null;  // { workcenter_group, status }
 
     function toggleFilter(checkbox, id) {
         const label = document.getElementById(id);
@@ -637,18 +775,28 @@
             const avail = row.PRD + row.SBY + row.UDT + row.SDT + row.EGT;
             const ou = avail > 0 ? ((row.PRD / avail) * 100).toFixed(1) : 0;
             const ouClass = ou >= 80 ? 'high' : (ou >= 50 ? 'medium' : 'low');
+            const wg = row.workcenter_group;
+
+            // Helper to render clickable cell
+            const renderCell = (status, value, colClass) => {
+                if (value === 0) {
+                    return `<td class="clickable ${colClass} zero" data-wg="${wg}" data-status="${status}">${value}</td>`;
+                }
+                const selected = matrixFilter && matrixFilter.workcenter_group === wg && matrixFilter.status === status ? 'selected' : '';
+                return `<td class="clickable ${colClass} ${selected}" data-wg="${wg}" data-status="${status}" onclick="filterByMatrixCell('${wg}', '${status}')">${value}</td>`;
+            };
+
             html += `
                 <tr>
-                    <td>${row.workcenter_group}</td>
+                    <td>${wg}</td>
                     <td class="col-total">${row.total}</td>
-                    <td class="col-prd ${row.PRD === 0 ? 'zero' : ''}">${row.PRD}</td>
-                    <td class="col-sby ${row.SBY === 0 ? 'zero' : ''}">${row.SBY}</td>
-                    <td class="col-udt ${row.UDT === 0 ? 'zero' : ''}">${row.UDT}</td>
-                    <td class="col-sdt ${row.SDT === 0 ? 'zero' : ''}">${row.SDT}</td>
-                    <td class="col-egt ${row.EGT === 0 ? 'zero' : ''}">${row.EGT}</td>
-                    <td class="col-nst ${row.NST === 0 ? 'zero' : ''}">${row.NST}</td>
-                    <td class="col-other ${row.OTHER === 0 ? 'zero' : ''}">${row.OTHER}</td>
+                    ${renderCell('PRD', row.PRD, 'col-prd')}
+                    ${renderCell('SBY', row.SBY, 'col-sby')}
+                    ${renderCell('UDT', row.UDT, 'col-udt')}
+                    ${renderCell('SDT', row.SDT, 'col-sdt')}
+                    ${renderCell('EGT', row.EGT, 'col-egt')}
+                    ${renderCell('NST', row.NST, 'col-nst')}
+                    ${renderCell('OTHER', row.OTHER, 'col-other')}
                     <td><span class="ou-badge ${ouClass}">${ou}%</span></td>
                 </tr>
             `;
@@ -668,6 +816,10 @@
     async function loadEquipment() {
         const container = document.getElementById('equipmentContainer');
 
+        // Clear matrix filter when reloading data
+        matrixFilter = null;
+        document.getElementById('matrixFilterIndicator').classList.remove('active');
+
         try {
             const queryString = getFilters();
             const resp = await fetch(`/api/resource/status?${queryString}`);
@@ -688,6 +840,30 @@
         }
     }
 
+    function renderLotTooltip(lotDetails) {
+        if (!lotDetails || lotDetails.length === 0) return '';
+
+        let tooltipHtml = '<div class="lot-tooltip"><div class="lot-tooltip-title">在製批次明細</div>';
+        lotDetails.forEach(lot => {
+            const trackinTime = lot.LOTTRACKINTIME ? new Date(lot.LOTTRACKINTIME).toLocaleString('zh-TW') : '--';
+            const qty = lot.LOTTRACKINQTY_PCS != null ? lot.LOTTRACKINQTY_PCS.toLocaleString() : '--';
+            tooltipHtml += `
+                <div class="lot-item">
+                    <div class="lot-item-header">${lot.RUNCARDLOTID || '--'}</div>
+                    <div class="lot-item-row">
+                        <div class="lot-item-field"><span class="lot-item-label">數量:</span><span class="lot-item-value">${qty} pcs</span></div>
+                        <div class="lot-item-field"><span class="lot-item-label">TrackIn:</span><span class="lot-item-value">${trackinTime}</span></div>
+                        <div class="lot-item-field"><span class="lot-item-label">操作員:</span><span class="lot-item-value">${lot.LOTTRACKINEMPLOYEE || '--'}</span></div>
+                    </div>
+                </div>
+            `;
+        });
+        tooltipHtml += '</div>';
+        return tooltipHtml;
+    }
+
     function renderEquipmentList(equipment) {
         const container = document.getElementById('equipmentContainer');
const statusCat = (eq.STATUS_CATEGORY || 'OTHER').toLowerCase(); const statusCat = (eq.STATUS_CATEGORY || 'OTHER').toLowerCase();
const statusDisplay = getStatusDisplay(eq.EQUIPMENTASSETSSTATUS, eq.STATUS_CATEGORY); const statusDisplay = getStatusDisplay(eq.EQUIPMENTASSETSSTATUS, eq.STATUS_CATEGORY);
// Build LOT info with tooltip
let lotHtml = '';
if (eq.LOT_COUNT > 0) {
const tooltipHtml = renderLotTooltip(eq.LOT_DETAILS);
lotHtml = `<span class="lot-info">📦 ${eq.LOT_COUNT}${tooltipHtml}</span>`;
}
html += ` html += `
<div class="equipment-card status-${statusCat}"> <div class="equipment-card status-${statusCat}">
<div class="eq-header"> <div class="eq-header">
@@ -712,8 +895,8 @@
<span title="工站">📍 ${eq.WORKCENTERNAME || '--'}</span> <span title="工站">📍 ${eq.WORKCENTERNAME || '--'}</span>
<span title="群組">🏭 ${eq.WORKCENTER_GROUP || '--'}</span> <span title="群組">🏭 ${eq.WORKCENTER_GROUP || '--'}</span>
<span title="家族">🔧 ${eq.RESOURCEFAMILYNAME || '--'}</span> <span title="家族">🔧 ${eq.RESOURCEFAMILYNAME || '--'}</span>
<span title="部門">🏢 ${eq.PJ_DEPARTMENT || '--'}</span> <span title="區域">🏢 ${eq.LOCATIONNAME || '--'}</span>
${eq.LOT_COUNT > 0 ? `<span title="在製批數">📦 ${eq.LOT_COUNT} 批</span>` : ''} ${lotHtml}
${eq.JOBORDER ? `<span title="工單">📋 ${eq.JOBORDER}</span>` : ''} ${eq.JOBORDER ? `<span title="工單">📋 ${eq.JOBORDER}</span>` : ''}
</div> </div>
</div> </div>
@@ -724,6 +907,86 @@
         container.innerHTML = html;
     }
 
+    function filterByMatrixCell(workcenterGroup, status) {
+        // Status mapping from matrix column to STATUS_CATEGORY or EQUIPMENTASSETSSTATUS
+        const statusMap = {
+            'PRD': 'PRODUCTIVE',
+            'SBY': 'STANDBY',
+            'UDT': 'DOWN',
+            'SDT': 'DOWN',
+            'EGT': 'ENGINEERING',
+            'NST': 'NOT_SCHEDULED',
+            'OTHER': 'OTHER'
+        };
+
+        // Toggle off if clicking same cell
+        if (matrixFilter && matrixFilter.workcenter_group === workcenterGroup && matrixFilter.status === status) {
+            clearMatrixFilter();
+            return;
+        }
+
+        matrixFilter = { workcenter_group: workcenterGroup, status: status };
+
+        // Update selected cell highlighting
+        document.querySelectorAll('.matrix-table td.clickable').forEach(cell => {
+            cell.classList.remove('selected');
+            if (cell.dataset.wg === workcenterGroup && cell.dataset.status === status) {
+                cell.classList.add('selected');
+            }
+        });
+
+        // Show filter indicator
+        const statusLabels = {
+            'PRD': '生產中',
+            'SBY': '待機',
+            'UDT': '非計畫停機',
+            'SDT': '計畫停機',
+            'EGT': '工程',
+            'NST': '未排程',
+            'OTHER': '其他'
+        };
+        document.getElementById('matrixFilterText').textContent = `${workcenterGroup} - ${statusLabels[status] || status}`;
+        document.getElementById('matrixFilterIndicator').classList.add('active');
+
+        // Filter and render equipment list
+        const standardStatuses = ['PRD', 'SBY', 'UDT', 'SDT', 'EGT', 'NST'];
+        const filtered = allEquipment.filter(eq => {
+            // Match workcenter group
+            // Note: If matrix shows "UNKNOWN", it means equipment has no WORKCENTER_GROUP
+            const eqGroup = eq.WORKCENTER_GROUP || 'UNKNOWN';
+            if (eqGroup !== workcenterGroup) return false;
+
+            // Match status based on EQUIPMENTASSETSSTATUS (same logic as matrix calculation)
+            const eqStatus = eq.EQUIPMENTASSETSSTATUS || '';
+            if (status === 'OTHER') {
+                // OTHER = any status NOT in the standard set
+                return !standardStatuses.includes(eqStatus);
+            } else {
+                // For standard statuses, match exact EQUIPMENTASSETSSTATUS
+                return eqStatus === status;
+            }
+        });
+
+        document.getElementById('equipmentCount').textContent = filtered.length;
+        renderEquipmentList(filtered);
+    }
+
+    function clearMatrixFilter() {
+        matrixFilter = null;
+
+        // Remove selected highlighting
+        document.querySelectorAll('.matrix-table td.clickable').forEach(cell => {
+            cell.classList.remove('selected');
+        });
+
+        // Hide filter indicator
+        document.getElementById('matrixFilterIndicator').classList.remove('active');
+
+        // Show all equipment
+        document.getElementById('equipmentCount').textContent = allEquipment.length;
+        renderEquipmentList(allEquipment);
+    }
+
     function getStatusDisplay(status, category) {
         const statusMap = {
             'PRD': '生產中',