20160116
backend/add_column.py  (new file, 26 lines)
@@ -0,0 +1,26 @@
import os

from sqlalchemy import create_engine, text
from app.config import DATABASE_URL, TABLE_PREFIX


def add_column():
    engine = create_engine(DATABASE_URL)
    table_name = f"{TABLE_PREFIX}DIT_Records"
    column_name = "op_name"

    with engine.connect() as conn:
        try:
            # Check if column exists
            result = conn.execute(text(f"SHOW COLUMNS FROM {table_name} LIKE '{column_name}'"))
            if result.fetchone():
                print(f"Column {column_name} already exists in {table_name}.")
            else:
                print(f"Adding column {column_name} to {table_name}...")
                conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {column_name} VARCHAR(255) NULL AFTER op_id"))
                conn.commit()
                print("Column added successfully.")
        except Exception as e:
            print(f"Error: {e}")


if __name__ == "__main__":
    add_column()
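Both migration scripts in this commit repeat the same check-then-alter pattern. A minimal sketch of how it could be factored into a shared helper, assuming a MySQL-compatible server (SHOW COLUMNS is MySQL syntax); the name add_column_if_missing and the ddl_type parameter are illustrative, not part of the commit:

    from sqlalchemy import text

    def add_column_if_missing(engine, table: str, column: str, ddl_type: str) -> bool:
        """Add `column` to `table` if absent; return True when it was added."""
        with engine.connect() as conn:
            # SHOW COLUMNS ... LIKE yields a row only when the column exists (MySQL)
            row = conn.execute(
                text(f"SHOW COLUMNS FROM {table} LIKE :col"), {"col": column}
            ).fetchone()
            if row:
                return False
            conn.execute(text(f"ALTER TABLE {table} ADD COLUMN {column} {ddl_type} NULL"))
            conn.commit()
            return True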
backend/add_order_date.py  (new file, 26 lines)
@@ -0,0 +1,26 @@
import os

from sqlalchemy import create_engine, text
from app.config import DATABASE_URL, TABLE_PREFIX


def add_order_date_column():
    engine = create_engine(DATABASE_URL)
    table_name = f"{TABLE_PREFIX}Order_Records"
    column_name = "date"

    with engine.connect() as conn:
        try:
            # Check if column exists
            result = conn.execute(text(f"SHOW COLUMNS FROM {table_name} LIKE '{column_name}'"))
            if result.fetchone():
                print(f"Column {column_name} already exists in {table_name}.")
            else:
                print(f"Adding column {column_name} to {table_name}...")
                conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {column_name} VARCHAR(20) NULL AFTER amount"))
                conn.commit()
                print("Column added successfully.")
        except Exception as e:
            print(f"Error: {e}")


if __name__ == "__main__":
    add_order_date_column()
@@ -3,7 +3,7 @@ from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from app.models import init_db
from app.routers import auth, etl, match, dashboard, report, lab
from app.routers import auth, etl, match, report, lab, dashboard
from app.config import STATIC_DIR, DEBUG, CORS_ORIGINS, APP_HOST, APP_PORT

# Initialize the database
@@ -31,9 +31,10 @@ if DEBUG and CORS_ORIGINS:
app.include_router(auth.router, prefix="/api")
app.include_router(etl.router, prefix="/api")
app.include_router(match.router, prefix="/api")
app.include_router(dashboard.router, prefix="/api")

app.include_router(report.router, prefix="/api")
app.include_router(lab.router, prefix="/api")
app.include_router(dashboard.router, prefix="/api")

@app.get("/api/health")
def health_check():
@@ -11,6 +11,7 @@ class DitRecord(Base):

    id = Column(Integer, primary_key=True, index=True)
    op_id = Column(String(255), index=True, nullable=False)  # unique removed: the same op_id can have multiple PNs
    op_name = Column(String(255), nullable=True)  # Opportunity Name
    erp_account = Column(String(100), index=True)  # column AQ
    customer = Column(String(255), nullable=False, index=True)
    customer_normalized = Column(String(255), index=True)
@@ -16,5 +16,6 @@ class OrderRecord(Base):
    qty = Column(Integer, default=0)
    status = Column(String(50), default='Backlog')  # changed to String to support Chinese status values
    amount = Column(Float, default=0.0)
    date = Column(String(20))  # order date
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
@@ -1,225 +1,238 @@
from typing import List
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from sqlalchemy import func
from sqlalchemy import func, distinct
from typing import List, Optional
from pydantic import BaseModel
from app.models import get_db
from app.models.dit import DitRecord
from app.models.sample import SampleRecord
from app.models.order import OrderRecord
from app.models.match import MatchResult, MatchStatus, TargetType
from app.models.match import MatchResult, TargetType, MatchStatus

router = APIRouter(prefix="/dashboard", tags=["Dashboard"])

class KPIResponse(BaseModel):
# --- Pydantic Models ---

class DashboardKPI(BaseModel):
    total_dit: int
    sample_rate: float  # sample conversion rate
    hit_rate: float  # order hit rate
    fulfillment_rate: float  # EAU fulfillment rate
    orphan_sample_rate: float  # orphan (wasted) sample rate
    sample_rate: float
    hit_rate: float
    fulfillment_rate: float
    no_order_sample_rate: float
    total_revenue: float

class FunnelItem(BaseModel):
class FunnelData(BaseModel):
    name: str
    value: int
    fill: str

class AttributionDit(BaseModel):
    op_id: str
class DitSchema(BaseModel):
    id: int
    op_id: Optional[str] = None
    customer: str
    pn: str
    eau: int
    stage: str
    date: str
    eau: float = 0
    stage: Optional[str] = None
    date: Optional[str] = None

class AttributionSample(BaseModel):
    order_no: str
    date: str
    class Config:
        from_attributes = True

class AttributionOrder(BaseModel):
    order_no: str
    status: str
    qty: int
    amount: float
class SampleSchema(BaseModel):
    id: int
    order_no: Optional[str] = None
    customer: str
    pn: str
    qty: int = 0
    date: Optional[str] = None

    class Config:
        from_attributes = True

class OrderSchema(BaseModel):
    id: int
    order_no: Optional[str] = None
    customer: str
    pn: str
    qty: int = 0
    amount: float = 0
    status: Optional[str] = None

    class Config:
        from_attributes = True

class AttributionRow(BaseModel):
    dit: AttributionDit
    sample: AttributionSample | None
    order: AttributionOrder | None
    match_source: str | None
    dit: DitSchema
    sample: Optional[SampleSchema] = None
    order: Optional[OrderSchema] = None
    match_source: Optional[str] = None
    attributed_qty: int
    fulfillment_rate: float

def get_lifo_attribution(db: Session):
    """Run the LIFO revenue attribution logic."""
    # 1. Fetch all DITs, sorted newest first (LIFO)
    dits = db.query(DitRecord).order_by(DitRecord.date.desc()).all()

    # 2. Fetch all orders that were matched and accepted
    matched_orders = db.query(MatchResult, OrderRecord).join(
        OrderRecord, MatchResult.target_id == OrderRecord.id
    ).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).all()

    # 3. Build the revenue pool, grouped by (customer, PN)
    order_pools = {}
    for match, order in matched_orders:
        key = (order.customer_normalized, order.pn)
        if key not in order_pools:
            order_pools[key] = 0
        order_pools[key] += (order.qty or 0)

    # 4. Allocate
    attribution_map = {}  # dit_id -> {qty, total_eau}
    for dit in dits:
        key = (dit.customer_normalized, dit.pn)
        eau = dit.eau or 0
        allocated = 0

        if key in order_pools and order_pools[key] > 0:
            allocated = min(eau, order_pools[key])
            order_pools[key] -= allocated

        attribution_map[dit.id] = {
            "qty": allocated,
            "eau": eau
        }

    return attribution_map
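To make the pool-draining logic above concrete, a small worked example with made-up numbers (plain dicts stand in for the ORM rows):

    # Hypothetical pool: 1200 matched order pcs for one (customer, PN) key
    order_pools = {("ACME", "PMSM808LL"): 1200}

    # DITs sorted newest first; each drains the pool up to its own EAU
    dits = [
        {"id": 2, "key": ("ACME", "PMSM808LL"), "eau": 1000},  # newest
        {"id": 1, "key": ("ACME", "PMSM808LL"), "eau": 500},
    ]

    attribution = {}
    for d in dits:
        pool = order_pools.get(d["key"], 0)
        allocated = min(d["eau"], pool)          # newest DIT is served first (LIFO)
        order_pools[d["key"]] = pool - allocated
        attribution[d["id"]] = {"qty": allocated, "eau": d["eau"]}

    # attribution == {2: {"qty": 1000, "eau": 1000}, 1: {"qty": 200, "eau": 500}}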
# --- Routes ---

@router.get("/kpi", response_model=KPIResponse)
@router.get("/kpi", response_model=DashboardKPI)
def get_kpi(db: Session = Depends(get_db)):
    """Return KPI statistics (per spec v1.0)."""
    total_dit = db.query(DitRecord).count()
    if total_dit == 0:
        return KPIResponse(total_dit=0, sample_rate=0, hit_rate=0, fulfillment_rate=0, orphan_sample_rate=0, total_revenue=0)
        return DashboardKPI(
            total_dit=0, sample_rate=0, hit_rate=0,
            fulfillment_rate=0, no_order_sample_rate=0, total_revenue=0
        )

    # 1. Sample rate: (DITs matched to a sample) / (total DITs)
    dits_with_sample = db.query(func.count(func.distinct(MatchResult.dit_id))).filter(
    # Get valid matches
    valid_statuses = [MatchStatus.auto_matched, MatchStatus.accepted]

    # 1. Matches for Samples
    sample_matches = db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.SAMPLE,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).scalar() or 0
    sample_rate = (dits_with_sample / total_dit * 100)
        MatchResult.status.in_(valid_statuses)
    ).distinct().count()

    # 2. Hit rate: (DITs matched to an order) / (total DITs)
    dits_with_order = db.query(func.count(func.distinct(MatchResult.dit_id))).filter(
    # 2. Matches for Orders
    order_matches = db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).scalar() or 0
    hit_rate = (dits_with_order / total_dit * 100)
        MatchResult.status.in_(valid_statuses)
    ).distinct().count()

    # 3. EAU fulfillment rate: (attributed order qty) / (forecast EAU)
    attribution_map = get_lifo_attribution(db)
    total_attributed_qty = sum(item['qty'] for item in attribution_map.values())
    total_eau = sum(item['eau'] for item in attribution_map.values())
    fulfillment_rate = (total_attributed_qty / total_eau * 100) if total_eau > 0 else 0

    # 4. Orphan sample rate: (samples not matched to any DIT) / (total samples)
    total_samples = db.query(SampleRecord).count()
    matched_sample_ids = db.query(func.distinct(MatchResult.target_id)).filter(
        MatchResult.target_type == TargetType.SAMPLE
    ).all()
    matched_sample_count = len(matched_sample_ids)
    orphan_sample_rate = ((total_samples - matched_sample_count) / total_samples * 100) if total_samples > 0 else 0

    # 5. Total revenue
    total_revenue = db.query(func.sum(OrderRecord.amount)).join(
    # 3. Revenue
    # Join MatchResult -> OrderRecord to sum amount
    revenue = db.query(func.sum(OrderRecord.amount)).join(
        MatchResult, MatchResult.target_id == OrderRecord.id
    ).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).scalar() or 0
        MatchResult.status.in_(valid_statuses)
    ).scalar() or 0.0

    return KPIResponse(
    # 4. Fulfillment (Total Matched Order Qty / Total Matched DIT EAU)
    total_order_qty = db.query(func.sum(OrderRecord.qty)).join(
        MatchResult, MatchResult.target_id == OrderRecord.id
    ).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_(valid_statuses)
    ).scalar() or 0

    total_eau = db.query(func.sum(DitRecord.eau)).scalar() or 1  # Avoid div/0

    sample_rate = round((sample_matches / total_dit) * 100, 1)
    hit_rate = round((order_matches / total_dit) * 100, 1)
    fulfillment_rate = round((total_order_qty / total_eau) * 100, 1) if total_eau > 0 else 0

    # No-order sample rate
    dit_with_samples = set(x[0] for x in db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.SAMPLE,
        MatchResult.status.in_(valid_statuses)
    ).distinct().all())

    dit_with_orders = set(x[0] for x in db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_(valid_statuses)
    ).distinct().all())

    dit_sample_no_order = len(dit_with_samples - dit_with_orders)
    no_order_sample_rate = round((dit_sample_no_order / len(dit_with_samples) * 100), 1) if dit_with_samples else 0.0

    return DashboardKPI(
        total_dit=total_dit,
        sample_rate=round(sample_rate, 1),
        hit_rate=round(hit_rate, 1),
        fulfillment_rate=round(fulfillment_rate, 1),
        orphan_sample_rate=round(orphan_sample_rate, 1),
        total_revenue=total_revenue
        sample_rate=sample_rate,
        hit_rate=hit_rate,
        fulfillment_rate=fulfillment_rate,
        no_order_sample_rate=no_order_sample_rate,
        total_revenue=revenue
    )

@router.get("/funnel", response_model=List[FunnelItem])
@router.get("/funnel", response_model=List[FunnelData])
def get_funnel(db: Session = Depends(get_db)):
    """Return funnel data."""
    valid_statuses = [MatchStatus.auto_matched, MatchStatus.accepted]

    # Stage 1: DIT
    total_dit = db.query(DitRecord).count()

    dits_with_sample = db.query(func.count(func.distinct(MatchResult.dit_id))).filter(
        MatchResult.target_type == TargetType.SAMPLE,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).scalar() or 0

    dits_with_order = db.query(func.count(func.distinct(MatchResult.dit_id))).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
    ).scalar() or 0

    # Stage 2: Sample
    dit_with_samples = db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.SAMPLE,
        MatchResult.status.in_(valid_statuses)
    ).distinct().count()

    # Stage 3: Order
    dit_with_orders = db.query(MatchResult.dit_id).filter(
        MatchResult.target_type == TargetType.ORDER,
        MatchResult.status.in_(valid_statuses)
    ).distinct().count()

    return [
        FunnelItem(name='DIT 案件', value=total_dit, fill='#6366f1'),
        FunnelItem(name='成功送樣', value=dits_with_sample, fill='#8b5cf6'),
        FunnelItem(name='取得訂單', value=dits_with_order, fill='#10b981'),
        FunnelData(name="DIT 總案", value=total_dit, fill="#6366f1"),
        FunnelData(name="成功送樣", value=dit_with_samples, fill="#a855f7"),
        FunnelData(name="取得訂單", value=dit_with_orders, fill="#10b981"),
    ]

@router.get("/attribution", response_model=List[AttributionRow])
def get_attribution(db: Session = Depends(get_db)):
    """Return attribution detail (with LIFO allocation and traceability)."""
    dit_records = db.query(DitRecord).order_by(DitRecord.date.desc()).all()
    attribution_map = get_lifo_attribution(db)
    result = []
    valid_statuses = [MatchStatus.auto_matched, MatchStatus.accepted]

    matches = db.query(MatchResult).filter(MatchResult.status.in_(valid_statuses)).all()
    if not matches:
        return []

    for dit in dit_records:
        # Find the sample match (take the highest score)
        sample_match = db.query(MatchResult).filter(
            MatchResult.dit_id == dit.id,
            MatchResult.target_type == TargetType.SAMPLE,
            MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
        ).order_by(MatchResult.score.desc()).first()

        sample_info = None
        if sample_match:
            sample = db.query(SampleRecord).filter(SampleRecord.id == sample_match.target_id).first()
            if sample:
                sample_info = AttributionSample(order_no=sample.order_no, date=sample.date or '')

        # Find the order match (take the highest score)
        order_match = db.query(MatchResult).filter(
            MatchResult.dit_id == dit.id,
            MatchResult.target_type == TargetType.ORDER,
            MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
        ).order_by(MatchResult.score.desc()).first()

        order_info = None
        match_source = None
        if order_match:
            order = db.query(OrderRecord).filter(OrderRecord.id == order_match.target_id).first()
            if order:
                order_info = AttributionOrder(
                    order_no=order.order_no,
                    status=order.status or 'Unknown',
                    qty=order.qty or 0,
                    amount=order.amount or 0
                )
                match_source = order_match.match_source

        attr_data = attribution_map.get(dit.id, {"qty": 0, "eau": dit.eau or 0})
        fulfillment = (attr_data['qty'] / attr_data['eau'] * 100) if attr_data['eau'] > 0 else 0

        result.append(AttributionRow(
            dit=AttributionDit(
                op_id=dit.op_id,
                customer=dit.customer,
                pn=dit.pn,
                eau=dit.eau,
                stage=dit.stage or '',
                date=dit.date or ''
            ),
            sample=sample_info,
            order=order_info,
            match_source=match_source,
            attributed_qty=attr_data['qty'],
            fulfillment_rate=round(fulfillment, 1)
    dit_ids = set(m.dit_id for m in matches)

    dits = db.query(DitRecord).filter(DitRecord.id.in_(dit_ids)).all()

    dit_map = {d.id: d for d in dits}

    sample_match_rows = [m for m in matches if m.target_type == TargetType.SAMPLE]
    order_match_rows = [m for m in matches if m.target_type == TargetType.ORDER]

    sample_ids = [m.target_id for m in sample_match_rows]
    order_ids = [m.target_id for m in order_match_rows]

    samples = db.query(SampleRecord).filter(SampleRecord.id.in_(sample_ids)).all()
    orders = db.query(OrderRecord).filter(OrderRecord.id.in_(order_ids)).all()

    sample_lookup = {s.id: s for s in samples}
    order_lookup = {o.id: o for o in orders}

    results = []

    for dit_id, dit in dit_map.items():
        s_matches = [m for m in matches if m.dit_id == dit_id and m.target_type == TargetType.SAMPLE]
        best_sample_match = max(s_matches, key=lambda x: x.score) if s_matches else None
        sample_obj = sample_lookup.get(best_sample_match.target_id) if best_sample_match else None

        o_matches = [m for m in matches if m.dit_id == dit_id and m.target_type == TargetType.ORDER]
        best_order_match = max(o_matches, key=lambda x: x.score) if o_matches else None
        order_obj = order_lookup.get(best_order_match.target_id) if best_order_match else None

        attributed_qty = 0
        for om in o_matches:
            o = order_lookup.get(om.target_id)
            if o:
                attributed_qty += o.qty

        fulfillment_rate = round((attributed_qty / dit.eau * 100), 1) if dit.eau > 0 else 0

        dit_schema = DitSchema.model_validate(dit)
        # Pydantic usually serialises dates to strings in the JSON response,
        # but set the string explicitly to be safe
        dit_schema.date = str(dit.date) if dit.date else None

        sample_schema = None
        if sample_obj:
            sample_schema = SampleSchema.model_validate(sample_obj)
            sample_schema.date = str(sample_obj.date) if sample_obj.date else None

        order_schema = None
        if order_obj:
            order_schema = OrderSchema.model_validate(order_obj)

        results.append(AttributionRow(
            dit=dit_schema,
            sample=sample_schema,
            order=order_schema,
            match_source=best_order_match.match_source if best_order_match else None,
            attributed_qty=attributed_qty,
            fulfillment_rate=fulfillment_rate
        ))

    return result

    return results
@@ -81,10 +81,28 @@ def clean_value(val, default=''):
    if val is None or (isinstance(val, float) and pd.isna(val)):
        return default
    str_val = str(val).strip()
    # Remove leading apostrophe often added by Excel (e.g. '001)
    str_val = str_val.lstrip("'")
    if str_val.lower() in ('nan', 'none', 'null', ''):
        return default
    return str_val

def normalize_date(val):
    """Normalize a date to YYYY-MM-DD format."""
    val = clean_value(val, None)
    if not val:
        return None

    # Try parsing common formats
    from datetime import datetime
    for fmt in ("%Y-%m-%d", "%Y/%m/%d", "%Y-%m-%d %H:%M:%S", "%Y/%m/%d %H:%M:%S", "%d-%b-%y"):
        try:
            # Handle Excel default string formats like 2025/9/30
            dt = datetime.strptime(val.split(' ')[0], fmt.split(' ')[0])
            return dt.strftime("%Y-%m-%d")
        except ValueError:
            continue
    return val  # Return original if parse failed
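A few sanity checks on what the normalize_date above accepts and returns (inputs assumed for illustration):

    # Time-of-day and non-zero-padded parts are handled; unparseable
    # strings pass through unchanged, and empty values become None
    assert normalize_date("2025/9/30") == "2025-09-30"
    assert normalize_date("2025-01-05 12:00:00") == "2025-01-05"
    assert normalize_date("   ") is None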
@router.post("/import", response_model=ImportResponse)
def import_data(request: ImportRequest, db: Session = Depends(get_db)):
@@ -150,13 +168,14 @@ def import_data(request: ImportRequest, db: Session = Depends(get_db)):
            seen_ids.add(unique_key)
            record = DitRecord(
                op_id=op_id,
                op_name=clean_value(row.get('op_name')),
                erp_account=erp_account,
                customer=customer,
                customer_normalized=normalize_customer_name(customer),
                pn=sanitize_pn(pn),
                eau=int(row.get('eau', 0)) if row.get('eau') and not pd.isna(row.get('eau')) else 0,
                stage=clean_value(row.get('stage')),
                date=clean_value(row.get('date'))
                date=normalize_date(row.get('date'))
            )
        elif file_type == 'sample':
            sample_id = clean_value(row.get('sample_id'), f'S{idx}')
@@ -177,7 +196,7 @@ def import_data(request: ImportRequest, db: Session = Depends(get_db)):
                customer_normalized=normalize_customer_name(customer),
                pn=sanitize_pn(pn),
                qty=int(row.get('qty', 0)) if row.get('qty') and not pd.isna(row.get('qty')) else 0,
                date=clean_value(row.get('date'))
                date=normalize_date(row.get('date'))
            )
        elif file_type == 'order':
            order_id = clean_value(row.get('order_id'), f'O{idx}')
@@ -195,9 +214,10 @@ def import_data(request: ImportRequest, db: Session = Depends(get_db)):
                customer=customer,
                customer_normalized=normalize_customer_name(customer),
                pn=sanitize_pn(pn),
                qty=int(row.get('qty', 0)) if row.get('qty') and not pd.isna(row.get('qty')) else 0,
                qty=int(float(row.get('qty', 0)) * 1000) if row.get('qty') and not pd.isna(row.get('qty')) else 0,
                status=clean_value(row.get('status'), 'Backlog'),
                amount=float(row.get('amount', 0)) if row.get('amount') and not pd.isna(row.get('amount')) else 0
                amount=float(row.get('amount', 0)) if row.get('amount') and not pd.isna(row.get('amount')) else 0,
                date=normalize_date(row.get('date'))
            )
        else:
            continue
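The new * 1000 conversion implies order quantities arrive in K pcs (thousands), consistent with the '索樣數量 k' header variant elsewhere in this commit; a quick check of the arithmetic, with an assumed input:

    # "2.5" K pcs -> 2500 pcs; going through float first keeps "2.5" parseable
    assert int(float("2.5") * 1000) == 2500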
@@ -244,3 +264,21 @@ def get_data(data_type: str, db: Session = Depends(get_db)):
        }
        for record in records
    ]

@router.delete("/data")
def clear_all_data(db: Session = Depends(get_db)):
    """Clear all imported data and analysis results."""
    try:
        print("[ETL] Clearing all data...")
        # Delete dependent rows (review logs, match results) before the records they reference
        db.query(ReviewLog).delete()
        db.query(MatchResult).delete()
        db.query(DitRecord).delete()
        db.query(SampleRecord).delete()
        db.query(OrderRecord).delete()
        db.commit()
        print("[ETL] All data cleared successfully.")
        return {"message": "All data cleared successfully"}
    except Exception as e:
        db.rollback()
        print(f"[ETL] Error clearing data: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@@ -11,10 +11,21 @@ from app.models.order import OrderRecord
router = APIRouter(prefix="/lab", tags=["Lab"])

class LabKPI(BaseModel):
    converted_count: int  # total sample groups that converted to orders
    avg_velocity: float  # average days to convert
    conversion_rate: float  # conversion rate (%)
    orphan_count: int  # orphan sample count

class ConversionRecord(BaseModel):
    customer: str
    pn: str
    sample_date: str
    sample_qty: int
    order_date: str
    order_qty: int
    days_to_convert: int

# ... (ScatterPoint and OrphanSample classes remain the same)
class ScatterPoint(BaseModel):
    customer: str
    pn: str
@@ -28,11 +39,118 @@ class OrphanSample(BaseModel):
    order_no: str
    date: str

# ... (parse_date function remains the same)

# Helper to build order lookups
from app.services.fuzzy_matcher import normalize_pn_for_matching, normalize_customer_name

def build_order_lookups(orders):
    order_lookup_by_id = {}
    order_lookup_by_name = {}

    for o in orders:
        clean_pn = normalize_pn_for_matching(o.pn)
        clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
        norm_cust_name = normalize_customer_name(o.customer)

        o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)

        data = {
            "date": o_date,
            "qty": o.qty or 0,
            "order_no": o.order_no
        }

        if clean_cust_id:
            key_id = (clean_cust_id, clean_pn)
            if key_id not in order_lookup_by_id:
                order_lookup_by_id[key_id] = []
            order_lookup_by_id[key_id].append(data)

        key_name = (norm_cust_name, clean_pn)
        if key_name not in order_lookup_by_name:
            order_lookup_by_name[key_name] = []
        order_lookup_by_name[key_name].append(data)

    return order_lookup_by_id, order_lookup_by_name
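A minimal sketch of what the two lookups contain, using a stand-in object; all values here are assumed, and the helpers (normalize_pn_for_matching, normalize_customer_name, parse_date) are the ones defined in this commit:

    from types import SimpleNamespace

    o = SimpleNamespace(pn="PMSM-808-LL", cust_id=" s14500 ", customer="ACME Co., Ltd.",
                        date="2025-09-30", qty=100, order_no="SO-001", created_at=None)
    by_id, by_name = build_order_lookups([o])
    # by_id keys:   ("S14500", "PMSM808LL")  -- cust_id stripped and upper-cased
    # by_name keys: ("ACME", "PMSM808LL")    -- company suffix removed by normalize_customer_name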
@router.get("/conversions", response_model=List[ConversionRecord])
|
||||
def get_conversions(db: Session = Depends(get_db)):
|
||||
# 找出所有樣品
|
||||
samples = db.query(SampleRecord).all()
|
||||
# 找出所有訂單
|
||||
orders = db.query(OrderRecord).all()
|
||||
|
||||
order_lookup_by_id, order_lookup_by_name = build_order_lookups(orders)
|
||||
|
||||
conversions = []
|
||||
|
||||
# We want to list "Sample Records" that successfully converted.
|
||||
# Or "Groups"? The user said "list of sample sent and their order qty".
|
||||
# Listing each sample record seems appropriate.
|
||||
|
||||
for s in samples:
|
||||
clean_pn = normalize_pn_for_matching(s.pn)
|
||||
norm_cust_name = normalize_customer_name(s.customer)
|
||||
clean_cust_id = s.cust_id.strip().upper() if s.cust_id else ""
|
||||
s_date = parse_date(s.date)
|
||||
|
||||
matched_orders = []
|
||||
|
||||
# 1. Try via ID
|
||||
if clean_cust_id:
|
||||
if (clean_cust_id, clean_pn) in order_lookup_by_id:
|
||||
matched_orders.extend(order_lookup_by_id[(clean_cust_id, clean_pn)])
|
||||
|
||||
# 2. Try via Name (Fallback)
|
||||
if not matched_orders:
|
||||
if (norm_cust_name, clean_pn) in order_lookup_by_name:
|
||||
matched_orders.extend(order_lookup_by_name[(norm_cust_name, clean_pn)])
|
||||
|
||||
if matched_orders and s_date:
|
||||
# Sort orders by date
|
||||
matched_orders.sort(key=lambda x: x["date"])
|
||||
first_order = matched_orders[0]
|
||||
|
||||
# Simple aggregations if multiple orders? User asked for "their order qty".
|
||||
# showing total order qty for this PN/Cust might be better
|
||||
total_order_qty = sum(o["qty"] for o in matched_orders)
|
||||
|
||||
days_diff = (first_order["date"] - s_date).days
|
||||
|
||||
# Filter unrealistic past orders?
|
||||
# if days_diff < 0: continue # Optional
|
||||
|
||||
conversions.append(ConversionRecord(
|
||||
customer=s.customer,
|
||||
pn=s.pn,
|
||||
sample_date=s.date,
|
||||
sample_qty=s.qty or 0,
|
||||
order_date=first_order["date"].strftime("%Y-%m-%d"), # First order date
|
||||
order_qty=total_order_qty,
|
||||
days_to_convert=days_diff
|
||||
))
|
||||
|
||||
# Sort by recent sample date
|
||||
return sorted(conversions, key=lambda x: x.sample_date, reverse=True)
|
||||
|
||||
def parse_date(date_str: str) -> Optional[datetime]:
|
||||
try:
|
||||
return datetime.strptime(date_str, "%Y-%m-%d")
|
||||
except:
|
||||
if not date_str:
|
||||
return None
|
||||
val = str(date_str).strip()
|
||||
# Try parsing YYYYMMDD
|
||||
if len(val) == 8 and val.isdigit():
|
||||
try:
|
||||
return datetime.strptime(val, "%Y%m%d")
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
for fmt in ("%Y-%m-%d", "%Y/%m/%d", "%Y-%m-%d %H:%M:%S", "%Y/%m/%d %H:%M:%S", "%d-%b-%y"):
|
||||
try:
|
||||
return datetime.strptime(str(date_str).split(' ')[0], fmt.split(' ')[0])
|
||||
except ValueError:
|
||||
continue
|
||||
return None
|
||||
|
||||
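What the fallback parser above yields for a few assumed inputs:

    from datetime import datetime

    assert parse_date("20250930") == datetime(2025, 9, 30)            # compact YYYYMMDD
    assert parse_date("2025/9/30 08:00:00") == datetime(2025, 9, 30)  # time part ignored
    assert parse_date("") is None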
@router.get("/kpi", response_model=LabKPI)
|
||||
def get_lab_kpi(
|
||||
@@ -46,27 +164,34 @@ def get_lab_kpi(
|
||||
|
||||
if start_date:
|
||||
samples_query = samples_query.filter(SampleRecord.date >= start_date)
|
||||
orders_query = orders_query.filter(OrderRecord.created_at >= start_date) # 訂單使用 created_at or date? OrderRecord 只有 created_at 欄位是 DateTime
|
||||
orders_query = orders_query.filter(OrderRecord.date >= start_date)
|
||||
|
||||
if end_date:
|
||||
samples_query = samples_query.filter(SampleRecord.date <= end_date)
|
||||
# Note: OrderRecord 只有 created_at
|
||||
orders_query = orders_query.filter(OrderRecord.date <= end_date)
|
||||
|
||||
samples = samples_query.all()
|
||||
orders = orders_query.all()
|
||||
|
||||
# 建立群組 (ERP Code + PN)
|
||||
# ERP Code correspond to cust_id
|
||||
from app.services.fuzzy_matcher import normalize_pn_for_matching
|
||||
|
||||
sample_groups = {}
|
||||
for s in samples:
|
||||
key = (s.cust_id, s.pn)
|
||||
# Use simple normalization like stripping spaces
|
||||
clean_pn = normalize_pn_for_matching(s.pn)
|
||||
clean_cust = s.cust_id.strip().upper() if s.cust_id else ""
|
||||
key = (clean_cust, clean_pn)
|
||||
if key not in sample_groups:
|
||||
sample_groups[key] = []
|
||||
sample_groups[key].append(s)
|
||||
|
||||
order_groups = {}
|
||||
for o in orders:
|
||||
key = (o.cust_id, o.pn)
|
||||
clean_pn = normalize_pn_for_matching(o.pn)
|
||||
clean_cust = o.cust_id.strip().upper() if o.cust_id else ""
|
||||
key = (clean_cust, clean_pn)
|
||||
if key not in order_groups:
|
||||
order_groups[key] = []
|
||||
order_groups[key].append(o)
|
||||
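The point of the normalized key, shown with assumed raw values — variants that used to land in separate groups now collapse into one:

    key = ("s14500 ".strip().upper(), normalize_pn_for_matching("PMSM-808-LL"))
    assert key == ("S14500", "PMSM808LL")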
@@ -76,34 +201,101 @@ def get_lab_kpi(
    converted_samples_count = 0
    total_samples_count = len(samples)

    for key, group_samples in sample_groups.items():
        if key in order_groups:
            # Converted successfully
            converted_samples_count += len(group_samples)
    # Rebuild the ID/name lookups here rather than reusing build_order_lookups,
    # so the loop below stays self-contained

    from app.services.fuzzy_matcher import normalize_pn_for_matching, normalize_customer_name

    order_lookup_by_id = {}
    order_lookup_by_name = {}

    for o in orders:
        clean_pn = normalize_pn_for_matching(o.pn)
        clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
        norm_cust_name = normalize_customer_name(o.customer)

        o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)

        if clean_cust_id:
            key_id = (clean_cust_id, clean_pn)
            if key_id not in order_lookup_by_id:
                order_lookup_by_id[key_id] = []
            order_lookup_by_id[key_id].append(o_date)

        key_name = (norm_cust_name, clean_pn)
        if key_name not in order_lookup_by_name:
            order_lookup_by_name[key_name] = []
        order_lookup_by_name[key_name].append(o_date)

    # Conversion rate is computed over unique sample groups (customer name + PN),
    # i.e. unique trials, to avoid double counting when several samples map to the same order

    unique_sample_groups = {}  # (norm_cust_name, clean_pn) -> sample dates and cust_ids

    for s in samples:
        clean_pn = normalize_pn_for_matching(s.pn)
        norm_cust_name = normalize_customer_name(s.customer)
        clean_cust_id = s.cust_id.strip().upper() if s.cust_id else ""

        key = (norm_cust_name, clean_pn)  # group by Name + PN
        if key not in unique_sample_groups:
            unique_sample_groups[key] = {
                "dates": [],
                "cust_ids": set()
            }
        s_date = parse_date(s.date)
        if s_date:
            unique_sample_groups[key]["dates"].append(s_date)
        if clean_cust_id:
            unique_sample_groups[key]["cust_ids"].add(clean_cust_id)

    # Calculate
    total_samples_count = len(unique_sample_groups)  # total "projects"
    converted_count = 0

    orphan_count = 0
    now = datetime.now()

    for key, data in unique_sample_groups.items():
        norm_cust_name, clean_pn = key

        # Try finding orders
        matched_dates = []

        # 1. Try via ID
        for cid in data["cust_ids"]:
            if (cid, clean_pn) in order_lookup_by_id:
                matched_dates.extend(order_lookup_by_id[(cid, clean_pn)])

        # 2. Try via Name
        if not matched_dates:
            if key in order_lookup_by_name:
                matched_dates.extend(order_lookup_by_name[key])

        if matched_dates:
            converted_count += 1
            # Velocity
            earliest_sample = min(data["dates"]) if data["dates"] else None
            # Use the first order date
            first_order = min(matched_dates) if matched_dates else None

            # Compute velocity: first order date - earliest sample date
            earliest_sample_date = min([parse_date(s.date) for s in group_samples if s.date] or [datetime.max])
            first_order_date = min([o.created_at for o in order_groups[key] if o.created_at] or [datetime.max])

            if earliest_sample_date != datetime.max and first_order_date != datetime.max:
                diff = (first_order_date - earliest_sample_date).days
                if diff >= 0:
                    velocities.append(diff)
            if earliest_sample and first_order:
                diff = (first_order - earliest_sample).days
                if diff >= 0:
                    velocities.append(diff)
        else:
            # Check for orphans (no order), using the earliest sample date
            earliest_sample = min(data["dates"]) if data["dates"] else None
            if earliest_sample and (now - earliest_sample).days > 90:
                orphan_count += 1

    avg_velocity = sum(velocities) / len(velocities) if velocities else 0
    conversion_rate = (converted_samples_count / total_samples_count * 100) if total_samples_count > 0 else 0

    # Orphan samples: older than 90 days and no order
    now = datetime.now()
    orphan_count = 0
    for key, group_samples in sample_groups.items():
        if key not in order_groups:
            for s in group_samples:
                s_date = parse_date(s.date)
                if s_date and (now - s_date).days > 90:
                    orphan_count += 1
    conversion_rate = (converted_count / total_samples_count * 100) if total_samples_count > 0 else 0

    return LabKPI(
        converted_count=converted_count,
        avg_velocity=round(avg_velocity, 1),
        conversion_rate=round(conversion_rate, 1),
        orphan_count=orphan_count
@@ -127,25 +319,117 @@ def get_scatter_data(
    orders = orders_query.all()

    # Aggregate data
    data_map = {}  # (cust_id, pn) -> {sample_qty, order_qty, customer_name}

    for s in samples:
        key = (s.cust_id, s.pn)
        if key not in data_map:
            data_map[key] = {"sample_qty": 0, "order_qty": 0, "customer": s.customer}
        data_map[key]["sample_qty"] += (s.qty or 0)
    from app.services.fuzzy_matcher import normalize_pn_for_matching, normalize_customer_name

    # Build multi-index order lookups:
    # order_lookup_by_id:   (cust_id, pn)   -> order data
    # order_lookup_by_name: (cust_name, pn) -> order data
    order_lookup_by_id = {}
    order_lookup_by_name = {}

    for o in orders:
        key = (o.cust_id, o.pn)
        if key in data_map:
            data_map[key]["order_qty"] += (o.qty or 0)
        clean_pn = normalize_pn_for_matching(o.pn)
        clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
        norm_cust_name = normalize_customer_name(o.customer)

        # Aggregate by Cust ID
        if clean_cust_id:
            key_id = (clean_cust_id, clean_pn)
            if key_id not in order_lookup_by_id:
                order_lookup_by_id[key_id] = {"qty": 0, "dates": []}
            order_lookup_by_id[key_id]["qty"] += (o.qty or 0)
            if o.date:
                order_lookup_by_id[key_id]["dates"].append(parse_date(o.date) or datetime.max)
            elif o.created_at:
                order_lookup_by_id[key_id]["dates"].append(o.created_at.replace(tzinfo=None))

        # Aggregate by Cust Name (fallback)
        key_name = (norm_cust_name, clean_pn)
        if key_name not in order_lookup_by_name:
            order_lookup_by_name[key_name] = {"qty": 0, "dates": []}
        order_lookup_by_name[key_name]["qty"] += (o.qty or 0)
        if o.date:
            order_lookup_by_name[key_name]["dates"].append(parse_date(o.date) or datetime.max)
        elif o.created_at:
            order_lookup_by_name[key_name]["dates"].append(o.created_at.replace(tzinfo=None))

    final_data_map = {}  # (display customer, original PN) -> data

    for s in samples:
        clean_pn = normalize_pn_for_matching(s.pn)
        clean_cust_id = s.cust_id.strip().upper() if s.cust_id else ""
        norm_cust_name = normalize_customer_name(s.customer)

        # Try to match an order
        matched_order = None

        # 1. Try Cust ID match
        if clean_cust_id:
            matched_order = order_lookup_by_id.get((clean_cust_id, clean_pn))

        # 2. If no match, try Cust Name match
        if not matched_order:
            matched_order = order_lookup_by_name.get((norm_cust_name, clean_pn))

        # Render key using the sample's info
        display_key = (s.customer, s.pn)
        if display_key not in final_data_map:
            final_data_map[display_key] = {"sample_qty": 0, "order_qty": 0, "customer": s.customer, "original_pn": s.pn}

        final_data_map[display_key]["sample_qty"] += (s.qty or 0)

        if matched_order:
            # Order qty was already aggregated in the lookup; adding it here per sample
            # would double count when several samples map to the same order group,
            # so nothing is accumulated in this first pass
            pass

    # The loop above builds points per sample, but order totals are fixed per group;
    # rebuild with (customer, PN) as the unique key

    unique_groups = {}  # (norm_cust_name, clean_pn) -> {display_cust, display_pn, sample_qty, order_qty}

    for s in samples:
        clean_pn = normalize_pn_for_matching(s.pn)
        norm_cust_name = normalize_customer_name(s.customer)
        key = (norm_cust_name, clean_pn)

        if key not in unique_groups:
            unique_groups[key] = {
                "display_cust": s.customer,
                "display_pn": s.pn,
                "sample_qty": 0,
                "order_qty": 0,
                "matched": False
            }
        unique_groups[key]["sample_qty"] += (s.qty or 0)

    # Fill in order qty
    for key, data in unique_groups.items():
        norm_cust_name, clean_pn = key

        # Rely on the name match here, since the groups are keyed by name;
        # checking each sample's cust_id as well would be possible, but
        # Name + PN is the safer aggregation for a visual scatter plot
        matched_order = order_lookup_by_name.get((norm_cust_name, clean_pn))

        if matched_order:
            data["order_qty"] = matched_order["qty"]
            data["matched"] = True

    data_map = unique_groups  # Replace the old data_map logic

    # Orders with no matching sample are omitted: the ROI analysis correlates
    # samples sent with orders received, so it is sample-based

    return [
        ScatterPoint(
            customer=v["customer"],
            pn=key[1],
            customer=v["display_cust"],
            pn=v["display_pn"],
            sample_qty=v["sample_qty"],
            order_qty=v["order_qty"]
        )
@@ -159,16 +443,45 @@ def get_orphans(db: Session = Depends(get_db)):

    # Fetch all samples
    samples = db.query(SampleRecord).all()
    # Fetch all orders
    orders = db.query(OrderRecord).all()

    # Find which (cust_id, pn) pairs have orders
    orders_keys = set(db.query(OrderRecord.cust_id, OrderRecord.pn).distinct().all())
    # Build order lookups (ID and Name)
    from app.services.fuzzy_matcher import normalize_pn_for_matching, normalize_customer_name

    order_keys_id = set()
    order_keys_name = set()

    for o in orders:
        clean_pn = normalize_pn_for_matching(o.pn)
        clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
        norm_cust_name = normalize_customer_name(o.customer)

        if clean_cust_id:
            order_keys_id.add((clean_cust_id, clean_pn))

        order_keys_name.add((norm_cust_name, clean_pn))

    orphans = []
    for s in samples:
        key = (s.cust_id, s.pn)
        clean_pn = normalize_pn_for_matching(s.pn)
        norm_cust_name = normalize_customer_name(s.customer)
        clean_cust_id = s.cust_id.strip().upper() if s.cust_id else ""

        s_date = parse_date(s.date)

        if key not in orders_keys:
        # Check match
        matched = False
        if clean_cust_id:
            if (clean_cust_id, clean_pn) in order_keys_id:
                matched = True

        if not matched:
            if (norm_cust_name, clean_pn) in order_keys_name:
                matched = True

        if not matched:
            if s_date and s_date < threshold_date:
                orphans.append(OrphanSample(
                    customer=s.customer,
@@ -14,14 +14,26 @@ def export_report(format: str = "xlsx", db: Session = Depends(get_db)):

    generator = ReportGenerator(db)

    print(f"Export request received. Format: {format}")

    if format == 'xlsx':
        output = generator.generate_excel()
        media_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        filename = "dit_attribution_report.xlsx"
        try:
            print("Generating Excel...")
            output = generator.generate_excel()
            print("Excel generated successfully.")
            media_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
            filename = "dit_attribution_report.xlsx"
        except Exception as e:
            print(f"Error generating Excel: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))
    else:
        output = generator.generate_pdf()
        media_type = "application/pdf"
        filename = "dit_attribution_report.pdf"
        try:
            output = generator.generate_pdf()
            media_type = "application/pdf"
            filename = "dit_attribution_report.pdf"
        except Exception as e:
            print(f"Error generating PDF: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    return StreamingResponse(
        output,
@@ -16,6 +16,8 @@ def clean_value(val):
    if isinstance(val, float):
        if math.isnan(val) or math.isinf(val):
            return None
    if isinstance(val, str):
        val = val.lstrip("'")  # Remove leading apostrophe often added by Excel
    return val
@@ -31,7 +33,8 @@ def clean_records(records: List[Dict]) -> List[Dict]:
# Column name mapping table
COLUMN_MAPPING = {
    'dit': {
        'op_id': ['opportunity name', 'opportunity no', 'opportunity', 'op編號', 'op 編號', 'op_id', 'opid', '案件編號', '案號', 'opportunity id'],
        'op_id': ['opportunity no', 'opportunity', 'op編號', 'op 編號', 'op_id', 'opid', '案件編號', '案號', 'opportunity id'],
        'op_name': ['opportunity name', '專案名稱', '案件名稱'],
        'erp_account': ['erp account', 'account no', 'erp account no', '客戶代碼', '客戶編號', 'erp_account'],
        'customer': ['account name', 'branding customer', '客戶', '客戶名稱', 'customer', 'customer name', '公司名稱'],
        'pn': ['product name', '料號', 'part number', 'pn', 'part no', 'part_number', '產品料號', 'stage/part'],
@@ -47,17 +50,18 @@ COLUMN_MAPPING = {
        'customer': ['客戶名稱', '客戶簡稱', '客戶', 'customer', 'customer name'],
        'pn': ['item', 'type', '料號', 'part number', 'pn', 'part no', '產品料號', '索樣數量'],
        'qty': ['索樣數量pcs', '索樣數量 k', '數量', 'qty', 'quantity', '申請數量'],
        'date': ['需求日', '日期', 'date', '申請日期']
        'date': ['出貨日', '需求日', '日期', 'date', '申請日期']
    },
    'order': {
        'order_id': ['項次', '訂單編號', 'order_id', 'order id'],
        'order_no': ['訂單單號', '訂單號', 'order_no', 'order no', '銷貨單號'],
        'cust_id': ['客戶編號', '客戶代碼', '客戶代號', 'cust_id', 'cust id'],
        'cust_id': ['客戶編號', '客戶代碼', '客戶代號', 'cust_id', 'cust id', 'erp code', 'erp_code', 'erpcode', 'erp'],
        'customer': ['客戶', '客戶名稱', 'customer', 'customer name'],
        'pn': ['type', '內部料號', '料號', 'part number', 'pn', 'part no', '產品料號'],
        'pn': ['內部料號', '料號', 'part number', 'pn', 'part no', '產品料號', 'type'],
        'qty': ['訂單量', '數量', 'qty', 'quantity', '訂購數量', '出貨數量'],
        'status': ['狀態', 'status', '訂單狀態'],
        'amount': ['原幣金額(含稅)', '台幣金額(未稅)', '金額', 'amount', 'total', '訂單金額']
        'amount': ['原幣金額(含稅)', '台幣金額(未稅)', '金額', 'amount', 'total', '訂單金額'],
        'date': ['訂單日期', '日期', 'date', 'order date', 'order_date']
    }
}
@@ -101,10 +105,12 @@ class ExcelParser:
            for idx, col in enumerate(df_columns):
                if variant_lower in col or col in variant_lower:
                    mapping[df.columns[idx]] = standard_name
                    print(f"[DEBUG] Mapped '{df.columns[idx]}' to '{standard_name}' (matched '{variant}')")
                    break
            if standard_name in mapping.values():
                break

        print(f"[DEBUG] Final Mapping for {file_type}: {mapping}")
        return mapping

    def parse_file(self, file_path: Path, file_type: str) -> Tuple[str, Dict[str, Any]]:
@@ -14,15 +14,22 @@ from datetime import timedelta
COMPANY_SUFFIXES = [
    '股份有限公司', '有限公司', '公司',
    '株式会社', '株式會社',
    'Co., Ltd.', 'Co.,Ltd.', 'Co. Ltd.', 'Co.Ltd.',
    'Co., Ltd.', 'Co.,Ltd.', 'Co. Ltd.', 'Co.Ltd.', 'Co., Ltd', 'Co.,Ltd',
    'Corporation', 'Corp.', 'Corp',
    'Inc.', 'Inc',
    'Limited', 'Ltd.', 'Ltd',
    'Limited', 'Ltd.', 'Ltd', 'L.T.D.',
    'LLC', 'L.L.C.',
]

def sanitize_pn(pn: str) -> str:
    """Strip non-alphanumeric characters and upper-case (PMSM-808-LL -> PMSM808LL)"""
    """Strip non-alphanumeric characters and upper-case (allows - and _)"""
    if not pn:
        return ""
    # Keep - and _; remove other special symbols
    return re.sub(r'[^a-zA-Z0-9\-_]', '', str(pn)).upper()

def normalize_pn_for_matching(pn: str) -> str:
    """Normalization used only for matching (strip all symbols, keep alphanumerics)"""
    if not pn:
        return ""
    return re.sub(r'[^a-zA-Z0-9]', '', str(pn)).upper()
@@ -34,10 +41,23 @@ def normalize_customer_name(name: str) -> str:

    # Upper-cased at the end
    normalized = name.strip()

    # Remove company suffixes
    for suffix in COMPANY_SUFFIXES:
        normalized = re.sub(re.escape(suffix), '', normalized, flags=re.IGNORECASE)

    # Pre-cleaning common punctuation/separators first would make suffix matching
    # easier, but risks merging words incorrectly

    # Remove company suffixes; sort by length, longest first,
    # so the most specific suffix wins
    sorted_suffixes = sorted(COMPANY_SUFFIXES, key=len, reverse=True)

    for suffix in sorted_suffixes:
        # Anchor at end of string (escape the suffix for the regex)
        pattern = re.compile(re.escape(suffix) + r'$', re.IGNORECASE)
        normalized = pattern.sub('', normalized).strip()

        # Also try matching with a preceding comma/space
        pattern_strict = re.compile(r'[,.\s]+' + re.escape(suffix) + r'$', re.IGNORECASE)
        normalized = pattern_strict.sub('', normalized).strip()

    # Remove parentheses and their contents
    normalized = re.sub(r'\([^)]*\)', '', normalized)
@@ -45,9 +65,20 @@ def normalize_customer_name(name: str) -> str:

    # Full-width to half-width space
    normalized = normalized.replace('　', ' ')

    # Remove a trailing "Co." or "Co.," that the suffix list may have left behind
    normalized = re.sub(r'[,.\s]+Co[.,]*$', '', normalized, flags=re.IGNORECASE)

    # Collapse extra whitespace
    normalized = re.sub(r'\s+', ' ', normalized).strip()

    # Keep punctuation inside the name (fuzzy matching may rely on it);
    # only strip trailing punctuation aggressively
    normalized = normalized.strip("., ")

    return normalized.upper()
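Illustrative inputs for the three helpers above (values assumed):

    sanitize_pn("pmsm-808_ll!")               # -> "PMSM-808_LL"  (keeps - and _)
    normalize_pn_for_matching("PMSM-808-LL")  # -> "PMSM808LL"    (all symbols dropped)
    normalize_customer_name("Acme (Shenzhen) Electronics Ltd.")  # -> "ACME ELECTRONICS"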
@@ -103,7 +134,7 @@ class FuzzyMatcher:
        # 1. Fetch all DIT records
        dit_records = self.db.query(DitRecord).all()

        # 2. Fetch all sample and order records, grouped by PN
        # 2. Fetch all sample and order records, grouped by PN (matching-normalized)
        sample_records = self.db.query(SampleRecord).all()
        order_records = self.db.query(OrderRecord).all()

@@ -111,9 +142,10 @@ class FuzzyMatcher:
        samples_by_oppy = {}
        for s in sample_records:
            if s.pn:
                if s.pn not in samples_by_pn:
                    samples_by_pn[s.pn] = []
                samples_by_pn[s.pn].append(s)
                norm_pn = normalize_pn_for_matching(s.pn)
                if norm_pn not in samples_by_pn:
                    samples_by_pn[norm_pn] = []
                samples_by_pn[norm_pn].append(s)
            if s.oppy_no:
                if s.oppy_no not in samples_by_oppy:
                    samples_by_oppy[s.oppy_no] = []
@@ -121,9 +153,11 @@ class FuzzyMatcher:

        orders_by_pn = {}
        for o in order_records:
            if o.pn not in orders_by_pn:
                orders_by_pn[o.pn] = []
            orders_by_pn[o.pn].append(o)
            if o.pn:
                norm_pn = normalize_pn_for_matching(o.pn)
                if norm_pn not in orders_by_pn:
                    orders_by_pn[norm_pn] = []
                orders_by_pn[norm_pn].append(o)

        # 3. Clear old match results
        self.db.query(ReviewLog).delete()
@@ -136,13 +170,16 @@ class FuzzyMatcher:
        for dit in dit_records:
            dit_date = pd.to_datetime(dit.date, errors='coerce')

            # Matching-normalized version of the DIT PN
            dit_norm_pn = normalize_pn_for_matching(dit.pn)

            # --- Match samples (DIT -> Sample) ---
            # Collect all candidate samples (Priority 1: Oppy ID, Priority 2/3: PN)
            potential_samples = []
            if dit.op_id:
                potential_samples.extend(samples_by_oppy.get(dit.op_id, []))
            if dit.pn:
                potential_samples.extend(samples_by_pn.get(dit.pn, []))
            if dit_norm_pn:
                potential_samples.extend(samples_by_pn.get(dit_norm_pn, []))

            # De-duplicate
            seen_sample_ids = set()
@@ -172,8 +209,8 @@ class FuzzyMatcher:
                    score = 100.0
                    reason = "Golden Key Match"

                # Priority 2 & 3 are restricted to the same PN
                elif dit.pn == sample.pn:
                # Priority 2 & 3 are restricted to the same PN (symbols ignored)
                elif dit_norm_pn == normalize_pn_for_matching(sample.pn):
                    # Priority 2: customer code match (Silver Key)
                    if dit.erp_account and sample.cust_id and dit.erp_account == sample.cust_id:
                        match_priority = 2
@@ -209,44 +246,45 @@ class FuzzyMatcher:

            # --- Match orders (DIT -> Order) ---
            # Order matching is usually PN-based
            for order in orders_by_pn.get(dit.pn, []):
                match_priority = 0
                match_source = ""
                score = 0.0
                reason = ""
            if dit_norm_pn:
                for order in orders_by_pn.get(dit_norm_pn, []):
                    match_priority = 0
                    match_source = ""
                    score = 0.0
                    reason = ""

                # Priority 2: customer code match (Silver Key)
                if dit.erp_account and order.cust_id and dit.erp_account == order.cust_id:
                    match_priority = 2
                    match_source = f"Matched via ERP Account: {dit.erp_account}"
                    score = 99.0
                    reason = "Silver Key Match"

                # Priority 3: fuzzy name match (Fallback)
                else:
                    score, reason = calculate_similarity(dit.customer, order.customer)
                    if score >= MATCH_THRESHOLD_REVIEW:
                        match_priority = 3
                        match_source = f"Matched via Name Similarity ({reason})"

                if match_priority > 0:
                    status = MatchStatus.auto_matched if score >= MATCH_THRESHOLD_AUTO else MatchStatus.pending
                    match = MatchResult(
                        dit_id=dit.id,
                        target_type=TargetType.ORDER,
                        target_id=order.id,
                        score=score,
                        match_priority=match_priority,
                        match_source=match_source,
                        reason=reason,
                        status=status
                    )
                    self.db.add(match)
                    match_count += 1
                    if status == MatchStatus.auto_matched:
                        auto_matched += 1
                    # Priority 2: customer code match (Silver Key)
                    if dit.erp_account and order.cust_id and dit.erp_account == order.cust_id:
                        match_priority = 2
                        match_source = f"Matched via ERP Account: {dit.erp_account}"
                        score = 99.0
                        reason = "Silver Key Match"

                    # Priority 3: fuzzy name match (Fallback)
                    else:
                        pending_review += 1
                        score, reason = calculate_similarity(dit.customer, order.customer)
                        if score >= MATCH_THRESHOLD_REVIEW:
                            match_priority = 3
                            match_source = f"Matched via Name Similarity ({reason})"

                    if match_priority > 0:
                        status = MatchStatus.auto_matched if score >= MATCH_THRESHOLD_AUTO else MatchStatus.pending
                        match = MatchResult(
                            dit_id=dit.id,
                            target_type=TargetType.ORDER,
                            target_id=order.id,
                            score=score,
                            match_priority=match_priority,
                            match_source=match_source,
                            reason=reason,
                            status=status
                        )
                        self.db.add(match)
                        match_count += 1
                        if status == MatchStatus.auto_matched:
                            auto_matched += 1
                        else:
                            pending_review += 1

        self.db.commit()
@@ -72,41 +72,60 @@ class ReportGenerator:
        return result

    def generate_excel(self) -> io.BytesIO:
        """Generate the Excel report"""
        """Generate the Excel report (three sheets: DIT歸因明細, 成功送樣, 取得訂單)"""
        wb = Workbook()
        ws = wb.active
        ws.title = "DIT Attribution Report"

        # Title styles

        # Fetch all data
        all_data = self.get_attribution_data()

        # Define styles
        header_font = Font(bold=True, color="FFFFFF")
        header_fill = PatternFill(start_color="4F46E5", end_color="4F46E5", fill_type="solid")
        header_alignment = Alignment(horizontal="center", vertical="center")

        # Header row

        headers = ['OP編號', '客戶名稱', '料號', 'EAU', '階段', '樣品單號', '訂單單號', '訂單狀態', '訂單金額']
        for col, header in enumerate(headers, 1):
            cell = ws.cell(row=1, column=col, value=header)
            cell.font = header_font
            cell.fill = header_fill
            cell.alignment = header_alignment

        # Data rows
        data = self.get_attribution_data()
        for row_idx, row_data in enumerate(data, 2):
            ws.cell(row=row_idx, column=1, value=row_data['op_id'])
            ws.cell(row=row_idx, column=2, value=row_data['customer'])
            ws.cell(row=row_idx, column=3, value=row_data['pn'])
            ws.cell(row=row_idx, column=4, value=row_data['eau'])
            ws.cell(row=row_idx, column=5, value=row_data['stage'])
            ws.cell(row=row_idx, column=6, value=row_data['sample_order'] or '-')
            ws.cell(row=row_idx, column=7, value=row_data['order_no'] or '-')
            ws.cell(row=row_idx, column=8, value=row_data['order_status'] or '-')
            ws.cell(row=row_idx, column=9, value=row_data['order_amount'] or 0)

        # Adjust column widths
        column_widths = [15, 30, 20, 12, 15, 15, 15, 12, 12]
        for col, width in enumerate(column_widths, 1):
            ws.column_dimensions[chr(64 + col)].width = width

        def create_sheet(sheet_name, data_rows):
            if sheet_name == "DIT歸因明細":
                ws = wb.active
                ws.title = sheet_name
            else:
                ws = wb.create_sheet(title=sheet_name)

            # Header row
            for col, header in enumerate(headers, 1):
                cell = ws.cell(row=1, column=col, value=header)
                cell.font = header_font
                cell.fill = header_fill
                cell.alignment = header_alignment

            # Data rows
            for row_idx, row_data in enumerate(data_rows, 2):
                ws.cell(row=row_idx, column=1, value=row_data['op_id'])
                ws.cell(row=row_idx, column=2, value=row_data['customer'])
                ws.cell(row=row_idx, column=3, value=row_data['pn'])
                ws.cell(row=row_idx, column=4, value=row_data['eau'])
                ws.cell(row=row_idx, column=5, value=row_data['stage'])
                ws.cell(row=row_idx, column=6, value=row_data['sample_order'] or '-')
                ws.cell(row=row_idx, column=7, value=row_data['order_no'] or '-')
                ws.cell(row=row_idx, column=8, value=row_data['order_status'] or '-')
                ws.cell(row=row_idx, column=9, value=row_data['order_amount'] or 0)

            # Adjust column widths
            for col, width in enumerate(column_widths, 1):
                ws.column_dimensions[chr(64 + col)].width = width

        # 1. DIT attribution detail (all rows)
        create_sheet("DIT歸因明細", all_data)

        # 2. Successful samples (rows with a sample order no.)
        success_samples = [row for row in all_data if row['sample_order']]
        create_sheet("成功送樣", success_samples)

        # 3. Orders received (rows with an order no.)
        orders_received = [row for row in all_data if row['order_no']]
        create_sheet("取得訂單", orders_received)

        # Save to BytesIO
        output = io.BytesIO()
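The hunk is cut off by the viewer at output = io.BytesIO(). The usual openpyxl pattern for finishing an in-memory workbook looks like the sketch below; this is the conventional tail, not necessarily this file's exact code:

    output = io.BytesIO()
    wb.save(output)      # serialize the workbook into the buffer
    output.seek(0)       # rewind so the caller reads from the start
    return output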
@@ -13,9 +13,21 @@ pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")

def verify_password(plain_password: str, hashed_password: str) -> bool:
    # bcrypt only considers the first 72 bytes of a password, and some backends
    # raise on longer input. Truncate at the byte level (slicing the string would
    # miscount for multi-byte UTF-8 characters) and drop any partial character
    # left at the cut with errors='ignore'.
    password_bytes = plain_password.encode('utf-8')
    if len(password_bytes) > 72:
        plain_password = password_bytes[:72].decode('utf-8', errors='ignore')
    return pwd_context.verify(plain_password, hashed_password)

def get_password_hash(password: str) -> str:
    # Apply the same 72-byte truncation as verify_password so hashing and
    # verification always agree on long passwords.
    password_bytes = password.encode('utf-8')
    if len(password_bytes) > 72:
        password = password_bytes[:72].decode('utf-8', errors='ignore')
    return pwd_context.hash(password)

def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
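Because both helpers truncate to the same 72-byte prefix, a password longer than the limit verifies against its own hash, and so does any other password sharing that prefix. A quick sanity-check sketch, assuming the helpers live at app.utils.security as they do elsewhere in this commit:

    from app.utils.security import get_password_hash, verify_password

    long_password = "x" * 100                       # 100 bytes, beyond bcrypt's 72-byte limit
    hashed = get_password_hash(long_password)

    assert verify_password(long_password, hashed)   # truncated the same way on both sides
    assert verify_password("x" * 72, hashed)        # same 72-byte prefix, so this passes too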
@@ -12,7 +12,10 @@ def create_admin_user():
    # Check if user exists
    user = db.query(User).filter(User.email == email).first()
    if user:
        print(f"User {email} already exists.")
        print(f"User {email} already exists. Updating password...")
        user.password_hash = get_password_hash(password)
        db.commit()
        print(f"Password updated for {email} to: {password}")
        return

    # Create new admin user
43
backend/debug_db.py
Normal file
@@ -0,0 +1,43 @@

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.models.sample import SampleRecord
from app.models.order import OrderRecord
from app.config import DATABASE_URL

def debug_db():
    engine = create_engine(DATABASE_URL)
    Session = sessionmaker(bind=engine)
    session = Session()

    target_pn_fragment = "PSMQC098N10LS2"
    target_cust_id = "S14500"

    print(f"--- Searching for PN containing '{target_pn_fragment}' ---")

    print("\n[Sample Records]")
    samples = session.query(SampleRecord).filter(SampleRecord.pn.contains(target_pn_fragment)).all()
    for s in samples:
        print(f"ID: {s.id}, CustID: '{s.cust_id}', PN: '{s.pn}', Customer: '{s.customer}', Date: {s.date}")

    print("\n[Order Records]")
    orders = session.query(OrderRecord).filter(OrderRecord.pn.contains(target_pn_fragment)).all()
    for o in orders:
        print(f"ID: {o.id}, CustID: '{o.cust_id}', PN: '{o.pn}', Customer: '{o.customer}', Date: {o.date}")

    print(f"\n--- Searching for Cust ID '{target_cust_id}' ---")

    print("\n[Sample Records with S14500]")
    samples_c = session.query(SampleRecord).filter(SampleRecord.cust_id == target_cust_id).limit(5).all()
    for s in samples_c:
        print(f"ID: {s.id}, CustID: '{s.cust_id}', PN: '{s.pn}'")

    print("\n[Order Records with S14500]")
    orders_c = session.query(OrderRecord).filter(OrderRecord.cust_id == target_cust_id).limit(5).all()
    for o in orders_c:
        print(f"ID: {o.id}, CustID: '{o.cust_id}', PN: '{o.pn}'")

    session.close()

if __name__ == "__main__":
    debug_db()
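SQLAlchemy's .contains() on a string column compiles to a LIKE '%...%' predicate, so the PN lookups above are equivalent to the explicit form below (illustrative only):

    samples = (
        session.query(SampleRecord)
        .filter(SampleRecord.pn.like(f"%{target_pn_fragment}%"))
        .all()
    )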
54
backend/debug_lab_logic.py
Normal file
@@ -0,0 +1,54 @@

from app.services.fuzzy_matcher import normalize_customer_name, normalize_pn_for_matching
from app.routers.lab import parse_date
from datetime import datetime

def debug_lab_logic():
    print("--- Debugging Lab Logic Normalization ---")

    # Test Data from User Scenario
    sample_cust = "Semisales Co., LTD"
    sample_pn = "PSMQC098N10LS2-AU_R2_002A1"
    sample_date_str = "20250913"  # From previous debug output

    order_cust = "SEMISALES"
    order_pn = "PSMQC098N10LS2-AU_R2_002A1"
    order_date_str = "2025-06-05"  # From previous debug output

    # Normalization
    norm_sample_cust = normalize_customer_name(sample_cust)
    norm_order_cust = normalize_customer_name(order_cust)

    norm_sample_pn = normalize_pn_for_matching(sample_pn)
    norm_order_pn = normalize_pn_for_matching(order_pn)

    print(f"Sample Customer '{sample_cust}' -> '{norm_sample_cust}'")
    print(f"Order Customer '{order_cust}' -> '{norm_order_cust}'")
    print(f"Customer Match: {norm_sample_cust == norm_order_cust}")

    print(f"Sample PN '{sample_pn}' -> '{norm_sample_pn}'")
    print(f"Order PN '{order_pn}' -> '{norm_order_pn}'")
    print(f"PN Match: {norm_sample_pn == norm_order_pn}")

    # Key Check
    sample_key = (norm_sample_cust, norm_sample_pn)
    order_key = (norm_order_cust, norm_order_pn)
    print(f"Sample Key: {sample_key}")
    print(f"Order Key: {order_key}")
    print(f"Key Match: {sample_key == order_key}")

    # Date Parsing Check
    print("\n--- Date Parsing Check ---")
    s_date = parse_date(sample_date_str)
    o_date = parse_date(order_date_str)
    print(f"Sample Date Raw: '{sample_date_str}' -> Parsed: {s_date}")
    print(f"Order Date Raw: '{order_date_str}' -> Parsed: {o_date}")

    if s_date and o_date:
        diff = (o_date - s_date).days
        print(f"Date Diff (Order - Sample): {diff} days")
        if diff < 0:
            print("WARNING: Order is BEFORE Sample. Velocity calculation might filter this out if checking diff >= 0.")

if __name__ == "__main__":
    debug_lab_logic()
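The script feeds parse_date both the compact form ("20250913") and the dashed form ("2025-06-05"), so the helper in app.routers.lab presumably accepts both. A minimal sketch of such a parser, purely illustrative (the router's real implementation is not shown in this commit):

    from datetime import datetime, date
    from typing import Optional

    def parse_date(raw: str) -> Optional[date]:
        """Parse '20250913' or '2025-06-05'; return None when neither format fits."""
        if not raw:
            return None
        for fmt in ("%Y%m%d", "%Y-%m-%d"):
            try:
                return datetime.strptime(raw.strip(), fmt).date()
            except ValueError:
                continue
        return None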
@@ -4,6 +4,7 @@ sqlalchemy==2.0.23
python-multipart==0.0.6
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
bcrypt==3.2.0
openpyxl==3.1.2
pandas==2.1.3
rapidfuzz==3.5.2
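The explicit bcrypt==3.2.0 pin added here is presumably a compatibility guard: passlib 1.7.4 probes its backend's version via the bcrypt module's __about__ attribute, which bcrypt 4.x removed, so leaving bcrypt unpinned alongside this passlib release triggers version-detection errors.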
33
backend/verify_login.py
Normal file
@@ -0,0 +1,33 @@
from app.models import init_db, SessionLocal
from app.models.user import User
from app.utils.security import verify_password, get_password_hash

def test_login():
    db = SessionLocal()
    email = "admin@example.com"
    password = "admin"

    user = db.query(User).filter(User.email == email).first()
    if not user:
        print(f"User {email} not found!")
        return

    print(f"User found: {user.email}")
    print(f"Stored Hash: {user.password_hash}")

    # Test verification
    is_valid = verify_password(password, user.password_hash)
    print(f"Password '{password}' valid? {is_valid}")

    if not is_valid:
        print("Attempting to reset password...")
        user.password_hash = get_password_hash(password)
        db.commit()
        print("Password reset. Testing again...")
        is_valid = verify_password(password, user.password_hash)
        print(f"Password '{password}' valid? {is_valid}")

    db.close()

if __name__ == "__main__":
    test_login()