579 lines
22 KiB
Python
579 lines
22 KiB
Python
from datetime import date, datetime, timedelta
from typing import List, Optional

from fastapi import APIRouter, Depends, Query
from pydantic import BaseModel
from sqlalchemy import func, and_
from sqlalchemy.orm import Session

from app.models import get_db
from app.models.match import MatchResult, MatchStatus, TargetType
from app.models.order import OrderRecord
from app.models.sample import SampleRecord
from app.services.fuzzy_matcher import normalize_pn_for_matching, normalize_customer_name
|
|
|
|
# All endpoints below are mounted under /lab and grouped under the "Lab"
# tag in the generated OpenAPI docs.
router = APIRouter(prefix="/lab", tags=["Lab"])
|
|
|
|
class LabKPI(BaseModel):
    """Aggregate KPI figures for the sample-to-order conversion dashboard."""

    converted_count: int  # total sample groups that converted into orders
    avg_velocity: float  # average conversion time (days)
    conversion_rate: float  # conversion ratio (%)
    orphan_count: int  # total orphan samples (90+ days, no matching order)
    no_dit_count: int  # high-qty samples with no attributed DIT match
|
|
|
|
class ConversionRecord(BaseModel):
    """One sample that later converted into at least one post-sample order."""

    customer: str
    pn: str
    sample_date: str  # "YYYY-MM-DD"
    sample_qty: int
    order_date: str  # date of the first post-sample order, "YYYY-MM-DD"
    order_qty: int  # First Order Qty (aggregated over rows sharing that date)
    total_order_qty: int  # Total Order Qty (Post-Sample, cumulative)
    days_to_convert: int  # days between sample date and first order date
|
|
|
|
class ScatterPoint(BaseModel):
    """Sample qty vs. matched order qty for one (customer, PN) group."""

    customer: str
    pn: str
    sample_qty: int
    order_qty: int
|
|
|
|
class OrphanSample(BaseModel):
    """A sample older than 90 days with no matching post-sample order."""

    customer: str
    pn: str
    days_since_sent: int
    order_no: Optional[str] = None
    date: Optional[str] = None  # sample date "YYYY-MM-DD", or "Unknown"
    sample_qty: int = 0  # aggregated over samples sharing the group key
|
|
|
|
class NoDitSample(BaseModel):
    """A high-quantity sample with no accepted or auto-matched DIT record."""

    sample_id: str
    customer: str
    pn: str
    order_no: Optional[str]
    date: Optional[str]  # "YYYY-MM-DD" when parseable, else None
    qty: int
|
|
|
|
|
|
def parse_date(date_val) -> Optional[datetime]:
    """Best-effort conversion of a raw date value into a naive ``datetime``.

    Accepts ``datetime`` (returned as-is), ``date`` (promoted to midnight),
    ISO-8601 strings (a trailing ``Z`` is mapped to ``+00:00``), and a handful
    of common ``YYYY-MM-DD``-style formats.  Returns ``None`` for anything that
    cannot be parsed instead of raising, so callers can treat "no date" and
    "bad date" uniformly.
    """
    if not date_val:
        return None
    if isinstance(date_val, datetime):
        return date_val
    # SQLAlchemy Date columns yield datetime.date objects; previously these
    # fell through and were dropped as unparseable.  Promote to midnight so
    # date arithmetic against sample/order datetimes works uniformly.
    if isinstance(date_val, date):
        return datetime(date_val.year, date_val.month, date_val.day)
    if isinstance(date_val, str):
        date_str = date_val.strip()
        try:
            if "T" in date_str:
                return datetime.fromisoformat(date_str.replace("Z", "+00:00"))

            # Try common formats
            for fmt in ["%Y-%m-%d", "%Y/%m/%d", "%Y.%m.%d", "%d-%m-%Y", "%Y%m%d"]:
                try:
                    return datetime.strptime(date_str, fmt)
                except ValueError:
                    continue

            # Unknown format: give up rather than pulling a heavy parser
            # (e.g. pandas) into this per-row hot path.
            return None
        except ValueError:
            return None
    return None
|
|
|
|
def normalize_id(val: object) -> str:
    """Normalize an ID for matching (strip whitespace/quotes, stringify).

    Handles the usual Excel/CSV round-trip artifacts: a leading apostrophe
    (text-formatted cell marker) and a trailing ``.0`` (numeric ID cast to
    float).  ``None`` becomes the empty string; the result is upper-cased.

    Note: the original annotation was ``val: any`` — the builtin ``any``
    function, not a type — fixed to ``object``.
    """
    if val is None:
        return ""
    s = str(val).strip()
    s = s.lstrip("'")  # Excel prefixes text-formatted numbers with '
    if s.endswith(".0"):
        s = s[:-2]  # e.g. 1234.0 -> 1234 after a float round-trip
    return s.upper()
|
|
|
|
def find_matched_orders(s, order_lookup_by_id, order_lookup_by_name, orders_by_cust_name):
    """Collect every order row that plausibly matches sample ``s``.

    Strategy (permissive: exact hits from both keys are combined):
      1. exact (normalized customer id, normalized PN) lookup;
      2. exact (normalized customer name, normalized PN) lookup — always
         tried as well, since some order rows may lack a customer id;
      3. only when both exact strategies find nothing, a noisier fallback
         that prefix-compares PNs within the same customer.

    The combined list is deduplicated by the (date, qty, order_no) content
    signature, preserving first-seen order.
    """
    clean_pn = normalize_pn_for_matching(s.pn)
    norm_cust_name = normalize_customer_name(s.customer)
    clean_cust_id = normalize_id(s.cust_id)

    candidates = []

    # 1. Exact match on (customer id, PN).
    if clean_cust_id:
        candidates += order_lookup_by_id.get((clean_cust_id, clean_pn), [])

    # 2. Exact match on (customer name, PN).
    candidates += order_lookup_by_name.get((norm_cust_name, clean_pn), [])

    # 3. Prefix fallback, used only when the exact strategies came up empty
    #    (prefix matching can introduce noise otherwise).
    if not candidates:
        for o_dat in orders_by_cust_name.get(norm_cust_name, []):
            o_pn = o_dat['clean_pn']
            if o_pn and clean_pn and (clean_pn.startswith(o_pn) or o_pn.startswith(clean_pn)):
                candidates.append(o_dat)

    # Deduplicate by content signature; the order dicts are rebuilt per
    # request, so object identity cannot be used here.
    seen = set()
    unique_candidates = []
    for cand in candidates:
        sig = (cand["date"], cand["qty"], cand["order_no"])
        if sig in seen:
            continue
        seen.add(sig)
        unique_candidates.append(cand)

    return unique_candidates
|
|
|
|
@router.get("/conversions", response_model=List[ConversionRecord])
|
|
def get_conversions(db: Session = Depends(get_db)):
|
|
samples = db.query(SampleRecord).all()
|
|
orders = db.query(OrderRecord).all()
|
|
|
|
# Build Lookups
|
|
order_lookup_by_id = {}
|
|
order_lookup_by_name = {}
|
|
orders_by_cust_name = {} # For prefix matching: name -> list of {clean_pn, date, qty, ...}
|
|
|
|
for o in orders:
|
|
clean_pn = normalize_pn_for_matching(o.pn)
|
|
clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
|
|
norm_cust_name = normalize_customer_name(o.customer)
|
|
|
|
o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)
|
|
|
|
data = {
|
|
"date": o_date,
|
|
"qty": o.qty or 0,
|
|
"order_no": o.order_no,
|
|
"clean_pn": clean_pn # Store for prefix check
|
|
}
|
|
|
|
if clean_cust_id:
|
|
key_id = (clean_cust_id, clean_pn)
|
|
if key_id not in order_lookup_by_id: order_lookup_by_id[key_id] = []
|
|
order_lookup_by_id[key_id].append(data)
|
|
|
|
key_name = (norm_cust_name, clean_pn)
|
|
if key_name not in order_lookup_by_name: order_lookup_by_name[key_name] = []
|
|
order_lookup_by_name[key_name].append(data)
|
|
|
|
if norm_cust_name not in orders_by_cust_name: orders_by_cust_name[norm_cust_name] = []
|
|
orders_by_cust_name[norm_cust_name].append(data)
|
|
|
|
conversions = []
|
|
|
|
for s in samples:
|
|
matched_orders = find_matched_orders(s, order_lookup_by_id, order_lookup_by_name, orders_by_cust_name)
|
|
s_date = parse_date(s.date)
|
|
|
|
if matched_orders and s_date:
|
|
# STRICT FILTER: Only consider orders AFTER or ON sample date
|
|
valid_orders = [o for o in matched_orders if o["date"] >= s_date]
|
|
|
|
if valid_orders:
|
|
# Sort orders by date
|
|
valid_orders.sort(key=lambda x: x["date"])
|
|
|
|
# Identify First Order Date & Aggregate Qty for that date
|
|
first_order = valid_orders[0]
|
|
first_date = first_order["date"]
|
|
|
|
# Sum qty of ALL orders that match the first order date
|
|
first_date_qty = sum(o["qty"] for o in valid_orders if o["date"] == first_date)
|
|
|
|
# Total Order Qty (Cumulative for all valid post-sample orders)
|
|
total_order_qty = sum(o["qty"] for o in valid_orders)
|
|
|
|
days_diff = (first_date - s_date).days
|
|
s_date_str = s_date.strftime("%Y-%m-%d")
|
|
|
|
conversions.append(ConversionRecord(
|
|
customer=s.customer,
|
|
pn=s.pn,
|
|
sample_date=s_date_str,
|
|
sample_qty=s.qty or 0,
|
|
order_date=first_date.strftime("%Y-%m-%d"),
|
|
order_qty=first_date_qty, # Show First Order Qty ONLY
|
|
total_order_qty=total_order_qty, # Show Total Qty
|
|
days_to_convert=days_diff
|
|
))
|
|
|
|
return sorted(conversions, key=lambda x: x.sample_date if x.sample_date else "0000-00-00", reverse=True)
|
|
|
|
@router.get("/kpi", response_model=LabKPI)
|
|
def get_lab_kpi(
|
|
start_date: Optional[str] = Query(None),
|
|
end_date: Optional[str] = Query(None),
|
|
db: Session = Depends(get_db)
|
|
):
|
|
# Fetch Data
|
|
samples_query = db.query(SampleRecord)
|
|
orders_query = db.query(OrderRecord)
|
|
|
|
if start_date:
|
|
samples_query = samples_query.filter(SampleRecord.date >= start_date)
|
|
orders_query = orders_query.filter(OrderRecord.date >= start_date)
|
|
if end_date:
|
|
samples_query = samples_query.filter(SampleRecord.date <= end_date)
|
|
orders_query = orders_query.filter(OrderRecord.date <= end_date)
|
|
|
|
samples = samples_query.all()
|
|
orders = orders_query.all()
|
|
|
|
# Build Lookups (Same as conversions)
|
|
orders_by_cust_name = {}
|
|
order_lookup_by_id = {}
|
|
order_lookup_by_name = {}
|
|
|
|
for o in orders:
|
|
clean_pn = normalize_pn_for_matching(o.pn)
|
|
clean_cust_id = o.cust_id.strip().upper() if o.cust_id else ""
|
|
norm_cust_name = normalize_customer_name(o.customer)
|
|
o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)
|
|
|
|
# We only need dates for KPI
|
|
if clean_cust_id:
|
|
key_id = (clean_cust_id, clean_pn)
|
|
if key_id not in order_lookup_by_id: order_lookup_by_id[key_id] = []
|
|
order_lookup_by_id[key_id].append(o_date)
|
|
|
|
key_name = (norm_cust_name, clean_pn)
|
|
if key_name not in order_lookup_by_name: order_lookup_by_name[key_name] = []
|
|
order_lookup_by_name[key_name].append(o_date)
|
|
|
|
if norm_cust_name not in orders_by_cust_name: orders_by_cust_name[norm_cust_name] = []
|
|
orders_by_cust_name[norm_cust_name].append({ "clean_pn": clean_pn, "date": o_date })
|
|
|
|
# Group Samples by (CustName, PN) for Project Count
|
|
unique_sample_groups = {}
|
|
|
|
for s in samples:
|
|
clean_pn = normalize_pn_for_matching(s.pn)
|
|
norm_cust_name = normalize_customer_name(s.customer)
|
|
|
|
key = (norm_cust_name, clean_pn)
|
|
if key not in unique_sample_groups:
|
|
unique_sample_groups[key] = {
|
|
"dates": [],
|
|
"cust_ids": set(),
|
|
"raw_pns": set()
|
|
}
|
|
s_date = parse_date(s.date)
|
|
if s_date: unique_sample_groups[key]["dates"].append(s_date)
|
|
if s.cust_id: unique_sample_groups[key]["cust_ids"].add(s.cust_id.strip().upper())
|
|
unique_sample_groups[key]["raw_pns"].add(clean_pn)
|
|
|
|
# Calculate
|
|
total_samples_count = len(unique_sample_groups)
|
|
converted_count = 0
|
|
orphan_count = 0
|
|
velocities = []
|
|
now = datetime.now()
|
|
|
|
for key, data in unique_sample_groups.items():
|
|
norm_cust_name, group_clean_pn = key
|
|
|
|
matched_dates = []
|
|
|
|
# 1. Try ID Match
|
|
for cid in data["cust_ids"]:
|
|
if (cid, group_clean_pn) in order_lookup_by_id:
|
|
matched_dates.extend(order_lookup_by_id[(cid, group_clean_pn)])
|
|
|
|
# 2. Try Name Match
|
|
if not matched_dates:
|
|
if key in order_lookup_by_name:
|
|
matched_dates.extend(order_lookup_by_name[key])
|
|
|
|
# 3. Try Prefix Match (Using first available PN in group vs Orders of same customer)
|
|
if not matched_dates and norm_cust_name in orders_by_cust_name:
|
|
candidates = orders_by_cust_name[norm_cust_name]
|
|
for o_dat in candidates:
|
|
o_pn = o_dat['clean_pn']
|
|
# Check against ANY PN in this sample group
|
|
for s_pn in data["raw_pns"]:
|
|
if o_pn and (s_pn.startswith(o_pn) or o_pn.startswith(s_pn)):
|
|
matched_dates.append(o_dat["date"])
|
|
|
|
if matched_dates:
|
|
earliest_sample = min(data["dates"]) if data["dates"] else None
|
|
|
|
# STRICT FILTER: Post-Sample Orders Only
|
|
valid_dates = []
|
|
if earliest_sample:
|
|
valid_dates = [d for d in matched_dates if d >= earliest_sample]
|
|
|
|
if valid_dates:
|
|
converted_count += 1
|
|
first_order = min(valid_dates)
|
|
|
|
diff = (first_order - earliest_sample).days
|
|
if diff >= 0:
|
|
velocities.append(diff)
|
|
else:
|
|
# No valid post-sample order -> Potential Orphan
|
|
if earliest_sample and (now - earliest_sample).days > 90:
|
|
orphan_count += 1
|
|
else:
|
|
# Orphan Check
|
|
earliest_sample = min(data["dates"]) if data["dates"] else None
|
|
# If no date, can't determine orphans strictly, but also definitely not converted.
|
|
# Only count as orphan if we know it's old enough.
|
|
if earliest_sample and (now - earliest_sample).days > 90:
|
|
orphan_count += 1
|
|
|
|
avg_velocity = sum(velocities) / len(velocities) if velocities else 0
|
|
conversion_rate = (converted_count / total_samples_count * 100) if total_samples_count > 0 else 0
|
|
|
|
# Calculate No DIT High Qty Samples (Count)
|
|
kpi_samples_query = db.query(SampleRecord).filter(SampleRecord.qty >= 1000)
|
|
if start_date: kpi_samples_query = kpi_samples_query.filter(SampleRecord.date >= start_date)
|
|
if end_date: kpi_samples_query = kpi_samples_query.filter(SampleRecord.date <= end_date)
|
|
|
|
high_qty_samples = kpi_samples_query.all()
|
|
high_qty_ids = [s.id for s in high_qty_samples]
|
|
|
|
no_dit_count = 0
|
|
if high_qty_ids:
|
|
matched_ids = db.query(MatchResult.target_id).filter(
|
|
MatchResult.target_id.in_(high_qty_ids),
|
|
MatchResult.target_type == TargetType.SAMPLE,
|
|
MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
|
|
).all()
|
|
matched_ids_set = set(m[0] for m in matched_ids)
|
|
no_dit_count = len([sid for sid in high_qty_ids if sid not in matched_ids_set])
|
|
|
|
return LabKPI(
|
|
converted_count=converted_count,
|
|
avg_velocity=round(avg_velocity, 1),
|
|
conversion_rate=round(conversion_rate, 1),
|
|
orphan_count=orphan_count,
|
|
no_dit_count=no_dit_count
|
|
)
|
|
|
|
@router.get("/scatter", response_model=List[ScatterPoint])
|
|
def get_scatter_data(
|
|
start_date: Optional[str] = Query(None),
|
|
end_date: Optional[str] = Query(None),
|
|
db: Session = Depends(get_db)
|
|
):
|
|
samples_query = db.query(SampleRecord)
|
|
orders_query = db.query(OrderRecord)
|
|
|
|
if start_date:
|
|
samples_query = samples_query.filter(SampleRecord.date >= start_date)
|
|
if end_date:
|
|
samples_query = samples_query.filter(SampleRecord.date <= end_date)
|
|
|
|
samples = samples_query.all()
|
|
orders = orders_query.all()
|
|
|
|
# Build Lookups (simplified for aggregation)
|
|
orders_by_cust_name = {} # name -> list of {clean_pn, qty, date}
|
|
|
|
for o in orders:
|
|
norm_cust_name = normalize_customer_name(o.customer)
|
|
clean_pn = normalize_pn_for_matching(o.pn)
|
|
o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)
|
|
|
|
if norm_cust_name not in orders_by_cust_name:
|
|
orders_by_cust_name[norm_cust_name] = []
|
|
orders_by_cust_name[norm_cust_name].append({
|
|
"clean_pn": clean_pn,
|
|
"qty": o.qty or 0,
|
|
"date": o_date
|
|
})
|
|
|
|
# Group by (Display Cust, Display PN) - but we need to match broadly
|
|
# Strategy: Group by Display Keys first, then try to find match for that group
|
|
|
|
unique_groups = {} # (norm_cust, clean_pn) -> {display_cust, display_pn, sample_qty, order_qty, min_sample_date}
|
|
|
|
for s in samples:
|
|
norm_cust_name = normalize_customer_name(s.customer)
|
|
clean_pn = normalize_pn_for_matching(s.pn)
|
|
s_date = parse_date(s.date)
|
|
key = (norm_cust_name, clean_pn)
|
|
|
|
if key not in unique_groups:
|
|
unique_groups[key] = {
|
|
"display_cust": s.customer,
|
|
"display_pn": s.pn,
|
|
"sample_qty": 0,
|
|
"order_qty": 0,
|
|
"min_sample_date": s_date
|
|
}
|
|
unique_groups[key]["sample_qty"] += (s.qty or 0)
|
|
|
|
# Update min date
|
|
current_min = unique_groups[key]["min_sample_date"]
|
|
if s_date:
|
|
if not current_min or s_date < current_min:
|
|
unique_groups[key]["min_sample_date"] = s_date
|
|
|
|
# Fill Order Qty
|
|
for key, data in unique_groups.items():
|
|
norm_cust_name, sample_clean_pn = key
|
|
min_s_date = data["min_sample_date"]
|
|
|
|
matched_qty = 0
|
|
|
|
if norm_cust_name in orders_by_cust_name:
|
|
candidates = orders_by_cust_name[norm_cust_name]
|
|
for o_dat in candidates:
|
|
o_pn = o_dat['clean_pn']
|
|
o_date = o_dat['date']
|
|
|
|
# Check Date Causality first
|
|
if min_s_date and o_date < min_s_date:
|
|
continue
|
|
|
|
# Exact or Prefix Match
|
|
if o_pn and (sample_clean_pn == o_pn or sample_clean_pn.startswith(o_pn) or o_pn.startswith(sample_clean_pn)):
|
|
matched_qty += o_dat['qty']
|
|
|
|
data["order_qty"] = matched_qty
|
|
|
|
return [
|
|
ScatterPoint(
|
|
customer=v["display_cust"],
|
|
pn=v["display_pn"],
|
|
sample_qty=v["sample_qty"],
|
|
order_qty=v["order_qty"]
|
|
)
|
|
for key, v in unique_groups.items()
|
|
]
|
|
|
|
@router.get("/orphans", response_model=List[OrphanSample])
|
|
def get_orphans(db: Session = Depends(get_db)):
|
|
now = datetime.now()
|
|
threshold_date = now - timedelta(days=90)
|
|
|
|
samples = db.query(SampleRecord).all()
|
|
# Need to match logic check
|
|
# To save time, we can fetch all orders and build lookup
|
|
orders = db.query(OrderRecord).all()
|
|
|
|
# Build Lookup for Fast Checking
|
|
orders_by_cust_name = {}
|
|
for o in orders:
|
|
norm_cust_name = normalize_customer_name(o.customer)
|
|
clean_pn = normalize_pn_for_matching(o.pn)
|
|
o_date = parse_date(o.date) or (o.created_at.replace(tzinfo=None) if o.created_at else datetime.max)
|
|
|
|
if norm_cust_name not in orders_by_cust_name: orders_by_cust_name[norm_cust_name] = []
|
|
orders_by_cust_name[norm_cust_name].append({
|
|
"clean_pn": clean_pn,
|
|
"date": o_date
|
|
})
|
|
|
|
# Aggregation Dictionary
|
|
# Key: (normalized_customer, normalized_pn, order_no, date_str)
|
|
# Value: { "raw_customer": str, "raw_pn": str, "qty": int, "date_obj": datetime }
|
|
orphan_groups = {}
|
|
|
|
for s in samples:
|
|
norm_cust_name = normalize_customer_name(s.customer)
|
|
clean_pn = normalize_pn_for_matching(s.pn)
|
|
s_date = parse_date(s.date)
|
|
s_date_str = s_date.strftime("%Y-%m-%d") if s_date else "Unknown"
|
|
s_order_no = s.order_no.strip() if s.order_no else ""
|
|
|
|
# Check if matched (Logic same as before, check against all orders)
|
|
matched = False
|
|
|
|
if s_date and norm_cust_name in orders_by_cust_name:
|
|
candidates = orders_by_cust_name[norm_cust_name]
|
|
for o_dat in candidates:
|
|
o_pn = o_dat['clean_pn']
|
|
o_date = o_dat['date']
|
|
|
|
# Check Date Causality first
|
|
if o_date < s_date:
|
|
continue
|
|
|
|
# Check PN Match (Exact or Prefix)
|
|
if o_pn and (clean_pn == o_pn or clean_pn.startswith(o_pn) or o_pn.startswith(clean_pn)):
|
|
matched = True
|
|
break
|
|
|
|
if not matched:
|
|
# Only consider old enough samples
|
|
if s_date and s_date < threshold_date:
|
|
# Add to group
|
|
# We use the FIRST raw customer/pn encountered for display, or could be smarter.
|
|
# Group Key: (norm_cust, clean_pn, order_no, date)
|
|
key = (norm_cust_name, clean_pn, s_order_no, s_date_str)
|
|
|
|
if key not in orphan_groups:
|
|
orphan_groups[key] = {
|
|
"customer": s.customer,
|
|
"pn": s.pn,
|
|
"order_no": s.order_no,
|
|
"date": s_date_str,
|
|
"qty": 0,
|
|
"days": (now - s_date).days
|
|
}
|
|
|
|
orphan_groups[key]["qty"] += (s.qty or 0)
|
|
|
|
# Convert groups to list
|
|
orphans = []
|
|
for data in orphan_groups.values():
|
|
orphans.append(OrphanSample(
|
|
customer=data["customer"],
|
|
pn=data["pn"],
|
|
days_since_sent=data["days"],
|
|
order_no=data["order_no"],
|
|
date=data["date"],
|
|
sample_qty=data["qty"]
|
|
))
|
|
|
|
return sorted(orphans, key=lambda x: x.days_since_sent, reverse=True)
|
|
|
|
@router.get("/no_dit_samples", response_model=List[NoDitSample])
|
|
def get_no_dit_samples(db: Session = Depends(get_db)):
|
|
# Filter High Qty Samples
|
|
high_qty_samples = db.query(SampleRecord).filter(SampleRecord.qty >= 1000).all()
|
|
|
|
results = []
|
|
# Batch query matches for efficiency
|
|
sample_ids = [s.id for s in high_qty_samples]
|
|
if not sample_ids:
|
|
return []
|
|
|
|
matched_ids = db.query(MatchResult.target_id).filter(
|
|
MatchResult.target_id.in_(sample_ids),
|
|
MatchResult.target_type == TargetType.SAMPLE,
|
|
MatchResult.status.in_([MatchStatus.accepted, MatchStatus.auto_matched])
|
|
).all()
|
|
|
|
matched_ids_set = set(m[0] for m in matched_ids)
|
|
|
|
for s in high_qty_samples:
|
|
if s.id not in matched_ids_set:
|
|
s_date = parse_date(s.date)
|
|
results.append(NoDitSample(
|
|
sample_id=str(s.id),
|
|
customer=s.customer,
|
|
pn=s.pn,
|
|
order_no=s.order_no,
|
|
date=s_date.strftime("%Y-%m-%d") if s_date else None,
|
|
qty=s.qty
|
|
))
|
|
|
|
return sorted(results, key=lambda x: x.qty, reverse=True)
|