Commit f01c86d
Parent(s): c011fe9

Initial commit for Hugging Face Space deployment
Files changed:

- .gitignore +34 -0
- Dockerfile +22 -0
- README.md +23 -0
- app/main.py +58 -0
- app/routers/inference.py +218 -0
- app/static/css/style.css +140 -0
- app/static/img/logo.png +0 -0
- app/templates/index.html +82 -0
- app/templates/login.html +22 -0
- configs/damage_config.yaml +13 -0
- configs/parts_config.yaml +13 -0
- inference/damage_inference.py +58 -0
- inference/parts_inference.py +74 -0
- requirements.txt +12 -0
- training/damage/data.yaml +9 -0
- training/damage/train.py +66 -0
- training/parts/data.yaml +15 -0
- training/parts/train.py +66 -0
- utils/balance_parts_dataset.py +95 -0
- utils/coco_helpers.py +0 -0
- utils/matching.py +19 -0
- utils/organize_complete_datasets.py +220 -0
- utils/visualization.py +0 -0
.gitignore
ADDED
@@ -0,0 +1,34 @@
# Python
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
ENV/
env.bak/
venv.bak/
cardamage_venv/

# VSCode
.vscode/

# OS
.DS_Store
Thumbs.db

# Backup folders
.backup_before_cleanup

# Results/uploads
app/static/results/
app/static/uploads/
damage_detection_dataset/
inference_results/
data/
# Models and data
models/
damage_detection_dataset/
inference_results/
data/
Dockerfile
ADDED
@@ -0,0 +1,22 @@
FROM python:3.10-slim

WORKDIR /code

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy app code
COPY . .

# Expose port
EXPOSE 7860

# Run FastAPI app with uvicorn
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
ADDED
@@ -0,0 +1,23 @@
---
title: Car Damage & Parts Detection
emoji: 🚗
colorFrom: blue
colorTo: indigo
sdk: docker
app_file: app/main.py
pinned: false
---

# Car Damage & Parts Detection

Upload a car image to detect damaged regions and parts using YOLOv8 models.

## How to use
- Upload a car image.
- View annotated results and JSON output.

## Deployment
This Space uses FastAPI and YOLOv8, running in a Docker container.

## Model Weights
Model weights are downloaded at startup from public cloud links (not included in repo).
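A quick way to exercise the Space programmatically is to POST an image to the /upload endpoint defined in app/routers/inference.py; the response is the rendered results page, and the annotated images and JSON are also written under /static/results/. A minimal sketch (the host URL and image path are placeholders, not part of this commit):

    import requests

    BASE_URL = "http://localhost:7860"  # or the deployed Space URL
    with open("car.jpg", "rb") as f:    # any local test image
        resp = requests.post(f"{BASE_URL}/upload", files={"file": ("car.jpg", f, "image/jpeg")})
    resp.raise_for_status()
    print(resp.status_code)             # the body is the rendered index.html results page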
app/main.py
ADDED
@@ -0,0 +1,58 @@

# --- Model download logic (Hugging Face Hub) ---
import os
import requests

def download_if_missing(url, dest):
    if not os.path.exists(dest):
        print(f"Downloading model from {url} to {dest}...")
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(dest, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        print("Download complete.")

# Hugging Face direct download links
DAMAGE_MODEL_URL = "https://huggingface.co/AItoolstack/car_damage_detection/resolve/main/yolov8_models/damage/weights/weights/best.pt"
PARTS_MODEL_URL = "https://huggingface.co/AItoolstack/car_damage_detection/resolve/main/yolov8_models/parts/weights/weights/best.pt"

DAMAGE_MODEL_PATH = os.path.join("models", "damage", "weights", "weights", "best.pt")
PARTS_MODEL_PATH = os.path.join("models", "parts", "weights", "weights", "best.pt")

download_if_missing(DAMAGE_MODEL_URL, DAMAGE_MODEL_PATH)
download_if_missing(PARTS_MODEL_URL, PARTS_MODEL_PATH)

from fastapi import FastAPI, File, UploadFile, BackgroundTasks
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from datetime import datetime
import aiofiles
from pathlib import Path
import uuid
from app.routers import inference

app = FastAPI(title="Car Damage Detection API")

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount static files
app.mount("/static", StaticFiles(directory="app/static"), name="static")
templates = Jinja2Templates(directory="app/templates")

# Include routers
app.include_router(inference.router)

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
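For local testing outside the container, a minimal launcher sketch (a hypothetical helper, not part of this commit) is to mirror the Dockerfile CMD rather than the "main:app" string in the __main__ block above, which only resolves when run from inside app/:

    # run_local.py (hypothetical) — run from the repository root
    import uvicorn

    if __name__ == "__main__":
        # Same module path and port as the Dockerfile CMD
        uvicorn.run("app.main:app", host="0.0.0.0", port=7860, reload=True)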
app/routers/inference.py
ADDED
@@ -0,0 +1,218 @@

from fastapi import APIRouter, Request, UploadFile, File, Form
from fastapi.responses import HTMLResponse, FileResponse, JSONResponse
from fastapi.templating import Jinja2Templates
from starlette.background import BackgroundTask
import shutil
import os
import uuid
from pathlib import Path
from typing import Optional
import json
import base64
from ultralytics import YOLO
import cv2
import numpy as np


# Templates directory
TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")
templates = Jinja2Templates(directory=TEMPLATES_DIR)

router = APIRouter()

UPLOAD_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "static", "uploads")
RESULTS_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "static", "results")

os.makedirs(UPLOAD_DIR, exist_ok=True)
os.makedirs(RESULTS_DIR, exist_ok=True)

ALLOWED_EXTENSIONS = {"jpg", "jpeg", "png", "tiff", "tif"}

# Model paths
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
DAMAGE_MODEL_PATH = os.path.join(BASE_DIR, "models", "damage", "weights", "weights", "best.pt")
PARTS_MODEL_PATH = os.path.join(BASE_DIR, "models", "parts", "weights", "weights", "best.pt")

# Class names for parts
PARTS_CLASS_NAMES = ['headlamp', 'front_bumper', 'hood', 'door', 'rear_bumper']

# Helper: Run YOLO inference and return results
def run_yolo_inference(model_path, image_path, task='segment'):
    model = YOLO(model_path)
    results = model.predict(source=image_path, imgsz=640, conf=0.25, save=False, task=task)
    return results[0]

# Helper: Draw masks and confidence on image
def draw_masks_and_conf(image_path, yolo_result, class_names=None):
    img = cv2.imread(image_path)
    overlay = img.copy()
    out_img = img.copy()
    colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255)]
    for i, box in enumerate(yolo_result.boxes):
        conf = float(box.conf[0])
        cls = int(box.cls[0])
        color = colors[cls % len(colors)]
        # Draw bbox
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        cv2.rectangle(overlay, (x1, y1), (x2, y2), color, 2)
        label = f"{class_names[cls] if class_names else 'damage'}: {conf:.2f}"
        cv2.putText(overlay, label, (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
        # Draw mask if available
        if hasattr(yolo_result, 'masks') and yolo_result.masks is not None:
            mask = yolo_result.masks.data[i].cpu().numpy()
            mask = (mask * 255).astype(np.uint8)
            mask = cv2.resize(mask, (x2-x1, y2-y1))
            roi = overlay[y1:y2, x1:x2]
            colored_mask = np.zeros_like(roi)
            colored_mask[mask > 127] = color
            overlay[y1:y2, x1:x2] = cv2.addWeighted(roi, 0.5, colored_mask, 0.5, 0)
    out_img = cv2.addWeighted(overlay, 0.7, img, 0.3, 0)
    return out_img

# Helper: Generate JSON output
def generate_json_output(filename, damage_result, parts_result):
    # Damage severity: use max confidence
    severity_score = float(max([float(box.conf[0]) for box in damage_result.boxes], default=0))
    damage_regions = []
    for box in damage_result.boxes:
        x1, y1, x2, y2 = map(float, box.xyxy[0])
        conf = float(box.conf[0])
        damage_regions.append({"bbox": [x1, y1, x2, y2], "confidence": conf})
    # Parts
    parts = []
    for i, box in enumerate(parts_result.boxes):
        x1, y1, x2, y2 = map(float, box.xyxy[0])
        conf = float(box.conf[0])
        cls = int(box.cls[0])
        # Damage %: use mask area / bbox area if available
        damage_percentage = None
        if hasattr(parts_result, 'masks') and parts_result.masks is not None:
            mask = parts_result.masks.data[i].cpu().numpy()
            mask_area = np.sum(mask > 0.5)
            bbox_area = (x2-x1)*(y2-y1)
            damage_percentage = float(mask_area / bbox_area) if bbox_area > 0 else None
        parts.append({
            "part": PARTS_CLASS_NAMES[cls] if cls < len(PARTS_CLASS_NAMES) else str(cls),
            "damaged": True,
            "confidence": conf,
            "damage_percentage": damage_percentage,
            "bbox": [x1, y1, x2, y2]
        })
    # Optionally, add base64 masks
    # (not implemented here for brevity)
    return {
        "filename": filename,
        "damage": {
            "severity_score": severity_score,
            "regions": damage_regions
        },
        "parts": parts,
        "cost_estimate": None
    }

# Dummy login credentials
def check_login(username: str, password: str) -> bool:
    return username == "demo" and password == "demo123"

@router.get("/", response_class=HTMLResponse)
def home(request: Request):
    return templates.TemplateResponse("index.html", {"request": request, "result": None})

@router.post("/login", response_class=HTMLResponse)
def login(request: Request, username: str = Form(...), password: str = Form(...)):
    if check_login(username, password):
        return templates.TemplateResponse("index.html", {"request": request, "result": None, "user": username})
    return templates.TemplateResponse("login.html", {"request": request, "error": "Invalid credentials"})

@router.get("/login", response_class=HTMLResponse)
def login_page(request: Request):
    return templates.TemplateResponse("login.html", {"request": request})

@router.post("/upload", response_class=HTMLResponse)
def upload_image(request: Request, file: UploadFile = File(...)):
    ext = file.filename.split(".")[-1].lower()
    if ext not in ALLOWED_EXTENSIONS:
        return templates.TemplateResponse("index.html", {"request": request, "error": "Unsupported file type."})

    # Save uploaded file
    session_id = str(uuid.uuid4())
    upload_path = os.path.join(UPLOAD_DIR, f"{session_id}.{ext}")
    with open(upload_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)

    # Run both inferences
    try:
        damage_result = run_yolo_inference(DAMAGE_MODEL_PATH, upload_path)
        parts_result = run_yolo_inference(PARTS_MODEL_PATH, upload_path)

        # Save annotated images
        damage_img_path = os.path.join(RESULTS_DIR, f"{session_id}_damage.png")
        parts_img_path = os.path.join(RESULTS_DIR, f"{session_id}_parts.png")
        json_path = os.path.join(RESULTS_DIR, f"{session_id}_result.json")
        damage_img_url = f"/static/results/{session_id}_damage.png"
        parts_img_url = f"/static/results/{session_id}_parts.png"
        json_url = f"/static/results/{session_id}_result.json"

        # Defensive: set to None by default
        damage_img = None
        parts_img = None
        json_output = None

        # Only save and set if inference returns boxes
        if hasattr(damage_result, 'boxes') and len(damage_result.boxes) > 0:
            damage_img = draw_masks_and_conf(upload_path, damage_result)
            cv2.imwrite(damage_img_path, damage_img)
        if hasattr(parts_result, 'boxes') and len(parts_result.boxes) > 0:
            parts_img = draw_masks_and_conf(upload_path, parts_result, class_names=PARTS_CLASS_NAMES)
            cv2.imwrite(parts_img_path, parts_img)
        if (hasattr(damage_result, 'boxes') and len(damage_result.boxes) > 0) or (hasattr(parts_result, 'boxes') and len(parts_result.boxes) > 0):
            json_output = generate_json_output(file.filename, damage_result, parts_result)
            with open(json_path, "w") as jf:
                json.dump(json_output, jf, indent=2)

        # Prepare URLs for download (only if files exist)
        result = {
            "filename": file.filename,
            "damage_image": damage_img_url if damage_img is not None else None,
            "parts_image": parts_img_url if parts_img is not None else None,
            "json": json_output,
            "json_download": json_url if json_output is not None else None
        }
        # Debug log
        print("[DEBUG] Result dict:", result)
    except Exception as e:
        result = {
            "filename": file.filename,
            "error": f"Inference failed: {str(e)}",
            "damage_image": None,
            "parts_image": None,
            "json": None,
            "json_download": None
        }
        print("[ERROR] Inference failed:", e)

    import threading
    import time
    def delayed_cleanup():
        time.sleep(300)  # 5 minutes
        try:
            os.remove(upload_path)
        except Exception:
            pass
        for suffix in ["_damage.png", "_parts.png", "_result.json"]:
            try:
                os.remove(os.path.join(RESULTS_DIR, f"{session_id}{suffix}"))
            except Exception:
                pass

    threading.Thread(target=delayed_cleanup, daemon=True).start()

    return templates.TemplateResponse(
        "index.html",
        {
            "request": request,
            "result": result,
            "original_image": f"/static/uploads/{session_id}.{ext}"
        }
    )
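The helpers above can also be exercised without the web layer. A minimal sketch, assuming the weights already exist at the model paths used in this router and that a local test image car.jpg is available (both assumptions, not part of this commit):

    import json
    from app.routers.inference import (
        run_yolo_inference, generate_json_output,
        DAMAGE_MODEL_PATH, PARTS_MODEL_PATH,
    )

    damage_result = run_yolo_inference(DAMAGE_MODEL_PATH, "car.jpg")  # damage segmentation
    parts_result = run_yolo_inference(PARTS_MODEL_PATH, "car.jpg")    # parts segmentation
    print(json.dumps(generate_json_output("car.jpg", damage_result, parts_result), indent=2))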
app/static/css/style.css
ADDED
@@ -0,0 +1,140 @@
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap');

body {
    font-family: 'Roboto', Arial, sans-serif;
    background: #f4f6fa;
    margin: 0;
    padding: 0;
}
.header {
    background: linear-gradient(90deg, #003366 60%, #e30613 100%);
    color: #fff;
    padding: 24px 0 12px 0;
    text-align: center;
    box-shadow: 0 2px 8px #00336622;
}
.logo {
    max-width: 180px;
    margin-bottom: 10px;
}
@media (max-width: 600px) {
    .logo {
        max-width: 90px !important;
        right: 10px !important;
        top: 10px !important;
    }
    .header h2 { font-size: 1.1em; }
}
.container {
    max-width: 950px;
    margin: 40px auto 0 auto;
    background: #fff;
    padding: 32px 32px 24px 32px;
    border-radius: 12px;
    box-shadow: 0 4px 24px #00336622;
}
h2, h3, h4 {
    color: #003366;
    margin-top: 0;
}
form {
    margin-bottom: 24px;
    display: flex;
    flex-direction: column;
    align-items: center;
}
input[type="file"] {
    margin-bottom: 16px;
    font-size: 16px;
}
button {
    background: #e30613;
    color: #fff;
    border: none;
    padding: 12px 32px;
    border-radius: 6px;
    font-size: 16px;
    font-weight: 700;
    cursor: pointer;
    transition: background 0.2s;
    margin-bottom: 10px;
}
button:hover {
    background: #003366;
}
.error {
    color: #e30613;
    margin-bottom: 16px;
    font-weight: 700;
}
.images-row {
    display: flex;
    gap: 32px;
    margin-bottom: 24px;
    justify-content: center;
    flex-wrap: wrap;
}
.result-img {
    max-width: 260px;
    border: 2px solid #e30613;
    border-radius: 8px;
    box-shadow: 0 2px 8px #00336622;
    margin-bottom: 8px;
}
.result-label {
    text-align: center;
    font-weight: 700;
    color: #003366;
    margin-bottom: 8px;
}
.download-btn {
    display: inline-block;
    background: #003366;
    color: #fff;
    padding: 8px 18px;
    border-radius: 5px;
    text-decoration: none;
    font-weight: 700;
    margin-right: 10px;
    margin-bottom: 10px;
    transition: background 0.2s;
}
.download-btn:hover {
    background: #e30613;
}
pre.json-output {
    background: #f0f0f0;
    padding: 16px;
    border-radius: 6px;
    font-size: 15px;
    overflow-x: auto;
    color: #222;
    margin-bottom: 12px;
    box-shadow: 0 1px 4px #00336611;
}
.copy-btn {
    background: #e30613;
    color: #fff;
    border: none;
    border-radius: 4px;
    padding: 6px 14px;
    font-size: 14px;
    font-weight: 700;
    cursor: pointer;
    margin-bottom: 10px;
    float: right;
}
.copy-btn:hover {
    background: #003366;
}
.footer {
    text-align: center;
    color: #888;
    font-size: 15px;
    margin: 40px 0 10px 0;
}
@media (max-width: 900px) {
    .container { padding: 16px; }
    .images-row { gap: 12px; }
    .result-img { max-width: 98vw; }
}
app/static/img/logo.png
ADDED
(binary file)
app/templates/index.html
ADDED
@@ -0,0 +1,82 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Car Damage Detection</title>
    <link rel="stylesheet" href="/static/css/style.css">
    <script>
    function copyJSON() {
        const pre = document.getElementById('json-pre');
        if (pre) {
            navigator.clipboard.writeText(pre.innerText);
            alert('JSON copied to clipboard!');
        }
    }
    function toggleJSON() {
        const block = document.getElementById('json-block');
        const btn = document.getElementById('toggle-json-btn');
        if (block.style.display === 'none') {
            block.style.display = 'block';
            btn.innerText = 'Hide JSON';
        } else {
            block.style.display = 'none';
            btn.innerText = 'Show JSON';
        }
    }
    </script>
</head>
<body>
    <div class="header" style="position:relative;">
        <h2 style="margin:0;">Car Damage & Parts Detection</h2>
        <img src="/static/img/logo.png" alt="Logo" class="logo" style="position:absolute; top:18px; right:32px; max-width:110px;">
    </div>
    <div class="container">
        <form method="post" action="/upload" enctype="multipart/form-data">
            <input type="file" name="file" accept="image/*" required>
            <button type="submit">Upload & Analyze</button>
        </form>
        {% if error %}<div class="error">{{ error }}</div>{% endif %}
        {% if result %}
        <div class="results">
            <h3>Results</h3>
            <div class="images-row">
                <div>
                    <div class="result-label">Original</div>
                    <img src="{{ original_image }}" alt="Original Image" class="result-img">
                </div>
                {% if result.damage_image is defined and result.damage_image %}
                <div>
                    <div class="result-label">Damage Prediction</div>
                    <img src="{{ result.damage_image }}" alt="Damage Prediction" class="result-img">
                </div>
                {% endif %}
                {% if result.parts_image is defined and result.parts_image %}
                <div>
                    <div class="result-label">Parts Prediction</div>
                    <img src="{{ result.parts_image }}" alt="Parts Prediction" class="result-img">
                </div>
                {% endif %}
            </div>
            {% if result.json is defined and result.json %}
            <h4 style="margin-bottom:8px;">JSON Output</h4>
            <button class="copy-btn" onclick="copyJSON()" type="button">Copy JSON</button>
            <button class="copy-btn" id="toggle-json-btn" onclick="toggleJSON()" type="button" style="margin-left:10px;">Show JSON</button>
            <div id="json-block" style="display:none;">
                <pre class="json-output" id="json-pre">{{ result.json | tojson(indent=2) }}</pre>
            </div>
            <a class="download-btn" href="{{ result.json_download }}" download>Download JSON</a>
            {% endif %}
            {% if result.damage_image is defined and result.damage_image %}
            <a class="download-btn" href="{{ result.damage_image }}" download>Download Damage Image</a>
            {% endif %}
            {% if result.parts_image is defined and result.parts_image %}
            <a class="download-btn" href="{{ result.parts_image }}" download>Download Parts Image</a>
            {% endif %}
        </div>
        {% endif %}
    </div>
    <div class="footer">
        © {{ 2025 }} RSA/Intact. All rights reserved.
    </div>
</body>
</html>
app/templates/login.html
ADDED
@@ -0,0 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Login - Car Damage Detection</title>
    <link rel="stylesheet" href="/static/css/style.css">
</head>
<body>
    <div class="container">
        <img src="/static/img/logo.png" alt="Logo" class="logo">
        <h2>Login</h2>
        {% if error %}<div class="error">{{ error }}</div>{% endif %}
        <form method="post" action="/login">
            <label for="username">Username:</label>
            <input type="text" id="username" name="username" required><br>
            <label for="password">Password:</label>
            <input type="password" id="password" name="password" required><br>
            <button type="submit">Login</button>
        </form>
    </div>
</body>
</html>
configs/damage_config.yaml
ADDED
@@ -0,0 +1,13 @@
# Example Detectron2 config for damage model (1 class: damage)
_BASE_: "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
MODEL:
  ROI_HEADS:
    NUM_CLASSES: 1
DATASETS:
  TRAIN: ("damage_train",)
  TEST: ("damage_val",)
SOLVER:
  IMS_PER_BATCH: 2
  BASE_LR: 0.00025
  MAX_ITER: 3000
OUTPUT_DIR: "./weights/damage_model"
configs/parts_config.yaml
ADDED
@@ -0,0 +1,13 @@
# Example Detectron2 config for parts model (5 classes)
_BASE_: "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
MODEL:
  ROI_HEADS:
    NUM_CLASSES: 5
DATASETS:
  TRAIN: ("parts_train",)
  TEST: ("parts_val",)
SOLVER:
  IMS_PER_BATCH: 2
  BASE_LR: 0.00025
  MAX_ITER: 3000
OUTPUT_DIR: "./weights/parts_model"
inference/damage_inference.py
ADDED
@@ -0,0 +1,58 @@
# Inference and visualization for YOLOv8 damage segmentation on unseen images
from ultralytics import YOLO
import os
from glob import glob
import sys

def run_inference():  # Get absolute paths
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    model_path = os.path.join(base_dir, 'models', 'damage', 'weights', 'weights', 'best.pt')
    img_dir = os.path.join(base_dir, 'damage_detection_dataset', 'img')
    out_dir = os.path.join(base_dir, 'inference_results', 'damage')

    # Validate paths
    if not os.path.exists(model_path):
        print(f"Error: Model weights not found at {model_path}")
        return

    if not os.path.exists(img_dir):
        print(f"Error: Image directory not found at {img_dir}")
        return

    # Create output directory
    os.makedirs(out_dir, exist_ok=True)

    # Get all images in the dataset
    all_imgs = sorted(glob(os.path.join(img_dir, '*.jpg')))
    if not all_imgs:
        print(f"No images found in {img_dir}")
        return

    try:
        # Load model
        model = YOLO(model_path)

        # Run inference and save results
        for img_path in all_imgs:
            try:
                results = model.predict(
                    source=img_path,
                    save=True,
                    project=out_dir,
                    name='',
                    imgsz=640,
                    conf=0.25
                )
                print(f'Processed: {os.path.basename(img_path)}')
            except Exception as e:
                print(f"Error processing {os.path.basename(img_path)}: {str(e)}")
                continue

        print(f'Inference complete. Results saved to {out_dir}')

    except Exception as e:
        print(f"Error loading model: {str(e)}")
        return

if __name__ == '__main__':
    run_inference()
inference/parts_inference.py
ADDED
@@ -0,0 +1,74 @@
# Inference on unseen images for YOLOv8 parts segmentation
from ultralytics import YOLO
import os
from glob import glob

def run_inference():  # Get absolute paths
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    model_path = os.path.join(base_dir, 'models', 'parts', 'weights', 'weights', 'best.pt')
    img_dir = os.path.join(base_dir, 'damage_detection_dataset', 'img')
    train_dir = os.path.join(base_dir, 'data', 'data_yolo_for_training', 'car_parts_damage_dataset', 'images', 'train')
    out_dir = os.path.join(base_dir, 'inference_results', 'parts')

    # Validate paths
    if not os.path.exists(model_path):
        print(f"Error: Model weights not found at {model_path}")
        return

    if not os.path.exists(img_dir):
        print(f"Error: Image directory not found at {img_dir}")
        return

    if not os.path.exists(train_dir):
        print(f"Warning: Training directory not found at {train_dir}")
        print("Will run inference on all images instead of just unseen ones")
        train_imgs = set()
    else:
        # Get all images used for training
        train_imgs = set(os.listdir(train_dir))

    # Create output directory
    os.makedirs(out_dir, exist_ok=True)

    # Get all images in original dataset
    all_imgs = set(os.listdir(img_dir))
    # Select images not used in training
    unseen_imgs = sorted(list(all_imgs - train_imgs))

    if not unseen_imgs:
        print(f"No images found for inference in {img_dir}")
        return

    try:
        # Load model
        model = YOLO(model_path)

        # Class names for visualization
        class_names = ['headlamp', 'front_bumper', 'hood', 'door', 'rear_bumper']

        # Run inference on each unseen image
        for img_name in unseen_imgs:
            try:
                img_path = os.path.join(img_dir, img_name)
                results = model.predict(
                    source=img_path,
                    save=True,
                    project=out_dir,
                    name='',
                    imgsz=640,
                    conf=0.25,
                    classes=list(range(len(class_names)))  # All classes
                )
                print(f'Processed: {img_name}')
            except Exception as e:
                print(f"Error processing {img_name}: {str(e)}")
                continue

        print(f'Inference complete. Results saved to {out_dir}')

    except Exception as e:
        print(f"Error loading model: {str(e)}")
        return

if __name__ == '__main__':
    run_inference()
requirements.txt
ADDED
@@ -0,0 +1,12 @@
ultralytics
opencv-python
numpy
matplotlib
fastapi
uvicorn
ultralytics
opencv-python
numpy
jinja2
starlette
requests
training/damage/data.yaml
ADDED
@@ -0,0 +1,9 @@
# YAML for YOLOv8 damage segmentation (1 class)
path: E:/AI-ToolStack/Cardamagedetection/data/data_yolo_for_training/car_damage_dataset  # Root directory for dataset
train: images/train  # Train images (relative to 'path')
val: images/val  # Val images (relative to 'path')
test: images/test  # Test images (relative to 'path')

# Classes
names: [damage]  # Class names
nc: 1  # Number of classes
training/damage/train.py
ADDED
@@ -0,0 +1,66 @@
# YOLOv8 segmentation training for car damage detection
from ultralytics import YOLO
import multiprocessing
import os

def train():
    # Start from YOLOv8 medium segmentation model
    model = YOLO('../../models/yolov8m-seg.pt')

    # Get the absolute path to the data.yaml file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_yaml_path = os.path.join(current_dir, 'data.yaml')

    # Train with optimized parameters
    model.train(
        data=data_yaml_path,  # Path to data configuration file
        epochs=150,  # Number of epochs
        imgsz=640,  # Image size
        batch=4,  # Batch size
        workers=4,  # Number of workers
        project='../../models/damage/weights',  # Save directory
        name='yolov8_damage_final',  # Run name

        # Learning rate strategy
        lr0=0.0002,  # Initial learning rate
        lrf=0.000001,  # Final learning rate
        warmup_epochs=25,
        warmup_momentum=0.8,
        cos_lr=True,  # Use cosine learning rate scheduler

        # Loss weights
        box=8.0,  # Box loss gain
        cls=4.0,  # Class loss gain
        dfl=2.5,  # DFL loss gain

        # Augmentation settings
        augment=True,
        mosaic=0.5,
        mixup=0.2,
        copy_paste=0.1,
        degrees=20.0,
        translate=0.2,
        scale=0.4,
        shear=10.0,
        flipud=0.1,
        fliplr=0.5,
        hsv_h=0.015,
        hsv_s=0.7,
        hsv_v=0.4,

        # Other optimization settings
        overlap_mask=True,  # Overlap mask segments
        mask_ratio=4,  # Mask downsampling ratio
        single_cls=True,  # Single class detection
        rect=False,  # Rectangular training
        cache=False,  # Cache images for faster training
        patience=50,  # Early stopping patience
        close_mosaic=10,  # Close mosaic augmentation epochs
        deterministic=True,  # Deterministic mode
        seed=42,  # Random seed
        device=0  # GPU device
    )

if __name__ == '__main__':
    multiprocessing.freeze_support()
    train()
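After a run like this, it is worth checking the segmentation metrics on the validation split before wiring the weights into the app. A minimal sketch, assuming the run directory produced by the project/name settings above (the exact layout depends on how Ultralytics resolves those paths):

    from ultralytics import YOLO

    # Assumed output location of the training run configured above
    best = YOLO('../../models/damage/weights/yolov8_damage_final/weights/best.pt')
    metrics = best.val(data='data.yaml', imgsz=640, split='val')  # box/mask mAP50, mAP50-95, ...
    print(metrics.results_dict)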
training/parts/data.yaml
ADDED
@@ -0,0 +1,15 @@
# YAML for YOLOv8 parts segmentation (5 classes)
path: E:/AI-ToolStack/Cardamagedetection/data/data_yolo_for_training/car_parts_damage_dataset  # Root directory for dataset
train: images/train  # Train images (relative to 'path')
val: images/val  # Val images (relative to 'path')
test: images/test  # Test images (relative to 'path')

# Classes
names:
  0: headlamp
  1: front_bumper
  2: hood
  3: door
  4: rear_bumper

nc: 5  # Number of classes
training/parts/train.py
ADDED
@@ -0,0 +1,66 @@
# YOLOv8 segmentation training for car parts detection
from ultralytics import YOLO
import multiprocessing
import os

def train():
    # Start from YOLOv8 medium segmentation model
    model = YOLO('../../models/yolov8m-seg.pt')

    # Get the absolute path to the data.yaml file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_yaml_path = os.path.join(current_dir, 'data.yaml')

    # Train with optimized parameters for parts detection
    model.train(
        data=data_yaml_path,  # Path to data configuration file
        epochs=100,  # Number of epochs
        imgsz=640,  # Image size
        batch=4,  # Batch size
        workers=4,  # Number of workers
        project='../../models/parts/weights',  # Save directory
        name='yolov8_parts_final',  # Run name

        # Learning rate strategy
        lr0=0.0002,  # Initial learning rate
        lrf=0.000001,  # Final learning rate
        warmup_epochs=20,  # Fewer warmup epochs for parts
        warmup_momentum=0.8,
        cos_lr=True,  # Use cosine learning rate scheduler

        # Loss weights
        box=8.0,  # Box loss gain
        cls=4.0,  # Class loss gain
        dfl=2.5,  # DFL loss gain

        # Augmentation settings
        augment=True,
        mosaic=0.5,
        mixup=0.2,
        copy_paste=0.1,
        degrees=20.0,
        translate=0.2,
        scale=0.4,
        shear=10.0,
        flipud=0.1,
        fliplr=0.5,
        hsv_h=0.015,
        hsv_s=0.7,
        hsv_v=0.4,

        # Other optimization settings
        overlap_mask=True,  # Overlap mask segments
        mask_ratio=4,  # Mask downsampling ratio
        single_cls=False,  # Multiple classes for parts
        rect=False,  # Rectangular training
        cache=False,  # Cache images for faster training
        patience=50,  # Early stopping patience
        close_mosaic=10,  # Close mosaic augmentation epochs
        deterministic=True,  # Deterministic mode
        seed=42,  # Random seed
        device=0  # GPU device
    )

if __name__ == '__main__':
    multiprocessing.freeze_support()
    train()
utils/balance_parts_dataset.py
ADDED
@@ -0,0 +1,95 @@
import json
import os
import shutil
from collections import defaultdict
import random
from tqdm import tqdm

def create_balanced_dataset(source_json, source_img_dir, target_dir, min_samples=50):
    """
    Create a balanced dataset for parts detection by sampling images with different parts.

    Args:
        source_json (str): Path to source COCO JSON file
        source_img_dir (str): Path to source images directory
        target_dir (str): Path to target directory for balanced dataset
        min_samples (int): Minimum number of samples per class
    """
    # Create target directories
    os.makedirs(os.path.join(target_dir, 'images'), exist_ok=True)
    os.makedirs(os.path.join(target_dir, 'labels'), exist_ok=True)

    # Load COCO annotations
    with open(source_json, 'r') as f:
        coco = json.load(f)

    # Group images by parts they contain
    images_by_part = defaultdict(set)
    image_to_anns = defaultdict(list)

    for ann in coco['annotations']:
        img_id = ann['image_id']
        cat_id = ann['category_id']
        images_by_part[cat_id].add(img_id)
        image_to_anns[img_id].append(ann)

    # Find images with balanced representation
    selected_images = set()
    for part_images in images_by_part.values():
        # Sample min_samples images for each part
        sample_size = min(min_samples, len(part_images))
        selected_images.update(random.sample(list(part_images), sample_size))

    # Copy selected images and create labels
    id_to_filename = {img['id']: img['file_name'] for img in coco['images']}

    print(f"Creating balanced dataset with {len(selected_images)} images...")
    for img_id in tqdm(selected_images):
        # Copy image
        src_img = os.path.join(source_img_dir, id_to_filename[img_id])
        dst_img = os.path.join(target_dir, 'images', id_to_filename[img_id])
        shutil.copy2(src_img, dst_img)

        # Create YOLO label
        base_name = os.path.splitext(id_to_filename[img_id])[0]
        label_file = os.path.join(target_dir, 'labels', f"{base_name}.txt")

        # Convert annotations to YOLO format
        anns = image_to_anns[img_id]
        label_lines = []

        # Get image dimensions
        from PIL import Image
        im = Image.open(src_img)
        w, h = im.size

        for ann in anns:
            cat_id = ann['category_id']
            # Convert segmentation to YOLO format
            for seg in ann['segmentation']:
                seg_norm = [str(x/w) if i%2==0 else str(x/h) for i,x in enumerate(seg)]
                label_lines.append(f"{cat_id} {' '.join(seg_norm)}")

        # Write label file
        with open(label_file, 'w') as f:
            f.write('\n'.join(label_lines))

if __name__ == "__main__":
    current_dir = os.path.dirname(os.path.abspath(__file__))
    base_dir = os.path.dirname(current_dir)

    # Process training set
    create_balanced_dataset(
        source_json=os.path.join(base_dir, "damage_detection_dataset", "train", "COCO_mul_train_annos.json"),
        source_img_dir=os.path.join(base_dir, "damage_detection_dataset", "img"),
        target_dir=os.path.join(base_dir, "data", "parts", "balanced", "train"),
        min_samples=50
    )

    # Process validation set
    create_balanced_dataset(
        source_json=os.path.join(base_dir, "damage_detection_dataset", "val", "COCO_mul_val_annos.json"),
        source_img_dir=os.path.join(base_dir, "damage_detection_dataset", "img"),
        target_dir=os.path.join(base_dir, "data", "parts", "balanced", "val"),
        min_samples=10
    )
utils/coco_helpers.py
ADDED
File without changes
utils/matching.py
ADDED
@@ -0,0 +1,19 @@
import numpy as np

def get_overlapping_part(damage_mask, part_masks, part_labels):
    max_iou = 0
    matched_part = None
    for mask, label in zip(part_masks, part_labels):
        iou = np.sum(np.logical_and(damage_mask, mask)) / np.sum(np.logical_or(damage_mask, mask))
        if iou > max_iou:
            max_iou = iou
            matched_part = label
    return matched_part, max_iou

def estimate_severity(area, part_label):
    if area < 2000:
        return "minor"
    elif area < 8000:
        return "moderate"
    else:
        return "severe" if part_label in ["front_bumper", "hood", "rear_bumper"] else "moderate"
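Neither helper is wired into the router yet; a minimal sketch of how they could be combined, using small synthetic boolean masks purely for illustration:

    import numpy as np
    from utils.matching import get_overlapping_part, estimate_severity

    # Toy 200x200 masks: one damage blob and two candidate part masks
    damage_mask = np.zeros((200, 200), dtype=bool)
    damage_mask[50:120, 60:140] = True
    hood_mask = np.zeros_like(damage_mask)
    hood_mask[40:130, 50:150] = True
    door_mask = np.zeros_like(damage_mask)
    door_mask[150:200, 0:50] = True

    part, iou = get_overlapping_part(damage_mask, [hood_mask, door_mask], ["hood", "door"])
    severity = estimate_severity(int(damage_mask.sum()), part)  # pixel area of the damage blob
    print(part, round(iou, 3), severity)  # expected: hood, ~0.62, moderate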
utils/organize_complete_datasets.py
ADDED
@@ -0,0 +1,220 @@
import os
import shutil
import json
from collections import defaultdict
import random
from tqdm import tqdm
from PIL import Image

def convert_coco_to_yolo(coco_json_path, images_dir, output_dir, class_map, split='train'):
    """Convert COCO format annotations to YOLO format"""
    if not os.path.exists(coco_json_path):
        print(f"Warning: JSON file not found: {coco_json_path}")
        return set()

    if not os.path.exists(images_dir):
        print(f"Warning: Images directory not found: {images_dir}")
        return set()

    print(f"\nProcessing {split} split...")

    # Create output directories
    labels_dir = os.path.join(output_dir, 'labels', split)
    images_dir_out = os.path.join(output_dir, 'images', split)
    os.makedirs(labels_dir, exist_ok=True)
    os.makedirs(images_dir_out, exist_ok=True)

    # Load COCO annotations
    try:
        with open(coco_json_path, 'r') as f:
            coco = json.load(f)
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON file: {coco_json_path}")
        return set()

    # Create id to filename mapping
    id_to_filename = {img['id']: img['file_name'] for img in coco['images']}

    # Group annotations by image
    img_to_anns = defaultdict(list)
    for ann in coco['annotations']:
        img_to_anns[ann['image_id']].append(ann)

    # Process each image
    processed_images = set()
    for img_id, anns in tqdm(img_to_anns.items(), desc=f"Converting {split} set"):
        img_file = id_to_filename[img_id]
        img_path = os.path.join(images_dir, img_file)

        if not os.path.exists(img_path):
            print(f"Warning: Image {img_path} not found, skipping...")
            continue

        try:
            # Copy image
            shutil.copy2(img_path, os.path.join(images_dir_out, img_file))

            # Get image dimensions
            with Image.open(img_path) as im:
                w, h = im.size

            # Convert annotations
            label_lines = []
            for ann in anns:
                cat_id = ann['category_id']
                if cat_id not in class_map:
                    print(f"Warning: Unknown category ID {cat_id} in {img_file}")
                    continue
                yolo_cls = class_map[cat_id]

                # Convert segmentation points
                for seg in ann['segmentation']:
                    coords = [str(x/w) if i%2==0 else str(x/h) for i,x in enumerate(seg)]
                    label_lines.append(f"{yolo_cls} {' '.join(coords)}")

            # Write label file
            label_file = os.path.join(labels_dir, os.path.splitext(img_file)[0] + '.txt')
            with open(label_file, 'w') as f:
                f.write('\n'.join(label_lines))

            processed_images.add(img_id)

        except (IOError, OSError) as e:
            print(f"Error processing {img_file}: {str(e)}")
            continue

    return processed_images

def create_balanced_dataset(source_json, images_dir, output_dir, class_map, min_samples=50, split='train'):
    """Create balanced dataset by sampling equal number of images per class"""
    print(f"\nCreating balanced dataset for {split} split...")

    # Create output directories
    labels_dir = os.path.join(output_dir, 'labels', split)
    images_dir_out = os.path.join(output_dir, 'images', split)
    os.makedirs(labels_dir, exist_ok=True)
    os.makedirs(images_dir_out, exist_ok=True)

    # Load COCO annotations
    with open(source_json, 'r') as f:
        coco = json.load(f)

    # Group images by parts they contain
    images_by_part = defaultdict(set)
    image_to_anns = defaultdict(list)

    for ann in coco['annotations']:
        img_id = ann['image_id']
        cat_id = ann['category_id']
        images_by_part[cat_id].add(img_id)
        image_to_anns[img_id].append(ann)

    # Sample images for balanced dataset
    selected_images = set()
    for part_images in images_by_part.values():
        sample_size = min(min_samples, len(part_images))
        selected_images.update(random.sample(list(part_images), sample_size))

    # Convert selected images to YOLO format
    id_to_filename = {img['id']: img['file_name'] for img in coco['images']}

    print(f"Processing {len(selected_images)} images for balanced {split} set...")
    for img_id in tqdm(selected_images):
        img_file = id_to_filename[img_id]
        img_path = os.path.join(images_dir, img_file)

        if not os.path.exists(img_path):
            print(f"Warning: Image {img_path} not found, skipping...")
            continue

        # Copy image
        shutil.copy2(img_path, os.path.join(images_dir_out, img_file))

        # Get image dimensions
        with Image.open(img_path) as im:
            w, h = im.size

        # Convert annotations
        label_lines = []
        for ann in image_to_anns[img_id]:
            cat_id = ann['category_id']
            yolo_cls = class_map[cat_id]

            # Convert segmentation points
            for seg in ann['segmentation']:
                coords = [str(x/w) if i%2==0 else str(x/h) for i,x in enumerate(seg)]
                label_lines.append(f"{yolo_cls} {' '.join(coords)}")

        # Write label file
        label_file = os.path.join(labels_dir, os.path.splitext(img_file)[0] + '.txt')
        with open(label_file, 'w') as f:
            f.write('\n'.join(label_lines))

def main():
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    source_dir = os.path.join(base_dir, 'damage_detection_dataset')

    if not os.path.exists(source_dir):
        print(f"Error: Source directory not found: {source_dir}")
        return

    # Set up output directories
    car_damage_dir = os.path.join(base_dir, 'data', 'data_yolo_for_training', 'car_damage_dataset')
    car_parts_dir = os.path.join(base_dir, 'data', 'data_yolo_for_training', 'car_parts_damage_dataset')

    # Class mappings
    damage_class_map = {1: 0}  # Assuming damage is class 1 in COCO format
    parts_class_map = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}  # headlamp, front_bumper, hood, door, rear_bumper

    # Process car damage dataset (full dataset)
    print("\nProcessing Car Damage Dataset...")
    for split in ['train', 'val', 'test']:
        json_name = 'COCO_train_annos.json' if split == 'train' else 'COCO_val_annos.json'
        json_path = os.path.join(source_dir, split, json_name)
        images_dir = os.path.join(source_dir, split)

        if os.path.exists(json_path):
            convert_coco_to_yolo(
                json_path,
                images_dir,
                car_damage_dir,
                damage_class_map,
                split
            )
        else:
            print(f"Warning: JSON file not found for {split} split: {json_path}")

    # Process car parts dataset (balanced training, original val/test)
    print("\nProcessing Car Parts Dataset...")
    # Training set - balanced
    train_json = os.path.join(source_dir, 'train', 'COCO_mul_train_annos.json')
    if os.path.exists(train_json):
        create_balanced_dataset(
            train_json,
            os.path.join(source_dir, 'train'),
            car_parts_dir,
            parts_class_map,
            min_samples=50,
            split='train'
        )
    else:
        print(f"Warning: Training JSON file not found: {train_json}")

    # Validation and test sets - original
    for split in ['val', 'test']:
        json_path = os.path.join(source_dir, split, 'COCO_mul_val_annos.json')
        images_dir = os.path.join(source_dir, split)

        if os.path.exists(json_path):
            convert_coco_to_yolo(
                json_path,
                images_dir,
                car_parts_dir,
                parts_class_map,
                split
            )
        else:
            print(f"Warning: JSON file not found for {split} split: {json_path}")

if __name__ == '__main__':
    main()
utils/visualization.py
ADDED
File without changes