# Import necessary libraries
import os
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification
import gradio as gr
import openai
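
# Assumed runtime dependencies (not pinned in the original file): torch, transformers,
# gradio, openai>=1.0 (required for the openai.OpenAI client used below), and pillow;
# a requirements.txt listing these is expected for the Space to build.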

# Load the Hugging Face model for car damage detection
model_name = "beingamit99/car_damage_detection"
processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForImageClassification.from_pretrained(model_name)

# Read the OpenAI API key from the environment (the OpenAI4oMini secret on Hugging Face Spaces)
openai_api_key = os.getenv("OpenAI4oMini")

# Validate API Key
if openai_api_key is None:
    raise ValueError("OpenAI API key is not set. Make sure to set the OpenAI4oMini secret in Hugging Face.")

# Initialize OpenAI Client
client = openai.OpenAI(api_key=openai_api_key)

# Dropdown Options
car_companies = ["Select", "Toyota", "Honda", "Ford", "BMW", "Mercedes", "Audi", "Hyundai", "Kia", "Nissan"]
car_models = [
    "Select",          # Default option
    "Corolla", "Camry", "RAV4", "Highlander",          # Toyota
    "Civic", "Accord", "CR-V", "Pilot",                # Honda
    "Fiesta", "Focus", "Explorer", "Mustang",           # Ford
    "3 Series", "5 Series", "X3", "X5",                # BMW
    "C-Class", "E-Class", "GLC", "GLE",                # Mercedes
    "A3", "A4", "Q5", "Q7",                           # Audi
    "Elantra", "Sonata", "Tucson", "Santa Fe",          # Hyundai
    "Rio", "Optima", "Sportage", "Sorento",             # Kia
    "Sentra", "Altima", "Rogue", "Murano"               # Nissan
]

years = [str(year) for year in range(2000, 2025)]
countries = ["Select", "Pakistan", "USA", "UK", "Canada", "Australia", "Germany", "India", "Japan"]

# Function to Estimate Repair Cost using GPT-4o mini
def estimate_repair_cost(damage_type, company, car_model, year, country):
    prompt = (
        f"Estimate the repair cost for {damage_type} on a {year} {company} {car_model} in {country}. "
        f"Provide the approximate total cost in local currency with your confidence level, concisely in 2 lines."
    )

    try:
        # Using client for API call
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are an expert in car repair cost estimation."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
            max_tokens=100
        )
        # Correctly access the response content
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"Error in GPT-4.0 API call: {e}")
        return f"Error: {e}"

# Function to Detect Car Damage from Image using Hugging Face Model
def detect_damage(image):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    confidences, predicted_class = torch.max(probs, dim=-1)
    predicted_label = model.config.id2label[predicted_class.item()]
    return predicted_label, confidences.item()
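
# For reference, the classifier's full label set can be inspected via
# model.config.id2label (e.g. print(model.config.id2label)); the exact labels
# depend on the beingamit99/car_damage_detection checkpoint.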

# Function to Process Image and Get Results
def process_image(image, company, car_model, year, country):
    # Guard against a missing image upload
    if image is None:
        return {"Error": "Please upload an image of the damaged car before submitting."}
    damage_type, confidence = detect_damage(image)
    cost_estimate = estimate_repair_cost(damage_type, company, car_model, year, country)
    
    result = {
        "Major Detected Damage": damage_type,
        "Confidence": f"{confidence * 100:.2f}%",
        "Estimated Repair Cost": cost_estimate
    }
    return result

# Gradio Interface
with gr.Blocks() as interface:
    gr.Markdown("# Car Damage Detection and Cost Estimation")
    gr.Markdown("Upload an image of a damaged car to detect the type of damage and estimate the repair cost.")
    
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload Car Image")
            company_input = gr.Dropdown(choices=car_companies, label="Car Company", value="Select")
            model_input = gr.Dropdown(choices=car_models, label="Car Model", value="Select")
            year_input = gr.Dropdown(choices=years, label="Year of Manufacture", value=years[-1])
            country_input = gr.Dropdown(choices=countries, label="Your Country", value="Select")

    submit_button = gr.Button("Estimate Repair Cost")
    output = gr.JSON(label="Result")

    submit_button.click(process_image, inputs=[image_input, company_input, model_input, year_input, country_input], outputs=output)

# Launch the Gradio Interface
interface.launch()
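
# Note: when running locally (outside a Hugging Face Space), a temporary public URL
# can be obtained with interface.launch(share=True).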