import streamlit as st
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch
from datetime import datetime
import openai
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import LabelEncoder

# Initialize OpenAI API key
openai.api_key = st.secrets["GPT_TOKEN"]
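# st.secrets["GPT_TOKEN"] assumes a GPT_TOKEN entry is configured in the app's secrets
# (e.g. a local .streamlit/secrets.toml, or the host's secret settings); without it this
# line raises a KeyError at startup.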
# Function to classify the car image using pre-trained model
def classify_image(image):
    try:
        # Load the model and feature extractor
        model_name = "dima806/car_models_image_detection"
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
        model = AutoModelForImageClassification.from_pretrained(model_name)

        # Preprocess the image
        inputs = feature_extractor(images=image, return_tensors="pt")

        # Perform inference
        with torch.no_grad():
            outputs = model(**inputs)

        # Get the predicted class
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()

        # Get the class label and score
        predicted_class_label = model.config.id2label[predicted_class_idx]
        score = torch.nn.functional.softmax(logits, dim=-1)[0, predicted_class_idx].item()

        # Return the top prediction
        return [{'label': predicted_class_label, 'score': score}]
    except Exception as e:
        st.error(f"Classification error: {e}")
        return None
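# Optional refinement, not wired into classify_image above: reloading the feature extractor
# and model on every call is slow. A cached loader like the sketch below (assuming a Streamlit
# version that provides st.cache_resource) would keep a single copy for the whole session.
@st.cache_resource
def load_classifier(model_name="dima806/car_models_image_detection"):
    # Downloaded/initialized once, then reused across reruns
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
    model = AutoModelForImageClassification.from_pretrained(model_name)
    return feature_extractor, model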
# Function to get an overview of the car using OpenAI
def get_car_overview(brand, model, year):
    prompt = f"Provide an overview of the following car:\nYear: {year}\nMake: {brand}\nModel: {model}\n"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message['content']
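# Note: openai.ChatCompletion.create is the pre-1.0 openai-python interface, so this file
# assumes a pin such as openai<1.0. A rough equivalent for openai>=1.0 (a sketch, not wired
# into the app) would be:
#
#   from openai import OpenAI
#   client = OpenAI(api_key=st.secrets["GPT_TOKEN"])
#   response = client.chat.completions.create(
#       model="gpt-3.5-turbo",
#       messages=[{"role": "user", "content": prompt}],
#   )
#   overview = response.choices[0].message.content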
# Load and preprocess the car data once (globally for the session)
def load_car_data():
    try:
        df = pd.read_csv('CTP_Model1.csv')  # Replace with the path to your actual CSV file
        return df
    except Exception as e:
        st.error(f"Error loading CSV file: {e}")
        return None
# Preprocess car data and encode categorical features
def preprocess_car_data(df):
    label_encoders = {}
    # Encode categorical columns (make, model, trim, fuel, title_status, etc.)
    # Cast to str first so missing values become a plain category instead of raising a type error
    for col in ['make', 'model', 'trim', 'fuel', 'title_status', 'transmission', 'drive', 'size', 'type', 'paint_color']:
        le = LabelEncoder()
        df[col] = le.fit_transform(df[col].astype(str))
        label_encoders[col] = le
    # Handle remaining NaN values (numeric columns) by filling them with a placeholder
    df.fillna(-1, inplace=True)
    return df, label_encoders
# Calculate similarity between the classified car and entries in the CSV
def find_closest_car(df, label_encoders, make, model, year):
    # Encode the user-provided make and model
    make_encoded = label_encoders['make'].transform([make])[0]
    model_encoded = label_encoders['model'].transform([model])[0]

    # Create a feature vector for the classified car (make, model, year)
    classified_car_vector = np.array([make_encoded, model_encoded, year]).reshape(1, -1)

    # Prepare the data for similarity calculation
    feature_columns = ['make', 'model', 'year']
    df_feature_vectors = df[feature_columns].values

    # Handle NaN values before calculating similarity
    df_feature_vectors = np.nan_to_num(df_feature_vectors)  # Converts NaN to 0

    # Compute cosine similarity between the classified car and all entries in the CSV
    similarity_scores = cosine_similarity(classified_car_vector, df_feature_vectors)

    # Get the index of the closest match
    closest_match_idx = similarity_scores.argmax()

    # Return the closest match details
    return df.iloc[closest_match_idx]
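# Caveats worth noting: label_encoders[...].transform raises a ValueError when the predicted
# make/model does not appear in the CSV, and cosine similarity on raw [make_code, model_code, year]
# vectors is dominated by the largest magnitudes (typically the year). One possible refinement
# (a sketch, assuming the same three feature columns) is to scale features before comparing:
#
#   from sklearn.preprocessing import MinMaxScaler
#   scaler = MinMaxScaler()
#   scaled_db = scaler.fit_transform(df[feature_columns].values)
#   scaled_query = scaler.transform(classified_car_vector)
#   similarity_scores = cosine_similarity(scaled_query, scaled_db)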
# Streamlit App
st.title("Auto Appraise")
st.write("Upload a car image or take a picture to get its brand, model, and overview!")

# Initialize session_state image attribute if it doesn't exist
if 'image' not in st.session_state:
    st.session_state.image = None

# File uploader for image
uploaded_file = st.file_uploader("Choose a car image", type=["jpg", "jpeg", "png"])

# Camera input as an alternative (optional)
camera_image = st.camera_input("Or take a picture of the car")
# Process the image (either uploaded or from camera)
if uploaded_file is not None:
    st.write("Attempting to open uploaded file...")
    try:
        st.session_state.image = Image.open(uploaded_file)
        st.write("Image uploaded successfully.")
    except Exception as e:
        st.error(f"Error opening uploaded file: {str(e)}")
elif camera_image is not None:
    st.write("Attempting to open camera image...")
    try:
        st.session_state.image = Image.open(camera_image)
        st.write("Image captured successfully.")
    except Exception as e:
        st.error(f"Error opening camera image: {str(e)}")
# Display the processed image
if st.session_state.image is not None:
    st.image(st.session_state.image, caption='Processed Image', use_container_width=True)

    current_year = datetime.now().year

    # Classify the car image
    with st.spinner('Analyzing image...'):
        car_classifications = classify_image(st.session_state.image)

    if car_classifications:
        st.write("Image classification successful.")
        st.subheader("Car Classification Results:")

        # Separate make and model from the top classification result
        top_prediction = car_classifications[0]['label']
        make_name, model_name = top_prediction.split(' ', 1)

        col1, col2 = st.columns(2)
        col1.metric("Identified Car Make", make_name)
        col2.metric("Identified Car Model", model_name)
        st.caption(f"Confidence: {car_classifications[0]['score'] * 100:.2f}%")

        # Find the closest match in the CSV based on the classification
        car_data = load_car_data()
        if car_data is not None:
            processed_car_data, label_encoders = preprocess_car_data(car_data)
            closest_car = find_closest_car(processed_car_data, label_encoders, make_name, model_name, current_year)

            a, b, c, d = st.columns(4)
            e, f, g = st.columns(3)
            a.metric("Year", int(closest_car['year']))
            b.metric("Price", closest_car['price'])
            c.metric("Condition", closest_car['condition'])
            # Decode the label-encoded columns back to readable strings for display
            d.metric("Fuel", label_encoders['fuel'].inverse_transform([closest_car['fuel']])[0])
            e.metric("Transmission", label_encoders['transmission'].inverse_transform([closest_car['transmission']])[0])
            f.metric("Drive", label_encoders['drive'].inverse_transform([closest_car['drive']])[0])
            g.metric("Type", label_encoders['type'].inverse_transform([closest_car['type']])[0])

        st.divider()

        # Get additional information using GPT-3.5-turbo
        overview = get_car_overview(make_name, model_name, current_year)
        st.subheader("Car Overview:")
        st.write(overview)
    else:
        st.error("Could not classify the image. Please try again with a different image.")
else:
    st.write("Please upload an image or take a picture to proceed.")