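"""Auto Appraise: a Streamlit app that captures or uploads a car photo, identifies the
make and model with a ViT image classifier, looks up the closest listing in a CSV
dataset, asks GPT-3.5-turbo for an overview, and plots predicted prices over a range
of years."""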
import streamlit as st
import pandas as pd
import openai
import joblib
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import LabelEncoder
from huggingface_hub import hf_hub_download
import base64
from transformers import ViTImageProcessor, ViTForImageClassification
import torch
# Dataset loading function with caching
@st.cache_data
def load_datasets():
    try:
        with st.spinner('Loading dataset...'):
            # Load the CSV content
            original_data = pd.read_csv('CTP_Model1.csv', low_memory=False)
            # Ensure column names match the model's expectations
            original_data.columns = original_data.columns.str.strip().str.capitalize()
            return original_data
    except Exception as e:
        st.error(f"Error loading dataset: {str(e)}")
        raise e
# Function definitions
def load_image(image_file):
    return Image.open(image_file)
def classify_image(image):
    # Download the pretrained ViT car classifier from the Hugging Face Hub
    processor = ViTImageProcessor.from_pretrained("dima806/car_models_image_detection")
    model = ViTForImageClassification.from_pretrained("dima806/car_models_image_detection")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class_idx = logits.argmax(-1).item()
    # Map the highest-scoring class index back to its human-readable label
    return model.config.id2label[predicted_class_idx]
def find_closest_match(df, brand, model):
    # Case-insensitive substring match on make and model; na=False treats missing values as non-matches
    match = df[(df['Make'].str.contains(brand, case=False, na=False)) & (df['Model'].str.contains(model, case=False, na=False))]
    if not match.empty:
        return match.iloc[0]
    return None
def get_car_overview(car_data):
    prompt = f"Provide an overview of the following car:\nYear: {car_data['Year']}\nMake: {car_data['Make']}\nModel: {car_data['Model']}\nTrim: {car_data['Trim']}\nPrice: ${car_data['Price']}\nCondition: {car_data['Condition']}\n"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response.choices[0].message['content']
def load_model_and_encodings():
    try:
        with st.spinner('Loading model...'):
            model_content = hf_hub_download(repo_id="EdBoy2202/car_prediction_model", filename="car_price_modelv3.pkl")
            model = joblib.load(model_content)
            # Load datasets
            original_data = load_datasets()
            label_encoders = {}
            categorical_features = ['Make', 'Model', 'Condition', 'Fuel', 'Title_status',
                                    'Transmission', 'Drive', 'Size', 'Type', 'Paint_color']
            for feature in categorical_features:
                if feature in original_data.columns:
                    le = LabelEncoder()
                    unique_values = original_data[feature].fillna('unknown').str.strip().unique()
                    le.fit(unique_values)
                    label_encoders[feature.lower()] = le
            return model, label_encoders
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        raise e
def predict_price(model, encoders, user_input):
    # Transform user input into model input format. Encoder keys are lower-case,
    # unseen categories fall back to 0, and numeric features (e.g. Year) pass through unchanged.
    encoded_features = {}
    for feature, value in user_input.items():
        key = feature.lower()
        if key in encoders:
            le = encoders[key]
            encoded_features[feature] = le.transform([value])[0] if value in le.classes_ else 0
        else:
            encoded_features[feature] = value
    # Create a DataFrame for prediction
    input_data = pd.DataFrame([encoded_features])
    # Predict price
    predicted_price = model.predict(input_data)
    return predicted_price[0]
# Streamlit App
st.title("Auto Appraise")
st.write("Capture a car image using your camera or upload an image to get its brand, model, overview, and expected price!")
# Load model and encoders
model, label_encoders = load_model_and_encodings()
# Initialize OpenAI API key
openai.api_key = st.secrets["GPT_TOKEN"] # Your OpenAI API key
HUGGINGFACE_API_KEY = st.secrets["HF_TOKEN"] # Your Hugging Face API key
# Camera input for taking photo
camera_image = st.camera_input("Take a picture of the car!")
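# Fallback for the upload option mentioned in the intro text: accept an uploaded image
# when no camera shot is taken, and feed it into the same pipeline below
uploaded_image = st.file_uploader("...or upload a picture of the car", type=["jpg", "jpeg", "png"])
if camera_image is None and uploaded_image is not None:
    camera_image = uploaded_image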
if camera_image is not None:
    image = load_image(camera_image)
    st.image(image, caption='Captured Image.', use_container_width=True)
    # Classify the car image
    with st.spinner('Classifying image...'):
        car_info = classify_image(image)
    if car_info:
        st.write(f"Identified Car: {car_info}")
        # The classifier label is assumed to start with the make followed by the model
        # name (e.g. "Toyota Corolla"); split it into the two pieces used below
        parts = car_info.split(' ', 1)
        brand = parts[0]
        model_name = parts[1] if len(parts) > 1 else parts[0]
        # Find the closest match in the CSV
        df = load_datasets()
        match = find_closest_match(df, brand, model_name)
        if match is not None:
            st.write("Closest Match Found:")
            st.write(match)
            # Get additional information using GPT-3.5-turbo
            overview = get_car_overview(match)
            st.write("Car Overview:")
            st.write(overview)
            # Interactive Price Prediction
            st.subheader("Price Prediction Over Time")
            selected_years = st.slider("Select range of years for price prediction",
                                       min_value=2000, max_value=2023, value=(2010, 2023))
            years = np.arange(selected_years[0], selected_years[1] + 1)
            predicted_prices = []
            for year in years:
                user_input = {
                    'Make': brand,
                    'Model': model_name,
                    'Condition': match['Condition'],
                    'Fuel': match['Fuel'],
                    'Title_status': match['Title_status'],
                    'Transmission': match['Transmission'],
                    'Drive': match['Drive'],
                    'Size': match['Size'],
                    'Type': match['Type'],
                    'Paint_color': match['Paint_color'],
                    'Year': year
                }
                price = predict_price(model, label_encoders, user_input)
                predicted_prices.append(price)
            # Plotting the results
            plt.figure(figsize=(10, 5))
            plt.plot(years, predicted_prices, marker='o')
            plt.title(f"Predicted Price of {brand} {model_name} Over Time")
            plt.xlabel("Year")
            plt.ylabel("Predicted Price ($)")
            plt.grid()
            st.pyplot(plt)
        else:
            st.write("No match found in the database.")
    else:
        st.error("Could not identify the brand or model. Please try again.")
else:
    st.write("Please take a picture of the car to proceed.")