Update app.py
app.py
CHANGED
@@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 from sklearn.preprocessing import LabelEncoder
 from huggingface_hub import hf_hub_download
-import
+import torch
 from transformers import ViTImageProcessor, ViTForImageClassification
 
 # Dataset loading function with caching
@@ -17,35 +17,45 @@ from transformers import ViTImageProcessor, ViTForImageClassification
 def load_datasets():
     try:
         with st.spinner('Loading dataset...'):
-            # Load the CSV content
             original_data = pd.read_csv('CTP_Model1.csv', low_memory=False)
-
-            # Ensure column names match the model's expectations
             original_data.columns = original_data.columns.str.strip().str.capitalize()
             return original_data
     except Exception as e:
         st.error(f"Error loading dataset: {str(e)}")
         raise e
 
-# Function definitions
-
 def load_image(image_file):
     return Image.open(image_file)
 
-
 def classify_image(image):
-
-
-
-
-
-
-
-
-
-
+    try:
+        processor = ViTImageProcessor.from_pretrained("dima806/car_models_image_detection")
+        model = ViTForImageClassification.from_pretrained("dima806/car_models_image_detection")
+
+        inputs = processor(images=image, return_tensors="pt")
+
+        with torch.no_grad():
+            outputs = model(**inputs)
+
+        logits = outputs.logits
+        probabilities = torch.softmax(logits, dim=-1)
+
+        top_predictions = torch.topk(probabilities, k=3)
+
+        predicted_classes = [
+            {
+                'label': model.config.id2label[idx.item()],
+                'probability': prob.item()
+            }
+            for idx, prob in zip(top_predictions.indices[0], top_predictions.values[0])
+        ]
+
+        return predicted_classes
 
-
+    except Exception as e:
+        st.error(f"Classification error: {e}")
+        return None
+
 def find_closest_match(df, brand, model):
     match = df[(df['Make'].str.contains(brand, case=False)) & (df['Model'].str.contains(model, case=False))]
     if not match.empty:
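As a sanity check on the new classify_image path, the same ViT calls can be exercised outside Streamlit. This is a minimal sketch, not part of the commit; it mirrors the hunk above, and test_car.jpg is a placeholder path.

```python
from PIL import Image
import torch
from transformers import ViTImageProcessor, ViTForImageClassification

# Same checkpoint as in the diff; downloaded from the Hub on first use.
processor = ViTImageProcessor.from_pretrained("dima806/car_models_image_detection")
model = ViTForImageClassification.from_pretrained("dima806/car_models_image_detection")

image = Image.open("test_car.jpg")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

probs = torch.softmax(logits, dim=-1)
top = torch.topk(probs, k=3)
for idx, prob in zip(top.indices[0], top.values[0]):
    print(model.config.id2label[idx.item()], f"{prob.item():.2%}")
```

Note that the hunk instantiates the processor and model inside classify_image, so they are reloaded on every captured photo; wrapping the two from_pretrained calls in a helper decorated with st.cache_resource is the usual Streamlit way to avoid that.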
@@ -66,7 +76,6 @@ def load_model_and_encodings():
         model_content = hf_hub_download(repo_id="EdBoy2202/car_prediction_model", filename="car_price_modelv3.pkl")
         model = joblib.load(model_content)
 
-        # Load datasets
         original_data = load_datasets()
 
         label_encoders = {}
@@ -86,19 +95,13 @@ def load_model_and_encodings():
         raise e
 
 def predict_price(model, encoders, user_input):
-    # Transform user input into model input format
     encoded_features = {feature: encoders[feature].transform([value])[0] if value in encoders[feature] else 0
                         for feature, value in user_input.items()}
-
-    # Create a DataFrame for prediction
     input_data = pd.DataFrame([encoded_features])
-
-    # Predict price
     predicted_price = model.predict(input_data)
     return predicted_price[0]
 
 # Streamlit App
-
 st.title("Auto Appraise")
 st.write("Capture a car image using your camera or upload an image to get its brand, model, overview, and expected price!")
 
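One thing this hunk leaves untouched: the membership test `value in encoders[feature]` checks against the encoder object itself. Assuming the entries in encoders are sklearn LabelEncoder instances (as the imports suggest), that expression raises TypeError instead of falling back to 0; checking against the encoder's classes_ array is the usual pattern. A sketch of that variant, not part of the commit:

```python
import pandas as pd

def predict_price(model, encoders, user_input):
    # Membership is tested against classes_, since a LabelEncoder object
    # itself is not a container and `value in encoder` raises TypeError.
    encoded_features = {
        feature: encoders[feature].transform([value])[0]
        if value in encoders[feature].classes_ else 0
        for feature, value in user_input.items()
    }
    input_data = pd.DataFrame([encoded_features])
    return model.predict(input_data)[0]
```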
@@ -106,8 +109,7 @@ st.write("Capture a car image using your camera or upload an image to get its br
 model, label_encoders = load_model_and_encodings()
 
 # Initialize OpenAI API key
-openai.api_key = st.secrets["GPT_TOKEN"]
-HUGGINGFACE_API_KEY = st.secrets["HF_TOKEN"]  # Your Hugging Face API key
+openai.api_key = st.secrets["GPT_TOKEN"]
 
 # Camera input for taking photo
 camera_image = st.camera_input("Take a picture of the car!")
@@ -117,13 +119,23 @@ if camera_image is not None:
     st.image(image, caption='Captured Image.', use_container_width=True)
 
     # Classify the car image
-    with st.spinner('
-
+    with st.spinner('Analyzing image...'):
+        car_classifications = classify_image(image)
 
-    if
-    st.
+    if car_classifications:
+        st.subheader("Car Classification Results:")
+        for classification in car_classifications:
+            st.write(f"Model: {classification['label']}")
+            st.write(f"Confidence: {classification['probability']*100:.2f}%")
+
+        # Use the top prediction for further processing
+        top_prediction = car_classifications[0]['label']
+        brand, model_name = top_prediction.split(' ', 1)
+
+        st.write(f"Identified Car: {brand} {model_name}")
 
         # Find the closest match in the CSV
+        df = load_datasets()
         match = find_closest_match(df, brand, model_name)
         if match is not None:
             st.write("Closest Match Found:")
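The new `brand, model_name = top_prediction.split(' ', 1)` assumes every label from dima806/car_models_image_detection contains a space; a single-token label would make the two-target unpacking raise ValueError. A defensive variant, purely illustrative:

```python
top_prediction = car_classifications[0]['label']
parts = top_prediction.split(' ', 1)
brand = parts[0]
model_name = parts[1] if len(parts) > 1 else ""  # tolerate single-token labels
```

Separately, the `df = load_datasets()` added here re-reads CTP_Model1.csv on every Streamlit rerun; decorating load_datasets with @st.cache_data would make it match the "with caching" comment at the top of the file.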
@@ -172,6 +184,6 @@ if camera_image is not None:
         else:
             st.write("No match found in the database.")
     else:
-        st.error("Could not
+        st.error("Could not classify the image. Please try again with a different image.")
 else:
     st.write("Please take a picture of the car to proceed.")