EdBoy2202 committed (verified)
Commit 63dce96
Parent: 7dea0d9

Update app.py

Files changed (1)
  1. app.py +64 -89
app.py CHANGED
@@ -1,111 +1,86 @@
- import streamlit as st
- from PIL import Image
- from transformers import AutoFeatureExtractor, AutoModelForImageClassification
- import torch
- from datetime import datetime
- import openai

- # Initialize OpenAI API key
- openai.api_key = st.secrets["GPT_TOKEN"]
-
- # Function to classify the car image using pre-trained model
- def classify_image(image):
      try:
-         # Load the model and feature extractor
-         model_name = "dima806/car_models_image_detection"
-         feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
-         model = AutoModelForImageClassification.from_pretrained(model_name)
-
-         # Preprocess the image
-         inputs = feature_extractor(images=image, return_tensors="pt")
-
-         # Perform inference
-         with torch.no_grad():
-             outputs = model(**inputs)
-
-         # Get the predicted class
-         logits = outputs.logits
-         predicted_class_idx = logits.argmax(-1).item()
-
-         # Get the class label and score
-         predicted_class_label = model.config.id2label[predicted_class_idx]
-         score = torch.nn.functional.softmax(logits, dim=-1)[0, predicted_class_idx].item()
-
-         # Return the top prediction
-         return [{'label': predicted_class_label, 'score': score}]
-
      except Exception as e:
-         st.error(f"Classification error: {e}")
          return None

- # Function to get an overview of the car using OpenAI
- def get_car_overview(brand, model, year):
-     prompt = f"Provide an overview of the following car:\nYear: {year}\nMake: {brand}\nModel: {model}\n"
-     response = openai.ChatCompletion.create(
-         model="gpt-3.5-turbo",
-         messages=[{"role": "user", "content": prompt}]
-     )
-     return response.choices[0].message['content']
-
- # Streamlit App
- st.title("Auto Appraise")
- st.write("Upload a car image or take a picture to get its brand, model, and overview!")

- # Get the session state
- if 'image' not in st.session_state:
-     st.session_state.image = None

- # File uploader for image
- uploaded_file = st.file_uploader("Choose a car image", type=["jpg", "jpeg", "png"])

- # Camera input as an alternative (optional)
- camera_image = st.camera_input("Or take a picture of the car")

- # Process the image (either uploaded or from camera)
- if uploaded_file is not None:
-     st.write("Attempting to open uploaded file...")
-     try:
-         st.session_state.image = Image.open(uploaded_file)
-         st.write("Image uploaded successfully.")
-     except Exception as e:
-         st.error(f"Error opening uploaded file: {str(e)}")
- elif camera_image is not None:
-     st.write("Attempting to open camera image...")
-     try:
-         st.session_state.image = Image.open(camera_image)
-         st.write("Image captured successfully.")
-     except Exception as e:
-         st.error(f"Error opening camera image: {str(e)}")

- # Display the processed image
  if st.session_state.image is not None:
-     st.image(st.session_state.image, caption='Processed Image', use_container_width=True)
-
-     # Classify the car image
      with st.spinner('Analyzing image...'):
          car_classifications = classify_image(st.session_state.image)

      if car_classifications:
          st.write("Image classification successful.")
-         st.subheader("Car Classification Results:")
-         for classification in car_classifications:
-             st.write(f"Model: {classification['label']}")
-             st.write(f"Confidence: {classification['score'] * 100:.2f}%")
-
-         # Separate make and model from the classification result
          top_prediction = car_classifications[0]['label']
          make_name, model_name = top_prediction.split(' ', 1)
-
-         st.write(f"Identified Car Make: {make_name}")
-         st.write(f"Identified Car Model: {model_name}")

-         # Get additional information using GPT-3.5-turbo
          current_year = datetime.now().year
-         overview = get_car_overview(make_name, model_name, current_year)
-         st.write("Car Overview:")
-         st.write(overview)

      else:
-         st.error("Could not classify the image. Please try again with a different image.")
- else:
-     st.write("Please upload an image or take a picture to proceed.")
 
+ import pandas as pd
+ import numpy as np
+ from sklearn.metrics.pairwise import cosine_similarity
+ from sklearn.preprocessing import LabelEncoder

+ # Load the CTP_Model1.csv file
+ def load_car_data():
      try:
+         df = pd.read_csv('CTP_Model1.csv')  # Replace with the path to your actual CSV file
+         return df
      except Exception as e:
+         st.error(f"Error loading CSV file: {e}")
          return None

+ # Preprocess car data and encode categorical features
+ def preprocess_car_data(df):
+     label_encoders = {}
+
+     # Encode categorical columns (make, model, trim, fuel, title_status, etc.)
+     for col in ['make', 'model', 'trim', 'fuel', 'title_status', 'transmission', 'drive', 'size', 'type', 'paint_color']:
+         le = LabelEncoder()
+         df[col] = le.fit_transform(df[col])
+         label_encoders[col] = le
+
+     return df, label_encoders

+ # Calculate similarity between the classified car and entries in the CSV
+ def find_closest_car(df, label_encoders, make, model, year):
+     # Encode the user-provided make and model
+     make_encoded = label_encoders['make'].transform([make])[0]
+     model_encoded = label_encoders['model'].transform([model])[0]
+
+     # Create a feature vector for the classified car (make, model, year)
+     classified_car_vector = np.array([make_encoded, model_encoded, year]).reshape(1, -1)
+
+     # Prepare the data for similarity calculation
+     feature_columns = ['make', 'model', 'year']
+     df_feature_vectors = df[feature_columns].values
+
+     # Compute cosine similarity between the classified car and all entries in the CSV
+     similarity_scores = cosine_similarity(classified_car_vector, df_feature_vectors)
+
+     # Get the index of the closest match
+     closest_match_idx = similarity_scores.argmax()
+
+     # Return the closest match details
+     return df.iloc[closest_match_idx]

+ # Streamlit App Updates

+ # Load and preprocess the car data once (globally for the session)
+ car_data = load_car_data()
+ if car_data is not None:
+     processed_car_data, label_encoders = preprocess_car_data(car_data)

+ # Your existing code for image upload and classification ...

+ # After classification, find the closest car match
  if st.session_state.image is not None:
+     # Classify the car image (already done earlier)
      with st.spinner('Analyzing image...'):
          car_classifications = classify_image(st.session_state.image)

      if car_classifications:
          st.write("Image classification successful.")
          top_prediction = car_classifications[0]['label']
          make_name, model_name = top_prediction.split(' ', 1)

+         # Get the year (you may want to adjust this based on available data)
          current_year = datetime.now().year

+         # Find the closest match in the CSV based on the classification
+         closest_car = find_closest_car(processed_car_data, label_encoders, make_name, model_name, current_year)
+
+         st.write(f"Closest match in database:")
+         st.write(f"Year: {closest_car['year']}")
+         st.write(f"Make: {label_encoders['make'].inverse_transform([closest_car['make']])[0]}")
+         st.write(f"Model: {label_encoders['model'].inverse_transform([closest_car['model']])[0]}")
+         st.write(f"Price: ${closest_car['price']}")
+         st.write(f"Condition: {closest_car['condition']}")
+         st.write(f"Fuel: {closest_car['fuel']}")
+         st.write(f"Transmission: {closest_car['transmission']}")
+         st.write(f"Drive: {closest_car['drive']}")
+         st.write(f"Type: {closest_car['type']}")
      else:
+         st.error("Could not classify the image. Please try again with a different image.")