kendrickfff committed on
Commit
f23b8cb
·
verified ·
1 Parent(s): 05b01da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -11
app.py CHANGED
@@ -1,10 +1,11 @@
1
  import os
2
  import gradio as gr
3
  from transformers import DetrForObjectDetection, DetrImageProcessor
 
4
  from PIL import Image
5
  import torch
6
- import requests
7
  import json
 
8
 
9
  # Load credentials (stringified JSON) from environment variable
10
  credentials_string = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
@@ -21,19 +22,19 @@ with open("service_account.json", "w") as f:
21
  # Set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the temporary file
22
  os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "service_account.json"
23
 
24
# Set up the Gemini chat model used by the conversational endpoint.
llm = ChatGoogleGenerativeAI(model='gemini-1.5-pro')

# Set up the DETR object-detection pipeline: the processor prepares images,
# the model produces detections. Both are fetched from the Hugging Face hub.
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

# Module-level conversation log shared by the chat and image handlers.
# Each entry is a (user_message, bot_response) tuple.
chat_history = []
33
 
34
def chat_with_gemini(message):
    """Send *message* to the Gemini model and return the updated chat history.

    Appends the (message, reply) pair to the module-level ``chat_history``
    list so the Gradio chatbot widget can render the full conversation.
    """
    global chat_history
    # Query the Gemini model for a reply to the user's message.
    reply = llm.predict(message)
    chat_history.append((message, reply))
    return chat_history
@@ -56,15 +57,17 @@ def analyze_image(image_path):
56
 
57
  # If any objects detected, display labels
58
  if len(results["labels"]) > 0:
59
- bot_response = f"Objects detected: {', '.join(map(str, results['labels'].tolist()))}."
 
60
  else:
61
  bot_response = "No objects detected."
62
-
 
 
63
  except Exception as e:
64
- bot_response = f"Error processing the image: {str(e)}"
65
-
66
- chat_history.append(("Uploaded an image for analysis", bot_response))
67
- return chat_history
68
 
69
  # Build the Gradio interface
70
  with gr.Blocks() as demo:
 
1
  import os
2
  import gradio as gr
3
  from transformers import DetrForObjectDetection, DetrImageProcessor
4
+ from langchain_google_genai.chat_models import ChatGoogleGenerativeAI # For Chat Gemini
5
  from PIL import Image
6
  import torch
 
7
  import json
8
+ import requests
9
 
10
  # Load credentials (stringified JSON) from environment variable
11
  credentials_string = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
 
22
  # Set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the temporary file
23
  os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "service_account.json"
24
 
25
# Set up the Gemini chat model used by the conversational endpoint.
llm = ChatGoogleGenerativeAI(model='gemini-1.5-pro')

# Set up the DETR object-detection pipeline: the processor prepares images,
# the model produces detections. Both are fetched from the Hugging Face hub.
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

# Module-level conversation log shared by the chat and image handlers.
# Each entry is a (user_message, bot_response) tuple.
chat_history = []
34
 
35
def chat_with_gemini(message):
    """Send *message* to the Gemini model and return the updated chat history.

    Appends the (message, reply) pair to the module-level ``chat_history``
    list so the Gradio chatbot widget can render the full conversation.
    """
    global chat_history
    # Query the Gemini model for a reply to the user's message.
    reply = llm.predict(message)
    chat_history.append((message, reply))
    return chat_history
 
57
 
58
  # If any objects detected, display labels
59
  if len(results["labels"]) > 0:
60
+ detected_objects = [str(label.item()) for label in results["labels"]]
61
+ bot_response = f"Objects detected: {', '.join(detected_objects)}."
62
  else:
63
  bot_response = "No objects detected."
64
+
65
+ chat_history.append(("Uploaded an image for analysis", bot_response))
66
+ return chat_history
67
  except Exception as e:
68
+ error_msg = f"Error processing the image: {str(e)}"
69
+ chat_history.append(("Error during image analysis", error_msg))
70
+ return chat_history
 
71
 
72
  # Build the Gradio interface
73
  with gr.Blocks() as demo: