ANON-STUDIOS-254 committed
Commit 6297cd2 · verified · 1 Parent(s): ca77dc8

Update app.py

Files changed (1):
    app.py +107 -47
app.py CHANGED
@@ -4,7 +4,7 @@ from PIL import Image, ImageOps
 import tensorflow as tf
 from huggingface_hub import InferenceClient
 
-# Load the pre-trained Keras model
+# Load the pre-trained Keras model using TensorFlow's Keras
 model = tf.keras.models.load_model("keras_model.h5", compile=False)
 
 # Load the class labels
@@ -14,25 +14,35 @@ with open("labels.txt", "r") as file:
 # Initialize the HuggingFace client for the chatbot
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Classify image function
+# Sample images for the emotion detection
+examples = [
+    ["https://firebasestorage.googleapis.com/v0/b/hisia-4b65b.appspot.com/o/a-captivating-ukiyo-e-inspired-poster-featuring-a--wTg7L-f2Tfiy6K8w6aWnKA-KbGU9GSKSDGBbbxrCO65Mg.jpeg?alt=media&token=64590de9-e265-44ac-a766-aeecd455ed5d"],
+    ["https://firebasestorage.googleapis.com/v0/b/hisia-4b65b.appspot.com/o/poster-ai-themed-kenyan-female-silhoutte-written-l-PMIXpNWGQ8KaNNetQRVJuQ-B1TteyL-S5OTPZFXvfGybg.jpeg?alt=media&token=fc10f96d-403e-4f75-bd9c-810e0da36867"],
+    ["https://firebasestorage.googleapis.com/v0/b/hisia-4b65b.appspot.com/o/poster-ai-themed-kenyan-male-silhoutte-written-log-z3fqBD5bQOOj6uqGd_iXLQ-4aBfNy0ZTgmLlTsZh1dzIA.jpeg?alt=media&token=f218f160-d38e-482f-97a9-5442c2f251a7"]
+]
+
 def classify_image(img):
+    """Classify the image and return the detected emotion and confidence score."""
     try:
         size = (224, 224)
         image = ImageOps.fit(img, size, Image.Resampling.LANCZOS)
         image_array = np.asarray(image)
         normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1
         data = normalized_image_array.reshape((1, 224, 224, 3))
+
+        # Perform prediction using the model
         prediction = model.predict(data)
         index = np.argmax(prediction)
         class_name = class_names[index]
         confidence_score = prediction[0][index]
+
         return class_name, confidence_score
     except Exception as e:
         print(f"Error in classify_image: {e}")
         return "Error", 0
 
-# Respond function for AI recommendation
 def respond(message, history, system_message, max_tokens, temperature, top_p):
+    """Generate a response from the chatbot based on the input message and conversation history."""
     try:
         messages = [{"role": "system", "content": system_message}]
         for user_message, assistant_message in history:
@@ -44,84 +54,134 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
         response = ""
         for response_message in client.chat_completion(
-            messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
         ):
             token = response_message.choices[0].delta.content
             response += token
+
+        print(f"API Response: {response}")  # Debugging: Print the API response
         return response
     except Exception as e:
         print(f"Error in respond: {e}")
         return "Error generating response"
 
-# Custom CSS
+# Define the custom CSS for styling the interface and hiding the footer
 custom_css = """
 body {
     font-family: 'Arial', sans-serif;
     background-color: #f4e9e0;
     color: #2e2e2e;
 }
-...
-footer { display: none !important; }
+.gradio-container {
+    border-radius: 12px;
+    padding: 20px;
+    background: linear-gradient(135deg, #f5b8b8, #a0d6a2);
+    box-shadow: 0px 4px 15px rgba(0, 0, 0, 0.2);
+}
+.gradio-container h1 {
+    font-family: 'Arial', sans-serif;
+    font-size: 2.2em;
+    text-align: center;
+    color: #1c1c1c;
+    margin-bottom: 20px;
+}
+.gradio-container p {
+    font-size: 1em;
+    text-align: center;
+    color: #4a4a4a;
+}
+.gradio-button {
+    background-color: #d55a5a;
+    border: none;
+    color: white;
+    padding: 12px 24px;
+    font-size: 1.1em;
+    cursor: pointer;
+    border-radius: 8px;
+    transition: background-color 0.2s ease;
+}
+.gradio-button:hover {
+    background-color: #b93e3e;
+}
+#output-container {
+    border-radius: 12px;
+    background-color: #ffffff;
+    padding: 20px;
+    color: #2e2e2e;
+    box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2);
+}
+#output-container h3 {
+    font-family: 'Arial', sans-serif;
+    font-size: 1.4em;
+    color: #1c1c1c;
+}
+.gr-examples {
+    text-align: center;
+}
+.gr-example-img {
+    width: 120px;
+    border-radius: 8px;
+    margin: 5px;
+    box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2);
+}
+footer {
+    display: none !important; /* Hides the footer */
+}
 """
 
-def process_image_and_questions(image, max_tokens, temperature, top_p, need_for_power, need_for_affiliation, need_for_achievement):
-    # Classify the image to detect emotion
-    class_name, confidence_score = classify_image(image)
-    emotion_result = {"Detected Emotion": class_name, "Confidence Score": f"{confidence_score:.2f}"}
-
-    if class_name != "Error":
-        # Create a message for the AI based on the emotion and user inputs
-        input_message = (f"The detected emotion is {class_name} with a confidence score of {confidence_score:.2f}.\n"
-                         f"User rated their need for power as {need_for_power}/5, need for affiliation as {need_for_affiliation}/5, "
-                         f"and need for achievement as {need_for_achievement}/5.\n"
-                         "Please provide personalized recommendations based on these factors.")
-
-        # Generate AI recommendation
-        recommendation = respond(
-            input_message, history=[],
-            system_message="You are a psychologist providing therapeutic recommendations based on emotions and user inputs.",
-            max_tokens=int(max_tokens), temperature=float(temperature), top_p=float(top_p)
-        )
-        return emotion_result, recommendation
-    else:
-        return {"Detected Emotion": "Error", "Confidence Score": "0"}, "Error generating response"
-
-# Gradio Interface
 def emotion_detection_interface():
+    """Create and return the Gradio interface with sliders for AI response settings and a single view for emotion detection and recommendations."""
     with gr.Blocks(css=custom_css) as demo:
-        gr.Markdown("### HISIA: Emotion Detector and Personalized Recommendations")
-
+        gr.Markdown("### HISIA: Emotion Detector and Therapeutic Recommendations")
+
         with gr.Row():
             with gr.Column(scale=1, min_width=200):
                 image_input = gr.Image(type="pil", label="Upload an Image", elem_id="emotion-image")
                 submit_button = gr.Button("Classify Image", elem_id="classify-button")
-
+
            with gr.Column(scale=2, min_width=300):
                 emotion_output = gr.JSON(label="Emotion Detection Result", elem_id="output-container")
                 ai_response_output = gr.Textbox(label="AI Recommendations", elem_id="output-container", lines=5)
-
-        # Sliders for adjusting AI response settings
+
+        # Add sample images
+        gr.Examples(examples, inputs=image_input)
+
+        # Add sliders for adjusting AI response settings
         with gr.Row():
             max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="VERBOSENESS")
             temperature_slider = gr.Slider(minimum=0.1, maximum=3.0, value=0.7, step=0.1, label="CREATIVITY")
             top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="BROADNESS")
-
-        # Additional inputs for user needs
-        with gr.Row():
-            need_for_power = gr.Slider(minimum=1, maximum=5, step=1, label="How important is it for you to have control or influence over others?")
-            need_for_affiliation = gr.Slider(minimum=1, maximum=5, step=1, label="How important is it for you to feel connected with others or belong to groups?")
-            need_for_achievement = gr.Slider(minimum=1, maximum=5, step=1, label="How important is it for you to achieve goals or excel in your work?")
-
-        # Process and display results
+
+        def process_image(image, max_tokens, temperature, top_p):
+            """Process the image and generate emotion detection result and AI recommendations."""
+            class_name, confidence_score = classify_image(image)
+            emotion_result = {"Detected Emotion": class_name, "Confidence Score": f"{confidence_score:.2f}"}
+            if class_name != "Error":
+                # Generate AI recommendation based on detected emotion
+                recommendation = respond(
+                    class_name,
+                    history=[],
+                    system_message="You are a psychologist that provides therapeutic recommendations based on emotions. Always address the clients in the second pronouns person like your, you, etc",
+                    max_tokens=int(max_tokens),
+                    temperature=float(temperature),
+                    top_p=float(top_p),
+                )
+                return emotion_result, recommendation
+            else:
+                return {"Detected Emotion": "Error", "Confidence Score": "0"}, "Error generating response"
+
         submit_button.click(
-            process_image_and_questions,
-            inputs=[image_input, max_tokens_slider, temperature_slider, top_p_slider,
-                    need_for_power, need_for_affiliation, need_for_achievement],
+            process_image,
+            inputs=[image_input, max_tokens_slider, temperature_slider, top_p_slider],
             outputs=[emotion_output, ai_response_output]
         )
 
     return demo
 
-# Launch the interface
+# Launch the combined interface
 if __name__ == "__main__":
     emotion_detection_interface().launch()
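
Editor's note: `classify_image` indexes `class_names`, which is populated from `labels.txt`; the second hunk header shows the `with open("labels.txt", "r") as file:` line, but the parsing itself falls outside this diff. A minimal sketch of how such labels are commonly loaded for a Teachable-Machine-style Keras export; the actual parsing in app.py may differ:

    # Sketch (not from this commit): load class labels from labels.txt.
    # Teachable Machine exports lines like "0 Angry"; stripping the
    # numeric prefix leaves plain label names for class_names[index].
    with open("labels.txt", "r") as file:
        class_names = [line.strip().split(" ", 1)[-1] for line in file]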
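One caveat in `respond`: with `huggingface_hub` chat-completion streaming, a chunk's `delta.content` can be `None` (for example on role-only or final chunks), in which case `response += token` raises a `TypeError`. A defensive variant of the accumulation loop, offered as a sketch rather than as the committed code:

    response = ""
    for response_message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = response_message.choices[0].delta.content
        if token:  # delta.content may be None on some streamed chunks
            response += token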
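Because `image_input` is declared with `type="pil"`, `classify_image` receives a PIL image, and its reshape to `(1, 224, 224, 3)` assumes three channels, so grayscale or RGBA uploads would fail. A hypothetical local smoke test (`sample.jpg` is an assumed file, not part of the repo):

    from PIL import Image

    # Convert to RGB so the (1, 224, 224, 3) reshape holds for any input.
    img = Image.open("sample.jpg").convert("RGB")
    label, score = classify_image(img)
    print(f"{label}: {score:.2f}")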