arad1367 committed (verified)
Commit ef1f2bb · 1 Parent(s): 015667b

Update app.py

Files changed (1):
  1. app.py +53 -27
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import requests
 import base64
+import json
 import os
 
 # --- UI Constants ---
@@ -42,7 +43,10 @@ def openai_responses_api(api_key, messages, previous_response_id=None):
     if r.status_code == 200:
         return r.json()
     else:
-        raise Exception(r.json().get("error", {}).get("message", "Unknown error"))
+        try:
+            return {"error": r.json().get("error", {}).get("message", "Unknown error")}
+        except Exception:
+            return {"error": "Unknown error"}
 
 def encode_image_to_base64(image):
     """Encode uploaded image to base64 string."""
@@ -72,32 +76,34 @@ def chat_fn(user_message, chat_history, image, state):
     api_key = state.get("api_key")
     if not api_key:
         return chat_history, state, gr.update(visible=True), "Please enter your OpenAI API key."
+    if not user_message and image is None:
+        return chat_history, state, gr.update(visible=False), ""
     # Prepare messages for API
-    messages = []
-    for turn in chat_history:
-        messages.append({"role": turn["role"], "content": turn["content"]})
+    messages = chat_history.copy()
     # Add current user message
     if image is not None:
-        # Image attached: send as multimodal input
         base64_img = encode_image_to_base64(image)
         content = [
             {"type": "input_text", "text": user_message or "Please describe this image."},
             {"type": "input_image", "image_url": f"data:image/jpeg;base64,{base64_img}"}
         ]
         messages.append({"role": "user", "content": content})
-        chat_history.append({"role": "user", "content": f"{user_message}\n[Image attached]"})
+        display_user = (user_message or "") + "\n[Image attached]"
     else:
         messages.append({"role": "user", "content": user_message})
-        chat_history.append({"role": "user", "content": user_message})
+        display_user = user_message
     # Call OpenAI Responses API
-    try:
-        response = openai_responses_api(api_key, messages)
-        output_text = response.get("output_text", "")
-        chat_history.append({"role": "assistant", "content": output_text})
-        state["previous_response_id"] = response.get("id")
-        return chat_history, state, gr.update(visible=False), ""
-    except Exception as e:
-        return chat_history, state, gr.update(visible=True), f"API error: {str(e)}"
+    response = openai_responses_api(api_key, messages, state.get("previous_response_id"))
+    if "error" in response:
+        return chat_history, state, gr.update(visible=True), f"API error: {response['error']}"
+    output_text = response.get("output_text", "")
+    state["previous_response_id"] = response.get("id")
+    # Update chat history for display
+    chat_history = chat_history + [
+        {"role": "user", "content": display_user},
+        {"role": "assistant", "content": output_text}
+    ]
+    return chat_history, state, gr.update(visible=False), ""
 
 def image_analysis_fn(image, state):
     """Analyze uploaded image using OpenAI Responses API."""
@@ -116,17 +122,17 @@ def image_analysis_fn(image, state):
             ]
         }
     ]
-    try:
-        response = openai_responses_api(api_key, messages)
-        return response.get("output_text", ""), gr.update(visible=False), ""
-    except Exception as e:
-        return f"API error: {str(e)}", gr.update(visible=True), f"API error: {str(e)}"
+    response = openai_responses_api(api_key, messages)
+    if "error" in response:
+        return f"API error: {response['error']}", gr.update(visible=True), f"API error: {response['error']}"
+    return response.get("output_text", ""), gr.update(visible=False), ""
 
 def export_chat(chat_history):
     """Export chat history as JSON."""
-    import json
     chat_json = chat_history
-    return gr.File.update(value=("chat_history.json", json.dumps(chat_json, indent=2)), visible=True)
+    with open("chat_history.json", "w", encoding="utf-8") as f:
+        json.dump(chat_json, f, indent=2, ensure_ascii=False)
+    return "chat_history.json"
 
 def show_iframe(space_url):
     """Show iframe code for embedding."""
@@ -167,17 +173,37 @@ with gr.Blocks(theme=gr.themes.Soft(), css=".footer {text-align: center; font-si
         iframe_btn = gr.Button("Get iframe code")
         iframe_code = gr.Textbox(label="Embed code", interactive=False, visible=False)
         # Chat logic
-        send_btn.click(chat_fn, [user_input, chatbot, image_input, state], [chatbot, state, chat_error, api_key_error])
-        user_input.submit(chat_fn, [user_input, chatbot, image_input, state], [chatbot, state, chat_error, api_key_error])
-        export_btn.click(export_chat, chatbot, export_file)
-        iframe_btn.click(show_iframe, gr.Textbox(value="https://huggingface.co/spaces/your-space-name", visible=False), iframe_code)
+        send_btn.click(
+            chat_fn,
+            [user_input, chatbot, image_input, state],
+            [chatbot, state, chat_error, api_key_error]
+        )
+        user_input.submit(
+            chat_fn,
+            [user_input, chatbot, image_input, state],
+            [chatbot, state, chat_error, api_key_error]
+        )
+        export_btn.click(
+            export_chat,
+            chatbot,
+            export_file
+        )
+        iframe_btn.click(
+            show_iframe,
+            gr.Textbox(value="https://huggingface.co/spaces/your-space-name", visible=False),
+            iframe_code
+        )
     # --- Image Analysis Tab ---
     with gr.Tab("🖼️ Image Analysis"):
        image_upload = gr.Image(type="filepath", label="Upload image")
        analyze_btn = gr.Button("Analyze")
        image_result = gr.Textbox(label="Analysis result", lines=4)
        image_error = gr.Markdown("", visible=False)
-        analyze_btn.click(image_analysis_fn, [image_upload, state], [image_result, api_key_error, image_error])
+        analyze_btn.click(
+            image_analysis_fn,
+            [image_upload, state],
+            [image_result, api_key_error, image_error]
+        )
 
     # --- Footer ---
     gr.Markdown(f"<div class='footer'>{FOOTER}</div>")
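The core change in this commit is the error contract of `openai_responses_api`: instead of raising on a non-200 status, it now returns a plain dict carrying an `"error"` key, and every caller branches on `"error" in response` so Gradio event handlers return an error message rather than crash. The sketch below illustrates that contract only; the endpoint, payload shape, model name, and the function name `call_responses_api` are assumptions standing in for the app's `openai_responses_api`, whose request-building code is not part of this diff.

```python
import requests

OPENAI_RESPONSES_URL = "https://api.openai.com/v1/responses"  # assumed endpoint

def call_responses_api(api_key, messages, previous_response_id=None):
    # Assumed payload shape; only the status-code branch mirrors the committed change.
    payload = {"model": "gpt-4o-mini", "input": messages}
    if previous_response_id:
        payload["previous_response_id"] = previous_response_id
    r = requests.post(
        OPENAI_RESPONSES_URL,
        headers={"Authorization": f"Bearer {api_key}"},
        json=payload,
        timeout=60,
    )
    if r.status_code == 200:
        return r.json()
    # Never raise: surface the failure as a dict so UI callbacks can show it.
    try:
        return {"error": r.json().get("error", {}).get("message", "Unknown error")}
    except Exception:
        return {"error": "Unknown error"}

# Callers check the "error" key instead of wrapping the call in try/except,
# mirroring how chat_fn and image_analysis_fn read the result in this commit.
response = call_responses_api("sk-invalid-key", [{"role": "user", "content": "Hi"}])
if "error" in response:
    print(f"API error: {response['error']}")
else:
    print(response.get("output_text", ""))
```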