Update app.py
Browse files
app.py
CHANGED
@@ -2,9 +2,10 @@ import os
|
|
2 |
import base64
|
3 |
import gradio as gr
|
4 |
import requests
|
5 |
-
from io import BytesIO
|
6 |
import json
|
|
|
7 |
from PIL import Image
|
|
|
8 |
|
9 |
# Get API key from environment variable for security
|
10 |
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
|
@@ -88,22 +89,23 @@ text_models = [(name, model_id) for name, model_id, _, _, _ in free_models]
|
|
88 |
def encode_image(image):
|
89 |
"""Convert PIL Image to base64 string"""
|
90 |
buffered = BytesIO()
|
91 |
-
image.save(buffered, format="
|
92 |
return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
93 |
|
94 |
-
def
|
95 |
-
"""Process message and
|
96 |
model_id = next((model_id for name, model_id, _, _, _ in free_models if name == model_name), text_models[0][1])
|
97 |
|
98 |
# Check if API key is set
|
99 |
if not OPENROUTER_API_KEY:
|
100 |
-
|
|
|
101 |
|
102 |
# Setup headers and URL
|
103 |
headers = {
|
104 |
"Content-Type": "application/json",
|
105 |
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
|
106 |
-
"HTTP-Referer": "https://huggingface.co/spaces", # Replace with your actual space URL in production
|
107 |
}
|
108 |
|
109 |
url = "https://openrouter.ai/api/v1/chat/completions"
|
@@ -125,7 +127,7 @@ def process_message(message, chat_history, model_name, uploaded_image=None):
|
|
125 |
{
|
126 |
"type": "image_url",
|
127 |
"image_url": {
|
128 |
-
"url": f"data:image/
|
129 |
}
|
130 |
}
|
131 |
]
|
@@ -136,24 +138,55 @@ def process_message(message, chat_history, model_name, uploaded_image=None):
|
|
136 |
# Build request data
|
137 |
data = {
|
138 |
"model": model_id,
|
139 |
-
"messages": messages
|
|
|
|
|
140 |
}
|
141 |
|
142 |
try:
|
143 |
-
#
|
144 |
-
|
145 |
-
|
146 |
|
147 |
-
#
|
148 |
-
|
149 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
150 |
|
151 |
-
|
152 |
-
chat_history.append((message, reply))
|
153 |
-
return reply, chat_history
|
154 |
|
155 |
except Exception as e:
|
156 |
-
|
|
|
|
|
157 |
|
158 |
# Create a nice CSS theme
|
159 |
css = """
|
@@ -179,11 +212,11 @@ css = """
|
|
179 |
}
|
180 |
"""
|
181 |
|
182 |
-
with gr.Blocks(css=css) as demo:
|
183 |
gr.HTML("""
|
184 |
<div class="app-header">
|
185 |
<h1>🔆 CrispChat</h1>
|
186 |
-
<p>Chat with
|
187 |
</div>
|
188 |
""")
|
189 |
|
@@ -195,6 +228,7 @@ with gr.Blocks(css=css) as demo:
|
|
195 |
show_share_button=False,
|
196 |
elem_id="chatbot",
|
197 |
layout="panel",
|
|
|
198 |
)
|
199 |
|
200 |
with gr.Row():
|
@@ -208,7 +242,6 @@ with gr.Blocks(css=css) as demo:
|
|
208 |
type="pil",
|
209 |
label="Image Upload (optional)",
|
210 |
show_label=False,
|
211 |
-
tool="select",
|
212 |
scale=2
|
213 |
)
|
214 |
submit_btn = gr.Button("Send", scale=1, variant="primary")
|
@@ -234,7 +267,7 @@ with gr.Blocks(css=css) as demo:
|
|
234 |
|
235 |
with gr.Accordion("API", open=False):
|
236 |
api_url = gr.Textbox(
|
237 |
-
value="https://
|
238 |
label="API Endpoint",
|
239 |
interactive=False
|
240 |
)
|
@@ -266,7 +299,7 @@ with gr.Blocks(css=css) as demo:
|
|
266 |
def on_submit(message, history, model, image):
|
267 |
if not message and not image:
|
268 |
return "", history
|
269 |
-
return "",
|
270 |
|
271 |
# Set up submission events
|
272 |
submit_btn.click(
|
@@ -299,8 +332,60 @@ def api_generate(message, model=None, image_data=None):
|
|
299 |
|
300 |
# Generate response
|
301 |
try:
|
302 |
-
|
303 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
304 |
except Exception as e:
|
305 |
return {"error": f"Error generating response: {str(e)}"}
|
306 |
|
|
|
2 |
import base64
|
3 |
import gradio as gr
|
4 |
import requests
|
|
|
5 |
import json
|
6 |
+
from io import BytesIO
|
7 |
from PIL import Image
|
8 |
+
import time
|
9 |
|
10 |
# Get API key from environment variable for security
|
11 |
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
|
|
|
89 |
def encode_image(image):
    """Convert a PIL Image to a base64-encoded JPEG string.

    Args:
        image: PIL.Image.Image to encode.

    Returns:
        str: base64 encoding of the JPEG bytes, decoded as UTF-8 so it can
        be embedded in a ``data:image/jpeg;base64,...`` URL.
    """
    # JPEG cannot store an alpha channel or palette data; saving an RGBA/P
    # image (e.g. a PNG upload with transparency) raises OSError. Convert
    # anything that is not already plain RGB or grayscale first.
    if image.mode not in ("RGB", "L"):
        image = image.convert("RGB")
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
|
94 |
|
95 |
+
def process_message_stream(message, chat_history, model_name, uploaded_image=None):
|
96 |
+
"""Process message and stream the model response"""
|
97 |
model_id = next((model_id for name, model_id, _, _, _ in free_models if name == model_name), text_models[0][1])
|
98 |
|
99 |
# Check if API key is set
|
100 |
if not OPENROUTER_API_KEY:
|
101 |
+
yield "Please set your OpenRouter API key in the environment variables.", chat_history
|
102 |
+
return
|
103 |
|
104 |
# Setup headers and URL
|
105 |
headers = {
|
106 |
"Content-Type": "application/json",
|
107 |
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
|
108 |
+
"HTTP-Referer": "https://huggingface.co/spaces/cstr/CrispChat", # Replace with your actual space URL in production
|
109 |
}
|
110 |
|
111 |
url = "https://openrouter.ai/api/v1/chat/completions"
|
|
|
127 |
{
|
128 |
"type": "image_url",
|
129 |
"image_url": {
|
130 |
+
"url": f"data:image/jpeg;base64,{base64_image}"
|
131 |
}
|
132 |
}
|
133 |
]
|
|
|
138 |
# Build request data
|
139 |
data = {
|
140 |
"model": model_id,
|
141 |
+
"messages": messages,
|
142 |
+
"stream": True,
|
143 |
+
"temperature": 0.7
|
144 |
}
|
145 |
|
146 |
try:
|
147 |
+
# Create a new message pair in the chat history
|
148 |
+
chat_history.append((message, ""))
|
149 |
+
full_response = ""
|
150 |
|
151 |
+
# Make streaming API call
|
152 |
+
with requests.post(url, headers=headers, json=data, stream=True) as response:
|
153 |
+
response.raise_for_status()
|
154 |
+
buffer = ""
|
155 |
+
|
156 |
+
for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
|
157 |
+
if chunk:
|
158 |
+
buffer += chunk.decode('utf-8')
|
159 |
+
|
160 |
+
while True:
|
161 |
+
line_end = buffer.find('\n')
|
162 |
+
if line_end == -1:
|
163 |
+
break
|
164 |
+
|
165 |
+
line = buffer[:line_end].strip()
|
166 |
+
buffer = buffer[line_end + 1:]
|
167 |
+
|
168 |
+
if line.startswith('data: '):
|
169 |
+
data = line[6:]
|
170 |
+
if data == '[DONE]':
|
171 |
+
break
|
172 |
+
|
173 |
+
try:
|
174 |
+
data_obj = json.loads(data)
|
175 |
+
delta_content = data_obj["choices"][0]["delta"].get("content", "")
|
176 |
+
if delta_content:
|
177 |
+
full_response += delta_content
|
178 |
+
# Update the last assistant message
|
179 |
+
chat_history[-1] = (message, full_response)
|
180 |
+
yield full_response, chat_history
|
181 |
+
except json.JSONDecodeError:
|
182 |
+
pass
|
183 |
|
184 |
+
return full_response, chat_history
|
|
|
|
|
185 |
|
186 |
except Exception as e:
|
187 |
+
error_msg = f"Error: {str(e)}"
|
188 |
+
chat_history[-1] = (message, error_msg)
|
189 |
+
yield error_msg, chat_history
|
190 |
|
191 |
# Create a nice CSS theme
|
192 |
css = """
|
|
|
212 |
}
|
213 |
"""
|
214 |
|
215 |
+
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
|
216 |
gr.HTML("""
|
217 |
<div class="app-header">
|
218 |
<h1>🔆 CrispChat</h1>
|
219 |
+
<p>Chat with AI models - supports text and images</p>
|
220 |
</div>
|
221 |
""")
|
222 |
|
|
|
228 |
show_share_button=False,
|
229 |
elem_id="chatbot",
|
230 |
layout="panel",
|
231 |
+
type="messages" # Use new message format
|
232 |
)
|
233 |
|
234 |
with gr.Row():
|
|
|
242 |
type="pil",
|
243 |
label="Image Upload (optional)",
|
244 |
show_label=False,
|
|
|
245 |
scale=2
|
246 |
)
|
247 |
submit_btn = gr.Button("Send", scale=1, variant="primary")
|
|
|
267 |
|
268 |
with gr.Accordion("API", open=False):
|
269 |
api_url = gr.Textbox(
|
270 |
+
value="https://cstr-crispchat.hf.space/api/generate",
|
271 |
label="API Endpoint",
|
272 |
interactive=False
|
273 |
)
|
|
|
299 |
def on_submit(message, history, model, image):
    """Handle a chat submission: clear the textbox and stream the reply.

    Implemented as a generator so Gradio updates the chatbot incrementally:
    each yield is a ``(textbox_value, chat_history)`` pair. The original
    version returned the generator object from ``process_message_stream``
    directly, which would have set the chatbot value to an un-iterated
    generator instead of streaming its updates.
    """
    # Nothing to send when both the text box and the image are empty.
    if not message and not image:
        yield "", history
        return
    # Forward every (partial_reply, updated_history) update from the
    # streaming backend, keeping the input textbox cleared throughout.
    for _partial, updated_history in process_message_stream(message, history, model, image):
        yield "", updated_history
|
303 |
|
304 |
# Set up submission events
|
305 |
submit_btn.click(
|
|
|
332 |
|
333 |
# Generate response
|
334 |
try:
|
335 |
+
# Setup headers and URL
|
336 |
+
headers = {
|
337 |
+
"Content-Type": "application/json",
|
338 |
+
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
|
339 |
+
"HTTP-Referer": "https://huggingface.co/spaces",
|
340 |
+
}
|
341 |
+
|
342 |
+
url = "https://openrouter.ai/api/v1/chat/completions"
|
343 |
+
|
344 |
+
# Get model_id from model_name
|
345 |
+
model_id = next((model_id for name, model_id, _, _, _ in free_models if name == model_name), None)
|
346 |
+
if not model_id and model:
|
347 |
+
# Check if model parameter is a direct model ID
|
348 |
+
model_id = model
|
349 |
+
|
350 |
+
if not model_id:
|
351 |
+
model_id = text_models[0][1]
|
352 |
+
|
353 |
+
# Build messages
|
354 |
+
messages = []
|
355 |
+
|
356 |
+
if image:
|
357 |
+
# Image processing for vision models
|
358 |
+
base64_image = encode_image(image)
|
359 |
+
content = [
|
360 |
+
{"type": "text", "text": message},
|
361 |
+
{
|
362 |
+
"type": "image_url",
|
363 |
+
"image_url": {
|
364 |
+
"url": f"data:image/jpeg;base64,{base64_image}"
|
365 |
+
}
|
366 |
+
}
|
367 |
+
]
|
368 |
+
messages.append({"role": "user", "content": content})
|
369 |
+
else:
|
370 |
+
messages.append({"role": "user", "content": message})
|
371 |
+
|
372 |
+
# Build request data
|
373 |
+
data = {
|
374 |
+
"model": model_id,
|
375 |
+
"messages": messages,
|
376 |
+
"temperature": 0.7
|
377 |
+
}
|
378 |
+
|
379 |
+
# Make API call
|
380 |
+
response = requests.post(url, headers=headers, json=data)
|
381 |
+
response.raise_for_status()
|
382 |
+
|
383 |
+
# Parse response
|
384 |
+
result = response.json()
|
385 |
+
reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
|
386 |
+
|
387 |
+
return {"response": reply}
|
388 |
+
|
389 |
except Exception as e:
|
390 |
return {"error": f"Error generating response: {str(e)}"}
|
391 |
|