cstr committed on
Commit cef7f39 · verified · 1 Parent(s): b142a4a

Update app.py

Files changed (1)
  1. app.py +148 -259
app.py CHANGED
@@ -5,265 +5,149 @@ import requests
 import json
 from io import BytesIO
 from PIL import Image
-import time

 # Get API key from environment variable for security
 OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")

-# Simplified model information with only name and ID
-free_models = [
-    ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free"),
-    ("Google: Gemini 2.0 Flash", "google/gemini-2.0-flash-exp:free"),
-    ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free"),
-    ("Meta: Llama 3.2 11B Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
-    ("Qwen: Qwen2.5 VL 72B", "qwen/qwen2.5-vl-72b-instruct:free"),
-    ("DeepSeek: DeepSeek R1", "deepseek/deepseek-r1:free"),
-    ("Meta: Llama 3.1 8B", "meta-llama/llama-3.1-8b-instruct:free"),
-    ("Mistral: Mistral Small 3.1 24B", "mistralai/mistral-small-3.1-24b-instruct:free")
 ]

-# Helper functions
-def encode_image(image):
-    """Convert PIL Image to base64 string"""
-    buffered = BytesIO()
-    image.save(buffered, format="JPEG")
-    return base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-def encode_file(file_path):
-    """Convert text file to string"""
-    try:
-        with open(file_path, 'r', encoding='utf-8') as file:
-            return file.read()
-    except Exception as e:
-        return f"Error reading file: {str(e)}"
-
-def generate_response(message, chat_history, model_name, uploaded_image=None, uploaded_file=None,
-                      temp=0.7, max_tok=1000, use_stream=True):
-    """Process message and get response from API"""
-    # Find model ID
-    model_id = next((model_id for name, model_id in free_models if name == model_name), free_models[0][1])

-    # Get context from history
     messages = []
-    for turn in chat_history:
-        if isinstance(turn, tuple):
-            user_msg, ai_msg = turn
-            messages.append({"role": "user", "content": user_msg})
-            messages.append({"role": "assistant", "content": ai_msg})

     # Process file if provided
-    if uploaded_file:
-        file_content = encode_file(uploaded_file)
-        message = f"{message}\n\nFile content:\n```\n{file_content}\n```"

-    # Create new message
-    if uploaded_image:
-        # Process image for vision models
-        base64_image = encode_image(uploaded_image)
-        content = [
-            {"type": "text", "text": message},
-            {
-                "type": "image_url",
-                "image_url": {
-                    "url": f"data:image/jpeg;base64,{base64_image}"
                 }
-            }
-        ]
-        messages.append({"role": "user", "content": content})
     else:
         messages.append({"role": "user", "content": message})

-    # Setup headers and URL
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {OPENROUTER_API_KEY}",
         "HTTP-Referer": "https://huggingface.co/spaces",
     }

-    url = "https://openrouter.ai/api/v1/chat/completions"
-
-    # Build request data
     data = {
         "model": model_id,
         "messages": messages,
-        "stream": use_stream,
-        "temperature": temp,
-        "max_tokens": max_tok
     }

-    # Add message to chat history
-    chat_history.append((message, ""))
-
     try:
-        if use_stream:
-            # Streaming response
-            with requests.post(url, headers=headers, json=data, stream=True) as response:
-                response.raise_for_status()
-
-                full_response = ""
-                buffer = ""
-
-                for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
-                    if chunk:
-                        buffer += chunk.decode('utf-8')
-
-                        # Process line by line
-                        while '\n' in buffer:
-                            line, buffer = buffer.split('\n', 1)
-                            line = line.strip()
-
-                            if line.startswith('data: '):
-                                data = line[6:]
-                                if data == '[DONE]':
-                                    break
-
-                                try:
-                                    data_obj = json.loads(data)
-                                    delta_content = data_obj["choices"][0]["delta"].get("content", "")
-                                    if delta_content:
-                                        full_response += delta_content
-                                        chat_history[-1] = (message, full_response)
-                                        yield chat_history
-                                except Exception:
-                                    pass
-
-                # Final yield to ensure complete message
-                if full_response:
-                    chat_history[-1] = (message, full_response)
-                    yield chat_history
-
-        else:
-            # Non-streaming response
-            response = requests.post(url, headers=headers, json=data)
-            response.raise_for_status()
-            result = response.json()
-
-            reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
-            chat_history[-1] = (message, reply)
-            yield chat_history
-
     except Exception as e:
-        error_msg = f"Error: {str(e)}"
-        chat_history[-1] = (message, error_msg)
-        yield chat_history
-
-def clear_chat():
-    """Clear the chat history"""
-    return []

-def clear_input():
-    """Clear the input field"""
-    return "", None, None
-
-# Create a very simple UI
-with gr.Blocks(theme=gr.themes.Default()) as demo:
     gr.Markdown("# 🔆 CrispChat")

     with gr.Row():
         with gr.Column(scale=3):
-            chatbot = gr.Chatbot(
-                height=500,
-                layout="bubble",
-                show_copy_button=True,
-                show_share_button=False,
-                avatar_images=("👤", "🤖")
             )
-
-            with gr.Group():
-                user_message = gr.Textbox(
-                    placeholder="Type your message here...",
-                    lines=3,
-                    show_label=False
-                )
-
-                with gr.Row():
-                    image_upload = gr.Image(
-                        type="pil",
-                        label="Image (optional)",
-                        show_label=True
-                    )
-
-                    file_upload = gr.File(
-                        label="Text File (optional)",
-                        file_types=[".txt", ".md", ".py", ".js", ".html", ".css", ".json"]
-                    )
-
-                with gr.Row():
-                    submit_btn = gr.Button("Send", variant="primary")
-                    clear_chat_btn = gr.Button("Clear Chat")

         with gr.Column(scale=1):
-            model_selector = gr.Dropdown(
-                choices=[name for name, _ in free_models],
-                value=free_models[0][0],
-                label="Select Model"
-            )
-
-            temperature = gr.Slider(
-                minimum=0.1,
-                maximum=2.0,
-                value=0.7,
-                step=0.1,
-                label="Temperature"
-            )
-
-            max_tokens = gr.Slider(
-                minimum=100,
-                maximum=4000,
-                value=1000,
-                step=100,
-                label="Max Tokens"
-            )
-
-            streaming = gr.Checkbox(
-                label="Streaming",
-                value=True
             )

-    # Set up submit events
-    submit_btn.click(
-        fn=generate_response,
-        inputs=[
-            user_message,
-            chatbot,
-            model_selector,
-            image_upload,
-            file_upload,
-            temperature,
-            max_tokens,
-            streaming
-        ],
         outputs=chatbot
     ).then(
-        fn=clear_input,
-        outputs=[user_message, image_upload, file_upload]
     )

-    user_message.submit(
-        fn=generate_response,
-        inputs=[
-            user_message,
-            chatbot,
-            model_selector,
-            image_upload,
-            file_upload,
-            temperature,
-            max_tokens,
-            streaming
-        ],
         outputs=chatbot
     ).then(
-        fn=clear_input,
-        outputs=[user_message, image_upload, file_upload]
     )

-    # Clear chat button
-    clear_chat_btn.click(
-        fn=clear_chat,
-        outputs=chatbot
-    )

-# API for external access
 from fastapi import FastAPI
 from pydantic import BaseModel
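For context on the deletion above: the old streaming branch parsed OpenRouter's server-sent events by hand, reading `data: {json}` lines until a `data: [DONE]` sentinel. A minimal, self-contained sketch of that parsing against a canned buffer (the payload values are made up for illustration):

```python
import json

# Canned SSE fragment shaped like the chunks the removed loop consumed;
# the content values are illustrative, not real API output.
buffer = (
    'data: {"choices": [{"delta": {"content": "Hel"}}]}\n'
    'data: {"choices": [{"delta": {"content": "lo"}}]}\n'
    'data: [DONE]\n'
)

full_response = ""
while "\n" in buffer:
    line, buffer = buffer.split("\n", 1)
    line = line.strip()
    if line.startswith("data: "):
        payload = line[6:]
        if payload == "[DONE]":
            break
        # Each event carries an incremental delta of the assistant message.
        full_response += json.loads(payload)["choices"][0]["delta"].get("content", "")

print(full_response)  # -> Hello
```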
 
@@ -276,60 +160,64 @@ class GenerateRequest(BaseModel):

 @app.post("/api/generate")
 async def api_generate(request: GenerateRequest):
-    """API endpoint for generating responses"""
-    try:
-        # Get model ID
-        model_id = request.model
-        if not model_id:
-            model_id = free_models[0][1]

-        # Process image if provided
-        messages = []
-        if request.image_data:
-            try:
-                image_bytes = base64.b64decode(request.image_data)
-                image = Image.open(BytesIO(image_bytes))
-                base64_image = encode_image(image)
-                content = [
-                    {"type": "text", "text": request.message},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{base64_image}"
-                        }
                     }
-                ]
-                messages.append({"role": "user", "content": content})
-            except Exception as e:
-                return {"error": f"Image processing error: {str(e)}"}
-        else:
-            messages.append({"role": "user", "content": request.message})
-
-        # Setup API call
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-            "HTTP-Referer": "https://huggingface.co/spaces",
-        }
-
-        url = "https://openrouter.ai/api/v1/chat/completions"
-
-        data = {
-            "model": model_id,
-            "messages": messages,
-            "temperature": 0.7
-        }
-
-        # Make API call
-        response = requests.post(url, headers=headers, json=data)
         response.raise_for_status()

-        # Parse response
         result = response.json()
         reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")

         return {"response": reply}
-
     except Exception as e:
         return {"error": f"Error: {str(e)}"}

@@ -338,4 +226,5 @@ app = gr.mount_gradio_app(app, demo, path="/")

 # Launch the app
 if __name__ == "__main__":
-    demo.launch()
 
 
 import json
 from io import BytesIO
 from PIL import Image

 # Get API key from environment variable for security
 OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")

+# Simplified model list
+models = [
+    ("Google Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
+    ("Google Gemini 2.5 Pro", "google/gemini-2.5-pro-exp-03-25:free"),
+    ("Meta Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
+    ("Qwen 2.5 VL", "qwen/qwen2.5-vl-72b-instruct:free"),
+    ("DeepSeek R1", "deepseek/deepseek-r1:free"),
+    ("Mistral 3.1", "mistralai/mistral-small-3.1-24b-instruct:free")
 ]

+def get_response(message, history, model_name, image=None, file=None):
+    """Simple function to get response from API"""
+    # Find model ID from name
+    model_id = next((mid for name, mid in models if name == model_name), models[0][1])

+    # Format messages from history
     messages = []
+    for human, ai in history:
+        messages.append({"role": "user", "content": human})
+        if ai:  # Only add if there's a response
+            messages.append({"role": "assistant", "content": ai})

     # Process file if provided
+    if file:
+        try:
+            with open(file.name, 'r', encoding='utf-8') as f:
+                file_content = f.read()
+            message = f"{message}\n\nFile content:\n```\n{file_content}\n```"
+        except Exception as e:
+            message = f"{message}\n\nError reading file: {str(e)}"

+    # Process image if provided
+    if image is not None:
+        try:
+            # Convert image to base64
+            buffered = BytesIO()
+            image.save(buffered, format="JPEG")
+            base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+            # Create multimodal content
+            content = [
+                {"type": "text", "text": message},
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"data:image/jpeg;base64,{base64_image}"
+                    }
                 }
+            ]
+            messages.append({"role": "user", "content": content})
+        except Exception as e:
+            messages.append({"role": "user", "content": f"{message}\n\nError processing image: {str(e)}"})
     else:
         messages.append({"role": "user", "content": message})

+    # Make API call (non-streaming for reliability)
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {OPENROUTER_API_KEY}",
         "HTTP-Referer": "https://huggingface.co/spaces",
     }

     data = {
         "model": model_id,
         "messages": messages,
+        "temperature": 0.7,
+        "max_tokens": 1000
     }

     try:
+        response = requests.post(
+            "https://openrouter.ai/api/v1/chat/completions",
+            headers=headers,
+            json=data,
+            timeout=60
+        )
+        response.raise_for_status()
+
+        result = response.json()
+        reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
+
+        # Append the exchange and return the full history: gr.Chatbot expects
+        # a list of (user, assistant) tuples, not a bare string
+        history.append((message, reply))
+        return history
     except Exception as e:
+        history.append((message, f"Error: {str(e)}"))
+        return history
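A quick way to sanity-check the new handler outside Gradio, assuming OPENROUTER_API_KEY is set in the environment and the name matches an entry in `models` (this snippet is a sketch, not part of the commit):

```python
# Hypothetical smoke test for get_response; needs a valid OPENROUTER_API_KEY
# and network access to openrouter.ai.
history = []
history = get_response("Say hello in one short sentence.", history, "DeepSeek R1")
print(history[-1])  # the (user message, assistant reply) tuple shown in the Chatbot
```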

+# Create ultra simple interface
+with gr.Blocks() as demo:
     gr.Markdown("# 🔆 CrispChat")

+    chatbot = gr.Chatbot(height=450)
+
     with gr.Row():
         with gr.Column(scale=3):
+            msg = gr.Textbox(
+                placeholder="Type your message here...",
+                lines=3,
+                label="Message"
             )

         with gr.Column(scale=1):
+            model = gr.Dropdown(
+                choices=[name for name, _ in models],
+                value=models[0][0],
+                label="Model"
             )

+    with gr.Row():
+        with gr.Column(scale=1):
+            img = gr.Image(type="pil", label="Image (optional)")
+
+        with gr.Column(scale=1):
+            file = gr.File(label="Text File (optional)")
+
+    with gr.Row():
+        submit = gr.Button("Send")
+        clear = gr.Button("Clear")
+
+    # Events
+    submit.click(
+        fn=get_response,
+        inputs=[msg, chatbot, model, img, file],
         outputs=chatbot
     ).then(
+        # Returns one value per cleared component
+        fn=lambda: ("", None, None),
+        outputs=[msg, img, file]
     )

+    msg.submit(
+        fn=get_response,
+        inputs=[msg, chatbot, model, img, file],
         outputs=chatbot
     ).then(
+        fn=lambda: ("", None, None),
+        outputs=[msg, img, file]
     )

+    clear.click(lambda: [], outputs=chatbot)
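The `.then()` chains above run the clearing lambda only after `get_response` returns, so the textbox and uploads reset once the reply is in the chat. The same pattern in isolation (component names here are illustrative, not from the commit):

```python
import gradio as gr

with gr.Blocks() as sketch:
    box = gr.Textbox(label="In")
    out = gr.Textbox(label="Out")
    go = gr.Button("Go")
    # The first event fills the output; the chained event then clears the input.
    go.click(fn=lambda t: t.upper(), inputs=box, outputs=out).then(
        fn=lambda: "", outputs=box
    )
```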
+# Define FastAPI endpoint
 from fastapi import FastAPI
 from pydantic import BaseModel

 @app.post("/api/generate")
 async def api_generate(request: GenerateRequest):
+    """Simple API endpoint"""
+    model_id = request.model or models[0][1]
+
+    messages = []
+
+    # Process image if provided
+    if request.image_data:
+        try:
+            # Decode base64 image
+            image_bytes = base64.b64decode(request.image_data)
+            image = Image.open(BytesIO(image_bytes))

+            # Re-encode to ensure proper format
+            buffered = BytesIO()
+            image.save(buffered, format="JPEG")
+            base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+            content = [
+                {"type": "text", "text": request.message},
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"data:image/jpeg;base64,{base64_image}"
                     }
+                }
+            ]
+            messages.append({"role": "user", "content": content})
+        except Exception as e:
+            return {"error": f"Image processing error: {str(e)}"}
+    else:
+        messages.append({"role": "user", "content": request.message})
+
+    # Make API call
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
+        "HTTP-Referer": "https://huggingface.co/spaces",
+    }
+
+    data = {
+        "model": model_id,
+        "messages": messages,
+        "temperature": 0.7
+    }
+
+    try:
+        response = requests.post(
+            "https://openrouter.ai/api/v1/chat/completions",
+            headers=headers,
+            json=data,
+            timeout=60
+        )
         response.raise_for_status()

         result = response.json()
         reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")

         return {"response": reply}
     except Exception as e:
         return {"error": f"Error: {str(e)}"}

 # Launch the app
 if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=7860)
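Note on the final hunk: the Blocks are mounted onto the FastAPI app in the unchanged region (the hunk context above shows `app = gr.mount_gradio_app(app, demo, path="/")`), so running uvicorn serves both the UI at `/` and the JSON route at `/api/generate` from a single process. The old `demo.launch()` started only the Gradio server, which as far as one can tell would not have exposed the FastAPI route.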