cstr committed on
Commit
25f51d0
·
verified ·
1 Parent(s): cef7f39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +125 -120
app.py CHANGED
@@ -9,7 +9,7 @@ from PIL import Image
9
  # Get API key from environment variable for security
10
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
11
 
12
- # Simplified model list
13
  models = [
14
  ("Google Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
15
  ("Google Gemini 2.5 Pro", "google/gemini-2.5-pro-exp-03-25:free"),
@@ -19,20 +19,20 @@ models = [
19
  ("Mistral 3.1", "mistralai/mistral-small-3.1-24b-instruct:free")
20
  ]
21
 
22
- def get_response(message, history, model_name, image=None, file=None):
23
- """Simple function to get response from API"""
24
- # Find model ID from name
25
  model_id = next((mid for name, mid in models if name == model_name), models[0][1])
26
 
27
- # Format messages from history
28
  messages = []
29
  for human, ai in history:
30
  messages.append({"role": "user", "content": human})
31
- if ai: # Only add if there's a response
32
  messages.append({"role": "assistant", "content": ai})
33
 
34
- # Process file if provided
35
- if file:
36
  try:
37
  with open(file.name, 'r', encoding='utf-8') as f:
38
  file_content = f.read()
@@ -40,15 +40,13 @@ def get_response(message, history, model_name, image=None, file=None):
40
  except Exception as e:
41
  message = f"{message}\n\nError reading file: {str(e)}"
42
 
43
- # Process image if provided
44
  if image is not None:
45
  try:
46
- # Convert image to base64
47
  buffered = BytesIO()
48
  image.save(buffered, format="JPEG")
49
  base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
50
 
51
- # Create multimodal content
52
  content = [
53
  {"type": "text", "text": message},
54
  {
@@ -60,94 +58,104 @@ def get_response(message, history, model_name, image=None, file=None):
60
  ]
61
  messages.append({"role": "user", "content": content})
62
  except Exception as e:
63
- messages.append({"role": "user", "content": f"{message}\n\nError processing image: {str(e)}"})
 
64
  else:
65
  messages.append({"role": "user", "content": message})
66
 
67
- # Make API call (non-streaming for reliability)
68
- headers = {
69
- "Content-Type": "application/json",
70
- "Authorization": f"Bearer {OPENROUTER_API_KEY}",
71
- "HTTP-Referer": "https://huggingface.co/spaces",
72
- }
73
-
74
- data = {
75
- "model": model_id,
76
- "messages": messages,
77
- "temperature": 0.7,
78
- "max_tokens": 1000
79
- }
80
-
81
  try:
82
  response = requests.post(
83
  "https://openrouter.ai/api/v1/chat/completions",
84
- headers=headers,
85
- json=data,
 
 
 
 
 
 
 
 
 
86
  timeout=60
87
  )
88
  response.raise_for_status()
89
 
90
  result = response.json()
91
- reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
92
-
93
- return reply
94
  except Exception as e:
95
  return f"Error: {str(e)}"
96
 
97
- # Create ultra simple interface
 
 
 
98
  with gr.Blocks() as demo:
99
  gr.Markdown("# 🔆 CrispChat")
100
 
101
- chatbot = gr.Chatbot(height=450)
 
 
 
102
 
103
- with gr.Row():
104
- with gr.Column(scale=3):
105
- msg = gr.Textbox(
106
- placeholder="Type your message here...",
107
- lines=3,
108
- label="Message"
109
- )
110
-
111
- with gr.Column(scale=1):
112
- model = gr.Dropdown(
113
- choices=[name for name, _ in models],
114
- value=models[0][0],
115
- label="Model"
116
- )
117
 
118
- with gr.Row():
119
- with gr.Column(scale=1):
120
- img = gr.Image(type="pil", label="Image (optional)")
121
-
122
- with gr.Column(scale=1):
123
- file = gr.File(label="Text File (optional)")
 
 
 
 
 
 
 
 
124
 
125
  with gr.Row():
126
- submit = gr.Button("Send")
127
- clear = gr.Button("Clear")
 
 
 
 
128
 
129
- # Events
130
- submit.click(
131
- fn=get_response,
132
- inputs=[msg, chatbot, model, img, file],
133
- outputs=chatbot
134
- ).then(
135
- lambda: "", None, None,
136
- outputs=[msg, img, file]
 
 
 
 
 
 
137
  )
138
 
139
- msg.submit(
140
- fn=get_response,
141
- inputs=[msg, chatbot, model, img, file],
142
- outputs=chatbot
143
- ).then(
144
- lambda: "", None, None,
145
- outputs=[msg, img, file]
146
  )
147
 
148
- clear.click(lambda: [], outputs=chatbot)
 
 
 
149
 
150
- # Define FastAPI endpoint
151
  from fastapi import FastAPI
152
  from pydantic import BaseModel
153
 
@@ -160,56 +168,53 @@ class GenerateRequest(BaseModel):
160
 
161
  @app.post("/api/generate")
162
  async def api_generate(request: GenerateRequest):
163
- """Simple API endpoint"""
164
- model_id = request.model or models[0][1]
165
-
166
- messages = []
167
-
168
- # Process image if provided
169
- if request.image_data:
170
- try:
171
- # Decode base64 image
172
- image_bytes = base64.b64decode(request.image_data)
173
- image = Image.open(BytesIO(image_bytes))
174
-
175
- # Re-encode to ensure proper format
176
- buffered = BytesIO()
177
- image.save(buffered, format="JPEG")
178
- base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
179
-
180
- content = [
181
- {"type": "text", "text": request.message},
182
- {
183
- "type": "image_url",
184
- "image_url": {
185
- "url": f"data:image/jpeg;base64,{base64_image}"
186
- }
187
- }
188
- ]
189
- messages.append({"role": "user", "content": content})
190
- except Exception as e:
191
- return {"error": f"Image processing error: {str(e)}"}
192
- else:
193
- messages.append({"role": "user", "content": request.message})
194
-
195
- # Make API call
196
- headers = {
197
- "Content-Type": "application/json",
198
- "Authorization": f"Bearer {OPENROUTER_API_KEY}",
199
- "HTTP-Referer": "https://huggingface.co/spaces",
200
- }
201
-
202
- data = {
203
- "model": model_id,
204
- "messages": messages,
205
- "temperature": 0.7
206
- }
207
-
208
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  response = requests.post(
210
  "https://openrouter.ai/api/v1/chat/completions",
211
- headers=headers,
212
- json=data,
 
 
 
 
 
 
 
 
213
  timeout=60
214
  )
215
  response.raise_for_status()
@@ -224,7 +229,7 @@ async def api_generate(request: GenerateRequest):
224
  # Mount Gradio app
225
  app = gr.mount_gradio_app(app, demo, path="/")
226
 
227
- # Launch the app
228
  if __name__ == "__main__":
229
  import uvicorn
230
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
9
  # Get API key from environment variable for security
10
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
11
 
12
+ # Model list
13
  models = [
14
  ("Google Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
15
  ("Google Gemini 2.5 Pro", "google/gemini-2.5-pro-exp-03-25:free"),
 
19
  ("Mistral 3.1", "mistralai/mistral-small-3.1-24b-instruct:free")
20
  ]
21
 
22
+ def get_ai_response(message, history, model_name, image=None, file=None):
23
+ """Get response from AI"""
24
+ # Find model ID
25
  model_id = next((mid for name, mid in models if name == model_name), models[0][1])
26
 
27
+ # Prepare messages
28
  messages = []
29
  for human, ai in history:
30
  messages.append({"role": "user", "content": human})
31
+ if ai:
32
  messages.append({"role": "assistant", "content": ai})
33
 
34
+ # Handle file
35
+ if file is not None:
36
  try:
37
  with open(file.name, 'r', encoding='utf-8') as f:
38
  file_content = f.read()
 
40
  except Exception as e:
41
  message = f"{message}\n\nError reading file: {str(e)}"
42
 
43
+ # Handle image
44
  if image is not None:
45
  try:
 
46
  buffered = BytesIO()
47
  image.save(buffered, format="JPEG")
48
  base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
49
 
 
50
  content = [
51
  {"type": "text", "text": message},
52
  {
 
58
  ]
59
  messages.append({"role": "user", "content": content})
60
  except Exception as e:
61
+ messages.append({"role": "user", "content": message})
62
+ return f"Error processing image: {str(e)}"
63
  else:
64
  messages.append({"role": "user", "content": message})
65
 
66
+ # API call
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  try:
68
  response = requests.post(
69
  "https://openrouter.ai/api/v1/chat/completions",
70
+ headers={
71
+ "Content-Type": "application/json",
72
+ "Authorization": f"Bearer {OPENROUTER_API_KEY}",
73
+ "HTTP-Referer": "https://huggingface.co/spaces",
74
+ },
75
+ json={
76
+ "model": model_id,
77
+ "messages": messages,
78
+ "temperature": 0.7,
79
+ "max_tokens": 1000
80
+ },
81
  timeout=60
82
  )
83
  response.raise_for_status()
84
 
85
  result = response.json()
86
+ return result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
 
 
87
  except Exception as e:
88
  return f"Error: {str(e)}"
89
 
90
def clear_inputs():
    """Reset the three input widgets: empty message text, no image, no file."""
    cleared_message, cleared_image, cleared_file = "", None, None
    return cleared_message, cleared_image, cleared_file
93
+
94
with gr.Blocks() as demo:
    gr.Markdown("# 🔆 CrispChat")

    # History is kept in the openai-style "messages" format:
    # a list of {"role": ..., "content": ...} dicts.
    chatbot = gr.Chatbot(
        height=450,
        type="messages"  # Use the new format as suggested in the warning
    )

    model_selector = gr.Dropdown(
        choices=[name for name, _ in models],
        value=models[0][0],
        label="Model"
    )

    msg_input = gr.Textbox(
        placeholder="Type your message here...",
        lines=3,
        label="Message"
    )

    img_input = gr.Image(
        type="pil",
        label="Image (optional)"
    )

    file_input = gr.File(
        label="Text File (optional)"
    )

    with gr.Row():
        submit_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Chat")

    # Clear handler: empty the chat history.
    def clear_chat():
        return []

    # Submit handler.
    def submit_message(message, chat_history, model, image, file):
        """Send the user's message (plus optional image/file) to the model.

        Returns the updated messages-format history and cleared inputs.
        """
        if not message and not image and not file:
            return chat_history, "", None, None

        # BUG FIX: the Chatbot uses type="messages" (role/content dicts),
        # but get_ai_response expects (user, assistant) tuple pairs, and
        # the old code appended a raw tuple to the history — which the
        # messages-format Chatbot rejects. Convert in both directions here.
        pairs = []
        pending_user = None
        for turn in chat_history:
            if turn["role"] == "user":
                pending_user = turn["content"]
            elif turn["role"] == "assistant" and pending_user is not None:
                pairs.append((pending_user, turn["content"]))
                pending_user = None

        response = get_ai_response(message, pairs, model, image, file)

        # Append the new exchange in messages format (no in-place mutation,
        # so Gradio sees a fresh list).
        chat_history = chat_history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        ]
        return chat_history, "", None, None

    # Set up events
    submit_btn.click(
        fn=submit_message,
        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
        outputs=[chatbot, msg_input, img_input, file_input]
    )

    msg_input.submit(
        fn=submit_message,
        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
        outputs=[chatbot, msg_input, img_input, file_input]
    )

    clear_btn.click(
        fn=clear_chat,
        outputs=[chatbot]
    )
157
 
158
+ # FastAPI endpoint
159
  from fastapi import FastAPI
160
  from pydantic import BaseModel
161
 
 
168
 
169
  @app.post("/api/generate")
170
  async def api_generate(request: GenerateRequest):
171
+ """API endpoint for text generation"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  try:
173
+ model_id = request.model or models[0][1]
174
+
175
+ # Prepare messages
176
+ messages = []
177
+
178
+ # Handle image
179
+ if request.image_data:
180
+ try:
181
+ # Decode base64 image
182
+ image_bytes = base64.b64decode(request.image_data)
183
+ image = Image.open(BytesIO(image_bytes))
184
+
185
+ # Re-encode
186
+ buffered = BytesIO()
187
+ image.save(buffered, format="JPEG")
188
+ base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
189
+
190
+ content = [
191
+ {"type": "text", "text": request.message},
192
+ {
193
+ "type": "image_url",
194
+ "image_url": {
195
+ "url": f"data:image/jpeg;base64,{base64_image}"
196
+ }
197
+ }
198
+ ]
199
+ messages.append({"role": "user", "content": content})
200
+ except Exception as e:
201
+ return {"error": f"Image processing error: {str(e)}"}
202
+ else:
203
+ messages.append({"role": "user", "content": request.message})
204
+
205
+ # API call
206
  response = requests.post(
207
  "https://openrouter.ai/api/v1/chat/completions",
208
+ headers={
209
+ "Content-Type": "application/json",
210
+ "Authorization": f"Bearer {OPENROUTER_API_KEY}",
211
+ "HTTP-Referer": "https://huggingface.co/spaces",
212
+ },
213
+ json={
214
+ "model": model_id,
215
+ "messages": messages,
216
+ "temperature": 0.7
217
+ },
218
  timeout=60
219
  )
220
  response.raise_for_status()
 
229
  # Mount Gradio app
230
  app = gr.mount_gradio_app(app, demo, path="/")
231
 
232
+ # Launch
233
  if __name__ == "__main__":
234
  import uvicorn
235
  uvicorn.run(app, host="0.0.0.0", port=7860)