# CrispChat / app.py
# (Hugging Face Space; revision 25f51d0)
import base64
import json
import os
from io import BytesIO
from typing import Optional

import gradio as gr
import requests
from PIL import Image
# API key is read from the environment so it never appears in source control;
# an empty string means unauthenticated requests (OpenRouter will reject them).
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
# (display name, OpenRouter model id) pairs shown in the UI dropdown.
# All ids use OpenRouter's ":free" tier variants; the first entry is the default.
models = [
("Google Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
("Google Gemini 2.5 Pro", "google/gemini-2.5-pro-exp-03-25:free"),
("Meta Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
("Qwen 2.5 VL", "qwen/qwen2.5-vl-72b-instruct:free"),
("DeepSeek R1", "deepseek/deepseek-r1:free"),
("Mistral 3.1", "mistralai/mistral-small-3.1-24b-instruct:free")
]
def get_ai_response(message, history, model_name, image=None, file=None):
    """Query the OpenRouter chat-completions API and return the reply text.

    Args:
        message: The user's new message text.
        history: Prior conversation turns. Accepts either (user, assistant)
            tuple pairs or Gradio "messages"-style dicts with role/content
            (the Chatbot below is configured with type="messages").
        model_name: Display name from ``models``; unknown names fall back
            to the first model's id.
        image: Optional PIL image, sent inline as a base64 JPEG data URL.
        file: Optional uploaded text file whose content is appended to the
            message.

    Returns:
        The assistant's reply text, or an "Error: ..." string on failure.
    """
    # Map the display name to its OpenRouter model id.
    model_id = next((mid for name, mid in models if name == model_name), models[0][1])

    # Rebuild prior turns in OpenAI message format. Tolerate both tuple-pair
    # history and "messages"-style dict history, since the Chatbot component
    # uses type="messages" while older callers may pass pairs.
    messages = []
    for turn in history:
        if isinstance(turn, dict):
            if turn.get("content"):
                messages.append({"role": turn.get("role", "user"),
                                 "content": turn["content"]})
        else:
            human, ai = turn
            messages.append({"role": "user", "content": human})
            if ai:
                messages.append({"role": "assistant", "content": ai})

    # Inline an uploaded text file into the prompt; a read failure is
    # reported inside the message rather than aborting the request.
    if file is not None:
        try:
            with open(file.name, 'r', encoding='utf-8') as f:
                file_content = f.read()
            message = f"{message}\n\nFile content:\n```\n{file_content}\n```"
        except Exception as e:
            message = f"{message}\n\nError reading file: {str(e)}"

    # Attach the image, if provided, as a base64 data URL.
    if image is not None:
        try:
            # JPEG cannot encode alpha/palette modes; normalize to RGB first.
            if image.mode not in ("RGB", "L"):
                image = image.convert("RGB")
            buffered = BytesIO()
            image.save(buffered, format="JPEG")
            base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
            content = [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                }
            ]
            messages.append({"role": "user", "content": content})
        except Exception as e:
            return f"Error processing image: {str(e)}"
    else:
        messages.append({"role": "user", "content": message})

    # Call OpenRouter; any network/HTTP failure is reported as a string so
    # the UI can display it instead of crashing the event handler.
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "HTTP-Referer": "https://huggingface.co/spaces",
            },
            json={
                "model": model_id,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 1000
            },
            timeout=60
        )
        response.raise_for_status()
        result = response.json()
        return result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
    except Exception as e:
        return f"Error: {str(e)}"
def clear_inputs():
    """Reset the message, image, and file inputs to their empty defaults."""
    empty_text, empty_image, empty_file = "", None, None
    return empty_text, empty_image, empty_file
with gr.Blocks() as demo:
    gr.Markdown("# 🔆 CrispChat")
    chatbot = gr.Chatbot(
        height=450,
        type="messages"  # history is a list of {"role", "content"} dicts
    )
    model_selector = gr.Dropdown(
        choices=[name for name, _ in models],
        value=models[0][0],
        label="Model"
    )
    msg_input = gr.Textbox(
        placeholder="Type your message here...",
        lines=3,
        label="Message"
    )
    img_input = gr.Image(
        type="pil",
        label="Image (optional)"
    )
    file_input = gr.File(
        label="Text File (optional)"
    )
    with gr.Row():
        submit_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Chat")

    def clear_chat():
        """Empty the chat history."""
        return []

    def submit_message(message, chat_history, model, image, file):
        """Send the user's message (plus optional image/file) and update chat.

        The Chatbot uses the "messages" format, so ``chat_history`` holds
        {"role", "content"} dicts; ``get_ai_response`` expects
        (user, assistant) pairs, so the history is converted before calling.
        Returns the new history plus cleared message/image/file inputs.
        """
        if not message and not image and not file:
            return chat_history, "", None, None
        # Convert messages-format history into (user, assistant) pairs.
        pairs = []
        for msg in chat_history:
            if msg.get("role") == "user":
                pairs.append([msg.get("content"), None])
            elif pairs and pairs[-1][1] is None:
                pairs[-1][1] = msg.get("content")
        response = get_ai_response(message, pairs, model, image, file)
        # Append the new turn in messages format so the Chatbot can render it
        # (appending a tuple here would break the type="messages" component).
        chat_history = chat_history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        ]
        return chat_history, "", None, None

    # Wire both the Send button and textbox Enter key to the same handler.
    submit_btn.click(
        fn=submit_message,
        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
        outputs=[chatbot, msg_input, img_input, file_input]
    )
    msg_input.submit(
        fn=submit_message,
        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
        outputs=[chatbot, msg_input, img_input, file_input]
    )
    clear_btn.click(
        fn=clear_chat,
        outputs=[chatbot]
    )
# --- Optional JSON API, mounted alongside the Gradio UI ---
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()  # REST wrapper; the Gradio app is mounted onto it below
class GenerateRequest(BaseModel):
    """JSON body for the /api/generate endpoint."""

    # Required user prompt text.
    message: str
    # Optional OpenRouter model id; defaults to the first entry in `models`.
    # (Annotated Optional: a plain `str = None` default is rejected by
    # pydantic v2's strict annotation handling.)
    model: Optional[str] = None
    # Optional base64-encoded image bytes (raw base64 or a full data URL).
    image_data: Optional[str] = None
@app.post("/api/generate")
async def api_generate(request: GenerateRequest):
    """Generate a chat completion via OpenRouter.

    Accepts a message, an optional model id, and optional base64 image
    data. Returns ``{"response": text}`` on success or ``{"error": text}``
    on any failure (image decoding, network, HTTP status).
    """
    try:
        model_id = request.model or models[0][1]
        messages = []
        # Build the single user message, with the image inlined if present.
        if request.image_data:
            try:
                # Accept both raw base64 and full "data:image/...;base64," URLs.
                image_data = request.image_data
                if image_data.strip().startswith("data:") and "," in image_data:
                    image_data = image_data.split(",", 1)[1]
                image_bytes = base64.b64decode(image_data)
                image = Image.open(BytesIO(image_bytes))
                # Re-encode as JPEG for a compact, uniform data URL.
                # JPEG cannot encode alpha/palette modes; normalize first.
                if image.mode not in ("RGB", "L"):
                    image = image.convert("RGB")
                buffered = BytesIO()
                image.save(buffered, format="JPEG")
                base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
                content = [
                    {"type": "text", "text": request.message},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
                messages.append({"role": "user", "content": content})
            except Exception as e:
                return {"error": f"Image processing error: {str(e)}"}
        else:
            messages.append({"role": "user", "content": request.message})
        # Forward to OpenRouter; failures fall through to the outer handler.
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "HTTP-Referer": "https://huggingface.co/spaces",
            },
            json={
                "model": model_id,
                "messages": messages,
                "temperature": 0.7
            },
            timeout=60
        )
        response.raise_for_status()
        result = response.json()
        reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
        return {"response": reply}
    except Exception as e:
        return {"error": f"Error: {str(e)}"}
# Serve the Gradio UI at the root path of the FastAPI app, so both the
# web interface and the /api/generate endpoint share one server.
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    # Local/Space entry point (Hugging Face Spaces expects port 7860).
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)