import gradio as gr
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import datetime
import requests
import os
import asyncio
# Initialize FastAPI
app = FastAPI()

# Configuration
API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B"
headers = {
    "Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}",
    "Content-Type": "application/json"
}
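
# The serverless Inference API for text-generation models expects "inputs" to be a
# single prompt string rather than a structured messages object, so the OpenAI-style
# messages list is flattened here. This is a minimal sketch assuming Qwen's ChatML
# template; for full fidelity, apply the model's own chat template (e.g. via
# tokenizer.apply_chat_template) instead of this hand-rolled version.
def messages_to_prompt(messages):
    parts = [
        f"<|im_start|>{msg.get('role', 'user')}\n{msg.get('content', '')}<|im_end|>"
        for msg in messages
    ]
    # Leave the assistant turn open so the model generates the reply
    parts.append("<|im_start|>assistant\n")
    return "\n".join(parts)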
def format_chat_response(response_text, prompt_tokens=0, completion_tokens=0):
    """Wrap generated text in an OpenAI-style chat.completion response body."""
    return {
        "id": f"chatcmpl-{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}",
        "object": "chat.completion",
        "created": int(datetime.datetime.now().timestamp()),
        "model": "Qwen/Qwen2.5-Coder-32B",
        "choices": [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": response_text
            },
            "finish_reason": "stop"
        }],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens
        }
    }
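
# Rough token accounting (a sketch): the text-generation response used here does not
# report usage numbers, so a real implementation would count tokens with the model's
# tokenizer. The whitespace split below is a crude, clearly-labeled stand-in.
def estimate_tokens(text):
    return len(text.split())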
async def query_model(payload):
    # requests is blocking; run it in a worker thread so the event loop stays free
    response = await asyncio.to_thread(
        requests.post, API_URL, headers=headers, json=payload, timeout=120
    )
    return response.json()
# Register the OpenAI-compatible endpoint advertised in the Space description;
# without this decorator the handler below is never reachable.
@app.post("/v1/chat/completions")
async def chat_completion(request: Request):
    try:
        data = await request.json()
        messages = data.get("messages", [])

        # Prepare the payload for the Inference API (string prompt; see helper above)
        payload = {
            "inputs": messages_to_prompt(messages),
            "parameters": {
                "max_new_tokens": data.get("max_tokens", 2048),
                "temperature": data.get("temperature", 0.7),
                "top_p": data.get("top_p", 0.95),
                "do_sample": True,
                # Return only the completion, not the prompt echoed back
                "return_full_text": False
            }
        }
        # Get response from model
        response = await query_model(payload)

        if isinstance(response, dict) and "error" in response:
            return JSONResponse(
                status_code=500,
                content={"error": response["error"]}
            )

        response_text = response[0]["generated_text"]

        return JSONResponse(
            content=format_chat_response(
                response_text,
                # Whitespace-based estimates only (see estimate_tokens above); exact
                # counts would need the model tokenizer or usage data from the API
                prompt_tokens=estimate_tokens(payload["inputs"]),
                completion_tokens=estimate_tokens(response_text)
            )
        )
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={"error": str(e)}
        )
# Synchronous helper used by the Gradio interface
def generate_response(messages):
    payload = {
        "inputs": messages_to_prompt(messages),
        "parameters": {
            "max_new_tokens": 2048,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": True,
            "return_full_text": False
        }
    }
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    result = response.json()

    if isinstance(result, dict) and "error" in result:
        return f"Error: {result['error']}"
    return result[0]["generated_text"]
# Gradio interface for testing
def chat_interface(message, history):
    # history arrives as (user, assistant) tuples in ChatInterface's default format
    history = history or []
    messages = []

    # Convert history to the OpenAI-style messages format
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Add the current message
    messages.append({"role": "user", "content": message})

    # Generate the response synchronously
    try:
        return generate_response(messages)
    except Exception as e:
        return f"Error generating response: {str(e)}"
interface = gr.ChatInterface(
    chat_interface,
    title="Qwen2.5-Coder-32B Chat",
    description="Chat with Qwen2.5-Coder-32B model via Hugging Face Inference API. This Space also provides a /v1/chat/completions endpoint."
)
# Mount the Gradio UI onto the FastAPI app so both are served from one process
app = gr.mount_gradio_app(app, interface, path="/")
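
# Local entry point (a sketch): on Spaces the platform launches the app itself, but
# running this file directly with uvicorn is convenient for testing. Port 7860 is
# the Spaces convention.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example client call against the OpenAI-style endpoint ("your-space.hf.space" is a
# placeholder for the actual Space URL):
#
#   import requests
#   resp = requests.post(
#       "https://your-space.hf.space/v1/chat/completions",
#       json={"messages": [{"role": "user", "content": "Write hello world in Rust."}]},
#   )
#   print(resp.json()["choices"][0]["message"]["content"])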