import gradio as gr
import requests
import os
# Define the Hugging Face API endpoint
api_url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct"
# Retrieve the token from the environment
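# (on Hugging Face Spaces, HF_Token is typically stored as a repository secret and exposed as an environment variable)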
headers = {"Authorization": f"Bearer {os.getenv('HF_Token')}"}
# Define a function to send prompts to the model and get responses
def query(prompt):
    response = requests.post(api_url, headers=headers, json={"inputs": prompt})
    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    else:
        return f"Error {response.status_code}: {response.text}"
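# Note: for text-generation models the Inference API typically returns a JSON list
# of the form [{"generated_text": "..."}], hence the [0]["generated_text"] lookup above.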
# Create a Gradio interface with text input and output
demo = gr.Interface(
    fn=query,
    inputs="text",
    outputs="text",
    title="Qwen-2.5 72B Interaction",
    description="Ask complex mathematical or pattern-related questions and get responses from Qwen-2.5 72B."
)
# Launch the interface
demo.launch()
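# For reference (illustrative sketch, not part of the Space itself), the same request
# can be made outside Gradio with curl; the prompt below is just an example:
#
#   curl https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct \
#        -H "Authorization: Bearer $HF_Token" \
#        -H "Content-Type: application/json" \
#        -d '{"inputs": "What is the next number in the sequence 2, 4, 8, 16?"}'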