import os

import gradio as gr
import requests
from dotenv import load_dotenv

load_dotenv()
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Hugging Face Inference API configuration
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}

# Function to interact with the Hugging Face model
def query_huggingface_api(input_text):
    payload = {"inputs": input_text}
    try:
        response = requests.post(API_URL, headers=HEADERS, json=payload)
        response.raise_for_status()  # Raise an error for HTTP error responses
        return response.json()[0]["generated_text"]
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"

# Gradio interface
def chatbot(input_text):
    response = query_huggingface_api(input_text)
    return response

iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs=gr.Textbox(),
    title="AI Chatbot",
    description="Chat with the AI powered by Hugging Face Mistral-7B-Instruct-v0.3.",
)

if __name__ == "__main__":
    iface.launch()
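
# A possible quick sanity check (hypothetical prompt) when testing without the UI,
# assuming a valid HUGGINGFACE_TOKEN in .env:
#     print(query_huggingface_api("Hello! Who are you?"))
# Passing share=True to iface.launch() is another standard Gradio option for getting
# a temporary public link while testing outside the Space.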