import gradio as gr
import requests
from dotenv import load_dotenv
import os
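# Load the Hugging Face token from a local .env file (expects a line like HUGGINGFACE_TOKEN=<your token>)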
load_dotenv()
HUGGINGFACE_TOKEN = os.getenv('HUGGINGFACE_TOKEN')
# Hugging Face API configuration
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
HEADERS = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
# Function to interact with the Hugging Face model
def query_huggingface_api(input_text):
    payload = {"inputs": input_text}
    try:
        response = requests.post(API_URL, headers=HEADERS, json=payload)
        response.raise_for_status()  # Raise an exception for HTTP error responses
        # The Inference API returns a list of results; use the generated text from the first one
        return response.json()[0]["generated_text"]
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"
# Gradio interface
def chatbot(input_text):
    response = query_huggingface_api(input_text)
    return response
iface = gr.Interface(
    fn=chatbot,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs=gr.Textbox(),
    title="AI Chatbot",
    description="Chat with the AI powered by Hugging Face Mistral-7B-Instruct-v0.3.",
)
if __name__ == "__main__":
    iface.launch()
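
# Typical local run, assuming gradio, requests and python-dotenv are installed
# and this file is saved as app.py:
#   python app.py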