#import gradio as gr
#gr.load("models/mistralai/Mistral-7B-Instruct-v0.3").launch()
import os
import requests
import discord
from discord.ext import commands
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
DISCORD_TOKEN = os.getenv('dsTOK')
HF_API_KEY = os.getenv('HFREAD')
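# A sketch of the expected .env layout (values are placeholders, not real tokens):
#   dsTOK=<your Discord bot token>
#   HFREAD=<your Hugging Face read-access token>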
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
# Function to query the Hugging Face model
def query_huggingface(payload):
    try:
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error querying the API: {e}")
        return {"error": str(e)}
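# Example call (a sketch; the optional "parameters" block, with knobs such as
# max_new_tokens, is passed through to the Inference API text-generation endpoint):
#   query_huggingface({"inputs": "Hello", "parameters": {"max_new_tokens": 100}})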
# Initialize the Discord bot
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
bot = commands.Bot(command_prefix="!", intents=intents)
@bot.event
async def on_ready():
    print(f'Bot is ready. Logged in as {bot.user}')
@bot.command(name='ask')
async def ask(ctx, *, question: str):
"""
Command to ask a question to the Hugging Face model.
"""
await ctx.send(f"Question: {question}")
response = query_huggingface({"inputs": question})
if 'generated_text' in response:
await ctx.send(f"Response: {response['generated_text']}")
elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
await ctx.send(f"Response: {response[0]['generated_text']}")
else:
await ctx.send("Sorry, I couldn't generate a response.")
# Run the bot
bot.run(DISCORD_TOKEN)