"""Discord bot that relays `!ask` questions to the Hugging Face Inference API.

Sends the question (wrapped in an instructive prompt) to the
Mistral-7B-Instruct-v0.3 hosted inference endpoint and posts the generated
text back to the channel.

Required environment variables (loaded from .env):
  dsTOK  — Discord bot token
  HFREAD — Hugging Face API key
"""

import os

import discord
import requests
from discord.ext import commands
from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

DISCORD_TOKEN = os.getenv('dsTOK')
HF_API_KEY = os.getenv('HFREAD')

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
headers = {"Authorization": f"Bearer {HF_API_KEY}"}


def query_huggingface(prompt):
    """Query the Hugging Face model with a structured prompt.

    Returns the decoded JSON on success (normally a list of the form
    ``[{"generated_text": ...}]``), or ``{"error": "<message>"}`` on any
    request failure.
    """
    try:
        # Timeout keeps the bot from hanging forever on a stalled request;
        # HTTP errors (4xx/5xx) are converted to RequestException below.
        response = requests.post(
            API_URL, headers=headers, json={"inputs": prompt}, timeout=60
        )
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error querying the API: {e}")
        return {"error": str(e)}


# Initialize the Discord bot — message_content is a privileged intent and
# must also be enabled in the Discord developer portal.
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True

bot = commands.Bot(command_prefix="!", intents=intents)


@bot.event
async def on_ready():
    """Log a confirmation once the gateway connection is established."""
    print(f'Bot is ready. Logged in as {bot.user}')


@bot.command(name='ask')
async def ask(ctx, *, question: str):
    """
    Command to ask a question to the Hugging Face model with an instructive prompt.
    """
    # Create a structured prompt
    prompt = f"Keep Responses to a 'PG' Rating OR lower. Do not put input in your response. Do not be annoying. Please provide a detailed response to the following: {question}"
    await ctx.send(f"Question: {question}")
    response = query_huggingface(prompt)

    # Successful inference normally yields [{"generated_text": ...}]; an error
    # from query_huggingface yields {"error": ...}. Check the list shape first
    # and guard with isinstance so neither shape can raise here.
    generated = None
    if isinstance(response, list) and response and 'generated_text' in response[0]:
        generated = response[0]['generated_text']
    elif isinstance(response, dict) and 'generated_text' in response:
        generated = response['generated_text']

    if generated:
        # Discord rejects messages over 2000 characters; truncate so the
        # "Response: " prefix plus the text always fits.
        await ctx.send(f"Response: {generated[:1990]}")
    else:
        await ctx.send("Sorry, I couldn't generate a response.")


# Run the bot only when executed as a script, and fail loudly if the token
# is missing instead of passing None to bot.run().
if __name__ == "__main__":
    if not DISCORD_TOKEN:
        raise SystemExit("Missing 'dsTOK' environment variable; cannot start bot.")
    bot.run(DISCORD_TOKEN)