# NOTE(review): the lines "Spaces:" / "Runtime error" below were status text
# pasted in from the Hugging Face Spaces UI, not code; kept here as a comment
# so the file remains valid Python.
import os

import requests
import discord
from discord.ext import commands
from dotenv import load_dotenv

# Load environment variables from the .env file (expects dsTOK and HFREAD).
load_dotenv()

DISCORD_TOKEN = os.getenv('dsTOK')
HF_API_KEY = os.getenv('HFREAD')

# Hugging Face Inference API endpoint for the Mistral-7B-Instruct model.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
# Function to query the Hugging Face model with a structured prompt
def query_huggingface(prompt, temperature=0.5, max_tokens=100):
    """Query the Hugging Face Inference API and return the decoded JSON.

    Args:
        prompt: Full instruction/question text sent as the model input.
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Maximum number of new tokens the model may generate.

    Returns:
        The parsed JSON response on success, or a dict with an "error" key
        describing the request failure.
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
        },
    }
    try:
        # A timeout prevents the bot from hanging indefinitely on a stalled
        # request; Timeout is a RequestException subclass, so the existing
        # error path handles it.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error querying the API: {e}")
        return {"error": str(e)}
# Initialize the Discord bot. message_content is a privileged intent required
# to read command text from messages (must also be enabled in the dev portal).
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
bot = commands.Bot(command_prefix="!", intents=intents)
# NOTE(review): the @bot.event decorator was absent in the pasted source;
# without it this handler is never registered — restored here.
@bot.event
async def on_ready():
    """Log a confirmation message once the bot has connected to Discord."""
    print(f'Bot is ready. Logged in as {bot.user}')
# NOTE(review): the @bot.command() decorator was absent in the pasted source;
# without it the command is never registered with the bot — restored here.
@bot.command()
async def ask(ctx, question: str, temperature: float = 0.5, max_tokens: int = 100):
    """
    Command to ask a question to the Hugging Face model with an instructive prompt.

    Usage: !ask "<question>" [temperature] [max_tokens]
    """
    try:
        # Convert temperature and max_tokens to their respective types
        # (Discord delivers arguments as strings).
        temperature = float(temperature)
        max_tokens = int(max_tokens)
    except ValueError:
        await ctx.send("Invalid input for temperature or max_tokens. Please provide a valid number.")
        return

    # Create a structured prompt
    prompt = f"Do not under any circumstance, finish or add to the input. Excluded input correction or finishing at all times. Answer the following question as accurately and concisely as possible: {question}"
    response = query_huggingface(prompt, temperature, max_tokens)

    # Extract and clean the response: the Inference API may return either a
    # dict or a list of generations, or an {"error": ...} dict on failure.
    if isinstance(response, dict) and 'generated_text' in response:
        response_text = response['generated_text']
    elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
        response_text = response[0]['generated_text']
    else:
        response_text = "Sorry, I couldn't generate a response."

    # Remove the prompt from the response if present (the API echoes the input)
    clean_response = response_text.replace(prompt, '').strip()
    # Avoid prompt completion issues by removing any leading incomplete sentence
    if clean_response.startswith(question):
        clean_response = clean_response[len(question):].strip()

    await ctx.send(clean_response)
# Run the bot only when executed as a script; fail fast with a clear message
# if the token was never loaded from the environment.
if __name__ == "__main__":
    if not DISCORD_TOKEN:
        raise SystemExit("dsTOK environment variable is not set; cannot start the bot.")
    bot.run(DISCORD_TOKEN)