# NOTE(review): the following header was Hugging Face Spaces page residue
# (build status, commit hashes, gutter line numbers) accidentally captured
# with the source; it has been commented out so the file parses as Python.
#import gradio as gr
#gr.load("models/mistralai/Mistral-7B-Instruct-v0.3").launch()
import os
import requests
import discord
from discord.ext import commands
from dotenv import load_dotenv
import logging
# Load environment variables from the .env file
load_dotenv()
# Credentials pulled from the environment: the Discord bot token and the
# Hugging Face API key. NOTE(review): either may be None if the variable is
# unset — bot.run() / the inference call will then fail at runtime.
DISCORD_TOKEN = os.getenv('dsTOK')
HF_API_KEY = os.getenv('HFREAD')
# Hosted Inference API endpoint for the Mistral-7B-Instruct-v0.3 model.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
# Bearer-token auth header sent with every inference request.
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Function to query the Hugging Face model with a structured prompt
def query_huggingface(prompt, temperature=0.5, max_tokens=100, top_k=50, top_p=0.9, timeout=30):
    """Send *prompt* to the Hugging Face Inference API and return the parsed JSON.

    Parameters
    ----------
    prompt : str
        Full instruction + question text sent as the model input.
    temperature : float
        Sampling temperature (higher = more random output).
    max_tokens : int
        Maximum number of new tokens the model may generate.
    top_k : int
        Top-k sampling cutoff.
    top_p : float
        Nucleus (top-p) sampling cutoff.
    timeout : float
        Seconds to wait for the HTTP response before aborting. Added with a
        default so existing callers are unaffected; without it a hung API
        call would block the bot's event loop indefinitely.

    Returns
    -------
    dict | list
        The API's JSON payload on success, or ``{"error": <message>}`` on
        any request failure (connection error, HTTP error, timeout).
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "top_k": top_k,
            "top_p": top_p
        }
    }
    try:
        # timeout guards against the API hanging; requests has no default timeout.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        logger.error(f"Error querying the API: {e}")
        return {"error": str(e)}
# Initialize the Discord bot with a default prefix.
intents = discord.Intents.default()
intents.messages = True
# Required privileged intent: without it message content is empty and prefix
# commands never trigger.
intents.message_content = True
default_prefix = "!"
# help_command=None disables discord.py's built-in help command; this file
# registers its own `!help` command below, and keeping the default one would
# raise CommandRegistrationError at startup (duplicate command name 'help').
bot = commands.Bot(
    command_prefix=commands.when_mentioned_or(default_prefix),
    intents=intents,
    help_command=None,
)
@bot.event
async def on_ready():
    """Log a confirmation line once the bot has connected to Discord."""
    ready_message = f'Bot is ready. Logged in as {bot.user}'
    logger.info(ready_message)
@bot.command(name='ask')
@commands.cooldown(rate=1, per=10, type=commands.BucketType.user)
async def ask(ctx, question: str, temperature: float = 0.5, max_tokens: int = 100, top_k: int = 50, top_p: float = 0.9):
    """
    Command to ask a question to the Hugging Face model with an instructive prompt.

    NOTE(review): discord.py passes only the first whitespace-separated word
    as *question* unless the user quotes it ("like this") — confirm this is
    the intended UX before changing the signature, since the trailing
    optional parameters depend on the current argument layout.
    """
    try:
        # discord.py's converters normally handle these casts already; kept as
        # a defensive guard so behavior is unchanged for callers that bypass
        # the converter machinery.
        temperature = float(temperature)
        max_tokens = int(max_tokens)
        top_k = int(top_k)
        top_p = float(top_p)
    except ValueError:
        await ctx.send("Invalid input for one of the parameters. Please provide valid numbers.")
        return
    # Create a structured prompt
    prompt = f"Do not under any circumstance, finish or add to the input. Excluded input correction or finishing at all times. Answer the following question as accurately and concisely as possible: {question}"
    response = query_huggingface(prompt, temperature, max_tokens, top_k, top_p)
    # Extract and clean the response. The API returns either a dict or a
    # list of dicts with a 'generated_text' key; anything else (including
    # the {"error": ...} dict from query_huggingface) gets the fallback.
    if isinstance(response, dict) and 'generated_text' in response:
        response_text = response['generated_text']
    elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
        response_text = response[0]['generated_text']
    else:
        response_text = "Sorry, I couldn't generate a response."
    # Remove the prompt from the response if present
    clean_response = response_text.replace(prompt, '').strip()
    # Avoid prompt completion issues by removing any leading incomplete sentence
    if clean_response.startswith(question):
        clean_response = clean_response[len(question):].strip()
    # Discord rejects empty messages and messages over 2000 characters with
    # an HTTPException — guard both cases before sending.
    if not clean_response:
        clean_response = "Sorry, I couldn't generate a response."
    await ctx.send(clean_response[:2000])
@ask.error
async def ask_error(ctx, error):
    """Report the remaining cooldown when `ask` is invoked too frequently."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send(f"This command is on cooldown. Please try again after {int(error.retry_after)} seconds.")
@bot.command(name='setprefix')
@commands.has_permissions(administrator=True)
async def set_prefix(ctx, prefix: str):
    """Change the bot's command prefix at runtime (administrators only)."""
    new_prefix_callable = commands.when_mentioned_or(prefix)
    bot.command_prefix = new_prefix_callable
    await ctx.send(f"Command prefix changed to: {prefix}")
@bot.command(name='help')
async def help_command(ctx):
    """Send a usage summary of every command the bot understands."""
    lines = [
        "Here are the commands you can use:",
        "!ask <question> [temperature] [max_tokens] [top_k] [top_p] - Ask a question to the AI model.",
        "Optional parameters:",
        "  temperature (default=0.5) - Controls the randomness of the response.",
        "  max_tokens (default=100) - Limits the length of the response.",
        "  top_k (default=50) - Limits the number of highest probability vocabulary tokens to consider.",
        "  top_p (default=0.9) - Limits the cumulative probability of the highest probability vocabulary tokens.",
        "!setprefix <prefix> - Change the command prefix (admin only).",
        "!help - Display this help message.",
    ]
    await ctx.send("\n".join(lines))
# Run the bot. Fail fast with a clear message if the token is missing —
# bot.run(None) would otherwise die with an opaque login error.
if not DISCORD_TOKEN:
    raise RuntimeError("Discord token not found; set the 'dsTOK' environment variable.")
bot.run(DISCORD_TOKEN)