Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -5,6 +5,7 @@ import requests
|
|
5 |
import discord
|
6 |
from discord.ext import commands
|
7 |
from dotenv import load_dotenv
|
|
|
8 |
|
9 |
# Load environment variables from the .env file
|
10 |
load_dotenv()
|
@@ -15,13 +16,19 @@ HF_API_KEY = os.getenv('HFREAD')
|
|
15 |
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
|
16 |
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
|
17 |
|
|
|
|
|
|
|
|
|
18 |
# Function to query the Hugging Face model with a structured prompt
|
19 |
-
def query_huggingface(prompt, temperature=0.5, max_tokens=100):
|
20 |
payload = {
|
21 |
"inputs": prompt,
|
22 |
"parameters": {
|
23 |
"temperature": temperature,
|
24 |
-
"max_new_tokens": max_tokens
|
|
|
|
|
25 |
}
|
26 |
}
|
27 |
try:
|
@@ -29,36 +36,39 @@ def query_huggingface(prompt, temperature=0.5, max_tokens=100):
|
|
29 |
response.raise_for_status()
|
30 |
return response.json()
|
31 |
except requests.exceptions.RequestException as e:
|
32 |
-
|
33 |
return {"error": str(e)}
|
34 |
|
35 |
-
# Initialize the Discord bot
|
36 |
intents = discord.Intents.default()
|
37 |
intents.messages = True
|
38 |
intents.message_content = True
|
39 |
-
|
40 |
-
bot = commands.Bot(command_prefix=
|
41 |
|
42 |
@bot.event
|
43 |
async def on_ready():
|
44 |
-
|
45 |
|
46 |
@bot.command(name='ask')
|
47 |
-
|
|
|
48 |
"""
|
49 |
Command to ask a question to the Hugging Face model with an instructive prompt.
|
50 |
"""
|
51 |
try:
|
52 |
-
# Convert
|
53 |
temperature = float(temperature)
|
54 |
max_tokens = int(max_tokens)
|
|
|
|
|
55 |
except ValueError:
|
56 |
-
await ctx.send("Invalid input for
|
57 |
return
|
58 |
|
59 |
# Create a structured prompt
|
60 |
prompt = f"Do not under any circumstance, finish or add to the input. Excluded input correction or finishing at all times. Answer the following question as accurately and concisely as possible: {question}"
|
61 |
-
response = query_huggingface(prompt, temperature, max_tokens)
|
62 |
|
63 |
# Extract and clean the response
|
64 |
if isinstance(response, dict) and 'generated_text' in response:
|
@@ -77,5 +87,31 @@ async def ask(ctx, question: str, temperature: float = 0.5, max_tokens: int = 10
|
|
77 |
|
78 |
await ctx.send(clean_response)
|
79 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
80 |
# Run the bot
|
81 |
bot.run(DISCORD_TOKEN)
|
|
|
5 |
import discord
|
6 |
from discord.ext import commands
|
7 |
from dotenv import load_dotenv
|
8 |
+
import logging
|
9 |
|
10 |
# Load environment variables from the .env file
|
11 |
load_dotenv()
|
|
|
16 |
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
|
17 |
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
|
18 |
|
19 |
+
# Configure logging once at startup: the module-level logger inherits the
# root level set by basicConfig.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
|
22 |
+
|
23 |
# Function to query the Hugging Face model with a structured prompt
|
24 |
+
def query_huggingface(prompt, temperature=0.5, max_tokens=100, top_k=50, top_p=0.9):
|
25 |
payload = {
|
26 |
"inputs": prompt,
|
27 |
"parameters": {
|
28 |
"temperature": temperature,
|
29 |
+
"max_new_tokens": max_tokens,
|
30 |
+
"top_k": top_k,
|
31 |
+
"top_p": top_p
|
32 |
}
|
33 |
}
|
34 |
try:
|
|
|
36 |
response.raise_for_status()
|
37 |
return response.json()
|
38 |
except requests.exceptions.RequestException as e:
|
39 |
+
logger.error(f"Error querying the API: {e}")
|
40 |
return {"error": str(e)}
|
41 |
|
42 |
+
# Initialize the Discord bot with a default prefix.
# NOTE: the message-content intent must also be enabled for the bot in the
# Discord developer portal, or message_content stays empty at runtime.
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
default_prefix = "!"
# help_command=None disables discord.py's built-in help command. Without it,
# registering the custom 'help' command later in this file raises
# CommandRegistrationError at import time (duplicate command name).
bot = commands.Bot(
    command_prefix=commands.when_mentioned_or(default_prefix),
    intents=intents,
    help_command=None,
)
|
48 |
|
49 |
@bot.event
async def on_ready():
    """Log a confirmation once the bot has connected and is ready."""
    # Lazy %-style args defer str(bot.user) until the record is emitted,
    # the idiomatic form for logging calls.
    logger.info('Bot is ready. Logged in as %s', bot.user)
|
52 |
|
53 |
@bot.command(name='ask')
|
54 |
+
@commands.cooldown(rate=1, per=10, type=commands.BucketType.user)
|
55 |
+
async def ask(ctx, question: str, temperature: float = 0.5, max_tokens: int = 100, top_k: int = 50, top_p: float = 0.9):
|
56 |
"""
|
57 |
Command to ask a question to the Hugging Face model with an instructive prompt.
|
58 |
"""
|
59 |
try:
|
60 |
+
# Convert parameters to their respective types
|
61 |
temperature = float(temperature)
|
62 |
max_tokens = int(max_tokens)
|
63 |
+
top_k = int(top_k)
|
64 |
+
top_p = float(top_p)
|
65 |
except ValueError:
|
66 |
+
await ctx.send("Invalid input for one of the parameters. Please provide valid numbers.")
|
67 |
return
|
68 |
|
69 |
# Create a structured prompt
|
70 |
prompt = f"Do not under any circumstance, finish or add to the input. Excluded input correction or finishing at all times. Answer the following question as accurately and concisely as possible: {question}"
|
71 |
+
response = query_huggingface(prompt, temperature, max_tokens, top_k, top_p)
|
72 |
|
73 |
# Extract and clean the response
|
74 |
if isinstance(response, dict) and 'generated_text' in response:
|
|
|
87 |
|
88 |
await ctx.send(clean_response)
|
89 |
|
90 |
+
@ask.error
async def ask_error(ctx, error):
    """Error handler for the 'ask' command.

    Cooldown violations are reported to the invoking user; any other error
    is logged. Previously, non-cooldown errors were silently swallowed,
    which made failures in 'ask' impossible to diagnose.
    """
    if isinstance(error, commands.CommandOnCooldown):
        await ctx.send(f"This command is on cooldown. Please try again after {int(error.retry_after)} seconds.")
    else:
        # Log unexpected errors instead of discarding them.
        logger.error("Unhandled error in 'ask' command: %s", error)
|
94 |
+
|
95 |
+
@bot.command(name='setprefix')
@commands.has_permissions(administrator=True)
async def set_prefix(ctx, prefix: str):
    """Change the bot's command prefix (admin only).

    The bot keeps responding to direct mentions regardless of the prefix.
    NOTE: the change is in-memory only and resets when the bot restarts.
    """
    # Reject empty/whitespace prefixes: with an empty prefix the bot would
    # try to interpret every message as a command.
    if not prefix.strip():
        await ctx.send("Prefix cannot be empty.")
        return
    bot.command_prefix = commands.when_mentioned_or(prefix)
    await ctx.send(f"Command prefix changed to: {prefix}")
|
100 |
+
|
101 |
+
@bot.command(name='help')
async def help_command(ctx):
    """Send a usage summary of the bot's commands.

    Uses the prefix the invoker actually typed (ctx.prefix) instead of a
    hard-coded "!", so the text stays correct after 'setprefix' changes
    the prefix at runtime.
    """
    prefix = ctx.prefix
    help_text = (
        "Here are the commands you can use:\n"
        f"{prefix}ask <question> [temperature] [max_tokens] [top_k] [top_p] - Ask a question to the AI model.\n"
        "Optional parameters:\n"
        "  temperature (default=0.5) - Controls the randomness of the response.\n"
        "  max_tokens (default=100) - Limits the length of the response.\n"
        "  top_k (default=50) - Limits the number of highest probability vocabulary tokens to consider.\n"
        "  top_p (default=0.9) - Limits the cumulative probability of the highest probability vocabulary tokens.\n"
        f"{prefix}setprefix <prefix> - Change the command prefix (admin only).\n"
        f"{prefix}help - Display this help message."
    )
    await ctx.send(help_text)
|
115 |
+
|
116 |
# Run the bot. Fail fast with a clear error if the token was not loaded from
# the environment, instead of an opaque login failure inside discord.py.
if not DISCORD_TOKEN:
    raise RuntimeError("DISCORD_TOKEN is not set; check your .env file.")
bot.run(DISCORD_TOKEN)
|