Update app.py
app.py CHANGED
@@ -15,9 +15,6 @@ HF_API_KEY = os.getenv('HFREAD')
 API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
 headers = {"Authorization": f"Bearer {HF_API_KEY}"}
 
-# List of forbidden words/phrases
-forbidden_words = ["badword1", "badword2", "inappropriate_phrase"] # Extend this list as needed
-
 # Function to query the Hugging Face model with a structured prompt
 def query_huggingface(prompt):
     try:
@@ -28,13 +25,6 @@ def query_huggingface(prompt):
         print(f"Error querying the API: {e}")
         return {"error": str(e)}
 
-# Function to check if the response contains forbidden words/phrases
-def contains_forbidden_content(text):
-    for word in forbidden_words:
-        if word in text.lower():
-            return True
-    return False
-
 # Initialize the Discord bot
 intents = discord.Intents.default()
 intents.messages = True
@@ -52,22 +42,16 @@ async def ask(ctx, *, question: str):
     Command to ask a question to the Hugging Face model with an instructive prompt.
     """
     # Create a structured prompt
-    prompt = f"Please provide a detailed
+    prompt = f"Please provide a detailed response to the following question: {question}"
+    await ctx.send(f"Question: {question}")
     response = query_huggingface(prompt)
-
-
-    if isinstance(response, dict) and 'generated_text' in response:
-        generated_text = response['generated_text']
+    if 'generated_text' in response:
+        await ctx.send(f"Response: {response['generated_text']}")
     elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
-        generated_text = response[0]['generated_text']
-
-    if generated_text:
-        if contains_forbidden_content(generated_text):
-            await ctx.send("Sorry, the response contains inappropriate content and cannot be displayed.")
-        else:
-            await ctx.send(generated_text)
+        await ctx.send(f"Response: {response[0]['generated_text']}")
     else:
         await ctx.send("Sorry, I couldn't generate a response.")
 
 # Run the bot
 bot.run(DISCORD_TOKEN)
+
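
The diff shows only the opening try: and the error handling of query_huggingface; the body in between falls outside the hunk context. Below is a minimal sketch of what that body presumably does, assuming a plain requests.post call against the Inference API; the requests import, the {"inputs": prompt} payload shape, the raise_for_status() check, and the 60-second timeout are assumptions, not part of the commit.

import os
import requests

HF_API_KEY = os.getenv('HFREAD')
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
headers = {"Authorization": f"Bearer {HF_API_KEY}"}

def query_huggingface(prompt):
    # Assumed body: POST the prompt to the Inference API and return the parsed JSON
    try:
        response = requests.post(
            API_URL,
            headers=headers,
            json={"inputs": prompt},  # payload shape is an assumption
            timeout=60,               # timeout value is an assumption
        )
        response.raise_for_status()
        # The Inference API typically answers with a list like [{"generated_text": "..."}]
        return response.json()
    except Exception as e:
        print(f"Error querying the API: {e}")
        return {"error": str(e)}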
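
This commit drops the forbidden_words list, the contains_forbidden_content() check, and the intermediate generated_text variable; the ask command now echoes the question and sends whatever the model returns directly. For reference, here is how the command reads after the change, assembled from the added and unchanged lines of the last hunk. The @bot.command() decorator, the commands.Bot(...) construction with its command_prefix, and the DISCORD_TOKEN lookup are assumptions, since those lines fall outside the diff context; query_huggingface is the function sketched above.

import os
import discord
from discord.ext import commands

DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")  # variable used in the diff; env var name is an assumption

# Initialize the Discord bot (only the intents lines appear in the diff;
# the Bot construction and command_prefix are assumptions)
intents = discord.Intents.default()
intents.messages = True
# Note: discord.py 2.x also needs intents.message_content = True for prefix commands
bot = commands.Bot(command_prefix="!", intents=intents)

@bot.command()  # decorator assumed; it sits outside the diff context
async def ask(ctx, *, question: str):
    """
    Command to ask a question to the Hugging Face model with an instructive prompt.
    """
    # Create a structured prompt
    prompt = f"Please provide a detailed response to the following question: {question}"
    await ctx.send(f"Question: {question}")
    response = query_huggingface(prompt)
    # The API may return a dict (e.g. an error payload) or a list of generations
    if 'generated_text' in response:
        await ctx.send(f"Response: {response['generated_text']}")
    elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
        await ctx.send(f"Response: {response[0]['generated_text']}")
    else:
        await ctx.send("Sorry, I couldn't generate a response.")

# Run the bot
bot.run(DISCORD_TOKEN)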