Z3ktrix committed on
Commit
e4934d1
·
verified ·
1 Parent(s): 6b7694f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -27
app.py CHANGED
@@ -35,10 +35,15 @@ def query_huggingface(prompt, temperature=0.5, max_tokens=100, top_k=50, top_p=0
35
  response = requests.post(API_URL, headers=headers, json=payload)
36
  response.raise_for_status()
37
  result = response.json()
38
- return result[0]['generated_text'] if isinstance(result, list) and len(result) > 0 else result.get('generated_text', 'No response generated.')
 
 
 
 
 
39
  except requests.exceptions.RequestException as e:
40
  logger.error(f"Error querying the API: {e}")
41
- return {"error": str(e)}
42
 
43
  # Initialize the Discord bot with a default prefix
44
  intents = discord.Intents.default()
@@ -53,31 +58,27 @@ async def on_ready():
53
 
54
  @bot.command(name='ask')
55
  @commands.cooldown(rate=1, per=10, type=commands.BucketType.user)
56
- async def ask(ctx, question: str, temperature: float = 0.8, max_tokens: int = 1000, top_k: int = 70, top_p: float = 0.4):
57
  """
58
  Command to ask a question to the Hugging Face model with an instructive prompt.
59
  """
60
- try:
61
- # Convert parameters to their respective types
62
- temperature = float(temperature)
63
- max_tokens = int(max_tokens)
64
- top_k = int(top_k)
65
- top_p = float(top_p)
66
- except ValueError:
67
- await ctx.send("Invalid input for one of the parameters. Please provide valid numbers.")
68
  return
69
 
70
- # Create a structured prompt
71
- prompt = f"Answer the following question accurately and concisely: {question}"
72
- response = query_huggingface(prompt, temperature, max_tokens, top_k, top_p)
73
-
74
- # Extract and clean the response
75
- if 'error' in response:
76
- clean_response = f"Error: {response['error']}"
77
- else:
78
  clean_response = response.replace(prompt, '').strip()
79
-
80
- await ctx.send(clean_response)
 
 
 
81
 
82
  @ask.error
83
  async def ask_error(ctx, error):
@@ -97,12 +98,7 @@ async def set_prefix(ctx, prefix: str):
97
  async def help_custom(ctx):
98
  help_text = (
99
  "Here are the commands you can use:\n"
100
- "!ask <question> [temperature] [max_tokens] [top_k] [top_p] - Ask a question to the AI model.\n"
101
- "Optional parameters:\n"
102
- " temperature (default=0.5) - Controls the randomness of the response.\n"
103
- " max_tokens (default=100) - Limits the length of the response.\n"
104
- " top_k (default=50) - Limits the number of highest probability vocabulary tokens to consider.\n"
105
- " top_p (default=0.9) - Limits the cumulative probability of the highest probability vocabulary tokens.\n"
106
  "!setprefix <prefix> - Change the command prefix (admin only).\n"
107
  "!help_custom - Display this help message."
108
  )
 
35
  response = requests.post(API_URL, headers=headers, json=payload)
36
  response.raise_for_status()
37
  result = response.json()
38
+ if isinstance(result, list) and len(result) > 0 and 'generated_text' in result[0]:
39
+ return result[0]['generated_text']
40
+ elif 'generated_text' in result:
41
+ return result['generated_text']
42
+ else:
43
+ return "No response generated."
44
  except requests.exceptions.RequestException as e:
45
  logger.error(f"Error querying the API: {e}")
46
+ return f"Error: {str(e)}"
47
 
48
  # Initialize the Discord bot with a default prefix
49
  intents = discord.Intents.default()
 
58
 
59
  @bot.command(name='ask')
60
  @commands.cooldown(rate=1, per=10, type=commands.BucketType.user)
61
+ async def ask(ctx, *question):
62
  """
63
  Command to ask a question to the Hugging Face model with an instructive prompt.
64
  """
65
+ if not question:
66
+ await ctx.send("You need to ask a question.")
 
 
 
 
 
 
67
  return
68
 
69
+ question = " ".join(question)
70
+ try:
71
+ # Create a structured prompt
72
+ prompt = f"Answer the following question accurately and concisely: {question}"
73
+ response = query_huggingface(prompt, temperature=0.8, max_tokens=1000, top_k=70, top_p=0.4)
74
+
75
+ # Extract and clean the response
 
76
  clean_response = response.replace(prompt, '').strip()
77
+
78
+ await ctx.send(clean_response)
79
+ except Exception as e:
80
+ logger.error(f"Error processing the ask command: {e}")
81
+ await ctx.send("An error occurred while processing your request.")
82
 
83
  @ask.error
84
  async def ask_error(ctx, error):
 
98
  async def help_custom(ctx):
99
  help_text = (
100
  "Here are the commands you can use:\n"
101
+ "!ask <question> - Ask a question to the AI model.\n"
 
 
 
 
 
102
  "!setprefix <prefix> - Change the command prefix (admin only).\n"
103
  "!help_custom - Display this help message."
104
  )