Z3ktrix commited on
Commit
afca611
·
verified ·
1 Parent(s): b168951

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -42,8 +42,7 @@ async def ask(ctx, *, question: str):
42
  Command to ask a question to the Hugging Face model with an instructive prompt.
43
  """
44
  # Create a structured prompt
45
- prompt = f"Please provide a detailed response to the following question: {question}"
46
- await ctx.send(f"Question: {question}")
47
  response = query_huggingface(prompt)
48
 
49
  # Extract and clean the response
@@ -53,11 +52,15 @@ async def ask(ctx, *, question: str):
53
  response_text = response[0]['generated_text']
54
  else:
55
  response_text = "Sorry, I couldn't generate a response."
56
-
57
  # Remove the prompt from the response if present
58
  clean_response = response_text.replace(prompt, '').strip()
59
 
60
- await ctx.send(f"Response: {clean_response}")
 
 
 
 
61
 
62
  # Run the bot
63
  bot.run(DISCORD_TOKEN)
 
42
  Command to ask a question to the Hugging Face model with an instructive prompt.
43
  """
44
  # Create a structured prompt
45
+ prompt = f"Answer the following question as accurately and concisely as possible: {question}"
 
46
  response = query_huggingface(prompt)
47
 
48
  # Extract and clean the response
 
52
  response_text = response[0]['generated_text']
53
  else:
54
  response_text = "Sorry, I couldn't generate a response."
55
+
56
  # Remove the prompt from the response if present
57
  clean_response = response_text.replace(prompt, '').strip()
58
 
59
+ # Avoid prompt completion issues by removing any leading incomplete sentence
60
+ if clean_response.startswith(question):
61
+ clean_response = clean_response[len(question):].strip()
62
+
63
+ await ctx.send(clean_response)
64
 
65
  # Run the bot
66
  bot.run(DISCORD_TOKEN)