Radosław Wolnik committed on
Commit
ba7e0ae
·
1 Parent(s): 12b4ccb
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -34,17 +34,18 @@ async def on_message(message):
34
  async for message in channel.history(limit=10):
35
  messages.append(message.content)
36
  previous_messages = ("\n".join(messages))
 
37
  response = ai.text_generation(previous_messages, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
38
- splited_response = split_string(response)
39
- for part in splited_response:
40
  await channel.send(part)
41
 
42
  message_counts[message.channel.id] = 0 # Reset the counter
43
 
44
 
45
- response = ai.text_generation(message, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
46
- splited_response = split_string(str(response))
47
- for part in splited_response:
48
  await channel.send(part)
49
 
50
  await bot.process_commands(message) # Ensure commands still work
 
34
  async for message in channel.history(limit=10):
35
  messages.append(message.content)
36
  previous_messages = ("\n".join(messages))
37
+
38
  response = ai.text_generation(previous_messages, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
39
+ splitted_response = split_string(response)
40
+ for part in splitted_response:
41
  await channel.send(part)
42
 
43
  message_counts[message.channel.id] = 0 # Reset the counter
44
 
45
 
46
+ response = str(ai.text_generation(message, details=False, stream=False, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0))
47
+ splitted_response = split_string(response)
48
+ for part in splitted_response:
49
  await channel.send(part)
50
 
51
  await bot.process_commands(message) # Ensure commands still work