Radosław Wolnik committed
Commit 74b7d49 · 1 Parent(s): 0724869
Files changed (1)
  1. app.py +7 -22
app.py CHANGED
```diff
@@ -28,32 +28,26 @@ async def on_message(message):
     print(message_counts[message.channel.id])
 
     messages = []
-    if message_counts[message.channel.id] >= 4: # Check if the count reaches 10
+    if message_counts[message.channel.id] >= 1: # Check if the count reaches 10
         async for message in channel.history(limit=4):
             messages.append(message.content)
 
         previous_messages = ("\n".join(messages))
 
         x = generate(previous_messages, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
-        print(x)
-        await channel.send(x)
 
+        await channel.send(x)
 
         message_counts[message.channel.id] = 0 # Reset the counter
 
-
     await bot.process_commands(message) # Ensure commands still work
 
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
+def split_string(text: str) -> list[str]:
+    """Helper function to split text into chunks"""
+    return [text[i:i+3900] for i in range(0, len(text), 3900)]
 
 def generate(
-    prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+    prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -69,16 +63,7 @@ def generate(
         seed=42,
     )
 
-    formatted_prompt = format_prompt(prompt)
-
-    stream = ai.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        yield output
-    return output
-
+    return ai.text_generation(prompt, **generate_kwargs, stream=False, details=True, return_full_text=True)
 
 @bot.event
 async def on_ready():
```
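Two things stand out in the first hunk. The trigger threshold drops from 4 to 1, while the inline comment still claims the count must reach 10, a stale remnant worth cleaning up. And the new `split_string` helper is added but never called anywhere in this commit. If the intent is to avoid Discord's length limits, the missing wiring might look like the sketch below (`send_long` is a name invented here for illustration; Discord caps plain message content at 2,000 characters, so the 3,900-character chunks would only fit an embed description, which is capped at 4,096):

```python
# Hypothetical wiring for the new helper: split_string is defined in
# this commit but never called. Discord rejects plain message content
# over 2,000 characters, so a chunked send would need chunks of at
# most 2,000; the 3,900-character size only fits an embed description
# (capped at 4,096 characters).
async def send_long(channel, text: str) -> None:
    for chunk in split_string(text):
        await channel.send(chunk)
```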
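The second hunk deletes the Mistral-style `format_prompt` template and the token-streaming loop, so `generate` is no longer a generator: the old version contained a `yield`, meaning `channel.send(x)` would have received a generator object rather than text. One caveat remains after the change: if `ai` is a `huggingface_hub` `InferenceClient` (its construction is outside this diff), `text_generation` called with `details=True` returns a `TextGenerationOutput` object rather than a plain string, so the caller would still need to extract the generated text before sending it. A minimal sketch under that assumption:

```python
# Sketch, assuming `ai` is a huggingface_hub.InferenceClient (its
# construction is outside this diff). With details=True the call
# returns a TextGenerationOutput object rather than a plain str;
# the text itself lives in the .generated_text field.
response = ai.text_generation(
    prompt,
    **generate_kwargs,
    stream=False,
    details=True,
    return_full_text=True,
)
text = response.generated_text  # plain string, safe to pass to channel.send()
```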