Radosław Wolnik committed on
Commit
0724869
·
1 Parent(s): f3d11f1

new method of generation

Browse files
Files changed (2) hide show
  1. ChatAI/chat_ai.py +2 -2
  2. app.py +40 -3
ChatAI/chat_ai.py CHANGED
@@ -1,3 +1,3 @@
1
- from transformers import pipeline
2
 
3
- pipe = pipeline("text2text-generation", model="facebook/blenderbot-400M-distill")
 
1
from huggingface_hub import InferenceClient

# Shared hosted-inference client for the chat model used by the bot.
# NOTE(review): the variable is still named `pipe` from the old
# transformers-pipeline version; callers import it under that name.
pipe = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
app.py CHANGED
@@ -32,10 +32,11 @@ async def on_message(message):
32
  async for message in channel.history(limit=4):
33
  messages.append(message.content)
34
 
35
- response = ai("\n".join(messages), max_length=1900, truncation=True, pad_token_id=50256)
36
 
37
-
38
- await channel.send(response[0]['generated_text'])
 
39
 
40
 
41
  message_counts[message.channel.id] = 0 # Reset the counter
@@ -43,6 +44,42 @@ async def on_message(message):
43
 
44
  await bot.process_commands(message) # Ensure commands still work
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  @bot.event
47
  async def on_ready():
48
  print(f'Logged in as {bot.user}') # Logs bot login in console
 
32
  async for message in channel.history(limit=4):
33
  messages.append(message.content)
34
 
35
+ previous_messages = ("\n".join(messages))
36
 
37
+ x = generate(previous_messages, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0)
38
+ print(x)
39
+ await channel.send(x)
40
 
41
 
42
  message_counts[message.channel.id] = 0 # Reset the counter
 
44
 
45
  await bot.process_commands(message) # Ensure commands still work
46
 
47
def format_prompt(message, history=()):
    """Build a Mistral-instruct prompt string from a chat history.

    Args:
        message: The new user message to append as the final turn.
        history: Optional iterable of (user_prompt, bot_response) pairs
            for earlier turns. Defaults to an empty history so the
            one-argument call made by generate() no longer raises
            TypeError (the original signature had no default).

    Returns:
        str: the conversation wrapped in the "<s>"/"[INST] ... [/INST]"
        markers expected by Mistral-7B-Instruct.
    """
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
54
+
55
def generate(
    prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion for *prompt* from the hosted model.

    This is a generator: it yields the cumulative generated text after
    each new token. NOTE(review): the on_message caller does
    `channel.send(generate(...))` without consuming the generator —
    verify the caller iterates (or joins) the stream.

    Args:
        prompt: Raw user/channel text; it is wrapped by format_prompt().
        temperature: Sampling temperature; clamped to a minimum of 1e-2
            because the inference endpoint rejects (near-)zero values.
        max_new_tokens: Maximum number of tokens to generate.
        top_p: Nucleus-sampling probability mass.
        repetition_penalty: Penalty applied to repeated tokens.

    Yields:
        str: the output accumulated so far, one yield per token.
    """
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed: sampling is reproducible across calls
    )

    # BUG FIX: the original called format_prompt(prompt), but
    # format_prompt requires a history argument, so every call raised
    # TypeError. Pass an empty history — the caller already folds prior
    # channel messages into `prompt` itself.
    formatted_prompt = format_prompt(prompt, [])

    # `ai` is presumably the module-level InferenceClient (chat_ai.py
    # exports it as `pipe`) — TODO confirm the import alias.
    stream = ai.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    # In a generator this only sets StopIteration.value; kept for
    # compatibility with the original.
    return output
81
+
82
+
83
  @bot.event
84
  async def on_ready():
85
  print(f'Logged in as {bot.user}') # Logs bot login in console