futranbg committed on
Commit
61d12d7
1 Parent(s): 5ec054f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -12,9 +12,7 @@ model2api = [
12
  "timdettmers/guanaco-33b-merged",
13
  ]
14
 
15
- BOT_NAME = "Assistant"
16
-
17
- STOP_SEQUENCES = ["\nUser:", " User:", "###", "</s>"]
18
 
19
  EXAMPLES = [
20
  ["Hey LLAMA! Any recommendations for my holidays in Abu Dhabi?"],
@@ -24,13 +22,13 @@ EXAMPLES = [
24
  ["Can you write a short tweet about the release of our latest AI model, LLAMA LLM?"]
25
  ]
26
 
27
- def format_prompt(message, history, system_prompt):
28
  prompt = ""
29
  if system_prompt:
30
  prompt += f"System: {system_prompt}\n"
31
  for user_prompt, bot_response in history:
32
  prompt += f"User: {user_prompt}\n"
33
- prompt += f"{BOT_NAME}: {bot_response}\n"
34
  prompt += f"""User: {message}\n{BOT_NAME}:"""
35
  return prompt
36
 
@@ -54,7 +52,6 @@ def generate(
54
  seed=seed,
55
  )
56
  seed = seed + 1
57
- formatted_prompt = format_prompt(prompt, history, system_prompt)
58
 
59
  client = InferenceClient()
60
  clientList = (client.list_deployed_models('text-generation-inference'))['text-generation']
@@ -65,6 +62,13 @@ def generate(
65
  print(f"Choosen model: {model}")
66
  break
67
 
 
 
 
 
 
 
 
68
  try:
69
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
70
  output = ""
 
12
  "timdettmers/guanaco-33b-merged",
13
  ]
14
 
15
+ STOP_SEQUENCES = ["\nUser:", " User:", "<|endoftext|>", "</s>"]
 
 
16
 
17
  EXAMPLES = [
18
  ["Hey LLAMA! Any recommendations for my holidays in Abu Dhabi?"],
 
22
  ["Can you write a short tweet about the release of our latest AI model, LLAMA LLM?"]
23
  ]
24
 
25
+ def format_prompt(message, history, system_prompt, bot_name):
26
  prompt = ""
27
  if system_prompt:
28
  prompt += f"System: {system_prompt}\n"
29
  for user_prompt, bot_response in history:
30
  prompt += f"User: {user_prompt}\n"
31
+ prompt += f"{bot_name}: {bot_response}\n"
32
  prompt += f"""User: {message}\n{BOT_NAME}:"""
33
  return prompt
34
 
 
52
  seed=seed,
53
  )
54
  seed = seed + 1
 
55
 
56
  client = InferenceClient()
57
  clientList = (client.list_deployed_models('text-generation-inference'))['text-generation']
 
62
  print(f"Choosen model: {model}")
63
  break
64
 
65
+ if model == model2api[0]:
66
+ bot_name = "Falcon"
67
+ else:
68
+ bot_name = "Assistant"
69
+
70
+ formatted_prompt = format_prompt(prompt, history, system_prompt, bot_name)
71
+
72
  try:
73
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
74
  output = ""