futranbg commited on
Commit
e1eb2b8
1 Parent(s): b525961

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -7,17 +7,17 @@ import gradio as gr
7
  from huggingface_hub import Repository, InferenceClient
8
 
9
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
10
- API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat"
11
- BOT_NAME = "Falcon"
12
 
13
- STOP_SEQUENCES = ["\nUser:", "<|endoftext|>", " User:", "###"]
14
 
15
  EXAMPLES = [
16
- ["Hey Falcon! Any recommendations for my holidays in Abu Dhabi?"],
17
  ["What's the Everett interpretation of quantum mechanics?"],
18
  ["Give me a list of the top 10 dive sites you would recommend around the world."],
19
  ["Can you tell me more about deep-water soloing?"],
20
- ["Can you write a short tweet about the release of our latest AI model, Falcon LLM?"]
21
  ]
22
 
23
  client = InferenceClient(
@@ -31,7 +31,7 @@ def format_prompt(message, history, system_prompt):
31
  prompt += f"System: {system_prompt}\n"
32
  for user_prompt, bot_response in history:
33
  prompt += f"User: {user_prompt}\n"
34
- prompt += f"Falcon: {bot_response}\n" # Response already contains "Falcon: "
35
  prompt += f"""User: {message}
36
  Falcon:"""
37
  return prompt
 
7
  from huggingface_hub import Repository, InferenceClient
8
 
9
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
10
+ API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf"
11
+ BOT_NAME = "LLAMA"
12
 
13
+ STOP_SEQUENCES = ["\nUser:", "<|endoftext|>", " User:", "###", "</s>"]
14
 
15
  EXAMPLES = [
16
+ ["Hey LLAMA! Any recommendations for my holidays in Abu Dhabi?"],
17
  ["What's the Everett interpretation of quantum mechanics?"],
18
  ["Give me a list of the top 10 dive sites you would recommend around the world."],
19
  ["Can you tell me more about deep-water soloing?"],
20
+ ["Can you write a short tweet about the release of our latest AI model, LLAMA LLM?"]
21
  ]
22
 
23
  client = InferenceClient(
 
31
  prompt += f"System: {system_prompt}\n"
32
  for user_prompt, bot_response in history:
33
  prompt += f"User: {user_prompt}\n"
34
 + prompt += f"LLAMA: {bot_response}\n" # Response already contains "LLAMA: "
35
  prompt += f"""User: {message}
36
  LLAMA:"""
37
  return prompt