research14 committed on
Commit d067fae · 1 Parent(s): cc4df4b

checkpoint - chatbots work, but weird

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -5,8 +5,8 @@ import os
 import openai
 
 # Load the Vicuna 7B model and tokenizer
-vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")
-vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5")
+vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
+vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
 
 # Load the LLaMA 7b model and tokenizer
 llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")
@@ -50,13 +50,13 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
     print('Prompt + Context:')
     print(formatted_prompt)
-    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line without any additional text.''',
+    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.''',
                        user_prompt = formatted_prompt)
     chat_history.append((message, bot_message))
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     print('Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -69,7 +69,7 @@ def vicuna_respond(tab_name, message, chat_history):
     return tab_name, "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     print('Prompt + Context:')
     print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
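
Note: the loading lines above use AutoTokenizer and AutoModelForCausalLM, whose import sits outside the hunks shown. Assuming the standard Hugging Face package, the import near the top of app.py would be:

# Presumably present earlier in app.py (not visible in this diff).
from transformers import AutoTokenizer, AutoModelForCausalLM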
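
Note: gpt_respond() calls a chat() helper that is not shown in this diff. A minimal sketch of what such a helper might look like, assuming it wraps the legacy openai<1.0 ChatCompletion interface (the model name and structure here are assumptions, not code from app.py):

def chat(system_prompt, user_prompt, model="gpt-3.5-turbo"):
    # Hypothetical helper; the real implementation lives elsewhere in app.py.
    # Sends the system prompt (the extraction instruction above) plus the
    # formatted conversation to the OpenAI chat API and returns the reply text.
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    return response["choices"][0]["message"]["content"]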
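
Note: vicuna_respond() and llama_respond() end at the encode call in these hunks; the generation step is outside the diff. A rough sketch of how the continuation typically looks with transformers (max_new_tokens and the prompt-stripping slice are assumptions, not code from app.py):

    # Hypothetical continuation of vicuna_respond(); llama_respond() would mirror it.
    output_ids = vicuna_model.generate(input_ids, max_new_tokens=100)
    # Drop the prompt tokens so only the model's reply is kept.
    bot_message = vicuna_tokenizer.decode(output_ids[0][input_ids.shape[-1]:],
                                          skip_special_tokens=True)
    chat_history.append((message, bot_message))
    return tab_name, "", chat_history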