research14 committed on
Commit
d6deec7
·
1 Parent(s): e0d127e

update code format

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -15,6 +15,9 @@ vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
15
  llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
16
  llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")
17
 
 
 
 
18
  def linguistic_features_fn(message):
19
  # Load a trained spaCy pipeline
20
  nlp = spacy.load("en_core_web_sm")
@@ -37,14 +40,11 @@ def linguistic_features_fn(message):
37
 
38
  return formatted_output
39
 
40
- os.environ['OPENAI_API_KEY']
41
- openai.api_key = os.environ['OPENAI_API_KEY']
42
-
43
  def chat(user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
44
  ''' Normal call of OpenAI API '''
45
  response = openai.ChatCompletion.create(
46
  temperature = temperature,
47
- model=model,
48
  messages=[
49
  {"role": "user", "content": user_prompt}
50
  ])
 
15
  llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
16
  llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")
17
 
18
+ os.environ['OPENAI_API_KEY']
19
+ openai.api_key = os.environ['OPENAI_API_KEY']
20
+
21
  def linguistic_features_fn(message):
22
  # Load a trained spaCy pipeline
23
  nlp = spacy.load("en_core_web_sm")
 
40
 
41
  return formatted_output
42
 
 
 
 
43
  def chat(user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
44
  ''' Normal call of OpenAI API '''
45
  response = openai.ChatCompletion.create(
46
  temperature = temperature,
47
+ model = model,
48
  messages=[
49
  {"role": "user", "content": user_prompt}
50
  ])