peterkros committed
Commit 1866c12 · verified · 1 Parent(s): ffb9fa0

Update app.py

Files changed (1)
  1. app.py +2 -3
app.py CHANGED
@@ -35,7 +35,7 @@ def generate_response(selected_question):
 
     try:
         # Calculate input tokens
-        input_tokens = len(openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=1).usage['total_tokens']) - 1
+        input_tokens = len(openai.Completion.create(engine="gpt-3.5-turbo", prompt=prompt, max_tokens=1).usage['total_tokens']) - 1
 
         response = openai.ChatCompletion.create(
             model="gpt-3.5-turbo",
@@ -62,7 +62,7 @@ def generate_response(selected_question):
         )
 
         follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
-        topics_str = "Topic analysis not available"
+
 
         # Calculate the total tokens used
         total_input_tokens = input_tokens + response.usage['prompt_tokens']
@@ -132,7 +132,6 @@ with gr.Blocks(css=css) as demo:
         value=chat_history,
         elem_id="chatbot",
         bubble_full_width=False,
-        label="Safe Chatbot v1",
         avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
     )
 
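
The change at line 38 swaps the deprecated text-davinci-003 engine for gpt-3.5-turbo, but the expression still wraps usage['total_tokens'] (an integer) in len(), still spends a one-token Completion call just to measure the prompt, and gpt-3.5-turbo is a chat model that the legacy Completions endpoint may reject. The intent appears to be total_tokens minus the single generated token. A common alternative is to count prompt tokens locally with tiktoken; the sketch below is illustrative, not part of this commit, and assumes `prompt` is a plain string (the helper name is hypothetical).

```python
# Sketch: count prompt tokens locally with tiktoken instead of an extra API call.
# `count_prompt_tokens` is a hypothetical helper, not defined in app.py.
import tiktoken

def count_prompt_tokens(prompt: str, model: str = "gpt-3.5-turbo") -> int:
    """Return how many tokens `prompt` occupies for `model`."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the encoding used by the
        # gpt-3.5-turbo / gpt-4 families.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(prompt))

# e.g. input_tokens = count_prompt_tokens(prompt)
```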
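
Separately, the code a few lines below the second hunk already reads response.usage['prompt_tokens'], and every ChatCompletion response carries a usage block, so the extra counting call can often be dropped entirely. A minimal sketch, assuming the legacy openai<1.0 interface that app.py appears to use (the messages list here is only an example):

```python
import openai

# Sketch: the chat response itself reports token usage for the request,
# so no separate token-counting request is required.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],  # illustrative messages
)

usage = response["usage"]
prompt_tokens = usage["prompt_tokens"]          # tokens in the input messages
completion_tokens = usage["completion_tokens"]  # tokens in the generated reply
total_tokens = usage["total_tokens"]            # prompt + completion
```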