peterkros committed on
Commit
ea0606d
·
verified ·
1 Parent(s): 1866c12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -12
app.py CHANGED
@@ -22,6 +22,18 @@ def add_file(history, file):
22
  history.append(file_info)
23
  return history
24
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  def initialize_chat():
26
  # This function initializes the chat with an initial question.
27
  initial_question = "I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"
@@ -32,17 +44,17 @@ def initialize_chat():
32
 
33
  def generate_response(selected_question):
34
  prompt = selected_question # Ensure selected_question is a string
 
 
 
 
35
 
36
  try:
37
- # Calculate input tokens
38
- input_tokens = len(openai.Completion.create(engine="gpt-3.5-turbo", prompt=prompt, max_tokens=1).usage['total_tokens']) - 1
39
 
40
  response = openai.ChatCompletion.create(
41
  model="gpt-3.5-turbo",
42
- messages=[
43
- {"role": "system", "content": "You are a friendly and helpful chatbot."},
44
- {"role": "user", "content": prompt}
45
- ],
46
  max_tokens=150,
47
  temperature=0.7,
48
  )
@@ -51,21 +63,22 @@ def generate_response(selected_question):
51
  output_tokens = response.usage['completion_tokens']
52
 
53
  follow_up_prompt = f"Based on the following response, suggest three follow-up questions: {output_text}"
 
 
 
 
54
  follow_up_response = openai.ChatCompletion.create(
55
  model="gpt-3.5-turbo",
56
- messages=[
57
- {"role": "system", "content": "You are a friendly and helpful chatbot."},
58
- {"role": "user", "content": follow_up_prompt}
59
- ],
60
  max_tokens=100,
61
  temperature=0.7,
62
  )
63
 
64
  follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
65
-
66
 
67
  # Calculate the total tokens used
68
- total_input_tokens = input_tokens + response.usage['prompt_tokens']
69
  total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']
70
 
71
  # Calculate cost
@@ -132,6 +145,7 @@ with gr.Blocks(css=css) as demo:
132
  value=chat_history,
133
  elem_id="chatbot",
134
  bubble_full_width=False,
 
135
  avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
136
  )
137
 
 
22
  history.append(file_info)
23
  return history
24
 
25
# NOTE(review): this span is a rendered diff hunk — the '+' gutter markers and
# the interleaved bare line numbers are page artifacts, not Python syntax.
# Purpose of the added function: estimate the prompt-token count of a chat
# `messages` list before calling the ChatCompletion API.
+ def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
26
# BUG(review): the openai Python SDK has no `openai.Encoding` attribute; token
# encoders are provided by the separate `tiktoken` package
# (`tiktoken.encoding_for_model(model)`), so this line will raise
# AttributeError at runtime — confirm and switch to tiktoken.
+ encoding = openai.Encoding.from_model(model)
27
+ num_tokens = 0
28
+ for message in messages:
29
+ num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
30
# Counts the encoded length of every value in the message dict (role,
# content, and name if present).
+ for key, value in message.items():
31
+ num_tokens += len(encoding.encode(value))
32
# NOTE(review): the per-message constants look like OpenAI's cookbook
# token-counting recipe for gpt-3.5-turbo, but the cookbook SUBTRACTS one
# token when "name" is present (name replaces role); `+= 1` here — and the
# comment on the next line — appear transcribed incorrectly. Verify against
# the current cookbook guidance.
+ if key == "name": # if there's a name, the role is omitted
33
+ num_tokens += 1 # role is always required and always 1 token
34
+ num_tokens += 2 # every reply is primed with <im_start>assistant
35
+ return num_tokens
36
+
37
  def initialize_chat():
38
  # This function initializes the chat with an initial question.
39
  initial_question = "I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"
 
44
 
45
  def generate_response(selected_question):
46
  prompt = selected_question # Ensure selected_question is a string
47
+ messages = [
48
+ {"role": "system", "content": "You are a friendly and helpful chatbot."},
49
+ {"role": "user", "content": prompt}
50
+ ]
51
 
52
  try:
53
+ input_tokens = num_tokens_from_messages(messages, model="gpt-3.5-turbo")
 
54
 
55
  response = openai.ChatCompletion.create(
56
  model="gpt-3.5-turbo",
57
+ messages=messages,
 
 
 
58
  max_tokens=150,
59
  temperature=0.7,
60
  )
 
63
  output_tokens = response.usage['completion_tokens']
64
 
65
  follow_up_prompt = f"Based on the following response, suggest three follow-up questions: {output_text}"
66
+ follow_up_messages = [
67
+ {"role": "system", "content": "You are a friendly and helpful chatbot."},
68
+ {"role": "user", "content": follow_up_prompt}
69
+ ]
70
  follow_up_response = openai.ChatCompletion.create(
71
  model="gpt-3.5-turbo",
72
+ messages=follow_up_messages,
 
 
 
73
  max_tokens=100,
74
  temperature=0.7,
75
  )
76
 
77
  follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
78
+ topics_str = "Topic analysis not available"
79
 
80
  # Calculate the total tokens used
81
+ total_input_tokens = input_tokens + num_tokens_from_messages(follow_up_messages, model="gpt-3.5-turbo")
82
  total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']
83
 
84
  # Calculate cost
 
145
  value=chat_history,
146
  elem_id="chatbot",
147
  bubble_full_width=False,
148
+ label="Safe Chatbot v1",
149
  avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
150
  )
151