Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,6 +5,10 @@ import os
|
|
5 |
# Set your OpenAI API key
|
6 |
openai.api_key = os.getenv('OPENAI_API_KEY')
|
7 |
|
|
|
|
|
|
|
|
|
8 |
def print_like_dislike(x: gr.LikeData):
    # Log a chatbot like/dislike event to stdout: the message index,
    # the message text, and the boolean liked flag.
    print(x.index, x.value, x.liked)
|
10 |
|
@@ -19,16 +23,20 @@ def add_file(history, file):
|
|
19 |
return history
|
20 |
|
21 |
def initialize_chat():
|
22 |
-
# This function initializes the chat with
|
23 |
-
|
24 |
-
|
25 |
-
|
|
|
|
|
26 |
|
27 |
def generate_response(selected_question):
|
28 |
-
global chat_history
|
29 |
prompt = selected_question # Ensure selected_question is a string
|
30 |
|
31 |
try:
|
|
|
|
|
|
|
32 |
response = openai.ChatCompletion.create(
|
33 |
model="gpt-3.5-turbo",
|
34 |
messages=[
|
@@ -37,9 +45,12 @@ def generate_response(selected_question):
|
|
37 |
],
|
38 |
max_tokens=150,
|
39 |
temperature=0.7,
|
40 |
-
)
|
41 |
|
42 |
-
|
|
|
|
|
|
|
43 |
follow_up_response = openai.ChatCompletion.create(
|
44 |
model="gpt-3.5-turbo",
|
45 |
messages=[
|
@@ -48,20 +59,30 @@ def generate_response(selected_question):
|
|
48 |
],
|
49 |
max_tokens=100,
|
50 |
temperature=0.7,
|
51 |
-
)
|
52 |
|
53 |
-
follow_up_questions = follow_up_response.split('\n')
|
54 |
topics_str = "Topic analysis not available"
|
55 |
|
56 |
-
#
|
57 |
-
|
58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
except Exception as e:
|
60 |
-
new_response =
|
61 |
-
chat_history.append(new_response)
|
62 |
follow_up_questions = []
|
|
|
|
|
|
|
63 |
|
64 |
-
return
|
65 |
|
66 |
def update_suggested_questions(follow_up_questions):
    # Render each follow-up question as a Markdown bullet ("* ...") and
    # push the joined list into the suggested-questions Markdown widget.
    return gr.Markdown.update(value="\n".join(f"* {q}" for q in follow_up_questions))
|
@@ -88,6 +109,9 @@ css = """
|
|
88 |
}
|
89 |
"""
|
90 |
|
|
|
|
|
|
|
91 |
with gr.Blocks(css=css) as demo:
|
92 |
with gr.Row():
|
93 |
with gr.Column(scale=1):
|
@@ -97,10 +121,15 @@ with gr.Blocks(css=css) as demo:
|
|
97 |
In the realm of digital communication, the development of an advanced chatbot that incorporates topic modeling represents a significant leap towards enhancing user interaction and maintaining focus during conversations. This innovative chatbot design is specifically engineered to streamline discussions by guiding users to select from a curated list of suggested questions. This approach is crafted to mitigate the risk of diverging into off-topic dialogues, which are common pitfalls in conventional chatbot systems.
|
98 |
"""
|
99 |
)
|
100 |
-
suggested_questions = gr.Markdown(
|
|
|
|
|
|
|
|
|
|
|
101 |
with gr.Column(scale=1, elem_id="chat-container"):
|
102 |
chatbot = gr.Chatbot(
|
103 |
-
|
104 |
elem_id="chatbot",
|
105 |
bubble_full_width=False,
|
106 |
label="Safe Chatbot v1",
|
@@ -111,7 +140,7 @@ with gr.Blocks(css=css) as demo:
|
|
111 |
txt = gr.Textbox(scale=4, show_label=False, placeholder="Select Question", container=False, interactive=False) # Adjust based on need
|
112 |
btn = gr.Button("Submit")
|
113 |
|
114 |
-
btn.click(fn=generate_response, inputs=[txt], outputs=[chatbot, suggested_questions])
|
115 |
|
116 |
chatbot.like(print_like_dislike, None, None)
|
117 |
|
|
|
5 |
# Set your OpenAI API key
|
6 |
openai.api_key = os.getenv('OPENAI_API_KEY')
|
7 |
|
8 |
+
# Pricing constants (USD per token): $0.50 per 1M input tokens and
# $1.50 per 1M output tokens. NOTE(review): presumably gpt-3.5-turbo
# rates — verify against the current OpenAI pricing page before
# trusting the cost figures shown in the UI.
INPUT_COST_PER_TOKEN = 0.50 / 1_000_000
OUTPUT_COST_PER_TOKEN = 1.50 / 1_000_000
|
11 |
+
|
12 |
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike event: message index, message text, liked flag."""
    event_fields = (x.index, x.value, x.liked)
    print(*event_fields)
|
14 |
|
|
|
23 |
return history
|
24 |
|
25 |
def initialize_chat():
    """Seed the conversation with a hard-coded starter question.

    Asks generate_response() to answer the starter question, then returns
    the 5-tuple (history, follow_up_questions, input_tokens, output_tokens,
    cost) used to populate the Gradio widgets at startup.
    """
    question = "I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"
    # NOTE(review): both history entries occupy the bot slot of the
    # (user, bot) tuple — confirm the question shouldn't be (question, None).
    history = [(None, question)]
    answer, follow_ups, in_tokens, out_tokens, cost = generate_response(question)
    history.append((None, answer))
    return history, follow_ups, in_tokens, out_tokens, cost
|
32 |
|
33 |
def generate_response(selected_question):
|
|
|
34 |
prompt = selected_question # Ensure selected_question is a string
|
35 |
|
36 |
try:
|
37 |
+
# Calculate input tokens
|
38 |
+
input_tokens = len(openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=1).usage['total_tokens']) - 1
|
39 |
+
|
40 |
response = openai.ChatCompletion.create(
|
41 |
model="gpt-3.5-turbo",
|
42 |
messages=[
|
|
|
45 |
],
|
46 |
max_tokens=150,
|
47 |
temperature=0.7,
|
48 |
+
)
|
49 |
|
50 |
+
output_text = response.choices[0].message['content'].strip()
|
51 |
+
output_tokens = response.usage['completion_tokens']
|
52 |
+
|
53 |
+
follow_up_prompt = f"Based on the following response, suggest three follow-up questions: {output_text}"
|
54 |
follow_up_response = openai.ChatCompletion.create(
|
55 |
model="gpt-3.5-turbo",
|
56 |
messages=[
|
|
|
59 |
],
|
60 |
max_tokens=100,
|
61 |
temperature=0.7,
|
62 |
+
)
|
63 |
|
64 |
+
follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
|
65 |
topics_str = "Topic analysis not available"
|
66 |
|
67 |
+
# Calculate the total tokens used
|
68 |
+
total_input_tokens = input_tokens + response.usage['prompt_tokens']
|
69 |
+
total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']
|
70 |
+
|
71 |
+
# Calculate cost
|
72 |
+
input_cost = total_input_tokens * INPUT_COST_PER_TOKEN
|
73 |
+
output_cost = total_output_tokens * OUTPUT_COST_PER_TOKEN
|
74 |
+
total_cost = input_cost + output_cost
|
75 |
+
|
76 |
+
# Adjusted to return the response and follow-up questions
|
77 |
+
new_response = output_text + "\n\nTopics: " + topics_str
|
78 |
except Exception as e:
|
79 |
+
new_response = f"Error generating response: {e}"
|
|
|
80 |
follow_up_questions = []
|
81 |
+
total_input_tokens = 0
|
82 |
+
total_output_tokens = 0
|
83 |
+
total_cost = 0.0
|
84 |
|
85 |
+
return new_response, follow_up_questions, total_input_tokens, total_output_tokens, total_cost
|
86 |
|
87 |
def update_suggested_questions(follow_up_questions):
    """Format the follow-up questions as a Markdown bullet-list update."""
    bullets = "\n".join(f"* {question}" for question in follow_up_questions)
    return gr.Markdown.update(value=bullets)
|
|
|
109 |
}
|
110 |
"""
|
111 |
|
112 |
+
# Initialize the chat history and suggested questions
|
113 |
+
chat_history, initial_suggested_questions, initial_input_tokens, initial_output_tokens, initial_cost = initialize_chat()
|
114 |
+
|
115 |
with gr.Blocks(css=css) as demo:
|
116 |
with gr.Row():
|
117 |
with gr.Column(scale=1):
|
|
|
121 |
In the realm of digital communication, the development of an advanced chatbot that incorporates topic modeling represents a significant leap towards enhancing user interaction and maintaining focus during conversations. This innovative chatbot design is specifically engineered to streamline discussions by guiding users to select from a curated list of suggested questions. This approach is crafted to mitigate the risk of diverging into off-topic dialogues, which are common pitfalls in conventional chatbot systems.
|
122 |
"""
|
123 |
)
|
124 |
+
suggested_questions = gr.Markdown(
|
125 |
+
value="### Suggested Questions:\n\n" + "\n".join(f"* {q}" for q in initial_suggested_questions)
|
126 |
+
)
|
127 |
+
token_info = gr.Markdown(
|
128 |
+
value=f"### Token Usage:\n\n* Input Tokens: {initial_input_tokens}\n* Output Tokens: {initial_output_tokens}\n* Total Cost: ${initial_cost:.4f}"
|
129 |
+
)
|
130 |
with gr.Column(scale=1, elem_id="chat-container"):
|
131 |
chatbot = gr.Chatbot(
|
132 |
+
value=chat_history,
|
133 |
elem_id="chatbot",
|
134 |
bubble_full_width=False,
|
135 |
label="Safe Chatbot v1",
|
|
|
140 |
txt = gr.Textbox(scale=4, show_label=False, placeholder="Select Question", container=False, interactive=False) # Adjust based on need
|
141 |
btn = gr.Button("Submit")
|
142 |
|
143 |
+
btn.click(fn=generate_response, inputs=[txt], outputs=[chatbot, suggested_questions, token_info])
|
144 |
|
145 |
chatbot.like(print_like_dislike, None, None)
|
146 |
|