Spaces:
Sleeping
Sleeping
File size: 6,661 Bytes
80beabf f9946df 80beabf ffb9fa0 80beabf ea0606d f9946df ea0606d 80beabf ffb9fa0 80beabf ea0606d 80beabf ea0606d ffb9fa0 2288685 ea0606d 80beabf ffb9fa0 80beabf ffb9fa0 ea0606d 2194f61 ea0606d 2194f61 ffb9fa0 2194f61 ffb9fa0 ea0606d 80beabf ffb9fa0 ea0606d ffb9fa0 80beabf ffb9fa0 2194f61 ffb9fa0 80beabf ffb9fa0 2194f61 1526d48 80beabf 22d4f68 1577770 22d4f68 fb397d1 22d4f68 80beabf 22d4f68 fb397d1 22d4f68 4d07f34 ffb9fa0 22d4f68 1577770 ffb9fa0 1577770 ffb9fa0 1577770 ea0606d 1577770 ffb9fa0 1577770 80beabf 22d4f68 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 |
import gradio as gr
import openai
import os
import tiktoken
# Set your OpenAI API key from the environment (never hard-code secrets).
openai.api_key = os.getenv('OPENAI_API_KEY')

# Pricing constants in USD per token — presumably gpt-3.5-turbo rates
# ($0.50 per 1M input tokens, $1.50 per 1M output tokens); TODO confirm
# against the current OpenAI pricing page.
INPUT_COST_PER_TOKEN = 0.50 / 1_000_000
OUTPUT_COST_PER_TOKEN = 1.50 / 1_000_000
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike event's position, text and vote to stdout."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)
def add_text(history, text):
    """Append the user's message plus a fixed bot reply to *history*.

    Mutates *history* in place and returns it for Gradio chaining.
    """
    canned_reply = "**That's cool!**"
    history.append((text, canned_reply))
    return history
def add_file(history, file):
    """Record an uploaded file in *history* by name, with an empty bot turn.

    Mutates *history* in place and returns it for Gradio chaining.
    """
    label = f"Uploaded file: {file.name}"
    history.append((label, ""))
    return history
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Estimate the number of prompt tokens a chat request will consume.

    Follows the OpenAI cookbook accounting for gpt-3.5-turbo-style models:
    each message carries a fixed 4-token framing overhead, and every reply
    is primed with a 2-token assistant prefix.

    Args:
        messages: list of dicts with "role"/"content" (optionally "name").
        model: model name used to select the tiktoken encoding.

    Returns:
        int: estimated token count for the prompt.
    """
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                # Per the OpenAI cookbook, a "name" field *replaces* the role,
                # so one token is subtracted here — the original `+= 1` was a
                # sign flip relative to the reference implementation.
                num_tokens -= 1
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens
def initialize_chat():
    """Seed the chat with a canned opening question and the bot's answer.

    Returns the same 5-tuple shape as generate_response, with the chat
    history in place of the response text.
    """
    initial_question = "I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"
    response, follow_up_questions, input_tokens, output_tokens, cost = generate_response(initial_question)
    # Both turns render as bot-side messages (user slot is None).
    chat_history = [(None, initial_question), (None, response)]
    return chat_history, follow_up_questions, input_tokens, output_tokens, cost
def generate_response(selected_question):
    """Answer *selected_question* with gpt-3.5-turbo and propose follow-ups.

    Makes two chat-completion calls: one for the answer itself, then a
    second asking the model to suggest three follow-up questions based on
    that answer. Token counts and cost cover both calls.

    Args:
        selected_question: the user's question (expected to be a string).

    Returns:
        tuple: (response_text, follow_up_questions, total_input_tokens,
        total_output_tokens, total_cost). On any failure the text carries
        the error message, the question list is empty, and counters are 0.
    """
    prompt = selected_question  # Ensure selected_question is a string
    messages = [
        {"role": "system", "content": "You are a friendly and helpful chatbot."},
        {"role": "user", "content": prompt}
    ]
    try:
        # Estimate prompt tokens locally before spending the API call.
        input_tokens = num_tokens_from_messages(messages, model="gpt-3.5-turbo")
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.7,
        )
        output_text = response.choices[0].message['content'].strip()
        output_tokens = response.usage['completion_tokens']
        # Second round-trip: ask the model itself for follow-up questions.
        follow_up_prompt = f"Based on the following response, suggest three follow-up questions: {output_text}"
        follow_up_messages = [
            {"role": "system", "content": "You are a friendly and helpful chatbot."},
            {"role": "user", "content": follow_up_prompt}
        ]
        follow_up_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=follow_up_messages,
            max_tokens=100,
            temperature=0.7,
        )
        # Assumes the model returns one question per line — TODO confirm;
        # numbering/blank lines would end up inside the list entries.
        follow_up_questions = follow_up_response.choices[0].message['content'].strip().split('\n')
        # Placeholder: no topic model is wired in yet.
        topics_str = "Topic analysis not available"
        # Calculate the total tokens used
        total_input_tokens = input_tokens + num_tokens_from_messages(follow_up_messages, model="gpt-3.5-turbo")
        total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']
        # Calculate cost
        input_cost = total_input_tokens * INPUT_COST_PER_TOKEN
        output_cost = total_output_tokens * OUTPUT_COST_PER_TOKEN
        total_cost = input_cost + output_cost
        # Adjusted to return the response and follow-up questions
        new_response = output_text + "\n\nTopics: " + topics_str
    except Exception as e:
        # UI boundary: surface the error as chat text instead of crashing.
        new_response = f"Error generating response: {e}"
        follow_up_questions = []
        total_input_tokens = 0
        total_output_tokens = 0
        total_cost = 0.0
    return new_response, follow_up_questions, total_input_tokens, total_output_tokens, total_cost
def update_suggested_questions(follow_up_questions):
    """Render the follow-up questions as a Markdown bullet-list update."""
    bullets = [f"* {q}" for q in follow_up_questions]
    return gr.Markdown.update(value="\n".join(bullets))
# CSS for the phone layout and background
css = """
#chat-container {
max-width: 400px;
margin: auto;
border: 1px solid #ccc;
border-radius: 20px;
overflow: hidden;
background: url('https://path-to-your-phone-background-image.png') no-repeat center center;
background-size: cover;
height: 700px;
padding: 20px;
box-sizing: border-box;
}
#chatbot {
height: calc(100% - 50px);
overflow-y: auto;
background: transparent;
}
"""
# Initialize the chat history and suggested questions once at import time
# (this makes two live API calls before the UI is even built).
chat_history, initial_suggested_questions, initial_input_tokens, initial_output_tokens, initial_cost = initialize_chat()

# Two-column layout: project description / suggestions / token stats on
# the left, the phone-styled chat window on the right.
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(
                """
# Child safe chatbot project!
In the realm of digital communication, the development of an advanced chatbot that incorporates topic modeling represents a significant leap towards enhancing user interaction and maintaining focus during conversations. This innovative chatbot design is specifically engineered to streamline discussions by guiding users to select from a curated list of suggested questions. This approach is crafted to mitigate the risk of diverging into off-topic dialogues, which are common pitfalls in conventional chatbot systems.
                """
            )
            suggested_questions = gr.Markdown(
                value="### Suggested Questions:\n\n" + "\n".join(f"* {q}" for q in initial_suggested_questions)
            )
            token_info = gr.Markdown(
                value=f"### Token Usage:\n\n* Input Tokens: {initial_input_tokens}\n* Output Tokens: {initial_output_tokens}\n* Total Cost: ${initial_cost:.4f}"
            )
        with gr.Column(scale=1, elem_id="chat-container"):
            chatbot = gr.Chatbot(
                value=chat_history,
                elem_id="chatbot",
                bubble_full_width=False,
                label="Safe Chatbot v1",
                # (user avatar, bot avatar); expects avatar.png in the CWD.
                avatar_images=(None, os.path.join(os.getcwd(), "avatar.png"))
            )
            with gr.Row():
                txt = gr.Textbox(scale=4, show_label=False, placeholder="Select Question", container=False, interactive=False)  # Adjust based on need
                btn = gr.Button("Submit")
            # NOTE(review): generate_response returns FIVE values but only
            # three outputs are wired here, and the Chatbot output receives
            # a plain string rather than a history list — confirm/fix this
            # wiring before relying on the submit button.
            btn.click(fn=generate_response, inputs=[txt], outputs=[chatbot, suggested_questions, token_info])
    chatbot.like(print_like_dislike, None, None)
# Script entry point: launch the Gradio app with a public share link.
# (Removed a stray trailing "|" scrape artifact that made the file
# syntactically invalid.)
if __name__ == "__main__":
    demo.launch(share=True)