import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os

# Silence Hugging Face tokenizers fork/parallelism warnings when Gradio spawns workers.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
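
# The legacy openai SDK picks up OPENAI_API_KEY from the environment automatically;
# setting it explicitly makes the requirement visible (assumes the key is supplied via an
# environment variable rather than hard-coded).
openai.api_key = os.environ.get("OPENAI_API_KEY")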

# Knowledge-base text file and the fine-tuned sentence-transformer checkpoint used for retrieval.
filename = "output_topic_details.txt"
retrieval_model_name = 'output/sentence-transformer-finetuned/'

# System prompt and shared conversation history.
system_message = "You are a comfort chatbot specialized in providing information on destressing activities."

messages = [{"role": "system", "content": system_message}]
messages.append({
    "role": "system",
    "content": "Do not use Markdown format. Do not include hashtags or asterisks."
})
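
# Note: `messages` is module-level state shared by every Gradio session, so conversation
# history from all users accumulates in one list; per-session history (e.g. via gr.State)
# would keep conversations separate.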

try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")


def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file, removing empty lines and stripping whitespace.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except Exception as e:
        print(f"Failed to load or preprocess text: {e}")
        return []


segments = load_and_preprocess_text(filename)


def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query using cosine similarity
    between sentence embeddings.
    """
    try:
        # Lowercase the query for more consistent matching.
        lower_query = user_query.lower()

        # Encode the query and all knowledge-base segments into embeddings.
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)

        # Cosine similarity between the query and every segment.
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]

        # Return the segment with the highest similarity score.
        best_idx = similarities.argmax()
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""


def generate_response(user_query, relevant_segment):
    """
    Generate a response emphasizing the bot's capability in providing information on
    therapy, destressing activities, and student opportunities.
    """
    try:
        # Give the model both the user's question and the retrieved context.
        user_message = f"User question: {user_query}\nHere's the information on your request: {relevant_segment}"
        messages.append({"role": "user", "content": user_message})

        response = openai.ChatCompletion.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=4000,
            temperature=0.5,
            top_p=1,
            frequency_penalty=0.5,
            presence_penalty=0.5,
        )

        output_text = response['choices'][0]['message']['content'].strip()

        # Keep the assistant's reply in the history so follow-up questions have context.
        messages.append({"role": "assistant", "content": output_text})

        return output_text

    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"


def query_model(question):
    """
    Process a question, find relevant information, and generate a response.
    """
    if question == "":
        return "Welcome to CalmConnect's CalmBot! Ask me anything about destressing strategies and we'll provide you with ways to unlock your inner calm!"
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question or head to our resources page."
    response = generate_response(question, relevant_segment)
    return response
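
# Illustrative check from a Python shell (hypothetical query):
# print(query_model("What are some quick breathing exercises?"))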


# Placeholder welcome text and topic list rendered above the chat box.
welcome_message = ""
topics = ""

# Custom color palette for the Gradio interface.
theme = gr.themes.Default(
    primary_hue="neutral",
    secondary_hue="neutral",
).set(
    background_fill_primary='#e3e9da',
    background_fill_primary_dark='#e3e9da',
    background_fill_secondary="#f8f1ea",
    background_fill_secondary_dark="#f8f1ea",
    border_color_accent="#f8f1ea",
    border_color_accent_dark="#e3e9da",
    border_color_accent_subdued="#f8f1ea",
    border_color_primary="#f8f1ea",
    block_border_color="#f8f1ea",
    button_primary_background_fill="#f8f1ea",
    button_primary_background_fill_dark="#f8f1ea"
)


# Build the Gradio interface.
with gr.Blocks(theme=theme) as demo:
    gr.Markdown(welcome_message)

    with gr.Row():
        with gr.Column(scale=0.8):
            gr.Markdown(topics)

    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="You", placeholder="What do you want to talk to CalmBot about?")
            answer = gr.Textbox(label="CalmBot's Response :D", placeholder="CalmBot will respond here..", interactive=False, lines=20)
            submit_button = gr.Button("Submit")
            submit_button.click(fn=query_model, inputs=question, outputs=answer)

demo.launch(share=True)