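"""
LitBot: a Gradio chatbot for classic literature.

The app loads pre-written literature details from output_topic_details.txt,
retrieves the segment most relevant to a user's question with a fine-tuned
SentenceTransformer, and passes that segment to an OpenAI chat model to
generate the final answer.
"""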
import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Initialize paths and model identifiers for easy configuration and maintenance
filename = "output_topic_details.txt" # Path to the file storing literature-specific details
retrieval_model_name = 'output/sentence-transformer-finetuned/'  # Path to the fine-tuned SentenceTransformer used for retrieval
openai.api_key = os.environ["OPENAI_API_KEY"]  # Read the OpenAI API key from the environment
system_message = "You are a literature chatbot specialized in providing information on the context behind classic literature and the themes in specific classic books, and in encouraging users to explore literature further."
# Initial system message to set the behavior of the assistant
messages = [{"role": "system", "content": system_message}]
# Attempt to load the necessary models and provide feedback on success or failure
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file, removing empty lines and stripping whitespace.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except Exception as e:
        print(f"Failed to load or preprocess text: {e}")
        return []
segments = load_and_preprocess_text(filename)
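# Each non-empty line of output_topic_details.txt becomes one retrievable segment.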
def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query using cosine similarity among sentence embeddings.
    This version finds the best match based on the content of the query.
    """
    try:
        # Lowercase the query for better matching
        lower_query = user_query.lower()
        # Encode the query and the segments
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)
        # Compute cosine similarities between the query and the segments
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        # Find the index of the most similar segment
        best_idx = similarities.argmax()
        # Return the most relevant segment
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""
def generate_response(user_query, relevant_segment):
    """
    Generate a response emphasizing the bot's capability in providing literature information.
    """
    try:
        # Combine the user's question with the retrieved segment so the model answers the actual query
        user_message = f"{user_query}\nHere's the information on your book: {relevant_segment}"
        # Append user's message to messages list
        messages.append({"role": "user", "content": user_message})
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        # Extract the response text
        output_text = response['choices'][0]['message']['content'].strip()
        # Append assistant's message to messages list for context
        messages.append({"role": "assistant", "content": output_text})
        return output_text
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
def query_model(question):
    """
    Process a question, find relevant information, and generate a response.
    """
    if question == "":
        return "Welcome to LitBot! Ask me anything about literature, book themes, and the historical context behind your book."
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question."
    response = generate_response(question, relevant_segment)
    return response
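# Example usage (illustrative): query_model("What are the main themes of The Crucible?")
# retrieves the closest matching segment from the loaded text and answers using it as context.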
# Define the welcome message and specific topics the chatbot can provide information about
welcome_message = """
# 📖 Welcome to LitBot!
## An AI-driven assistant for all literature-related queries, LitBot is your new trusted reading guide! Created by Katie, Madeline, and Tiffany of the 2024 Kode With Klossy Los Angeles Camp.
"""
topics = """
### Feel free to ask anything from the topics below!
- Themes
- Historical Context
- Symbolism
- Potential Reading Challenges
- Controversies
- Book Background Information
"""
books = """
### You can ask about any of these books:
- The Great Gatsby
- The Crucible
- Fahrenheit 451
- Of Mice and Men
- To Kill a Mockingbird
- Romeo and Juliet
"""
books2 = """
###
- The Catcher in the Rye
- Pride and Prejudice
- Lord of the Flies
- Hamlet
"""
# Set up the Gradio Blocks interface with custom layout components
with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo:
    gr.Markdown(welcome_message)  # Display the formatted welcome message
    with gr.Row():
        gr.Markdown(topics)  # Show the suggested topics and book lists side by side
        gr.Markdown(books)
        gr.Markdown(books2)
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="Your question", placeholder="What do you want to ask about?")
            answer = gr.Textbox(label="LitBot Response", placeholder="LitBot will respond here...", interactive=False, lines=20)
            submit_button = gr.Button("Submit")
            submit_button.click(fn=query_model, inputs=question, outputs=answer)
# Launch the Gradio app to allow user interaction
demo.launch(share=True)
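# share=True asks Gradio to create a temporary public link in addition to the local URL.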