import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
openai.api_key = os.environ["OPENAI_API_KEY"]
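# Compatibility note: this script assumes the pre-1.0 openai SDK (module-level
# openai.api_key plus openai.ChatCompletion.create below). With openai>=1.0 those
# entry points were removed in favor of the client-based API
# (OpenAI().chat.completions.create), so pin openai<1.0 or adapt accordingly.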
# Initialize paths and model identifiers for easy configuration and maintenance
filename = "output_topic_details.txt" # Path to the file storing restaurant-specific details
retrieval_model_name = 'output/sentence-transformer-finetuned/'
# Initialize the system message for the chatbot
system_message = "You are a restaurant recommending chatbot that suggests one restaurant in Seattle from the restaurant database based on the criteria the user provides."
# Initial system message to set the behavior of the assistant
messages = [{"role": "system", "content": system_message}]
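# Note: `messages` is module-level state, so the conversation history grows with every
# generate_response call and is shared by all users of the running Gradio app.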
# Load the SentenceTransformer model
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Retrieval model loaded successfully.")
except Exception as e:
    print(f"Failed to load retrieval model: {e}")
def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file, removing empty lines and stripping whitespace.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except Exception as e:
        print(f"Failed to load or preprocess text: {e}")
        return []
segments = load_and_preprocess_text(filename)
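# Illustrative sketch of what output_topic_details.txt is expected to contain, based on
# the loader above: one restaurant description per non-empty line. The format below is
# an assumption for illustration only; the real file ships with the project.
#
#   Saffron Grill | Middle Eastern | moderate prices | halal and gluten-free options
#   Tasty Thai | Thai | low prices | vegan and vegetarian friendly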
def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query using cosine similarity among sentence embeddings.
    This version finds the best match based on the content of the query.
    """
    try:
        lower_query = user_query.lower()
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        best_idx = similarities.argmax()
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""
def generate_response(user_query, relevant_segment):
    """
    Generate a response emphasizing the bot's capability in suggesting a restaurant.
    """
    try:
        user_message = f"Here is a local restaurant based on your information: {relevant_segment}"
        messages.append({"role": "user", "content": user_message})
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            max_tokens=150,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        output_text = response['choices'][0]['message']['content'].strip()
        messages.append({"role": "assistant", "content": output_text})
        return output_text
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
# Define a sample list of restaurants (replace this with your actual data source)
restaurants = [
    {
        "name": "Saffron Grill",
        "cuisine": "Middle Eastern",
        "price": "Moderate",
        "gluten_free": True,
        "vegan": False,
        "lactose_intolerant": True,
        "pescatarian": True,
        "allergen_friendly": False,
        "halal": True,
        "kosher": False,
        "vegetarian": True,
        "website": "https://www.saffrongrill.com"
    },
    {
        "name": "Tasty Thai",
        "cuisine": "Thai",
        "price": "Low",
        "gluten_free": False,
        "vegan": True,
        "lactose_intolerant": True,
        "pescatarian": True,
        "allergen_friendly": True,
        "halal": False,
        "kosher": False,
        "vegetarian": True,
        "website": "https://www.tastythai.com"
    },
    # Add more restaurant entries as needed
]
def find_restaurants(criteria):
    """
    Finds restaurants based on the given criteria.
    Parameters:
        criteria (dict): Dictionary containing filtering criteria.
    Returns:
        List of restaurants that match the criteria.
    """
    matching_restaurants = []
    for restaurant in restaurants:
        match = True
        for key, value in criteria.items():
            if key in restaurant:
                if isinstance(restaurant[key], bool):
                    if restaurant[key] != value:
                        match = False
                        break
                elif restaurant[key].lower() != value.lower():
                    match = False
                    break
        if match:
            matching_restaurants.append(restaurant)
    return matching_restaurants
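# Illustrative use of find_restaurants against the sample data above (not executed by
# the app): boolean keys must match exactly, string keys compare case-insensitively.
#   find_restaurants({"vegan": True, "price": "low"})  -> [Tasty Thai entry]
#   find_restaurants({"halal": True})                  -> [Saffron Grill entry]
#   find_restaurants({"kosher": True})                 -> []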
def generate_recommendation(criteria):
    """
    Generates a recommendation based on the criteria.
    Parameters:
        criteria (dict): Dictionary containing filtering criteria.
    Returns:
        String with the recommendation or a message if no matches are found.
    """
    results = find_restaurants(criteria)
    if results:
        recommendations = []
        for result in results:
            recommendation = (
                f"Based on your criteria, I recommend {result['name']}. "
                f"It's a {result['price'].lower()} priced {result['cuisine'].lower()} restaurant with "
                f"{'gluten-free options' if result['gluten_free'] else 'no gluten-free options'}, "
                f"{'vegan options' if result['vegan'] else 'no vegan options'}, "
                f"{'lactose-intolerant options' if result['lactose_intolerant'] else 'no lactose-intolerant options'}, "
                f"{'pescatarian options' if result['pescatarian'] else 'no pescatarian options'}, "
                f"{'allergen-friendly options' if result['allergen_friendly'] else 'no allergen-friendly options'}, "
                f"{'halal options' if result['halal'] else 'no halal options'}, "
                f"{'kosher options' if result['kosher'] else 'no kosher options'}, "
                f"and {'vegetarian options' if result['vegetarian'] else 'no vegetarian options'}. "
                f"Visit their website for more details: {result['website']}"
            )
            recommendations.append(recommendation)
        return "\n".join(recommendations)
    else:
        return "Sorry, no restaurants meet your criteria. Please try adjusting your filters."
def query_model(question):
    """
    Process a question, find relevant information, and generate a response.
    """
    if question == "":
        return "Give me your preferences..."
    if "restaurant" in question.lower():
        # Extract criteria from the question
        criteria = {}
        if "gluten-free" in question.lower():
            criteria["gluten_free"] = True
        if "vegan" in question.lower():
            criteria["vegan"] = True
        if "lactose-intolerant" in question.lower():
            criteria["lactose_intolerant"] = True
        if "pescatarian" in question.lower():
            criteria["pescatarian"] = True
        if "allergen-friendly" in question.lower():
            criteria["allergen_friendly"] = True
        if "halal" in question.lower():
            criteria["halal"] = True
        if "kosher" in question.lower():
            criteria["kosher"] = True
        if "vegetarian" in question.lower():
            criteria["vegetarian"] = True
        # Extract price and cuisine
        if "low" in question.lower():
            criteria["price"] = "Low"
        elif "moderate" in question.lower():
            criteria["price"] = "Moderate"
        elif "high" in question.lower():
            criteria["price"] = "High"
        cuisines = ["american", "indian", "middle eastern", "chinese", "italian", "thai",
                    "hawaiian-korean", "japanese", "ethiopian", "pakistani", "mexican",
                    "ghanaian", "vietnamese", "filipino", "spanish", "turkish"]
        if any(cuisine in question.lower() for cuisine in cuisines):
            criteria["cuisine"] = next(cuisine for cuisine in cuisines if cuisine in question.lower())
        response = generate_recommendation(criteria)
    else:
        relevant_segment = find_relevant_segment(question, segments)
        if not relevant_segment:
            return "Could not find specific information. Please refine your question."
        response = generate_response(question, relevant_segment)
    return response
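# Example inputs (illustrative) and the branch they take in query_model:
#   "Could you give me a thai restaurant with vegan options that is low budget?"
#       -> keyword branch: criteria = {"vegan": True, "price": "Low", "cuisine": "thai"}
#   "What should I know about halal certification?"
#       -> retrieval branch: find_relevant_segment + generate_response
# Note that the dietary keywords are matched in their hyphenated form
# ("gluten-free", "lactose-intolerant", "allergen-friendly").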
# Define the welcome message and specific topics the chatbot can provide information about
welcome_message = """
# Welcome to Ethical Eats Explorer!
## Your AI-driven assistant for restaurant recs in Seattle. Created by Saranya, Cindy, and Liana of the 2024 Kode With Klossy Seattle Camp.
"""
topics = """
### Please give me your restaurant preferences:
- Dietary Restrictions
- Cuisine Preferences (optional)
- Cuisines: American, Indian, Middle Eastern, Chinese, Italian, Thai, Hawaiian-Korean, Japanese, Ethiopian, Pakistani, Mexican, Ghanaian, Vietnamese, Filipino, Spanish, Turkish
- Budget Preferences (Low: $0 - $20, Moderate: $20 - $30, High: $30+ - per person)
Please send your message in the format: "Could you give me a (cuisine) restaurant with (dietary restriction) options that is (budget) budget?"
"""
# Set up the Gradio Blocks interface with custom layout components
with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo:
    gr.Markdown(welcome_message)  # Display the formatted welcome message
    with gr.Row():
        with gr.Column():
            gr.Markdown(topics)  # Show the topics on the left side
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="Your question", placeholder="Give me your information...")
            answer = gr.Textbox(label="Explorer's Response", placeholder="Explorer will respond here...", interactive=False, lines=10)
            submit_button = gr.Button("Submit")
            submit_button.click(fn=query_model, inputs=question, outputs=answer)
# Launch the Gradio app to allow user interaction
demo.launch(share=True)