amburp committed
Commit 42b8668 · verified · 1 Parent(s): 383d1fc

Update app.py

Files changed (1)
  1. app.py +1 -174
app.py CHANGED
@@ -169,177 +169,4 @@ with gr.Blocks(theme=theme) as demo:


  # Launch the Gradio app to allow user interaction
- demo.launch(share=True)
-
- # import gradio as gr
- # from sentence_transformers import SentenceTransformer, util
- # import openai
- # import os
-
- # os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
- # # Initialize paths and model identifiers for easy configuration and maintenance
- # filename = "output_topic_details.txt" # Path to the file storing chess-specific details
- # retrieval_model_name = 'output/sentence-transformer-finetuned/'
-
- # openai.api_key = os.environ["OPENAI_API_KEY"]
-
- # system_message = "You are a eco-friendly travel chatbot specialized in providing information on eco-friendly restaurants, hotels, and attractions in NYC."
- # # Initial system message to set the behavior of the assistant
- # messages = [{"role": "system", "content": system_message}]
-
- # # Attempt to load the necessary models and provide feedback on success or failure
- # try:
- #     retrieval_model = SentenceTransformer(retrieval_model_name)
- #     print("Models loaded successfully.")
- # except Exception as e:
- #     print(f"Failed to load models: {e}")
-
- # def load_and_preprocess_text(filename):
- #     """
- #     Load and preprocess text from a file, removing empty lines and stripping whitespace.
- #     """
- #     try:
- #         with open(filename, 'r', encoding='utf-8') as file:
- #             segments = [line.strip() for line in file if line.strip()]
- #         print("Text loaded and preprocessed successfully.")
- #         return segments
- #     except Exception as e:
- #         print(f"Failed to load or preprocess text: {e}")
- #         return []
-
- # segments = load_and_preprocess_text(filename)
-
- # def find_relevant_segment(user_query, segments):
- #     """
- #     Find the most relevant text segment for a user's query using cosine similarity among sentence embeddings.
- #     This version finds the best match based on the content of the query.
- #     """
- #     try:
- #         # Lowercase the query for better matching
- #         lower_query = user_query.lower()
-
- #         # Encode the query and the segments
- #         query_embedding = retrieval_model.encode(lower_query)
- #         segment_embeddings = retrieval_model.encode(segments)
-
- #         # Compute cosine similarities between the query and the segments
- #         similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
-
- #         # Find the index of the most similar segment
- #         best_idx = similarities.argmax()
-
- #         # Return the most relevant segment
- #         return segments[best_idx]
- #     except Exception as e:
- #         print(f"Error in finding relevant segment: {e}")
- #         return ""
-
- # def generate_response(user_query, relevant_segment):
- #     """
- #     Generate a response emphasizing the bot's capability in providing eco-friendly travel information.
- #     """
- #     try:
- #         user_message = f"Here's the information on eco-friendly travel information: {relevant_segment}"
-
- #         # Append user's message to messages list
- #         messages.append({"role": "user", "content": user_message})
-
- #         response = openai.ChatCompletion.create(
- #             model="gpt-3.5-turbo",
- #             messages=messages,
- #             max_tokens=150,
- #             temperature=0.2,
- #             top_p=1,
- #             frequency_penalty=0,
- #             presence_penalty=0
- #         )
-
- #         # Extract the response text
- #         output_text = response['choices'][0]['message']['content'].strip()
-
- #         # Append assistant's message to messages list for context
- #         messages.append({"role": "assistant", "content": output_text})
-
- #         return output_text
-
- #     except Exception as e:
- #         print(f"Error in generating response: {e}")
- #         return f"Error in generating response: {e}"
-
- # def query_model(question):
- #     """
- #     Process a question, find relevant information, and generate a response.
- #     """
- #     if question == "":
- #         return "Welcome to GreenGuide! Ask me anything about eco-friendly hotels, restaurants, and things to do in NYC."
- #     relevant_segment = find_relevant_segment(question, segments)
- #     if not relevant_segment:
- #         return "Could not find specific information. Please refine your question."
- #     response = generate_response(question, relevant_segment)
- #     return response
-
- # # Define the HTML iframe content
-
- # iframe = '''
- # <iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d193595.2528001417!2d-74.1444872802558!3d40.69763123330436!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x89c24fa5d33f083b%3A0xc80b8f06e177fe62!2sNew%20York%2C%20NY!5e0!3m2!1sen!2sus!4v1722483445443!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy" referrerpolicy="no-referrer-when-downgrade"></iframe>
- # '''
-
-
- # # Define the welcome message and specific topics the chatbot can provide information about
- # welcome_message = """
- # # 🌱 Welcome to GreenGuide!
- # ## Your AI-driven assistant for all eco-friendly travel-related queries in NYC. Created by Eva, Amy, and Ambur of the 2024 Kode With Klossy NYC AI/ML Camp.
- # """
-
- # topics = """
- # ### Feel free to ask me anything things to do in the city!
- # - Hotels (affordable, luxury)
- # - Restaurants (regular, vegetarian, vegan)
- # - Parks & Gardens
- # - Thrift Stores
- # - Attractions
- # """
-
- # # Create a Gradio HTML component
- # def display_iframe():
- #     return iframe
- # def display_image():
- #     return "https://i.giphy.com/media/v1.Y2lkPTc5MGI3NjExZzdqMnkzcWpjbGhmM3hzcXp0MGpuaTF5djR4bjBxM3Biam5zbzNnMCZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9cw/GxMnTi3hV3qaIgbgQL/giphy.gif"
- #     #return "https://cdn-uploads.huggingface.co/production/uploads/6668622b72b61ba78fe7d4bb/PkWjNxvGm9MOqGkZdiT4e.png"
- # theme = gr.themes.Monochrome(
- #     primary_hue="amber", #okay this did NOT work lmaoo
- #     secondary_hue="rose",
- # ).set(
- #     background_fill_primary='#CBE9A2', # BACKGROUND
- #     background_fill_primary_dark='#768550',
- #     background_fill_secondary='#768550', # BUTTON HOVER
- #     background_fill_secondary_dark='#99a381', #LOADING BAR
- #     border_color_accent='#768550',
- #     border_color_accent_dark='#768550',
- #     border_color_accent_subdued='#768550',
- #     border_color_primary='#03a9f4',
- #     block_border_color='#b3e5fc',
- #     button_primary_background_fill='#768550',
- #     button_primary_background_fill_dark='#768550'
- # )
-
- # # Setup the Gradio Blocks interface with custom layout components
- # with gr.Blocks(theme=theme) as demo:
- #     gr.Image("header2.png", show_label = False, show_share_button = False, show_download_button = False) #CHANGE !!
- #     gr.Markdown(welcome_message) # Display the formatted welcome message
- #     with gr.Row():
- #         with gr.Column():
- #             gr.Markdown(topics) # Show the topics on the left side
- #     with gr.Row():
- #         with gr.Column():
- #             question = gr.Textbox(label="Your question", placeholder="What do you want to ask about?")
- #             answer = gr.Textbox(label="GreenGuide Response", placeholder="GreenGuide will respond here...", interactive=False, lines=10)
- #             submit_button = gr.Button("Submit")
- #             submit_button.click(fn=query_model, inputs=question, outputs=answer)
-
- #     gr.HTML(iframe)
-
-
- # # Launch the Gradio app to allow user interaction
- # demo.launch(share=True)
+ demo.launch(share=True)
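
For context, the commented-out block removed above implemented a retrieve-then-generate flow: encode the user's query and the preloaded text segments with a SentenceTransformer, pick the segment with the highest cosine similarity, then send it to gpt-3.5-turbo through the legacy pre-1.0 openai.ChatCompletion API (max_tokens=150, temperature=0.2). A minimal, runnable sketch of just the retrieval step follows; the stock all-MiniLM-L6-v2 model and the placeholder segments are assumptions standing in for the fine-tuned checkpoint at output/sentence-transformer-finetuned/ and the real output_topic_details.txt data.

# Sketch of the retrieval step from the removed code, not the app's actual setup.
# Assumption: a stock model replaces the fine-tuned checkpoint used in the repo.
from sentence_transformers import SentenceTransformer, util

retrieval_model = SentenceTransformer("all-MiniLM-L6-v2")

def find_relevant_segment(user_query, segments):
    # Encode the lowercased query and all candidate segments.
    query_embedding = retrieval_model.encode(user_query.lower())
    segment_embeddings = retrieval_model.encode(segments)
    # Cosine similarity between the query and every segment.
    similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
    # Return the single best-matching segment.
    return segments[int(similarities.argmax())]

# Example usage with placeholder segments (not from the real data file).
segments = [
    "The High Line is a public park built on a former elevated rail line.",
    "Several hotels in Manhattan hold LEED certification.",
]
print(find_relevant_segment("eco-friendly hotels in NYC", segments))

In the removed code, the returned segment was appended to the running messages list and passed to the chat completion call, whose reply was shown in the GreenGuide response textbox.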