Pijush2023 committed on
Commit
d717650
·
verified ·
1 Parent(s): 342b204

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -12
app.py CHANGED
@@ -314,28 +314,114 @@ chain_neo4j = (
314
 
315
 
316
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317
  def generate_answer(message, choice, retrieval_mode, selected_model):
318
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
319
 
320
  try:
321
- # Handle hotel-related queries
322
  if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
323
  response = fetch_google_hotels()
324
  return response, extract_addresses(response)
325
 
326
- # Handle restaurant-related queries
327
  if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
328
  response = fetch_yelp_restaurants()
329
  return response, extract_addresses(response)
330
 
331
- # Handle flight-related queries
332
  if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
333
  response = fetch_google_flights()
334
  return response, extract_addresses(response)
335
 
336
  if retrieval_mode == "VDB":
337
  if selected_model == chat_model:
338
- # Use GPT-4o with its vector store and template
339
  retriever = gpt_retriever
340
  prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
341
  context = retriever.get_relevant_documents(message)
@@ -351,15 +437,9 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
351
  return response['result'], extract_addresses(response['result'])
352
 
353
  elif selected_model == phi_pipe:
354
- # Use Phi-3.5 with its vector store and a simplified prompt
355
  retriever = phi_retriever
356
  context = retriever.get_relevant_documents(message)
357
- prompt = f"""
358
- Here is the information based on the documents provided:
359
- {context}
360
-
361
- {message}
362
- """
363
 
364
  logging.debug(f"Phi-3.5 Prompt: {prompt}")
365
 
@@ -392,7 +472,6 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
392
 
393
 
394
 
395
-
396
  def bot(history, choice, tts_choice, retrieval_mode, model_choice):
397
  if not history:
398
  return history
 
314
 
315
 
316
 
317
+ # def generate_answer(message, choice, retrieval_mode, selected_model):
318
+ # logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
319
+
320
+ # try:
321
+ # # Handle hotel-related queries
322
+ # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
323
+ # response = fetch_google_hotels()
324
+ # return response, extract_addresses(response)
325
+
326
+ # # Handle restaurant-related queries
327
+ # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
328
+ # response = fetch_yelp_restaurants()
329
+ # return response, extract_addresses(response)
330
+
331
+ # # Handle flight-related queries
332
+ # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
333
+ # response = fetch_google_flights()
334
+ # return response, extract_addresses(response)
335
+
336
+ # if retrieval_mode == "VDB":
337
+ # if selected_model == chat_model:
338
+ # # Use GPT-4o with its vector store and template
339
+ # retriever = gpt_retriever
340
+ # prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
341
+ # context = retriever.get_relevant_documents(message)
342
+ # prompt = prompt_template.format(context=context, question=message)
343
+
344
+ # qa_chain = RetrievalQA.from_chain_type(
345
+ # llm=chat_model,
346
+ # chain_type="stuff",
347
+ # retriever=retriever,
348
+ # chain_type_kwargs={"prompt": prompt_template}
349
+ # )
350
+ # response = qa_chain({"query": message})
351
+ # return response['result'], extract_addresses(response['result'])
352
+
353
+ # elif selected_model == phi_pipe:
354
+ # # Use Phi-3.5 with its vector store and a simplified prompt
355
+ # retriever = phi_retriever
356
+ # context = retriever.get_relevant_documents(message)
357
+ # prompt = f"""
358
+ # Here is the information based on the documents provided:
359
+ # {context}
360
+
361
+ # {message}
362
+ # """
363
+
364
+ # logging.debug(f"Phi-3.5 Prompt: {prompt}")
365
+
366
+ # response = selected_model(prompt, **{
367
+ # "max_new_tokens": 300,
368
+ # "return_full_text": False,
369
+ # "temperature": 0.5,
370
+ # "do_sample": False,
371
+ # })
372
+
373
+ # if response:
374
+ # generated_text = response[0]['generated_text']
375
+ # logging.debug(f"Phi-3.5 Response: {generated_text}")
376
+ # return generated_text, extract_addresses(generated_text)
377
+ # else:
378
+ # logging.error("Phi-3.5 did not return any response.")
379
+ # return "No response generated.", []
380
+
381
+ # elif retrieval_mode == "KGF":
382
+ # response = chain_neo4j.invoke({"question": message})
383
+ # return response, extract_addresses(response)
384
+ # else:
385
+ # return "Invalid retrieval mode selected.", []
386
+
387
+ # except Exception as e:
388
+ # logging.error(f"Error in generate_answer: {e}")
389
+ # return "Sorry, I encountered an error while processing your request.", []
390
+
391
+
392
+ # Short Prompt Template for Phi-3.5 Proprietary Model
393
+
394
+ phi_short_template = f"""
395
+ As an expert on Birmingham, Alabama, I will provide concise, accurate, and informative responses to your queries. Given the sunny weather today, {current_date}, feel free to ask me anything you need to know about the city.
396
+
397
+ - Brief details and facts
398
+ - No unnecessary elaboration
399
+ - Focus on relevance
400
+
401
+ {{context}}
402
+ Question: {{question}}
403
+ Answer:
404
+ """
405
+
406
+ # Incorporate this template into the Phi-3.5 response generation section
407
  def generate_answer(message, choice, retrieval_mode, selected_model):
408
  logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
409
 
410
  try:
 
411
  if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
412
  response = fetch_google_hotels()
413
  return response, extract_addresses(response)
414
 
 
415
  if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
416
  response = fetch_yelp_restaurants()
417
  return response, extract_addresses(response)
418
 
 
419
  if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
420
  response = fetch_google_flights()
421
  return response, extract_addresses(response)
422
 
423
  if retrieval_mode == "VDB":
424
  if selected_model == chat_model:
 
425
  retriever = gpt_retriever
426
  prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
427
  context = retriever.get_relevant_documents(message)
 
437
  return response['result'], extract_addresses(response['result'])
438
 
439
  elif selected_model == phi_pipe:
 
440
  retriever = phi_retriever
441
  context = retriever.get_relevant_documents(message)
442
+ prompt = phi_short_template.format(context=context, question=message)
 
 
 
 
 
443
 
444
  logging.debug(f"Phi-3.5 Prompt: {prompt}")
445
 
 
472
 
473
 
474
 
 
475
  def bot(history, choice, tts_choice, retrieval_mode, model_choice):
476
  if not history:
477
  return history