Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -467,154 +467,154 @@ Detailed Answer:

 import traceback

-[old lines 470-617 removed; apart from the old signature at line 594, def generate_answer(message, choice, retrieval_mode, selected_model), the removed text is not preserved in this view]
+def generate_answer(message, choice, retrieval_mode, selected_model):
+    logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
+
+    # Logic for disabling options for Phi-3.5
+    if selected_model == "LM-2":
+        choice = None
+        retrieval_mode = None
+
+    try:
+        # Select the appropriate template based on the choice and model
+        if choice == "Details" and selected_model == chat_model1:  # GPT-4o-mini
+            prompt_template = PromptTemplate(input_variables=["context", "question"], template=gpt4o_mini_template_details)
+        elif choice == "Details":
+            prompt_template = QA_CHAIN_PROMPT_1
+        elif choice == "Conversational":
+            prompt_template = QA_CHAIN_PROMPT_2
+        else:
+            prompt_template = QA_CHAIN_PROMPT_1  # Fallback to template1
+
+        # # Handle hotel-related queries
+        # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+        #     logging.debug("Handling hotel-related query")
+        #     response = fetch_google_hotels()
+        #     logging.debug(f"Hotel response: {response}")
+        #     return response, extract_addresses(response)
+
+        # # Handle restaurant-related queries
+        # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+        #     logging.debug("Handling restaurant-related query")
+        #     response = fetch_yelp_restaurants()
+        #     logging.debug(f"Restaurant response: {response}")
+        #     return response, extract_addresses(response)
+
+        # # Handle flight-related queries
+        # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+        #     logging.debug("Handling flight-related query")
+        #     response = fetch_google_flights()
+        #     logging.debug(f"Flight response: {response}")
+        #     return response, extract_addresses(response)
+
+        # Retrieval-based response
+        if retrieval_mode == "VDB":
+            logging.debug("Using VDB retrieval mode")
+            if selected_model == chat_model:
+                logging.debug("Selected model: LM-1")
+                retriever = gpt_retriever
+                context = retriever.get_relevant_documents(message)
+                logging.debug(f"Retrieved context: {context}")
+
+                prompt = prompt_template.format(context=context, question=message)
+                logging.debug(f"Generated prompt: {prompt}")
+
+                qa_chain = RetrievalQA.from_chain_type(
+                    llm=chat_model,
+                    chain_type="stuff",
+                    retriever=retriever,
+                    chain_type_kwargs={"prompt": prompt_template}
+                )
+                response = qa_chain({"query": message})
+                logging.debug(f"LM-1 response: {response}")
+                return response['result'], extract_addresses(response['result'])
+
+            elif selected_model == chat_model1:
+                logging.debug("Selected model: LM-3")
+                retriever = gpt_retriever
+                context = retriever.get_relevant_documents(message)
+                logging.debug(f"Retrieved context: {context}")
+
+                prompt = prompt_template.format(context=context, question=message)
+                logging.debug(f"Generated prompt: {prompt}")
+
+                qa_chain = RetrievalQA.from_chain_type(
+                    llm=chat_model1,
+                    chain_type="stuff",
+                    retriever=retriever,
+                    chain_type_kwargs={"prompt": prompt_template}
+                )
+                response = qa_chain({"query": message})
+                logging.debug(f"LM-3 response: {response}")
+                return response['result'], extract_addresses(response['result'])
+
+            elif selected_model == phi_pipe:
+                logging.debug("Selected model: LM-2")
+                retriever = phi_retriever
+                context_documents = retriever.get_relevant_documents(message)
+                context = "\n".join([doc.page_content for doc in context_documents])
+                logging.debug(f"Retrieved context for LM-2: {context}")
+
+                # Use the correct template variable
+                prompt = phi_custom_template.format(context=context, question=message)
+                logging.debug(f"Generated LM-2 prompt: {prompt}")
+
+                response = selected_model(prompt, **{
+                    "max_new_tokens": 400,
+                    "return_full_text": True,
+                    "temperature": 0.7,
+                    "do_sample": True,
+                })
+
+                if response:
+                    generated_text = response[0]['generated_text']
+                    logging.debug(f"LM-2 Response: {generated_text}")
+                    cleaned_response = clean_response(generated_text)
+                    return cleaned_response, extract_addresses(cleaned_response)
+                else:
+                    logging.error("LM-2 did not return any response.")
+                    return "No response generated.", []
+
+        elif retrieval_mode == "KGF":
+            logging.debug("Using KGF retrieval mode")
+            response = chain_neo4j.invoke({"question": message})
+            logging.debug(f"KGF response: {response}")
+            return response, extract_addresses(response)
+        else:
+            logging.error("Invalid retrieval mode selected.")
+            return "Invalid retrieval mode selected.", []
+
+    except Exception as e:
+        logging.error(f"Error in generate_answer: {str(e)}")
+        logging.error(traceback.format_exc())
+        return "Sorry, I encountered an error while processing your request.", []
+
+# def generate_answer(message, choice, retrieval_mode, selected_model):
+#     # Logic for Phi-3.5
+#     if selected_model == phi_pipe:  # LM-2 Phi-3.5 selected
+#         retriever = phi_retriever
+#         context_documents = retriever.get_relevant_documents(message)
+#         context = "\n".join([doc.page_content for doc in context_documents])
+#
+#         # Use the correct template for Phi-3.5
+#         prompt = phi_custom_template.format(context=context, question=message)
+#
+#         response = selected_model(prompt, **{
+#             "max_new_tokens": 400,
+#             "return_full_text": True,
+#             "temperature": 0.7,
+#             "do_sample": True,
+#         })
+#
+#         if response:
+#             generated_text = response[0]['generated_text']
+#             cleaned_response = clean_response(generated_text)
+#             # return cleaned_response, extract_addresses(cleaned_response)
+#             return cleaned_response
+#         else:
+#             return "No response generated.", []

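For orientation (not part of the commit): the VDB branches above follow LangChain's stock RetrievalQA pattern. A minimal self-contained sketch of that flow, assuming an OpenAI key in the environment; the toy corpus, prompt, and model name below are invented stand-ins for the app's actual objects.

# Minimal sketch of the RetrievalQA pattern used in the VDB branches above.
# Assumes OPENAI_API_KEY is set; the corpus and names here are illustrative only.
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

template = "Use the context to answer.\nContext: {context}\nQuestion: {question}\nAnswer:"
prompt = PromptTemplate(input_variables=["context", "question"], template=template)

# Tiny in-memory vector store standing in for the app's prebuilt retriever.
vectorstore = FAISS.from_texts(
    ["Railroad Park hosts free concerts in Birmingham every summer."],
    OpenAIEmbeddings(),
)

qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model="gpt-4o-mini"),
    chain_type="stuff",                      # retrieved docs are stuffed into the prompt
    retriever=vectorstore.as_retriever(),
    chain_type_kwargs={"prompt": prompt},
)
print(qa_chain({"query": "What happens at Railroad Park?"})["result"])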
@@ -631,58 +631,58 @@ def add_message(history, message):

 def print_like_dislike(x: gr.LikeData):
     print(x.index, x.value, x.liked)

-[old lines 634-685 removed; the removed text is not preserved in this view]
+def extract_addresses(response):
+    if not isinstance(response, str):
+        response = str(response)
+    address_patterns = [
+        r'([A-Z].*,\sBirmingham,\sAL\s\d{5})',
+        r'(\d{4}\s.*,\sBirmingham,\sAL\s\d{5})',
+        r'([A-Z].*,\sAL\s\d{5})',
+        r'([A-Z].*,.*\sSt,\sBirmingham,\sAL\s\d{5})',
+        r'([A-Z].*,.*\sStreets,\sBirmingham,\sAL\s\d{5})',
+        r'(\d{2}.*\sStreets)',
+        r'([A-Z].*\s\d{2},\sBirmingham,\sAL\s\d{5})',
+        r'([a-zA-Z]\s Birmingham)',
+        r'([a-zA-Z].*,\sBirmingham,\sAL)',
+        r'(.*),(Birmingham, AL,USA)$',
+        r'(^Birmingham,AL$)',
+        r'((.*)(Stadium|Field),.*,\sAL$)',
+        r'((.*)(Stadium|Field),.*,\sFL$)',
+        r'((.*)(Stadium|Field),.*,\sMS$)',
+        r'((.*)(Stadium|Field),.*,\sAR$)',
+        r'((.*)(Stadium|Field),.*,\sKY$)',
+        r'((.*)(Stadium|Field),.*,\sTN$)',
+        r'((.*)(Stadium|Field),.*,\sLA$)',
+    ]
+    addresses = []
+    for pattern in address_patterns:
+        addresses.extend(re.findall(pattern, response))
+    return addresses
+
+all_addresses = []
+
+def generate_map(location_names):
+    global all_addresses
+    all_addresses.extend(location_names)
+
+    api_key = os.environ['GOOGLEMAPS_API_KEY']
+    gmaps = GoogleMapsClient(key=api_key)
+
+    m = folium.Map(location=[33.5175, -86.809444], zoom_start=12)
+
+    for location_name in all_addresses:
+        geocode_result = gmaps.geocode(location_name)
+        if geocode_result:
+            location = geocode_result[0]['geometry']['location']
+            folium.Marker(
+                [location['lat'], location['lng']],
+                tooltip=f"{geocode_result[0]['formatted_address']}"
+            ).add_to(m)
+
+    map_html = m._repr_html_()
+    return map_html

 from diffusers import DiffusionPipeline
 import torch
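A quick illustration (not from the commit) of how extract_addresses behaves: several of the patterns above overlap, so a single address-bearing sentence can yield multiple, partly duplicated matches, which is worth knowing before feeding the result to generate_map. The sample string below is invented.

# Illustrative only; exercises extract_addresses as defined in the diff above.
sample = "Visit Railroad Park at 1600 1st Ave S, Birmingham, AL 35233 this weekend."
print(extract_addresses(sample))
# Expect several overlapping captures, e.g. the whole leading clause from
# r'([A-Z].*,\sBirmingham,\sAL\s\d{5})' plus the street-number match from
# r'(\d{4}\s.*,\sBirmingham,\sAL\s\d{5})'. Note that patterns with multiple
# groups (the Stadium|Field ones) make re.findall return tuples, not strings.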
@@ -1100,32 +1100,32 @@ def handle_retrieval_mode_change(choice):

-# def handle_model_choice_change(selected_model):
-#     if selected_model == "LM-2":
-#         # Disable retrieval mode and select style when LM-2 is selected
-#         return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
-#     elif selected_model == "LM-1":
-#         # Enable retrieval mode and select style for LM-1
-#         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
-#     else:
-#         # Default case: allow interaction
-#         return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
-
 def handle_model_choice_change(selected_model):
-    if selected_model == "LM-2":  # When LM-2 (Phi-3.5) is selected
+    if selected_model == "LM-2":
         # Disable retrieval mode and select style when LM-2 is selected
-        return (
-            gr.update(interactive=False),  # Disable retrieval mode
-            gr.update(interactive=False),  # Disable style (Details/Conversational)
-            gr.update(interactive=False)   # Disable the model choice itself
-        )
+        return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
+    elif selected_model == "LM-1":
+        # Enable retrieval mode and select style for LM-1
+        return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
     else:
-        # Disable GPT-4o, GPT-4o-mini, and KGF, only Phi-3.5 works
-        return (
-            gr.update(interactive=True),   # Allow retrieval mode for other models
-            gr.update(interactive=True),   # Allow style options for other models
-            gr.update(interactive=True)    # Allow other models to be selected
-        )
+        # Default case: allow interaction
+        return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
+
+# def handle_model_choice_change(selected_model):
+#     if selected_model == "LM-2":  # When LM-2 (Phi-3.5) is selected
+#         # Disable retrieval mode and select style when LM-2 is selected
+#         return (
+#             gr.update(interactive=False),  # Disable retrieval mode
+#             gr.update(interactive=False),  # Disable style (Details/Conversational)
+#             gr.update(interactive=False)   # Disable the model choice itself
+#         )
+#     else:
+#         # Disable GPT-4o, GPT-4o-mini, and KGF, only Phi-3.5 works
+#         return (
+#             gr.update(interactive=True),   # Allow retrieval mode for other models
+#             gr.update(interactive=True),   # Allow style options for other models
+#             gr.update(interactive=True)    # Allow other models to be selected
+#         )

 #Flux Coding
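For context (the component names below are an assumption, not taken from this commit): the three gr.update values returned by handle_model_choice_change map positionally onto three output components when the function is bound to a change event, along these lines.

# Hypothetical wiring sketch; the radio components and their choices are assumptions.
import gradio as gr

with gr.Blocks() as demo:
    model_choice = gr.Radio(["LM-1", "LM-2", "LM-3"], label="Model")
    retrieval_mode = gr.Radio(["VDB", "KGF"], label="Retrieval mode")
    choice = gr.Radio(["Details", "Conversational"], label="Response style")

    # Each gr.update(...) in the return tuple lands on one output, in order.
    model_choice.change(
        fn=handle_model_choice_change,
        inputs=model_choice,
        outputs=[retrieval_mode, choice, model_choice],
    )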