Pijush2023 committed on
Commit
98bc14b
·
verified ·
1 Parent(s): df5cc55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -48
app.py CHANGED
@@ -964,20 +964,6 @@ graph = Neo4jGraph(
964
  password="B_sZbapCTZoQDWj1JrhwqElsNa-jm5Zq1m_mAnyPYog"
965
  )
966
 
967
- # Avoid pushing the graph documents to Neo4j every time
968
- # Only push the documents once and comment the code below after the initial push
969
- # dataset_name = "Pijush2023/birmindata07312024"
970
- # page_content_column = 'events_description'
971
- # loader = HuggingFaceDatasetLoader(dataset_name, page_content_column)
972
- # data = loader.load()
973
-
974
- # text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50)
975
- # documents = text_splitter.split_documents(data)
976
-
977
- # llm_transformer = LLMGraphTransformer(llm=chat_model)
978
- # graph_documents = llm_transformer.convert_to_graph_documents(documents)
979
- # graph.add_graph_documents(graph_documents, baseEntityLabel=True, include_source=True)
980
-
981
  class Entities(BaseModel):
982
  names: list[str] = Field(..., description="All the person, organization, or business entities that appear in the text")
983
 
@@ -1029,37 +1015,6 @@ def retriever_neo4j(question: str):
1029
  structured_data = structured_retriever(question)
1030
  return structured_data
1031
 
1032
- _template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question,
1033
- in its original language.
1034
- Chat History:
1035
- {chat_history}
1036
- Follow Up Input: {question}
1037
- Standalone question:"""
1038
-
1039
- CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
1040
-
1041
- def _format_chat_history(chat_history: list[tuple[str, str]]) -> list:
1042
- buffer = []
1043
- for human, ai in chat_history:
1044
- buffer.append(HumanMessage(content=human))
1045
- buffer.append(AIMessage(content=ai))
1046
- return buffer
1047
-
1048
- _search_query = RunnableBranch(
1049
- (
1050
- RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config(
1051
- run_name="HasChatHistoryCheck"
1052
- ),
1053
- RunnablePassthrough.assign(
1054
- chat_history=lambda x: _format_chat_history(x["chat_history"])
1055
- )
1056
- | CONDENSE_QUESTION_PROMPT
1057
- | ChatOpenAI(temperature=0, api_key=os.environ['OPENAI_API_KEY'])
1058
- | StrOutputParser(),
1059
- ),
1060
- RunnableLambda(lambda x : x["question"]),
1061
- )
1062
-
1063
  template = """Answer the question based only on the following context:
1064
  {context}
1065
  Question: {question}
@@ -1096,8 +1051,10 @@ def generate_answer(message, choice, retrieval_mode):
1096
  response = qa_chain({"query": message})
1097
  return response['result'], extract_addresses(response['result'])
1098
  elif retrieval_mode == "Knowledge-Graph":
1099
- response = chain_neo4j.invoke({"question": message})
1100
- return response, extract_addresses(response)
 
 
1101
  else:
1102
  return "Invalid retrieval mode selected.", []
1103
 
@@ -1298,7 +1255,7 @@ def show_map_if_details(history, choice):
1298
  if choice in ["Details", "Conversational"]:
1299
  return gr.update(visible=True), update_map_with_response(history)
1300
  else:
1301
- return gr.update(visible=False), ""
1302
 
1303
  def generate_audio_elevenlabs(text):
1304
  XI_API_KEY = os.environ['ELEVENLABS_API']
@@ -1681,3 +1638,4 @@ demo.launch(share=True)
1681
 
1682
 
1683
 
 
 
964
  password="B_sZbapCTZoQDWj1JrhwqElsNa-jm5Zq1m_mAnyPYog"
965
  )
966
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
967
  class Entities(BaseModel):
968
  names: list[str] = Field(..., description="All the person, organization, or business entities that appear in the text")
969
 
 
1015
  structured_data = structured_retriever(question)
1016
  return structured_data
1017
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1018
  template = """Answer the question based only on the following context:
1019
  {context}
1020
  Question: {question}
 
1051
  response = qa_chain({"query": message})
1052
  return response['result'], extract_addresses(response['result'])
1053
  elif retrieval_mode == "Knowledge-Graph":
1054
+ context = retriever_neo4j(message)
1055
+ qa_chain = ChatPromptTemplate.from_template(prompt_template.template)
1056
+ response = qa_chain.invoke({"context": context, "question": message})
1057
+ return response['result'], extract_addresses(response['result'])
1058
  else:
1059
  return "Invalid retrieval mode selected.", []
1060
 
 
1255
  if choice in ["Details", "Conversational"]:
1256
  return gr.update(visible=True), update_map_with_response(history)
1257
  else:
1258
+ return gr.update(visible=False), ""
1259
 
1260
  def generate_audio_elevenlabs(text):
1261
  XI_API_KEY = os.environ['ELEVENLABS_API']
 
1638
 
1639
 
1640
 
1641
+