Nguyen Quang Truong committed on
Commit
58106c5
·
1 Parent(s): 47908a3
Agent/tools/kg_search.py CHANGED
@@ -21,7 +21,7 @@ examples = example_pairs["examples"]
21
 
22
  # LLM for choose the best similar examples
23
  load_dotenv()
24
- os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
25
 
26
  embedding_model = GoogleGenerativeAIEmbeddings(
27
  model= "models/text-embedding-004"
@@ -63,7 +63,7 @@ def generate_cypher(question: str) -> str:
63
  os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
64
  os.environ["NEO4J_USERNAME"] = os.getenv("NEO4J_USERNAME")
65
  os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
66
- os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
67
 
68
  gemini_chat = ChatGoogleGenerativeAI(
69
  model= "gemini-1.5-flash-latest"
 
21
 
22
  # LLM for choose the best similar examples
23
  load_dotenv()
24
+ os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
25
 
26
  embedding_model = GoogleGenerativeAIEmbeddings(
27
  model= "models/text-embedding-004"
 
63
  os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
64
  os.environ["NEO4J_USERNAME"] = os.getenv("NEO4J_USERNAME")
65
  os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
66
+ os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
67
 
68
  gemini_chat = ChatGoogleGenerativeAI(
69
  model= "gemini-1.5-flash-latest"
Agent/tools/tavily_search_v2.py CHANGED
@@ -7,7 +7,7 @@ from langchain.tools import BaseTool, StructuredTool, tool
7
  load_dotenv()
8
 
9
  os.environ["TAVILY_API_KEY"] = os.getenv("TAVILY_API_KEY")
10
- os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
11
 
12
  def tavily_search(question: str) -> str:
13
  """
 
7
  load_dotenv()
8
 
9
  os.environ["TAVILY_API_KEY"] = os.getenv("TAVILY_API_KEY")
10
+ os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
11
 
12
  def tavily_search(question: str) -> str:
13
  """
Agent/utils.py CHANGED
@@ -51,13 +51,7 @@ def get_llm_response(query):
51
  return chain.invoke({"query": query})["result"]
52
 
53
  def llm_answer(message, history):
54
- # history_langchain_format = []
55
- #
56
- # for human, ai in history:
57
- # history_langchain_format.append(HumanMessage(content= human))
58
- # history_langchain_format.append(AIMessage(content= ai))
59
- #
60
- # history_langchain_format.append(HumanMessage(content= message["text"]))
61
 
62
  try:
63
  response = get_llm_response(message["text"])
 
51
  return chain.invoke({"query": query})["result"]
52
 
53
  def llm_answer(message, history):
54
+
 
 
 
 
 
 
55
 
56
  try:
57
  response = get_llm_response(message["text"])