WilliamGazeley committed
Commit 3651997 · 1 Parent(s): 9efba8b

Migrate to langgraph
Dockerfile CHANGED
@@ -45,6 +45,8 @@ RUN pyenv install ${PYTHON_VERSION} && \
 COPY --chown=1000 ./requirements.txt /tmp/requirements.txt
 RUN pip install --no-cache-dir --upgrade -r /tmp/requirements.txt && \
     pip install flash-attn --no-build-isolation
+
+RUN curl -fsSL https://ollama.com/install.sh | sh
 RUN ollama pull ${OLLAMA_MODEL}
 
 COPY --chown=1000 src ${HOME}/app
requirements.txt CHANGED
@@ -27,3 +27,4 @@ azure-search-documents==11.6.0b1
 azure-identity==1.16.0
 loguru==0.7.2
 openai==1.30.1
+langgraph==0.0.50
src/agents/__init__.py CHANGED
@@ -1,29 +1,60 @@
+from typing import Dict
+import langchain_community
+from langchain.agents import AgentExecutor
+from langchain_core.prompts import PromptTemplate
+from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
 from langchain_community.chat_models import ChatOllama
-from prompts.prompt import rag_agent_prompt
+
 from agents.functions_agent.base import create_functions_agent
-from langchain.agents import AgentExecutor
-from langchain.memory import ChatMessageHistory
-from functions import get_openai_functions, tools, get_openai_tools
+from functions import tools, get_openai_tools
 from config import config
 
-llm = ChatOllama(model = config.ollama_model, temperature = 0.55)
+from prompts.prompt import (
+    rag_agent_prompt,
+    doc_grader_agent_prompt,
+    router_agent_prompt,
+    hallucination_grader_agent_prompt,
+    rephrase_agent_prompt,
+    output_agent_prompt,
+)
 
 tools_dict = get_openai_tools()
 
-history = ChatMessageHistory()
-
-functions_agent = create_functions_agent(llm=llm, prompt=rag_agent_prompt)
-functions_agent_executor = AgentExecutor(agent=functions_agent, tools=tools, verbose=True, return_intermediate_steps=True)
-
+def get_agents(llm: langchain_community.chat_models.ChatOllama) -> Dict[str, AgentExecutor]:
+    """Returns all available agents."""
+    functions_agent = create_functions_agent(llm=llm, prompt=rag_agent_prompt)
+    functions_agent_executor = AgentExecutor(agent=functions_agent, tools=tools, verbose=True, return_intermediate_steps=True)
+
+    rephrase_agent = rephrase_agent_prompt | llm | StrOutputParser()
+    router_agent = router_agent_prompt | llm | JsonOutputParser()
+    hallucination_grader_agent = hallucination_grader_agent_prompt | llm | JsonOutputParser()
+    doc_grader_agent = doc_grader_agent_prompt | llm | JsonOutputParser()
+    output_agent = output_agent_prompt | llm | StrOutputParser()
+
+    return {
+        "function_agent": functions_agent_executor,
+        "doc_grader_agent": doc_grader_agent,
+        "hallucination_grader_agent": hallucination_grader_agent,
+        "router_agent": router_agent,
+        "rephrase_agent": rephrase_agent,
+        "output_agent": output_agent,
+    }
+
 
 if __name__ == "__main__":
+    from langchain.memory import ChatMessageHistory
+    history = ChatMessageHistory()
+
+    llm = ChatOllama(model = config.ollama_model, temperature = 0.55)
+    function_agent = get_agents(llm)["function_agent"]
+
     while True:
         try:
             inp = input("User:")
             if inp == "/bye":
                 break
 
-            response = functions_agent_executor.invoke({"input": inp, "chat_history": history, "tools" : tools_dict})
+            response = function_agent.invoke({"input": inp, "chat_history": history, "tools" : tools_dict})
             response['output'] = response['output'].replace("<|im_end|>", "")
             history.add_user_message(inp)
             history.add_ai_message(response['output'])
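
As a point of reference for how the new get_agents factory is consumed elsewhere (src/app.py builds its graph nodes from it), here is a minimal usage sketch; the example question is illustrative and not part of this commit:

    from langchain_community.chat_models import ChatOllama
    from agents import get_agents
    from config import config

    llm = ChatOllama(model=config.ollama_model, temperature=0.55)
    agents = get_agents(llm)

    # Chains ending in JsonOutputParser return a dict, e.g. {"datasource": "vectorstore"}
    route = agents["router_agent"].invoke({"question": "Is ETH staking still worth it?"})

    # Chains ending in StrOutputParser return plain text
    rephrased = agents["rephrase_agent"].invoke({"question": "Is ETH staking still worth it?"})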
src/agents/functions_agent/base.py CHANGED
@@ -45,4 +45,4 @@ def create_functions_agent(
         | llm
         | FunctionsAgentOutputParser()
     )
-    return agent
+    return agent
src/app.py CHANGED
@@ -1,71 +1,100 @@
-import os
 from time import time
+from pprint import pprint
 import huggingface_hub
 import streamlit as st
+from typing import Literal, Dict
+from typing_extensions import TypedDict
+import langchain
+from langgraph.graph import END, StateGraph
+from langchain_community.chat_models import ChatOllama
+from logger import logger
+
 from config import config
-from functioncall import ModelInference
+from agents import get_agents, tools_dict
+
+
+class GraphState(TypedDict):
+    """Represents the state of the graph."""
+    question: str
+    rephrased_question: str
+    function_agent_output: str
+    generation: str
 
 
 @st.cache_resource(show_spinner="Loading model..")
-def init_llm():
+def init_agents() -> dict[str, langchain.agents.AgentExecutor]:
     huggingface_hub.login(token=config.hf_token, new_session=False)
-    llm = ModelInference(chat_template=config.chat_template)
-    return llm
-
-
-def function_agent(prompt):
-    try:
-        return llm.generate_function_call(
-            prompt, config.chat_template, config.num_fewshot, config.max_depth
-        )
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
-
-def output_agent(context, user_input):
-    """Takes the output of the RAG and generates a final response."""
-    try:
-        config.status.update(label=":bulb: Preparing answer..")
-        script_dir = os.path.dirname(os.path.abspath(__file__))
-        prompt_path = os.path.join(script_dir, "prompt_assets", "output_sys_prompt.yml")
-        prompt_schema = llm.prompter.read_yaml_file(prompt_path)
-        sys_prompt = (
-            llm.prompter.format_yaml_prompt(prompt_schema, dict())
-            + f"Information:\n{context}"
-        )
-        convo = [
-            {"role": "system", "content": sys_prompt},
-            {"role": "user", "content": user_input},
-        ]
-        response = llm.run_inference(convo)
-        return response
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
-def query_agent(prompt):
-    """Modifies the prompt and runs inference on it."""
-    try:
-        config.status.update(label=":brain: Starting inference..")
-        script_dir = os.path.dirname(os.path.abspath(__file__))
-        prompt_path = os.path.join(script_dir, "prompt_assets", "output_sys_prompt.yml")
-        prompt_schema = llm.prompter.read_yaml_file(prompt_path)
-        sys_prompt = llm.prompter.format_yaml_prompt(prompt_schema, dict())
-        convo = [
-            {"role": "system", "content": sys_prompt},
-            {"role": "user", "content": prompt},
-        ]
-        response = llm.run_inference(convo)
-        return response
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
-
-
-def get_response(input_text: str):
-    """This is the main function that generates the final response."""
-    agent_resp = function_agent(input_text)
-    output = output_agent(agent_resp, input_text)
-    return output
-
+    llm = ChatOllama(model = config.ollama_model, temperature = 0.8)
+    return get_agents(llm)
+
+
+# Nodes -----------------------------------------------------------------------
+
+def question_node(state: GraphState) -> Dict[str, str]:
+    """
+    Generate a question for the function agent.
+    """
+    logger.info("Generating question for function agent")
+    config.status.update(label=":question: Breaking down question")
+    question = state["question"]
+    logger.info(f"Original question: {question}")
+    rephrased_question = agents["rephrase_agent"].invoke({"question": question})
+    logger.info(f"Rephrased question: {rephrased_question}")
+    return {"rephrased_question": rephrased_question}
+
+def function_agent_node(state: GraphState) -> Dict[str, str]:
+    """
+    Call the function agent.
+    """
+    logger.info("Calling function agent")
+    question = state["rephrased_question"]
+    response = agents["function_agent"].invoke({"input": question, "tools": tools_dict}).get("output")
+    config.status.update(label=":brain: Analysing data..")
+    logger.info(f"Function agent output: {response}")
+    return {"function_agent_output": response}
+
+def output_node(state: GraphState) -> Dict[str, str]:
+    """
+    Generate the final output.
+    """
+    logger.info("Generating output")
+    config.status.update(label=":bulb: Preparing response..")
+    generation = agents["output_agent"].invoke({"context": state["function_agent_output"],
+                                                "question": state["rephrased_question"]})
+    return {"generation": generation}
+
+# Conditional Edge ------------------------------------------------------------
+
+def route_question(state: GraphState) -> Literal["vectorstore", "websearch"]:
+    """
+    Route question to web search or RAG.
+    """
+    logger.info("Routing question")
+    config.status.update(label=":chart_with_upwards_trend: Routing question")
+    question = state["question"]
+    logger.info(f"Question: {question}")
+    source = agents["router_agent"].invoke({"question": question})
+    logger.info(source)
+    logger.info(source["datasource"])
+    if source["datasource"] == "vectorstore":
+        return "vectorstore"
+    elif source["datasource"] == "websearch":
+        return "websearch"
+
+
+# Graph -----------------------------------------------------------------------
+
+workflow = StateGraph(GraphState)
+workflow.add_node("question_rephrase", question_node)
+workflow.add_node("function_agent", function_agent_node)
+workflow.add_node("output_node", output_node)
+
+workflow.set_entry_point("question_rephrase")
+workflow.add_edge("question_rephrase", "function_agent")
+workflow.add_edge("function_agent", "output_node")
+workflow.set_finish_point("output_node")
+
+flow = workflow.compile()
 
 def main():
     st.title("LLM-ADE 9B Demo")
@@ -76,7 +105,10 @@ def main():
     if input_text:
         with st.status("Generating response...") as status:
             config.status = status
-            st.write(get_response(input_text))
+            for output in flow.stream({"question": input_text}):
+                for key, value in output.items():
+                    pprint(f"Finished running: {key}")
+            st.write(value["generation"])
             config.status.update(label="Finished!", state="complete", expanded=True)
     else:
         st.warning("Please enter some text to generate a response.")
@@ -84,17 +116,18 @@ def main():
 
 def main_headless(prompt: str):
     start = time()
-    print("\033[94m" + get_response(prompt) + "\033[0m")
+    for output in flow.stream({"question": prompt}):
+        for key, value in output.items():
+            pprint(f"Finished running: {key}")
+    print("\033[94m" + value["generation"] + "\033[0m")
     print(f"Time taken: {time() - start:.2f}s\n" + "-" * 20)
 
 
-llm = init_llm()
-
+agents = init_agents()
 
 if __name__ == "__main__":
     if config.headless:
         import fire
-
         fire.Fire(main_headless)
     else:
         main()
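
Note that route_question is defined in the new src/app.py but never attached to the graph; the compiled flow always runs question_rephrase -> function_agent -> output_node. A sketch of how it could be wired in using langgraph's conditional entry point follows; the "websearch" node and websearch_node function are assumptions that do not exist in this commit:

    # Hypothetical wiring, not part of this commit
    workflow.add_node("websearch", websearch_node)  # assumed node; it would need to
                                                    # populate function_agent_output itself
    workflow.set_conditional_entry_point(
        route_question,
        {
            "vectorstore": "question_rephrase",
            "websearch": "websearch",
        },
    )
    workflow.add_edge("websearch", "output_node")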
src/functions.py CHANGED
@@ -230,6 +230,7 @@ def get_dividend_data(symbol: str) -> pd.DataFrame:
 def get_company_news(symbol: str) -> pd.DataFrame:
     """
     Get company news and press releases for a given stock symbol.
+    If you use this, you must use the articles to back up your analysis.
 
     Args:
         symbol (str): The stock symbol.
@@ -285,7 +286,7 @@ tools = [
     get_analysis,
     # google_search_and_scrape,
     get_current_stock_price,
-    get_company_news,
+    # get_company_news,
     # get_company_profile,
     # get_stock_fundamentals,
     # get_financial_statements,
src/prompts/answer_grader_template.yaml ADDED
@@ -0,0 +1,10 @@
+sys_msg: "
+  You are a grader assessing whether an answer is useful to resolve a question.
+  Give a binary score 'yes' or 'no' to indicate whether the answer is useful to resolve a question.
+  Provide the binary score as a JSON with a single key 'score' and no preamble or explanation."
+human_msg: "
+  Here is the answer:
+  \n ------- \n
+  {generation}
+  \n ------- \n
+  Here is the question: {question}"
src/prompts/doc_grader_template.yaml ADDED
@@ -0,0 +1,11 @@
+sys_msg: "
+  You are a grader assessing relevance of a retrieved document to a user question.
+  If the document contains keywords related to the user question, grade it as relevant.
+  It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
+  Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question. \n
+  Provide the binary score as a JSON with a single key 'score' and no preamble or explanation."
+human_msg: "
+  Here is the retrieved document: \n\n {document}
+
+  Here is the user question: {question} \n
+  "
src/prompts/hallucination_grader_template.yaml ADDED
@@ -0,0 +1,6 @@
+sys_msg: "
+  You are a grader assessing whether an answer is grounded in / supported by
+  a set of facts. Give a binary 'yes' or 'no' score to indicate whether the
+  answer is grounded in / supported by a set of facts. Provide the binary score
+  as a JSON with a single key 'score' and no preamble or explanation.
+  "
src/prompts/output_agent_template.yaml ADDED
@@ -0,0 +1,30 @@
+sys_msg: "
+  #ROLE:
+  You are a financial expert named IRAI with experience and expertise in stocks and cryptocurrency.
+  You have a comprehensive understanding of finance, investing, and quantitative analysis; you are a thought leader in these fields.
+  You will be fielding questions about finance from top executives and managers of successful startups.
+  #OBJECTIVE:
+  Answer questions as accurately as possible given your current knowledge and all available information.
+  Your answers should demonstrate expert insight and in-depth analysis.
+  Use analysis, information, and data from the assistant message to form your answers.
+  #INSTRUCTIONS:
+  Try to incorporate the numerical data from the function calls to support your analysis; do not state any other numerical data in your answer.
+  Use all the available information to answer the question, including the data and information from the function calls.
+  Do not mention any function calls, such as \"get_analysis\", \"get_current_stock_price\", or \"get_key_financial_ratios\" in your answer.
+  Give a direct answer to the question, concise yet insightful. Your answer should aim to impress the user with your insight.
+  Do not give additional instructions related to seeking a financial advisor or professional in your answer.
+  Do not ask any follow-up questions in your answer.
+  Do not ask for any additional information in your answer.
+  Do not mention Morgan Stanley in your answer.
+  #Style and tone:
+  Please answer in a friendly and engaging manner representing a top female crypto expert.
+  Do not be formal; answer as you would in a conversation with a friend.
+  Keep your response short unless the question requires a longer response.
+  #Audience:
+  You are talking to a high net worth individual with high risk tolerance and lots of investing experience - they do not need traditional, generic financial advice.
+  \n ------- \n
+  Your thoughts are as follows:
+  {context}
+  \n ------- \n"
+human_msg: "
+  My question: {question}"
src/prompts/prompt.py CHANGED
@@ -4,14 +4,90 @@ from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTem
 import yaml
 
 current_dir = os.path.dirname(os.path.abspath(__file__))
+
+
+# Function Agent
 with open(f"{current_dir}/rag_template.yaml", "r") as yaml_file:
-    templates = yaml.safe_load(yaml_file)
+    rag_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = rag_templates["sys_msg"]
+human_msg_template: str = rag_templates["human_msg"]
 
-# RAG Agent
-sys_msg_template: str = templates["sys_msg"]
-human_msg_template: str = templates["human_msg"]
 rag_agent_prompt = ChatPromptTemplate.from_messages([
     SystemMessagePromptTemplate.from_template(sys_msg_template),
     HumanMessagePromptTemplate.from_template(human_msg_template),
     MessagesPlaceholder(variable_name = "agent_scratchpad")
 ])
+
+
+# Question Rephrase Agent
+with open(f"{current_dir}/question_rephrase_template.yaml", "r") as yaml_file:
+    rephrase_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = rephrase_templates["sys_msg"]
+human_msg_template: str = rephrase_templates["human_msg"]
+
+rephrase_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+    HumanMessagePromptTemplate.from_template(human_msg_template),
+])
+
+
+# Router Agent
+with open(f"{current_dir}/router_template.yaml", "r") as yaml_file:
+    router_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = router_templates["sys_msg"]
+
+router_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+])
+
+
+# Document Grader Agent
+with open(f"{current_dir}/doc_grader_template.yaml", "r") as yaml_file:
+    grader_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = grader_templates["sys_msg"]
+human_msg_template: str = grader_templates["human_msg"]
+
+doc_grader_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+    HumanMessagePromptTemplate.from_template(human_msg_template),
+])
+
+
+# Hallucination Grader Agent
+with open(f"{current_dir}/hallucination_grader_template.yaml", "r") as yaml_file:
+    hallucination_grader_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = hallucination_grader_templates["sys_msg"]
+
+hallucination_grader_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+])
+
+
+# Answer Grader Agent
+with open(f"{current_dir}/answer_grader_template.yaml", "r") as yaml_file:
+    answer_grader_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = answer_grader_templates["sys_msg"]
+human_msg_template: str = answer_grader_templates["human_msg"]
+
+answer_grader_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+    HumanMessagePromptTemplate.from_template(human_msg_template),
+])
+
+# Output Agent
+with open(f"{current_dir}/output_agent_template.yaml", "r") as yaml_file:
+    output_agent_templates = yaml.safe_load(yaml_file)
+
+sys_msg_template: str = output_agent_templates["sys_msg"]
+human_msg_template: str = output_agent_templates["human_msg"]
+
+output_agent_prompt = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(sys_msg_template),
+    HumanMessagePromptTemplate.from_template(human_msg_template),
+])
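
Of the prompts assembled here, answer_grader_agent_prompt is the only one not turned into a chain by get_agents in src/agents/__init__.py. If it were hooked up, a sketch following the same pattern as the other graders might look like this (the temperature and example inputs are assumptions):

    from langchain_core.output_parsers import JsonOutputParser
    from langchain_community.chat_models import ChatOllama
    from prompts.prompt import answer_grader_agent_prompt
    from config import config

    llm = ChatOllama(model=config.ollama_model, temperature=0)  # assumed temperature
    answer_grader_agent = answer_grader_agent_prompt | llm | JsonOutputParser()

    # The template asks for a single-key JSON verdict, e.g. {"score": "yes"}
    verdict = answer_grader_agent.invoke({
        "generation": "Staking returns depend on validator uptime and network issuance.",
        "question": "Is ETH staking still worth it?",
    })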
src/prompts/question_rephrase_template.yaml ADDED
@@ -0,0 +1,17 @@
+sys_msg: "
+  #ROLE:
+  You are a crypto expert named IRAI with experience and expertise in cryptocurrency.
+  You are taking questions from a group of high net worth individuals with high risk tolerance and lots of investing experience.
+  You have an intern hired to assist you in finding information regarding crypto and digital assets.
+  #OBJECTIVE:
+  Identify and infer what information is required to answer the question.
+  Your rephrased question is directed at the intern to help them understand the user's question better.
+  #INSTRUCTIONS:
+  Rephrase the question to include more context and specifics such that the intern can answer the user's question.
+  Rephrasing is necessary when the question is too vague or broad; you will need to infer context.
+  If the question has no context, use your expertise to create an intelligent question regarding the subject in the original question.
+  If rephrasing is unnecessary, return the original question.
+  Never ask for additional information in your rephrased question.
+  The rephrased question should be concise and clear. Do not be conversational. Ask informally."
+human_msg: "
+  {question}"
src/prompts/router_template.yaml ADDED
@@ -0,0 +1,7 @@
+sys_msg: "
+  You are an expert at routing a user question to a vectorstore or web search.
+  Use the vectorstore for questions on LLM agents, prompt engineering, and adversarial attacks.
+  You do not need to be stringent with the keywords in the question related to these topics.
+  Otherwise, use web-search. Give a binary choice 'web_search' or 'vectorstore' based on the question.
+  Return a JSON with a single key 'datasource' and no preamble or explanation.
+  Question to route: {question}:"