Update app.py
app.py CHANGED
@@ -2,10 +2,8 @@ import os
 import streamlit as st
 from typing import List, Tuple
 import json
-import uvicorn
 from dotenv import load_dotenv
 load_dotenv()
-from fastapi import FastAPI
 from langchain.agents import AgentExecutor
 from langchain.agents.format_scratchpad import format_to_openai_function_messages
 from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
@@ -160,7 +158,7 @@ def search():
     prompt = PromptTemplate(template=template, input_variables=["context","question"])
     chain_type_kwargs = {"prompt": prompt}
     return RetrievalQA.from_chain_type(
-        llm=
+        llm=llm_1,
         chain_type="stuff",
         retriever=vector.as_retriever(),
         chain_type_kwargs=chain_type_kwargs,
@@ -184,7 +182,7 @@ app = FastAPI(
 )
 
 tools = [chain_rag_tool, api_tool]
-llm_with_tools =
+llm_with_tools = llm_1.bind(functions=[format_tool_to_openai_function(t) for t in tools])
 
 
 def _format_chat_history(chat_history: List[Tuple[str, str]]):
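
For context: the two assignments completed in this commit are what the LangChain imports at the top of app.py wire together. llm_1 now drives the RetrievalQA chain, and the same model is bound to both tools via format_tool_to_openai_function. The sketch below shows one common way such a bound model becomes an OpenAI-functions agent; llm_1, tools, llm_with_tools, and _format_chat_history are names taken from the diff, while the prompt text and the AgentExecutor wiring are assumptions, not code from this repository.

# Sketch only, not part of this commit: typical use of llm_with_tools in an
# OpenAI-functions agent. llm_1, tools, llm_with_tools and _format_chat_history
# are defined in app.py; the prompt below is an assumed placeholder.
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),        # assumed system message
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

agent = (
    {
        "input": lambda x: x["input"],
        "chat_history": lambda x: _format_chat_history(x["chat_history"]),
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools                      # the model bound to both tools
    | OpenAIFunctionsAgentOutputParser()  # maps function calls to AgentAction/AgentFinish
)

agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

With that wiring, a call would look roughly like agent_executor.invoke({"input": question, "chat_history": history}), where history is the list of (human, ai) tuples that _format_chat_history expects.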