Spaces:
Sleeping
Sleeping
Commit
·
b18b667
1
Parent(s):
e3ffed9
Update app.py
Browse files
app.py
CHANGED
@@ -1,8 +1,8 @@
|
|
1 |
|
2 |
import gradio as gr
|
3 |
import os
|
4 |
-
import pandas as pd
|
5 |
-
import pickle
|
6 |
from typing import List
|
7 |
|
8 |
from langchain.llms import OpenAIChat
|
@@ -60,15 +60,36 @@ RESPONSE_PROMPT = PromptTemplate(
|
|
60 |
)
|
61 |
|
62 |
|
63 |
-
# load vectorstore of embeddings
|
64 |
-
with open("files/vectorstores/arthur_vectorstore.pkl", "rb") as f:
|
65 |
-
|
66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
|
69 |
def get_langchain_agent(api_key):
|
70 |
os.environ["OPENAI_API_KEY"] = api_key
|
71 |
|
|
|
|
|
72 |
manager = CallbackManager([])
|
73 |
question_manager = CallbackManager([])
|
74 |
stream_manager = CallbackManager([])
|
@@ -115,14 +136,14 @@ def get_source_doc(output):
|
|
115 |
return source_text, source_doc_link
|
116 |
|
117 |
|
118 |
-
def log_inference(chat_history: List[List[str]], llm_feedback: int) -> None:
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
|
127 |
|
128 |
def chat(inp, history, agent):
|
@@ -173,11 +194,11 @@ def launch_ask_arthur(share=False):
|
|
173 |
],
|
174 |
inputs=message,
|
175 |
)
|
176 |
-
# feedback radio button
|
177 |
-
llm_feedback = gr.Radio(
|
178 |
-
|
179 |
-
)
|
180 |
-
submit_feedback_button = gr.Button("Submit feedback")
|
181 |
with gr.Column():
|
182 |
source_link = gr.Markdown()
|
183 |
source_page = gr.Markdown()
|
@@ -193,10 +214,10 @@ def launch_ask_arthur(share=False):
|
|
193 |
message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state, source_page, source_link])
|
194 |
|
195 |
|
196 |
-
submit_feedback_button.click(
|
197 |
-
|
198 |
-
|
199 |
-
)
|
200 |
|
201 |
demo.queue().launch(share=share)
|
202 |
|
|
|
1 |
|
2 |
import gradio as gr
|
3 |
import os
|
4 |
+
# import pandas as pd
|
5 |
+
# import pickle
|
6 |
from typing import List
|
7 |
|
8 |
from langchain.llms import OpenAIChat
|
|
|
60 |
)
|
61 |
|
62 |
|
63 |
+
# # load vectorstore of embeddings
|
64 |
+
# with open("files/vectorstores/arthur_vectorstore.pkl", "rb") as f:
|
65 |
+
# global arthur_vectorstore
|
66 |
+
# arthur_vectorstore = pickle.load(f)
|
67 |
+
|
68 |
+
arthur_vectorstore = None
|
69 |
+
|
70 |
+
def ingest_docs(dir_name, vectorstore_name):
    """Load documents from *dir_name*, embed them, and build a FAISS vectorstore.

    The resulting vectorstore is published to the module-level
    ``arthur_vectorstore`` so the chat agent can retrieve from it.

    Args:
        dir_name: Directory containing the documents to index.
        vectorstore_name: Name that would be used when persisting the
            vectorstore to disk (persistence is currently disabled — see the
            commented-out save below).
    """
    # BUG FIX: without this declaration the assignment at the bottom bound a
    # function-local name, leaving the module-level ``arthur_vectorstore``
    # stuck at None. The pre-refactor code used ``global arthur_vectorstore``
    # for exactly this reason.
    global arthur_vectorstore

    loader = DirectoryLoader(dir_name)
    raw_documents = loader.load()
    # Chunk documents for embedding; the 200-char overlap preserves context
    # across chunk boundaries.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    documents = text_splitter.split_documents(raw_documents)
    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.from_documents(documents, embeddings)

    arthur_vectorstore = vectorstore

    # # Save vectorstore
    # with open(f"files/vectorstores/{vectorstore_name}_vectorstore.pkl", "wb") as f:
    #     pickle.dump(vectorstore, f)
|
86 |
|
87 |
|
88 |
def get_langchain_agent(api_key):
|
89 |
os.environ["OPENAI_API_KEY"] = api_key
|
90 |
|
91 |
+
ingest_docs("files/arthur-docs-markdown", "arthur")
|
92 |
+
|
93 |
manager = CallbackManager([])
|
94 |
question_manager = CallbackManager([])
|
95 |
stream_manager = CallbackManager([])
|
|
|
136 |
return source_text, source_doc_link
|
137 |
|
138 |
|
139 |
+
# def log_inference(chat_history: List[List[str]], llm_feedback: int) -> None:
|
140 |
+
# reference_data = pd.read_csv("files/reference_data.csv", index_col=None)
|
141 |
+
# chat_text = []
|
142 |
+
# for user_text, bot_text in chat_history:
|
143 |
+
# bot_text = bot_text.replace("\n", "").replace("<br>", "")
|
144 |
+
# chat_text.append(f"input:<{user_text}>,output:<{bot_text}>,")
|
145 |
+
# reference_data.loc[len(reference_data)] = {"chat": "".join(chat_text), "llm_feedback": llm_feedback}
|
146 |
+
# reference_data.to_csv("files/reference_data.csv", index=False)
|
147 |
|
148 |
|
149 |
def chat(inp, history, agent):
|
|
|
194 |
],
|
195 |
inputs=message,
|
196 |
)
|
197 |
+
# # feedback radio button
|
198 |
+
# llm_feedback = gr.Radio(
|
199 |
+
# ["0","1","2"], value="0", label="How useful was this? (0 = bad, 1 = meh, 2 = good)"
|
200 |
+
# )
|
201 |
+
# submit_feedback_button = gr.Button("Submit feedback")
|
202 |
with gr.Column():
|
203 |
source_link = gr.Markdown()
|
204 |
source_page = gr.Markdown()
|
|
|
214 |
message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state, source_page, source_link])
|
215 |
|
216 |
|
217 |
+
# submit_feedback_button.click(
|
218 |
+
# log_inference,
|
219 |
+
# [chatbot, llm_feedback],
|
220 |
+
# )
|
221 |
|
222 |
demo.queue().launch(share=share)
|
223 |
|