Commit b4a415a · Parent(s): 5847676 · "save"

Files changed:
- .gitattributes +1 -0
- README.md +1 -1
- app.py +105 -0
- query_data.py +35 -0
- requirements.txt +7 -0
- saved_model/vector.db/.gitattributes +3 -0
- saved_model/vector.db/index.faiss +3 -0
- saved_model/vector.db/index.pkl +3 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.faiss filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
 colorFrom: pink
 colorTo: purple
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.27.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py ADDED
@@ -0,0 +1,105 @@
+#!/usr/bin/python3
+import os
+from typing import List, Optional, Tuple
+
+import gradio as gr
+import pickle
+from query_data import get_chain
+from threading import Lock
+from langchain.vectorstores.faiss import FAISS
+from langchain.embeddings import OpenAIEmbeddings
+
+embeddings = OpenAIEmbeddings()
+vectorstore = FAISS.load_local('saved_model/vector.db', embeddings)
+
+def set_openai_api_key(api_key: str):
+    """Set the api key and return chain.
+    If no api_key, then None is returned.
+    """
+    if api_key:
+        os.environ["OPENAI_API_KEY"] = api_key
+        chain = get_chain(vectorstore)
+        os.environ["OPENAI_API_KEY"] = ""  # clear the env var; the chain captured the key at construction
+        return chain
+
+class ChatWrapper:
+
+    def __init__(self):
+        self.lock = Lock()
+    def __call__(
+        self, api_key: str, inp: str, history: Optional[List[Tuple[str, str]]], chain
+    ):
+        """Execute the chat functionality."""
+        self.lock.acquire()
+        try:
+            history = history or []
+            # If chain is None, that is because no API key was provided.
+            if chain is None:
+                history.append((inp, "Please paste your OpenAI key to use"))
+                return history, history
+            # Set OpenAI key
+            import openai
+            openai.api_key = api_key
+            # Run chain and append input.
+            output = chain({"question": inp, "chat_history": history})["answer"]
+            history.append((inp, output))
+        except Exception as e:
+            raise e
+        finally:
+            self.lock.release()
+        return history, history
+
+chat = ChatWrapper()
+
+block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
+
+with block:
+    with gr.Row():
+        gr.Markdown("<h3><center>CTM-GPT (中草藥資訊查詢小幫手)</center></h3><br/><center>Author: markchiang</center>")  # subtitle: "Chinese herbal medicine information assistant"
+
+        openai_api_key_textbox = gr.Textbox(
+            placeholder="Paste your OpenAI API key (sk-...)",
+            show_label=False,
+            lines=1,
+            type="password",
+        )
+
+    chatbot = gr.Chatbot()
+
+    with gr.Row():
+        message = gr.Textbox(
+            label="What's your question?",
+            placeholder="問中草藥相關的問題",  # "Ask a question about Chinese herbal medicine"
+            lines=1,
+        )
+        submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
+
+    gr.Examples(
+        examples=[
+            "癌症相關的中草藥",  # "Chinese herbs related to cancer"
+            "七寶美髯丹有哪些成分?",  # "What are the ingredients of Qibao Meiran Dan?"
+            "麻黃湯每種成分各有多少量?",  # "How much of each ingredient goes into Mahuang Decoction?"
+            "鬚髮早白可以用什麼藥方?",  # "Which formula treats premature graying of hair?"
+        ],
+        inputs=message,
+    )
+
+    gr.HTML("Demo application of TCM helper.")
+
+    gr.HTML(
+        "<center>Author: markchiang<br/>Powered by OpenAI and Gradio. </center>"
+    )
+
+    state = gr.State()
+    agent_state = gr.State()
+
+    submit.click(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
+    message.submit(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
+
+    openai_api_key_textbox.change(
+        set_openai_api_key,
+        inputs=[openai_api_key_textbox],
+        outputs=[agent_state],
+    )
+
+block.launch(debug=True)
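app.py loads a prebuilt FAISS index from saved_model/vector.db (shipped via Git LFS below), but the commit contains no script for building that index. Below is a minimal sketch of how such an index could be produced with the pinned versions (langchain 0.0.148, faiss-cpu 1.7.4); the file name ingest.py, the source file herbs.txt, and the splitter settings are illustrative assumptions, not part of the commit.

# ingest.py -- hypothetical companion script, NOT part of this commit.
import os

from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.faiss import FAISS

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; embedding calls are billed to this key

# "herbs.txt" stands in for whatever TCM corpus was actually embedded.
docs = TextLoader("herbs.txt", encoding="utf-8").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)

# Embed the chunks and persist the index in the layout app.py expects:
# saved_model/vector.db/index.faiss plus index.pkl.
FAISS.from_documents(chunks, OpenAIEmbeddings()).save_local("saved_model/vector.db")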
query_data.py ADDED
@@ -0,0 +1,35 @@
+from langchain.prompts.prompt import PromptTemplate
+from langchain.chat_models import ChatOpenAI  # "gpt-4" is a chat model, so use the chat wrapper
+from langchain.chains import ChatVectorDBChain
+
+_template = """鑑於以下對話和後續問題,將後續問題改寫為獨立問題。
+您可以假設這是有關中草藥和疾病與健康相關的問題。
+
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone question:"""
+CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+
+template = """你是一名 AI 助手,負責回答有關中草藥與健康相關的問題。
+您將獲得一份長文檔的以下提取部分和一個問題。 提供對話答案。
+如果你不知道答案,就說“我不確定。” 不要試圖編造答案。
+如果問題不是關於中草藥與疾病健康的知識,請禮貌地告訴他們你只能回答關於中草藥相關的問題。
+Question: {question}
+=========
+{context}
+=========
+Answer in Markdown:"""
+QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
+
+
+def get_chain(vectorstore):
+    llm = ChatOpenAI(temperature=0, model_name="gpt-4")  # the completions-based OpenAI class cannot serve "gpt-4"
+    qa_chain = ChatVectorDBChain.from_llm(
+        llm,
+        vectorstore,
+        # qa_prompt=QA_PROMPT,
+        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+        return_source_documents=True
+    )
+    return qa_chain
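Both prompts are written in Traditional Chinese. Roughly in English: the condense template reads "Given the following conversation and a follow-up question, rewrite the follow-up as a standalone question; you may assume the topic is Chinese herbal medicine, disease, and health." The QA template reads "You are an AI assistant answering questions about Chinese herbal medicine and health. You are given extracted parts of a long document and a question; give a conversational answer. If you don't know the answer, say 'I'm not sure.' and do not make one up; if the question is off-topic, politely say you can only answer questions about Chinese herbal medicine." Note that QA_PROMPT is defined but passed in commented-out, so the chain falls back to LangChain's built-in QA prompt. Below is a minimal sketch of exercising the chain outside Gradio; it assumes OPENAI_API_KEY is set, and the sample question is illustrative.

# smoke_test.py -- hypothetical, not part of this commit.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS

from query_data import get_chain

vectorstore = FAISS.load_local("saved_model/vector.db", OpenAIEmbeddings())
chain = get_chain(vectorstore)

# ChatVectorDBChain takes the new question plus the running (question, answer) history.
result = chain({"question": "麻黃湯有哪些成分?", "chat_history": []})
print(result["answer"])
print(result["source_documents"])  # present because return_source_documents=True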
requirements.txt ADDED
@@ -0,0 +1,7 @@
+pip==23.1
+langchain==0.0.148
+openai==0.27.2
+unstructured==0.6.1
+faiss-cpu==1.7.4
+gradio==3.27.0
+tiktoken
saved_model/vector.db/.gitattributes ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2446fde201f48768a639ed346a65d5e8c17f69bd0aa0109b11694eb3c4bb2056
+size 44
saved_model/vector.db/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4b0600d3913cda0bd7e6f857a359467b237e464b228242c630dd1ef82fa41d2
+size 10371117
saved_model/vector.db/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:906b9a352f805c3c671bbfb75470a39ae0dbd02d093991112d357a8ecd7b11d0
+size 3201973
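All three files under saved_model/vector.db are Git LFS pointers (note the roughly 10 MB size recorded for index.faiss), so a checkout made without Git LFS leaves three-line text stubs in their place and FAISS.load_local in app.py fails when faiss tries to read the index. A hypothetical guard, not part of this commit:

# Verify the LFS binaries were actually fetched before loading the index.
from pathlib import Path

index_file = Path("saved_model/vector.db/index.faiss")
# An LFS pointer stub is ~130 bytes; the real index is ~10 MB per the diff above.
if index_file.stat().st_size < 1024:
    raise RuntimeError("index.faiss looks like an LFS pointer; run `git lfs pull` first")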