markchiang committed on
Commit
1345d2a
·
1 Parent(s): 5847676

init version

Browse files
Files changed (5) hide show
  1. README.md +1 -1
  2. app.py +104 -0
  3. model.pkl +3 -0
  4. query_data.py +35 -0
  5. requirements.txt +6 -0
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
4
  colorFrom: pink
5
  colorTo: purple
6
  sdk: gradio
7
- sdk_version: 3.28.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
4
  colorFrom: pink
5
  colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 3.27.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
app.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/python3
"""Gradio front-end for a TCM (Traditional Chinese Medicine) Q&A chatbot."""
import os
from typing import Optional, Tuple

import gradio as gr
import pickle
from query_data import get_chain
from threading import Lock

# Load the pre-built vector store shipped alongside the app.
# NOTE(review): pickle.load executes arbitrary code from the file — acceptable
# only because model.pkl is part of this repo, never user-supplied.
with open("model.pkl", "rb") as f:
    vectorstore = pickle.load(f)
14
def set_openai_api_key(api_key: str):
    """Build and return a conversation chain for the given OpenAI API key.

    If no api_key is provided, None is returned.

    Bug fix: the original fell through to ``return chain`` with ``chain``
    never assigned when ``api_key`` was falsy, raising UnboundLocalError
    instead of returning None as the docstring promised.
    """
    chain = None
    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
        chain = get_chain(vectorstore)
        # Scrub the key from the environment so it does not linger in the
        # process after the chain has been constructed.
        os.environ["OPENAI_API_KEY"] = ""
    return chain
23
+
24
class ChatWrapper:
    """Callable that serializes chat requests with a lock so concurrent
    Gradio callbacks cannot interleave chain invocations."""

    def __init__(self):
        # One lock per wrapper; held for the duration of each call.
        self.lock = Lock()

    def __call__(
        self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain
    ):
        """Execute the chat functionality.

        Returns ``(history, history)`` — the same list twice, feeding both
        the chatbot widget and the gr.State that holds the conversation.
        """
        # `with` replaces the original manual acquire()/release() pair:
        # exception-safe and idiomatic.  The original's
        # `except Exception as e: raise e` was a no-op re-raise and is removed.
        with self.lock:
            history = history or []
            # chain is None when no API key has been provided yet.
            if chain is None:
                history.append((inp, "Please paste your OpenAI key to use"))
                return history, history
            # Set OpenAI key for this request.
            import openai
            openai.api_key = api_key
            # Run chain and append input.
            output = chain({"question": inp, "chat_history": history})["answer"]
            history.append((inp, output))
        return history, history
50
+
51
# Single shared wrapper instance; its internal lock serializes requests.
chat = ChatWrapper()

block = gr.Blocks(css=".gradio-container {background-color: lightgray}")

with block:
    with gr.Row():
        gr.Markdown("<h3><center>CTM-GPT (中草藥資訊查詢小幫手)</center></h3><br/><center>Author: markchiang</center>")

    # User-supplied OpenAI key; masked input, consumed by set_openai_api_key.
    openai_api_key_textbox = gr.Textbox(
        placeholder="Paste your OpenAI API key (sk-...)",
        show_label=False,
        lines=1,
        type="password",
    )

    chatbot = gr.Chatbot()

    with gr.Row():
        message = gr.Textbox(
            label="What's your question?",
            placeholder="問中草藥相關的問題",
            lines=1,
        )
        # NOTE(review): .style() is the gradio 3.x API; removed in gradio 4.x.
        submit = gr.Button(value="Send", variant="secondary").style(full_width=False)

    # Canned example questions (Chinese); clicking one fills the message box.
    gr.Examples(
        examples=[
            "癌症相關的中草藥",
            "七寶美髯丹有哪些成分?",
            "麻黃湯每種成分各有多少量?",
            "鬚髮早白可以用什麼藥方?",
        ],
        inputs=message,
    )

    gr.HTML("Demo application of TCM helper.")

    gr.HTML(
        "<center>Author: markchiang<br/>Powered by OpenAI and Gradio. </center>"
    )

    # state: chat history list; agent_state: the chain built from the API key.
    state = gr.State()
    agent_state = gr.State()

    # Both the Send button and pressing Enter in the textbox invoke chat().
    submit.click(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
    message.submit(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])

    # Rebuild the chain whenever the key textbox changes.
    openai_api_key_textbox.change(
        set_openai_api_key,
        inputs=[openai_api_key_textbox],
        outputs=[agent_state],
    )

block.launch(debug=True)
model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ed259b9c1fac5f17cdf4330ef20a1bb3f2bcbf30b42a61ad27bdfc3fc1c0f57
3
+ size 13573160
query_data.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain.prompts.prompt import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import ChatVectorDBChain

# Prompt that rewrites a follow-up question into a standalone question,
# given the chat history (instructions are in Traditional Chinese).
_template = """鑑於以下對話和後續問題,將後續問題改寫為獨立問題。
您可以假設這是有關中草藥和疾病與健康相關的問題。

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

# QA prompt: answer strictly from the retrieved {context}; say "not sure"
# rather than invent answers, and politely decline off-topic questions.
# NOTE(review): not currently wired into the chain — qa_prompt is commented
# out in get_chain below.
template = """你是一名 AI 助手,負責回答有關中草藥與健康相關的問題。
您將獲得一份長文檔的以下提取部分和一個問題。 提供對話答案。
如果你不知道答案,就說“我不確定。” 不要試圖編造答案。
如果問題不是關於中草藥與疾病健康的知識,請禮貌地告訴他們你只能回答關於中草藥相關的問題。
Question: {question}
=========
{context}
=========
Answer in Markdown:"""
QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
24
+
25
+
26
def get_chain(vectorstore):
    """Build a ChatVectorDBChain over *vectorstore*, backed by GPT-4.

    The chain condenses follow-up questions with CONDENSE_QUESTION_PROMPT
    and returns the source documents alongside each answer.
    """
    language_model = OpenAI(temperature=0, model_name="gpt-4")
    return ChatVectorDBChain.from_llm(
        language_model,
        vectorstore,
        # qa_prompt=QA_PROMPT,
        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
        return_source_documents=True,
    )
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ langchain==0.0.148
2
+ openai==0.27.2
3
+ unstructured==0.6.1
4
+ faiss-cpu==1.7.4
5
+ gradio==3.27.0
6
+ tiktoken