terry-li-hm committed
Commit · 1c88374
Parent(s): 35a5de6

Migrate files
- app.py +111 -67
- public/favicon.svg +1 -0
- public/logo_dark.svg +1 -0
- public/logo_light.svg +1 -0
app.py
CHANGED
@@ -1,74 +1,118 @@
- [previous OpenAI-based app.py; only the legacy streaming call below is recoverable]
-     # Call OpenAI
-     async for stream_resp in await openai.ChatCompletion.acreate(
-         messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
-     ):
-         token = stream_resp.choices[0]["delta"].get("content", "")
-         await msg.stream_token(token)
+import os
+
+import chainlit as cl
+import openai
+from chainlit.input_widget import Select, Slider, Switch
+from langchain.chat_models import ChatOpenAI
+from llama_index import (
+    LLMPredictor,
+    ServiceContext,
+    StorageContext,
+    TrafilaturaWebReader,
+    VectorStoreIndex,
+    load_index_from_storage,
+)
+from llama_index.callbacks.base import CallbackManager
+from llama_index.llms import ChatMessage, HuggingFaceLLM, MessageRole, OpenAI
+
+
+def get_api_key():
+    api_key = os.getenv("OPENAI_API_KEY")
+    if api_key is None:
+        print("OPENAI_API_KEY missing from environment variables")
+        api_key = input("Please enter your OPENAI_API_KEY: ")
+    return api_key
+
+
+openai.api_key = get_api_key()
+
+
+def load_index():
+    try:
+        storage_context = StorageContext.from_defaults(persist_dir="./storage")
+        index = load_index_from_storage(storage_context)
+    except FileNotFoundError:
+        print("Storage file not found. Loading from web.")
+        documents = TrafilaturaWebReader().load_data(["https://bit.ly/45BncJA"])
+        index = VectorStoreIndex.from_documents(documents)
+        index.storage_context.persist()
+    return index
+
+
+index = load_index()
+
+welcome_msg = (
+    "Hi there! I’m your China Life chatbot, specialising in answering "
+    "[frequently asked questions](https://bit.ly/45BncJA). "
+    "How may I assist you today? "
+    "Feel free to ask questions like, "
+    "“Is there any action required after receiving the policy?” or "
+    "“Can I settle using a demand draft?”"
+)
+
+
+@cl.on_chat_start
+async def start():
+    chat_profile = cl.user_session.get("chat_profile")
+    msg = cl.Message(content="")
+    for token in list(welcome_msg):
+        await cl.sleep(0.01)
+        await msg.stream_token(token)
+
+    await msg.send()
+
+    settings = await cl.ChatSettings(
+        [
+            Select(
+                id="Model",
+                label="Model",
+                values=["gpt-3.5-turbo", "gpt-4"],
+                initial_index=1,
             ),
+            Slider(
+                id="Temperature",
+                label="Temperature",
+                initial=0,
+                min=0,
+                max=2,
+                step=0.1,
+            ),
+        ]
+    ).send()
+    await setup_query_engine(settings)
+
+
+@cl.on_settings_update
+async def setup_query_engine(settings):
+    print("on_settings_update", settings)
+
+    llm = OpenAI(model=settings["Model"], temperature=settings["Temperature"])
+
+    service_context = ServiceContext.from_defaults(
+        llm=llm, callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()])
     )
 
+    query_engine = index.as_query_engine(
+        service_context=service_context,
+        streaming=True,
+    )
 
+    cl.user_session.set("query_engine", query_engine)
 
 
+@cl.on_message
+async def main(message: cl.Message):
+    query_engine = cl.user_session.get("query_engine")
 
+    if query_engine is None:
+        await start()
+        query_engine = cl.user_session.get("query_engine")
+
+    if query_engine:
+        query_result = await cl.make_async(query_engine.query)(message.content)
+        response_message = cl.Message(content=query_result.response_txt or "")
+
+        for token in query_result.response_gen:
+            await response_message.stream_token(token=token)
+
+        await response_message.send()
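
For a quick sanity check of the new retrieval flow, the snippet below replays the same llama_index calls that app.py now makes (load the index persisted in ./storage, build a streaming query engine, iterate response_gen), but as a plain script outside Chainlit. It is an illustrative sketch based only on the APIs imported in the file, not part of the commit; the sample question is taken from welcome_msg, and OPENAI_API_KEY must be set. The app itself would typically be launched with the Chainlit CLI, e.g. `chainlit run app.py`.

import os

import openai
from llama_index import StorageContext, load_index_from_storage

openai.api_key = os.environ["OPENAI_API_KEY"]

# Load the index that app.py's load_index() persisted to ./storage.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

# Streaming query engine, mirroring index.as_query_engine(streaming=True) above.
query_engine = index.as_query_engine(streaming=True)
streaming_response = query_engine.query("Can I settle using a demand draft?")

# response_gen yields tokens as they arrive, the same stream the on_message
# handler forwards to cl.Message.stream_token().
for token in streaming_response.response_gen:
    print(token, end="", flush=True)
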
public/favicon.svg
ADDED

public/logo_dark.svg
ADDED

public/logo_light.svg
ADDED
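
The three new SVGs are presumably the custom branding assets Chainlit serves from a project's public/ folder (favicon plus light- and dark-mode logos); their contents are not shown in this diff.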