Update app.py
app.py CHANGED
@@ -4,61 +4,39 @@ import os
 
 import streamlit as st
 from PIL import Image
-from functools import partial
-
-def generate_response(question):
-    response = st.session_state.vq.submit_query(question)
-    return response
-
-def generate_streaming_response(question):
-    response = st.session_state.vq.submit_query_streaming(question)
-    return response
-
-
-def set_query(q: str):
-    st.session_state.messages.append({"role": "user", "content": q})
-    if st.session_state.cfg.streaming:
-        stream = generate_streaming_response(q)
-        response = st.write_stream(stream)
-    else:
-        with st.spinner("Thinking..."):
-            response = generate_response(q)
-        st.write(response)
-    message = {"role": "assistant", "content": response}
-    st.session_state.messages.append(message)
-
 
 
 def launch_bot():
+    def generate_response(question):
+        response = vq.submit_query(question)
+        return response
+
+    def generate_streaming_response(question):
+        response = vq.submit_query_streaming(question)
+        return response
 
     if 'cfg' not in st.session_state:
         corpus_ids = str(os.environ['corpus_ids']).split(',')
-        questions = list(eval(os.environ['examples']))
         cfg = OmegaConf.create({
             'customer_id': str(os.environ['customer_id']),
             'corpus_ids': corpus_ids,
             'api_key': str(os.environ['api_key']),
             'title': os.environ['title'],
             'description': os.environ['description'],
-            'examples': questions,
             'source_data_desc': os.environ['source_data_desc'],
             'streaming': os.environ.get('streaming', False),
-            'prompt_name': os.environ.get('prompt_name', None)
-
+            'prompt_name': os.environ.get('prompt_name', None)
         })
         st.session_state.cfg = cfg
         st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
 
     cfg = st.session_state.cfg
     vq = st.session_state.vq
-
     st.set_page_config(page_title=cfg.title, layout="wide")
-    sidebar, main_content, questions_col = st.columns([2, 5, 3])  # Adjust column widths as needed
 
     # left side content
-    with sidebar:
+    with st.sidebar:
         image = Image.open('Vectara-logo.png')
-        st.image(image, width=250)
         st.markdown(f"## Welcome to {cfg.title}\n\n"
                     f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
 
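A note on the hunk above: the two query helpers move inside launch_bot so they can close over the local vq handle instead of reading st.session_state.vq on every call. A minimal sketch of that closure pattern, runnable on its own; DummyQuery is a hypothetical stand-in for VectaraQuery:

# Sketch only: DummyQuery stands in for VectaraQuery, which in the app is
# constructed from the API key, customer id, corpus ids, and prompt name.
class DummyQuery:
    def submit_query(self, question):
        return f"answer to: {question}"

def launch_bot():
    vq = DummyQuery()  # in the app: vq = st.session_state.vq

    def generate_response(question):
        # `vq` is captured from the enclosing scope, so no session lookup per call
        return vq.submit_query(question)

    print(generate_response("What is RAG?"))

launch_bot()  # prints: answer to: What is RAG?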
@@ -70,42 +48,37 @@ def launch_bot():
             "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
         )
         st.markdown("---")
+        st.image(image, width=250)
 
-
-
-    for example in st.session_state.cfg.examples:
-        st.button(example, on_click=partial(set_query, example), use_container_width=True)
+    st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
+    st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
 
-
-    st.
-    st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
+    if "messages" not in st.session_state.keys():
+        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
 
-
-
-
-
-
-
-
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.write(message["content"])
+
+    # User-provided prompt
+    if prompt := st.chat_input():
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.write(prompt)
 
-
-
-
-
-
-
-
-
-
-
-
-
-    else:
-        with st.spinner("Thinking..."):
-            response = generate_response(prompt)
-        st.write(response)
-        message = {"role": "assistant", "content": response}
-        st.session_state.messages.append(message)
+    # Generate a new response if last message is not from assistant
+    if st.session_state.messages[-1]["role"] != "assistant":
+        with st.chat_message("assistant"):
+            if cfg.streaming:
+                stream = generate_streaming_response(prompt)
+                response = st.write_stream(stream)
+            else:
+                with st.spinner("Thinking..."):
+                    response = generate_response(prompt)
+                st.write(response)
+            message = {"role": "assistant", "content": response}
+            st.session_state.messages.append(message)
 
 if __name__ == "__main__":
     launch_bot()
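The second hunk swaps the old set_query/example-button flow for Streamlit's standard chat pattern: history lives in st.session_state.messages, is replayed on every rerun, and st.chat_input appends new turns. A self-contained sketch of that loop, with an echo reply standing in for the Vectara call:

import streamlit as st

# Seed the history once; session_state survives Streamlit's reruns.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

# Replay the full history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# st.chat_input returns None until the user submits, so the walrus gates the block.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Answer only when the last turn is the user's.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        response = f"You said: {prompt}"  # placeholder for the real RAG call
        st.write(response)
        st.session_state.messages.append({"role": "assistant", "content": response})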
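On the cfg.streaming branch: st.write_stream (available since Streamlit 1.31) consumes a generator of text chunks, renders them incrementally, and returns the assembled string, which is what gets appended back into the history. A sketch with a fake token generator in place of vq.submit_query_streaming:

import time
import streamlit as st

def fake_stream():
    # Stand-in for vq.submit_query_streaming(question): yields text chunks.
    for token in ["Retrieval ", "Augmented ", "Generation."]:
        yield token
        time.sleep(0.05)

response = st.write_stream(fake_stream())  # renders incrementally, returns the full text
if "messages" not in st.session_state:
    st.session_state.messages = []
st.session_state.messages.append({"role": "assistant", "content": response})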
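Finally, the config block: every setting comes from environment variables, and the diff adds an optional prompt_name defaulting to None. One caveat when reusing the pattern: os.environ values are always strings, so os.environ.get('streaming', False) is truthy even when the variable is set to "false". The sketch below parses the flag explicitly; the env_flag helper and the placeholder defaults are illustrative additions, not part of the app:

import os
from omegaconf import OmegaConf

def env_flag(name, default=False):
    # Env vars are strings; map "1"/"true"/"yes" to True explicitly.
    raw = os.environ.get(name)
    return default if raw is None else raw.strip().lower() in ("1", "true", "yes")

cfg = OmegaConf.create({
    'customer_id': os.environ.get('customer_id', 'demo-customer'),   # placeholder default
    'corpus_ids': os.environ.get('corpus_ids', '12,34').split(','),  # "12,34" -> ['12', '34']
    'prompt_name': os.environ.get('prompt_name', None),              # optional, as in the diff
    'streaming': env_flag('streaming'),
})

print(cfg.corpus_ids, cfg.streaming)  # OmegaConf adds attribute-style access over a plain dict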