ofermend committed on
Commit
b497ee0
·
verified ·
1 Parent(s): 60f0f89

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -63
app.py CHANGED
@@ -4,61 +4,39 @@ import os
4
 
5
  import streamlit as st
6
  from PIL import Image
7
- from functools import partial
8
-
9
- def generate_response(question):
10
- response = st.session_state.vq.submit_query(question)
11
- return response
12
-
13
- def generate_streaming_response(question):
14
- response = st.session_state.vq.submit_query_streaming(question)
15
- return response
16
-
17
-
18
- def set_query(q: str):
19
- st.session_state.messages.append({"role": "user", "content": q})
20
- if st.session_state.cfg.streaming:
21
- stream = generate_streaming_response(q)
22
- response = st.write_stream(stream)
23
- else:
24
- with st.spinner("Thinking..."):
25
- response = generate_response(q)
26
- st.write(response)
27
- message = {"role": "assistant", "content": response}
28
- st.session_state.messages.append(message)
29
-
30
 
31
 
32
  def launch_bot():
 
 
 
 
 
 
 
33
 
34
  if 'cfg' not in st.session_state:
35
  corpus_ids = str(os.environ['corpus_ids']).split(',')
36
- questions = list(eval(os.environ['examples']))
37
  cfg = OmegaConf.create({
38
  'customer_id': str(os.environ['customer_id']),
39
  'corpus_ids': corpus_ids,
40
  'api_key': str(os.environ['api_key']),
41
  'title': os.environ['title'],
42
  'description': os.environ['description'],
43
- 'examples': questions,
44
  'source_data_desc': os.environ['source_data_desc'],
45
  'streaming': os.environ.get('streaming', False),
46
- 'prompt_name': os.environ.get('prompt_name', None),
47
-
48
  })
49
  st.session_state.cfg = cfg
50
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
51
 
52
  cfg = st.session_state.cfg
53
  vq = st.session_state.vq
54
-
55
  st.set_page_config(page_title=cfg.title, layout="wide")
56
- sidebar, main_content, questions_col = st.columns([2, 5, 3]) # Adjust column widths as needed
57
 
58
  # left side content
59
- with sidebar:
60
  image = Image.open('Vectara-logo.png')
61
- st.image(image, width=250)
62
  st.markdown(f"## Welcome to {cfg.title}\n\n"
63
  f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
64
 
@@ -70,42 +48,37 @@ def launch_bot():
70
  "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
71
  )
72
  st.markdown("---")
 
73
 
74
- with questions_col:
75
- st.markdown("### Sample Questions")
76
- for example in st.session_state.cfg.examples:
77
- st.button(example, on_click=partial(set_query, example), use_container_width=True)
78
 
79
- with main_content:
80
- st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
81
- st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
82
 
83
- if "messages" not in st.session_state.keys():
84
- st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
85
-
86
- # Display chat messages
87
- for message in st.session_state.messages:
88
- with st.chat_message(message["role"]):
89
- st.write(message["content"])
 
 
 
90
 
91
- # User-provided prompt
92
- if prompt := st.chat_input():
93
- st.session_state.messages.append({"role": "user", "content": prompt})
94
- with st.chat_message("user"):
95
- st.write(prompt)
96
-
97
- # Generate a new response if last message is not from assistant
98
- if st.session_state.messages[-1]["role"] != "assistant":
99
- with st.chat_message("assistant"):
100
- if cfg.streaming:
101
- stream = generate_streaming_response(prompt)
102
- response = st.write_stream(stream)
103
- else:
104
- with st.spinner("Thinking..."):
105
- response = generate_response(prompt)
106
- st.write(response)
107
- message = {"role": "assistant", "content": response}
108
- st.session_state.messages.append(message)
109
 
110
  if __name__ == "__main__":
111
  launch_bot()
 
4
 
5
  import streamlit as st
6
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
 
9
  def launch_bot():
10
+ def generate_response(question):
11
+ response = vq.submit_query(question)
12
+ return response
13
+
14
+ def generate_streaming_response(question):
15
+ response = vq.submit_query_streaming(question)
16
+ return response
17
 
18
  if 'cfg' not in st.session_state:
19
  corpus_ids = str(os.environ['corpus_ids']).split(',')
 
20
  cfg = OmegaConf.create({
21
  'customer_id': str(os.environ['customer_id']),
22
  'corpus_ids': corpus_ids,
23
  'api_key': str(os.environ['api_key']),
24
  'title': os.environ['title'],
25
  'description': os.environ['description'],
 
26
  'source_data_desc': os.environ['source_data_desc'],
27
  'streaming': os.environ.get('streaming', False),
28
+ 'prompt_name': os.environ.get('prompt_name', None)
 
29
  })
30
  st.session_state.cfg = cfg
31
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
32
 
33
  cfg = st.session_state.cfg
34
  vq = st.session_state.vq
 
35
  st.set_page_config(page_title=cfg.title, layout="wide")
 
36
 
37
  # left side content
38
+ with st.sidebar:
39
  image = Image.open('Vectara-logo.png')
 
40
  st.markdown(f"## Welcome to {cfg.title}\n\n"
41
  f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
42
 
 
48
  "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
49
  )
50
  st.markdown("---")
51
+ st.image(image, width=250)
52
 
53
+ st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
54
+ st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
 
 
55
 
56
+ if "messages" not in st.session_state.keys():
57
+ st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
 
58
 
59
+ # Display chat messages
60
+ for message in st.session_state.messages:
61
+ with st.chat_message(message["role"]):
62
+ st.write(message["content"])
63
+
64
+ # User-provided prompt
65
+ if prompt := st.chat_input():
66
+ st.session_state.messages.append({"role": "user", "content": prompt})
67
+ with st.chat_message("user"):
68
+ st.write(prompt)
69
 
70
+ # Generate a new response if last message is not from assistant
71
+ if st.session_state.messages[-1]["role"] != "assistant":
72
+ with st.chat_message("assistant"):
73
+ if cfg.streaming:
74
+ stream = generate_streaming_response(prompt)
75
+ response = st.write_stream(stream)
76
+ else:
77
+ with st.spinner("Thinking..."):
78
+ response = generate_response(prompt)
79
+ st.write(response)
80
+ message = {"role": "assistant", "content": response}
81
+ st.session_state.messages.append(message)
 
 
 
 
 
 
82
 
83
  if __name__ == "__main__":
84
  launch_bot()