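# Streamlit chat app that answers questions about drink names using Vectara's
# Chat functionality via the VectaraQuery helper defined in query.py.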
from omegaconf import OmegaConf
from query import VectaraQuery
import streamlit as st
import os
from PIL import Image

def launch_bot():
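    # Forward the user's question to Vectara and return the generated answer.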
    def generate_response(question):
        response = vq.submit_query(question)
        return response

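    # Clear the chat history and re-create the Vectara query client.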
    def reset():
        st.session_state.messages = [{"role": "assistant", "content": "Please ask your question about drink names.", "avatar": '🦖'}]
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

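    # Build the configuration from environment variables once per session and cache it in session_state.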
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            'corpus_id': '46',  # Fixed corpus ID for drink names
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title="Drink Name Query Bot", layout="wide")

    # Left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=250)
        st.markdown(f"## Welcome to Drink Name Query Bot.\n\n\n")

        if st.button('Start Over'):
            reset()

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of the Chat functionality along with custom prompts and GPT4-Turbo (as part of our [Scale plan](https://vectara.com/pricing/))"
        )
        st.markdown("---")

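    # Initialize the chat history on first load.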
    if "messages" not in st.session_state.keys():
        reset()

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
        with st.chat_message("user", avatar='🧑‍💻'):
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='🤖'):
            response = generate_response(prompt)
            st.write(response)
            message = {"role": "assistant", "content": response, "avatar": '🤖'}
            st.session_state.messages.append(message)
    
if __name__ == "__main__":
    launch_bot()