File size: 3,083 Bytes
30d349c
 
 
 
 
 
4f860ce
 
 
 
 
30d349c
4f860ce
 
30d349c
 
 
 
 
4f860ce
30d349c
 
 
 
 
 
 
 
 
 
 
 
 
824d371
 
 
 
 
30d349c
4f860ce
 
 
 
 
 
 
 
30d349c
 
 
4f860ce
 
30d349c
 
 
 
4f860ce
30d349c
 
 
4f860ce
30d349c
 
 
 
4f860ce
 
30d349c
 
 
 
4f860ce
 
30d349c
4f860ce
30d349c
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80

from omegaconf import OmegaConf
from query import VectaraQuery
import streamlit as st
import os

# Maps each debate topic's display title to its Vectara corpus ID (string).
topics = {
    'Standardized testing in education': '266',
    'Ethical implications of genetic editing': '267',
}

def launch_bot():
    """Run the Debate Bot Streamlit app.

    Builds (once per session) a VectaraQuery client from environment
    variables, renders the sidebar controls (role, topic, debate style),
    and drives a chat loop where the bot argues the side opposite to the
    user's chosen role.

    Environment variables required:
        VECTARA_CUSTOMER_ID: Vectara customer account ID.
        VECTARA_API_KEY: Vectara API key.
    """
    def generate_response(question, cfg):
        # Delegate to the Vectara chat API; returns a response stream
        # suitable for st.write_stream.
        response = vq.submit_query(question, cfg.bot_role, cfg.topic, cfg.style)
        return response

    # One-time session setup: config + query client persist across reruns.
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            # Default to the first topic's corpus until the user picks one.
            'corpus_id': list(topics.values())[0],
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title="Debate Bot", layout="wide")

    # left side content
    with st.sidebar:
        st.markdown("## Welcome to Debate Bot.\n\n\n")

        role_options = ['in opposition to', 'in support of']
        # Fix: label typo "Your are:" -> "You are:".
        cfg.human_role = st.selectbox('You are:', role_options)
        # The bot always takes the side the user did not pick.
        cfg.bot_role = role_options[1] if cfg.human_role == role_options[0] else role_options[0]

        topic_options = list(topics.keys())
        cfg.topic = st.selectbox('The topic:', topic_options)
        # Point the query client at the corpus backing the selected topic.
        vq.corpus_id = topics[cfg.topic]

        st.markdown("\n")
        debate_styles = ['Lincoln-Douglas', 'Spontaneous Argumentation', 'Parliamentary debates']
        cfg.style = st.selectbox('Debate Style:', debate_styles)

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of the Chat functionality along with custom prompts and GPT4-Turbo (as part of our Scale plan)"
        )
        st.markdown("---")

    # Seed the conversation with the assistant's opening prompt.
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "Please make your opening statement.", "avatar": 'πŸ¦–'}]

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'πŸ§‘β€πŸ’»'})
        with st.chat_message("user", avatar='πŸ§‘β€πŸ’»'):
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='πŸ€–'):
            stream = generate_response(prompt, cfg)
            response = st.write_stream(stream)
            message = {"role": "assistant", "content": response, "avatar": 'πŸ€–'}
            st.session_state.messages.append(message)
    
# Script entry point: start the Streamlit app when run directly.
if __name__ == "__main__":
    launch_bot()