File size: 2,837 Bytes
fd7b88e
 
 
 
11182eb
fd7b88e
11182eb
cc93d6f
c1f1c6e
fd7b88e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c1f1c6e
fd7b88e
 
1ced004
a7d579b
d8b04f9
 
 
 
 
 
a7d579b
 
 
fd7b88e
 
 
 
 
 
 
 
 
 
 
1ced004
fd7b88e
d21554d
11182eb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d21554d
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import requests
import streamlit as st
import time

st.title("Omdena Chatbot Interface")

# Base URL of the deployed Rasa model server (Hugging Face Space).
url = 'https://omdena-lc-omdena-ng-lagos-chatbot-model.hf.space'

# Seconds to wait on any outbound HTTP call before giving up, so a hung
# backend can never freeze the Streamlit session indefinitely.
REQUEST_TIMEOUT = 30

# Initialize chat history once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Streamlit reruns this script top-to-bottom on every interaction, so the
# stored history must be replayed each time to keep the transcript visible.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if user_input := st.chat_input("What is up?"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Forward the message to the Rasa REST webhook.
    payload = {"sender": "user", "message": user_input}
    try:
        response = requests.post(
            url + '/webhooks/rest/webhook', json=payload, timeout=REQUEST_TIMEOUT
        )
        bot_reply = response.json()
    except (requests.RequestException, ValueError):
        # Network failure, timeout, or a non-JSON body: fall through to the
        # empty-reply message instead of crashing the UI with a traceback.
        bot_reply = []

    # Flatten the webhook reply (a list of message dicts) into one string.
    # Non-text payloads (images, buttons) have no 'text' key, hence .get().
    if bot_reply != []:
        if len(bot_reply) > 1:
            # NOTE(review): the first reply is deliberately skipped here
            # (bot_reply[1:]) — confirm this is intended and not a bug.
            assistant_response = " ".join(
                reply.get('text', '') for reply in bot_reply[1:]
            )
        else:
            assistant_response = bot_reply[0].get('text', '')
    else:
        assistant_response = 'API request returned with an empty list []. Please continue with a different question'

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # Simulate a token stream: reveal one word every 50 ms with a
        # blinking-cursor character appended while "typing".
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})

    # Log the exchange to a Google Sheet via an Apps Script web app.
    webhook_url = "https://script.google.com/macros/s/AKfycbzhikyq7IduuEPGmrvcmJV9YlziiVyBysQ_oYf7lOzF8w9zg--BI2S_5cLuftp0pKqy/exec"
    action = "?action=addData"
    data = {
        "user": user_input,
        "bot": assistant_response,
    }
    try:
        # Logging is best-effort: never break the chat over a failed write,
        # but only swallow HTTP-level errors, not arbitrary exceptions.
        requests.post(webhook_url + action, json=data, timeout=REQUEST_TIMEOUT)
    except requests.RequestException:
        pass


# Debug panel: fetch the Rasa server's /status and /version endpoints on demand.
with st.expander("Debug"):
    if st.button("Show Debug Info"):
        request_ids = ['/status', '/version']
        results = [
            requests.get(url + request_id, timeout=REQUEST_TIMEOUT).json()
            for request_id in request_ids
        ]
        st.write(results)