# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 08:01:41 2023
@author: Shamim Ahamed, RE AIMS Lab
"""
import streamlit as st
import requests
def get_user_data(api, parameters):
    """POST the request payload to the inference endpoint and return the parsed JSON, or None on failure."""
    response = requests.post(f"{api}", json=parameters)
    if response.status_code == 200:
        return response.json()
    else:
        print(f"ERROR: {response.status_code}")
        return None
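
# For reference, main() below expects the endpoint to return JSON shaped roughly as
# follows. This is a sketch inferred from how the response is accessed in this file,
# not an official API contract:
#
#   {
#     "data": {
#       "responses": [{"content": "<assistant reply>"}],
#       "logs": {
#         "content": {
#           "retrival_model": {
#             "matched_doc": ["..."], "matched_prob": [0.0],
#             "retrived_doc": ["..."], "retrived_prob": [0.0]
#           }
#         }
#       }
#     }
#   }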
st.set_page_config(page_title="SuSastho.AI Chatbot", page_icon="🚀", layout='wide')

st.markdown("""
<style>
p {
    font-size: 0.8rem !important;
}
textarea {
    font-size: 0.8rem !important;
    padding: 0.8rem 1rem 0.75rem 0.8rem !important;
}
button {
    padding: 0.65rem !important;
}
.css-1lr5yb2 {
    background-color: rgb(105 197 180) !important;
}
.css-1c7y2kd {
    background-color: transparent !important;
}
.css-4oy321 {
    background-color: rgba(240, 242, 246, 0.5) !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown("""
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
""", unsafe_allow_html=True)
model_names = {
    'BLOOM 7B': 'bloom-7b',
}
with st.sidebar:
    st.title("SuSastho.AI - ChatBot 🚀")
    model_name = model_names[st.selectbox('Model', list(model_names.keys()), 0)]
    max_ctx = st.slider('Select Top N Context', min_value=1, max_value=6, value=3, step=1)
    # ctx_checker_tmp = st.slider('Context Checker Sensitivity', min_value=0.001, max_value=1.0, value=0.008, step=0.001)
    ctx_checker_tmp = 0.008
    lm_tmp = st.slider('Language Model Sensitivity', min_value=0.001, max_value=1.0, value=0.1, step=0.001)
    cls_threshold = st.slider('Classification Threshold', min_value=0.01, max_value=1.0, value=0.5, step=0.01)
    verbose = st.checkbox('Show Detailed Response', value=False)
    if verbose:
        retv_cnt = st.slider('Display N Retrieved Docs', min_value=0, max_value=32, value=0, step=1)

endpoint = st.secrets["LLMEndpoint"]
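# Both secrets used by this app (LLMEndpoint above and login_secret in app_viewport
# below) are read from Streamlit's secrets store. For a local run they would
# typically live in .streamlit/secrets.toml; a minimal sketch with placeholder
# values (the URL is hypothetical):
#
#   LLMEndpoint = "https://your-inference-server.example/api/chat"
#   login_secret = "<access code>"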
def main():
    if model_name == 'None':
        st.markdown('##### Please select a model.')
        return

    # Initialize chat history with a greeting ("Hello! I am an AI assistant. How can I help? 😊")
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": 'assistant', "content": 'হ্যালো! আমি একটি এআই অ্যাসিস্ট্যান্ট। কীভাবে সাহায্য করতে পারি? 😊'}]

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # Accept user input (placeholder text: "Type a message here")
    if prompt := st.chat_input("এখানে মেসেজ লিখুন"):
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Build the request payload and query the retrieval/LLM endpoint
        params = {
            "chat_history": [
                {"content": prompt}
            ],
            "model": model_name,
            "mode": "specific",
            "config": {
                "ctx_checker_tmp": ctx_checker_tmp,
                "lm_tmp": lm_tmp,
                "max_ctx": max_ctx,
                "cls_threshold": cls_threshold,
                "llm_enable": True,
            }
        }
        resp = get_user_data(endpoint, params)
        if resp is None:
            st.markdown('#### INTERNAL ERROR')
            return
        response = resp['data']['responses'][0]['content']
        context = resp['data']['logs']['content']['retrival_model']['matched_doc']
        context_prob = resp['data']['logs']['content']['retrival_model']['matched_prob']

        if verbose:
            clen = len(context)
            retrived = resp['data']['logs']['content']['retrival_model']['retrived_doc'][:retv_cnt]
            retrived_prob = resp['data']['logs']['content']['retrival_model']['retrived_prob'][:retv_cnt]
            retrived = [str(round(b, 3)) + ': ' + a for a, b in zip(retrived, retrived_prob)]
            retrived = '\n\n===============================\n\n'.join(retrived)
            context = [str(round(b, 3)) + ': ' + a for a, b in zip(context, context_prob)]
            context = '\n\n===============================\n\n'.join(context)
            response = f'###### Config: Context Checker Value: {ctx_checker_tmp}, LM Value: {lm_tmp}\n\n##### Retrieved Context:\n{retrived}\n\n##### Matched Context ({clen}):\n{context}\n\n##### Response:\n{response}'

        # Display assistant response in chat message container
        with st.chat_message("assistant", avatar=None):
            st.markdown(response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
def app_viewport():
    passw = st.empty()
    appc = st.container()

    # Simple access-code gate: the app body only renders after a successful login
    if 'logged_in' not in st.session_state:
        with passw.container():
            secret = st.text_input('Please Enter Access Code')
            if st.button("Submit", type='primary'):
                if secret == st.secrets["login_secret"]:
                    passw.empty()
                    st.session_state['logged_in'] = True
                else:
                    st.error('Wrong Access Code.')

    if st.session_state.get('logged_in'):
        with appc:
            main()

if __name__ == '__main__':
    app_viewport()
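
# To run locally (assuming this file is saved as app.py; the filename is not given here):
#   streamlit run app.py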