File size: 1,411 Bytes
e936a3f
bd1c309
 
e936a3f
bd1c309
 
e936a3f
bd1c309
e936a3f
bd1c309
 
 
e936a3f
 
bd1c309
 
e936a3f
 
 
bd1c309
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fbd4b06
e936a3f
bd1c309
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import streamlit as st
from transformers import pipeline
from collections import deque

# Configure system prompt.
# This string serves double duty: it is shown in the disabled
# "System Prompt" text area in main(), and it is passed as the
# *context* argument to the question-answering pipeline in chat().
system_prompt = "You are an AI assistant that specializes in helping with code-based questions and tasks. Feel free to ask anything related to coding!"

# Page heading rendered at the top of the Streamlit app.
st.title("Falcon QA Bot")

@st.cache_resource
def get_qa_pipeline():
    """Build and cache the Falcon question-answering pipeline.

    Uses ``st.cache_resource`` — the modern replacement for the
    deprecated/removed ``st.cache(allow_output_mutation=True)`` — so the
    heavy model is loaded once per server process instead of on every
    script rerun.

    Returns:
        A Hugging Face ``pipeline`` object for the "question-answering" task.
    """
    # NOTE(review): tiiuae/falcon-7b-instruct is a causal-LM (text
    # generation) checkpoint, while the "question-answering" task expects
    # an extractive-QA model — confirm this model/task pairing loads and
    # behaves as intended.
    # device=0 assumes a CUDA GPU is available — TODO confirm deployment
    # target has one (otherwise use device=-1 / device_map).
    return pipeline("question-answering", model="tiiuae/falcon-7b-instruct", device=0)

def chat(query):
    """Run *query* through the cached QA pipeline.

    Args:
        query: The user's question text.

    Returns:
        The raw pipeline result; main() reads the ``"answer"`` key from it.
    """
    # Renamed local (was `pipeline`): the original shadowed the
    # module-level `pipeline` import from transformers.
    qa_pipeline = get_qa_pipeline()
    # NOTE(review): `max_length` is not a documented argument of the
    # question-answering pipeline (that task uses max_answer_len /
    # max_seq_len) — verify it has the intended effect here.
    result = qa_pipeline(question=query, max_length=2000, context=system_prompt)
    return result

def main():
    """Render the Streamlit UI: take a question, answer it, display the result."""
    # Streamlit re-executes this script top-to-bottom on every
    # interaction, so this deque is rebuilt each run and holds at most
    # one pending question; kept to preserve the original behavior.
    user_queue = deque()

    # Inline CSS: lay radio-widget options out horizontally.
    st.markdown('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)

    # Renamed from `input`, which shadowed the Python builtin.
    # The widget key stays "input" so existing session state is unaffected.
    user_question = st.text_area("What do you want to ask about", value="", height=150, max_chars=500, key="input")
    if st.button("Ask"):
        if user_question:
            user_queue.append(user_question)

    if user_queue:
        current_user = user_queue[0]
        # Echo the prompt and the question back read-only, then answer.
        st.text_area("System Prompt", value=system_prompt, height=150, disabled=True)
        st.text_area("User Input", value=current_user, height=150, disabled=True)
        with st.spinner("Generating response..."):
            output = chat(current_user)
        st.text_area("Falcon's Answer", value=output["answer"], height=150, disabled=True)
        user_queue.popleft()

# Standard entry-point guard: run the app only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    main()