|
import streamlit as st |
|
import time |
|
from queue import Queue |
|
|
|
# Import hoisted here from mid-script (PEP 8: imports at the top of their scope).
# NOTE(review): `from langchain import HuggingFaceHub, ...` is deprecated in
# modern LangChain (moved to langchain_community) — confirm installed version.
from langchain import HuggingFaceHub, PromptTemplate, LLMChain

st.title("Falcon QA Bot")

# Hugging Face API token, read from Streamlit secrets (.streamlit/secrets.toml).
huggingfacehub_api_token = st.secrets["hf_token"]

# Remote Falcon-7B-Instruct endpoint; low temperature keeps answers focused,
# max_new_tokens bounds the response length.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=huggingfacehub_api_token,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000},
)

# Prompt scaffold; {question} is substituted by the chain at call time.
template = """

You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.



{question}



"""

# FIFO buffer between the text-input widget and answer generation in main().
queue = Queue()
|
|
|
def chat(query):
    """Send one user question through the Falcon LLM chain and return its answer."""
    question_prompt = PromptTemplate(template=template, input_variables=["question"])
    qa_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=True)
    return qa_chain.predict(question=query)
|
|
|
def main():
    """Render the question box, enqueue the query, and display the model's answer.

    Runs once per Streamlit script execution: any non-empty input is pushed
    onto the module-level queue, then the pending question (if any) is
    answered via chat() and written to the page.
    """
    # Renamed from `input` to avoid shadowing the `input` builtin.
    user_input = st.text_input(
        "What do you want to ask about", placeholder="Input your question here"
    )
    if user_input:
        queue.put(user_input)

    # Drain one pending question, if any, and show the model's reply.
    if not queue.empty():
        query = queue.get()
        result = chat(query)
        # Model output may contain markup; rendered as-is.
        st.write(result, unsafe_allow_html=True)
|
|
|
# Entry point: run the app when this module is executed directly.
if __name__ == "__main__":
    main()