import streamlit as st

from queue import Queue
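
# Note: these top-level imports exist in older LangChain releases; newer versions
# move HuggingFaceHub to langchain_community.llms, so pin langchain accordingly
# or adjust the imports.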
from langchain import HuggingFaceHub, PromptTemplate, LLMChain


st.title("Falcon QA Bot")
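
# Read the Hugging Face Hub API token from Streamlit secrets, e.g. an
# `hf_token` entry in .streamlit/secrets.toml or the deployed app's Secrets.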
huggingfacehub_api_token = st.secrets["hf_token"]
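
# Hub model ID for the hosted Falcon-7B-Instruct checkpoint.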
repo_id = "tiiuae/falcon-7b-instruct"
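
# Wrap the hosted model in a LangChain LLM. A low temperature keeps answers
# focused, and max_new_tokens caps the length of each generated response.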
llm = HuggingFaceHub(
    huggingfacehub_api_token=huggingfacehub_api_token,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000},
)
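
# Prompt template with a single {question} placeholder that is filled in per query.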
template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

{question}
"""
queue = Queue()


def chat(query):
    """
    Generates a response to the user's question using the LLMChain model.

    :param query: User's question.
    :type query: str
    :return: Response to the user's question.
    :rtype: str
    """
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm_chain = LLMChain(prompt=prompt, verbose=True, llm=llm)
    result = llm_chain.predict(question=query)
    return result


def main():
    """
    Main function for the Streamlit app.
    """
    user_question = st.text_input("What do you want to ask about", placeholder="Input your question here")
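
    # Queue the submitted question, then immediately consume it, generate a
    # response, and render it in the app.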
    if user_question:
        queue.put(user_question)

    if not queue.empty():
        query = queue.get()
        response = chat(query)
        st.write(response, unsafe_allow_html=True)


if __name__ == '__main__':
    main()