Not-Grim-Refer committed on
Commit 2e00c46
Parent: 77c7c1e

Update app.py

Files changed (1): app.py +37 -14
app.py CHANGED
@@ -1,40 +1,63 @@
 import streamlit as st
-import time
 from queue import Queue
+from langchain import HuggingFaceHub, PromptTemplate, LLMChain
 
+# Set the title of the Streamlit app
 st.title("Falcon QA Bot")
 
+# Get the Hugging Face Hub API token from Streamlit secrets
 huggingfacehub_api_token = st.secrets["hf_token"]
 
-from langchain import HuggingFaceHub, PromptTemplate, LLMChain
-
+# Set the repository ID for the Falcon model
 repo_id = "tiiuae/falcon-7b-instruct"
-llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                     repo_id=repo_id,
-                     model_kwargs={"temperature":0.2, "max_new_tokens":2000})
 
+# Initialize the Hugging Face Hub and LLMChain
+llm = HuggingFaceHub(
+    huggingfacehub_api_token=huggingfacehub_api_token,
+    repo_id=repo_id,
+    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000}
+)
+
+# Define the template for the assistant's response
 template = """
 You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
 
 {question}
-
 """
 
+# Create a queue to store user questions
 queue = Queue()
 
 def chat(query):
+    """
+    Generates a response to the user's question using the LLMChain model.
+
+    :param query: User's question.
+    :type query: str
+    :return: Response to the user's question.
+    :rtype: str
+    """
+    # Create a prompt template with the question variable
     prompt = PromptTemplate(template=template, input_variables=["question"])
-    llm_chain = LLMChain(prompt=prompt,verbose=True,llm=llm)
 
+    # Create an LLMChain instance with the prompt and the Falcon model
+    llm_chain = LLMChain(prompt=prompt, verbose=True, llm=llm)
+
+    # Generate a response to the user's question
     result = llm_chain.predict(question=query)
 
     return result
 
 def main():
-    input = st.text_input("What do you want to ask about", placeholder="Input your question here")
-    if input:
+    """
+    Main function for the Streamlit app.
+    """
+    # Get the user's question from the input text box
+    user_question = st.text_input("What do you want to ask about", placeholder="Input your question here")
+
+    if user_question:
         # Add the user's question to the queue
-        queue.put(input)
+        queue.put(user_question)
 
         # Check if there are any waiting users
         if not queue.empty():
@@ -42,10 +65,10 @@ def main():
             query = queue.get()
 
             # Generate a response to the user's question
-            result = chat(query)
+            response = chat(query)
 
             # Display the response to the user
-            st.write(result,unsafe_allow_html=True)
+            st.write(response, unsafe_allow_html=True)
 
 if __name__ == '__main__':
-    main()
+    main()
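
The app reads its Hugging Face API token via st.secrets["hf_token"], so running it requires that secret to be defined. A minimal local-setup sketch, assuming Streamlit's standard .streamlit/secrets.toml location (the key name must match the one read in app.py; the token value is a placeholder):

# .streamlit/secrets.toml
# The key must match st.secrets["hf_token"] in app.py.
hf_token = "hf_xxx"  # placeholder; substitute your own Hugging Face API token

With the secret in place, the app starts with: streamlit run app.py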
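To exercise the model call outside Streamlit, the same chain can be driven directly. A minimal sketch, assuming the classic pre-0.1 langchain package used in this commit (where HuggingFaceHub, PromptTemplate, and LLMChain are importable from the top-level module) and an HF_TOKEN environment variable standing in for the Streamlit secret:

import os
from langchain import HuggingFaceHub, PromptTemplate, LLMChain

# Same model configuration as app.py, with the token taken from the
# environment instead of st.secrets.
llm = HuggingFaceHub(
    huggingfacehub_api_token=os.environ["HF_TOKEN"],
    repo_id="tiiuae/falcon-7b-instruct",
    model_kwargs={"temperature": 0.2, "max_new_tokens": 2000},
)

# Same prompt template as app.py: a system instruction plus the question.
template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

{question}
"""
prompt = PromptTemplate(template=template, input_variables=["question"])

# Build the chain and run a single question through it.
llm_chain = LLMChain(prompt=prompt, llm=llm)
print(llm_chain.predict(question="What is the Falcon-7B model?"))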
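One design note on the queue: Streamlit re-executes the whole script on every interaction, so the module-level queue = Queue() is rebuilt on each rerun, and the put/get pair only ever processes the question just entered; it does not coordinate multiple waiting users. A hypothetical per-session alternative (not part of this commit) would pin the queue in st.session_state, which does survive reruns within a session:

import streamlit as st
from queue import Queue

# Hypothetical alternative, not in this commit: keep the queue in
# st.session_state so it survives Streamlit's script reruns within a
# user session instead of being re-created on every interaction.
if "queue" not in st.session_state:
    st.session_state.queue = Queue()

queue = st.session_state.queue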