Not-Grim-Refer committed on
Commit fb7ceee
1 parent: d848b6b

Update app.py

Files changed (1):
app.py +39 -27
app.py CHANGED
@@ -1,39 +1,51 @@
 import streamlit as st
-from transformers import pipeline
-from collections import deque
-
-# Configure system prompt
-system_prompt = "You are an AI assistant that specializes in helping with code-based questions and tasks. Feel free to ask anything related to coding!"
+import time
+from queue import Queue

 st.title("Falcon QA Bot")

-@st.cache(allow_output_mutation=True)
-def get_qa_pipeline():
-    return pipeline("question-answering", model="tiiuae/falcon-7b-instruct", device=0)
+huggingfacehub_api_token = st.secrets["hf_token"]
+
+from langchain import HuggingFaceHub, PromptTemplate, LLMChain
+
+repo_id = "tiiuae/falcon-7b-instruct"
+llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
+                     repo_id=repo_id,
+                     model_kwargs={"temperature": 0.2, "max_new_tokens": 2000})
+
+template = """
+You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
+
+{question}
+
+"""
+
+queue = Queue()

 def chat(query):
-    pipeline = get_qa_pipeline()
-    result = pipeline(question=query, max_length=2000, context=system_prompt)
+    prompt = PromptTemplate(template=template, input_variables=["question"])
+    llm_chain = LLMChain(prompt=prompt, verbose=True, llm=llm)
+
+    result = llm_chain.predict(question=query)
+
     return result

 def main():
-    user_queue = deque()
-
-    st.markdown('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
-
-    input = st.text_area("What do you want to ask about", value="", height=150, max_chars=500, key="input")
-    if st.button("Ask"):
-        if input:
-            user_queue.append(input)
-
-    if user_queue:
-        current_user = user_queue[0]
-        st.text_area("System Prompt", value=system_prompt, height=150, disabled=True)
-        st.text_area("User Input", value=current_user, height=150, disabled=True)
-        with st.spinner("Generating response..."):
-            output = chat(current_user)
-        st.text_area("Falcon's Answer", value=output["answer"], height=150, disabled=True)
-        user_queue.popleft()
+    input = st.text_input("What do you want to ask about", placeholder="Input your question here")
+    if input:
+        # Add the user's question to the queue
+        queue.put(input)
+
+    # Check if there are any waiting users
+    if not queue.empty():
+        # Get the next user's question from the queue
+        query = queue.get()
+
+        # Generate a response to the user's question
+        result = chat(query)
+
+        # Display the response to the user
+        st.write(result, unsafe_allow_html=True)

 if __name__ == '__main__':
     main()
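
Note: the new version reads the Hugging Face API token via st.secrets["hf_token"], so the app expects that key to be defined in Streamlit's secrets store (for a local run, a .streamlit/secrets.toml file). A minimal sketch of the lookup, with a placeholder token shown only in the comment:

import streamlit as st

# st.secrets is a read-only mapping that Streamlit loads from
# .streamlit/secrets.toml, e.g. a file containing:
#   hf_token = "hf_..."   (placeholder, not a real token)
huggingfacehub_api_token = st.secrets["hf_token"]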
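
The {question} slot in the template is filled by plain string substitution: LLMChain formats the prompt with the template before calling the model. A minimal standalone sketch of that interpolation, using the same template string as the commit (the example question is illustrative):

from langchain import PromptTemplate

template = """
You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.

{question}

"""

# PromptTemplate substitutes the named input variable into the template;
# this is roughly what LLMChain does before sending the text to the LLM.
prompt = PromptTemplate(template=template, input_variables=["question"])
print(prompt.format(question="How do I reverse a list in Python?"))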
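
One behavioral note on the module-level Queue: Streamlit re-executes the whole script on every interaction, so queue is rebuilt on each rerun and only ever holds the question submitted during that same run. A minimal sketch of one way to keep a queue alive across reruns with st.session_state ("pending_questions" is a hypothetical key, not part of this commit):

import streamlit as st
from queue import Queue

# st.session_state persists across reruns for the current browser session,
# so the queue survives the script re-execution after each widget event.
if "pending_questions" not in st.session_state:
    st.session_state["pending_questions"] = Queue()

question = st.text_input("What do you want to ask about")
if question:
    st.session_state["pending_questions"].put(question)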