Not-Grim-Refer committed on
Commit bd1c309
1 Parent(s): 4cdf418

Update app.py

Files changed (1)
  1. app.py +28 -49
app.py CHANGED
@@ -1,60 +1,39 @@
 import streamlit as st
+from transformers import pipeline
+from collections import deque
 
-st.title("Falcon QA Bot")
-
-# import chainlit as cl
-
-import os
-huggingfacehub_api_token = st.secrets["hf_token"]
-
-from langchain import HuggingFaceHub, PromptTemplate, LLMChain
-
-repo_id = "tiiuae/falcon-7b-instruct"
-llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                     repo_id=repo_id,
-                     model_kwargs={"temperature":0.2, "max_new_tokens":2000})
-
-template = """
-You are an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
-
-{question}
-
-"""
-# input = st.text_input("What do you want to ask about", placeholder="Input your question here")
-
-
-# # @cl.langchain_factory
-# def factory():
-#     prompt = PromptTemplate(template=template, input_variables=['question'])
-#     llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
-
-#     return llm_chain
-
-
-prompt = PromptTemplate(template=template, input_variables=["question"])
-llm_chain = LLMChain(prompt=prompt,verbose=True,llm=llm)
-
-# result = llm_chain.predict(question=input)
-
-# print(result)
-
+# Configure system prompt
+system_prompt = "You are an AI assistant that specializes in helping with code-based questions and tasks. Feel free to ask anything related to coding!"
+
+st.title("Falcon QA Bot")
+
+@st.cache(allow_output_mutation=True)
+def get_qa_pipeline():
+    return pipeline("question-answering", model="tiiuae/falcon-7b-instruct", device=0)
 
 def chat(query):
-    # prompt = PromptTemplate(template=template, input_variables=["question"])
-    # llm_chain = LLMChain(prompt=prompt,verbose=True,llm=llm)
-
-    result = llm_chain.predict(question=query)
-
+    pipeline = get_qa_pipeline()
+    result = pipeline(question=query, max_length=2000, context=system_prompt)
     return result
 
-
-
-
 def main():
-    input = st.text_input("What do you want to ask about", placeholder="Input your question here")
-    if input:
-        output = chat(input)
-        st.write(output,unsafe_allow_html=True)
-
+    user_queue = deque()
+
+    st.markdown('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
+
+    input = st.text_area("What do you want to ask about", value="", height=150, max_chars=500, key="input")
+    if st.button("Ask"):
+        if input:
+            user_queue.append(input)
+
+    if user_queue:
+        current_user = user_queue[0]
+        st.text_area("System Prompt", value=system_prompt, height=150, disabled=True)
+        st.text_area("User Input", value=current_user, height=150, disabled=True)
+        with st.spinner("Generating response..."):
+            output = chat(current_user)
+        st.text_area("Falcon's Answer", value=output["answer"], height=150, disabled=True)
+        user_queue.popleft()
 
 if __name__ == '__main__':
-    main()
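
Note on the new code path: tiiuae/falcon-7b-instruct is published as a causal text-generation model, so the question-answering pipeline used above (which expects an extractive QA model plus a context passage) and the max_length argument may not behave as intended, and the local name `pipeline` shadows the imported transformers.pipeline. Below is a minimal sketch of the same chat() helper backed by a text-generation pipeline instead; the get_generator name, prompt format, and generation parameters are illustrative assumptions, not part of this commit.

import streamlit as st
from transformers import pipeline

system_prompt = "You are an AI assistant that specializes in helping with code-based questions and tasks. Feel free to ask anything related to coding!"

@st.cache_resource  # newer Streamlit replacement for st.cache(allow_output_mutation=True)
def get_generator():
    # Falcon-7B-Instruct is served through the text-generation task
    return pipeline("text-generation", model="tiiuae/falcon-7b-instruct", device=0)

def chat(query):
    generator = get_generator()  # distinct name, so the imported `pipeline` is not shadowed
    prompt = f"{system_prompt}\n\nUser: {query}\nAssistant:"
    result = generator(prompt, max_new_tokens=2000, do_sample=True, temperature=0.2)
    # the pipeline returns a list of dicts; "generated_text" includes the prompt by default
    return result[0]["generated_text"][len(prompt):].strip()

Separately, because Streamlit reruns the whole script on every interaction, the plain deque() created inside main() is rebuilt on each rerun; keeping the queue in st.session_state would be needed for it to persist across interactions.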