Dekode committed on
Commit 8dfaca9 · verified · 1 Parent(s): 5b0f27d

Update app.py

Files changed (1)
  1. app.py +48 -72
app.py CHANGED
@@ -1,84 +1,60 @@
  import streamlit as st
- from llmware.prompts import Prompt
- import io, os, re
- import PyPDF2
-
- def register_gguf_model():
-     prompter = Prompt()
-     your_model_name = "llama"
-     hf_repo_name = "TheBloke/Llama-2-7B-Chat-GGUF"
-     model_file = "llama-2-7b-chat.Q3_K_M.gguf"
-     print("registering models")
-     prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")
-     your_model_name = "open_gpt4"
-     hf_repo_name = "TheBloke/Open_Gpt4_8x7B-GGUF"
-     model_file = "open_gpt4_8x7b.Q3_K_M.gguf"
-     prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")
-     your_model_name = "phi2"
-     hf_repo_name = "TheBloke/phi-2-GGUF"
-     model_file = "phi-2.Q3_K_M.gguf"
-     prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")
-     your_model_name = "mistral"
-     hf_repo_name = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
-     model_file = "mistral-7b-instruct-v0.2.Q3_K_M.gguf"
-     prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")
-     return prompter
-
- def main():
-     st.title("BetterZila RAG Enabled LLM")
-     with st.spinner("Registering Models for use..."):
-         prompter = register_gguf_model()
-
-     data_path = "data/"
-
-     st.sidebar.subheader("Select Model")
-     model_name = st.sidebar.selectbox("Select Model", ["llama", "open_gpt4", "phi2", "mistral"])
-     with st.spinner("Loading Model..."):
-         prompter.load_model(model_name)
-     st.success("Model Loaded!")
-
-     queries = ['Can you give me an example from history where the enemy was crushed totally from the book?', "What's the point of making myself less accessible?", "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?"]
-
-     st.subheader("Query")
-
-     with st.spinner("Loading PDF file..."):
-         for file in os.listdir(data_path):
-             if file.endswith(".pdf"):
-                 print("Found PDF file: ", file)
-                 pdf_file = file
-                 break
-         print("loading Source...")
-         source = prompter.add_source_document(data_path, pdf_file, query=None)
+ from langchain_community.document_loaders.pdf import PyPDFDirectoryLoader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain_community.llms import HuggingFaceHub
+ from langchain.memory import ConversationBufferMemory
+
+ def make_vectorstore(embeddings):
+     # load every PDF found in the data folder in the base directory
+     loader = PyPDFDirectoryLoader("data")
+
+     # load the documents
+     documents = loader.load()
+
+     # split the documents into chunks of 1400 characters with no overlap
+     text_splitter = CharacterTextSplitter(chunk_size=1400, chunk_overlap=0)
+
+     # apply the splitter to the loaded documents
+     texts = text_splitter.split_documents(documents)
+
+     # create a vector store from the chunks
+     docsearch = FAISS.from_documents(texts, embeddings)
+
+     return docsearch
+
+ def get_conversation(vectorstore):
+     # create a memory object to store the conversation history
+     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 512}, huggingfacehub_api_token=st.secrets["hf_token"]),
+         chain_type="stuff",
+         retriever=vectorstore.as_retriever(),
+         memory=memory)
+
+     return conversation_chain
+
+ def get_response(conversation_chain, query):
+     # run the chain; with a single output key this returns the answer string
+     response = conversation_chain.run(query)
+     return response
+
+ def main():
+     st.title("BetterZila RAG Enabled LLM")
+     embeddings = HuggingFaceInstructEmbeddings(model_name="google/t5-v1_1-xl", model_kwargs={"device": "cpu"})
+     vectorstore = make_vectorstore(embeddings)
+     conversation_chain = get_conversation(vectorstore)
+     queries = ["Can you give me an example from history where the enemy was crushed totally from the book?", "What's the point of making myself less accessible?", "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?"]
      for query in queries:
          st.subheader(f"Query: {query}")
-         with st.spinner("Generating response..."):
-             responses = prompter.prompt_with_source(query, prompt_name="just_the_facts", temperature=0.3)
-
-             for r, response in enumerate(responses):
-                 st.write(query)
-                 st.write(re.sub("[\n]", " ", response["llm_response"]).strip())
-
+         response = get_response(conversation_chain, query)
+         st.write(query)
+         st.write(response)
      st.success("Responses generated!")
-
-     # for query in queries:
-     #     st.subheader(f"Query: {query}")
-     #     with st.spinner("Generating response..."):
-     #         for file in os.listdir(data_path):
-     #             if file.endswith(".pdf"):
-     #                 print("Found PDF file: ", file)
-     #                 print("loading Source...")
-     #                 source = prompter.add_source_document(data_path, file, query=None)
-     #                 print("generating response...")
-     #                 responses = prompter.prompt_with_source(query, prompt_name="just_the_facts", temperature=0.3)
-     #                 print("response generated!")
-     #                 for r, response in enumerate(responses):
-     #                     print(query, ":", re.sub("[\n]", " ", response["llm_response"]).strip())
-     #                 prompter.clear_source_materials()
-     #                 st.write(query)
-     #                 st.write(re.sub("[\n]", " ", response["llm_response"]).strip())
-     #     st.success("Response generated!")
-
+
  if __name__ == "__main__":
-     main()
+     main()
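
For reference, a minimal sketch of exercising the new retrieval pipeline outside Streamlit, assuming the same data/ folder of PDFs and the same embedding and chunking settings as the committed app.py (the query is one of the app's built-ins):

from langchain_community.document_loaders.pdf import PyPDFDirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.vectorstores import FAISS

# build the same index app.py builds: load PDFs, chunk, embed, store in FAISS
documents = PyPDFDirectoryLoader("data").load()
texts = CharacterTextSplitter(chunk_size=1400, chunk_overlap=0).split_documents(documents)
embeddings = HuggingFaceInstructEmbeddings(model_name="google/t5-v1_1-xl", model_kwargs={"device": "cpu"})
docsearch = FAISS.from_documents(texts, embeddings)

# sanity-check retrieval on its own: print the top 4 chunks for one query
for doc in docsearch.similarity_search("What's the point of making myself less accessible?", k=4):
    print(doc.page_content[:200])

Since the conversational chain only layers the LLM on top of this retriever, checking retrieval separately helps tell whether a weak answer comes from the index or from the model.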