Pratik Dwivedi committed · 5b0f27d · 1 Parent(s): 543f41b

using lower compute models

Browse files:
- app.py +33 -50
- requirements.txt +0 -1
app.py
CHANGED
@@ -1,84 +1,67 @@
 import streamlit as st
 from llmware.prompts import Prompt
-from llmware.library import Library
-from llmware.retrieval import Query
-from llmware.setup import Setup
-import requests
 import io, os, re
 import PyPDF2
 
-def create_fin_docs_sample_library(library_name):
-
-    print(f"update: creating library - {library_name}")
-
-    library = Library().create_new_library(library_name)
-    sample_files_path = Setup().load_sample_files(over_write=False)
-    ingestion_folder_path = os.path.join(sample_files_path, "data")
-    parsing_output = library.add_files(ingestion_folder_path)
-    library.install_new_embedding(embedding_model_name="mini-lm-sbert", vector_db="faiss",batch_size=200)
-    return library
-
-def basic_semantic_retrieval_example (library):
-
-    q = Query(library)
-
-    # Set the keys that should be returned - optional - full set of keys will be returned by default
-    q.query_result_return_keys = ["distance","file_source", "page_num", "text"]
-
-    # perform a simple query
-    my_query = "Elizabeth I"
-    query_results1 = q.semantic_query(my_query, result_count=20)
-    # print(query_results1)
-    # Iterate through query_results, which is a list of result dicts
-    print(f"\nQuery 1 - {my_query}")
-    for i, result in enumerate(query_results1):
-        print("results - ", i, result)
-
-
 def register_gguf_model():
 
     prompter = Prompt()
     your_model_name = "llama"
     hf_repo_name = "TheBloke/Llama-2-7B-Chat-GGUF"
-    model_file = "llama-2-7b-chat.
+    model_file = "llama-2-7b-chat.Q3_K_M.gguf"
     print("registering models")
     prompter.model_catalog.register_gguf_model(your_model_name,hf_repo_name, model_file, prompt_wrapper="open_chat")
     your_model_name = "open_gpt4"
     hf_repo_name = "TheBloke/Open_Gpt4_8x7B-GGUF"
-    model_file = "open_gpt4_8x7b.
+    model_file = "open_gpt4_8x7b.Q3_K_M.gguf"
     prompter.model_catalog.register_gguf_model(your_model_name,hf_repo_name, model_file, prompt_wrapper="open_chat")
     your_model_name = "phi2"
     hf_repo_name = "TheBloke/phi-2-GGUF"
-    model_file = "phi-2.
+    model_file = "phi-2.Q3_K_M.gguf"
    prompter.model_catalog.register_gguf_model(your_model_name,hf_repo_name, model_file, prompt_wrapper="open_chat")
     your_model_name = "mistral"
     hf_repo_name = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
-    model_file = "mistral-7b-instruct-v0.2.
+    model_file = "mistral-7b-instruct-v0.2.Q3_K_M.gguf"
     prompter.model_catalog.register_gguf_model(your_model_name,hf_repo_name, model_file, prompt_wrapper="open_chat")
     return prompter
 
 def main():
     st.title("BetterZila RAG Enabled LLM")
-
-
+    with st.spinner("Registering Models for use..."):
+        prompter = register_gguf_model()
 
     data_path = "data/"
 
-
-
-
-
-
-    # st.success("Model Loaded!")
+    st.sidebar.subheader("Select Model")
+    model_name = st.sidebar.selectbox("Select Model", ["llama", "open_gpt4", "phi2", "mistral"])
+    with st.spinner("Loading Model..."):
+        prompter.load_model(model_name)
+    st.success("Model Loaded!")
 
     queries = ['Can you give me an example from history where the enemy was crushed totally from the book?', "What's the point of making myself less accessible?", "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?"]
 
-
-
-
-
-
-
+    st.subheader("Query")
+
+    with st.spinner("Loading PDF file..."):
+        for file in os.listdir(data_path):
+            if file.endswith(".pdf"):
+                print("Found PDF file: ", file)
+                pdf_file = file
+                break
+        print("loading Source...")
+        source = prompter.add_source_document(data_path, pdf_file, query=None)
+
+    for query in queries:
+        st.subheader(f"Query: {query}")
+        with st.spinner("Generating response..."):
+            responses = prompter.prompt_with_source(query, prompt_name="just_the_facts", temperature=0.3)
+
+            for r, response in enumerate(responses):
+                st.write(query)
+                st.write(re.sub("[\n]", " ", response["llm_response"]).strip())
+
+    st.success("Responses generated!")
+
 # for query in queries:
 #     st.subheader(f"Query: {query}")
 #     with st.spinner("Generating response..."):
requirements.txt
CHANGED
@@ -1,4 +1,3 @@
 llmware
 streamlit
-requests
 PyPDF2
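
For reference, a minimal sketch of the llmware flow this commit lands on, runnable outside Streamlit. It reuses only the calls that appear in app.py above (register_gguf_model with a lower-compute Q3_K_M quantization, load_model, add_source_document, prompt_with_source); the PDF filename is a placeholder, not a file from the repo:

import re
from llmware.prompts import Prompt

prompter = Prompt()

# Register one of the lower-compute Q3_K_M GGUF quantizations,
# exactly as register_gguf_model() in app.py does.
prompter.model_catalog.register_gguf_model("llama",
                                           "TheBloke/Llama-2-7B-Chat-GGUF",
                                           "llama-2-7b-chat.Q3_K_M.gguf",
                                           prompt_wrapper="open_chat")
prompter.load_model("llama")

# Attach a source PDF, then answer a query grounded in it, as main() does.
# "your_book.pdf" is a placeholder filename, not part of the commit.
source = prompter.add_source_document("data/", "your_book.pdf", query=None)
responses = prompter.prompt_with_source("What's the point of making myself less accessible?",
                                        prompt_name="just_the_facts", temperature=0.3)
for response in responses:
    print(re.sub("[\n]", " ", response["llm_response"]).strip())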