import streamlit as st
from llmware.prompts import Prompt
from llmware.library import Library
from llmware.retrieval import Query
from llmware.setup import Setup
import requests
import io, os, re
import PyPDF2
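
# Streamlit demo built on llmware: ingest the sample documents into a Library, embed them with
# mini-lm-sbert into a FAISS index, and display the results of a semantic retrieval query.
# Local GGUF chat models (llama-2, open_gpt4, phi-2, mistral) can be registered for generation,
# but that path is currently commented out in main().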
def create_fin_docs_sample_library(library_name):

    # create a new llmware library, ingest the sample files, and build a vector index
    print(f"update: creating library - {library_name}")
    library = Library().create_new_library(library_name)

    # download the llmware sample files if not already present, then parse the "data" folder
    sample_files_path = Setup().load_sample_files(over_write=False)
    ingestion_folder_path = os.path.join(sample_files_path, "data")
    parsing_output = library.add_files(ingestion_folder_path)

    # embed the parsed text chunks with mini-lm-sbert and store the vectors in FAISS
    library.install_new_embedding(embedding_model_name="mini-lm-sbert", vector_db="faiss", batch_size=200)

    return library
def basic_semantic_retrieval_example(library):

    # create a Query instance over the library's vector index
    q = Query(library)

    # set the keys that should be returned - optional - full set of keys will be returned by default
    q.query_result_return_keys = ["distance", "file_source", "page_num", "text"]

    # perform a simple semantic query
    my_query = "Elizabeth I"
    query_results1 = q.semantic_query(my_query, result_count=20)
    # print(query_results1)

    # iterate through query_results, which is a list of result dicts
    print(f"\nQuery 1 - {my_query}")
    for i, result in enumerate(query_results1):
        print("results - ", i, result)

    # return the result list so the caller (main) can display it
    return query_results1
def register_gguf_model():

    # register four quantized GGUF chat models from Hugging Face into the llmware model catalog
    prompter = Prompt()
    print("registering models")

    your_model_name = "llama"
    hf_repo_name = "TheBloke/Llama-2-7B-Chat-GGUF"
    model_file = "llama-2-7b-chat.Q5_K_S.gguf"
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")

    your_model_name = "open_gpt4"
    hf_repo_name = "TheBloke/Open_Gpt4_8x7B-GGUF"
    model_file = "open_gpt4_8x7b.Q4_K_M.gguf"
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")

    your_model_name = "phi2"
    hf_repo_name = "TheBloke/phi-2-GGUF"
    model_file = "phi-2.Q4_K_M.gguf"
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")

    your_model_name = "mistral"
    hf_repo_name = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
    model_file = "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")

    return prompter
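
# main() wires the app together: it currently builds the "48laws" library and renders the raw
# semantic retrieval hits; the model-selection sidebar and the per-query prompt_with_source
# loop are scaffolded below but left commented out.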
def main():

    st.title("BetterZila RAG Enabled LLM")

    # model registration/loading is currently disabled - see register_gguf_model() above
    # with st.spinner("Registering Models for use..."):
    #     prompter = register_gguf_model()

    data_path = "data/"

    # keep the select box on llama as the default, with a button below it to load the chosen model
    # st.sidebar.subheader("Select Model")
    # model_name = st.sidebar.selectbox("Select Model", ["llama", "open_gpt4", "phi2", "mistral"])
    # with st.spinner("Loading Model..."):
    #     prompter.load_model(model_name)
    # st.success("Model Loaded!")

    # canned queries for the (currently disabled) generation loop below
    queries = ['Can you give me an example from history where the enemy was crushed totally from the book?',
               "What's the point of making myself less accessible?",
               "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?"]

    # build the library, run the semantic retrieval example, and display each result dict
    lib = create_fin_docs_sample_library("48laws")
    results = basic_semantic_retrieval_example(lib)
    for i, result in enumerate(results):
        st.write(i, result)

    # disabled RAG loop: for each query, attach every PDF in data_path as a source, prompt the
    # loaded model with prompt_with_source, then display the cleaned response
    # st.subheader("Query")
    # for query in queries:
    #     st.subheader(f"Query: {query}")
    #     with st.spinner("Generating response..."):
    #         for file in os.listdir(data_path):
    #             if file.endswith(".pdf"):
    #                 print("Found PDF file: ", file)
    #                 print("loading Source...")
    #                 source = prompter.add_source_document(data_path, file, query=None)
    #                 print("generating response...")
    #                 responses = prompter.prompt_with_source(query, prompt_name="just_the_facts", temperature=0.3)
    #                 print("response generated!")
    #                 for r, response in enumerate(responses):
    #                     print(query, ":", re.sub("[\n]", " ", response["llm_response"]).strip())
    #                 prompter.clear_source_materials()
    #                 st.write(query)
    #                 st.write(re.sub("[\n]", " ", response["llm_response"]).strip())
    #     st.success("Response generated!")
if __name__ == "__main__":
    main()
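
# To launch the app locally (filename is an assumption): streamlit run app.py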