# Author: Pratik Dwivedi — "functionality test" (commit 9bf72c1)
# NOTE(review): the original first lines here were HuggingFace file-viewer page
# chrome ("raw", "history blame", "1.82 kB") captured along with the code; they
# are preserved as this comment so the file is valid Python.
import streamlit as st
from llmware.prompts import Prompt
import requests
import io
import PyPDF2
def register_gguf_model(model_name="my_model",
                        hf_repo_name="TheBloke/Llama-2-7B-Chat-GGUF",
                        model_file="llama-2-7b-chat.Q5_K_S.gguf",
                        prompt_wrapper="open_chat"):
    """Register a GGUF model with llmware and return a Prompt bound to it.

    Parameters (all optional; defaults reproduce the original hard-coded setup):
        model_name: local alias under which the model is registered.
        hf_repo_name: Hugging Face repo that hosts the GGUF file.
        model_file: GGUF filename inside the repo.
        prompt_wrapper: chat template name passed to llmware.

    Returns:
        A llmware ``Prompt`` instance with the model loaded.
    """
    prompter = Prompt()
    print("registering model")
    prompter.model_catalog.register_gguf_model(model_name, hf_repo_name,
                                               model_file,
                                               prompt_wrapper=prompt_wrapper)
    # load_model downloads the GGUF file on first use, so this can take a while.
    print("loading model")
    prompter.load_model(model_name)
    return prompter
def load_pdf_from_url(url, timeout=30):
    """Download a PDF over HTTP and return it as a ``PyPDF2.PdfReader``.

    Parameters:
        url: direct link to the PDF file.
        timeout: seconds to wait for the server (new, default 30) — the
            original call had no timeout and could hang indefinitely.

    Returns:
        PyPDF2.PdfReader over the downloaded bytes (held in memory).

    Raises:
        requests.HTTPError: if the server responds with an error status;
            previously an error page would have been fed to the PDF parser.
    """
    r = requests.get(url, timeout=timeout)
    r.raise_for_status()
    return PyPDF2.PdfReader(io.BytesIO(r.content))
def load_pdf_content(pdf):
    """Concatenate the extracted text of every page of *pdf* into one string.

    Parameters:
        pdf: an object with a ``pages`` iterable whose items expose
            ``extract_text()`` (e.g. ``PyPDF2.PdfReader``).

    Returns:
        All page text joined in page order; pages where ``extract_text()``
        returns ``None`` (e.g. image-only pages) contribute nothing instead
        of raising TypeError as the old ``+=`` loop did.
    """
    # "".join avoids the quadratic behavior of string += in a loop.
    return "".join(page.extract_text() or "" for page in pdf.pages)
def main():
    """Streamlit app: load the GGUF model, pull the assignment PDF, and run
    three fixed RAG queries against its text, rendering each answer."""
    st.title("BetterZila RAG Enabled LLM")

    with st.spinner("Loading model..."):
        prompter = register_gguf_model()
    st.success("Model loaded!")

    with st.spinner("Loading PDF content from the assignment URL..."):
        url = "https://pgcag.files.wordpress.com/2010/01/48lawsofpower.pdf"
        content = load_pdf_content(load_pdf_from_url(url))
    st.success("PDF content loaded!")

    queries = ['Can you give me an example from history where the enemy was crushed totally from the book?', "What's the point of making myself less accessible?", "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?"]
    for query in queries:
        st.subheader(f"Query: {query}")
        with st.spinner("Generating response..."):
            answer = prompter.prompt_main(query, context=content)["llm_response"]
        st.success("Response generated!")
        st.write(answer)
if __name__ == "__main__":
main()