import streamlit as st
from llmware.prompts import Prompt
import requests
import io
import PyPDF2
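
# Assumed dependencies for this Space: streamlit, llmware, requests, PyPDF2.
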
def register_gguf_model():
    # Register a local GGUF chat model in llmware's model catalog and load it.
    prompter = Prompt()
    your_model_name = "my_model"
    hf_repo_name = "TheBloke/Llama-2-7B-Chat-GGUF"
    model_file = "llama-2-7b-chat.Q5_K_S.gguf"
    print("registering model")
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file,
                                               prompt_wrapper="open_chat")
    print("loading model")
    prompter.load_model(your_model_name)
    return prompter
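
# A minimal sketch (not yet wired into the app) of how the registered model could be
# queried once loading is re-enabled. It assumes llmware's Prompt.prompt_main() call
# and an "llm_response" key in the returned response dict.
def ask_model(prompter, question, context_text):
    # Run a single inference call, passing the PDF text as context for the question.
    response = prompter.prompt_main(question, context=context_text)
    return response["llm_response"]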
def load_pdf_from_url(url):
    # Download a PDF over HTTP and open it in memory with PyPDF2.
    r = requests.get(url)
    f = io.BytesIO(r.content)
    return PyPDF2.PdfReader(f)
def load_pdf_content(pdf):
    # Concatenate the extracted text of every page; extract_text() can return
    # None for pages without extractable text, so fall back to an empty string.
    content = ""
    for page in pdf.pages:
        content += page.extract_text() or ""
    return content
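
# The test PDF yields far more text than a 7B GGUF model's context window can hold,
# so the content would need to be truncated or chunked before being used as prompt
# context. A simple character-based chunker (chunk size is an arbitrary assumption):
def chunk_text(text, chunk_size=2000):
    # Split the extracted text into fixed-size character chunks.
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]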
def main():
    st.title("GGUF model loading test")

    # Model loading is currently commented out; the app only fetches and displays the PDF.
    # with st.spinner("Loading model..."):
    #     prompter = register_gguf_model()
    # if prompter:
    #     st.success("Model loaded!")

    url = "https://pgcag.files.wordpress.com/2010/01/48lawsofpower.pdf"
    pdf = load_pdf_from_url(url)
    content = load_pdf_content(pdf)
    st.write(content)
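
    # Hypothetical usage of the sketches above, kept commented out since model
    # loading is disabled in this test:
    # prompter = register_gguf_model()
    # first_chunk = chunk_text(content)[0]
    # st.write(ask_model(prompter, "Summarize this passage.", first_chunk))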
if __name__ == "__main__":
    main()