import streamlit as st
from llmware.prompts import Prompt
import requests
import io
import PyPDF2


def register_gguf_model():
    # Register a GGUF chat model from Hugging Face in the llmware model catalog
    # and load it into a Prompt object.
    prompter = Prompt()
    your_model_name = "my_model"
    hf_repo_name = "TheBloke/Llama-2-7B-Chat-GGUF"
    model_file = "llama-2-7b-chat.Q5_K_S.gguf"
    print("registering model")
    prompter.model_catalog.register_gguf_model(your_model_name, hf_repo_name, model_file, prompt_wrapper="open_chat")
    print("loading model")
    prompter.load_model(your_model_name)
    return prompter


def load_pdf_from_url(url):
    # Download the PDF into memory and wrap it in a PyPDF2 reader.
    r = requests.get(url)
    f = io.BytesIO(r.content)
    file = PyPDF2.PdfReader(f)
    return file


def load_pdf_content(pdf):
    # Concatenate the extracted text of every page.
    content = ""
    for page in pdf.pages:
        content += page.extract_text()
    return content


def main():
    st.title("GGUF model loading test")
    # with st.spinner("Loading model..."):
    #     prompter = register_gguf_model()
    # if prompter:
    #     st.success("Model loaded!")
    url = "https://pgcag.files.wordpress.com/2010/01/48lawsofpower.pdf"
    pdf = load_pdf_from_url(url)
    content = load_pdf_content(pdf)
    st.write(content)


if __name__ == "__main__":
    main()
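
# Usage note (assumption: this file is saved as app.py and the streamlit,
# llmware, requests, and PyPDF2 packages are installed):
#
#   streamlit run app.py
#
# The model-loading block in main() is currently commented out, so the app
# only fetches the PDF from the hard-coded URL and displays its extracted text.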