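"""RAG chatbot: upload a PDF through a Gradio UI and ask questions about its content.

Assumed dependencies: pdf2image requires the poppler utilities on the system,
and LayoutLMv3Processor's built-in OCR requires Tesseract plus the pytesseract
package.
"""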
import gradio as gr
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFacePipeline
from transformers import LayoutLMv3Processor
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from pdf2image import convert_from_path
import os

class LayoutLMv3OCR:
    """Page-level OCR for PDFs via the LayoutLMv3 processor.

    LayoutLMv3 is an encoder-only model and cannot generate text, so instead
    of loading it as a seq2seq model and calling generate(), we rely on the
    processor's built-in Tesseract OCR (apply_ocr=True by default) and decode
    the recognized tokens back into text.
    """

    def __init__(self):
        self.processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")

    def extract_text(self, pdf_path):
        # Render each PDF page to an image (requires poppler), run OCR on it,
        # then decode the token ids produced by the processor back to a string.
        images = convert_from_path(pdf_path)
        text_pages = []
        for image in images:
            inputs = self.processor(images=image, return_tensors="pt")
            text = self.processor.tokenizer.decode(
                inputs["input_ids"][0], skip_special_tokens=True
            )
            text_pages.append(text)
        return text_pages
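# Usage sketch (the file name is illustrative):
#   pages = LayoutLMv3OCR().extract_text("example.pdf")
#   print(pages[0])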

ocr_tool = LayoutLMv3OCR()

def process_pdf_and_query(pdf_path, question):
    # Load the PDF text and index it in an in-memory Chroma vector store.
    loader = PyPDFLoader(pdf_path)
    documents = loader.load()

    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectordb = Chroma.from_documents(documents, embeddings)

    retriever = vectordb.as_retriever()
    prompt_template = "Answer the following question based on the document: {context}\nQuestion: {question}\nAnswer:"
    prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)

    # RetrievalQA needs a concrete LLM (llm=None fails); flan-t5-base is an
    # illustrative choice, any LangChain-compatible model works here.
    llm = HuggingFacePipeline.from_model_id(
        model_id="google/flan-t5-base", task="text2text-generation"
    )
    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, chain_type_kwargs={"prompt": prompt})
    # RetrievalQA fetches the context via the retriever itself, so only the
    # question is passed in (not input_documents, which belongs to load_qa_chain).
    response = qa_chain.run(question)
    return response
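# Round-trip sketch (assumes a local "example.pdf"):
#   print(process_pdf_and_query("example.pdf", "What is this document about?"))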

def chatbot_response(pdf, question):
    # Save the upload to the local filesystem. With gr.File(type="filepath"),
    # Gradio passes a path to a temp file rather than a file object, so the
    # original pdf.read() would fail on a string.
    pdf_path = "uploaded_pdf.pdf"
    with open(pdf, "rb") as src, open(pdf_path, "wb") as dst:
        dst.write(src.read())

    # Per-page OCR text; note it is not fed into the QA chain, which re-reads
    # the PDF via PyPDFLoader.
    extracted_text = ocr_tool.extract_text(pdf_path)
    answer = process_pdf_and_query(pdf_path, question)

    # Remove the saved PDF once processing is done.
    os.remove(pdf_path)

    return answer

pdf_input = gr.File(label="Upload a PDF file", type="filepath")
question_input = gr.Textbox(label="Enter a question")
response_output = gr.Textbox(label="Answer")

interface = gr.Interface(
    fn=chatbot_response,
    inputs=[pdf_input, question_input],
    outputs=response_output,
    title="RAG chatbot with PDF support",
    description="Upload a PDF file and ask questions about its content."
)

if __name__ == "__main__":
    interface.launch(share=True)