import os

import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline

EMBEDDINGS_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
LLM_MODEL_NAME = "google/flan-t5-small"


def load_and_split_docs(list_file_path):
    """Load the given PDF files and split them into overlapping text chunks."""
    if not list_file_path:
        # Raising gr.Error surfaces the message in the Gradio UI and keeps the
        # return type consistent (the original returned a tuple only on error).
        raise gr.Error("Error: No documents found!")
    loaders = [PyPDFLoader(x) for x in list_file_path]
    documents = []
    for loader in loaders:
        documents.extend(loader.load())
    # Small chunks with a slight overlap keep each chunk within the embedding
    # model's context window while preserving continuity across chunk borders.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=32)
    return text_splitter.split_documents(documents)


def create_db(docs):
    """Embed the chunks and index them in an in-memory FAISS vector store."""
    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDINGS_MODEL_NAME)
    return FAISS.from_documents(docs, embeddings)


def initialize_llm_chain(llm_model, temperature, max_tokens, vector_db):
    """Wrap a local Hugging Face pipeline in a conversational retrieval chain."""
    local_pipeline = pipeline(
        "text2text-generation",
        model=llm_model,
        max_length=max_tokens,
        temperature=temperature,
        do_sample=temperature > 0,  # temperature has no effect without sampling
    )
    llm = HuggingFacePipeline(pipeline=local_pipeline)
    # output_key="answer" is required here: with return_source_documents=True
    # the chain returns multiple outputs, and the memory must be told which
    # one to store, otherwise LangChain raises a "multiple output keys" error.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )
    retriever = vector_db.as_retriever()
    return ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        memory=memory,
        return_source_documents=True,
    )


def demo():
    with gr.Blocks() as demo:
        # gr.State holds per-session objects (the vector store and the chain)
        # across callback invocations.
        vector_db = gr.State()
        qa_chain = gr.State()
        gr.HTML("