# Hugging Face Spaces page residue ("Spaces: Sleeping") removed — kept as a
# comment so this file is valid Python. NOTE(review): confirm against the
# original app.py in the Space repository.
# Standard library
import os
import shutil
import tempfile

# Third-party
from fastapi import FastAPI, File, UploadFile
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Local
from LoadLLM import Loadllm

# Where the FAISS index built from the uploaded PDF is persisted.
# NOTE(review): each upload overwrites this path — confirm single-tenant use.
DB_FAISS_PATH = 'vectorstore/db_faiss'

app = FastAPI()
@app.get("/")  # NOTE(review): decorator presumably lost in extraction — without it the handler is never routed; confirm path
async def home() -> str:
    """Health-check endpoint: report that the API server is up."""
    return "API Server Running"
@app.post("/prompt")  # NOTE(review): decorator presumably lost in extraction — confirm the original route path
async def PromptLLM(file: UploadFile = File(...), query: str = "") -> str:
    """Answer *query* against the uploaded PDF.

    Saves the upload to a temporary file, embeds its pages into a FAISS
    index (persisted at DB_FAISS_PATH), and runs a single-turn
    ConversationalRetrievalChain over it with the local LLM.

    Fixes vs. original: `query` was an undefined name (NameError on every
    call) — now a parameter; the delete=False temp file was never removed
    (disk leak per request); the upload was written via a second, redundant
    open() of the same path.
    """
    # Persist the upload to disk so PyMuPDFLoader can read it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
        shutil.copyfileobj(file.file, temp_file)
        temp_file_path = temp_file.name
    try:
        data = PyMuPDFLoader(file_path=temp_file_path).load()
    finally:
        os.remove(temp_file_path)  # always clean up the delete=False temp file

    # Embed the PDF pages with Sentence Transformers.
    embeddings = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )

    # Build a FAISS vector store over the pages and persist it.
    db = FAISS.from_documents(data, embeddings)
    db.save_local(DB_FAISS_PATH)

    # Load the local language model and wire it to the retriever.
    llm = Loadllm.load_llm()
    chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())

    # chat_history must be a list of (question, answer) pairs, not a string.
    result = chain({"question": query, "chat_history": []})
    return result['answer']