from fastapi import FastAPI, File, UploadFile, Form
import os
from langchain_community.document_loaders import PyMuPDFLoader
from LoadLLM import Loadllm
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain

DB_FAISS_PATH = 'vectorstore/db_faiss'

app = FastAPI()


@app.get('/')
async def home():
    return "API Server Running"


@app.post('/PromptBuddy')
async def PromptLLM(file: UploadFile = File(...), query: str = Form(...)):
    # Persist the uploaded PDF to disk so PyMuPDFLoader can read it;
    # create the uploads directory first in case it does not exist yet
    os.makedirs("uploads", exist_ok=True)
    with open(f"uploads/{file.filename}", "wb") as f:
        f.write(file.file.read())

    # Parse the PDF into LangChain documents
    loader = PyMuPDFLoader(file_path=f"uploads/{file.filename}")
    data = loader.load()

    # Create embeddings using Sentence Transformers
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

    # Create a FAISS vector store and save embeddings
    db = FAISS.from_documents(data, embeddings)
    db.save_local(DB_FAISS_PATH)

    # Load the language model
    llm = Loadllm.load_llm()

    # Create a conversational chain over the freshly built index
    chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())

    # chat_history is expected to be a list of (question, answer) tuples,
    # not a string; it is empty here since each request is single-shot
    result = chain({"question": query, "chat_history": []})
    return result["answer"]
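
# Usage sketch: one way to serve the app and exercise the endpoint.
# Assumes this module is saved as main.py and uvicorn is installed
# (both are assumptions, not part of this file's requirements):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Then, assuming the server is running locally on port 8000 and a
# sample.pdf exists in the working directory, the endpoint can be
# called with the requests library:
#
#   import requests
#   with open("sample.pdf", "rb") as pdf:
#       resp = requests.post(
#           "http://127.0.0.1:8000/PromptBuddy",
#           files={"file": ("sample.pdf", pdf, "application/pdf")},
#           data={"query": "What is this document about?"},
#       )
#   print(resp.text)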