# PDF question-answering API service (Hugging Face Space).
import os
import shutil
import tempfile

from fastapi import FastAPI, File, Form, UploadFile
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

from LoadLLM import Loadllm
# Directory where the FAISS vector index built from the upload is persisted.
DB_FAISS_PATH = 'vectorstore/db_faiss'
# FastAPI application instance; route handlers below are registered on it.
app = FastAPI()
@app.get('/')
async def home():
    """Liveness probe: confirm the API server is up."""
    status_message = "API Server Running"
    return status_message
@app.post('/PromptBuddy')
async def PromptLLM(file: UploadFile = File(...),
                    query: str = Form('Summarize this document')):
    """Answer a question about an uploaded PDF.

    Saves the upload to a temporary file, indexes it into a FAISS vector
    store using sentence-transformer embeddings, then runs a conversational
    retrieval chain over the index with *query*.

    Args:
        file: The PDF document to index (multipart upload).
        query: The question to ask about the document. A default is
            provided so existing clients that only send a file keep
            working (previously `query` was an undefined name and every
            request raised NameError).

    Returns:
        The answer string produced by the LLM.
    """
    # Persist the upload to disk: PyMuPDFLoader needs a file path, not a
    # stream. Write straight into the NamedTemporaryFile handle instead of
    # re-opening the path a second time as the original code did.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_file:
        temp_file_path = temp_file.name
        shutil.copyfileobj(file.file, temp_file)
    try:
        loader = PyMuPDFLoader(file_path=temp_file_path)
        data = loader.load()
        # Create embeddings using Sentence Transformers
        embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
        # Create a FAISS vector store and save embeddings
        db = FAISS.from_documents(data, embeddings)
        db.save_local(DB_FAISS_PATH)
        # Load the language model
        llm = Loadllm.load_llm()
        # Create a conversational chain over the freshly built index
        chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())
        # chat_history must be a list of (question, answer) pairs, not ''.
        result = chain({"question": query, "chat_history": []})
        return result['answer']
    finally:
        # delete=False above means we own cleanup: without this, every
        # request leaked one temp file on disk.
        os.remove(temp_file_path)