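"""FastAPI service that accepts a PDF upload, indexes it in a FAISS vector
store, summarises it with a local LLM, and asks Gemini for medical insights."""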
import os
import shutil
import tempfile

from fastapi import FastAPI, File, UploadFile
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

from LoadLLM import Loadllm
import google.generativeai as genai

# Read the Gemini API key from the environment rather than hardcoding a secret
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel('gemini-pro')
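# Local path where the FAISS index is persisted after each upload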
DB_FAISS_PATH = 'vectorstore/db_faiss'
app = FastAPI()

@app.get('/')
async def home():
    return "API Server Running"

@app.post('/PromptBuddy')
async def PromptLLM(file: UploadFile = File(...)):
    # Save the uploaded PDF to a temporary file so PyMuPDF can read it from disk
    with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_file:
        temp_file_path = temp_file.name
        shutil.copyfileobj(file.file, temp_file)

    try:
        # Load the PDF into LangChain documents
        loader = PyMuPDFLoader(file_path=temp_file_path)
        data = loader.load()

        # Create embeddings using Sentence Transformers
        embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

        # Create a FAISS vector store and save the embeddings locally
        db = FAISS.from_documents(data, embeddings)
        db.save_local(DB_FAISS_PATH)

        # Load the language model
        llm = Loadllm.load_llm()

        # Create a conversational retrieval chain over the vector store
        chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())

        # chat_history must be a list of (question, answer) pairs, not a string
        result = chain({"question": "Summarise this report", "chat_history": []})
        summary = result['answer']

        # Ask Gemini for medical insights based on the generated summary
        response = model.generate_content(
            summary
            + "\nBased on the information provided, what are the key medical "
              "insights and considerations for this patient? (100 words)"
        )
        return {"summary": summary, "insights": response.text}
    finally:
        # Remove the temporary file once processing is complete
        os.remove(temp_file_path)
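
# To run the server locally (file saved as main.py, uvicorn installed):
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Example request (with a hypothetical report.pdf in the current directory):
#   curl -X POST -F "file=@report.pdf" http://localhost:8000/PromptBuddy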