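"""FastAPI service that indexes an uploaded PDF report in a FAISS vector store,
summarises it with a locally loaded LLM, and asks Gemini for medical insights."""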
import os
import shutil
import tempfile

from fastapi import FastAPI, File, UploadFile
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
import google.generativeai as genai

from LoadLLM import Loadllm


# Read the Gemini API key from the environment; never hardcode secrets in source.
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)

model = genai.GenerativeModel('gemini-pro')

DB_FAISS_PATH = 'vectorstore/db_faiss'

app = FastAPI()

@app.get('/')
async def home():
    return "API Server Running"

@app.post('/PromptBuddy')
async def PromptLLM(file: UploadFile = File(...)):

    # Persist the uploaded PDF to a temporary file so PyMuPDFLoader can read it
    # from disk; the .pdf suffix helps the loader identify the file type.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as temp_file:
        shutil.copyfileobj(file.file, temp_file)
        temp_file_path = temp_file.name

    loader = PyMuPDFLoader(file_path=temp_file_path)
    data = loader.load()
    os.remove(temp_file_path)  # delete=False leaves the file behind otherwise

    # Create embeddings using Sentence Transformers
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')

    # Create a FAISS vector store and save embeddings
    db = FAISS.from_documents(data, embeddings)
    db.save_local(DB_FAISS_PATH)
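    # The saved index can be reloaded later with FAISS.load_local(DB_FAISS_PATH, embeddings);
    # newer langchain releases may also require allow_dangerous_deserialization=True.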

    # Load the language model
    llm = Loadllm.load_llm()

    # Create a conversational chain
    chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever())

    # chat_history must be a list of (question, answer) tuples, not a string
    result = chain({"question": "Summarise this report", "chat_history": []})
    summary = result['answer']

    # Ask Gemini for clinical insights grounded in the generated summary
    response = model.generate_content(
        summary
        + "\nBased on the information provided, what are the key medical insights and considerations for this patient?"
    )

    ans = {"summary": summary, "insights": response.text}

    return ans
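
# Example request against the /PromptBuddy endpoint (hypothetical file name):
#   curl -X POST -F "file=@report.pdf" http://localhost:8000/PromptBuddy

# Minimal local entry point; a sketch assuming uvicorn is installed
# (host and port below are placeholders, not from the original source).
if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host='0.0.0.0', port=8000)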