import gradio as gr
from fastapi import FastAPI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from dotenv import load_dotenv
import os

# Load environment variables (expects OPENAI_API_KEY in a .env file or the environment)
load_dotenv()

app = FastAPI()
openai_api_key = os.getenv("OPENAI_API_KEY")

# Embeddings and the pre-built FAISS vector store used for retrieval
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
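
# The helper below is an illustrative, hypothetical sketch (it is not called anywhere
# in this app): one way a FAISS index like the one loaded above could be built and
# persisted, assuming `docs` is a list of LangChain Document objects carrying the
# 'type' and 'original_content' metadata fields that get_answer relies on later.
def build_faiss_index(docs, persist_dir="faiss_index"):
    """Embed the documents and save the resulting FAISS index to disk."""
    index = FAISS.from_documents(docs, embeddings)
    index.save_local(persist_dir)
    return index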
prompt_template = """
You are an expert on skin cancer and related conditions.
Answer the question based only on the following context, which can include text, images, and tables:
{context}
Question: {question}
If you are not sure of the answer, decline and say "Sorry, I don't have much information about it."
Otherwise, return the most helpful answer you can, in as much detail as possible.
Answer:
"""

# Chain that feeds the retrieved context and the question into GPT-4
qa_chain = LLMChain(
    llm=ChatOpenAI(model="gpt-4", openai_api_key=openai_api_key, max_tokens=1024),
    prompt=PromptTemplate.from_template(prompt_template),
)
def get_answer(question: str):
    """Retrieve relevant chunks from the vector store and generate an answer."""
    relevant_docs = db.similarity_search(question)
    context = ""
    relevant_images = []
    # Build the context string, tagging each retrieved chunk by its modality
    for d in relevant_docs:
        if d.metadata['type'] == 'text':
            context += '[text]' + d.metadata['original_content']
        elif d.metadata['type'] == 'table':
            context += '[table]' + d.metadata['original_content']
        elif d.metadata['type'] == 'image':
            # For image chunks, page_content holds a text summary; the original image is kept in metadata
            context += '[image]' + d.page_content
            relevant_images.append(d.metadata['original_content'])
    result = qa_chain.run({'context': context, 'question': question})
    # Guard against the case where no image chunks were retrieved
    return {
        "relevant_images": relevant_images[0] if relevant_images else None,
        "result": result,
    }
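
# Example of calling the QA function directly (the question is illustrative only):
#   get_answer("What are the early warning signs of melanoma?")
#   -> {"relevant_images": <original image content or None>, "result": "..."}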
# Expose the QA function through a simple Gradio interface and mount it inside FastAPI
iface = gr.Interface(fn=get_answer, inputs="text", outputs="json")
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
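
# Once the server is running (python app.py), the endpoint can be queried with the
# Gradio client; the URL and question below are placeholders for illustration:
#   from gradio_client import Client
#   client = Client("http://localhost:7860/")
#   client.predict("Is a changing mole a sign of melanoma?", api_name="/predict")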