import os
import asyncio
import gradio as gr
from langchain_core.prompts import PromptTemplate
from langchain_core.documents import Document
from langchain_google_genai import ChatGoogleGenerativeAI
import google.generativeai as genai
from langchain.chains.question_answering import load_qa_chain
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from PIL import Image
import io
from functools import lru_cache
import concurrent.futures
import pymupdf
# Configure Gemini API
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Load Mistral model (lazy loading)
model_path = "nvidia/Mistral-NeMo-Minitron-8B-Base"
mistral_tokenizer = None
mistral_model = None
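
# Download and initialize the Minitron tokenizer/model on first use so the app starts quickly.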
def load_mistral_model():
    global mistral_tokenizer, mistral_model
    if mistral_tokenizer is None or mistral_model is None:
        mistral_tokenizer = AutoTokenizer.from_pretrained(model_path)
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        dtype = torch.bfloat16
        mistral_model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device)
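
# Extract page text with PyMuPDF; results are cached per file path so repeated questions skip re-parsing.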
@lru_cache(maxsize=100)
def get_pdf_content(file_path):
    doc = pymupdf.open(file_path)
    content = []
    for page_num in range(len(doc)):
        page = doc[page_num]
        text = page.get_text()
        content.append(Document(page_content=text, metadata={"page": page_num + 1}))
    return content
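
# Answer a question about a PDF: Gemini reads the extracted pages through a LangChain "stuff" QA chain.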
async def process_pdf(file_path, question):
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is not contained in the context, say "answer not available in context".\n\nContext:\n{context}\nQuestion:\n{question}\nAnswer:"""
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    pdf_content = get_pdf_content(file_path)
    # Limit to the first 5 pages for efficiency; the "stuff" chain fills {context} from these documents.
    stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    stuff_answer = await stuff_chain.arun(input_documents=pdf_content[:5], question=question)
    return stuff_answer
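
# Answer a question about an uploaded image with the Gemini vision model.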
async def process_image(image, question):
    model = genai.GenerativeModel('gemini-pro-vision')
    response = await model.generate_content_async([image, question])
    return response.text
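
# Ask the local Mistral model for a follow-up question based on Gemini's answer.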
async def generate_mistral_followup(answer):
    load_mistral_model()
    mistral_prompt = f"Based on this answer: {answer}\nGenerate a follow-up question:"
    mistral_inputs = mistral_tokenizer.encode(mistral_prompt, return_tensors='pt').to(mistral_model.device)
    with torch.no_grad():
        # Generate up to 50 new tokens beyond the prompt (max_length would count the prompt itself).
        mistral_outputs = mistral_model.generate(mistral_inputs, max_new_tokens=50)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    mistral_output = mistral_tokenizer.decode(mistral_outputs[0][mistral_inputs.shape[1]:], skip_special_tokens=True)
    return mistral_output
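
# Route the request to the PDF or image pipeline, then append a Mistral-generated follow-up question.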
async def process_input(file, image, question):
    try:
        if file is not None:
            gemini_answer = await process_pdf(file.name, question)
        elif image is not None:
            gemini_answer = await process_image(image, question)
        else:
            return "Please upload a PDF file or an image."
        mistral_followup = await generate_mistral_followup(gemini_answer)
        combined_output = f"Gemini Answer: {gemini_answer}\n\nMistral Follow-up: {mistral_followup}"
        return combined_output
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# Optimized Multi-modal RAG Knowledge Retrieval using Gemini API and Mistral Model")
    with gr.Row():
        with gr.Column():
            input_file = gr.File(label="Upload PDF File")
            input_image = gr.Image(type="pil", label="Upload Image")
            input_question = gr.Textbox(label="Ask about the document or image")
    output_text = gr.Textbox(label="Answer - Combined Gemini and Mistral")
    submit_button = gr.Button("Submit")
    submit_button.click(fn=process_input,
                        inputs=[input_file, input_image, input_question],
                        outputs=output_text)
if __name__ == "__main__":
    demo.launch()