# NOTE: "Spaces: Runtime error / Runtime error" was a Hugging Face Space status
# banner captured when this source was scraped; kept here as a comment so the
# file remains valid Python.
import os
import pickle

import faiss
import gradio as gr
from PIL import Image
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
# Load the OpenAI API key from the environment (e.g. a Hugging Face Space
# secret) instead of hard-coding it in the source. The original placeholder
# literal is kept only as a last-resort fallback so the script still starts
# when the secret is not configured.
openai_api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key")

# Load the persisted FAISS vectorstore from disk.
# NOTE(review): allow_dangerous_deserialization unpickles arbitrary data —
# acceptable only because the index was produced by this project itself,
# never from user-supplied files.
load_path = "faiss_index"
vectorstore = FAISS.load_local(
    load_path,
    OpenAIEmbeddings(openai_api_key=openai_api_key),
    allow_dangerous_deserialization=True,
)
# Prompt used for every question: the retrieved context (text, tables and
# image descriptions) is injected verbatim, followed by the user's question.
prompt_template = """
You are an expert assistant. Answer based on the given context (text, tables, images).
Context: {context}
Question: {question}
If you cannot find relevant data, reply with: "Sorry, I don't have enough information."
Answer:
"""

# GPT-4 chat model plus the template above form the question-answering chain.
_qa_prompt = PromptTemplate.from_template(prompt_template)
_qa_llm = ChatOpenAI(model="gpt-4", openai_api_key=openai_api_key, max_tokens=1024)
qa_chain = LLMChain(llm=_qa_llm, prompt=_qa_prompt)
# Function to handle queries
def answer(query):
    """Answer *query* from the vectorstore and collect any related images.

    Runs a similarity search, assembles a context string from the retrieved
    text/table/image chunks, asks the LLM chain, then loads the image files
    referenced by the image chunks.

    Returns:
        tuple: ``(response_text, images)`` where ``images`` is a list of
        ``PIL.Image`` objects; unreadable or missing files are skipped.
    """
    relevant_docs = vectorstore.similarity_search(query)

    context_parts = []      # joined once at the end (avoids quadratic +=)
    relevant_images = []    # image file paths collected from metadata
    for doc in relevant_docs:
        # .get() so a chunk with a missing 'type' key is skipped instead of
        # raising KeyError and failing the whole request.
        doc_type = doc.metadata.get('type')
        if doc_type == 'text':
            context_parts.append('[text] ' + doc.metadata['original_content'] + "\n")
        elif doc_type == 'table':
            context_parts.append('[table] ' + doc.metadata['original_content'] + "\n")
        elif doc_type == 'image':
            # For image chunks, page_content holds the text description and
            # metadata['original_content'] holds the image file path.
            context_parts.append('[image] ' + doc.page_content + "\n")
            relevant_images.append(doc.metadata['original_content'])
    context = "".join(context_parts)

    response = qa_chain.run({'context': context, 'question': query})

    # Best-effort image loading: a missing or corrupt file must not break
    # the text answer, so only file/format errors are caught — anything
    # else (e.g. a programming error) still surfaces.
    images = []
    for img_path in relevant_images:
        try:
            images.append(Image.open(img_path))
        except (OSError, ValueError):
            pass  # skip unreadable images
    return response, images
# Gradio UI
def chatbot_interface(question):
    """Gradio callback: return the answer text and a gallery (or None)."""
    response, images = answer(question)
    # Gradio's gallery output expects None, not [], when there is nothing to show.
    gallery = images if images else None
    return response, gallery
# Wire the callback into a simple Gradio front-end: one text box in,
# an answer string plus an image gallery out, then start the server.
iface = gr.Interface(
    fn=chatbot_interface,
    inputs="text",
    outputs=["text", "gallery"],
    title="Text & Image Retrieval Chatbot",
    description="Ask a question and get an answer with relevant images if available.",
)

iface.launch()