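"""Multimodal RAG chatbot: retrieves text, table, and image chunks from a local
FAISS index and answers questions with GPT-4 behind a Gradio UI."""
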
import os
import gradio as gr
from PIL import Image
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
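# NOTE: these import paths target the pre-0.2 `langchain` package; on newer
# releases most of these classes live in `langchain_community` / `langchain_openai`.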

# Load the OpenAI API key from the environment (on Hugging Face Spaces, set it as a Secret)
openai_api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key")

# Load FAISS vectorstore
load_path = "faiss_index"
vectorstore = FAISS.load_local(
    load_path, OpenAIEmbeddings(openai_api_key=openai_api_key), allow_dangerous_deserialization=True
)
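# The "faiss_index" directory is assumed to have been built offline, e.g. with
# FAISS.from_documents(docs, OpenAIEmbeddings(...)).save_local("faiss_index").
# allow_dangerous_deserialization is needed because load_local unpickles the
# docstore, so only load indexes you built yourself.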

# Define prompt template
prompt_template = """
You are an expert assistant. Answer the question using only the given context (text, tables, and image descriptions).
Context: {context}

Question: {question}

If you cannot find relevant data, reply with: "Sorry, I don't have enough information."
Answer:
"""

qa_chain = LLMChain(
    llm=ChatOpenAI(model="gpt-4", openai_api_key=openai_api_key, max_tokens=1024),
    prompt=PromptTemplate.from_template(prompt_template)
)
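# LLMChain only formats the prompt and calls the model; retrieval happens
# manually in answer() below rather than via a built-in RetrievalQA chain.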

# Retrieve relevant chunks, build the prompt context, and collect image paths
def answer(query):
    relevant_docs = vectorstore.similarity_search(query)  # top-k chunks (default k=4)
    context = ""
    relevant_images = []

    # Each chunk was tagged at indexing time as text, table, or image
    for doc in relevant_docs:
        doc_type = doc.metadata.get("type")
        if doc_type == "text":
            context += "[text] " + doc.metadata["original_content"] + "\n"
        elif doc_type == "table":
            context += "[table] " + doc.metadata["original_content"] + "\n"
        elif doc_type == "image":
            # page_content holds the searchable image summary;
            # original_content holds the image file path
            context += "[image] " + doc.page_content + "\n"
            relevant_images.append(doc.metadata["original_content"])

    response = qa_chain.run({"context": context, "question": query})

    # Load the referenced images, skipping any that are missing or unreadable
    images = []
    for img_path in relevant_images:
        try:
            images.append(Image.open(img_path))
        except OSError:  # covers FileNotFoundError and PIL's UnidentifiedImageError
            pass

    return response, images

# Gradio UI
def chatbot_interface(question):
    response, images = answer(question)
    return response, images if images else None

iface = gr.Interface(
    fn=chatbot_interface,
    inputs="text",
    outputs=["text", "gallery"],
    title="Text & Image Retrieval Chatbot",
    description="Ask a question and get an answer with relevant images if available.",
)
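# The two outputs map 1:1 onto chatbot_interface's return values:
# the answer string fills the textbox, the list of PIL images fills the gallery.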

if __name__ == "__main__":
    iface.launch()