Sasiraj01 committed
Commit 7c9d574 · verified · 1 Parent(s): 4357f24

Create app.py

Files changed (1)
1. app.py +84 -0
app.py ADDED
@@ -0,0 +1,84 @@
+ import os
+
+ import gradio as gr
+ from PIL import Image
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.chains import LLMChain
+ from langchain.chat_models import ChatOpenAI
+ from langchain.prompts import PromptTemplate
+
+ # Load the OpenAI API key from the environment (set via Hugging Face Secrets).
+ openai_api_key = os.getenv("OPENAI_API_KEY", "your_openai_api_key")
+
+ # Load the FAISS vectorstore. The faiss package must be installed even
+ # though it is not imported directly here.
+ load_path = "faiss_index"
+ vectorstore = FAISS.load_local(
+     load_path,
+     OpenAIEmbeddings(openai_api_key=openai_api_key),
+     allow_dangerous_deserialization=True,
+ )
+
+ # Prompt template: retrieved context (text, tables, image descriptions) is
+ # injected into {context} ahead of the user's question.
+ prompt_template = """
+ You are an expert assistant. Answer based on the given context (text, tables, images).
+ Context: {context}
+
+ Question: {question}
+
+ If you cannot find relevant data, reply with: "Sorry, I don't have enough information."
+ Answer:
+ """
+
+ qa_chain = LLMChain(
+     llm=ChatOpenAI(model="gpt-4", openai_api_key=openai_api_key, max_tokens=1024),
+     prompt=PromptTemplate.from_template(prompt_template),
+ )
+
+ # Retrieve relevant documents, build the context string, and collect the
+ # file paths of any relevant images.
+ def answer(query):
+     relevant_docs = vectorstore.similarity_search(query)
+     context = ""
+     relevant_images = []
+
+     for doc in relevant_docs:
+         doc_type = doc.metadata.get("type")
+         if doc_type == "text":
+             context += "[text] " + doc.metadata["original_content"] + "\n"
+         elif doc_type == "table":
+             context += "[table] " + doc.metadata["original_content"] + "\n"
+         elif doc_type == "image":
+             # For images, page_content holds the embedded text description;
+             # original_content holds the image file path.
+             context += "[image] " + doc.page_content + "\n"
+             relevant_images.append(doc.metadata["original_content"])
+
+     response = qa_chain.run({"context": context, "question": query})
+
+     # Open the referenced images, skipping any that are missing or unreadable.
+     images = []
+     for img_path in relevant_images:
+         try:
+             images.append(Image.open(img_path))
+         except OSError:
+             pass
+
+     return response, images
+
+ # Gradio UI
+ def chatbot_interface(question):
+     response, images = answer(question)
+     return response, images if images else None
+
+ iface = gr.Interface(
+     fn=chatbot_interface,
+     inputs="text",
+     outputs=["text", "gallery"],
+     title="Text & Image Retrieval Chatbot",
+     description="Ask a question and get an answer with relevant images if available.",
+ )
+
+ iface.launch()
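
For reference, app.py assumes a prebuilt faiss_index directory whose documents carry "type" and "original_content" metadata. Below is a minimal sketch of how such an index might be built; the sample documents, summaries, and file paths are illustrative assumptions, not part of this commit.

import os

from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

openai_api_key = os.getenv("OPENAI_API_KEY")  # assumed to be set in the environment

# Hypothetical documents: page_content is what gets embedded and searched;
# the metadata keys match what app.py reads back out.
docs = [
    Document(
        page_content="Summary of a text chunk.",
        metadata={"type": "text", "original_content": "Full original text chunk..."},
    ),
    Document(
        page_content="Summary of a table.",
        metadata={"type": "table", "original_content": "<table>...</table>"},
    ),
    Document(
        page_content="Description of a chart image.",
        # Illustrative path; app.py opens this file with PIL at answer time.
        metadata={"type": "image", "original_content": "images/chart.png"},
    ),
]

vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings(openai_api_key=openai_api_key))
vectorstore.save_local("faiss_index")

Storing a searchable text summary as page_content while keeping the raw table or image path in metadata lets the similarity search run over text embeddings, while the app can still surface the original artifact to the LLM or the gallery.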