import os
import fitz
from docx import Document
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import pickle
import gradio as gr
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.embeddings import HuggingFaceEmbeddings
# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_path):
    text = ""
    try:
        doc = fitz.open(pdf_path)
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            text += page.get_text()
    except Exception as e:
        print(f"Error extracting text from PDF: {e}")
    return text
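# Example usage (hypothetical path, for illustration only):
#   text = extract_text_from_pdf("manual.pdf")  # returns "" and logs the error if the file can't be opened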
# Function to extract text from a Word document
def extract_text_from_docx(docx_path):
    text = ""
    try:
        doc = Document(docx_path)
        text = "\n".join([para.text for para in doc.paragraphs])
    except Exception as e:
        print(f"Error extracting text from DOCX: {e}")
    return text
# Initialize the embedding model
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
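# all-MiniLM-L6-v2 produces 384-dimensional sentence embeddings; the FAISS index
# created below must use this same dimension, which is why it is read from
# embedding_model.get_sentence_embedding_dimension() rather than hard-coded.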
# Hugging Face API token
api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
if not api_token:
    raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable is not set or invalid")
# Initialize the HuggingFace LLM
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/gpt2",  # Using the gpt2 model
    huggingfacehub_api_token=api_token,  # Pass the token directly; it is not a model kwarg
)
# Initialize the HuggingFace embeddings
embedding = HuggingFaceEmbeddings()
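# Note: this LangChain embeddings wrapper is currently unused; all encoding in
# this app goes through the SentenceTransformer model initialized above.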
# Load or create FAISS index
index_path = "faiss_index.pkl"
document_texts_path = "document_texts.pkl"
document_texts = []
index = None
if os.path.exists(index_path) and os.path.exists(document_texts_path):
    try:
        with open(index_path, "rb") as f:
            index = pickle.load(f)
        print("Loaded FAISS index from faiss_index.pkl")
        with open(document_texts_path, "rb") as f:
            document_texts = pickle.load(f)
        print("Loaded document texts from document_texts.pkl")
    except Exception as e:
        print(f"Error loading FAISS index or document texts: {e}")
if index is None:
    # Create a new FAISS index if one doesn't exist or couldn't be loaded
    index = faiss.IndexFlatL2(embedding_model.get_sentence_embedding_dimension())
    with open(index_path, "wb") as f:
        pickle.dump(index, f)
    print("Created new FAISS index and saved to faiss_index.pkl")
def preprocess_text(text):
    # Add more preprocessing steps here if necessary
    return text.strip()
def upload_files(files):
    global index, document_texts
    try:
        for file in files:
            file_path = file.name  # Get the file path from the Gradio file object
            if file_path.endswith('.pdf'):
                text = extract_text_from_pdf(file_path)
            elif file_path.endswith('.docx'):
                text = extract_text_from_docx(file_path)
            else:
                return "Unsupported file format"
            print(f"Extracted text: {text[:100]}...")  # Debug: show the first 100 characters of the extracted text
            # Split the text into sentences and update the FAISS index
            sentences = text.split("\n")
            sentences = [preprocess_text(sentence) for sentence in sentences if sentence.strip()]
            embeddings = embedding_model.encode(sentences)
            print(f"Embeddings shape: {embeddings.shape}")  # Debug: show the shape of the embeddings
            index.add(np.array(embeddings))
            document_texts.extend(sentences)  # Store sentences for retrieval
        # Save the updated index and documents
        with open(index_path, "wb") as f:
            pickle.dump(index, f)
        print("Saved updated FAISS index to faiss_index.pkl")
        with open(document_texts_path, "wb") as f:
            pickle.dump(document_texts, f)
        print("Saved updated document texts to document_texts.pkl")
        return "Files processed successfully"
    except Exception as e:
        print(f"Error processing files: {e}")
        return f"Error processing files: {e}"
# Single prompt template reused for every question
prompt_template = """
Answer the question based on the provided context.
If the answer is not in the provided context, just say, "answer is not available in the context".
Don't provide a wrong answer.

Context:
{context}

Question:
{question}

Answer:
"""
def query_text(text):
    try:
        print(f"Query text: {text}")  # Debug: show the query text
        # Encode the query text
        query_embedding = embedding_model.encode([text])
        print(f"Query embedding shape: {query_embedding.shape}")  # Debug: show the shape of the query embedding
        # Search the FAISS index
        D, I = index.search(np.array(query_embedding), k=5)
        print(f"Distances: {D}, Indices: {I}")  # Debug: show the distances and indices of the search results
        top_documents = []
        for idx in I[0]:
            if idx != -1 and idx < len(document_texts):  # Ensure a valid index was found
                top_documents.append(document_texts[idx])  # Append the matching sentences for the response
            else:
                print(f"Invalid index found: {idx}")
        # Remove duplicates while preserving retrieval order (dict.fromkeys keeps first occurrence)
        top_documents = list(dict.fromkeys(top_documents))
        # Join the top documents into the context
        context = "\n".join(top_documents)
        # Prepare the prompt
        prompt = prompt_template.format(context=context, question=text)
        # Query the LLM
        response = llm.invoke(prompt)
        return response
    except Exception as e:
        print(f"Error querying text: {e}")
        return f"Error querying text: {e}"
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Document Upload and Query System")
    with gr.Tab("Upload Files"):
        upload = gr.File(file_count="multiple", label="Upload PDF or DOCX files")
        upload_button = gr.Button("Upload")
        upload_output = gr.Textbox()
        upload_button.click(fn=upload_files, inputs=upload, outputs=upload_output)
    with gr.Tab("Query"):
        query = gr.Textbox(label="Enter your query")
        query_button = gr.Button("Search")
        query_output = gr.Textbox()
        query_button.click(fn=query_text, inputs=query, outputs=query_output)

demo.launch()