Spaces:
Build error
Build error
File size: 5,630 Bytes
1649416 944d263 80e4cb4 90bf4dc 784183f 944d263 834c71a 944d263 56ec544 944d263 834c71a 944d263 8d35da0 944d263 9ce0b96 944d263 9ce0b96 944d263 ed7a2c6 8d35da0 944d263 834c71a ac5f15c 944d263 ac5f15c 944d263 834c71a 944d263 834c71a 944d263 ac5f15c 944d263 8d35da0 7adb197 944d263 90bf4dc 7adb197 ac5f15c 944d263 7adb197 944d263 7adb197 ac5f15c 7adb197 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 90bf4dc 7adb197 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 ac5f15c 944d263 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 |
import os
import fitz
from docx import Document
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import faiss
import numpy as np
import pickle
import gradio as gr
from typing import List
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_path):
    """Extract the text of every page in a PDF file.

    Args:
        pdf_path: Filesystem path to the PDF.

    Returns:
        Concatenated page text; "" (or a partial prefix) if extraction fails.
        Errors are printed, never raised, so callers always get a string.
    """
    text = ""
    try:
        # BUG FIX: use the document as a context manager so the file handle
        # is closed even on error — the original leaked the fitz.Document.
        with fitz.open(pdf_path) as doc:
            for page_num in range(len(doc)):
                page = doc.load_page(page_num)
                text += page.get_text()
    except Exception as e:
        print(f"Error extracting text from PDF: {e}")
    return text
# Function to extract text from a Word document
def extract_text_from_docx(docx_path):
    """Return the newline-joined paragraph text of a .docx file.

    On any failure the error is printed and "" is returned, so the
    caller always receives a string.
    """
    try:
        paragraphs = Document(docx_path).paragraphs
        return "\n".join(para.text for para in paragraphs)
    except Exception as e:
        print(f"Error extracting text from DOCX: {e}")
        return ""
# ---------------------------------------------------------------------------
# Model setup
# ---------------------------------------------------------------------------

# Sentence-embedding model used to vectorize both documents and queries.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# Hugging Face API token — required by the hosted inference endpoint below.
api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
if not api_token:
    raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable is not set")

# Seq2seq generator/retriever pair loaded from the Hugging Face hub.
generator_model_name = "facebook/bart-base"
retriever_model_name = "facebook/bart-base"
generator = AutoModelForSeq2SeqLM.from_pretrained(generator_model_name)
generator_tokenizer = AutoTokenizer.from_pretrained(generator_model_name)
retriever = AutoModelForSeq2SeqLM.from_pretrained(retriever_model_name)
retriever_tokenizer = AutoTokenizer.from_pretrained(retriever_model_name)

# Hosted LLM (gpt2 inference endpoint) accessed through LangChain.
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/gpt2",
    model_kwargs={"api_key": api_token},
)

# LangChain-compatible embedding wrapper (defaults to its standard model).
embedding = HuggingFaceEmbeddings()
# FAISS index and storage paths
index_path = "faiss_index.pkl"
document_texts_path = "document_texts.pkl"
document_texts = []
index = None  # populated below (either loaded from disk or freshly created)

# Load or create FAISS index using cosine similarity (Inner Product + Normalized vectors)
if os.path.exists(index_path) and os.path.exists(document_texts_path):
    try:
        # NOTE(review): pickle.load on local cache files — safe only as long
        # as these files are written exclusively by this app.
        with open(index_path, "rb") as f:
            index = pickle.load(f)
        print("Loaded FAISS index from faiss_index.pkl")
        with open(document_texts_path, "rb") as f:
            document_texts = pickle.load(f)
        print("Loaded document texts from document_texts.pkl")
    except Exception as e:
        # BUG FIX: the original left `index` undefined after a failed load,
        # causing a NameError on first search/add. Reset both and fall
        # through to create a fresh index instead.
        print(f"Error loading FAISS index or document texts: {e}")
        index = None
        document_texts = []

if index is None:
    # Inner-product index; vectors are normalized at encode time, so the
    # inner product is equivalent to cosine similarity.
    index = faiss.IndexFlatIP(embedding_model.get_sentence_embedding_dimension())
    with open(index_path, "wb") as f:
        pickle.dump(index, f)
    print("Created new FAISS index and saved to faiss_index.pkl")
def upload_files(files):
    """Gradio handler: embed uploaded PDF/DOCX files into the FAISS index.

    Args:
        files: List of Gradio file objects (each exposes a ``.name`` path).

    Returns:
        A status string shown in the UI; errors are caught and reported
        as text rather than raised.
    """
    global index, document_texts
    try:
        for file in files:
            file_path = file.name
            # BUG FIX: compare extensions case-insensitively so ".PDF" /
            # ".DOCX" uploads are not rejected.
            lower_path = file_path.lower()
            if lower_path.endswith('.pdf'):
                text = extract_text_from_pdf(file_path)
            elif lower_path.endswith('.docx'):
                text = extract_text_from_docx(file_path)
            else:
                return "Unsupported file format"
            print(f"Extracted text: {text[:100]}...")

            # BUG FIX: drop blank lines — the original embedded empty
            # strings into the index, polluting search results.
            sentences = [s for s in text.split("\n") if s.strip()]
            if not sentences:
                continue  # nothing embeddable in this file

            # normalize_embeddings=True => inner product == cosine similarity
            embeddings = embedding_model.encode(sentences, normalize_embeddings=True)
            print(f"Embeddings shape: {embeddings.shape}")
            index.add(np.array(embeddings))
            document_texts.extend(sentences)

            # Persist the updated index and texts after each file so earlier
            # files survive a failure on a later one.
            with open(index_path, "wb") as f:
                pickle.dump(index, f)
            print("Saved updated FAISS index to faiss_index.pkl")
            with open(document_texts_path, "wb") as f:
                pickle.dump(document_texts, f)
            print("Saved updated document texts to document_texts.pkl")
        return "Files processed successfully"
    except Exception as e:
        print(f"Error processing files: {e}")
        return f"Error processing files: {e}"
def query_text(text):
    """Gradio handler: return the top-5 stored sentences most similar to *text*.

    Matches are newline-separated; invalid FAISS hits (-1 padding or
    out-of-range ids) are skipped. Errors are caught and returned as text.
    """
    try:
        print(f"Query text: {text}")
        # Normalized query vector so inner-product search == cosine similarity.
        query_embedding = embedding_model.encode([text], normalize_embeddings=True)
        print(f"Query embedding shape: {query_embedding.shape}")
        D, I = index.search(np.array(query_embedding), k=5)
        print(f"Distances: {D}, Indices: {I}")

        hits = []
        for idx in I[0]:
            # FAISS pads with -1 when fewer than k vectors exist.
            if idx != -1 and idx < len(document_texts):
                hits.append(document_texts[idx])
            else:
                print(f"Invalid index found: {idx}")
        return "\n\n".join(hits)
    except Exception as e:
        print(f"Error querying text: {e}")
        return f"Error querying text: {e}"
# Gradio Interface: one tab to ingest documents, one tab to search them.
with gr.Blocks() as demo:
    gr.Markdown("## Document Upload and Query System with Cosine Similarity")

    with gr.Tab("Upload Files"):
        file_input = gr.File(file_count="multiple", label="Upload PDF or DOCX files")
        upload_btn = gr.Button("Upload")
        upload_status = gr.Textbox()
        upload_btn.click(fn=upload_files, inputs=file_input, outputs=upload_status)

    with gr.Tab("Query"):
        query_box = gr.Textbox(label="Enter your query")
        search_btn = gr.Button("Search")
        results_box = gr.Textbox()
        search_btn.click(fn=query_text, inputs=query_box, outputs=results_box)

demo.launch()
|