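"""Document upload and query demo.

Extracts text from uploaded PDF/DOCX files, embeds it line by line with
sentence-transformers, stores the vectors in a FAISS index, and serves a
Gradio UI for nearest-neighbour retrieval.
"""
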
import os
import pickle

import faiss
import fitz  # PyMuPDF
import gradio as gr
import numpy as np
from docx import Document
from sentence_transformers import SentenceTransformer
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.embeddings import HuggingFaceEmbeddings

# Function to extract text from a PDF file using PyMuPDF
def extract_text_from_pdf(pdf_path):
    text = ""
    with fitz.open(pdf_path) as doc:
        for page in doc:
            text += page.get_text()
    return text

# Function to extract text from a Word document (body paragraphs only;
# python-docx does not surface text inside tables, headers, or footers here)
def extract_text_from_docx(docx_path):
    doc = Document(docx_path)
    return "\n".join(para.text for para in doc.paragraphs)

# Initialize the embedding model (all-MiniLM-L6-v2 produces 384-dim vectors;
# the FAISS index below takes its dimension from the model)
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# Hugging Face API token
api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
if not api_token:
    raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable is not set")

# Initialize the HuggingFace LLM (the token must be passed as
# huggingfacehub_api_token, not inside model_kwargs, which is sent
# to the model as generation parameters)
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/gpt2",
    huggingfacehub_api_token=api_token,
)

# Initialize the HuggingFace embeddings (LangChain wrapper; unused by the
# retrieval flow below, which calls the SentenceTransformer directly)
embedding = HuggingFaceEmbeddings()

# Load or create the FAISS index. The index is persisted with faiss's own
# serializer (pickling the raw index object fails on many faiss builds);
# plain pickle is only used for the parallel list of texts.
index_path = "faiss_index.bin"
document_texts_path = "document_texts.pkl"

if os.path.exists(index_path):
    index = faiss.read_index(index_path)
    print(f"Loaded FAISS index from {index_path}")
    if os.path.exists(document_texts_path):
        with open(document_texts_path, "rb") as f:
            document_texts = pickle.load(f)
        print(f"Loaded document texts from {document_texts_path}")
    else:
        document_texts = []
else:
    # Create a new FAISS index if one doesn't exist yet
    index = faiss.IndexFlatL2(embedding_model.get_sentence_embedding_dimension())
    document_texts = []
    faiss.write_index(index, index_path)
    print(f"Created new FAISS index and saved to {index_path}")

def upload_files(files):
    global index, document_texts
    for file in files:
        # Gradio hands each upload over as a temp file; .name is its path,
        # so the file can be read in place without copying it to temp.pdf
        file_path = file.name
        if file_path.endswith('.pdf'):
            text = extract_text_from_pdf(file_path)
        elif file_path.endswith('.docx'):
            text = extract_text_from_docx(file_path)
        else:
            return "Unsupported file format"

        # Index one embedding per non-empty line so FAISS ids line up
        # one-to-one with entries in document_texts (appending the whole
        # document as a single entry would break this alignment)
        sentences = [s for s in text.split("\n") if s.strip()]
        embeddings = embedding_model.encode(sentences)
        index.add(np.array(embeddings, dtype="float32"))
        document_texts.extend(sentences)

    # Persist the updated index and texts
    faiss.write_index(index, index_path)
    print(f"Saved updated FAISS index to {index_path}")
    with open(document_texts_path, "wb") as f:
        pickle.dump(document_texts, f)
    print(f"Saved updated document texts to {document_texts_path}")

    return "Files processed successfully"

def query_text(text):
    # Encode the query text
    query_embedding = embedding_model.encode([text])

    # Search the FAISS index for the 5 nearest neighbours
    D, I = index.search(np.array(query_embedding, dtype="float32"), k=5)

    top_documents = []
    for idx in I[0]:
        if idx != -1 and idx < len(document_texts):  # skip padding/stale ids
            top_documents.append(document_texts[idx])
        else:
            print(f"Invalid index found: {idx}")

    # Join the hits into one string so they render in the Textbox output
    return "\n\n".join(top_documents)
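
# Possible next step (not wired into the UI): feed the retrieved passages to
# the LLM defined above. A minimal sketch, assuming the inference endpoint is
# reachable and a plain completion prompt suits the chosen model; the
# function name answer_with_llm is illustrative, not part of the app.
def answer_with_llm(question):
    context = query_text(question)
    prompt = f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"
    return llm.invoke(prompt)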

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Document Upload and Query System")
    
    with gr.Tab("Upload Files"):
        upload = gr.File(file_count="multiple", label="Upload PDF or DOCX files")
        upload_button = gr.Button("Upload")
        upload_output = gr.Textbox()
        upload_button.click(fn=upload_files, inputs=upload, outputs=upload_output)
    
    with gr.Tab("Query"):
        query = gr.Textbox(label="Enter your query")
        query_button = gr.Button("Search")
        query_output = gr.Textbox()
        query_button.click(fn=query_text, inputs=query, outputs=query_output)

demo.launch()
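
# To run locally (assuming this script is saved as app.py, a name chosen
# here for illustration):
#   export HUGGINGFACEHUB_API_TOKEN=<your token>
#   python app.py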