NaimaAqeel committed on
Commit
c770340
·
verified ·
1 Parent(s): e21b098

Delete main.py

Browse files
Files changed (1) hide show
  1. main.py +0 -121
main.py DELETED
@@ -1,121 +0,0 @@
1
- import os
2
- import fitz # PyMuPDF
3
- from docx import Document
4
- from sentence_transformers import SentenceTransformer
5
- import faiss
6
- import numpy as np
7
- import pickle
8
- from langchain_community.llms import HuggingFaceEndpoint
9
- from langchain_community.vectorstores import FAISS
10
- from langchain_community.embeddings import HuggingFaceEmbeddings
11
-
12
# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_path):
    """Return the concatenated text of every page of the PDF at *pdf_path*.

    Fix: the original never closed the document handle; a ``with`` block now
    guarantees the file is released even if text extraction raises.
    """
    text = ""
    # fitz.Document supports the context-manager protocol (PyMuPDF).
    with fitz.open(pdf_path) as doc:
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            text += page.get_text()
    return text
20
-
21
# Function to extract text from a Word document
def extract_text_from_docx(docx_path):
    """Return the full text of the .docx file at *docx_path*,
    joining paragraphs with newlines."""
    paragraphs = Document(docx_path).paragraphs
    return "\n".join(paragraph.text for paragraph in paragraphs)
26
-
27
# Initialize the embedding model
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# Path to the document (can be either a single file or a directory)
docs_path = "C:\\Users\\MOD\\chatbot\\Should companies implement a four.docx"

documents = []   # filenames of successfully loaded documents
doc_texts = []   # extracted text, parallel to `documents`


def _extract_supported(file_path):
    """Return the extracted text of *file_path* if its extension is a
    supported type (.pdf / .docx), else None.

    Fix: the extension check is case-insensitive, so '.PDF'/'.DOCX' files
    are no longer silently skipped; the helper also removes the duplicated
    dispatch logic from the directory and single-file branches below.
    """
    lower = file_path.lower()
    if lower.endswith(".pdf"):
        return extract_text_from_pdf(file_path)
    if lower.endswith(".docx"):
        return extract_text_from_docx(file_path)
    return None


if os.path.isdir(docs_path):
    # Iterate through all files in the directory
    for filename in os.listdir(docs_path):
        file_path = os.path.join(docs_path, filename)
        text = _extract_supported(file_path)
        if text is not None:
            documents.append(filename)
            doc_texts.append(text)
elif os.path.isfile(docs_path):
    # Process a single file
    text = _extract_supported(docs_path)
    if text is not None:
        documents.append(os.path.basename(docs_path))
        doc_texts.append(text)
else:
    print("Invalid path specified. Please provide a valid file or directory path.")
60
-
61
# Generate embeddings for the document texts
if not doc_texts:
    # Fail early with a clear message instead of an opaque IndexError on
    # embeddings.shape[1] below when nothing was loaded.
    raise ValueError("No documents were loaded; cannot build a FAISS index.")

embeddings = embedding_model.encode(doc_texts)

# Create a FAISS index
d = embeddings.shape[1]  # Dimension of the embeddings
index = faiss.IndexFlatL2(d)  # L2 distance metric
index.add(np.array(embeddings))  # Add embeddings to the index

# Save the FAISS index and metadata
index_path = "faiss_index"
# exist_ok=True replaces the racy exists()+makedirs pair (TOCTOU).
os.makedirs(index_path, exist_ok=True)

faiss.write_index(index, os.path.join(index_path, "index.faiss"))

# Save the document metadata to a file for retrieval purposes.
# Explicit UTF-8 so non-ASCII filenames round-trip on any platform.
with open(os.path.join(index_path, "documents.txt"), "w", encoding="utf-8") as f:
    for doc in documents:
        f.write("%s\n" % doc)

# Save additional metadata (filenames plus their embedding matrix)
metadata = {
    "documents": documents,
    "embeddings": embeddings
}
with open(os.path.join(index_path, "index.pkl"), "wb") as f:
    pickle.dump(metadata, f)

print("FAISS index and documents saved.")
90
-
91
# Reload the FAISS index and metadata that were just persisted to disk.
faiss_file = os.path.join(index_path, "index.faiss")
pickle_file = os.path.join(index_path, "index.pkl")

index = faiss.read_index(faiss_file)
with open(pickle_file, "rb") as f:
    metadata = pickle.load(f)
documents = metadata["documents"]
embeddings = metadata["embeddings"]

# Retrieve the API token from the environment variable
api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')
if api_token is None:
    raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable is not set")

# NOTE(review): printing even a token prefix leaks secret material into logs;
# consider removing this once the setup is verified.
print(f"API Token: {api_token[:5]}...")  # Print the first 5 characters of the token for verification

# Initialize the LLM
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/gpt2",
    model_kwargs={"api_key": api_token}
)
110
-
111
# Function to perform a search query
def search(query, k=5):
    """Return the filenames of the (up to) *k* documents closest to *query*.

    Fix: when k exceeds the number of indexed vectors, FAISS pads the result
    with -1 placeholders; the original list lookup silently mapped those to
    documents[-1].  Negative indices are now dropped.
    """
    query_embedding = embedding_model.encode([query])
    D, I = index.search(np.array(query_embedding), k)
    return [documents[i] for i in I[0] if i >= 0]
117
-
118
# Example query — guarded so importing this module does not also fire the
# demo search (standard script-entry idiom).
if __name__ == "__main__":
    query = "What is the impact of a four-day work week?"
    results = search(query)
    print("Top documents:", results)