cogcorp committed on
Commit
47125e3
·
1 Parent(s): 8e836ec
Files changed (1) hide show
  1. cgpt.py +263 -0
cgpt.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import re
import shutil
import urllib.request
from tempfile import NamedTemporaryFile

import fitz
import gradio as gr
import numpy as np
import openai
import tensorflow_hub as hub
from sklearn.neighbors import NearestNeighbors
# SECURITY FIX: the original hard-coded an OpenAI secret key in source
# control (a leaked credential). Read it from the environment instead;
# set OPENAI_API_KEY before launching the app.
openAI_key = os.environ.get("OPENAI_API_KEY", "")
16
def download_pdf(url, output_path):
    """Download the document at *url* to *output_path*.

    BUG FIX: this function used ``urllib`` but the module never imported
    it, so every call raised NameError; ``urllib.request`` is now pulled
    in by the import block at the top of the file.
    """
    urllib.request.urlretrieve(url, output_path)
18
+
19
+
20
def preprocess(text):
    """Flatten extracted PDF text: newlines become spaces, whitespace runs collapse to one space.

    FIX: the whitespace pattern is now a raw string; ``'\\s'`` inside a
    normal string literal is an invalid escape sequence and triggers a
    SyntaxWarning/DeprecationWarning on modern Python versions.
    """
    text = text.replace('\n', ' ')
    return re.sub(r'\s+', ' ', text)
24
+
25
+
26
def pdf_to_text(path, start_page=1, end_page=None):
    """Extract and preprocess the text of each page in a PDF.

    Args:
        path: Filesystem path of the PDF to read.
        start_page: 1-based first page to extract (default: first page).
        end_page: 1-based last page, inclusive; ``None`` means the final page.

    Returns:
        A list with one preprocessed text string per extracted page.
    """
    document = fitz.open(path)
    last_page = document.page_count if end_page is None else end_page

    pages = []
    for page_index in range(start_page - 1, last_page):
        raw = document.load_page(page_index).get_text("text")
        pages.append(preprocess(raw))

    document.close()
    return pages
42
+
43
+
44
def text_to_chunks(texts, word_length=150, start_page=1):
    """Split per-page texts into ~*word_length*-word chunks tagged with page numbers.

    A too-short trailing chunk of a page is prepended to the next page's
    words instead of being emitted, except on the very last page, so that
    chunks do not end on tiny fragments.

    Returns a list of strings of the form ``[<page>] "<chunk text>"``.
    """
    tokenized = [page.split(' ') for page in texts]
    chunks = []

    for page_idx, words in enumerate(tokenized):
        for offset in range(0, len(words), word_length):
            piece = words[offset:offset + word_length]
            short_tail = (offset + word_length) > len(words) and len(piece) < word_length
            if short_tail and len(tokenized) != page_idx + 1:
                # Carry the short tail over into the next page's token list.
                tokenized[page_idx + 1] = piece + tokenized[page_idx + 1]
                continue
            body = ' '.join(piece).strip()
            chunks.append(f'[{page_idx + start_page}] "{body}"')
    return chunks
60
+
61
+
62
+
63
class SemanticSearch:
    """Nearest-neighbour text search over Universal Sentence Encoder embeddings."""

    def __init__(self):
        # Deep Averaging Network encoder from TF-Hub (downloaded on first use).
        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
        self.fitted = False

    def fit(self, data, batch=1000, n_neighbors=5):
        """Embed *data* (a list of strings) and build the neighbour index."""
        self.data = data
        self.embeddings = self.get_text_embedding(data, batch=batch)
        # Cannot request more neighbours than there are embedded items.
        k = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=k)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        """Return the stored items (or their indices) closest to *text*."""
        query_embedding = self.use([text])
        nearest = self.nn.kneighbors(query_embedding, return_distance=False)[0]
        if return_data:
            return [self.data[i] for i in nearest]
        return nearest

    def get_text_embedding(self, texts, batch=1000):
        """Encode *texts* in batches and stack the results into one array."""
        parts = [self.use(texts[start:start + batch])
                 for start in range(0, len(texts), batch)]
        return np.vstack(parts)
97
+
98
+
99
+
100
+ #def load_recommender(path, start_page=1):
101
+ # global recommender
102
+ # texts = pdf_to_text(path, start_page=start_page)
103
+ # chunks = text_to_chunks(texts, start_page=start_page)
104
+ # recommender.fit(chunks)
105
+ # return 'Corpus Loaded.'
106
+
107
+ # The modified function generates embeddings based on PDF file name and page number and checks if the embeddings file exists before loading or generating it.
108
+
109
def load_recommender(path, start_page=1):
    """Fit the global recommender on the PDF at *path*, caching embeddings on disk.

    The cache file is keyed by PDF basename and start page.

    BUG FIX: the original cache-hit path set only ``recommender.embeddings``
    and ``recommender.fitted``, leaving ``recommender.data`` and the
    NearestNeighbors index unset, so any subsequent query crashed with
    AttributeError. We now always rebuild the chunk list and the NN index,
    reusing the cached embeddings only when they match the chunk count.
    """
    global recommender
    pdf_file = os.path.basename(path)
    embeddings_file = f"{pdf_file}_{start_page}.npy"

    texts = pdf_to_text(path, start_page=start_page)
    chunks = text_to_chunks(texts, start_page=start_page)

    if os.path.isfile(embeddings_file):
        embeddings = np.load(embeddings_file)
        if len(embeddings) == len(chunks):  # ignore a stale/mismatched cache
            recommender.data = chunks
            recommender.embeddings = embeddings
            # Mirror SemanticSearch.fit(): default 5 neighbours, capped by size.
            recommender.nn = NearestNeighbors(n_neighbors=min(5, len(embeddings)))
            recommender.nn.fit(embeddings)
            recommender.fitted = True
            return "Embeddings loaded from file"

    recommender.fit(chunks)
    np.save(embeddings_file, recommender.embeddings)
    return 'Corpus Loaded.'
125
+
126
+
127
+
128
def generate_text(openAI_key, prompt, engine="text-davinci-003"):
    """Send *prompt* to the OpenAI completions endpoint and return the generated text."""
    openai.api_key = openAI_key
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text
140
+
141
def process_file(file):
    """Persist an uploaded file object to a temporary ``.pdf`` and return its path.

    The temp file is created with ``delete=False`` so it survives after
    close; the caller owns cleanup of the returned path.
    """
    tmp = NamedTemporaryFile(delete=False, suffix='.pdf')
    # Assumes the upload object exposes a .save(path) method — TODO confirm caller.
    file.save(tmp.name)
    tmp.close()
    return tmp.name
146
+
147
+
148
+
149
def generate_text2(openAI_key, prompt, engine="text-davinci-003"):
    """Answer *prompt* via the OpenAI chat-completions endpoint and return the reply text."""
    openai.api_key = openAI_key
    chat = openai.ChatCompletion.create(
        model=engine,
        messages=[
            {'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': prompt},
        ],
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return chat.choices[0].message['content']
164
+
165
def generate_answer(question, openAI_key):
    """Answer *question* using the fitted global recommender plus OpenAI.

    Retrieves the top matching chunks via semantic search, builds an
    instruction prompt grounded in those chunks, and asks the completion
    model for a concise answer.

    BUG FIX: the original instructions string embedded a literal,
    never-formatted ``{question}`` placeholder and then appended the real
    query a second time, so the model saw a bogus ``Query: {question}``
    line before the actual query. The unformatted placeholder is removed.
    """
    topn_chunks = recommender(question)
    prompt = 'search results:\n\n'
    for c in topn_chunks:
        prompt += c + '\n\n'

    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
              "Make sure the answer is correct and don't output false content. "\
              "answer should be short and concise. Answer step-by-step. \n\n"

    prompt += f"Query: {question}\nAnswer:"
    return generate_text(openAI_key, prompt, "text-davinci-003")
179
+
180
def unique_filename(file_name):
    """Return *file_name* unchanged if unused, else ``<name>_<k><ext>`` for the first free k."""
    base, ext = os.path.splitext(file_name)
    candidate = file_name
    suffix = 1
    while os.path.isfile(candidate):
        candidate = f"{base}_{suffix}{ext}"
        suffix += 1
    return candidate
188
+
189
+
190
def question_answer(url, file, question, openAI_key):
    """Gradio handler: build a corpus from a URL or an uploaded PDF, then answer.

    Exactly one of *url* / *file* must be supplied; returns a tuple
    ``(answer_text, continue_flag)`` where the flag is False on error or
    when the user types 'exit'.

    FIXES: ``== None`` / ``!= None`` comparisons replaced with the
    idiomatic ``is None`` / ``is not None``; unused ``glob_url`` local
    removed.
    """
    if url.strip() == '' and file is None:
        return '[ERROR]: Both URL and PDF is empty. Provide at least one.', False

    if url.strip() != '' and file is not None:
        return '[ERROR]: Both URL and PDF is provided. Please provide only one (either URL or PDF).', False

    if url.strip() != '':
        download_pdf(url, 'corpus.pdf')
        load_recommender('corpus.pdf')
    else:
        old_file_name = file.name
        # NOTE(review): strips a 12-character suffix before the extension —
        # presumably gradio's temp-file naming; confirm before changing.
        file_name = old_file_name[:-12] + old_file_name[-4:]
        file_name = unique_filename(file_name)  # ensure the new name is unique

        # Copy the upload to the cleaned-up name, then drop the temp file.
        with open(old_file_name, 'rb') as src, open(file_name, 'wb') as dst:
            shutil.copyfileobj(src, dst)
        os.remove(old_file_name)

        load_recommender(file_name)

    if question.strip().lower() == 'exit':
        return '', False

    return generate_answer(question, openAI_key), True
220
+
221
+
222
def main_loop(url: str, file: str, question: str):
    """Thin wrapper binding the module-level API key into question_answer().

    Returns the same (answer, continue_flag) tuple as question_answer().
    """
    return question_answer(url, file, question, openAI_key)
225
+
226
+
227
def on_click(*args):
    """Legacy click handler (appears unused: btn.click wires main_loop directly).

    BUG FIX: main_loop returns an ``(answer, continue_flag)`` tuple; the
    original stored the whole tuple in ``answer.value`` instead of just
    the answer text.
    """
    text, _ = main_loop(url.value, file.value, question.value)
    answer.value = text
229
+
230
+
231
# Module-level singleton shared by load_recommender() and generate_answer().
# NOTE: constructing it loads the TF-Hub encoder at import time (may download).
recommender = SemanticSearch()

title = 'Cognitive pdfGPT'
description = """ What is Cognitive pdfGPT?
The problem is that OpenAI has a 4K token limit and cannot process an entire PDF file as input. Additionally, ChatGPT cannot (as of yet) directly talk to external data. The solution is Cognitive pdfGPT, which allows you to chat with your PDF file using GPT functionalities. The application breaks the document into smaller files and generates embeddings using a powerful Deep Averaging Network Encoder. A semantic search is performed on your data, and the top relevant results are used to generate a response. *DO NOT USE CONFIDENTIAL INFORMATION* """


# Gradio UI: URL-or-upload plus a question box on the left, the answer on the right.
with gr.Blocks() as demo:

    gr.Markdown(f'<center><h1>{title}</h1></center>')
    gr.Markdown(description)

    with gr.Row():

        with gr.Group():
            url=gr.Textbox(label='Enter PDF URL here')
            gr.Markdown("<center><h3>OR<h3></center>")
            file=gr.File(label='Upload your PDF/ Research Paper * NO CONFIDENTIAL FILES SHOULD BE USED *', file_types=['.pdf'])
            question=gr.Textbox(label='Enter your question here')
            btn=gr.Button(value='Submit')
            # NOTE(review): Button.style() is deprecated/removed in newer gradio
            # releases — confirm the pinned gradio version supports it.
            btn.style(full_width=False)

        with gr.Group():
            answer = gr.Textbox(label='The answer to your question is :')
            gr.Image("logo.jpg")
            # NOTE(review): main_loop returns (answer, cont) but only one output
            # component is wired here — verify gradio handles the extra value.
            btn.click(main_loop, inputs=[url, file, question], outputs=[answer])


demo.launch()