import os
import re
import shutil
import urllib.request
from tempfile import NamedTemporaryFile

import fitz  # PyMuPDF
import gradio as gr
import numpy as np
import openai
import tensorflow_hub as hub
from sklearn.neighbors import NearestNeighbors

# The OpenAI API key is read from the 'OpenAPI' environment variable.
openAI_key = os.environ['OpenAPI']


def download_pdf(url, output_path):
    urllib.request.urlretrieve(url, output_path)


def preprocess(text):
    # Collapse line breaks and runs of whitespace into single spaces.
    text = text.replace('\n', ' ')
    text = re.sub(r'\s+', ' ', text)
    return text


def pdf_to_text(path, start_page=1, end_page=None):
    doc = fitz.open(path)
    total_pages = doc.page_count
    if end_page is None:
        end_page = total_pages
    text_list = []
    for i in range(start_page - 1, end_page):
        text = doc.load_page(i).get_text("text")
        text = preprocess(text)
        text_list.append(text)
    doc.close()
    return text_list


def text_to_chunks(texts, word_length=150, start_page=1):
    text_toks = [t.split(' ') for t in texts]
    chunks = []
    for idx, words in enumerate(text_toks):
        for i in range(0, len(words), word_length):
            chunk = words[i:i + word_length]
            # If the tail of a page is shorter than word_length, carry it over
            # to the next page instead of emitting a fragment.
            if (i + word_length) > len(words) and (len(chunk) < word_length) and (
                    len(text_toks) != (idx + 1)):
                text_toks[idx + 1] = chunk + text_toks[idx + 1]
                continue
            chunk = ' '.join(chunk).strip()
            chunk = f'[{idx + start_page}]' + ' ' + '"' + chunk + '"'
            chunks.append(chunk)
    return chunks


class SemanticSearch:

    def __init__(self):
        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
        self.fitted = False

    def fit(self, data, batch=1000, n_neighbors=5):
        self.data = data
        self.embeddings = self.get_text_embedding(data, batch=batch)
        n_neighbors = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        inp_emb = self.use([text])
        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
        if return_data:
            return [self.data[i] for i in neighbors]
        else:
            return neighbors

    def get_text_embedding(self, texts, batch=1000):
        embeddings = []
        for i in range(0, len(texts), batch):
            text_batch = texts[i:(i + batch)]
            emb_batch = self.use(text_batch)
            embeddings.append(emb_batch)
        embeddings = np.vstack(embeddings)
        return embeddings


# def load_recommender(path, start_page=1):
#     global recommender
#     texts = pdf_to_text(path, start_page=start_page)
#     chunks = text_to_chunks(texts, start_page=start_page)
#     recommender.fit(chunks)
#     return 'Corpus Loaded.'

# The modified function caches embeddings in a .npy file named after the PDF
# file and start page, and loads the cache instead of re-embedding when it
# already exists. The chunks and the nearest-neighbour index are not part of
# the cache, so they are rebuilt on every load; only the expensive embedding
# step is skipped.
def load_recommender(path, start_page=1):
    global recommender
    pdf_file = os.path.basename(path)
    embeddings_file = f"{pdf_file}_{start_page}.npy"
    texts = pdf_to_text(path, start_page=start_page)
    chunks = text_to_chunks(texts, start_page=start_page)
    if os.path.isfile(embeddings_file):
        recommender.data = chunks
        recommender.embeddings = np.load(embeddings_file)
        recommender.nn = NearestNeighbors(n_neighbors=min(5, len(recommender.embeddings)))
        recommender.nn.fit(recommender.embeddings)
        recommender.fitted = True
        return "Embeddings loaded from file"
    recommender.fit(chunks)
    np.save(embeddings_file, recommender.embeddings)
    return 'Corpus Loaded.'
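# Illustrative sketch (kept commented out so the app's flow is unchanged): the
# retrieval layer above can be exercised on its own, without the UI or any
# OpenAI call. The chunk strings here are invented for the example.
#
#   searcher = SemanticSearch()
#   searcher.fit(['[1] "the cat sat on the mat"',
#                 '[1] "stocks fell sharply today"'], n_neighbors=1)
#   searcher('financial news')  # expected to return the stocks chunk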
def generate_text(openAI_key, prompt, engine="text-davinci-003"):
    openai.api_key = openAI_key
    completions = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    message = completions.choices[0].text
    return message


# Saves an uploaded file to a temporary .pdf. Expects an object with a
# Flask-style .save() method; the Gradio flow below does not use this helper.
def process_file(file):
    temp_file = NamedTemporaryFile(delete=False, suffix='.pdf')
    file.save(temp_file.name)
    temp_file.close()
    return temp_file.name


# Chat-completion variant. The chat endpoint requires a chat model such as
# gpt-3.5-turbo; the completions-only text-davinci-003 is not accepted here.
def generate_text2(openAI_key, prompt, engine="gpt-3.5-turbo"):
    openai.api_key = openAI_key
    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'},
                {'role': 'user', 'content': prompt}]
    completions = openai.ChatCompletion.create(
        model=engine,
        messages=messages,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    message = completions.choices[0].message['content']
    return message


def generate_answer(question, openAI_key):
    topn_chunks = recommender(question)
    prompt = 'search results:\n\n'
    for c in topn_chunks:
        prompt += c + '\n\n'
    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. " \
              "Make sure the answer is correct and don't output false content. " \
              "The answer should be short and concise. Answer step-by-step.\n\n"
    prompt += f"Query: {question}\nAnswer:"
    answer = generate_text(openAI_key, prompt, "text-davinci-003")
    return answer


def unique_filename(file_name):
    # Append _1, _2, ... until the name no longer collides with an existing file.
    counter = 1
    new_file_name = file_name
    while os.path.isfile(new_file_name):
        name, ext = os.path.splitext(file_name)
        new_file_name = f"{name}_{counter}{ext}"
        counter += 1
    return new_file_name


def question_answer(url, file, question, openAI_key):
    if url.strip() == '' and file is None:
        return '[ERROR]: Both URL and PDF are empty. Provide at least one.', False
    if url.strip() != '' and file is not None:
        return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).', False
    if url.strip() != '':
        glob_url = url
        download_pdf(glob_url, 'corpus.pdf')
        load_recommender('corpus.pdf')
    else:
        old_file_name = file.name
        # Drop the random suffix Gradio appends to uploaded temp files, keeping the extension.
        file_name = old_file_name[:-12] + old_file_name[-4:]
        file_name = unique_filename(file_name)  # ensure the new file name is unique
        # Copy the content of the old file to the new file and delete the old one.
        with open(old_file_name, 'rb') as src, open(file_name, 'wb') as dst:
            shutil.copyfileobj(src, dst)
        os.remove(old_file_name)
        load_recommender(file_name)
    if question.strip().lower() == 'exit':
        return '', False
    answer = generate_answer(question, openAI_key)
    return answer, True


def main_loop(url: str, file: str, question: str):
    # The UI binds a single output (the answer box), so only the answer is returned.
    answer, cont = question_answer(url, file, question, openAI_key)
    return answer


# Unused legacy handler; the Submit button is wired directly via btn.click below.
# def on_click(*args):
#     answer.value = main_loop(url.value, file.value, question.value)

recommender = SemanticSearch()

title = 'Cognitive pdfGPT'
description = """Why use Cognitive pdfGPT?
The issue is that OpenAI imposes a 4K-token limit, which prevents it from processing an entire PDF file as input. Additionally, ChatGPT cannot (as of yet) talk directly to external data. Cognitive pdfGPT solves this by letting you chat with your PDF file using GPT functionality. The application splits the document into smaller chunks and generates embeddings using a powerful Deep Averaging Network encoder. A semantic search is performed on your data, and the top relevant results are used to generate a response.
🛑 DO NOT USE CONFIDENTIAL INFORMATION
"""

with gr.Blocks() as demo:
    gr.Markdown(f'<center><h1>{title}</h1></center>')
    gr.Markdown(description)
    with gr.Row():
        with gr.Group():
            file = gr.File(label='➡️ Upload your PDF ⬅️ NO CONFIDENTIAL FILES SHOULD BE USED',
                           file_types=['.pdf'])
            url = gr.Textbox(label='🔗 Or enter a PDF URL here')
            question = gr.Textbox(label='🔤 Enter your question here 🔤')
            btn = gr.Button(value='Submit')
            btn.style(full_width=False)
        with gr.Group():
            gr.Image("logo.jpg")
            answer = gr.Textbox(label='The answer to your question is:')
    btn.click(main_loop, inputs=[url, file, question], outputs=[answer])

demo.launch()
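# Headless usage sketch (commented out; the URL is a placeholder): the
# question-answer path can also be driven without launching the UI, e.g. from
# a quick test script.
#
#   ans, ok = question_answer('https://example.com/sample.pdf', None,
#                             'What is this document about?', openAI_key)
#   print(ans)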