dhairyashah committed
Commit 5156ae8 · verified · 1 Parent(s): 2c8b539

Update app.py

Files changed (1)
  1. app.py +129 -52
app.py CHANGED
@@ -1,56 +1,134 @@
  #!/usr/bin/env python
  # -*- coding: utf-8 -*-

+
  import tqdm
  from PIL import Image
+ import hashlib
  import torch
  import fitz
+ import threading
  import gradio as gr
  import spaces
  import os
  from transformers import AutoModel
  from transformers import AutoTokenizer
  import numpy as np
+ import json

- cache_dir = 'pdf_cache'
+ cache_dir = '/data/kb_cache'
  os.makedirs(cache_dir, exist_ok=True)

+ def get_image_md5(img: Image.Image):
+     img_byte_array = img.tobytes()
+     hash_md5 = hashlib.md5()
+     hash_md5.update(img_byte_array)
+     hex_digest = hash_md5.hexdigest()
+     return hex_digest
+
+ def calculate_md5_from_binary(binary_data):
+     hash_md5 = hashlib.md5()
+     hash_md5.update(binary_data)
+     return hash_md5.hexdigest()
+
+ @spaces.GPU(duration=100)
+ def add_pdf_gradio(pdf_file_binary, progress=gr.Progress()):
+     global model, tokenizer
+     model.eval()
+
+     this_cache_dir = os.path.join(cache_dir, 'temp_cache')
+     os.makedirs(this_cache_dir, exist_ok=True)
+
+     with open(os.path.join(this_cache_dir, f"src.pdf"), 'wb') as file:
+         file.write(pdf_file_binary)
+
+     dpi = 200
+     doc = fitz.open("pdf", pdf_file_binary)
+
+     reps_list = []
+     images = []
+     image_md5s = []
+
+     for page in progress.tqdm(doc):
+         pix = page.get_pixmap(dpi=dpi)
+         image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+         image_md5 = get_image_md5(image)
+         image_md5s.append(image_md5)
+         with torch.no_grad():
+             reps = model(text=[''], image=[image], tokenizer=tokenizer).reps
+         reps_list.append(reps.squeeze(0).cpu().numpy())
+         images.append(image)
+
+     for idx in range(len(images)):
+         image = images[idx]
+         image_md5 = image_md5s[idx]
+         cache_image_path = os.path.join(this_cache_dir, f"{image_md5}.png")
+         image.save(cache_image_path)
+
+     np.save(os.path.join(this_cache_dir, f"reps.npy"), reps_list)
+
+     with open(os.path.join(this_cache_dir, f"md5s.txt"), 'w') as f:
+         for item in image_md5s:
+             f.write(item+'\n')
+
+     return "PDF processed successfully!"
+
+ @spaces.GPU(duration=50)
+ def retrieve_gradio(query: str, topk: int):
+     global model, tokenizer
+
+     model.eval()
+
+     target_cache_dir = os.path.join(cache_dir, 'temp_cache')
+
+     if not os.path.exists(target_cache_dir):
+         return None
+
+     md5s = []
+     with open(os.path.join(target_cache_dir, f"md5s.txt"), 'r') as f:
+         for line in f:
+             md5s.append(line.rstrip('\n'))
+
+     doc_reps = np.load(os.path.join(target_cache_dir, f"reps.npy"))
+
+     query_with_instruction = "Represent this query for retrieving relevant document: " + query
+     with torch.no_grad():
+         query_rep = model(text=[query_with_instruction], image=[None], tokenizer=tokenizer).reps.squeeze(0).cpu()
+
+     doc_reps_cat = torch.stack([torch.Tensor(i) for i in doc_reps], dim=0)
+
+     similarities = torch.matmul(query_rep, doc_reps_cat.T)
+
+     topk_values, topk_doc_ids = torch.topk(similarities, k=topk)
+
+     topk_doc_ids_np = topk_doc_ids.cpu().numpy()
+
+     images_topk = [Image.open(os.path.join(target_cache_dir, f"{md5s[idx]}.png")) for idx in topk_doc_ids_np]
+
+     return images_topk
+
  device = 'cuda'

- print("Embedding model loading...")
- model_path = 'RhapsodyAI/minicpm-visual-embedding-v0'
+ print("emb model load begin...")
+ model_path = 'RhapsodyAI/minicpm-visual-embedding-v0' # replace with your local model path
  tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
  model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
  model.eval()
  model.to(device)
- print("Embedding model loaded successfully!")
+ print("emb model load success!")

- print("Generation model loading...")
+ print("gen model load begin...")
  gen_model_path = 'openbmb/MiniCPM-V-2_6'
  gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
  gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, attn_implementation='sdpa', torch_dtype=torch.bfloat16)
  gen_model.eval()
  gen_model.to(device)
- print("Generation model loaded successfully!")
-
- @spaces.GPU(duration=100)
- def process_pdf(pdf_file, max_pages, progress=gr.Progress()):
-     doc = fitz.open("pdf", pdf_file)
-     num_pages = min(max_pages, len(doc))
-
-     images = []
-     for page_num in progress.tqdm(range(num_pages)):
-         page = doc[page_num]
-         pix = page.get_pixmap(dpi=200)
-         image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
-         images.append(image)
-
-     return images
+ print("gen model load success!")

  @spaces.GPU(duration=50)
  def answer_question(images, question):
      global gen_model, gen_tokenizer
-     images_ = [img.convert('RGB') for img in images]
+     images_ = [Image.open(image[0]).convert('RGB') for image in images]
      msgs = [{'role': 'user', 'content': [question, *images_]}]
      answer = gen_model.chat(
          image=None,
@@ -61,44 +139,43 @@ def answer_question(images, question):
      return answer

  with gr.Blocks() as app:
-     gr.Markdown("# PDF Question Answering with Vision Language Model")
-
-     gr.Markdown("""
-     This application uses a Vision Language Model to answer questions about PDF documents.
+     gr.Markdown("# MiniCPMV-RAG-PDFQA: Two Vision Language Models Enable End-to-End RAG")

-     1. Upload a PDF file
-     2. Set the maximum number of pages to process
-     3. Click "Process PDF" to extract the pages
-     4. Enter your question about the PDF content
-     5. Click "Answer Question" to get the model's response
-     """)
+     gr.Markdown("""
+     - A Vision Language Model Dense Retriever ([minicpm-visual-embedding-v0](https://huggingface.co/RhapsodyAI/minicpm-visual-embedding-v0)) **directly reads** your PDFs **without need of OCR**, produce **multimodal dense representations** and build your personal library.
+
+     - **Ask a question**, it retrieves the most relevant pages, then [MiniCPM-V-2.6](https://huggingface.co/spaces/openbmb/MiniCPM-V-2_6) will answer your question based on pages recalled, with strong multi-image understanding capability.

+     - It helps you read a long **visually-intensive** or **text-oriented** PDF document and find the pages that answer your question.
+
+     - It helps you build a personal library and retrieve book pages from a large collection of books.
+
+     - It works like a human: read, store, retrieve, and answer with full vision.
+     """)
+
+     gr.Markdown("- Currently online demo support PDF document with less than 50 pages due to GPU time limit. Deploy on your own machine for longer PDFs and books.")
+
      with gr.Row():
-         file_input = gr.File(type="binary", label="Upload PDF")
-         max_pages = gr.Number(value=10, minimum=1, maximum=50, step=1, label="Maximum number of pages to process")
+         file_input = gr.File(type="binary", label="Step 1: Upload PDF")
          process_button = gr.Button("Process PDF")
+         file_result = gr.Textbox(label="PDF Process Status")
+
+     process_button.click(add_pdf_gradio, inputs=[file_input], outputs=file_result)

      with gr.Row():
          query_input = gr.Text(label="Your Question")
-         answer_button = gr.Button("Answer Question")
-
-     images_output = gr.Gallery(label="Processed PDF Pages", visible=False)
-     gen_model_response = gr.Textbox(label="Model's Answer")
-
-     def process_and_show(pdf_file, max_pages):
-         images = process_pdf(pdf_file, max_pages)
-         return gr.Gallery.update(value=images, visible=True)
+         topk_input = gr.Number(value=5, minimum=1, maximum=10, step=1, label="Number of Pages to Retrieve")
+         retrieve_button = gr.Button("Retrieve Pages")
+     images_output = gr.Gallery(label="Retrieved Pages")
+
+     retrieve_button.click(retrieve_gradio, inputs=[query_input, topk_input], outputs=images_output)

-     process_button.click(
-         process_and_show,
-         inputs=[file_input, max_pages],
-         outputs=images_output
-     )
+     with gr.Row():
+         answer_button = gr.Button("Answer Question")
+         gen_model_response = gr.Textbox(label="MiniCPM-V-2.6's Answer")

-     answer_button.click(
-         answer_question,
-         inputs=[images_output, query_input],
-         outputs=gen_model_response
-     )
+     answer_button.click(fn=answer_question, inputs=[images_output, query_input], outputs=gen_model_response)
+
+     gr.Markdown("By using this demo, you agree to share your use data with us for research purpose, to help improve user experience.")

- app.launch()
+ app.launch()
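
Note on the retrieval path introduced by this commit: add_pdf_gradio leaves one PNG plus one embedding per page in the cache directory (reps.npy, md5s.txt), and retrieve_gradio ranks pages by a dot product between the query embedding and those cached page embeddings. The following is a minimal sketch of that ranking step outside the Gradio UI; it assumes `model` and `tokenizer` are the embedding model and tokenizer loaded as in the code above and that a cache folder has already been populated by add_pdf_gradio, and the helper name rank_cached_pages is illustrative, not part of the commit.

# Illustrative sketch only: rank cached pages for a query without the Gradio UI.
# Assumes `model`/`tokenizer` are the embedding model and tokenizer loaded above,
# and that add_pdf_gradio has already written reps.npy, md5s.txt and one <md5>.png
# per page into cache_path. The function name is hypothetical.
import os
import numpy as np
import torch
from PIL import Image

def rank_cached_pages(query, cache_path, model, tokenizer, topk=5):
    # Load the per-page embeddings and page identifiers written by add_pdf_gradio.
    doc_reps = np.load(os.path.join(cache_path, "reps.npy"))
    with open(os.path.join(cache_path, "md5s.txt")) as f:
        md5s = [line.rstrip("\n") for line in f]

    # Embed the query with the same instruction prefix the app uses.
    prompt = "Represent this query for retrieving relevant document: " + query
    with torch.no_grad():
        query_rep = model(text=[prompt], image=[None], tokenizer=tokenizer).reps.squeeze(0).cpu()

    # Dot-product similarity between the query and every cached page embedding.
    doc_mat = torch.stack([torch.Tensor(r) for r in doc_reps], dim=0)
    scores = torch.matmul(query_rep, doc_mat.T)
    values, ids = torch.topk(scores, k=min(topk, len(md5s)))

    # Return (score, page image) pairs, best match first.
    return [(values[i].item(), Image.open(os.path.join(cache_path, f"{md5s[idx]}.png")))
            for i, idx in enumerate(ids.tolist())]

This mirrors retrieve_gradio but also surfaces the similarity scores, which can help when choosing how many retrieved pages to pass on to answer_question.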