davanstrien (HF Staff) committed
Commit b366864 · 1 Parent(s): 3f59035

Add NuMarkdown-8B-Thinking OCR script with reasoning capabilities

Files changed (1)
  1. numarkdown-ocr.py +627 -0
numarkdown-ocr.py ADDED
@@ -0,0 +1,627 @@
+ # /// script
+ # requires-python = ">=3.11"
+ # dependencies = [
+ #     "datasets",
+ #     "huggingface-hub[hf_transfer]",
+ #     "pillow",
+ #     "vllm",
+ #     "tqdm",
+ #     "toolz",
+ #     "torch",  # Added for CUDA check
+ # ]
+ #
+ # ///
+
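+ # The block above is PEP 723 inline script metadata: running the file with
+ # `uv run numarkdown-ocr.py ...` lets uv resolve and install the listed
+ # dependencies in an ephemeral environment before executing the script.
+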
+ """
+ Convert document images to markdown using NuMarkdown-8B-Thinking with vLLM.
+
+ This script processes images through the NuMarkdown model to extract
+ text with advanced reasoning capabilities, ideal for complex document understanding.
+
+ Features:
+ - Reasoning-based document analysis with thinking tokens
+ - Superior table extraction and formatting
+ - Complex layout understanding
+ - Mathematical formula recognition
+ - Clean markdown output generation
+ - Optional thinking trace inclusion
+ """
+
+ import argparse
+ import base64
+ import io
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional, Union
+
+ import torch
+ from datasets import load_dataset
+ from huggingface_hub import DatasetCard, login
+ from PIL import Image
+ from toolz import partition_all
+ from tqdm.auto import tqdm
+ from vllm import LLM, SamplingParams
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def check_cuda_availability():
+     """Check if CUDA is available and exit if not."""
+     if not torch.cuda.is_available():
+         logger.error("CUDA is not available. This script requires a GPU.")
+         logger.error("Please run on a machine with a CUDA-capable GPU.")
+         sys.exit(1)
+     else:
+         logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
+
+
+ def validate_and_resize_image(
+     image: Image.Image,
+     # Pixel budgets are expressed in 28x28 patches, matching the patch size of
+     # the Qwen2.5-VL-style vision encoder that NuMarkdown builds on.
+     min_pixels: int = 100 * 28 * 28,
+     max_pixels: int = 5000 * 28 * 28,
+ ) -> Image.Image:
+     """Validate and resize image to meet pixel constraints if necessary."""
+     width, height = image.size
+     total_pixels = width * height
+
+     if total_pixels < min_pixels or total_pixels > max_pixels:
+         # Calculate scaling factor (area scales with the square of the linear
+         # factor, hence the square root)
+         if total_pixels < min_pixels:
+             scale = (min_pixels / total_pixels) ** 0.5
+         else:
+             scale = (max_pixels / total_pixels) ** 0.5
+
+         new_width = int(width * scale)
+         new_height = int(height * scale)
+
+         logger.debug(f"Resizing image from {width}x{height} to {new_width}x{new_height}")
+         image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+     return image
+
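+ # Worked example of the resize rule above: a 4000x3000 scan (12,000,000 px)
+ # exceeds max_pixels (5000 * 28 * 28 = 3,920,000), so
+ # scale = (3_920_000 / 12_000_000) ** 0.5 ≈ 0.57 and the image is resized
+ # to roughly 2286x1714, back inside the allowed pixel budget.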
+
+ def extract_answer_from_thinking(text: str, include_thinking: bool = False) -> str:
+     """
+     Extract the final answer from NuMarkdown's thinking output.
+
+     The model generates output in the format:
+     <think>reasoning process...</think>
+     <answer>final markdown output</answer>
+     """
+     if include_thinking:
+         # Return the full output including thinking traces
+         return text.strip()
+
+     # Extract content between <answer> tags
+     answer_pattern = r"<answer>(.*?)</answer>"
+     answer_match = re.search(answer_pattern, text, re.DOTALL)
+
+     if answer_match:
+         return answer_match.group(1).strip()
+
+     # If no answer tags were found, check whether the entire text is markdown
+     # (sometimes the model might not use tags)
+     if "<think>" not in text and "<answer>" not in text:
+         return text.strip()
+
+     # Fallback: return everything after </think> if present
+     think_end = text.find("</think>")
+     if think_end != -1:
+         remaining = text[think_end + len("</think>"):].strip()
+         # Remove <answer> tags if present
+         remaining = remaining.replace("<answer>", "").replace("</answer>", "").strip()
+         return remaining
+
+     # Last resort: return the full text
+     logger.warning("Could not extract answer from thinking tokens, returning full text")
+     return text.strip()
+
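+ # Illustrative behaviour (hypothetical model output, not from a real run):
+ #
+ #     raw = "<think>One heading, one table.</think><answer># Invoice\n\n| Item | Qty |\n|------|-----|</answer>"
+ #     extract_answer_from_thinking(raw)                        -> "# Invoice\n\n| Item | Qty |\n|------|-----|"
+ #     extract_answer_from_thinking(raw, include_thinking=True) -> the raw string, thinking trace intact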
+
+ def make_numarkdown_message(
+     image: Union[Image.Image, Dict[str, Any], str],
+     prompt: str = "Convert this document to markdown. Focus on preserving structure, tables, formulas, and all textual content.",
+ ) -> List[Dict]:
+     """Create chat message for NuMarkdown processing."""
+     # Convert to PIL Image if needed
+     if isinstance(image, Image.Image):
+         pil_img = image.convert("RGB")
+     elif isinstance(image, dict) and "bytes" in image:
+         pil_img = Image.open(io.BytesIO(image["bytes"])).convert("RGB")
+     elif isinstance(image, str):
+         pil_img = Image.open(image).convert("RGB")
+     else:
+         raise ValueError(f"Unsupported image type: {type(image)}")
+
+     # Validate and resize if necessary
+     pil_img = validate_and_resize_image(pil_img)
+
+     # Convert to base64 data URI
+     buf = io.BytesIO()
+     pil_img.save(buf, format="PNG")
+     data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
+
+     # Return message in vLLM chat format
+     return [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image_url", "image_url": {"url": data_uri}},
+                 {"type": "text", "text": prompt},
+             ],
+         }
+     ]
+
+
+ def create_dataset_card(
+     source_dataset: str,
+     output_dataset: str,
+     model: str,
+     num_samples: int,
+     processing_time: str,
+     batch_size: int,
+     max_model_len: int,
+     max_tokens: int,
+     gpu_memory_utilization: float,
+     include_thinking: bool,
+     image_column: str = "image",
+     split: str = "train",
+ ) -> str:
+     """Create a dataset card documenting the OCR process."""
+     model_name = model.split("/")[-1]
+     # Parse "X.Y minutes" back into seconds; floor at 1s so tiny test runs
+     # cannot trigger a division by zero in the speed estimate below
+     elapsed_seconds = max(float(processing_time.split()[0]) * 60, 1.0)
+
+     return f"""---
+ tags:
+ - ocr
+ - document-processing
+ - numarkdown
+ - markdown
+ - reasoning
+ - thinking-tokens
+ - uv-script
+ - generated
+ ---
+
+ # Document OCR using {model_name}
+
+ This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using NuMarkdown-8B-Thinking.
+
+ ## Processing Details
+
+ - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
+ - **Model**: [{model}](https://huggingface.co/{model})
+ - **Number of Samples**: {num_samples:,}
+ - **Processing Time**: {processing_time}
+ - **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
+
+ ### Configuration
+
+ - **Image Column**: `{image_column}`
+ - **Output Column**: `markdown`
+ - **Dataset Split**: `{split}`
+ - **Batch Size**: {batch_size}
+ - **Max Model Length**: {max_model_len:,} tokens
+ - **Max Output Tokens**: {max_tokens:,}
+ - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
+ - **Thinking Traces**: {"Included" if include_thinking else "Excluded (only final answers)"}
+
+ ## Model Information
+
+ NuMarkdown-8B-Thinking is a reasoning-based document OCR model that excels at:
+ - 🧠 **Reasoning Process** - Analyzes document layout before generation
+ - 📊 **Complex Tables** - Superior table extraction and formatting
+ - 📐 **Mathematical Formulas** - Accurate LaTeX/math notation preservation
+ - 📝 **Document Structure** - Maintains hierarchical document organization
+ - 🔍 **Layout Analysis** - Understands complex multi-column layouts
+ - ✨ **Clean Output** - Generates well-formatted markdown
+
+ ### Thinking Tokens
+
+ This model uses a "thinking" process where it:
+ 1. Analyzes the document structure internally (`<think>` phase)
+ 2. Generates the final markdown output (`<answer>` phase)
+
+ {"The dataset includes both thinking traces and final answers." if include_thinking else "Only the final answers are included (thinking traces removed)."}
+
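+ An illustrative raw response (before answer extraction) has the shape:
+
+ ```
+ <think>
+ The page has a title and a two-column table; the table should be
+ rendered as a markdown table ...
+ </think>
+ <answer>
+ # Quarterly Report
+
+ | Region | Revenue |
+ |--------|---------|
+ | EMEA   | 1.2M    |
+ </answer>
+ ```
+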
+ ## Dataset Structure
+
+ The dataset contains all original columns plus:
+ - `markdown`: The extracted text in markdown format
+ - `inference_info`: JSON list tracking all OCR models applied to this dataset
+
+ ## Usage
+
+ ```python
+ from datasets import load_dataset
+ import json
+
+ # Load the dataset
+ dataset = load_dataset("{output_dataset}", split="{split}")
+
+ # Access the markdown text
+ for example in dataset:
+     print(example["markdown"])
+     break
+
+ # View all OCR models applied to this dataset
+ inference_info = json.loads(dataset[0]["inference_info"])
+ for info in inference_info:
+     print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
+ ```
+
+ ## Reproduction
+
+ This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) NuMarkdown OCR script:
+
+ ```bash
+ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py \\
+     {source_dataset} \\
+     {output_dataset} \\
+     --image-column {image_column} \\
+     --batch-size {batch_size} \\
+     --max-model-len {max_model_len} \\
+     --max-tokens {max_tokens} \\
+     --gpu-memory-utilization {gpu_memory_utilization} \\
+     {"--include-thinking" if include_thinking else ""}
+ ```
+
+ ## Performance
+
+ - **Processing Speed**: ~{num_samples / elapsed_seconds:.1f} images/second
+ - **GPU Configuration**: vLLM with {gpu_memory_utilization:.0%} GPU memory utilization
+ - **Model Size**: 8.29B parameters
+
+ Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
+ """
+
+
+ def main(
+     input_dataset: str,
+     output_dataset: str,
+     image_column: str = "image",
+     batch_size: int = 16,
+     model: str = "numind/NuMarkdown-8B-Thinking",
+     max_model_len: int = 16384,
+     max_tokens: int = 8192,
+     gpu_memory_utilization: float = 0.9,
+     hf_token: Optional[str] = None,
+     split: str = "train",
+     max_samples: Optional[int] = None,
+     private: bool = False,
+     shuffle: bool = False,
+     seed: int = 42,
+     include_thinking: bool = False,
+     temperature: float = 0.0,
+     custom_prompt: Optional[str] = None,
+ ):
+     """Process images from a Hugging Face dataset through the NuMarkdown model."""
+
+     # Check CUDA availability first
+     check_cuda_availability()
+
+     # Track processing start time
+     start_time = datetime.now()
+
+     # Enable HF_TRANSFER for faster downloads
+     os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+     # Login to HF if a token was provided
+     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
+     if HF_TOKEN:
+         login(token=HF_TOKEN)
+
+     # Load dataset
+     logger.info(f"Loading dataset: {input_dataset}")
+     dataset = load_dataset(input_dataset, split=split)
+
+     # Validate image column
+     if image_column not in dataset.column_names:
+         raise ValueError(
+             f"Column '{image_column}' not found. Available: {dataset.column_names}"
+         )
+
+     # Shuffle if requested
+     if shuffle:
+         logger.info(f"Shuffling dataset with seed {seed}")
+         dataset = dataset.shuffle(seed=seed)
+
+     # Limit samples if requested
+     if max_samples:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+         logger.info(f"Limited to {len(dataset)} samples")
+
+     # Initialize vLLM with trust_remote_code for NuMarkdown
+     logger.info(f"Initializing vLLM with model: {model}")
+     llm = LLM(
+         model=model,
+         trust_remote_code=True,  # Required for NuMarkdown
+         max_model_len=max_model_len,
+         gpu_memory_utilization=gpu_memory_utilization,
+         limit_mm_per_prompt={"image": 1},  # one image per prompt
+     )
+
+     # Set up sampling parameters
+     sampling_params = SamplingParams(
+         temperature=temperature,
+         max_tokens=max_tokens,
+     )
+
+     # Use custom prompt if provided, otherwise use the default
+     prompt = custom_prompt or "Convert this document to markdown. Focus on preserving structure, tables, formulas, and all textual content."
+
+     # Process images in batches
+     all_markdown = []
+
+     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
+     logger.info(f"Including thinking traces: {include_thinking}")
+
+     # Process in batches to avoid memory issues
+     for batch_indices in tqdm(
+         partition_all(batch_size, range(len(dataset))),
+         total=(len(dataset) + batch_size - 1) // batch_size,
+         desc="OCR processing",
+     ):
+         batch_indices = list(batch_indices)
+         batch_images = [dataset[i][image_column] for i in batch_indices]
+
+         try:
+             # Create messages for batch
+             batch_messages = [
+                 make_numarkdown_message(img, prompt) for img in batch_images
+             ]
+
+             # Process with vLLM
+             outputs = llm.chat(batch_messages, sampling_params)
+
+             # Extract markdown from outputs
+             for output in outputs:
+                 raw_text = output.outputs[0].text.strip()
+                 # Extract answer from thinking tokens
+                 markdown_text = extract_answer_from_thinking(raw_text, include_thinking)
+                 all_markdown.append(markdown_text)
+
+         except Exception as e:
+             logger.error(f"Error processing batch: {e}")
+             # Add error placeholders for the failed batch
+             all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
+
+     # Add markdown column to dataset (replacing any previous run's output)
+     logger.info("Adding markdown column to dataset")
+     if "markdown" in dataset.column_names:
+         dataset = dataset.remove_columns(["markdown"])
+     dataset = dataset.add_column("markdown", all_markdown)
+
+     # Handle inference_info tracking
+     logger.info("Updating inference_info...")
+
+     # Check for existing inference_info
+     if "inference_info" in dataset.column_names:
+         # Parse existing info from the first row (all rows carry the same info)
+         try:
+             existing_info = json.loads(dataset[0]["inference_info"])
+             if not isinstance(existing_info, list):
+                 existing_info = [existing_info]  # Convert old format to list
+         except (json.JSONDecodeError, TypeError):
+             existing_info = []
+         # Remove the old column so it can be replaced with the updated list
+         dataset = dataset.remove_columns(["inference_info"])
+     else:
+         existing_info = []
+
+     # Add new inference info
+     new_info = {
+         "column_name": "markdown",
+         "model_id": model,
+         "processing_date": datetime.now().isoformat(),
+         "batch_size": batch_size,
+         "max_tokens": max_tokens,
+         "gpu_memory_utilization": gpu_memory_utilization,
+         "max_model_len": max_model_len,
+         "include_thinking": include_thinking,
+         "temperature": temperature,
+         "prompt": prompt,
+         "script": "numarkdown-ocr.py",
+         "script_version": "1.0.0",
+         "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py",
+     }
+     existing_info.append(new_info)
+
+     # Add updated inference_info column (the same JSON string on every row), e.g.
+     # [{"column_name": "markdown", "model_id": "numind/NuMarkdown-8B-Thinking", ...}]
+     info_json = json.dumps(existing_info, ensure_ascii=False)
+     dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
+
+     # Push to hub
+     logger.info(f"Pushing to {output_dataset}")
+     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
+
+     # Calculate processing time
+     end_time = datetime.now()
+     processing_duration = end_time - start_time
+     processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
+
+     # Create and push dataset card
+     logger.info("Creating dataset card...")
+     card_content = create_dataset_card(
+         source_dataset=input_dataset,
+         output_dataset=output_dataset,
+         model=model,
+         num_samples=len(dataset),
+         processing_time=processing_time,
+         batch_size=batch_size,
+         max_model_len=max_model_len,
+         max_tokens=max_tokens,
+         gpu_memory_utilization=gpu_memory_utilization,
+         include_thinking=include_thinking,
+         image_column=image_column,
+         split=split,
+     )
+
+     card = DatasetCard(card_content)
+     card.push_to_hub(output_dataset, token=HF_TOKEN)
+     logger.info("✅ Dataset card created and pushed!")
+
+     logger.info("✅ OCR conversion complete!")
+     logger.info(
+         f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
+     )
+
+
+ if __name__ == "__main__":
+     # Show example usage if no arguments were given
+     if len(sys.argv) == 1:
+         print("=" * 80)
+         print("NuMarkdown-8B-Thinking OCR with Reasoning")
+         print("=" * 80)
+         print("\nThis script converts document images to markdown using")
+         print("the NuMarkdown-8B-Thinking model with advanced reasoning capabilities.")
+         print("\nFeatures:")
+         print("- 🧠 Reasoning-based document analysis")
+         print("- 📊 Superior table extraction and formatting")
+         print("- 📐 Mathematical formula recognition")
+         print("- 📝 Complex layout understanding")
+         print("- ✨ Clean markdown generation")
+         print("- 🔍 Optional thinking trace inclusion")
+         print("\nExample usage:")
+         print("\n1. Basic OCR conversion:")
+         print("   uv run numarkdown-ocr.py document-images markdown-docs")
+         print("\n2. Include thinking traces:")
+         print("   uv run numarkdown-ocr.py complex-docs analyzed-docs --include-thinking")
+         print("\n3. With custom settings:")
+         print("   uv run numarkdown-ocr.py scientific-papers extracted-text \\")
+         print("     --batch-size 8 \\")
+         print("     --max-tokens 8192 \\")
+         print("     --gpu-memory-utilization 0.9")
+         print("\n4. Process a subset for testing:")
+         print("   uv run numarkdown-ocr.py large-dataset test-output --max-samples 10")
+         print("\n5. Custom prompt for specific needs:")
+         print("   uv run numarkdown-ocr.py invoices invoice-data \\")
+         print('     --custom-prompt "Extract all invoice details including line items"')
+         print("\n6. Running on HF Jobs:")
+         print("   hf jobs uv run --flavor l4x1 \\")
+         print('     -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\')
+         print("     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/numarkdown-ocr.py \\")
+         print("     your-document-dataset \\")
+         print("     your-markdown-output")
+         print("\n" + "=" * 80)
+         print("\nFor full help, run: uv run numarkdown-ocr.py --help")
+         sys.exit(0)
+
+     parser = argparse.ArgumentParser(
+         description="OCR images to markdown using NuMarkdown-8B-Thinking with reasoning",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   # Basic usage
+   uv run numarkdown-ocr.py my-images-dataset ocr-results
+
+   # Include thinking traces in output
+   uv run numarkdown-ocr.py documents analyzed-docs --include-thinking
+
+   # Process a subset for testing
+   uv run numarkdown-ocr.py large-dataset test-output --max-samples 100
+
+   # Custom prompt for specific extraction
+   uv run numarkdown-ocr.py forms form-data --custom-prompt "Extract all form fields and values"
+
+   # Random sample from the dataset
+   uv run numarkdown-ocr.py ordered-dataset random-sample --max-samples 50 --shuffle
+ """,
+     )
+
+     parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
+     parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
+     parser.add_argument(
+         "--image-column",
+         default="image",
+         help="Column containing images (default: image)",
+     )
+     parser.add_argument(
+         "--batch-size",
+         type=int,
+         default=16,
+         help="Batch size for processing (default: 16; smaller than lighter OCR scripts use, because of the 8B model size)",
+     )
+     parser.add_argument(
+         "--model",
+         default="numind/NuMarkdown-8B-Thinking",
+         help="Model to use (default: numind/NuMarkdown-8B-Thinking)",
+     )
+     parser.add_argument(
+         "--max-model-len",
+         type=int,
+         default=16384,
+         help="Maximum model context length (default: 16384)",
+     )
+     parser.add_argument(
+         "--max-tokens",
+         type=int,
+         default=8192,
+         help="Maximum tokens to generate (default: 8192)",
+     )
+     parser.add_argument(
+         "--gpu-memory-utilization",
+         type=float,
+         default=0.9,
+         help="GPU memory utilization (default: 0.9)",
+     )
+     parser.add_argument("--hf-token", help="Hugging Face API token")
+     parser.add_argument(
+         "--split", default="train", help="Dataset split to use (default: train)"
+     )
+     parser.add_argument(
+         "--max-samples",
+         type=int,
+         help="Maximum number of samples to process (for testing)",
+     )
+     parser.add_argument(
+         "--private", action="store_true", help="Make output dataset private"
+     )
+     parser.add_argument(
+         "--shuffle",
+         action="store_true",
+         help="Shuffle the dataset before processing (useful for random sampling)",
+     )
+     parser.add_argument(
+         "--seed",
+         type=int,
+         default=42,
+         help="Random seed for shuffling (default: 42)",
+     )
+     parser.add_argument(
+         "--include-thinking",
+         action="store_true",
+         help="Include thinking traces in output (default: only final answers)",
+     )
+     parser.add_argument(
+         "--temperature",
+         type=float,
+         default=0.0,
+         help="Temperature for generation (default: 0.0 for deterministic output)",
+     )
+     parser.add_argument(
+         "--custom-prompt",
+         type=str,
+         help="Custom prompt for the model (overrides the default)",
+     )
+
+     args = parser.parse_args()
+
+     main(
+         input_dataset=args.input_dataset,
+         output_dataset=args.output_dataset,
+         image_column=args.image_column,
+         batch_size=args.batch_size,
+         model=args.model,
+         max_model_len=args.max_model_len,
+         max_tokens=args.max_tokens,
+         gpu_memory_utilization=args.gpu_memory_utilization,
+         hf_token=args.hf_token,
+         split=args.split,
+         max_samples=args.max_samples,
+         private=args.private,
+         shuffle=args.shuffle,
+         seed=args.seed,
+         include_thinking=args.include_thinking,
+         temperature=args.temperature,
+         custom_prompt=args.custom_prompt,
+     )