Chesscorner committed
Commit 9c0daeb · verified · 1 Parent(s): 7b9e4ff

Update chess_ground-targz.py

Files changed (1)
  1. chess_ground-targz.py +24 -37
chess_ground-targz.py CHANGED
@@ -2,27 +2,11 @@ import os
 import json
 import tarfile
 import datasets
-
-# Description of the dataset
-_DESCRIPTION = """\
-Dataset for extracting notations from chess scoresheets, integrating both image and text data.
-"""
-
-# BibTeX citation for the dataset
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.},
-year={2020}
-}
-"""
-
-# License of the dataset
-_LICENSE = "Creative Commons Attribution 3.0"
+from collections import defaultdict
 
 class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
-    """Dataset for linking chess scoresheet images with ground truth text."""
-
+    """Dataset for linking chess scoresheet images with multiple ground truth texts."""
+
     def _info(self):
         # Define the features of your dataset (images + text)
         features = datasets.Features(
@@ -44,12 +28,9 @@ class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Define the splits of the dataset."""
-
-        # Load the image dataset (tar.gz file)
         image_dataset_url = "https://huggingface.co/datasets/Chesscorner/chess-images/resolve/main/flat_images.tar.gz"
         extracted_image_path = dl_manager.download(image_dataset_url)
 
-        # Load the text dataset (ground truths)
         text_dataset_url = "https://huggingface.co/datasets/Chesscorner/jsonl-chess-dataset/resolve/main/train.jsonl/train.jsonl"
         text_filepath = dl_manager.download(text_dataset_url)
 
@@ -64,12 +45,14 @@ class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, image_tar_path, text_filepath):
-        """Generate examples by linking images and text."""
+        """Generate examples by linking images with multiple related texts."""
         idx = 0
-
         # Extract and map text IDs to their corresponding images
        image_mapping = self._extract_images_from_tar(image_tar_path)
 
+        # Dictionary to hold multiple texts for each image ID
+        grouped_texts = defaultdict(list)
+
         # Load the text dataset (ground truths) from the JSONL file
         with open(text_filepath, encoding="utf-8") as fp:
             for line in fp:
@@ -79,19 +62,24 @@ class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
                 # Extract the text ID (assuming text ID matches image filename)
                 text_id = text[:5]  # Adjust this based on the actual pattern of text IDs
 
-                # Find the corresponding image file
-                image_file = image_mapping.get(f"{text_id}.png")  # Adjust file extension if necessary
-
-                # Ensure the image exists and yield the example
-                if image_file:
-                    yield idx, {
-                        "image": image_file,
-                        "text": text,
-                    }
-                else:
-                    print(f"Image not found for ID: {text_id}")
-
+                # Group texts by their text_id (which corresponds to the image)
+                grouped_texts[text_id].append(text)
+
+        # Now generate examples, linking each image to its grouped texts
+        for text_id, texts in grouped_texts.items():
+            image_file = image_mapping.get(f"{text_id}.png")  # Adjust file extension if necessary
+
+            # Ensure the image exists and yield the example
+            if image_file:
+                # Join the texts related to the same image
+                combined_text = " ".join(texts)
+                yield idx, {
+                    "image": image_file,
+                    "text": combined_text,  # Link all related texts together
+                }
                 idx += 1
+            else:
+                print(f"Image not found for ID: {text_id}")
 
     def _extract_images_from_tar(self, tar_path):
         """Extracts the images from the tar.gz archive and returns a mapping of image filenames to file paths."""
@@ -116,4 +104,3 @@ class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
             image_mapping[image_filename] = extracted_image_path
 
         return image_mapping
-
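
The net effect of the commit is that _generate_examples no longer yields one example per JSONL line: it first buckets every ground-truth text under the 5-character prefix that names its scoresheet image, then emits a single example per image with the grouped texts joined. Below is a minimal standalone sketch of that grouping behaviour, using made-up ground-truth strings (the real ones come from train.jsonl, whose exact field layout is not visible in this diff).

from collections import defaultdict

# Hypothetical ground-truth strings; in the script each one comes from a JSONL
# record and its first five characters double as the image ID (e.g. "00001.png").
texts_from_jsonl = [
    "00001 1. e4 e5",
    "00001 2. Nf3 Nc6",
    "00002 1. d4 d5",
]

grouped_texts = defaultdict(list)
for text in texts_from_jsonl:
    text_id = text[:5]                  # same prefix rule as _generate_examples
    grouped_texts[text_id].append(text)

for text_id, texts in grouped_texts.items():
    print(text_id, "->", " ".join(texts))
# 00001 -> 00001 1. e4 e5 00001 2. Nf3 Nc6
# 00002 -> 00002 1. d4 d5

Because the yield now happens only after the whole JSONL file has been read, the dataset shrinks from one example per text line to one example per matched image; prefixes with no corresponding <text_id>.png in the archive are still only reported with a print call, as before.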