Sneriko committed on
Commit
62f1588
·
verified ·
1 Parent(s): 2284802

Upload goteborgs_poliskammare_fore_1900.py

Browse files
Files changed (1) hide show
  1. goteborgs_poliskammare_fore_1900.py +374 -99
goteborgs_poliskammare_fore_1900.py CHANGED
@@ -1,5 +1,4 @@
1
- # ladda upp datasetet i en zip av imgs och en zip av xml, skapa flera archive iterators och använd dom (men hur blir det med ordningen?)
2
-
3
  import os
4
  import xml.etree.ElementTree as ET
5
  from glob import glob
@@ -13,49 +12,97 @@ from datasets import (
13
  Features,
14
  GeneratorBasedBuilder,
15
  Image,
 
16
  Split,
17
  SplitGenerator,
18
  Value,
19
  )
20
  from PIL import Image as PILImage
 
21
 
22
 
23
  class HTRDatasetConfig(BuilderConfig):
24
- """BuilderConfig for HTRDataset"""
25
 
26
- def __init__(self, **kwargs):
27
- super(HTRDatasetConfig, self).__init__(**kwargs)
 
 
28
 
29
 
30
  class HTRDataset(GeneratorBasedBuilder):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  BUILDER_CONFIGS = [
32
  HTRDatasetConfig(
33
- name="htr_dataset",
34
- version="1.0.0",
35
- description="Line dataset for text recognition of historical swedish",
36
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  ]
38
 
39
  def _info(self):
40
- features = Features({"image": Image(), "transcription": Value("string")})
41
- return DatasetInfo(features=features)
42
 
43
  def _split_generators(self, dl_manager):
 
44
  """
45
- images = dl_manager.download_and_extract(
46
- [
47
- f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/images/alvsborgs_losen_imgs_{i}.tar.gz"
48
- for i in range(1, 3)
49
- ]
50
- )
51
- xmls = dl_manager.download_and_extract(
52
- [
53
- f"https://huggingface.co/datasets/Riksarkivet/alvsborgs_losen/resolve/main/data/page_xmls/alvsborgs_losen_page_xmls_{i}.tar.gz"
54
- for i in range(1, 3)
55
- ]
56
- )
57
- """
58
 
 
 
59
  images = dl_manager.download_and_extract(
60
  [
61
  f"https://huggingface.co/datasets/Riksarkivet/goteborgs_poliskammare_fore_1900/resolve/main/data/images/goteborgs_poliskammare_fore_1900_images_{i}.tar.gz"
@@ -68,6 +115,12 @@ class HTRDataset(GeneratorBasedBuilder):
68
  for i in range(1, 3)
69
  ]
70
  )
 
 
 
 
 
 
71
  image_extensions = [
72
  "*.jpg",
73
  "*.jpeg",
@@ -84,16 +137,18 @@ class HTRDataset(GeneratorBasedBuilder):
84
  "*.TIF",
85
  "*.TIFF",
86
  ]
87
- imgs_nested = [glob(os.path.join(x, "**", ext), recursive=True) for ext in image_extensions for x in images]
88
- imgs_flat = [item for sublist in imgs_nested for item in sublist]
89
- sorted_imgs = sorted(imgs_flat, key=lambda x: Path(x).stem)
90
- xmls_nested = [glob(os.path.join(x, "**", "*.xml"), recursive=True) for x in xmls]
91
- xmls_flat = [item for sublist in xmls_nested for item in sublist]
92
- sorted_xmls = sorted(xmls_flat, key=lambda x: Path(x).stem)
93
- assert len(sorted_imgs) == len(sorted_xmls)
94
- imgs_xmls = []
95
- for img, xml in zip(sorted_imgs, sorted_xmls):
96
- imgs_xmls.append((img, xml))
 
 
97
 
98
  return [
99
  SplitGenerator(
@@ -102,110 +157,330 @@ class HTRDataset(GeneratorBasedBuilder):
102
  )
103
  ]
104
 
 
 
 
 
 
 
 
105
  def _generate_examples(self, imgs_xmls):
106
- for img, xml in imgs_xmls:
107
- assert Path(img).stem == Path(xml).stem
108
- img_filename = Path(img).stem
109
- volume = PurePath(img).parts[-2]
110
 
 
 
 
 
111
  lines_data = self.parse_pagexml(xml)
112
-
113
- # Convert the bytes to a NumPy array
114
  image_array = cv2.imread(img)
115
 
116
  for i, line in enumerate(lines_data):
117
  line_id = str(i).zfill(4)
118
- try:
119
- cropped_image = self.crop_line_image(image_array, line["coords"])
120
- except Exception as e:
121
- print(e)
122
- continue
123
 
124
- # Logging to ensure data types and shapes
125
- cropped_image_np = np.array(cropped_image, dtype=np.uint8)
126
-
127
- # Ensure transcription is a string and not None
128
- transcription = str(line["transcription"])
129
- if transcription is None or not isinstance(transcription, str) or transcription == "":
130
  print(f"Invalid transcription: {transcription}")
131
  continue
132
 
133
- # Generate and log the unique key
134
  unique_key = f"{volume}_{img_filename}_{line_id}"
135
-
136
- try:
137
- yield (
138
- unique_key,
139
- {"image": cropped_image, "transcription": transcription},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  )
141
- except Exception as e:
142
- print(f"Error yielding example {unique_key}: {e}")
143
 
144
- def parse_pagexml(self, xml):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  try:
146
- tree = ET.parse(xml)
147
- root = tree.getroot()
148
  except ET.ParseError as e:
149
- print(e)
150
- return []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
 
 
 
152
  namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
153
- page = root.find("ns:Page", namespaces)
154
- if page is None:
155
- print("no page")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  return []
157
 
158
- text_regions = page.findall("ns:TextRegion", namespaces)
159
  lines_data = []
160
- for region in text_regions:
161
- lines = region.findall("ns:TextLine", namespaces)
162
-
163
- for line in lines:
164
  try:
165
  line_id = line.get("id")
166
- coords = line.find("ns:Coords", namespaces).get("points")
167
- coords = [tuple(map(int, p.split(","))) for p in coords.split()]
168
- transcription = line.find("ns:TextEquiv/ns:Unicode", namespaces).text
169
-
170
  lines_data.append({"line_id": line_id, "coords": coords, "transcription": transcription})
171
  except Exception as e:
172
- print(e)
173
- continue
174
-
175
  return lines_data
176
 
177
  def crop_line_image(self, img, coords):
 
 
 
178
  coords = np.array(coords)
179
- # img = HTRDataset.np_to_cv2(image)
180
- mask = np.zeros(img.shape[0:2], dtype=np.uint8)
181
 
182
- try:
183
- cv2.drawContours(mask, [coords], -1, (255, 255, 255), -1, cv2.LINE_AA)
184
- except Exception as e:
185
- print(e)
186
  res = cv2.bitwise_and(img, img, mask=mask)
187
  rect = cv2.boundingRect(coords)
188
 
 
189
  wbg = np.ones_like(img, np.uint8) * 255
190
  cv2.bitwise_not(wbg, wbg, mask=mask)
191
-
192
- # overlap the resulted cropped image on the white background
193
  dst = wbg + res
194
 
195
  cropped = dst[rect[1] : rect[1] + rect[3], rect[0] : rect[0] + rect[2]]
196
 
197
- cropped = HTRDataset.cv2_to_pil(cropped)
198
- return cropped
 
 
 
 
199
 
200
- def np_to_cv2(image_array):
201
- image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
202
- image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
203
- return image_rgb
 
 
 
204
 
205
- # Convert OpenCV image to PIL Image
206
- def cv2_to_pil(cv2_image):
207
- # Convert BGR to RGB
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  cv2_image_rgb = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
209
- # Convert NumPy array to PIL image
210
- pil_image = PILImage.fromarray(cv2_image_rgb)
211
- return pil_image
 
1
+ import io
 
2
  import os
3
  import xml.etree.ElementTree as ET
4
  from glob import glob
 
12
  Features,
13
  GeneratorBasedBuilder,
14
  Image,
15
+ Sequence,
16
  Split,
17
  SplitGenerator,
18
  Value,
19
  )
20
  from PIL import Image as PILImage
21
+ from shapely.geometry import Polygon
22
 
23
 
24
class HTRDatasetConfig(BuilderConfig):
    """Configuration for each dataset variant.

    Besides the standard ``BuilderConfig`` fields, each variant carries the
    name of the processing method to dispatch to and the feature schema its
    examples follow.
    """

    def __init__(self, name, description, process_func, features, **kwargs):
        super().__init__(name=name, description=description, **kwargs)
        # Name of the HTRDataset method that generates examples for this variant.
        self.process_func = process_func
        # datasets.Features schema describing the examples of this variant.
        self.features = features
31
 
32
 
33
class HTRDataset(GeneratorBasedBuilder):
    """Historical Swedish HTR dataset with several task-specific variants."""

    # Schema for the line-level text recognition variant.
    text_recognition_features = Features(
        {
            "image": Image(),
            "transcription": Value("string"),
        }
    )

    # Schema shared by every instance-segmentation variant.
    segmentation_features = Features(
        {
            "image_name": Value("string"),
            "image": Image(),
            "annotations": Sequence(
                {
                    "polygon": Sequence(Sequence(Value("float32"))),
                    "transcription": Value("string"),
                    "class": Value("string"),
                }
            ),
        }
    )

    BUILDER_CONFIGS = [
        HTRDatasetConfig(
            name="text_recognition",
            description="textline dataset for text recognition of historical Swedish",
            process_func="text_recognition",
            features=text_recognition_features,
        ),
        HTRDatasetConfig(
            name="inst_seg_lines_within_regions",
            description="Cropped text region images with text line annotations",
            process_func="inst_seg_lines_within_regions",
            features=segmentation_features,
        ),
        HTRDatasetConfig(
            name="inst_seg_regions_and_lines",
            description="Original images with both region and line annotations",
            process_func="inst_seg_regions_and_lines",
            features=segmentation_features,
        ),
        HTRDatasetConfig(
            name="inst_seg_lines",
            description="Original images with text line annotations only",
            process_func="inst_seg_lines",
            features=segmentation_features,
        ),
        HTRDatasetConfig(
            name="inst_seg_regions",
            description="Original images with text region annotations only",
            process_func="inst_seg_regions",
            features=segmentation_features,
        ),
    ]
88
 
89
  def _info(self):
90
+ return DatasetInfo(features=self.config.features)
 
91
 
92
  def _split_generators(self, dl_manager):
93
+ # Define URLs for images and XMLs
94
  """
95
+ images_url = [
96
+ f"https://huggingface.co/datasets/Riksarkivet/ra_enstaka_sidor/resolve/main/data/images/ra_enstaka_sidor_images_{i}.tar.gz"
97
+ for i in range(1, 3)
98
+ ]
99
+ xmls_url = [
100
+ f"https://huggingface.co/datasets/Riksarkivet/ra_enstaka_sidor/resolve/main/data/page_xmls/ra_enstaka_sidor_page_xmls_{i}.tar.gz"
101
+ for i in range(1, 3)
102
+ ]
 
 
 
 
 
103
 
104
+ """
105
+
106
  images = dl_manager.download_and_extract(
107
  [
108
  f"https://huggingface.co/datasets/Riksarkivet/goteborgs_poliskammare_fore_1900/resolve/main/data/images/goteborgs_poliskammare_fore_1900_images_{i}.tar.gz"
 
115
  for i in range(1, 3)
116
  ]
117
  )
118
+
119
+ # Download and extract images and XMLs
120
+ # images = dl_manager.download_and_extract(images_url)
121
+ # xmls = dl_manager.download_and_extract(xmls_url)
122
+
123
+ # Define supported image file extensions
124
  image_extensions = [
125
  "*.jpg",
126
  "*.jpeg",
 
137
  "*.TIF",
138
  "*.TIFF",
139
  ]
140
+
141
+ # Collect and sort image and XML file paths
142
+ imgs_flat = self._collect_file_paths(images, image_extensions)
143
+ xmls_flat = self._collect_file_paths(xmls, ["*.xml"])
144
+
145
+ # Ensure the number of images matches the number of XML files
146
+ assert len(imgs_flat) == len(xmls_flat)
147
+
148
+ # Pair images and XML files
149
+ imgs_xmls = list(
150
+ zip(sorted(imgs_flat, key=lambda x: Path(x).stem), sorted(xmls_flat, key=lambda x: Path(x).stem))
151
+ )
152
 
153
  return [
154
  SplitGenerator(
 
157
  )
158
  ]
159
 
160
+ def _collect_file_paths(self, folders, extensions):
161
+ """Collects file paths recursively from specified folders."""
162
+ files_nested = [
163
+ glob(os.path.join(folder, "**", ext), recursive=True) for ext in extensions for folder in folders
164
+ ]
165
+ return [file for sublist in files_nested for file in sublist]
166
+
167
  def _generate_examples(self, imgs_xmls):
168
+ process_func = getattr(self, self.config.process_func)
169
+ return process_func(imgs_xmls)
 
 
170
 
171
+ def text_recognition(self, imgs_xmls):
172
+ """Process for line dataset with cropped images and transcriptions."""
173
+ for img, xml in imgs_xmls:
174
+ img_filename, volume = self._extract_filename_and_volume(img, xml)
175
  lines_data = self.parse_pagexml(xml)
 
 
176
  image_array = cv2.imread(img)
177
 
178
  for i, line in enumerate(lines_data):
179
  line_id = str(i).zfill(4)
180
+ cropped_image = self.crop_line_image(image_array, line["coords"])
181
+ transcription = line["transcription"]
 
 
 
182
 
183
+ if not transcription:
 
 
 
 
 
184
  print(f"Invalid transcription: {transcription}")
185
  continue
186
 
 
187
  unique_key = f"{volume}_{img_filename}_{line_id}"
188
+ yield unique_key, {"image": cropped_image, "transcription": transcription}
189
+
190
+ def inst_seg_lines_within_regions(self, imgs_xmls):
191
+ """Process for cropped images with text line annotations."""
192
+ for img_path, xml_path in imgs_xmls:
193
+ img_filename, volume = self._extract_filename_and_volume(img_path, xml_path)
194
+ image = PILImage.open(img_path)
195
+ root = self._parse_xml(xml_path)
196
+ namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
197
+
198
+ # Iterate through each TextRegion
199
+ try:
200
+ for reg_ind, region in enumerate(root.findall(".//ns:TextRegion", namespaces=namespaces)):
201
+ reg_id = str(reg_ind).zfill(4)
202
+ region_polygon = self._get_polygon(region, namespaces)
203
+ min_x, min_y, max_x, max_y = self._get_bbox(region_polygon)
204
+ cropped_region_image = self.crop_image(image, region_polygon)
205
+
206
+ annotations = self._get_line_annotations_within_region(
207
+ region, namespaces, min_x, min_y, region_polygon
208
  )
 
 
209
 
210
+ unique_key = f"{volume}_{img_filename}_{reg_id}"
211
+ try:
212
+ yield (
213
+ unique_key,
214
+ {
215
+ "image": {"bytes": self._image_to_bytes(cropped_region_image)},
216
+ "annotations": annotations,
217
+ "image_name": unique_key,
218
+ },
219
+ )
220
+ except:
221
+ print("still error")
222
+ continue
223
+ except Exception:
224
+ continue
225
+
226
+ def inst_seg_regions_and_lines(self, imgs_xmls):
227
+ """Process for original images with both region and line annotations."""
228
+ for img_path, xml_path in imgs_xmls:
229
+ img_filename, volume = self._extract_filename_and_volume(img_path, xml_path)
230
+ image = PILImage.open(img_path)
231
+ root = self._parse_xml(xml_path)
232
+ annotations = self._get_region_and_line_annotations(root)
233
+
234
+ unique_key = f"{volume}_{img_filename}"
235
+ yield unique_key, {"image_name": unique_key, "image": image, "annotations": annotations}
236
+
237
+ def inst_seg_lines(self, imgs_xmls):
238
+ """Process for original images with text line annotations only."""
239
+ for img_path, xml_path in imgs_xmls:
240
+ img_filename, volume = self._extract_filename_and_volume(img_path, xml_path)
241
+ image = PILImage.open(img_path)
242
+ root = self._parse_xml(xml_path)
243
+
244
+ annotations = self._get_line_annotations(root)
245
+
246
+ unique_key = f"{volume}_{img_filename}"
247
+ yield unique_key, {"image_name": unique_key, "image": image, "annotations": annotations}
248
+
249
+ def inst_seg_regions(self, imgs_xmls):
250
+ """Process for original images with text region annotations only."""
251
+ for img_path, xml_path in imgs_xmls:
252
+ img_filename, volume = self._extract_filename_and_volume(img_path, xml_path)
253
+ image = PILImage.open(img_path)
254
+ root = self._parse_xml(xml_path)
255
+
256
+ annotations = self._get_region_annotations(root)
257
+
258
+ unique_key = f"{volume}_{img_filename}"
259
+ yield unique_key, {"image_name": unique_key, "image": image, "annotations": annotations}
260
+
261
+ def _extract_filename_and_volume(self, img, xml):
262
+ """Extracts the filename and volume from the image and XML paths."""
263
+ assert Path(img).stem == Path(xml).stem
264
+ img_filename = Path(img).stem
265
+ volume = PurePath(img).parts[-2]
266
+ return img_filename, volume
267
+
268
+ def _parse_xml(self, xml_path):
269
+ """Parses the XML file and returns the root element."""
270
  try:
271
+ tree = ET.parse(xml_path)
272
+ return tree.getroot()
273
  except ET.ParseError as e:
274
+ print(f"XML Parse Error: {e}")
275
+ return None
276
+
277
+ def _get_line_annotations_within_region(self, region, namespaces, min_x, min_y, region_polygon):
278
+ """Generates annotations for text lines within a region."""
279
+ annotations = []
280
+ for line in region.findall(".//ns:TextLine", namespaces=namespaces):
281
+ line_polygon = self._get_polygon(line, namespaces)
282
+ clipped_line_polygon = self.clip_polygon_to_region(line_polygon, region_polygon)
283
+
284
+ if len(clipped_line_polygon) < 3:
285
+ print(f"Invalid polygon detected for line: {line_polygon}, clipped: {clipped_line_polygon}")
286
+ continue
287
+
288
+ translated_polygon = [(x - min_x, y - min_y) for x, y in clipped_line_polygon]
289
+ transcription = "".join(line.itertext()).strip()
290
+
291
+ annotations.append(
292
+ {
293
+ "polygon": translated_polygon,
294
+ "transcription": transcription,
295
+ "class": "textline",
296
+ }
297
+ )
298
+ return annotations
299
+
300
+ def _get_region_and_line_annotations(self, root):
301
+ """Generates annotations for both text regions and lines."""
302
+ annotations = []
303
+
304
+ # Get region annotations
305
+ annotations.extend(self._get_region_annotations(root))
306
+
307
+ # Get line annotations
308
+ annotations.extend(self._get_line_annotations(root))
309
+
310
+ return annotations
311
 
312
+ def _get_line_annotations(self, root):
313
+ """Generates annotations for text lines only."""
314
  namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
315
+ annotations = []
316
+ for region in root.findall(".//ns:TextRegion", namespaces=namespaces):
317
+ for line in region.findall(".//ns:TextLine", namespaces=namespaces):
318
+ line_polygon = self._get_polygon(line, namespaces)
319
+ transcription = "".join(line.itertext()).strip()
320
+ annotations.append(
321
+ {
322
+ "polygon": line_polygon,
323
+ "transcription": transcription,
324
+ "class": "textline",
325
+ }
326
+ )
327
+ return annotations
328
+
329
+ def _get_region_annotations(self, root):
330
+ """Generates annotations for text regions only."""
331
+ namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
332
+ annotations = []
333
+ for region in root.findall(".//ns:TextRegion", namespaces=namespaces):
334
+ region_polygon = self._get_polygon(region, namespaces)
335
+ annotations.append(
336
+ {
337
+ "polygon": region_polygon,
338
+ "transcription": "",
339
+ "class": "textregion",
340
+ }
341
+ )
342
+ return annotations
343
+
344
+ def _image_to_bytes(self, image):
345
+ """Converts a PIL image to bytes."""
346
+ with io.BytesIO() as output:
347
+ image.save(output, format="PNG")
348
+ return output.getvalue()
349
+
350
+ def crop_image(self, img_pil, coords):
351
+ coords = np.array(coords)
352
+ img = np.array(img_pil)
353
+ mask = np.zeros(img.shape[0:2], dtype=np.uint8)
354
+
355
+ try:
356
+ # Ensure the coordinates are within the bounds of the image
357
+ coords[:, 0] = np.clip(coords[:, 0], 0, img.shape[1] - 1)
358
+ coords[:, 1] = np.clip(coords[:, 1], 0, img.shape[0] - 1)
359
+
360
+ # Draw the mask
361
+ cv2.drawContours(mask, [coords], -1, (255, 255, 255), -1, cv2.LINE_AA)
362
+
363
+ # Apply mask to image
364
+ res = cv2.bitwise_and(img, img, mask=mask)
365
+ rect = cv2.boundingRect(coords)
366
+
367
+ # Ensure the bounding box is within the image dimensions
368
+ rect = (
369
+ max(0, rect[0]),
370
+ max(0, rect[1]),
371
+ min(rect[2], img.shape[1] - rect[0]),
372
+ min(rect[3], img.shape[0] - rect[1]),
373
+ )
374
+
375
+ wbg = np.ones_like(img, np.uint8) * 255
376
+ cv2.bitwise_not(wbg, wbg, mask=mask)
377
+
378
+ # Overlap the resulted cropped image on the white background
379
+ dst = wbg + res
380
+
381
+ # Use validated rect for cropping
382
+ cropped = dst[rect[1] : rect[1] + rect[3], rect[0] : rect[0] + rect[2]]
383
+
384
+ # Convert the NumPy array back to a PIL image
385
+ cropped_pil = PILImage.fromarray(cropped)
386
+
387
+ return cropped_pil
388
+
389
+ except Exception as e:
390
+ print(f"Error in cropping: {e}")
391
+ return img_pil # Return the original image if there's an error
392
+
393
+ def _create_mask(self, shape, coords):
394
+ """Creates a mask for the specified polygon coordinates."""
395
+ mask = np.zeros(shape, dtype=np.uint8)
396
+ cv2.drawContours(mask, [np.array(coords)], -1, (255, 255, 255), -1, cv2.LINE_AA)
397
+ return mask
398
+
399
+ def parse_pagexml(self, xml):
400
+ """Parses the PAGE XML and extracts line data."""
401
+ root = self._parse_xml(xml)
402
+ if not root:
403
  return []
404
 
405
+ namespaces = {"ns": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
406
  lines_data = []
407
+ for region in root.findall(".//ns:TextRegion", namespaces):
408
+ for line in region.findall(".//ns:TextLine", namespaces):
 
 
409
  try:
410
  line_id = line.get("id")
411
+ coords = self._get_polygon(line, namespaces)
412
+ transcription = line.find("ns:TextEquiv/ns:Unicode", namespaces).text or ""
 
 
413
  lines_data.append({"line_id": line_id, "coords": coords, "transcription": transcription})
414
  except Exception as e:
415
+ print(f"Error parsing line: {e}")
 
 
416
  return lines_data
417
 
418
  def crop_line_image(self, img, coords):
419
+ """Crops a line image based on the provided coordinates."""
420
+ mask = self._create_mask(img.shape[:2], coords)
421
+
422
  coords = np.array(coords)
 
 
423
 
424
+ # Apply mask to image
 
 
 
425
  res = cv2.bitwise_and(img, img, mask=mask)
426
  rect = cv2.boundingRect(coords)
427
 
428
+ # Create a white background and overlay the cropped image
429
  wbg = np.ones_like(img, np.uint8) * 255
430
  cv2.bitwise_not(wbg, wbg, mask=mask)
 
 
431
  dst = wbg + res
432
 
433
  cropped = dst[rect[1] : rect[1] + rect[3], rect[0] : rect[0] + rect[2]]
434
 
435
+ return self.cv2_to_pil(cropped)
436
+
437
+ def _get_polygon(self, element, namespaces):
438
+ """Extracts polygon points from a PAGE XML element."""
439
+ coords = element.find(".//ns:Coords", namespaces=namespaces).attrib["points"]
440
+ return [tuple(map(int, p.split(","))) for p in coords.split()]
441
 
442
+ def _get_bbox(self, polygon):
443
+ """Calculates the bounding box from polygon points."""
444
+ min_x = min(p[0] for p in polygon)
445
+ min_y = min(p[1] for p in polygon)
446
+ max_x = max(p[0] for p in polygon)
447
+ max_y = max(p[1] for p in polygon)
448
+ return min_x, min_y, max_x, max_y
449
 
450
+ def clip_polygon_to_region(self, line_polygon, region_polygon):
451
+ """
452
+ Clips a line polygon to ensure it's inside the region polygon using Shapely.
453
+ Returns the original line polygon if the intersection is empty.
454
+ """
455
+ # Convert lists of points to Shapely Polygons
456
+ line_poly = Polygon(line_polygon)
457
+ region_poly = Polygon(region_polygon)
458
+
459
+ # Compute the intersection of the line polygon with the region polygon
460
+ try:
461
+ intersection = line_poly.intersection(region_poly)
462
+ except Exception:
463
+ return line_polygon
464
+
465
+ # Return the intersection points as a list of tuples
466
+ if intersection.is_empty:
467
+ print(
468
+ f"No intersection found for line_polygon {line_polygon} within region_polygon {region_polygon}, returning original polygon."
469
+ )
470
+ return line_polygon
471
+ elif intersection.geom_type == "Polygon":
472
+ return list(intersection.exterior.coords)
473
+ elif intersection.geom_type == "MultiPolygon":
474
+ # If the result is a MultiPolygon, take the largest by area (or another heuristic)
475
+ largest_polygon = max(intersection.geoms, key=lambda p: p.area)
476
+ return list(largest_polygon.exterior.coords)
477
+ elif intersection.geom_type == "LineString":
478
+ return list(intersection.coords)
479
+ else:
480
+ print(f"Unexpected intersection type: {intersection.geom_type}")
481
+ return line_polygon
482
+
483
+ def cv2_to_pil(self, cv2_image):
484
+ """Converts an OpenCV image to a PIL Image."""
485
  cv2_image_rgb = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
486
+ return PILImage.fromarray(cv2_image_rgb)