Update soybean_dataset.py
soybean_dataset.py  CHANGED  (+35 -46)
@@ -21,7 +21,7 @@ import os
 from typing import List
 import datasets
 import logging
-import
+import zipfile
 import numpy as np
 from PIL import Image
 import os
@@ -32,11 +32,6 @@ from numpy import asarray
 import requests
 from io import BytesIO
 from numpy import asarray
-from concurrent.futures import ThreadPoolExecutor, as_completed
-import requests
-import asyncio
-from functools import wraps
-import logging
 
 
 # TODO: Add BibTeX citation
@@ -70,11 +65,11 @@ _LICENSE = "Under a Creative Commons license"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
+
 _URLs = {
-    "train" : "https://
-    "test": "https://
-    "valid": "https://
+    "train" : "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/train.zip?download=true",
+    "test": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/test.zip?download=true",
+    "valid": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/valid.zip?download=true"
 }
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -90,8 +85,7 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-
-                    "sets": datasets.Value("string"),
+
                     "original_image": datasets.Image(),
                     "segmentation_image": datasets.Image(),
 
@@ -121,49 +115,44 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
         ]
-
-    def process_image(self,image_url):
-        response = requests.get(image_url)
-        response.raise_for_status() # This will raise an exception if there is a download error
 
-
-
-
+    # def process_image(self,image_url):
+    #     response = requests.get(image_url)
+    #     response.raise_for_status() # This will raise an exception if there is a download error
+
+    #     # Open the image from the downloaded bytes and return the PIL Image
+    #     img = Image.open(BytesIO(response.content))
+    #     return img
 
 
 
     def _generate_examples(self, filepath):
-        #"""Yields examples as (key, example) tuples."""
         logging.info("generating examples from = %s", filepath)
-
-        with
-
-
-
-
-
-
-
-
-
-
-
-
+        # Open the zip file
+        with zipfile.ZipFile(filepath, 'r') as zip_ref:
+            # List all the contents of the zip file
+            zip_list = zip_ref.namelist()
+
+            # Create pairs of original and segmentation images
+            images_pairs = [(f, f.replace('_original.jpg', '_segmentation.png')) for f in zip_list if '_original.jpg' in f]
+
+            # Iterate over the pairs and yield examples
+            for original_image_name, segmentation_image_name in images_pairs:
+                # The unique_id is derived from the original image name by stripping away the file extension and the suffix
+                unique_id = original_image_name.split('_')[0]
+
+                # Extract and process the original image
+                with zip_ref.open(original_image_name) as original_file:
+                    original_image = Image.open(original_file)
 
-        #
-
-
-
-
+                # Extract and process the segmentation image
+                with zip_ref.open(segmentation_image_name) as segmentation_file:
+                    segmentation_image = Image.open(segmentation_file)
+
+                yield unique_id, {
                     "original_image": original_image,
                     "segmentation_image": segmentation_image,
-
-        }
-
-
-
-
+                }
 
 
 
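The core of this commit is the new `_generate_examples` body: instead of the old half-written download logic, each split's zip is opened directly, every `*_original.jpg` member is paired with its `*_segmentation.png` counterpart, and the pair is yielded as one example. The pairing and key logic can be checked in isolation; a minimal sketch, with hypothetical member names since the actual zip layout is not shown in this diff:

# Stand-in for zip_ref.namelist(); these member names are hypothetical.
zip_list = [
    "train/00001_original.jpg",
    "train/00001_segmentation.png",
    "train/00002_original.jpg",
    "train/00002_segmentation.png",
]

# Same comprehension as in the commit: keep originals, map each to its mask.
images_pairs = [(f, f.replace('_original.jpg', '_segmentation.png'))
                for f in zip_list if '_original.jpg' in f]
assert images_pairs[0] == ("train/00001_original.jpg", "train/00001_segmentation.png")

# Same key derivation as in the commit: everything before the first underscore.
# For "train/00001_original.jpg" this yields "train/00001"; keys would collide
# if the id portion itself ever contained an underscore.
unique_id = images_pairs[0][0].split('_')[0]
assert unique_id == "train/00001"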
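One caveat about the extraction step, noted as an observation rather than something this commit addresses: PIL's `Image.open` is lazy and decodes pixel data only on first access, while each image here is opened inside a `with zip_ref.open(...)` block whose file handle is closed again before the example is consumed. Forcing an eager read keeps the yielded image usable; a minimal sketch using a hypothetical `read_image` helper:

import zipfile
from PIL import Image

def read_image(zip_ref: zipfile.ZipFile, member: str) -> Image.Image:
    # Open the zip member and force PIL to decode it eagerly: without
    # .load(), the raster data would be read only after the `with` block
    # has already closed the underlying file handle.
    with zip_ref.open(member) as f:
        img = Image.open(f)
        img.load()
        return img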
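Assuming this script ships as the loading script of the lisawen/soybean_dataset repository that `_URLs` points at, the end-to-end path can be exercised as sketched below; recent `datasets` releases additionally require `trust_remote_code=True` for script-based datasets:

from datasets import load_dataset

# Downloads train.zip/test.zip/valid.zip via _split_generators, then
# consumes the (key, example) pairs yielded by _generate_examples.
ds = load_dataset("lisawen/soybean_dataset", split="train")

example = ds[0]
print(example["original_image"])      # PIL image
print(example["segmentation_image"])  # its paired segmentation mask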