lisawen committed · Commit 23506a6 · verified · 1 Parent(s): e6c2d58

Update soybean_dataset.py

Files changed (1)
  1. soybean_dataset.py +35 -46
soybean_dataset.py CHANGED
@@ -21,7 +21,7 @@ import os
 from typing import List
 import datasets
 import logging
-import csv
+import zipfile
 import numpy as np
 from PIL import Image
 import os
@@ -32,11 +32,6 @@ from numpy import asarray
 import requests
 from io import BytesIO
 from numpy import asarray
-from concurrent.futures import ThreadPoolExecutor, as_completed
-import requests
-import asyncio
-from functools import wraps
-import logging
 
 
 # TODO: Add BibTeX citation
@@ -70,11 +65,11 @@ _LICENSE = "Under a Creative Commons license"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv"
+
 _URLs = {
-    "train" : "https://raw.githubusercontent.com/lisawen0707/soybean/main/train_dataset.csv",
-    "test": "https://raw.githubusercontent.com/lisawen0707/soybean/main/test_dataset.csv",
-    "valid": "https://raw.githubusercontent.com/lisawen0707/soybean/main/valid_dataset.csv"
+    "train" : "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/train.zip?download=true",
+    "test": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/test.zip?download=true",
+    "valid": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/valid.zip?download=true"
 }
 
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
@@ -90,8 +85,7 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
-                    "unique_id": datasets.Value("string"),
-                    "sets": datasets.Value("string"),
+
                     "original_image": datasets.Image(),
                     "segmentation_image": datasets.Image(),
 
@@ -121,49 +115,44 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
         ]
-
-    def process_image(self, image_url):
-        response = requests.get(image_url)
-        response.raise_for_status()  # This will raise an exception if there is a download error
-
-        # Open the image from the downloaded bytes and return the PIL Image
-        img = Image.open(BytesIO(response.content))
-        return img
+    # def process_image(self, image_url):
+    #     response = requests.get(image_url)
+    #     response.raise_for_status()  # This will raise an exception if there is a download error
+
+    #     # Open the image from the downloaded bytes and return the PIL Image
+    #     img = Image.open(BytesIO(response.content))
+    #     return img
 
 
     def _generate_examples(self, filepath):
-        # """Yields examples as (key, example) tuples."""
         logging.info("generating examples from = %s", filepath)
-
-        with open(filepath, encoding="utf-8") as f:
-            data = csv.DictReader(f)
-
-            for row in data:
-                # Assuming the 'original_image' column has the full path to the image file
-                unique_id = row['unique_id']
-                original_image_path = row['original_image']
-                segmentation_image_path = row['segmentation_image']
-                sets = row['sets']
-
-                original_image = self.process_image(original_image_path)
-                segmentation_image = self.process_image(segmentation_image_path)
-
-                # Here you need to replace 'initial_radius', 'final_radius', 'initial_angle', 'final_angle', 'target'
-                # with actual columns from your CSV or additional processing you need to do
-                yield row['unique_id'], {
-                    "unique_id": unique_id,
-                    "sets": sets,
+        # Open the zip file
+        with zipfile.ZipFile(filepath, 'r') as zip_ref:
+            # List all the contents of the zip file
+            zip_list = zip_ref.namelist()
+
+            # Create pairs of original and segmentation images
+            images_pairs = [(f, f.replace('_original.jpg', '_segmentation.png')) for f in zip_list if '_original.jpg' in f]
+
+            # Iterate over the pairs and yield examples
+            for original_image_name, segmentation_image_name in images_pairs:
+                # The unique_id is derived from the original image name by stripping away the file extension and the suffix
+                unique_id = original_image_name.split('_')[0]
+
+                # Extract and process the original image
+                with zip_ref.open(original_image_name) as original_file:
+                    original_image = Image.open(original_file)
+
+                # Extract and process the segmentation image
+                with zip_ref.open(segmentation_image_name) as segmentation_file:
+                    segmentation_image = Image.open(segmentation_file)
+
+                yield unique_id, {
                     "original_image": original_image,
                     "segmentation_image": segmentation_image,
-                    # ... add other features if necessary
-                }
+                }
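Note: one caveat with the new `_generate_examples` is that PIL's Image.open reads lazily, and each `with zip_ref.open(...)` block closes the underlying zip member as soon as it exits, so pixel data may no longer be readable by the time `datasets` encodes the example. A minimal sketch of a safer variant, assuming the same archive layout as the committed code, forces a full decode with Image.load() while each member is still open:

    def _generate_examples(self, filepath):
        logging.info("generating examples from = %s", filepath)
        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            originals = [f for f in zip_ref.namelist() if '_original.jpg' in f]
            for original_image_name in originals:
                segmentation_image_name = original_image_name.replace('_original.jpg', '_segmentation.png')
                unique_id = original_image_name.split('_')[0]

                # Force a full decode while each zip member is still open.
                with zip_ref.open(original_image_name) as original_file:
                    original_image = Image.open(original_file)
                    original_image.load()
                with zip_ref.open(segmentation_image_name) as segmentation_file:
                    segmentation_image = Image.open(segmentation_file)
                    segmentation_image.load()

                yield unique_id, {
                    "original_image": original_image,
                    "segmentation_image": segmentation_image,
                }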
 
 
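The `_split_generators` body is not shown in this diff, but the context lines pass `downloaded_files[...]` to each split, and `_generate_examples` now opens that path with zipfile.ZipFile, so the archives must be fetched without extraction. A hedged sketch of a matching implementation, using dl_manager.download (which returns local paths to the intact zips):

    def _split_generators(self, dl_manager):
        # Fetch the three zip archives; do not extract them, since
        # _generate_examples reads members straight out of each archive.
        downloaded_files = dl_manager.download(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
        ]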
 
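With this version of the script on the Hub, the dataset should load per split in the usual way; depending on the installed `datasets` version, script-based datasets may also need trust_remote_code=True:

    from datasets import load_dataset

    # Each example carries an original photo and its segmentation mask as PIL images.
    dataset = load_dataset("lisawen/soybean_dataset", trust_remote_code=True)
    example = dataset["train"][0]
    print(example["original_image"].size, example["segmentation_image"].size)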