Update multi-model-plant-genome-corpus.py
multi-model-plant-genome-corpus.py (+121 -173)
@@ -1,14 +1,4 @@
-"""
-Script for the plant multi-species genomes dataset.
-This dataset contains the genomes from 11 different species.
-
-Added functionality:
-- `split_mode` argument in load_dataset.
-  "chromosome" (default): Original splitting by chromosome records.
-  "genome": Split by genome size (smallest 2 -> val, next 2 -> test, rest -> train).
-"""
-
-from typing import List
+from typing import List, Dict
 import datasets
 from Bio import SeqIO
 import os
@@ -57,12 +47,11 @@ def clean_sequence(seq: str) -> str:
 
 class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
     """BuilderConfig for the Plant Multi Species Pre-training Dataset."""
-    def __init__(self, *args, chunk_length: int, overlap: int = 100, split_mode: str = "chromosome", **kwargs):
+    def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
         """
         Args:
             chunk_length (int): length of each chunk
             overlap (int): overlap size (defaults to 100)
-            split_mode (str): "chromosome" or "genome"
         """
         num_kbp = int(chunk_length/1000)
         super().__init__(
@@ -72,24 +61,20 @@ class PlantMultiSpeciesGenomesConfig(datasets.BuilderConfig):
         )
         self.chunk_length = chunk_length
         self.overlap = overlap
-        self.split_mode = split_mode
 
 class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.1.0")
     BUILDER_CONFIG_CLASS = PlantMultiSpeciesGenomesConfig
-    # Allow user to specify split_mode via load_dataset(..., split_mode="chromosome" or "genome")
     BUILDER_CONFIGS = [
-        PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length ...
+        PlantMultiSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS
     ]
-
     DEFAULT_CONFIG_NAME = "1kbp"
 
     @classmethod
-    def from_config(cls, chunk_length=1000, overlap=100, split_mode="chromosome", **kwargs):
+    def from_config(cls, chunk_length=1000, overlap=100, **kwargs):
         return cls(PlantMultiSpeciesGenomesConfig(
             chunk_length=chunk_length,
             overlap=overlap,
-            split_mode=split_mode,
             **kwargs
         ))
 
@@ -110,6 +95,51 @@ class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
+    def _calculate_split_ratio(self, size: int, total_size: int) -> float:
+        """
+        Calculate the split ratio for validation and test sets based on genome size.
+        Args:
+            size: Size of the current genome
+            total_size: Total size of all genomes
+        Returns:
+            float: Ratio to use for validation and test sets
+        """
+        size_ratio = size / total_size
+
+        if size_ratio > 0.2:     # Very large genomes (>20% of total)
+            return 0.002         # 0.2%
+        elif size_ratio > 0.1:   # Large genomes (10-20% of total)
+            return 0.005         # 0.5%
+        elif size_ratio > 0.05:  # Medium genomes (5-10% of total)
+            return 0.01          # 1%
+        else:                    # Small genomes (<5% of total)
+            return 0.02          # 2%
+
+    def _split_genome(self, records: List, ratio: float) -> Dict[str, List]:
+        """
+        Split a genome's records into train, validation, and test sets.
+        Args:
+            records: List of genome records
+            ratio: Ratio to use for validation and test sets
+        Returns:
+            Dict containing split records
+        """
+        total_records = len(records)
+        n_val = max(1, int(total_records * ratio))
+        n_test = max(1, int(total_records * ratio))
+
+        # Cap at total 10% for validation + test
+        if n_val + n_test > total_records * 0.1:
+            scale = total_records * 0.1 / (n_val + n_test)
+            n_val = int(n_val * scale)
+            n_test = int(n_test * scale)
+
+        return {
+            'val': records[:n_val],
+            'test': records[n_val:n_val+n_test],
+            'train': records[n_val+n_test:]
+        }
+
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         # Download filepaths
         filepaths_txt = dl_manager.download_and_extract('plant_genome_file_names.txt')
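The two helper methods added above carry the main behavior change: each genome now donates records to validation and test in proportion to its share of the corpus, with both holdouts jointly capped at 10% of that genome's records. A small standalone sketch of the same arithmetic, for illustration only; the genome sizes and record counts are made-up placeholders, and `split_ratio` is a copy of the committed tiering logic, not part of the script:

```python
# Illustrative reimplementation of the committed tiering + 10% cap, on toy numbers.
def split_ratio(size: int, total: int) -> float:
    r = size / total
    if r > 0.2:    # very large genome (>20% of corpus)
        return 0.002
    elif r > 0.1:  # large genome (10-20%)
        return 0.005
    elif r > 0.05: # medium genome (5-10%)
        return 0.01
    return 0.02    # small genome (<5%)

total = 1_000_000  # hypothetical total corpus size in bp
for size, n_records in [(300_000, 12), (60_000, 400), (10_000, 50)]:
    ratio = split_ratio(size, total)
    n_val = n_test = max(1, int(n_records * ratio))
    if n_val + n_test > n_records * 0.1:           # the cap from _split_genome
        scale = n_records * 0.1 / (n_val + n_test)
        n_val, n_test = int(n_val * scale), int(n_test * scale)
    print(size, ratio, n_val, n_test, n_records - n_val - n_test)
```

Note the first case: a huge genome with very few records ends up with zero val/test records after the cap, which is exactly why `_split_generators` below guards with `if len(splits[split_type]) > 0` before appending.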
@@ -117,160 +147,78 @@ class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
         filepaths = [os.path.join("plant_genomes", filepath.rstrip()) for filepath in f]
 
         genome_files = [dl_manager.download_and_extract(f) for f in filepaths]
-        [... split_mode dispatch; lines not recoverable ...]
+
+        # Calculate total genome size and individual sizes
+        genome_sizes = {}
+        total_size = 0
+        for file in genome_files:
+            with open(file, 'rt') as f:
+                size = sum(len(str(record.seq)) for record in SeqIO.parse(f, 'fasta'))
+                genome_sizes[file] = size
+                total_size += size
+
+        # Split each genome and store records
+        split_records = {'train': [], 'val': [], 'test': []}
+        for file in genome_files:
+            with open(file, 'rt') as f:
+                records = list(SeqIO.parse(f, 'fasta'))
+                ratio = self._calculate_split_ratio(genome_sizes[file], total_size)
+                splits = self._split_genome(records, ratio)
+
                 for split_type in ['train', 'val', 'test']:
-                    if len( ...
-                    [... lines not recoverable ...]
-                        datasets. ...
-                    [... lines not recoverable ...]
-            ]
-        else:
-            file_lengths = []
-            for file in genome_files:
-                with open(file, 'rt') as f:
-                    length_sum = 0
-                    for rec in SeqIO.parse(f, 'fasta'):
-                        length_sum += len(rec.seq)
-                    file_lengths.append((file, length_sum))
-
-            file_lengths.sort(key=lambda x: x[1])  # smallest first
-
-            val_files = [fl[0] for fl in file_lengths[:1]]
-            test_files = [fl[0] for fl in file_lengths[1:2]]
-            train_files = [fl[0] for fl in file_lengths[2:]]
-
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "files": train_files,
-                        "chunk_length": self.config.chunk_length,
-                        "chromo_splits": None,  # not used
-                        "split_type": "train"
-                    }
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={
-                        "files": val_files,
-                        "chunk_length": self.config.chunk_length,
-                        "chromo_splits": None,
-                        "split_type": "val"
-                    }
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "files": test_files,
-                        "chunk_length": self.config.chunk_length,
-                        "chromo_splits": None,
-                        "split_type": "test"
-                    }
-                ),
-            ]
-
-    def _generate_examples(self, files, chunk_length, chromo_splits, split_type):
+                    if len(splits[split_type]) > 0:
+                        split_records[split_type].append((file, splits[split_type]))
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "split_records": split_records['train'],
+                    "chunk_length": self.config.chunk_length,
+                    "overlap": self.config.overlap
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "split_records": split_records['val'],
+                    "chunk_length": self.config.chunk_length,
+                    "overlap": self.config.overlap
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "split_records": split_records['test'],
+                    "chunk_length": self.config.chunk_length,
+                    "overlap": self.config.overlap
+                }
+            ),
+        ]
+
+    def _generate_examples(self, split_records, chunk_length, overlap):
         key = 0
-
-        for ...
-        [... chromosome-mode generation; lines not recoverable ...]
+        for file, records in split_records:
+            for record in records:
+                sequence = clean_sequence(str(record.seq))
+                seq_length = len(sequence)
+                num_chunks = (seq_length - 2 * overlap) // chunk_length
+
+                if num_chunks < 1:
+                    continue
 
-        [... lines not recoverable ...]
-                chunk_sequence = sequence[start_pos:end_pos]
-
-                yield key, {
-                    'sequence': chunk_sequence,
-                    'description': record.description,
-                    'start_pos': start_pos,
-                    'end_pos': end_pos,
-                }
-                key += 1
-        else:
-            for file in files:
-                with open(file, 'rt') as f:
-                    fasta_sequences = SeqIO.parse(f, 'fasta')
-
-                    for record in fasta_sequences:
-                        sequence = clean_sequence(str(record.seq))
-                        seq_length = len(sequence)
-                        num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length
-
-                        if num_chunks < 1:
-                            continue
-
-                        sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)]
-                        seq_length = len(sequence)
-
-                        for i in range(num_chunks):
-                            start_pos = i * chunk_length
-                            end_pos = min(seq_length, (i+1) * chunk_length + 2 * self.config.overlap)
-                            chunk_sequence = sequence[start_pos:end_pos]
-
-                            yield key, {
-                                'sequence': chunk_sequence,
-                                'description': record.description,
-                                'start_pos': start_pos,
-                                'end_pos': end_pos,
-                            }
-                            key += 1
+                sequence = sequence[:(chunk_length * num_chunks + 2 * overlap)]
+                seq_length = len(sequence)
+
+                for i in range(num_chunks):
+                    start_pos = i * chunk_length
+                    end_pos = min(seq_length, (i+1) * chunk_length + 2 * overlap)
+                    chunk_sequence = sequence[start_pos:end_pos]
+
+                    yield key, {
+                        'sequence': chunk_sequence,
+                        'description': record.description,
+                        'start_pos': start_pos,
+                        'end_pos': end_pos,
+                    }
+                    key += 1
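The chunking scheme itself is unchanged by this commit: every chunk carries `2 * overlap` extra bases, so adjacent chunks share that margin. A quick check of the arithmetic with the script's defaults (`chunk_length=1000`, `overlap=100`) and a hypothetical 3,250 bp cleaned record:

```python
chunk_length, overlap = 1000, 100                         # the script's defaults
seq_length = 3250                                         # hypothetical record length
num_chunks = (seq_length - 2 * overlap) // chunk_length   # -> 3
seq_length = chunk_length * num_chunks + 2 * overlap      # trim to 3200
for i in range(num_chunks):
    start_pos = i * chunk_length
    end_pos = min(seq_length, (i + 1) * chunk_length + 2 * overlap)
    print(start_pos, end_pos)   # (0, 1200), (1000, 2200), (2000, 3200)
```

Each emitted chunk is 1,200 bp, and consecutive chunks overlap by 200 bp (`2 * overlap`).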
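Finally, a minimal sketch of how the updated builder might be exercised. This is an assumption about the caller's environment, not part of the commit: it presumes the script file sits in the working directory, and `trust_remote_code=True` is what recent `datasets` releases require for script-based datasets.

```python
import datasets

# Config names are generated from _CHUNK_LENGTHS; "1kbp" is DEFAULT_CONFIG_NAME.
ds = datasets.load_dataset(
    "multi-model-plant-genome-corpus.py",  # assumption: local path to this script
    name="1kbp",
    trust_remote_code=True,
)
print({split: ds[split].num_rows for split in ds})
print(ds["validation"][0]["description"], ds["validation"][0]["start_pos"])
```

With the genome-proportional splitting above, every genome that retains at least one held-out record now contributes to validation and test, instead of the two smallest genomes supplying them entirely.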