mahdibaghbanzadeh committed on
Commit 1a25c36 · verified · 1 Parent(s): 8287554

Create multi_sp.py

Files changed (1)
  1. multi_sp.py +204 -0
multi_sp.py ADDED
@@ -0,0 +1,204 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
+ # contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Script for the multi-species genomes dataset. This dataset contains the genomes
+ from 850 different species."""
+
+ import random
+ from typing import List
+
+ import datasets
+ from Bio import SeqIO
+
+
+ _CITATION = """\
+ @article{o2016reference,
+     title={Reference sequence (RefSeq) database at NCBI: current status, taxonomic expansion, and functional annotation},
+     author={O'Leary, Nuala A and Wright, Mathew W and Brister, J Rodney and Ciufo, Stacy and Haddad, Diana and McVeigh, Rich and Rajput, Bhanu and Robbertse, Barbara and Smith-White, Brian and Ako-Adjei, Danso and others},
+     journal={Nucleic acids research},
+     volume={44},
+     number={D1},
+     pages={D733--D745},
+     year={2016},
+     publisher={Oxford University Press}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Dataset made of diverse genomes available on NCBI.
+ """
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/"
+
+ _LICENSE = "https://www.ncbi.nlm.nih.gov/home/about/policies/"
+
+ # chunk lengths (in bp) for which a builder config is generated
+ _CHUNK_LENGTHS = [6000, 12000]
+
+
+ def filter_fn(char: str) -> str:
+     """Map any letter that is not a standard base nucleotide to 'N'."""
+     if char in {'A', 'T', 'C', 'G'}:
+         return char
+     return 'N'
+
+
+ def clean_sequence(seq: str) -> str:
+     """Uppercase a chunk of DNA and restrict its letters to A, T, C, G and N."""
+     seq = seq.upper()
+     return ''.join(map(filter_fn, seq))
+
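+ # For illustration: soft-masked and ambiguous letters all collapse to 'N',
+ # e.g. clean_sequence("acgtRyn-") == "ACGTNNNN".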
+
+
+ class MultiSpeciesGenomesConfig(datasets.BuilderConfig):
+     """BuilderConfig for genome reads."""
+
+     def __init__(self, *args, chunk_length: int, overlap: int = 100, **kwargs):
+         """BuilderConfig for the multi-species genomes.
+         Args:
+             chunk_length (:obj:`int`): Chunk length in base pairs.
+             overlap (:obj:`int`): Overlap in base pairs between two consecutive chunks (defaults to 100).
+             **kwargs: keyword arguments forwarded to super.
+         """
+         # the config is named after its chunk length, e.g. chunk_length=6000 -> "6kbp"
+         num_kbp = chunk_length // 1000
+         super().__init__(
+             *args,
+             name=f'{num_kbp}kbp',
+             **kwargs,
+         )
+         self.chunk_length = chunk_length
+         self.overlap = overlap
+
+
+ class MultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
+     """Genomes of all the species listed in urls.txt, filtered and split into chunks of consecutive
+     nucleotides. URL blocks in urls.txt are separated by empty lines; the first block forms the
+     train split, the second the validation split and the third the test split."""
+
+     VERSION = datasets.Version("0.0.1")
+     BUILDER_CONFIG_CLASS = MultiSpeciesGenomesConfig
+     BUILDER_CONFIGS = [MultiSpeciesGenomesConfig(chunk_length=chunk_length) for chunk_length in _CHUNK_LENGTHS]
+     DEFAULT_CONFIG_NAME = "6kbp"
+
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "sequence": datasets.Value("string"),
+                 "description": datasets.Value("string"),
+                 "start_pos": datasets.Value("int32"),
+                 "end_pos": datasets.Value("int32"),
+                 "fasta_url": datasets.Value("string")
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
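+     # A generated example looks like (values illustrative):
+     #   {"sequence": "ATTCN...", "description": "<fasta header>",
+     #    "start_pos": 0, "end_pos": 6200, "fasta_url": "<source fasta url>"}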
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+
+         urls_filepath = dl_manager.download_and_extract('urls.txt')
+         train_urls, test_urls, validation_urls = [], [], []
+
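+         # Expected urls.txt layout: three blocks of FASTA URLs separated by
+         # blank lines (placeholders shown instead of real URLs):
+         #     <train urls, one per line>
+         #
+         #     <validation urls, one per line>
+         #
+         #     <test urls, one per line>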
+         # all lines before the first empty line are the train URLs; the
+         # second block holds the validation URLs and the last one the test URLs
+         with open(urls_filepath) as urls_file:
+             urls = [line.rstrip() for line in urls_file]
+         split = 0
+         for url in urls:
+             if url == '':
+                 split += 1
+                 continue
+             if split == 0:
+                 train_urls.append(url)
+             elif split == 1:
+                 validation_urls.append(url)
+             else:
+                 test_urls.append(url)
+         random.seed(42)
+         random.shuffle(train_urls)
+
+         train_downloaded_files = dl_manager.download_and_extract(train_urls)
+         test_downloaded_files = dl_manager.download_and_extract(test_urls)
+         validation_downloaded_files = dl_manager.download_and_extract(validation_urls)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length, "split": "train"}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": validation_downloaded_files, "chunk_length": self.config.chunk_length, "split": "validation"}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_downloaded_files, "chunk_length": self.config.chunk_length, "split": "test"}),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, files, chunk_length, split):
+         key = 0
+         for file in files:
+             with open(file, 'rt') as f:
+                 fasta_sequences = SeqIO.parse(f, 'fasta')
+                 try:
+                     for record in fasta_sequences:
+
+                         # parse the sequence and its description from the fasta record
+                         sequence, description = str(record.seq), record.description
+
+                         # clean chromosome sequence
+                         sequence = clean_sequence(sequence)
+                         seq_length = len(sequence)
+
+                         # number of full chunks once the two overlap margins are set aside
+                         num_chunks = (seq_length - 2 * self.config.overlap) // chunk_length
+
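+                         # e.g. with the default "6kbp" config (chunk_length=6000, overlap=100),
+                         # a 12,400 bp record gives (12400 - 200) // 6000 = 2 chunks, and every
+                         # yielded chunk spans chunk_length + 2 * overlap = 6,200 bp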
+                         if num_chunks < 1:
+                             continue
+
+                         # drop the tail that does not fill a complete chunk
+                         sequence = sequence[:(chunk_length * num_chunks + 2 * self.config.overlap)]
+                         seq_length = len(sequence)
+
+                         chunk_ids = list(range(num_chunks))
+                         if split == 'validation':
+                             # keep a reproducible 20% subsample of the chunks
+                             random.seed(42)
+                             random.shuffle(chunk_ids)
+                             n_samples = int(len(chunk_ids) * 0.2)
+                             chunk_ids = chunk_ids[:n_samples]
+                         for i in chunk_ids:
+                             # get chunk boundaries, keeping `overlap` bp of context on each side
+                             start_pos = i * chunk_length
+                             end_pos = min(seq_length, (i + 1) * chunk_length + 2 * self.config.overlap)
+                             chunk_sequence = sequence[start_pos:end_pos]
+
+                             # yield chunk
+                             yield key, {
+                                 'sequence': chunk_sequence,
+                                 'description': description,
+                                 'start_pos': start_pos,
+                                 'end_pos': end_pos,
+                                 'fasta_url': file.split('::')[-1]
+                             }
+                             key += 1
+                 except Exception as e:
+                     print(f"Error while processing {file}: {e}")
+                     continue
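
For reference, a minimal sketch of how a script like this is typically consumed, assuming multi_sp.py and its companion urls.txt sit in the working directory (script-based datasets require trust_remote_code in recent versions of the datasets library, and the exact call may vary with your version):

from datasets import load_dataset

# Select the "6kbp" configuration generated from _CHUNK_LENGTHS.
dataset = load_dataset("multi_sp.py", name="6kbp", trust_remote_code=True)

# Each example carries the chunk plus its provenance.
example = dataset["train"][0]
print(example["description"], example["start_pos"], example["end_pos"])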