#!/usr/bin/env python
# coding=utf-8
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Paired sequences from the Observed Antibody Space database"""
import csv
import os

import datasets

_CITATION = """\
@article{Olsen_Boyles_Deane_2022,
    title={Observed Antibody Space: A diverse database of cleaned, annotated, and translated unpaired and paired antibody sequences},
    volume={31},
    rights={© 2021 The Authors. Protein Science published by Wiley Periodicals LLC on behalf of The Protein Society.},
    ISSN={1469-896X},
    DOI={10.1002/pro.4205},
    number={1},
    journal={Protein Science},
    author={Olsen, Tobias H. and Boyles, Fergus and Deane, Charlotte M.},
    year={2022},
    pages={141–146},
    language={en}
}

"""
_DESCRIPTION = """\
Paired heavy and light chain antibody sequences for multiple species.
"""

_HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"

_LICENSE = "cc-by-4.0"
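
# S3 prefix hosting the raw OAS paired-sequence data units; _split_generators
# appends "<config_name>/<data_unit_file>" to build the per-file download URLs.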
_BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/raw/"

# _URLS = {
#     "human": _BASE_URL + "human.tar.gz",
#     "rat_SD": _BASE_URL + "rat_SD.tar.gz",
#     "mouse_BALB_c": _BASE_URL + "mouse_BALB_c.tar.gz",
#     "mouse_C57BL_6": _BASE_URL + "mouse_C57BL_6.tar.gz",
# }
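
# Features exposed per paired example: the amino-acid sequence alignment and
# the CDR1-3 loops for both the heavy and the light chain; other columns in
# the raw OAS data units are not surfaced.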
_FEATURES = datasets.Features(
    {
        "sequence_alignment_aa_heavy": datasets.Value("string"),
        "cdr1_aa_heavy": datasets.Value("string"),
        "cdr2_aa_heavy": datasets.Value("string"),
        "cdr3_aa_heavy": datasets.Value("string"),
        "sequence_alignment_aa_light": datasets.Value("string"),
        "cdr1_aa_light": datasets.Value("string"),
        "cdr2_aa_light": datasets.Value("string"),
        "cdr3_aa_light": datasets.Value("string"),
    }
)


class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
    """OAS paired sequence data."""

    VERSION = datasets.Version("1.2.0")
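    # One config per species; each name matches both a directory under
    # _BASE_URL and a "data_units/<name>.txt" listing of data unit files.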
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="human", version=VERSION, description="human"),
        datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
        datasets.BuilderConfig(
            name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
        ),
        datasets.BuilderConfig(
            name="mouse_C57BL_6", version=VERSION, description="mouse_C57BL_6"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    # def _split_generators(self, dl_manager):
    #     urls = _URLS[self.config.name]
    #     data_dir = dl_manager.download_and_extract(urls)
    #     return [
    #         datasets.SplitGenerator(
    #             name=datasets.Split.TRAIN,
    #             gen_kwargs={
    #                 "filepath": os.path.join(data_dir),
    #                 "split": "train",
    #             },
    #         ),
    #     ]

    # def _generate_examples(self, filepath, split):
    #     table = pd.read_parquet(filepath)
    #     for key, row in enumerate(table.itertuples()):
    #         if key == 0:
    #             continue
    #         yield key, {
    #             "sequence_alignment_aa_heavy": row[1],
    #             "cdr1_aa_heavy": row[2],
    #             "cdr2_aa_heavy": row[3],
    #             "cdr3_aa_heavy": row[4],
    #             "sequence_alignment_aa_light": row[5],
    #             "cdr1_aa_light": row[6],
    #             "cdr2_aa_light": row[7],
    #             "cdr3_aa_light": row[8],
    #         }

    def _split_generators(self, dl_manager):
        # Each config ships a "data_units/<config_name>.txt" file (resolved
        # relative to the current working directory) listing the data unit
        # files to download for that species.
        data_unit_file = os.path.join(
            os.getcwd(), "data_units", self.config.name + ".txt"
        )
        with open(data_unit_file, "r") as f:
            urls = [
                f"{_BASE_URL}{self.config.name}/{line.strip()}"
                for line in f
                if line.strip()
            ]
        data_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": data_files,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepaths, split):
        # `split` is forwarded from gen_kwargs; only a train split is produced.
        # A single running index keeps example keys unique across all of the
        # downloaded data unit files.
        idx = 0
        for filepath in filepaths:
            with open(filepath, "r") as f:
                reader = csv.reader(f, delimiter=",")
                for row_number, row in enumerate(reader):
                    # Skip the first two rows of each OAS data unit (the
                    # file-level metadata line and the column header).
                    if row_number < 2:
                        continue
                    # Column offsets follow the OAS paired-sequence CSV layout
                    # and map onto the fields declared in _FEATURES.
                    yield idx, {
                        "sequence_alignment_aa_heavy": row[10],
                        "cdr1_aa_heavy": row[35],
                        "cdr2_aa_heavy": row[39],
                        "cdr3_aa_heavy": row[43],
                        "sequence_alignment_aa_light": row[100],
                        "cdr1_aa_light": row[123],
                        "cdr2_aa_light": row[127],
                        "cdr3_aa_light": row[131],
                    }
                    idx += 1
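

# Usage sketch (illustrative, not part of the loader itself): it assumes this
# script sits in a directory that also contains the "data_units/<config>.txt"
# listings, that the process is started from that directory (the listings are
# resolved against the current working directory), and that the machine can
# reach _BASE_URL. Newer releases of the `datasets` library may additionally
# require trust_remote_code=True when loading a local script.
if __name__ == "__main__":
    # Build the "human" config declared in BUILDER_CONFIGS and print the
    # heavy-chain CDR3 of the first paired example.
    dataset = datasets.load_dataset(
        os.path.abspath(__file__), name="human", split="train"
    )
    print(dataset[0]["cdr3_aa_heavy"])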