File size: 3,984 Bytes
d17ef4a
7f403fc
d17ef4a
 
 
 
 
 
 
 
 
7f403fc
d17ef4a
 
 
 
68c44ad
 
d17ef4a
 
 
 
 
 
 
 
 
e39ab47
 
 
 
 
d17ef4a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f403fc
 
 
d17ef4a
7f403fc
 
 
 
 
 
d17ef4a
 
 
 
 
7f403fc
d17ef4a
 
 
 
 
 
 
 
 
68c44ad
d17ef4a
 
68c44ad
d17ef4a
 
68c44ad
d17ef4a
 
 
7f403fc
d17ef4a
7f403fc
 
d17ef4a
 
68c44ad
d17ef4a
 
 
7f403fc
 
 
 
d17ef4a
7f403fc
 
d17ef4a
 
 
 
7f403fc
 
d17ef4a
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
#%%
from typing import Any
import pyreadr
import pandas as pd
import numpy as np
import sqlite3
import requests
import datasets
import tempfile
import rdata
import json
from typing import Any

#%%
# ExperimentHub metadata database (SQLite dump) — declared for reference; not
# used directly in this file.  TODO confirm whether it can be removed.
sqlite_url = "https://experimenthub.bioconductor.org/metadata/experimenthub.sqlite3"
# Azure blob prefix where the curatedMetagenomicData .rda payloads live.
DATA_URL = "https://bioconductorhubs.blob.core.windows.net/experimenthub/curatedMetagenomicData/"
# Shared per-sample metadata table (sampleMetadata.rda) mirrored on the HF Hub.
RDATA_URL = "https://huggingface.co/datasets/wwydmanski/metagenomic_curated/resolve/main/sampleMetadata.rda"

# Citation for the upstream curatedMetagenomicData resource (Pasolli et al. 2017).
CITATION = """\
Pasolli E, Schiffer L, Manghi P, Renson A, Obenchain V, Truong D, Beghini F, Malik F, Ramos M, Dowd J, Huttenhower C, Morgan M, Segata N, Waldron L (2017). Accessible, curated metagenomic data through ExperimentHub. Nat. Methods, 14 (11), 1023-1024. ISSN 1548-7091, 1548-7105, doi: 10.1038/nmeth.4468.
"""

# %%

def get_metadata():
    """Download the dataset index and return parallel lists (ehids, descriptions).

    The index is a two-column TSV: ExperimentHub id <TAB> description.
    Descriptions are returned without the trailing newline; callers that need
    clean text should still ``.strip()`` them.

    Raises:
        requests.HTTPError: if the index cannot be fetched.
    """
    index_url = "https://huggingface.co/datasets/wwydmanski/metagenomic_curated/raw/main/index.tsv"
    r = requests.get(index_url, allow_redirects=True)
    r.raise_for_status()  # fail loudly instead of silently parsing an error page

    ehids = []
    descriptions = []
    # Parse the response in memory.  The previous version round-tripped through
    # a temp file created with delete=False (never removed) and wrote it via an
    # unclosed second open() — a file-handle leak with no benefit.
    for line in r.text.splitlines():
        if not line:
            continue  # tolerate a trailing blank line
        # partition() is robust to descriptions that themselves contain tabs,
        # where split("\t") would raise ValueError on unpacking.
        ehid, _, desc = line.partition("\t")
        ehids.append(ehid)
        descriptions.append(desc)
    return ehids, descriptions

# %%
class MetagenomicCurated(datasets.GeneratorBasedBuilder):
    """Builder for the curatedMetagenomicData collection.

    One BuilderConfig is generated per ExperimentHub id (EHID) listed in the
    remote index.  Each config downloads the matching .rda expression set plus
    the shared sample-metadata table and yields one example per sample.
    """

    # NOTE: evaluated at class-definition time, so importing this module
    # requires network access to fetch the index.
    ehids, descriptions = get_metadata()
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=ehid,
                               version=datasets.Version("1.0.0"),
                               description=d.strip())
        for ehid, d in zip(ehids, descriptions)
    ]

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        return super().__call__(*args, **kwds)

    def _info(self):
        # self.features only exists after _generate_examples has run, so the
        # first call falls back to an empty mapping.  Narrowed from a bare
        # ``except:`` to the one exception this lookup can actually raise.
        try:
            features = {name: datasets.Value("float32") for name in self.features}
        except AttributeError:
            features = {}
        return datasets.DatasetInfo(
            description=self.config.description,
            citation=CITATION,
            homepage="https://waldronlab.io/curatedMetagenomicData/index.html",
            license="https://www.r-project.org/Licenses/Artistic-2.0",
            # features=features
        )

    def _split_generators(self, dl_manager):
        """Resolve the EHID to its storage URL and download both files."""
        json_url = f"https://experimenthub.bioconductor.org/ehid/{self.config.name}"
        r = requests.get(json_url, allow_redirects=True)
        r.raise_for_status()  # surface HTTP errors before attempting to parse
        metadata = json.loads(r.content)
        url = metadata['location_prefix'] + metadata['rdatapaths'][0]['rdatapath']

        data_fname: str = dl_manager.download(url)
        rdata_path: str = dl_manager.download(RDATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_fname, "rdata_path": rdata_path},
            ),
        ]

    def _generate_examples(self, filepath, rdata_path):
        """Yield ``(index, {"features": ..., "metadata": ...})`` per sample."""
        parsed = rdata.parser.parse_file(filepath)
        converted = rdata.conversion.convert(parsed)
        # The .rda holds a single ExpressionSet; pull its expression matrix.
        expressions = list(converted.values())[0].assayData['exprs']

        # R stores features x samples; transpose to samples x features.
        data_df = expressions.to_pandas().T
        # Stashed for _info() on later calls.
        self.features = data_df.columns

        study_name = list(converted.keys())[0].split(".")[0]

        meta = pyreadr.read_r(rdata_path)['sampleMetadata']
        metadata = meta.loc[meta['study_name'] == study_name].set_index('sample_id')

        for idx, (sample_id, row) in enumerate(data_df.iterrows()):
            # Renamed from ``i``: the old dict comprehension rebound ``i``
            # while the .loc lookup relied on the outer ``i`` — correct only by
            # Python's evaluation order, and very easy to break.
            try:
                md = {k: str(v) for k, v in metadata.loc[sample_id].to_dict().items()}
            except KeyError:
                # Sample has no row in the metadata table — best-effort fallback.
                md = {}
            yield idx, {
                "features": row.to_dict(),
                "metadata": md,
            }

# %%
if __name__ == "__main__":
    # Smoke test: build one config and collect features/labels as arrays.
    ds = datasets.load_dataset("./metagenomic_curated.py", "EH1726")
    train = ds["train"]
    X = np.array([list(sample.values()) for sample in train["features"]])
    y = np.array([meta["study_condition"] for meta in train["metadata"]])

# %%