# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
import evaluate
from moses import metrics
from tdc import Evaluator, Oracle


_DESCRIPTION = """
A comprehensive suite of metrics for evaluating molecular generation models,
measuring how well a model produces novel, chemically valid molecules that are
relevant to specific research objectives.
"""


_KWARGS_DESCRIPTION = """
Args:
    generated_smiles (`list` of `string`): SMILES (Simplified Molecular Input Line Entry System) strings generated by the model, ideally more than 30,000 samples.
    train_smiles (`list` of `string`): SMILES strings used to train the model, serving as the reference for assessing the novelty and diversity of the generated molecules.

Returns:
    results (`dict`): Dictionary mapping each metric name to its computed value.
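
Examples:
    A minimal sketch; the path passed to ``evaluate.load`` is an assumption and
    depends on where this script is hosted, and the SMILES below are placeholders.

    >>> molgen_metric = evaluate.load("path/to/molgen_metric")  # doctest: +SKIP
    >>> results = molgen_metric.compute(
    ...     generated_smiles=["CCO", "c1ccccc1"],
    ...     train_smiles=["CCO", "CCN"],
    ... )  # doctest: +SKIP
    >>> print(results["results"]["valid"])  # doctest: +SKIP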
"""


_CITATION = """
@article{DBLP:journals/corr/abs-1811-12823,
  author       = {Daniil Polykovskiy and
                  Alexander Zhebrak and
                  Benjam{\'{\i}}n S{\'{a}}nchez{-}Lengeling and
                  Sergey Golovanov and
                  Oktai Tatanov and
                  Stanislav Belyaev and
                  Rauf Kurbanov and
                  Aleksey Artamonov and
                  Vladimir Aladinskiy and
                  Mark Veselov and
                  Artur Kadurin and
                  Sergey I. Nikolenko and
                  Al{\'{a}}n Aspuru{-}Guzik and
                  Alex Zhavoronkov},
  title        = {Molecular Sets {(MOSES):} {A} Benchmarking Platform for Molecular
                  Generation Models},
  journal      = {CoRR},
  volume       = {abs/1811.12823},
  year         = {2018},
  url          = {http://arxiv.org/abs/1811.12823},
  eprinttype    = {arXiv},
  eprint       = {1811.12823},
  timestamp    = {Fri, 26 Nov 2021 15:34:30 +0100},
  biburl       = {https://dblp.org/rec/journals/corr/abs-1811-12823.bib},
  bibsource    = {dblp computer science bibliography, https://dblp.org}
}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class molgen_metric(evaluate.Measurement):
    """TODO: Short description of my evaluation module."""

    def _info(self):
        return evaluate.MeasurementInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
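            # With the "multilabel" config each example is a list of SMILES strings;
            # with the default config each example is a single SMILES string.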
            features=datasets.Features(
                {
                    "generated_smiles": datasets.Sequence(datasets.Value("string")),
                    "train_smiles": datasets.Sequence(datasets.Value("string")),
                }
                if self.config_name == "multilabel"
                else {
                    "generated_smiles": datasets.Value("string"),
                    "train_smiles": datasets.Value("string"),
                }
            ),
            reference_urls=["https://github.com/molecularsets/moses", "https://tdcommons.ai/functions/oracles/"],
        )

    def _compute(self, generated_smiles, train_smiles=None):
        # MOSES metrics (validity, uniqueness, novelty, FCD, ...) computed on the raw
        # generations against the training set.
        results = metrics.get_all_metrics(gen=generated_smiles, train=train_smiles)

        # Drop empty strings before running the TDC evaluator and oracles.
        generated_smiles = [s for s in generated_smiles if s != ""]

        # KL divergence between the physicochemical property distributions of the
        # generated and training molecules.
        evaluator = Evaluator(name="KL_Divergence")
        results["KL_Divergence"] = evaluator(generated_smiles, train_smiles)

        oracle_list = [
            "QED", "SA", "MPO", "GSK3B", "JNK3",
            "DRD2", "LogP", "Rediscovery", "Similarity",
            "Median", "Isomers", "Valsartan_SMARTS", "Hop",
        ]

        for oracle_name in oracle_list:
            oracle = Oracle(name=oracle_name)
            score = oracle(generated_smiles)
            # Oracles may return a list of per-molecule values or a dict of lists
            # (one per target); average each list to report a single number.
            if isinstance(score, dict):
                score = {key: sum(values) / len(values) for key, values in score.items()}
            elif isinstance(score, list):
                score = sum(score) / len(score)
            results[oracle_name] = score

        # Drop the MOSES metrics computed against the scaffold-split test set (TestSF).
        keys_to_remove = ["FCD/TestSF", "SNN/TestSF", "Frag/TestSF", "Scaf/TestSF"]
        for key in keys_to_remove:
            results.pop(key, None)

        return {"results": results}
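

# The returned dictionary nests everything under "results". A sketch of the
# structure (exact keys depend on the installed moses/tdc versions and the
# oracle list above):
#   {"results": {"valid": ..., "unique@1000": ..., "FCD/Test": ..., "Novelty": ...,
#                "KL_Divergence": ..., "QED": ..., "SA": ..., ...}}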