Dataset schema:

| Column | Type | Details |
| --- | --- | --- |
| identifier | string | lengths 1 to 155 |
| parameters | string | lengths 2 to 6.09k |
| docstring | string | lengths 11 to 63.4k |
| docstring_summary | string | lengths 0 to 63.4k |
| function | string | lengths 29 to 99.8k |
| function_tokens | sequence | |
| start_point | sequence | |
| end_point | sequence | |
| language | string | 1 distinct value |
| docstring_language | string | lengths 2 to 7 |
| docstring_language_predictions | string | lengths 18 to 23 |
| is_langid_reliable | string | 2 distinct values |
OrionDBExplorer.get_pipeline
(self, pipeline=None, name=None, template=None, created_by=None)
Get a Pipeline object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: pipeline (Pipeline, ObjectID or str): Pipeline object (or the corresponding ObjectID, or its string representation) that we want to retrieve. name (str): Name of the Pipeline. template (Template or ObjectID or str): Template object (or the corresponding ObjectID, or its string representation) from which the Pipeline has to be derived. created_by (str): Unique identifier of the user that created the Pipeline. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Pipeline
Get a Pipeline object from the database.
def get_pipeline(self, pipeline=None, name=None, template=None, created_by=None): """Get a Pipeline object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: pipeline (Pipeline, ObjectID or str): Pipeline object (or the corresponding ObjectID, or its string representation) that we want to retrieve. name (str): Name of the Pipeline. template (Template or ObjectID or str): Template object (or the corresponding ObjectID, or its string representation) from which the Pipeline has to be derived. created_by (str): Unique identifier of the user that created the Pipeline. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Pipeline """ return schema.Pipeline.get( pipeline=pipeline, name=name, template=template, created_by=created_by, )
[ "def", "get_pipeline", "(", "self", ",", "pipeline", "=", "None", ",", "name", "=", "None", ",", "template", "=", "None", ",", "created_by", "=", "None", ")", ":", "return", "schema", ".", "Pipeline", ".", "get", "(", "pipeline", "=", "pipeline", ",", "name", "=", "name", ",", "template", "=", "template", ",", "created_by", "=", "created_by", ",", ")" ]
[ 560, 4 ]
[ 591, 9 ]
python
en
['en', 'en', 'en']
True
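A minimal usage sketch may help make the record above concrete. It assumes ``OrionDBExplorer`` is importable from ``orion.db`` (an assumption about the project layout); the user, database, and pipeline names are hypothetical:

```python
from orion.db import OrionDBExplorer  # import path is an assumption

# Hypothetical connection details.
explorer = OrionDBExplorer(user='jsmith', database='orion')

# Exactly-one-match lookup: returns a single Pipeline object.
# Passing no arguments, or a query that matches several Pipelines,
# raises a ValueError.
pipeline = explorer.get_pipeline(name='lstm_dynamic_threshold')
```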
OrionDBExplorer.add_experiment
(self, name, template, dataset, signals=None, project=None)
Add a new Experiment object to the database. The Experiment will be associated with a Template and a Dataset. Optionally, a list of Signal objects or the corresponding ObjectIds can be passed to associate this Experiment with only a subset of Signals from the Dataset. In this case, the Signals passed need to be part of the Dataset, otherwise an Exception will be raised. If no Signals are passed, all the Signals from the Dataset are used. A project name can also be passed as a string to group experiments of a single project together. Args: name (str): Name of the Experiment. template (Template or ObjectID or str): Template object (or the corresponding ObjectID, or its string representation) that we want to use in this Experiment. dataset (Dataset or ObjectID or str): Dataset object (or the corresponding ObjectID, or its string representation) which will be used for this Experiment. signals (list[Signal, ObjectId or str]): list of Signals (or their corresponding ObjectIds) to be used for this Experiment. project (str): Name of the project which this Experiment belongs to. Raises: NotUniqueError: If an Experiment with the same name for this Template already exists. Returns: Experiment
Add a new Experiment object to the database.
def add_experiment(self, name, template, dataset, signals=None, project=None): """Add a new Experiment object to the database. The Experiment will be associated with a Template and a Dataset. Optionally, a list of Signal objects or the corresponding ObjectIds can be passed to associate this Experiment with only a subset of Signals from the Dataset. In this case, the Signals passed need to be part of the Dataset, otherwise an Exception will be raised. If no Signals are passed, all the Signals from the Dataset are used. A project name can also be passed as a string to group experiments of a single project together. Args: name (str): Name of the Experiment. template (Template or ObjectID or str): Template object (or the corresponding ObjectID, or its string representation) that we want to use in this Experiment. dataset (Dataset or ObjectID or str): Dataset object (or the corresponding ObjectID, or its string representation) which will be used for this Experiment. signals (list[Signal, ObjectId or str]): list of Signals (or their corresponding ObjectIds) to be used for this Experiment. project (str): Name of the project which this Experiment belongs to. Raises: NotUniqueError: If an Experiment with the same name for this Template already exists. Returns: Experiment """ dataset = self.get_dataset(dataset) if not signals: signals = dataset.signals else: for signal in signals: if self.get_signal(signal).dataset != dataset: raise ValueError('All Signals must belong to the Dataset') return schema.Experiment.insert( name=name, project=project, template=template, dataset=dataset, signals=signals, created_by=self.user )
[ "def", "add_experiment", "(", "self", ",", "name", ",", "template", ",", "dataset", ",", "signals", "=", "None", ",", "project", "=", "None", ")", ":", "dataset", "=", "self", ".", "get_dataset", "(", "dataset", ")", "if", "not", "signals", ":", "signals", "=", "dataset", ".", "signals", "else", ":", "for", "signal", "in", "signals", ":", "if", "self", ".", "get_signal", "(", "signal", ")", ".", "dataset", "!=", "dataset", ":", "raise", "ValueError", "(", "'All Signals must belong to the Dataset'", ")", "return", "schema", ".", "Experiment", ".", "insert", "(", "name", "=", "name", ",", "project", "=", "project", ",", "template", "=", "template", ",", "dataset", "=", "dataset", ",", "signals", "=", "signals", ",", "created_by", "=", "self", ".", "user", ")" ]
[ 597, 4 ]
[ 650, 9 ]
python
en
['en', 'en', 'en']
True
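To make the Signal-subset behavior concrete, here is a hedged sketch reusing the ``explorer`` instance from the earlier example; all dataset, template, and signal names are hypothetical:

```python
# Without `signals`, the Experiment uses every Signal in the Dataset.
experiment = explorer.add_experiment(
    name='smap_baseline',
    template='lstm_dynamic_threshold',
    dataset='SMAP',
    project='demo',
)

# With `signals`, only the given subset is used; Signals that do not
# belong to the Dataset raise ValueError.
subset = explorer.add_experiment(
    name='smap_subset',
    template='lstm_dynamic_threshold',
    dataset='SMAP',
    signals=['S-1', 'S-2'],
)
```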
OrionDBExplorer.get_experiments
(self, name=None, template=None, dataset=None, signals=None, project=None, created_by=None)
Query the Experiments collection. All the details about the matching Experiments will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Experiments available. Args: name (str): Name of the Experiment. template (Template or ObjectID or str): Template that the Experiments must use. dataset (Dataset or ObjectID or str): Dataset that the Experiments must use. signals (list[Signal, ObjectId or str]): Signals that the Experiments must use. project (str): Name of the project which the Experiments must belong to. created_by (str): Unique identifier of the user that created the Experiments. Returns: pandas.DataFrame
Query the Experiments collection.
def get_experiments(self, name=None, template=None, dataset=None, signals=None, project=None, created_by=None): """Query the Experiments collection. All the details about the matching Experiments will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Experiments available. Args: name (str): Name of the Experiment. template (Template or ObjectID or str): Template that the Experiments must use. dataset (Dataset or ObjectID or str): Dataset that the Experiments must use. signals (list[Signal, ObjectId or str]): Signals that the Experiments must use. project (str): Name of the project which the Experiments must belong to. created_by (str): Unique identifier of the user that created the Experiments. Returns: pandas.DataFrame """ return schema.Experiment.find( as_df_=True, name=name, project=project, template=template, dataset=dataset, signals=signals, created_by=created_by, )
[ "def", "get_experiments", "(", "self", ",", "name", "=", "None", ",", "template", "=", "None", ",", "dataset", "=", "None", ",", "signals", "=", "None", ",", "project", "=", "None", ",", "created_by", "=", "None", ")", ":", "return", "schema", ".", "Experiment", ".", "find", "(", "as_df_", "=", "True", ",", "name", "=", "name", ",", "project", "=", "project", ",", "template", "=", "template", ",", "dataset", "=", "dataset", ",", "signals", "=", "signals", ",", "created_by", "=", "created_by", ",", ")" ]
[ 652, 4 ]
[ 687, 9 ]
python
en
['en', 'en', 'en']
True
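Because ``find`` is called with ``as_df_=True``, the query returns a ``pandas.DataFrame`` rather than a list of objects. A short sketch, again reusing the hypothetical ``explorer``:

```python
# One row per matching Experiment; calling with no arguments
# would return every Experiment in the collection.
experiments_df = explorer.get_experiments(project='demo')
print(experiments_df.head())
```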
OrionDBExplorer.get_experiment
(self, experiment=None, name=None, project=None, template=None, dataset=None, signals=None, created_by=None)
Get an Experiment object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: experiment (Experiment, ObjectID or str): Experiment object (or the corresponding ObjectID, or its string representation) that we want to retrieve. name (str): Name of the Experiment. template (Template or ObjectID or str): Template that the Experiment must use. dataset (Dataset or ObjectID or str): Dataset that the Experiment must use. signals (list[Signal, ObjectId or str]): Signals that the Experiment must use. project (str): Name of the project which the Experiment must belong to. created_by (str): Unique identifier of the user that created the Experiment. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Experiment
Get an Experiment object from the database.
def get_experiment(self, experiment=None, name=None, project=None, template=None, dataset=None, signals=None, created_by=None): """Get an Experiment object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: experiment (Experiment, ObjectID or str): Experiment object (or the corresponding ObjectID, or its string representation) that we want to retrieve. name (str): Name of the Experiment. template (Template or ObjectID or str): Template that the Experiment must use. dataset (Dataset or ObjectID or str): Dataset that the Experiment must use. signals (list[Signal, ObjectId or str]): Signals that the Experiment must use. project (str): Name of the project which the Experiment must belong to. created_by (str): Unique identifier of the user that created the Experiment. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Experiment """ return schema.Experiment.get( experiment=experiment, name=name, project=project, template=template, dataset=dataset, signals=signals, created_by=created_by, )
[ "def", "get_experiment", "(", "self", ",", "experiment", "=", "None", ",", "name", "=", "None", ",", "project", "=", "None", ",", "template", "=", "None", ",", "dataset", "=", "None", ",", "signals", "=", "None", ",", "created_by", "=", "None", ")", ":", "return", "schema", ".", "Experiment", ".", "get", "(", "experiment", "=", "experiment", ",", "name", "=", "name", ",", "project", "=", "project", ",", "template", "=", "template", ",", "dataset", "=", "dataset", ",", "signals", "=", "signals", ",", "created_by", "=", "created_by", ",", ")" ]
[ 689, 4 ]
[ 729, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.add_datarun
(self, experiment, pipeline)
Add a new Datarun object to the database. The Datarun needs to be associated with an Experiment and a Pipeline. Args: experiment (Experiment or ObjectID or str): Experiment object (or the corresponding ObjectID, or its string representation) to which this Datarun belongs. pipeline (Pipeline or ObjectID or str): Pipeline object (or the corresponding ObjectID, or its string representation) used by this Datarun. Returns: Datarun
Add a new Datarun object to the database.
def add_datarun(self, experiment, pipeline): """Add a new Datarun object to the database. The Datarun needs to be associated with an Experiment and a Pipeline. Args: experiment (Experiment or ObjectID or str): Experiment object (or the corresponding ObjectID, or its string representation) to which this Datarun belongs. pipeline (Pipeline or ObjectID or str): Pipeline object (or the corresponding ObjectID, or its string representation) used by this Datarun. Returns: Datarun """ return schema.Datarun.insert( experiment=experiment, pipeline=pipeline, )
[ "def", "add_datarun", "(", "self", ",", "experiment", ",", "pipeline", ")", ":", "return", "schema", ".", "Datarun", ".", "insert", "(", "experiment", "=", "experiment", ",", "pipeline", "=", "pipeline", ",", ")" ]
[ 735, 4 ]
[ 754, 9 ]
python
en
['en', 'en', 'en']
True
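A brief sketch showing how a Datarun ties the previous objects together, reusing the hypothetical ``experiment`` and ``pipeline`` from the sketches above:

```python
# One Datarun records one execution of a Pipeline over an Experiment.
datarun = explorer.add_datarun(experiment=experiment, pipeline=pipeline)
```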
OrionDBExplorer.get_dataruns
(self, experiment=None, pipeline=None, status=None)
Query the Dataruns collection. All the details about the matching Dataruns will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Dataruns available. Args: experiment (Experiment or ObjectID or str): Experiment to which the Dataruns must belong. pipeline (Pipeline or ObjectID or str): Pipeline which the Dataruns must use. status (str): Status which the Dataruns must have. Returns: pandas.DataFrame
Query the Dataruns collection.
def get_dataruns(self, experiment=None, pipeline=None, status=None): """Query the Dataruns collection. All the details about the matching Dataruns will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Dataruns available. Args: experiment (Experiment or ObjectID or str): Experiment to which the Dataruns must belong. pipeline (Pipeline or ObjectID or str): Pipeline which the Dataruns must use. status (str): Status which the Dataruns must have. Returns: pandas.DataFrame """ return schema.Datarun.find( as_df_=True, experiment=experiment, pipeline=pipeline, status=status, exclude_=['software_versions'], )
[ "def", "get_dataruns", "(", "self", ",", "experiment", "=", "None", ",", "pipeline", "=", "None", ",", "status", "=", "None", ")", ":", "return", "schema", ".", "Datarun", ".", "find", "(", "as_df_", "=", "True", ",", "experiment", "=", "experiment", ",", "pipeline", "=", "pipeline", ",", "status", "=", "status", ",", "exclude_", "=", "[", "'software_versions'", "]", ",", ")" ]
[ 756, 4 ]
[ 782, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.get_datarun
(self, datarun=None, experiment=None, pipeline=None, status=None)
Get a Datarun object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: datarun (Datarun, ObjectID or str): Datarun object (or the corresponding ObjectID, or its string representation) that we want to retrieve. experiment (Experiment or ObjectID or str): Experiment to which the Datarun must belong. pipeline (Pipeline or ObjectID or str): Pipeline which the Datarun must use. status (str): Status which the Datarun must have. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Datarun
Get a Datarun object from the database.
def get_datarun(self, datarun=None, experiment=None, pipeline=None, status=None): """Get a Datarun object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: datarun (Datarun, ObjectID or str): Datarun object (or the corresponding ObjectID, or its string representation) that we want to retrieve. experiment (Experiment or ObjectID or str): Experiment to which the Datarun must belong. pipeline (Pipeline or ObjectID or str): Pipeline which the Datarun must use. status (str): Status which the Datarun must have. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Datarun """ return schema.Datarun.get( datarun=datarun, experiment=experiment, pipeline=pipeline, status=status, )
[ "def", "get_datarun", "(", "self", ",", "datarun", "=", "None", ",", "experiment", "=", "None", ",", "pipeline", "=", "None", ",", "status", "=", "None", ")", ":", "return", "schema", ".", "Datarun", ".", "get", "(", "datarun", "=", "datarun", ",", "experiment", "=", "experiment", ",", "pipeline", "=", "pipeline", ",", "status", "=", "status", ",", ")" ]
[ 784, 4 ]
[ 813, 9 ]
python
en
['en', 'en', 'en']
True
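A usage sketch for the single-object lookup; at least one argument must be non-``None``, and ambiguous queries raise ``ValueError``. The status string shown is an assumption:

```python
# Look up the single Datarun of this Experiment that is still running.
running = explorer.get_datarun(experiment=experiment, status='running')
```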
OrionDBExplorer.add_signalrun
(self, datarun, signal)
Add a new Signalrun object to the database. The Signalrun needs to be associated with a Datarun and a Signal. Args: datarun (Datarun or ObjectID or str): Datarun object (or the corresponding ObjectID, or its string representation) to which this Signalrun belongs. signal (Signal or ObjectID or str): Signal object (or the corresponding ObjectID, or its string representation) used by this Signalrun. Returns: Signalrun
Add a new Signalrun object to the database.
def add_signalrun(self, datarun, signal): """Add a new Signalrun object to the database. The Signalrun needs to be associated with a Datarun and a Signal. Args: datarun (Datarun or ObjectID or str): Datarun object (or the corresponding ObjectID, or its string representation) to which this Signalrun belongs. signal (Signal or ObjectID or str): Signal object (or the corresponding ObjectID, or its string representation) used by this Signalrun. Returns: Signalrun """ return schema.Signalrun.insert( datarun=datarun, signal=signal, )
[ "def", "add_signalrun", "(", "self", ",", "datarun", ",", "signal", ")", ":", "return", "schema", ".", "Signalrun", ".", "insert", "(", "datarun", "=", "datarun", ",", "signal", "=", "signal", ",", ")" ]
[ 819, 4 ]
[ 838, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.get_signalruns
(self, datarun=None, signal=None, status=None)
Query the Signalruns collection. All the details about the matching Signalruns will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Signalruns available. Args: datarun (Datarun or ObjectID or str): Datarun to which the Signalruns must belong. signal (Signal or ObjectID or str): Signal which the Signalruns must use. status (str): Status which the Signalruns must have. Returns: pandas.DataFrame
Query the Signalruns collection.
def get_signalruns(self, datarun=None, signal=None, status=None): """Query the Signalruns collection. All the details about the matching Signalruns will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Signalruns available. Args: datarun (Datarun or ObjectID or str): Datarun to which the Signalruns must belong. signal (Signal or ObjectID or str): Signal which the Signalruns must use. status (str): Status which the Signalruns must have. Returns: pandas.DataFrame """ return schema.Signalrun.find( as_df_=True, datarun=datarun, signal=signal, status=status, )
[ "def", "get_signalruns", "(", "self", ",", "datarun", "=", "None", ",", "signal", "=", "None", ",", "status", "=", "None", ")", ":", "return", "schema", ".", "Signalrun", ".", "find", "(", "as_df_", "=", "True", ",", "datarun", "=", "datarun", ",", "signal", "=", "signal", ",", "status", "=", "status", ",", ")" ]
[ 840, 4 ]
[ 865, 9 ]
python
en
['en', 'gl', 'en']
True
OrionDBExplorer.get_signalrun
(self, signalrun=None, datarun=None, signal=None, status=None)
Get a Signalrun object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: signalrun (Signalrun, ObjectID or str): Signalrun object (or the corresponding ObjectID, or its string representation) that we want to retrieve. datarun (Datarun or ObjectID or str): Datarun to which the Signalrun must belong. signal (Signal or ObjectID or str): Signal which the Signalrun must use. status (str): Status which the Signalrun must have. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Signalrun
Get a Signalrun object from the database.
def get_signalrun(self, signalrun=None, datarun=None, signal=None, status=None): """Get a Signalrun object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: signalrun (Signalrun, ObjectID or str): Signalrun object (or the corresponding ObjectID, or its string representation) that we want to retrieve. datarun (Datarun or ObjectID or str): Datarun to which the Signalrun must belong. signal (Signal or ObjectID or str): Signal which the Signalrun must use. status (str): Status which the Signalrun must have. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Signalrun """ return schema.Signalrun.get( signalrun=signalrun, datarun=datarun, signal=signal, status=status, )
[ "def", "get_signalrun", "(", "self", ",", "signalrun", "=", "None", ",", "datarun", "=", "None", ",", "signal", "=", "None", ",", "status", "=", "None", ")", ":", "return", "schema", ".", "Signalrun", ".", "get", "(", "signalrun", "=", "signalrun", ",", "datarun", "=", "datarun", ",", "signal", "=", "signal", ",", "status", "=", "status", ",", ")" ]
[ 867, 4 ]
[ 897, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.add_event
(self, start_time, stop_time, source, severity=None, signalrun=None, signal=None)
Add a new Event object to the database. The Event needs to have at least a start_time and a stop_time, and be associated with either a Signal or a Signalrun. If a Signalrun is given but no Signal is, the created Event will be associated with the Signalrun's signal value. If both a Signalrun and a Signal are given, the Signal must be the one used by the Signalrun. Args: start_time (int): Timestamp at which the event starts. stop_time (int): Timestamp at which the event ends. source (str): Description of where this Event was created. It must be "orion", "shape matching" or "manually created". severity (float): Severity score value. Optional. signalrun (Signalrun or ObjectID or str): Signalrun object (or the corresponding ObjectID, or its string representation) to which this Event is associated. signal (Signal or ObjectID or str): Signal object (or the corresponding ObjectID, or its string representation) to which this Event is associated. Raises: ValueError: If neither a Signal nor a Signalrun is given, or if the Signal is not the one used by the Signalrun. Returns: Event
Add a new Event object to the database.
def add_event(self, start_time, stop_time, source, severity=None, signalrun=None, signal=None): """Add a new Event object to the database. The Event needs to have at least a start_time and a stop_time, and be associated with either a Signal or a Signalrun. If a Signalrun is given but no Signal is, the created Event will be associated with the Signalrun's signal value. If both a Signalrun and a Signal are given, the Signal must be the one used by the Signalrun. Args: start_time (int): Timestamp at which the event starts. stop_time (int): Timestamp at which the event ends. source (str): Description of where this Event was created. It must be "orion", "shape matching" or "manually created". severity (float): Severity score value. Optional. signalrun (Signalrun or ObjectID or str): Signalrun object (or the corresponding ObjectID, or its string representation) to which this Event is associated. signal (Signal or ObjectID or str): Signal object (or the corresponding ObjectID, or its string representation) to which this Event is associated. Raises: ValueError: If neither a Signal nor a Signalrun is given, or if the Signal is not the one used by the Signalrun. Returns: Event """ if signal is None and signalrun is None: raise ValueError('An Event must be associated to either a Signalrun or a Signal') if signal is not None and signalrun is not None: if self.get_signal(signal) != self.get_signalrun(signalrun).signal: raise ValueError('Signal cannot be different than Signalrun.signal') return schema.Event.insert( signalrun=signalrun, signal=signal or signalrun.signal, start_time=int(start_time), stop_time=int(stop_time), severity=severity, source=source, )
[ "def", "add_event", "(", "self", ",", "start_time", ",", "stop_time", ",", "source", ",", "severity", "=", "None", ",", "signalrun", "=", "None", ",", "signal", "=", "None", ")", ":", "if", "signal", "is", "None", "and", "signalrun", "is", "None", ":", "raise", "ValueError", "(", "'An Event must be associated to either a Signalrun or a Signal'", ")", "if", "signal", "is", "not", "None", "and", "signalrun", "is", "not", "None", ":", "if", "self", ".", "get_signal", "(", "signal", ")", "!=", "self", ".", "get_signalrun", "(", "signalrun", ")", ".", "signal", ":", "raise", "ValueError", "(", "'Signal cannot be different than Signalrun.signal'", ")", "return", "schema", ".", "Event", ".", "insert", "(", "signalrun", "=", "signalrun", ",", "signal", "=", "signal", "or", "signalrun", ".", "signal", ",", "start_time", "=", "int", "(", "start_time", ")", ",", "stop_time", "=", "int", "(", "stop_time", ")", ",", "severity", "=", "severity", ",", "source", "=", "source", ",", ")" ]
[ 903, 4 ]
[ 954, 9 ]
python
en
['en', 'en', 'en']
True
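A hedged sketch of the validation rules described above; the timestamps and signal name are hypothetical:

```python
# An Event needs integer timestamps, a valid source string, and
# either a Signal or a Signalrun (otherwise ValueError is raised).
event = explorer.add_event(
    start_time=1222819200,
    stop_time=1222905600,
    source='manually created',
    signal='S-1',
    severity=0.9,
)
```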
OrionDBExplorer.get_events
(self, signalrun=None, signal=None, source=None)
Query the Events collection. All the details about the matching Events will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Events available. Args: signalrun (Signalrun or ObjectID or str): Signalrun to which the Events must belong. signal (Signal or ObjectID or str): Signal with which the Events must be associated. source (str): Source from which the Events must come. Returns: pandas.DataFrame
Query the Events collection.
def get_events(self, signalrun=None, signal=None, source=None): """Query the Events collection. All the details about the matching Events will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Events available. Args: signalrun (Signalrun or ObjectID or str): Signalrun to which the Events must belong. signal (Signal or ObjectID or str): Signal with which the Events must be associated. source (str): Source from which the Events must come. Returns: pandas.DataFrame """ return schema.Event.find( as_df_=True, signalrun=signalrun, signal=signal, source=source, )
[ "def", "get_events", "(", "self", ",", "signalrun", "=", "None", ",", "signal", "=", "None", ",", "source", "=", "None", ")", ":", "return", "schema", ".", "Event", ".", "find", "(", "as_df_", "=", "True", ",", "signalrun", "=", "signalrun", ",", "signal", "=", "signal", ",", "source", "=", "source", ",", ")" ]
[ 956, 4 ]
[ 981, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.get_event
(self, event=None, signalrun=None, signal=None, source=None)
Get an Event object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: event (Event, ObjectID or str): Event object (or the corresponding ObjectID, or its string representation) that we want to retrieve. signalrun (Signalrun or ObjectID or str): Signalrun to which the Event must belong. signal (Signal or ObjectID or str): Signal with which the Event must be associated. source (str): Source from which the Event must come. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Event
Get an Event object from the database.
def get_event(self, event=None, signalrun=None, signal=None, source=None): """Get an Event object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: event (Event, ObjectID or str): Event object (or the corresponding ObjectID, or its string representation) that we want to retrieve. signalrun (Signalrun or ObjectID or str): Signalrun to which the Event must belong. signal (Signal or ObjectID or str): Signal with which the Event must be associated. source (str): Source from which the Event must come. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Event """ return schema.Event.get( event=event, signalrun=signalrun, signal=signal, source=source, )
[ "def", "get_event", "(", "self", ",", "event", "=", "None", ",", "signalrun", "=", "None", ",", "signal", "=", "None", ",", "source", "=", "None", ")", ":", "return", "schema", ".", "Event", ".", "get", "(", "event", "=", "event", ",", "signalrun", "=", "signalrun", ",", "signal", "=", "signal", ",", "source", "=", "source", ",", ")" ]
[ 983, 4 ]
[ 1013, 9 ]
python
en
['en', 'en', 'en']
True
OrionDBExplorer.add_annotation
(self, event, tag=None, comment=None)
Add a new Annotation object to the database. The Annotation needs to be associated with an Event, and can be given a ``tag`` and a text comment. Args: event (Event or ObjectID or str): Event object (or the corresponding ObjectID, or its string representation) to which this Annotation is associated. tag (str): Tag of this Annotation. comment (str): Text comment of this Annotation. Returns: Annotation
Add a new Annotation object to the database.
def add_annotation(self, event, tag=None, comment=None): """Add a new Annotation object to the database. The Annotation needs to be associated with an Event, and can be given a ``tag`` and a text comment. Args: event (Event or ObjectID or str): Event object (or the corresponding ObjectID, or its string representation) to which this Annotation is associated. tag (str): Tag of this Annotation. comment (str): Text comment of this Annotation. Returns: Annotation """ return schema.Annotation.insert( event=event, tag=tag, comment=comment, )
[ "def", "add_annotation", "(", "self", ",", "event", ",", "tag", "=", "None", ",", "comment", "=", "None", ")", ":", "return", "schema", ".", "Annotation", ".", "insert", "(", "event", "=", "event", ",", "tag", "=", "tag", ",", "comment", "=", "comment", ",", ")" ]
[ 1019, 4 ]
[ 1041, 9 ]
python
en
['en', 'en', 'en']
True
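A short sketch reusing the hypothetical ``event`` from the previous example:

```python
# Attach a human judgment to the Event.
annotation = explorer.add_annotation(
    event=event,
    tag='false_positive',
    comment='Sensor maintenance window, not a real anomaly.',
)
```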
OrionDBExplorer.get_annotations
(self, event=None, tag=None, comment=None, created_by=None)
Query the Annotations collection. All the details about the matching Annotations will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Annotations available. Args: event (Event or ObjectID or str): Event to which the Annotations must belong. tag (str): Tag which the Annotations must have. comment (str): Comment which the Annotations must have. created_by (str): Unique identifier of the user that created the Annotations. Returns: pandas.DataFrame
Query the Annotations collection.
def get_annotations(self, event=None, tag=None, comment=None, created_by=None): """Query the Annotations collection. All the details about the matching Annotations will be returned in a ``pandas.DataFrame``. All the arguments are optional, so a call without arguments will return a table with information about all the Annotations available. Args: event (Event or ObjectID or str): Event to which the Annotations must belong. tag (str): Tag which the Annotations must have. comment (str): Comment which the Annotations must have. created_by (str): Unique identifier of the user that created the Annotations. Returns: pandas.DataFrame """ return schema.Annotation.find( as_df_=True, event=event, tag=tag, comment=comment, created_by=created_by, )
[ "def", "get_annotations", "(", "self", ",", "event", "=", "None", ",", "tag", "=", "None", ",", "comment", "=", "None", ",", "created_by", "=", "None", ")", ":", "return", "schema", ".", "Annotation", ".", "find", "(", "as_df_", "=", "True", ",", "event", "=", "event", ",", "tag", "=", "tag", ",", "comment", "=", "comment", ",", "created_by", "=", "created_by", ",", ")" ]
[ 1043, 4 ]
[ 1070, 9 ]
python
en
['en', 'en', 'en']
True
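And the corresponding DataFrame query, with a hypothetical tag value:

```python
# All Annotations carrying a given tag, as a pandas.DataFrame.
annotations_df = explorer.get_annotations(tag='false_positive')
```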
OrionDBExplorer.get_annotation
(self, annotation=None, event=None, tag=None, created_by=None)
Get an Annotation object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: annotation (Annotation, ObjectID or str): Annotation object (or the corresponding ObjectID, or its string representation) that we want to retrieve. event (Event or ObjectID or str): Event to which the Annotation must belong. tag (str): Tag which the Annotation must have. created_by (str): Unique identifier of the user that created the Annotation. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Annotation
Get an Annotation object from the database.
def get_annotation(self, annotation=None, event=None, tag=None, created_by=None): """Get an Annotation object from the database. All the arguments are optional but empty queries are not allowed, so at least one argument needs to be passed with a value other than ``None``. Args: annotation (Annotation, ObjectID or str): Annotation object (or the corresponding ObjectID, or its string representation) that we want to retrieve. event (Event or ObjectID or str): Event to which the Annotation must belong. tag (str): Tag which the Annotation must have. created_by (str): Unique identifier of the user that created the Annotation. Raises: ValueError: If no arguments are passed with a value other than ``None`` or the query resolves to more than one object. Returns: Annotation """ return schema.Annotation.get( annotation=annotation, event=event, tag=tag, created_by=created_by, )
[ "def", "get_annotation", "(", "self", ",", "annotation", "=", "None", ",", "event", "=", "None", ",", "tag", "=", "None", ",", "created_by", "=", "None", ")", ":", "return", "schema", ".", "Annotation", ".", "get", "(", "annotation", "=", "annotation", ",", "event", "=", "event", ",", "tag", "=", "tag", ",", "created_by", "=", "created_by", ",", ")" ]
[ 1072, 4 ]
[ 1104, 9 ]
python
en
['en', 'en', 'en']
True
TransformersSummarizer.__init__
( self, model_name_or_path: str = "google/pegasus-xsum", model_version: Optional[str] = None, tokenizer: Optional[str] = None, max_length: int = 200, min_length: int = 5, use_gpu: int = 0, clean_up_tokenization_spaces: bool = True, separator_for_single_summary: str = " ", )
Load a Summarization model from Transformers. See the up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__ :param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'facebook/rag-token-nq', 'facebook/rag-sequence-nq'. See https://huggingface.co/models?filter=summarization for full list of available models. :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash. :param tokenizer: Name of the tokenizer (usually the same as model) :param max_length: Maximum length of summarized text :param min_length: Minimum length of summarized text :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use :param clean_up_tokenization_spaces: Whether or not to clean up the potential extra spaces in the text output :param separator_for_single_summary: If `generate_single_summary=True` in `predict()`, we need to join all docs into a single text. This separator appears between those subsequent docs.
Load a Summarization model from Transformers. See the up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__
def __init__( self, model_name_or_path: str = "google/pegasus-xsum", model_version: Optional[str] = None, tokenizer: Optional[str] = None, max_length: int = 200, min_length: int = 5, use_gpu: int = 0, clean_up_tokenization_spaces: bool = True, separator_for_single_summary: str = " ", ): """ Load a Summarization model from Transformers. See the up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__ :param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'facebook/rag-token-nq', 'facebook/rag-sequence-nq'. See https://huggingface.co/models?filter=summarization for full list of available models. :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash. :param tokenizer: Name of the tokenizer (usually the same as model) :param max_length: Maximum length of summarized text :param min_length: Minimum length of summarized text :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use :param clean_up_tokenization_spaces: Whether or not to clean up the potential extra spaces in the text output :param separator_for_single_summary: If `generate_single_summary=True` in `predict()`, we need to join all docs into a single text. This separator appears between those subsequent docs. """ # TODO AutoModelForSeq2SeqLM is only necessary with transformers==4.1.1, with newer versions use the pipeline directly if tokenizer is None: tokenizer = model_name_or_path model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_name_or_path, revision=model_version) self.summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=use_gpu) self.max_length = max_length self.min_length = min_length self.clean_up_tokenization_spaces = clean_up_tokenization_spaces self.separator_for_single_summary = separator_for_single_summary
[ "def", "__init__", "(", "self", ",", "model_name_or_path", ":", "str", "=", "\"google/pegasus-xsum\"", ",", "model_version", ":", "Optional", "[", "str", "]", "=", "None", ",", "tokenizer", ":", "Optional", "[", "str", "]", "=", "None", ",", "max_length", ":", "int", "=", "200", ",", "min_length", ":", "int", "=", "5", ",", "use_gpu", ":", "int", "=", "0", ",", "clean_up_tokenization_spaces", ":", "bool", "=", "True", ",", "separator_for_single_summary", ":", "str", "=", "\" \"", ",", ")", ":", "# TODO AutoModelForSeq2SeqLM is only necessary with transformers==4.1.1, with newer versions use the pipeline directly", "if", "tokenizer", "is", "None", ":", "tokenizer", "=", "model_name_or_path", "model", "=", "AutoModelForSeq2SeqLM", ".", "from_pretrained", "(", "pretrained_model_name_or_path", "=", "model_name_or_path", ",", "revision", "=", "model_version", ")", "self", ".", "summarizer", "=", "pipeline", "(", "\"summarization\"", ",", "model", "=", "model", ",", "tokenizer", "=", "tokenizer", ",", "device", "=", "use_gpu", ")", "self", ".", "max_length", "=", "max_length", "self", ".", "min_length", "=", "min_length", "self", ".", "clean_up_tokenization_spaces", "=", "clean_up_tokenization_spaces", "self", ".", "separator_for_single_summary", "=", "separator_for_single_summary" ]
[ 49, 4 ]
[ 86, 72 ]
python
en
['en', 'error', 'th']
False
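A minimal construction sketch, assuming this class ships with Haystack and that the import path below matches the version at hand (both assumptions); the length settings are arbitrary:

```python
from haystack.summarizer import TransformersSummarizer  # import path assumed

# use_gpu < 0 selects the CPU; >= 0 is the GPU ordinal.
summarizer = TransformersSummarizer(
    model_name_or_path='google/pegasus-xsum',
    max_length=60,
    min_length=10,
    use_gpu=-1,
)
```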
TransformersSummarizer.predict
(self, documents: List[Document], generate_single_summary: bool = False)
Produce the summarization from the supplied documents. These documents can, for example, be retrieved via the Retriever. :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on. :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document. If set to "True", all docs will be joined to a single string that will then be summarized. Important: The summary will depend on the order of the supplied documents! :return: List of Documents, where Document.text contains the summarization and Document.meta["context"] the original, non-summarized text
Produce the summarization from the supplied documents. These documents can, for example, be retrieved via the Retriever.
def predict(self, documents: List[Document], generate_single_summary: bool = False) -> List[Document]: """ Produce the summarization from the supplied documents. These documents can, for example, be retrieved via the Retriever. :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on. :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document. If set to "True", all docs will be joined to a single string that will then be summarized. Important: The summary will depend on the order of the supplied documents! :return: List of Documents, where Document.text contains the summarization and Document.meta["context"] the original, non-summarized text """ if self.min_length > self.max_length: raise AttributeError("min_length cannot be greater than max_length") if len(documents) == 0: raise AttributeError("Summarizer needs at least one document to produce a summary.") contexts: List[str] = [doc.text for doc in documents] if generate_single_summary: # Documents order is very important to produce summary. # Different order of same documents produce different summary. contexts = [self.separator_for_single_summary.join(contexts)] summaries = self.summarizer( contexts, min_length=self.min_length, max_length=self.max_length, return_text=True, clean_up_tokenization_spaces=self.clean_up_tokenization_spaces, ) result: List[Document] = [] for context, summarized_answer in zip(contexts, summaries): cur_doc = Document(text=summarized_answer['summary_text'], meta={"context": context}) result.append(cur_doc) return result
[ "def", "predict", "(", "self", ",", "documents", ":", "List", "[", "Document", "]", ",", "generate_single_summary", ":", "bool", "=", "False", ")", "->", "List", "[", "Document", "]", ":", "if", "self", ".", "min_length", ">", "self", ".", "max_length", ":", "raise", "AttributeError", "(", "\"min_length cannot be greater than max_length\"", ")", "if", "len", "(", "documents", ")", "==", "0", ":", "raise", "AttributeError", "(", "\"Summarizer needs at least one document to produce a summary.\"", ")", "contexts", ":", "List", "[", "str", "]", "=", "[", "doc", ".", "text", "for", "doc", "in", "documents", "]", "if", "generate_single_summary", ":", "# Documents order is very important to produce summary.", "# Different order of same documents produce different summary.", "contexts", "=", "[", "self", ".", "separator_for_single_summary", ".", "join", "(", "contexts", ")", "]", "summaries", "=", "self", ".", "summarizer", "(", "contexts", ",", "min_length", "=", "self", ".", "min_length", ",", "max_length", "=", "self", ".", "max_length", ",", "return_text", "=", "True", ",", "clean_up_tokenization_spaces", "=", "self", ".", "clean_up_tokenization_spaces", ",", ")", "result", ":", "List", "[", "Document", "]", "=", "[", "]", "for", "context", ",", "summarized_answer", "in", "zip", "(", "contexts", ",", "summaries", ")", ":", "cur_doc", "=", "Document", "(", "text", "=", "summarized_answer", "[", "'summary_text'", "]", ",", "meta", "=", "{", "\"context\"", ":", "context", "}", ")", "result", ".", "append", "(", "cur_doc", ")", "return", "result" ]
[ 88, 4 ]
[ 129, 21 ]
python
en
['en', 'error', 'th']
False
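A usage sketch for ``predict``, assuming ``Document`` is importable from the top-level ``haystack`` package (an assumption about this Haystack version) and reusing the ``summarizer`` from the previous sketch:

```python
from haystack import Document  # import path assumed

docs = [
    Document(text='PG&E stated it scheduled the blackouts in response '
                  'to forecasts for high winds.'),
    Document(text='The shutoffs were expected to last through at least '
                  'midday tomorrow.'),
]

# One summary per document; generate_single_summary=True would instead
# join all documents (order matters!) and return a single summary.
for doc in summarizer.predict(documents=docs):
    print(doc.text, '<-', doc.meta['context'][:40])
```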
fit_pipeline
(data: Union[str, pd.DataFrame], pipeline: Union[str, MLPipeline, dict] = None, hyperparameters: Union[str, pd.DataFrame] = None, save_path: str = None)
Fit an Orion pipeline to the data. The pipeline can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If no pipeline is passed, the default Orion pipeline is used. Args: data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str, Pipeline or dict): Pipeline to use. It can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Defaults to ``None``. save_path (str): Path to the file where the fitted Orion instance will be stored using ``pickle``. If not given, the Orion instance is returned. Defaults to ``None``. Returns: Orion: If no save_path is provided, the fitted Orion instance is returned.
Fit an Orion pipeline to the data.
def fit_pipeline(data: Union[str, pd.DataFrame], pipeline: Union[str, MLPipeline, dict] = None, hyperparameters: Union[str, pd.DataFrame] = None, save_path: str = None) -> Orion: """Fit an Orion pipeline to the data. The pipeline can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If no pipeline is passed, the default Orion pipeline is used. Args: data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str, Pipeline or dict): Pipeline to use. It can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Defaults to ``None``. save_path (str): Path to the file where the fitted Orion instance will be stored using ``pickle``. If not given, the Orion instance is returned. Defaults to ``None``. Returns: Orion: If no save_path is provided, the fitted Orion instance is returned. """ data = _load_data(data) hyperparameters = _load_dict(hyperparameters) if pipeline is None: pipeline = Orion.DEFAULT_PIPELINE orion = Orion(pipeline, hyperparameters) orion.fit(data) if save_path: orion.save(save_path) else: return orion
[ "def", "fit_pipeline", "(", "data", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", ",", "pipeline", ":", "Union", "[", "str", ",", "MLPipeline", ",", "dict", "]", "=", "None", ",", "hyperparameters", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ",", "save_path", ":", "str", "=", "None", ")", "->", "Orion", ":", "data", "=", "_load_data", "(", "data", ")", "hyperparameters", "=", "_load_dict", "(", "hyperparameters", ")", "if", "pipeline", "is", "None", ":", "pipeline", "=", "Orion", ".", "DEFAULT_PIPELINE", "orion", "=", "Orion", "(", "pipeline", ",", "hyperparameters", ")", "orion", ".", "fit", "(", "data", ")", "if", "save_path", ":", "orion", ".", "save", "(", "save_path", ")", "else", ":", "return", "orion" ]
[ 58, 0 ]
[ 109, 20 ]
python
en
['en', 'en', 'en']
True
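A hedged sketch of both calling modes, assuming ``fit_pipeline`` lives in ``orion.functional`` and that ``S-1.csv`` is a local signal file (both assumptions):

```python
from orion.functional import fit_pipeline  # import path assumed

# Fit the default pipeline and pickle the fitted Orion instance.
fit_pipeline('S-1.csv', save_path='orion.pkl')

# Or keep the fitted instance in memory by omitting save_path.
orion = fit_pipeline('S-1.csv', pipeline='lstm_dynamic_threshold')
```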
detect_anomalies
(data: Union[str, pd.DataFrame] = None, pipeline: Union[Orion, str, MLPipeline, dict] = None, hyperparameters: Union[str, pd.DataFrame] = None, train_data: Union[str, pd.DataFrame] = None)
Detect anomalies on timeseries data. The anomalies are detected using an Orion pipeline which can be passed as: * An ``Orion`` instance. * An ``str`` with the path to an Orion pickle file. * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If no pipeline is passed, the default Orion pipeline is used. Optionally, separate training data can be passed to fit the pipeline before using it to detect anomalies. Args: data (str or DataFrame): Data to analyze searching for anomalies. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str or Pipeline or dict): Pipeline to use. It can be passed as: * An ``Orion`` instance. * An ``str`` with the path to an Orion pickle file. * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Ignored if being passed a previously serialized ``Orion`` instance. Defaults to ``None``. train_data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. If not given, the pipeline is used without fitting it first. Returns: DataFrame: ``pandas.DataFrame`` containing the detected anomalies.
Detect anomalies on timeseries data.
def detect_anomalies(data: Union[str, pd.DataFrame] = None, pipeline: Union[Orion, str, MLPipeline, dict] = None, hyperparameters: Union[str, pd.DataFrame] = None, train_data: Union[str, pd.DataFrame] = None) -> pd.DataFrame: """Detect anomalies on timeseries data. The anomalies are detected using an Orion pipeline which can be passed as: * An ``Orion`` instance. * An ``str`` with the path to an Orion pickle file. * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If no pipeline is passed, the default Orion pipeline is used. Optionally, separate training data can be passed to fit the pipeline before using it to detect anomalies. Args: data (str or DataFrame): Data to analyze searching for anomalies. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str or Pipeline or dict): Pipeline to use. It can be passed as: * An ``Orion`` instance. * An ``str`` with the path to an Orion pickle file. * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Ignored if being passed a previously serialized ``Orion`` instance. Defaults to ``None``. train_data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. If not given, the pipeline is used without fitting it first. Returns: DataFrame: ``pandas.DataFrame`` containing the detected anomalies. """ data = _load_data(data) orion = _load_orion(pipeline, hyperparameters) if train_data is not None: train_data = _load_data(train_data) orion.fit(train_data) return orion.detect(data)
[ "def", "detect_anomalies", "(", "data", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ",", "pipeline", ":", "Union", "[", "Orion", ",", "str", ",", "MLPipeline", ",", "dict", "]", "=", "None", ",", "hyperparameters", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ",", "train_data", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ")", "->", "pd", ".", "DataFrame", ":", "data", "=", "_load_data", "(", "data", ")", "orion", "=", "_load_orion", "(", "pipeline", ",", "hyperparameters", ")", "if", "train_data", "is", "not", "None", ":", "train_data", "=", "_load_data", "(", "train_data", ")", "orion", ".", "fit", "(", "train_data", ")", "return", "orion", ".", "detect", "(", "data", ")" ]
[ 112, 0 ]
[ 166, 29 ]
python
en
['en', 'sn', 'en']
True
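A companion sketch under the same assumptions as above; file names are hypothetical:

```python
from orion.functional import detect_anomalies  # import path assumed

# Reuse the pickled pipeline from the fit_pipeline sketch.
anomalies = detect_anomalies(data='S-1.csv', pipeline='orion.pkl')

# Or fit on the fly by passing train_data alongside the test data.
anomalies = detect_anomalies(
    data='S-1-test.csv',
    pipeline='lstm_dynamic_threshold',
    train_data='S-1-train.csv',
)
```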
evaluate_pipeline
(data: Union[str, pd.DataFrame], truth: Union[str, pd.DataFrame], pipeline: Union[str, dict, MLPipeline], hyperparameters: Union[str, pd.DataFrame] = None, metrics: List[Union[callable, str]] = None, train_data: Union[str, pd.DataFrame] = None)
Evaluate the performance of a pipeline. The pipeline is evaluated by executing it on a signal for which anomalies are known and then applying one or more metrics to it to compute scores. The pipeline can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the path to a pickle file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If the pipeline is not fitted, separate training data can be passed to fit the pipeline before using it to detect anomalies. Args: data (str or DataFrame): Data to analyze searching for anomalies. It can be passed as a path to a CSV file or as a DataFrame. truth (str or DataFrame): Table of known anomalies to use as the ground truth for scoring. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str or Pipeline or dict): Pipeline to use. It can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``str`` with the path to a pickle file. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Defaults to ``None``. metrics (list[str]): List of metrics to use. If not passed, all the Orion metrics are applied. train_data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. If not given, the pipeline is used without fitting it first.
Evaluate the performance of a pipeline.
def evaluate_pipeline(data: Union[str, pd.DataFrame], truth: Union[str, pd.DataFrame], pipeline: Union[str, dict, MLPipeline], hyperparameters: Union[str, pd.DataFrame] = None, metrics: List[Union[callable, str]] = None, train_data: Union[str, pd.DataFrame] = None) -> pd.DataFrame: """Evaluate the performance of a pipeline. The pipeline is evaluated by executing it on a signal for which anomalies are known and then applying one or more metrics to it to compute scores. The pipeline can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the path to a pickle file. * An ``str`` with the name of a registered Orion pipeline. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. If the pipeline is not fitted, separate training data can be passed to fit the pipeline before using it to detect anomalies. Args: data (str or DataFrame): Data to analyze searching for anomalies. It can be passed as a path to a CSV file or as a DataFrame. truth (str or DataFrame): Table of known anomalies to use as the ground truth for scoring. It can be passed as a path to a CSV file or as a DataFrame. pipeline (str or Pipeline or dict): Pipeline to use. It can be passed as: * An ``str`` with a path to a JSON file. * An ``str`` with the name of a registered pipeline. * An ``str`` with the path to a pickle file. * An ``MLPipeline`` instance. * A ``dict`` with an ``MLPipeline`` specification. hyperparameters (str or dict): Hyperparameters to set to the pipeline. It can be passed as a hyperparameters ``dict`` in the ``mlblocks`` format or as a path to the corresponding JSON file. Defaults to ``None``. metrics (list[str]): List of metrics to use. If not passed, all the Orion metrics are applied. train_data (str or DataFrame): Data to which the pipeline should be fitted. It can be passed as a path to a CSV file or as a DataFrame. If not given, the pipeline is used without fitting it first. """ data = _load_data(data) truth = _load_data(truth) fit = train_data is not None if fit: train_data = _load_data(train_data) orion = _load_orion(pipeline, hyperparameters) return orion.evaluate(data, truth, fit, train_data, metrics)
[ "def", "evaluate_pipeline", "(", "data", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", ",", "truth", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", ",", "pipeline", ":", "Union", "[", "str", ",", "dict", ",", "MLPipeline", "]", ",", "hyperparameters", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ",", "metrics", ":", "List", "[", "Union", "[", "callable", ",", "str", "]", "]", "=", "None", ",", "train_data", ":", "Union", "[", "str", ",", "pd", ".", "DataFrame", "]", "=", "None", ")", "->", "pd", ".", "DataFrame", ":", "data", "=", "_load_data", "(", "data", ")", "truth", "=", "_load_data", "(", "truth", ")", "fit", "=", "train_data", "is", "not", "None", "if", "fit", ":", "train_data", "=", "_load_data", "(", "train_data", ")", "orion", "=", "_load_orion", "(", "pipeline", ",", "hyperparameters", ")", "return", "orion", ".", "evaluate", "(", "data", ",", "truth", ",", "fit", ",", "train_data", ",", "metrics", ")" ]
[ 169, 0 ]
[ 227, 62 ]
python
en
['en', 'en', 'en']
True
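A hedged usage sketch for evaluate_pipeline. Only the call signature comes from the record above; the import path, pipeline name, and file names are assumptions for illustration:

import pandas as pd

from orion.core import evaluate_pipeline  # module path is an assumption

scores: pd.DataFrame = evaluate_pipeline(
    data="S-1.csv",                     # signal to analyze (path or DataFrame)
    truth="S-1_anomalies.csv",          # known anomalies used as ground truth
    pipeline="lstm_dynamic_threshold",  # assumed name of a registered pipeline
    train_data="S-1-train.csv",         # optional: fit the pipeline first
)
print(scores)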
test_render_datasource_new_notebook_with_pandas_Datasource
( empty_data_context, construct_datasource_new_notebook_assets, )
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing pandas Datasource.
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing pandas Datasource.
def test_render_datasource_new_notebook_with_pandas_Datasource( empty_data_context, construct_datasource_new_notebook_assets, ): """ What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing pandas Datasource. """ context: DataContext = empty_data_context datasource_name = "my_pandas_datasource_name" datasource_yaml = '"""test_yaml:\n indented_key: value"""' datasource_new_notebook_renderer = DatasourceNewNotebookRenderer( context=context, datasource_type=DatasourceTypes.PANDAS, datasource_yaml=datasource_yaml, datasource_name=datasource_name, ) obs: nbformat.NotebookNode = datasource_new_notebook_renderer.render() assert isinstance(obs, dict) datasource_new_notebook_assets = construct_datasource_new_notebook_assets( datasource_name=datasource_name, datasource_yaml=datasource_yaml ) expected_cells = ( datasource_new_notebook_assets["pandas_header"] + datasource_new_notebook_assets["imports"] + datasource_new_notebook_assets["customize_docs_cell"] + datasource_new_notebook_assets["datasource_name_cell"] + datasource_new_notebook_assets["files_docs_cell"] + datasource_new_notebook_assets["template_cell"] + datasource_new_notebook_assets["test_yaml_cells"] + datasource_new_notebook_assets["save_datasource_cells"] ) expected = { "nbformat": 4, "nbformat_minor": 4, "metadata": {}, "cells": expected_cells, } del expected["nbformat_minor"] del obs["nbformat_minor"] for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]): obs_cell.pop("id", None) assert obs_cell == expected_cell assert obs == expected
[ 180, 0 ]
[ 231, 26 ]
python
en
['en', 'error', 'th']
False
test_render_datasource_new_notebook_with_spark_Datasource
( empty_data_context, construct_datasource_new_notebook_assets, )
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing spark Datasource.
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing spark Datasource.
def test_render_datasource_new_notebook_with_spark_Datasource( empty_data_context, construct_datasource_new_notebook_assets, ): """ What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing spark Datasource. """ context: DataContext = empty_data_context datasource_name = "my_spark_datasource_name" datasource_yaml = '"""test_yaml:\n indented_key: value"""' datasource_new_notebook_renderer = DatasourceNewNotebookRenderer( context=context, datasource_type=DatasourceTypes.SPARK, datasource_yaml=datasource_yaml, datasource_name=datasource_name, ) obs: nbformat.NotebookNode = datasource_new_notebook_renderer.render() assert isinstance(obs, dict) datasource_new_notebook_assets = construct_datasource_new_notebook_assets( datasource_name=datasource_name, datasource_yaml=datasource_yaml ) expected_cells = ( datasource_new_notebook_assets["spark_header"] + datasource_new_notebook_assets["imports"] + datasource_new_notebook_assets["customize_docs_cell"] + datasource_new_notebook_assets["datasource_name_cell"] + datasource_new_notebook_assets["files_docs_cell"] + datasource_new_notebook_assets["template_cell"] + datasource_new_notebook_assets["test_yaml_cells"] + datasource_new_notebook_assets["save_datasource_cells"] ) expected = { "nbformat": 4, "nbformat_minor": 4, "metadata": {}, "cells": expected_cells, } del expected["nbformat_minor"] del obs["nbformat_minor"] for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]): obs_cell.pop("id", None) assert obs_cell == expected_cell assert obs == expected
[ 234, 0 ]
[ 285, 26 ]
python
en
['en', 'error', 'th']
False
test_render_datasource_new_notebook_with_sql_Datasource
( empty_data_context, construct_datasource_new_notebook_assets, )
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing sql Datasource.
What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing sql Datasource.
def test_render_datasource_new_notebook_with_sql_Datasource( empty_data_context, construct_datasource_new_notebook_assets, ): """ What does this test and why? The DatasourceNewNotebookRenderer should generate a notebook with text based on the datasource we are trying to implement. Here we are testing sql Datasource. """ context: DataContext = empty_data_context datasource_name = "my_sql_datasource_name" datasource_yaml = '"""test_yaml:\n indented_key: value"""' datasource_new_notebook_renderer = DatasourceNewNotebookRenderer( context=context, datasource_type=DatasourceTypes.SQL, datasource_yaml=datasource_yaml, datasource_name=datasource_name, sql_credentials_snippet='host = "localhost"', ) obs: nbformat.NotebookNode = datasource_new_notebook_renderer.render() assert isinstance(obs, dict) datasource_new_notebook_assets = construct_datasource_new_notebook_assets( datasource_name=datasource_name, datasource_yaml=datasource_yaml ) expected_cells = ( datasource_new_notebook_assets["sql_header"] + datasource_new_notebook_assets["imports"] + datasource_new_notebook_assets["customize_docs_cell"] + datasource_new_notebook_assets["datasource_name_cell"] + datasource_new_notebook_assets["sql_docs_cell"] + datasource_new_notebook_assets["sql_credentials_cell"] + datasource_new_notebook_assets["template_cell"] + datasource_new_notebook_assets["test_yaml_cells"] + datasource_new_notebook_assets["save_datasource_cells"] ) expected = { "nbformat": 4, "nbformat_minor": 4, "metadata": {}, "cells": expected_cells, } del expected["nbformat_minor"] del obs["nbformat_minor"] for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]): obs_cell.pop("id", None) assert obs_cell == expected_cell assert obs == expected
[ 288, 0 ]
[ 341, 26 ]
python
en
['en', 'error', 'th']
False
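A note on the three renderer tests above: each one pops the "id" key from the observed cells before comparing. That guards against nbformat 5.1+, which assigns a random "id" to every new cell and would break a plain dict comparison. A minimal, self-contained demonstration (assumes nbformat is installed):

from nbformat import v4 as nbf

nb = nbf.new_notebook()
nb.cells.append(nbf.new_markdown_cell("# header"))
# True on nbformat >= 5.1, which is why the tests call obs_cell.pop("id", None)
print("id" in nb.cells[0])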
SmokeTestCase.test_xxx
(self)
XXX identity
XXX identity
def test_xxx(self): """XXX identity""" pass
[ 3, 4 ]
[ 5, 12 ]
python
en
['en', 'pl', 'en']
False
_FindCommandInPath
(command)
If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.
If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.
def _FindCommandInPath(command): """If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.""" if '/' in command or '\\' in command: # If the command already has path elements (either relative or # absolute), then assume it is constructed properly. return command else: # Search through the path list and find an existing file that # we can access. paths = os.environ.get('PATH','').split(os.pathsep) for path in paths: item = os.path.join(path, command) if os.path.isfile(item) and os.access(item, os.X_OK): return item return command
[ 16, 0 ]
[ 35, 16 ]
python
en
['en', 'en', 'en']
True
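_FindCommandInPath hand-rolls the PATH lookup its docstring describes; the standard library's shutil.which implements the same idea and is shown here only for comparison, not as part of this module:

import shutil

# Absolute path such as /usr/bin/python, or None when the command is not found
print(shutil.which("python"))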
Writer.__init__
(self, user_file_path, version, name)
Initializes the user file. Args: user_file_path: Path to the user file. version: Version info. name: Name of the user file.
Initializes the user file.
def __init__(self, user_file_path, version, name): """Initializes the user file. Args: user_file_path: Path to the user file. version: Version info. name: Name of the user file. """ self.user_file_path = user_file_path self.version = version self.name = name self.configurations = {}
[ 56, 2 ]
[ 67, 28 ]
python
en
['en', 'en', 'en']
True
Writer.AddConfig
(self, name)
Adds a configuration to the project. Args: name: Configuration name.
Adds a configuration to the project.
def AddConfig(self, name): """Adds a configuration to the project. Args: name: Configuration name. """ self.configurations[name] = ['Configuration', {'Name': name}]
[ 69, 2 ]
[ 75, 65 ]
python
en
['en', 'en', 'en']
True
Writer.AddDebugSettings
(self, config_name, command, environment = {}, working_directory="")
Adds a DebugSettings node to the user file for a particular config. Args: config_name: name of the configuration to add the debug settings to. command: command line to run. First element in the list is the executable. All elements of the command will be quoted if necessary. environment: dictionary of environment variables to set for the debuggee. (optional) working_directory: directory in which to run the command. (optional)
Adds a DebugSettings node to the user file for a particular config.
def AddDebugSettings(self, config_name, command, environment = {},
                     working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: name of the configuration to add the debug settings to.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: dictionary of environment variables to set for the
        debuggee. (optional)
      working_directory: directory in which to run the command. (optional)
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',

              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)
[ 77, 2 ]
[ 132, 50 ]
python
en
['en', 'en', 'en']
True
Writer.WriteIfChanged
(self)
Writes the user file.
Writes the user file.
def WriteIfChanged(self): """Writes the user file.""" configs = ['Configurations'] for config, spec in sorted(self.configurations.iteritems()): configs.append(spec) content = ['VisualStudioUserFile', {'Version': self.version.ProjectVersion(), 'Name': self.name }, configs] easy_xml.WriteXmlIfChanged(content, self.user_file_path, encoding="Windows-1252")
[ 134, 2 ]
[ 146, 55 ]
python
en
['en', 'en', 'en']
True
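A hedged sketch of the Writer lifecycle defined by the four records above. The import paths and the version lookup are assumptions (gyp normally supplies an MSVSVersion object), and this is Python 2-era code (note the iteritems calls), so it would run under Python 2:

import gyp.MSVSUserFile   # assumed import path
import gyp.MSVSVersion    # assumed import path

version = gyp.MSVSVersion.SelectVisualStudioVersion('2008')  # assumed helper
user_file = gyp.MSVSUserFile.Writer('app.vcproj.user', version, 'app')
user_file.AddDebugSettings(
    'Debug|Win32',
    ['python', 'run_tests.py', '--verbose'],
    environment={'PYTHONPATH': '.'},
    working_directory='$(ProjectDir)')
user_file.WriteIfChanged()  # writes app.vcproj.user only if the content changed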
test_metric_store_store_backend_id
(in_memory_param_store)
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
def test_metric_store_store_backend_id(in_memory_param_store):
    """
    What does this test and why?
    A Store should be able to report its store_backend_id,
    which is set when the StoreBackend is instantiated.
    """
    # Check that store_backend_id exists and can be read
    assert in_memory_param_store.store_backend_id is not None
    # Check that store_backend_id is a valid UUID
    assert test_utils.validate_uuid4(in_memory_param_store.store_backend_id)
[ 84, 0 ]
[ 93, 76 ]
python
en
['en', 'error', 'th']
False
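The test above relies on test_utils.validate_uuid4. A sketch of what such a helper typically does (the real implementation lives in great_expectations' test utilities; this mirrors the usual idiom):

import uuid

def validate_uuid4(uuid_string: str) -> bool:
    """Return True only for the canonical string form of a version-4 UUID."""
    try:
        val = uuid.UUID(uuid_string, version=4)
    except ValueError:
        # The string is not a valid UUID at all
        return False
    # Reject strings that parse but are not in canonical form
    return str(val) == uuid_string

assert validate_uuid4(str(uuid.uuid4()))
assert not validate_uuid4("not-a-uuid")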
is_union
(declaration)
Returns True if declaration represents a C++ union Args: declaration (declaration_t): the declaration to be checked. Returns: bool: True if declaration represents a C++ union
Returns True if declaration represents a C++ union
def is_union(declaration): """ Returns True if declaration represents a C++ union Args: declaration (declaration_t): the declaration to be checked. Returns: bool: True if declaration represents a C++ union """ if not is_class(declaration): return False decl = class_traits.get_declaration(declaration) return decl.class_type == class_declaration.CLASS_TYPES.UNION
[ 17, 0 ]
[ 30, 65 ]
python
en
['en', 'error', 'th']
False
is_struct
(declaration)
Returns True if declaration represents a C++ struct Args: declaration (declaration_t): the declaration to be checked. Returns: bool: True if declaration represents a C++ struct
Returns True if declaration represents a C++ struct
def is_struct(declaration): """ Returns True if declaration represents a C++ struct Args: declaration (declaration_t): the declaration to be checked. Returns: bool: True if declaration represents a C++ struct """ if not is_class(declaration): return False decl = class_traits.get_declaration(declaration) return decl.class_type == class_declaration.CLASS_TYPES.STRUCT
[ 33, 0 ]
[ 46, 66 ]
python
en
['en', 'error', 'th']
False
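An end-to-end sketch for is_union and is_struct. It assumes castxml is installed and on PATH and that pygccxml's declarations package re-exports these helpers (as current releases do); the C++ snippet is illustrative:

from pygccxml import utils, parser, declarations

# Standard pygccxml setup: locate the XML generator and build a config
generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

decls = parser.parse_string("struct S {}; union U { int i; float f; };", config)
ns = declarations.get_global_namespace(decls)
print(declarations.is_struct(ns.class_("S")))  # True
print(declarations.is_union(ns.class_("U")))   # True
print(declarations.is_union(ns.class_("S")))   # False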
find_trivial_constructor
(type_)
Returns reference to trivial constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the trivial constructor
Returns reference to trivial constructor.
def find_trivial_constructor(type_): """ Returns reference to trivial constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the trivial constructor """ assert isinstance(type_, class_declaration.class_t) trivial = type_.constructors( lambda x: is_trivial_constructor(x), recursive=False, allow_empty=True) if trivial: return trivial[0] else: return None
[ 110, 0 ]
[ 130, 19 ]
python
en
['en', 'error', 'th']
False
find_copy_constructor
(type_)
Returns reference to copy constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the copy constructor
Returns reference to copy constructor.
def find_copy_constructor(type_): """ Returns reference to copy constructor. Args: type_ (declarations.class_t): the class to be searched. Returns: declarations.constructor_t: the copy constructor """ copy_ = type_.constructors( lambda x: is_copy_constructor(x), recursive=False, allow_empty=True) if copy_: return copy_[0] else: return None
[ 133, 0 ]
[ 151, 19 ]
python
en
['en', 'error', 'th']
False
find_noncopyable_vars
(type_, already_visited_cls_vars=None)
Returns list of all `noncopyable` variables. If an already_visited_cls_vars list is provided as argument, the returned list will not contain these variables. This list will be extended with whatever variables pointing to classes have been found. Args: type_ (declarations.class_t): the class to be searched. already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: list: list of all `noncopyable` variables.
Returns list of all `noncopyable` variables.
def find_noncopyable_vars(type_, already_visited_cls_vars=None): """ Returns list of all `noncopyable` variables. If an already_visited_cls_vars list is provided as argument, the returned list will not contain these variables. This list will be extended with whatever variables pointing to classes have been found. Args: type_ (declarations.class_t): the class to be searched. already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: list: list of all `noncopyable` variables. """ assert isinstance(type_, class_declaration.class_t) logger = utils.loggers.cxx_parser mvars = type_.variables( lambda v: not v.type_qualifiers.has_static, recursive=False, allow_empty=True) noncopyable_vars = [] if already_visited_cls_vars is None: already_visited_cls_vars = [] message = ( "__contains_noncopyable_mem_var - %s - TRUE - " + "contains const member variable") for mvar in mvars: type_ = type_traits.remove_reference(mvar.decl_type) if type_traits.is_const(type_): no_const = type_traits.remove_const(type_) if type_traits.is_fundamental(no_const) or is_enum(no_const): logger.debug( (message + "- fundamental or enum"), type_.decl_string) noncopyable_vars.append(mvar) if is_class(no_const): logger.debug((message + " - class"), type_.decl_string) noncopyable_vars.append(mvar) if type_traits.is_array(no_const): logger.debug((message + " - array"), type_.decl_string) noncopyable_vars.append(mvar) if class_traits.is_my_case(type_): cls = class_traits.get_declaration(type_) # Exclude classes that have already been visited. if cls in already_visited_cls_vars: continue already_visited_cls_vars.append(cls) if is_noncopyable(cls, already_visited_cls_vars): logger.debug( (message + " - class that is not copyable"), type_.decl_string) noncopyable_vars.append(mvar) logger.debug(( "__contains_noncopyable_mem_var - %s - FALSE - doesn't " + "contain noncopyable members"), type_.decl_string) return noncopyable_vars
[ 154, 0 ]
[ 224, 27 ]
python
en
['en', 'error', 'th']
False
has_trivial_constructor
(class_)
If the class has a public trivial constructor, this function will return a reference to it, None otherwise
If the class has a public trivial constructor, this function will return a reference to it, None otherwise
def has_trivial_constructor(class_):
    """If the class has a public trivial constructor, this function
    will return a reference to it, None otherwise"""
    class_ = class_traits.get_declaration(class_)
    trivial = find_trivial_constructor(class_)
    if trivial and trivial.access_type == 'public':
        return trivial
[ 227, 0 ]
[ 233, 22 ]
python
en
['en', 'en', 'en']
True
has_copy_constructor
(class_)
If the class has a public copy constructor, this function will return a reference to it, None otherwise
If the class has a public copy constructor, this function will return a reference to it, None otherwise
def has_copy_constructor(class_):
    """If the class has a public copy constructor, this function
    will return a reference to it, None otherwise"""
    class_ = class_traits.get_declaration(class_)
    copy_constructor = find_copy_constructor(class_)
    if copy_constructor and copy_constructor.access_type == 'public':
        return copy_constructor
[ 236, 0 ]
[ 242, 31 ]
python
en
['en', 'en', 'en']
True
has_destructor
(class_)
If the class has a destructor, this function will return a reference to it, None otherwise
If the class has a destructor, this function will return a reference to it, None otherwise
def has_destructor(class_):
    """If the class has a destructor, this function will return
    a reference to it, None otherwise"""
    class_ = class_traits.get_declaration(class_)
    destructor = class_.decls(
        decl_type=calldef_members.destructor_t,
        recursive=False,
        allow_empty=True)
    if destructor:
        return destructor[0]
[ 245, 0 ]
[ 254, 28 ]
python
en
['en', 'en', 'en']
True
has_public_constructor
(class_)
If the class has any public constructor that is not a copy constructor, this function will return a list of them, otherwise None
If the class has any public constructor that is not a copy constructor, this function will return a list of them, otherwise None
def has_public_constructor(class_):
    """If the class has any public constructor that is not a copy
    constructor, this function will return a list of them,
    otherwise None"""
    class_ = class_traits.get_declaration(class_)
    decls = class_.constructors(
        lambda c: not is_copy_constructor(c) and c.access_type == 'public',
        recursive=False,
        allow_empty=True)
    if decls:
        return decls
[ 257, 0 ]
[ 266, 20 ]
python
en
['en', 'en', 'en']
True
has_public_assign
(class_)
Returns True if the class has a public assignment operator, False otherwise
Returns True if the class has a public assignment operator, False otherwise
def has_public_assign(class_):
    """Returns True if the class has a public assignment operator,
    False otherwise"""
    class_ = class_traits.get_declaration(class_)
    decls = class_.member_operators(
        lambda o: o.symbol == '=' and o.access_type == 'public',
        recursive=False,
        allow_empty=True)
    return bool(decls)
[ 269, 0 ]
[ 276, 22 ]
python
en
['en', 'en', 'en']
True
has_public_destructor
(decl_type)
Returns True if the class has a public destructor, False otherwise
Returns True if the class has a public destructor, False otherwise
def has_public_destructor(decl_type):
    """Returns True if the class has a public destructor, False otherwise"""
    d = has_destructor(decl_type)
    return d and d.access_type == 'public'
[ 279, 0 ]
[ 282, 42 ]
python
en
['en', 'fr', 'en']
True
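A sketch exercising several of the has_* predicates above on a single class (same castxml and re-export assumptions as in the earlier sketch; the C++ snippet is illustrative):

from pygccxml import utils, parser, declarations

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

code = """
class widget {
public:
    widget();
    widget(const widget&);
    widget& operator=(const widget&);
    ~widget();
};
"""
ns = declarations.get_global_namespace(parser.parse_string(code, config))
cls = ns.class_("widget")
print(bool(declarations.has_trivial_constructor(cls)))  # True: widget()
print(bool(declarations.has_copy_constructor(cls)))     # True
print(declarations.has_public_assign(cls))              # True
print(bool(declarations.has_public_destructor(cls)))    # True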
has_vtable
(decl_type)
Returns True if the class has a virtual table, False otherwise
Returns True if the class has a virtual table, False otherwise
def has_vtable(decl_type):
    """Returns True if the class has a virtual table, False otherwise"""
    assert isinstance(decl_type, class_declaration.class_t)
    return bool(
        decl_type.calldefs(
            lambda f: isinstance(f, calldef_members.member_function_t) and
            f.virtuality != calldef_types.VIRTUALITY_TYPES.NOT_VIRTUAL,
            recursive=False,
            allow_empty=True))
[ 285, 0 ]
[ 293, 30 ]
python
en
['en', 'en', 'en']
True
is_base_and_derived
(based, derived)
Returns True if there is a "base and derived" relationship between the classes, False otherwise
Returns True if there is a "base and derived" relationship between the classes, False otherwise
def is_base_and_derived(based, derived):
    """Returns True if there is a "base and derived" relationship between
    the classes, False otherwise"""
    assert isinstance(based, class_declaration.class_t)
    assert isinstance(derived, (class_declaration.class_t, tuple))
    if isinstance(derived, class_declaration.class_t):
        all_derived = [derived]
    else:  # tuple
        all_derived = derived

    for derived_cls in all_derived:
        for base_desc in derived_cls.recursive_bases:
            if base_desc.related_class == based:
                return True
    return False
[ 296, 0 ]
[ 311, 16 ]
python
en
['en', 'en', 'en']
True
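A sketch for is_base_and_derived (same assumptions as the sketches above):

from pygccxml import utils, parser, declarations

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

ns = declarations.get_global_namespace(
    parser.parse_string("struct base {}; struct derived : base {};", config))
print(declarations.is_base_and_derived(ns.class_("base"), ns.class_("derived")))  # True
print(declarations.is_base_and_derived(ns.class_("derived"), ns.class_("base")))  # False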
has_any_non_copyconstructor
(decl_type)
If the class has any public constructor that is not a copy constructor, this function will return a list of them, otherwise None
If the class has any public constructor that is not a copy constructor, this function will return a list of them, otherwise None
def has_any_non_copyconstructor(decl_type):
    """If the class has any public constructor that is not a copy
    constructor, this function will return a list of them,
    otherwise None"""
    class_ = class_traits.get_declaration(decl_type)
    decls = class_.constructors(
        lambda c: not is_copy_constructor(c) and c.access_type == 'public',
        recursive=False,
        allow_empty=True)
    if decls:
        return decls
[ 314, 0 ]
[ 323, 20 ]
python
en
['en', 'en', 'en']
True
is_convertible
(source, target)
Returns True if source can be converted to target, otherwise False
Returns True if source can be converted to target, otherwise False
def is_convertible(source, target):
    """Returns True if source can be converted to target, otherwise False"""
    return __is_convertible_t(source, target).is_convertible()
[ 654, 0 ]
[ 656, 62 ]
python
en
['en', 'en', 'en']
True
__is_noncopyable_single
(class_, already_visited_cls_vars=None)
Implementation detail. Checks if the class is non copyable, without considering the base classes. Args: class_ (declarations.class_t): the class to be checked already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. Returns: bool: if the class is non copyable
Implementation detail.
def __is_noncopyable_single(class_, already_visited_cls_vars=None):
    """
    Implementation detail.

    Checks if the class is non copyable, without considering the base
    classes.

    Args:
        class_ (declarations.class_t): the class to be checked
        already_visited_cls_vars (list): optional list of vars that should
            not be checked a second time, to prevent infinite recursions.

    Returns:
        bool: if the class is non copyable
    """
    # It is not enough to check base classes, we should also check
    # member variables.
    logger = utils.loggers.cxx_parser

    if has_copy_constructor(class_) \
            and has_public_constructor(class_) \
            and has_public_assign(class_) \
            and has_public_destructor(class_):
        msg = os.linesep.join([
            "__is_noncopyable_single - %s - COPYABLE:" % class_.decl_string,
            "    trivial copy constructor: yes",
            "    public constructor: yes",
            "    public assign: yes",
            "    public destructor: yes"])
        logger.debug(msg)
        return False

    if already_visited_cls_vars is None:
        already_visited_cls_vars = []

    if find_noncopyable_vars(class_, already_visited_cls_vars):
        logger.debug(
            ("__is_noncopyable_single(TRUE) - %s - contains noncopyable " +
                "members"), class_.decl_string)
        return True
    else:
        logger.debug((
            "__is_noncopyable_single(FALSE) - %s - COPYABLE, because it " +
            "doesn't contain noncopyable members"), class_.decl_string)
        return False
[ 659, 0 ]
[ 702, 20 ]
python
en
['en', 'error', 'th']
False
is_noncopyable
(class_, already_visited_cls_vars=None)
Checks if class is non copyable Args: class_ (declarations.class_t): the class to be checked already_visited_cls_vars (list): optional list of vars that should not be checked a second time, to prevent infinite recursions. In general you can ignore this argument, it is mainly used during recursive calls of is_noncopyable() done by pygccxml. Returns: bool: if the class is non copyable
Checks if class is non copyable
def is_noncopyable(class_, already_visited_cls_vars=None):
    """
    Checks if class is non copyable

    Args:
        class_ (declarations.class_t): the class to be checked
        already_visited_cls_vars (list): optional list of vars that should
            not be checked a second time, to prevent infinite recursions.
            In general you can ignore this argument, it is mainly used during
            recursive calls of is_noncopyable() done by pygccxml.

    Returns:
        bool: if the class is non copyable
    """
    logger = utils.loggers.cxx_parser

    class_decl = class_traits.get_declaration(class_)

    true_header = "is_noncopyable(TRUE) - %s - " % class_.decl_string
    # false_header = "is_noncopyable(false) - %s - " % class_.decl_string

    if is_union(class_):
        return False

    if class_decl.is_abstract:
        logger.debug(true_header + "abstract client")
        return True

    # if the class has a public, user-defined copy constructor, then this
    # class is copyable
    copy_ = find_copy_constructor(class_decl)
    if copy_ and copy_.access_type == 'public' and not copy_.is_artificial:
        return False

    if already_visited_cls_vars is None:
        already_visited_cls_vars = []

    for base_desc in class_decl.recursive_bases:
        assert isinstance(base_desc, class_declaration.hierarchy_info_t)

        if base_desc.related_class.decl_string in \
                ('::boost::noncopyable', '::boost::noncopyable_::noncopyable'):
            logger.debug(true_header + "derives from boost::noncopyable")
            return True

        if not has_copy_constructor(base_desc.related_class):

            base_copy_ = find_copy_constructor(base_desc.related_class)

            if base_copy_ and base_copy_.access_type == 'private':
                logger.debug(
                    true_header + "there is private copy constructor")
                return True
            elif __is_noncopyable_single(
                    base_desc.related_class, already_visited_cls_vars):
                logger.debug(
                    true_header + "__is_noncopyable_single returned True")
                return True

        if __is_noncopyable_single(
                base_desc.related_class, already_visited_cls_vars):
            logger.debug(
                true_header + "__is_noncopyable_single returned True")
            return True

    if not has_copy_constructor(class_decl):
        logger.debug(true_header + "does not have trivial copy constructor")
        return True
    elif not has_public_constructor(class_decl):
        logger.debug(true_header + "does not have a public constructor")
        return True
    elif has_destructor(class_decl) and not has_public_destructor(class_decl):
        logger.debug(true_header + "has private destructor")
        return True
    else:
        return __is_noncopyable_single(class_decl, already_visited_cls_vars)
[ 705, 0 ]
[ 783, 76 ]
python
en
['en', 'error', 'th']
False
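A sketch of the most common is_noncopyable outcome: a private copy constructor makes the class noncopyable (same castxml and re-export assumptions as in the earlier sketches):

from pygccxml import utils, parser, declarations

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

code = """
class locked {
public:
    locked();
private:
    locked(const locked&);
};
"""
cls = declarations.get_global_namespace(
    parser.parse_string(code, config)).class_("locked")
print(declarations.is_noncopyable(cls))  # True: the copy constructor is private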
is_unary_operator
(oper)
Returns True if the operator is a unary operator, otherwise False
Returns True if the operator is a unary operator, otherwise False
def is_unary_operator(oper):
    """Returns True if the operator is a unary operator, otherwise False"""
    # definition:
    #   member in class
    #     ret-type operator symbol()
    #     ret-type operator [++ --](int)
    #   globally
    #     ret-type operator symbol( arg )
    #     ret-type operator [++ --](X&, int)
    symbols = ['!', '&', '~', '*', '+', '++', '-', '--']
    if not isinstance(oper, calldef_members.operator_t):
        return False
    if oper.symbol not in symbols:
        return False
    if isinstance(oper, calldef_members.member_operator_t):
        if len(oper.arguments) == 0:
            return True
        elif oper.symbol in ['++', '--'] and \
                isinstance(oper.arguments[0].decl_type, cpptypes.int_t):
            return True
        else:
            return False
    else:
        if len(oper.arguments) == 1:
            return True
        elif oper.symbol in ['++', '--'] \
                and len(oper.arguments) == 2 \
                and isinstance(oper.arguments[1].decl_type, cpptypes.int_t):
            # maybe we should also check whether the first argument is a
            # reference
            return True
        else:
            return False
[ 786, 0 ]
[ 818, 24 ]
python
en
['en', 'en', 'en']
True
is_binary_operator
(oper)
Returns True if the operator is a binary operator, otherwise False
Returns True if the operator is a binary operator, otherwise False
def is_binary_operator(oper):
    """Returns True if the operator is a binary operator, otherwise False"""
    # definition:
    #   member in class
    #     ret-type operator symbol(arg)
    #   globally
    #     ret-type operator symbol( arg1, arg2 )
    symbols = [
        ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',
        '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',
        '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']
    if not isinstance(oper, calldef_members.operator_t):
        return False
    if oper.symbol not in symbols:
        return False
    if isinstance(oper, calldef_members.member_operator_t):
        if len(oper.arguments) == 1:
            return True
        else:
            return False
    else:
        if len(oper.arguments) == 2:
            return True
        else:
            return False
[ 821, 0 ]
[ 845, 24 ]
python
en
['en', 'en', 'en']
True
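A sketch distinguishing unary from binary operators by argument count, as the two predicates above do (same assumptions as the earlier sketches):

from pygccxml import utils, parser, declarations

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

code = """
struct num {
    num operator-() const;           // unary minus: no arguments
    num operator-(const num&) const; // binary minus: one argument
};
"""
ops = declarations.get_global_namespace(
    parser.parse_string(code, config)).class_("num").member_operators()
for op in ops:
    print(len(op.arguments),
          declarations.is_unary_operator(op),
          declarations.is_binary_operator(op))
# expected: 0 True False, then 1 False True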
is_copy_constructor
(constructor)
Check if the declaration is a copy constructor. Args: constructor (declarations.constructor_t): the constructor to be checked. Returns: bool: True if this is a copy constructor, False otherwise.
Check if the declaration is a copy constructor.
def is_copy_constructor(constructor):
    """
    Check if the declaration is a copy constructor.

    Args:
        constructor (declarations.constructor_t): the constructor
            to be checked.

    Returns:
        bool: True if this is a copy constructor, False otherwise.
    """
    assert isinstance(constructor, calldef_members.constructor_t)
    args = constructor.arguments
    parent = constructor.parent

    # A copy constructor has only one argument
    if len(args) != 1:
        return False

    # We have only one argument, get it
    arg = args[0]

    if not isinstance(arg.decl_type, cpptypes.compound_t):
        # An argument of type declarated_t (a typedef) could be passed to
        # the constructor; and it could be a reference.
        # But in c++ you can NOT write :
        #    "typedef class MyClass { MyClass(const MyClass & arg) {} }"
        # If the argument is a typedef, this is not a copy constructor.
        # See the hierarchy of declarated_t and compound_t. They both
        # inherit from type_t but are not related so we can discriminate
        # between them.
        return False

    # The argument needs to be passed by reference in a copy constructor
    if not type_traits.is_reference(arg.decl_type):
        return False

    # The argument needs to be const for a copy constructor
    if not type_traits.is_const(arg.decl_type.base):
        return False

    un_aliased = type_traits.remove_alias(arg.decl_type.base)
    # un_aliased now refers to const_t instance
    if not isinstance(un_aliased.base, cpptypes.declarated_t):
        # We are looking for a declaration
        # If "class MyClass { MyClass(const int & arg) {} }" is used,
        # this is not a copy constructor, so we return False here.
        # -> un_aliased.base == cpptypes.int_t (!= cpptypes.declarated_t)
        return False

    # Final check: compare the parent (the class declaration for example)
    # with the declaration of the type passed as argument.
    return id(un_aliased.base.declaration) == id(parent)
[ "def", "is_copy_constructor", "(", "constructor", ")", ":", "assert", "isinstance", "(", "constructor", ",", "calldef_members", ".", "constructor_t", ")", "args", "=", "constructor", ".", "arguments", "parent", "=", "constructor", ".", "parent", "# A copy constructor has only one argument", "if", "len", "(", "args", ")", "!=", "1", ":", "return", "False", "# We have only one argument, get it", "arg", "=", "args", "[", "0", "]", "if", "not", "isinstance", "(", "arg", ".", "decl_type", ",", "cpptypes", ".", "compound_t", ")", ":", "# An argument of type declarated_t (a typedef) could be passed to", "# the constructor; and it could be a reference.", "# But in c++ you can NOT write :", "# \"typedef class MyClass { MyClass(const MyClass & arg) {} }\"", "# If the argument is a typedef, this is not a copy constructor.", "# See the hierarchy of declarated_t and coumpound_t. They both", "# inherit from type_t but are not related so we can discriminate", "# between them.", "return", "False", "# The argument needs to be passed by reference in a copy constructor", "if", "not", "type_traits", ".", "is_reference", "(", "arg", ".", "decl_type", ")", ":", "return", "False", "# The argument needs to be const for a copy constructor", "if", "not", "type_traits", ".", "is_const", "(", "arg", ".", "decl_type", ".", "base", ")", ":", "return", "False", "un_aliased", "=", "type_traits", ".", "remove_alias", "(", "arg", ".", "decl_type", ".", "base", ")", "# un_aliased now refers to const_t instance", "if", "not", "isinstance", "(", "un_aliased", ".", "base", ",", "cpptypes", ".", "declarated_t", ")", ":", "# We are looking for a declaration", "# If \"class MyClass { MyClass(const int & arg) {} }\" is used,", "# this is not copy constructor, so we return False here.", "# -> un_aliased.base == cpptypes.int_t (!= cpptypes.declarated_t)", "return", "False", "# Final check: compare the parent (the class declaration for example)", "# with the declaration of the type passed as argument.", "return", "id", "(", "un_aliased", ".", "base", ".", "declaration", ")", "==", "id", "(", "parent", ")" ]
[ 848, 0 ]
[ 901, 56 ]
python
en
['en', 'error', 'th']
False
is_trivial_constructor
(constructor)
Check if the declaration is a trivial constructor. Args: constructor (declarations.constructor_t): the constructor to be checked. Returns: bool: True if this is a trivial constructor, False otherwise.
Check if the declaration is a trivial constructor.
def is_trivial_constructor(constructor): """ Check if the declaration is a trivial constructor. Args: constructor (declarations.constructor_t): the constructor to be checked. Returns: bool: True if this is a trivial constructor, False otherwise. """ assert isinstance(constructor, calldef_members.constructor_t) return not bool(constructor.arguments)
[ "def", "is_trivial_constructor", "(", "constructor", ")", ":", "assert", "isinstance", "(", "constructor", ",", "calldef_members", ".", "constructor_t", ")", "return", "not", "bool", "(", "constructor", ".", "arguments", ")" ]
[ 904, 0 ]
[ 917, 42 ]
python
en
['en', 'error', 'th']
False
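A hedged sketch exercising both constructor checks above (same pygccxml/CastXML assumptions as the earlier sketch; the struct and its members are illustrative):

# Illustrative: classify the constructors of a freshly parsed class.
from pygccxml import declarations, parser, utils

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

code = "struct point_t { point_t(); point_t(const point_t& other); };"
global_ns = declarations.get_global_namespace(parser.parse_string(code, config))

for ctor in global_ns.class_("point_t").constructors():
    # point_t()                -> trivial (no arguments), not a copy constructor
    # point_t(const point_t&)  -> copy constructor, not trivial
    print(ctor,
          declarations.is_copy_constructor(ctor),
          declarations.is_trivial_constructor(ctor))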
declaration_xxx_traits.is_my_case
(self, type_)
returns True if the type represents the desired declaration, False otherwise
returns True if the type represents the desired declaration, False otherwise
def is_my_case(self, type_): """returns True if the type represents the desired declaration, False otherwise""" return ( isinstance(self.__apply_sequence(type_), self.declaration_class) )
[ "def", "is_my_case", "(", "self", ",", "type_", ")", ":", "return", "(", "isinstance", "(", "self", ".", "__apply_sequence", "(", "type_", ")", ",", "self", ".", "declaration_class", ")", ")" ]
[ 70, 4 ]
[ 75, 9 ]
python
en
['en', 'nl', 'en']
True
declaration_xxx_traits.get_declaration
(self, type_)
returns a reference to the declaration Precondition: self.is_my_case(type_) == True
returns a reference to the declaration
def get_declaration(self, type_): """returns a reference to the declaration Precondition: self.is_my_case(type_) == True """ return self.__apply_sequence(type_)
[ "def", "get_declaration", "(", "self", ",", "type_", ")", ":", "return", "self", ".", "__apply_sequence", "(", "type_", ")" ]
[ 77, 4 ]
[ 82, 43 ]
python
en
['en', 'en', 'en']
True
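pygccxml instantiates this traits class for the concrete declaration kinds (class_traits, enum_traits, and so on); a hedged sketch of how the two methods above are typically used together:

# Illustrative helper built on the traits pair above; class_traits and
# enum_traits are the stock instances exposed by pygccxml.declarations.
from pygccxml import declarations

def describe(type_):
    # is_my_case() unwraps typedefs/cv-qualifiers before testing, so
    # get_declaration() is safe to call once it returns True.
    if declarations.class_traits.is_my_case(type_):
        return "class " + declarations.class_traits.get_declaration(type_).name
    if declarations.enum_traits.is_my_case(type_):
        return "enum " + declarations.enum_traits.get_declaration(type_).name
    return str(type_)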
test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_fixing_them
( mock_webbrowser, caplog, tmp_path_factory, )
This test walks through the onboarding experience. The user just checked an existing project out of source control and does not yet have an uncommitted directory.
This test walks through the onboarding experience.
def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_fixing_them( mock_webbrowser, caplog, tmp_path_factory, ): """ This test walks through the onboarding experience. The user just checked an existing project out of source control and does not yet have an uncommitted directory. """ root_dir = tmp_path_factory.mktemp("hiya") root_dir = str(root_dir) os.makedirs(os.path.join(root_dir, "data")) data_folder_path = os.path.join(root_dir, "data") data_path = os.path.join(root_dir, "data", "Titanic.csv") fixture_path = file_relative_path( __file__, os.path.join("..", "..", "test_sets", "Titanic.csv") ) shutil.copy(fixture_path, data_path) # Create a new project from scratch that we will use for the test in the next step runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, ["init", "-d", root_dir], input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(data_folder_path, data_path), catch_exceptions=False, ) stdout = result.output assert result.exit_code == 0 assert mock_webbrowser.call_count == 1 assert ( "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format( root_dir ) in mock_webbrowser.call_args[0][0] ) assert "Great Expectations is now set up." in stdout context = DataContext(os.path.join(root_dir, DataContext.GE_DIR)) uncommitted_dir = os.path.join(context.root_directory, "uncommitted") shutil.rmtree(uncommitted_dir) assert not os.path.isdir(uncommitted_dir) # Test the second invocation of init runner = CliRunner(mix_stderr=False) with pytest.warns( UserWarning, match="Warning. An existing `great_expectations.yml` was found" ): result = runner.invoke( cli, ["init", "-d", root_dir], input="Y\nn\n", catch_exceptions=False ) stdout = result.stdout assert result.exit_code == 0 assert "Great Expectations added some missing files required to run." in stdout assert "You may see new files in" in stdout assert "OK. You must run" not in stdout assert "great_expectations init" not in stdout assert "to fix the missing files!" not in stdout assert "Would you like to build & view this project's Data Docs!?" in stdout assert os.path.isdir(uncommitted_dir) config_var_path = os.path.join(uncommitted_dir, "config_variables.yml") assert os.path.isfile(config_var_path) with open(config_var_path) as f: assert f.read() == CONFIG_VARIABLES_TEMPLATE assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_fixing_them", "(", "mock_webbrowser", ",", "caplog", ",", "tmp_path_factory", ",", ")", ":", "root_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"hiya\"", ")", "root_dir", "=", "str", "(", "root_dir", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ")", ")", "data_folder_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ")", "data_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ",", "\"Titanic.csv\"", ")", "fixture_path", "=", "file_relative_path", "(", "__file__", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"test_sets\"", ",", "\"Titanic.csv\"", ")", ")", "shutil", ".", "copy", "(", "fixture_path", ",", "data_path", ")", "# Create a new project from scratch that we will use for the test in the next step", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"init\"", ",", "\"-d\"", ",", "root_dir", "]", ",", "input", "=", "\"\\n\\n1\\n1\\n{}\\n\\n\\n\\n2\\n{}\\n\\n\\n\\n\"", ".", "format", "(", "data_folder_path", ",", "data_path", ")", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "output", "assert", "result", ".", "exit_code", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "1", "assert", "(", "\"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/\"", ".", "format", "(", "root_dir", ")", "in", "mock_webbrowser", ".", "call_args", "[", "0", "]", "[", "0", "]", ")", "assert", "\"Great Expectations is now set up.\"", "in", "stdout", "context", "=", "DataContext", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "DataContext", ".", "GE_DIR", ")", ")", "uncommitted_dir", "=", "os", ".", "path", ".", "join", "(", "context", ".", "root_directory", ",", "\"uncommitted\"", ")", "shutil", ".", "rmtree", "(", "uncommitted_dir", ")", "assert", "not", "os", ".", "path", ".", "isdir", "(", "uncommitted_dir", ")", "# Test the second invocation of init", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "with", "pytest", ".", "warns", "(", "UserWarning", ",", "match", "=", "\"Warning. An existing `great_expectations.yml` was found\"", ")", ":", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"init\"", ",", "\"-d\"", ",", "root_dir", "]", ",", "input", "=", "\"Y\\nn\\n\"", ",", "catch_exceptions", "=", "False", ")", "stdout", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"Great Expectations added some missing files required to run.\"", "in", "stdout", "assert", "\"You may see new files in\"", "in", "stdout", "assert", "\"OK. 
You must run\"", "not", "in", "stdout", "assert", "\"great_expectations init\"", "not", "in", "stdout", "assert", "\"to fix the missing files!\"", "not", "in", "stdout", "assert", "\"Would you like to build & view this project's Data Docs!?\"", "in", "stdout", "assert", "os", ".", "path", ".", "isdir", "(", "uncommitted_dir", ")", "config_var_path", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "\"config_variables.yml\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "config_var_path", ")", "with", "open", "(", "config_var_path", ")", "as", "f", ":", "assert", "f", ".", "read", "(", ")", "==", "CONFIG_VARIABLES_TEMPLATE", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 17, 0 ]
[ 89, 60 ]
python
en
['en', 'error', 'th']
False
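The test above drives the CLI through click's CliRunner, feeding prompt answers via input and asserting on captured output; the bare pattern in isolation (the toy command below is illustrative, not part of Great Expectations):

# Minimal illustration of the CliRunner pattern the tests rely on.
import click
from click.testing import CliRunner

@click.command()
@click.option("--fix", prompt="Fix missing files?")
def init(fix):
    click.echo("answered: {}".format(fix))

runner = CliRunner()
result = runner.invoke(init, input="Y\n", catch_exceptions=False)
assert result.exit_code == 0
assert "answered: Y" in result.output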
test_cli_init_on_complete_existing_project_all_uncommitted_dirs_exist
( mock_webbrowser, caplog, tmp_path_factory, )
This test walks through the onboarding experience of rerunning init on a complete existing project in which all uncommitted directories already exist.
This test walks through the onboarding experience of rerunning init on a complete existing project in which all uncommitted directories already exist.
def test_cli_init_on_complete_existing_project_all_uncommitted_dirs_exist( mock_webbrowser, caplog, tmp_path_factory, ): """ This test walks through the onboarding experience of rerunning init on a complete existing project in which all uncommitted directories already exist. """ root_dir = tmp_path_factory.mktemp("hiya") root_dir = str(root_dir) os.makedirs(os.path.join(root_dir, "data")) data_folder_path = os.path.join(root_dir, "data") data_path = os.path.join(root_dir, "data", "Titanic.csv") fixture_path = file_relative_path( __file__, os.path.join("..", "..", "test_sets", "Titanic.csv") ) shutil.copy(fixture_path, data_path) # Create a new project from scratch that we will use for the test in the next step runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, ["init", "-d", root_dir], input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(data_folder_path, data_path), catch_exceptions=False, ) assert result.exit_code == 0 assert mock_webbrowser.call_count == 1 assert ( "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format( root_dir ) in mock_webbrowser.call_args[0][0] ) # Now the test begins - rerun the init on an existing project runner = CliRunner(mix_stderr=False) with pytest.warns( UserWarning, match="Warning. An existing `great_expectations.yml` was found" ): result = runner.invoke( cli, ["init", "-d", root_dir], input="n\n", catch_exceptions=False ) stdout = result.stdout assert mock_webbrowser.call_count == 1 assert result.exit_code == 0 assert "This looks like an existing project that" in stdout assert "appears complete" in stdout assert "ready to roll" in stdout assert "Would you like to build & view this project's Data Docs" in stdout assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_cli_init_on_complete_existing_project_all_uncommitted_dirs_exist", "(", "mock_webbrowser", ",", "caplog", ",", "tmp_path_factory", ",", ")", ":", "root_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"hiya\"", ")", "root_dir", "=", "str", "(", "root_dir", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ")", ")", "data_folder_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ")", "data_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"data\"", ",", "\"Titanic.csv\"", ")", "fixture_path", "=", "file_relative_path", "(", "__file__", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"test_sets\"", ",", "\"Titanic.csv\"", ")", ")", "shutil", ".", "copy", "(", "fixture_path", ",", "data_path", ")", "# Create a new project from scratch that we will use for the test in the next step", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"init\"", ",", "\"-d\"", ",", "root_dir", "]", ",", "input", "=", "\"\\n\\n1\\n1\\n{}\\n\\n\\n\\n2\\n{}\\n\\n\\n\\n\"", ".", "format", "(", "data_folder_path", ",", "data_path", ",", "catch_exceptions", "=", "False", ")", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "1", "assert", "(", "\"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/\"", ".", "format", "(", "root_dir", ")", "in", "mock_webbrowser", ".", "call_args", "[", "0", "]", "[", "0", "]", ")", "# Now the test begins - rerun the init on an existing project", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "with", "pytest", ".", "warns", "(", "UserWarning", ",", "match", "=", "\"Warning. An existing `great_expectations.yml` was found\"", ")", ":", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"init\"", ",", "\"-d\"", ",", "root_dir", "]", ",", "input", "=", "\"n\\n\"", ",", "catch_exceptions", "=", "False", ")", "stdout", "=", "result", ".", "stdout", "assert", "mock_webbrowser", ".", "call_count", "==", "1", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"This looks like an existing project that\"", "in", "stdout", "assert", "\"appears complete\"", "in", "stdout", "assert", "\"ready to roll\"", "in", "stdout", "assert", "\"Would you like to build & view this project's Data Docs\"", "in", "stdout", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 93, 0 ]
[ 150, 60 ]
python
en
['en', 'error', 'th']
False
test_escape_all_config_variables
(empty_data_context_with_config_variables)
Make sure that all types of input to escape_all_config_variables are escaped properly: str, dict, OrderedDict, list. Make sure that changing the escape string works as expected.
Make sure that all types of input to escape_all_config_variables are escaped properly: str, dict, OrderedDict, list. Make sure that changing the escape string works as expected.
def test_escape_all_config_variables(empty_data_context_with_config_variables): """ Make sure that all types of input to escape_all_config_variables are escaped properly: str, dict, OrderedDict, list Make sure that changing the escape string works as expected. """ context = empty_data_context_with_config_variables # str value_str = "pas$word1" escaped_value_str = r"pas\$word1" assert context.escape_all_config_variables(value=value_str) == escaped_value_str value_str2 = "pas$wor$d1$" escaped_value_str2 = r"pas\$wor\$d1\$" assert context.escape_all_config_variables(value=value_str2) == escaped_value_str2 # dict value_dict = { "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "5432", "username": "postgres", "password": "pass$word1", "database": "postgres", } escaped_value_dict = { "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "5432", "username": "postgres", "password": r"pass\$word1", "database": "postgres", } assert context.escape_all_config_variables(value=value_dict) == escaped_value_dict # OrderedDict value_ordered_dict = OrderedDict( [ ("UNCOMMITTED", "uncommitted"), ("docs_test_folder", "test$folder"), ( "test_db", { "drivername": "postgresql", "host": "some_host", "port": "5432", "username": "postgres", "password": "pa$sword1", "database": "postgres", }, ), ] ) escaped_value_ordered_dict = OrderedDict( [ ("UNCOMMITTED", "uncommitted"), ("docs_test_folder", r"test\$folder"), ( "test_db", { "drivername": "postgresql", "host": "some_host", "port": "5432", "username": "postgres", "password": r"pa\$sword1", "database": "postgres", }, ), ] ) assert ( context.escape_all_config_variables(value=value_ordered_dict) == escaped_value_ordered_dict ) # list value_list = [ "postgresql", os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "5432", "postgres", "pass$word1", "postgres", ] escaped_value_list = [ "postgresql", os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "5432", "postgres", r"pass\$word1", "postgres", ] assert context.escape_all_config_variables(value=value_list) == escaped_value_list # Custom escape string value_str_custom_escape_string = "pas$word1" escaped_value_str_custom_escape_string = "pas@*&$word1" assert ( context.escape_all_config_variables( value=value_str_custom_escape_string, dollar_sign_escape_string="@*&$" ) == escaped_value_str_custom_escape_string ) value_str_custom_escape_string2 = "pas$wor$d1$" escaped_value_str_custom_escape_string2 = "pas@*&$wor@*&$d1@*&$" assert ( context.escape_all_config_variables( value=value_str_custom_escape_string2, dollar_sign_escape_string="@*&$" ) == escaped_value_str_custom_escape_string2 )
[ "def", "test_escape_all_config_variables", "(", "empty_data_context_with_config_variables", ")", ":", "context", "=", "empty_data_context_with_config_variables", "# str", "value_str", "=", "\"pas$word1\"", "escaped_value_str", "=", "r\"pas\\$word1\"", "assert", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str", ")", "==", "escaped_value_str", "value_str2", "=", "\"pas$wor$d1$\"", "escaped_value_str2", "=", "r\"pas\\$wor\\$d1\\$\"", "assert", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str2", ")", "==", "escaped_value_str2", "# dict", "value_dict", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "\"pass$word1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", "escaped_value_dict", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "r\"pass\\$word1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", "assert", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_dict", ")", "==", "escaped_value_dict", "# OrderedDict", "value_ordered_dict", "=", "OrderedDict", "(", "[", "(", "\"UNCOMMITTED\"", ",", "\"uncommitted\"", ")", ",", "(", "\"docs_test_folder\"", ",", "\"test$folder\"", ")", ",", "(", "\"test_db\"", ",", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"some_host\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "\"pa$sword1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", ",", ")", ",", "]", ")", "escaped_value_ordered_dict", "=", "OrderedDict", "(", "[", "(", "\"UNCOMMITTED\"", ",", "\"uncommitted\"", ")", ",", "(", "\"docs_test_folder\"", ",", "r\"test\\$folder\"", ")", ",", "(", "\"test_db\"", ",", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"some_host\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "r\"pa\\$sword1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", ",", ")", ",", "]", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_ordered_dict", ")", "==", "escaped_value_ordered_dict", ")", "# list", "value_list", "=", "[", "\"postgresql\"", ",", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"5432\"", ",", "\"postgres\"", ",", "\"pass$word1\"", ",", "\"postgres\"", ",", "]", "escaped_value_list", "=", "[", "\"postgresql\"", ",", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"5432\"", ",", "\"postgres\"", ",", "r\"pass\\$word1\"", ",", "\"postgres\"", ",", "]", "assert", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_list", ")", "==", "escaped_value_list", "# Custom escape string", "value_str_custom_escape_string", "=", "\"pas$word1\"", "escaped_value_str_custom_escape_string", "=", "\"pas@*&$word1\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str_custom_escape_string", ",", "dollar_sign_escape_string", "=", "\"@*&$\"", ")", "==", "escaped_value_str_custom_escape_string", ")", 
"value_str_custom_escape_string2", "=", "\"pas$wor$d1$\"", "escaped_value_str_custom_escape_string2", "=", "\"pas@*&$wor@*&$d1@*&$\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str_custom_escape_string2", ",", "dollar_sign_escape_string", "=", "\"@*&$\"", ")", "==", "escaped_value_str_custom_escape_string2", ")" ]
[ 382, 0 ]
[ 493, 5 ]
python
en
['en', 'error', 'th']
False
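The contract the test pins down is narrow: every literal $ in a value is replaced with the escape string (default \$), recursively for dict/OrderedDict/list inputs. A standalone sketch of that rule (this is not Great Expectations' implementation):

# Standalone sketch of the dollar-sign escaping contract verified above.
def escape_dollars(value, escape="\\$"):
    if isinstance(value, str):
        return value.replace("$", escape)
    if isinstance(value, dict):  # also covers OrderedDict
        return type(value)((k, escape_dollars(v, escape)) for k, v in value.items())
    if isinstance(value, list):
        return [escape_dollars(v, escape) for v in value]
    return value

assert escape_dollars("pas$word1") == r"pas\$word1"
assert escape_dollars("pas$wor$d1$", "@*&$") == "pas@*&$wor@*&$d1@*&$"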
test_escape_all_config_variables_skip_substitution_vars
( empty_data_context_with_config_variables, )
What does this test and why? escape_all_config_variables(skip_if_substitution_variable=True/False) should function as documented.
What does this test and why? escape_all_config_variables(skip_if_substitution_variable=True/False) should function as documented.
def test_escape_all_config_variables_skip_substitution_vars( empty_data_context_with_config_variables, ): """ What does this test and why? escape_all_config_variables(skip_if_substitution_variable=True/False) should function as documented. """ context = empty_data_context_with_config_variables # str value_str = "$VALUE_STR" escaped_value_str = r"\$VALUE_STR" assert ( context.escape_all_config_variables( value=value_str, skip_if_substitution_variable=True ) == value_str ) assert ( context.escape_all_config_variables( value=value_str, skip_if_substitution_variable=False ) == escaped_value_str ) value_str2 = "VALUE_$TR" escaped_value_str2 = r"VALUE_\$TR" assert ( context.escape_all_config_variables( value=value_str2, skip_if_substitution_variable=True ) == escaped_value_str2 ) assert ( context.escape_all_config_variables( value=value_str2, skip_if_substitution_variable=False ) == escaped_value_str2 ) multi_value_str = "${USER}:pas$word@${HOST}:${PORT}/${DATABASE}" escaped_multi_value_str = r"\${USER}:pas\$word@\${HOST}:\${PORT}/\${DATABASE}" assert ( context.escape_all_config_variables( value=multi_value_str, skip_if_substitution_variable=True ) == multi_value_str ) assert ( context.escape_all_config_variables( value=multi_value_str, skip_if_substitution_variable=False ) == escaped_multi_value_str ) multi_value_str2 = "$USER:pas$word@$HOST:${PORT}/${DATABASE}" escaped_multi_value_str2 = r"\$USER:pas\$word@\$HOST:\${PORT}/\${DATABASE}" assert ( context.escape_all_config_variables( value=multi_value_str2, skip_if_substitution_variable=True ) == multi_value_str2 ) assert ( context.escape_all_config_variables( value=multi_value_str2, skip_if_substitution_variable=False ) == escaped_multi_value_str2 ) multi_value_str3 = "USER:pas$word@$HOST:${PORT}/${DATABASE}" escaped_multi_value_str3 = r"USER:pas\$word@\$HOST:\${PORT}/\${DATABASE}" assert ( context.escape_all_config_variables( value=multi_value_str3, skip_if_substitution_variable=True ) == escaped_multi_value_str3 ) assert ( context.escape_all_config_variables( value=multi_value_str3, skip_if_substitution_variable=False ) == escaped_multi_value_str3 ) # dict value_dict = { "drivername": "postgresql", "host": "${HOST}", "port": "5432", "username": "postgres", "password": "pass$word1", "database": "$postgres", "sub_dict": { "test_val_no_escaping": "test_val", "test_val_escaping": "te$t_val", "test_val_substitution": "$test_val", "test_val_substitution_braces": "${test_val}", }, } escaped_value_dict = { "drivername": "postgresql", "host": r"\${HOST}", "port": "5432", "username": "postgres", "password": r"pass\$word1", "database": r"\$postgres", "sub_dict": { "test_val_no_escaping": "test_val", "test_val_escaping": r"te\$t_val", "test_val_substitution": r"\$test_val", "test_val_substitution_braces": r"\${test_val}", }, } escaped_value_dict_skip_substitution_variables = { "drivername": "postgresql", "host": "${HOST}", "port": "5432", "username": "postgres", "password": r"pass\$word1", "database": "$postgres", "sub_dict": { "test_val_no_escaping": "test_val", "test_val_escaping": r"te\$t_val", "test_val_substitution": "$test_val", "test_val_substitution_braces": "${test_val}", }, } assert ( context.escape_all_config_variables( value=value_dict, skip_if_substitution_variable=False ) == escaped_value_dict ) assert ( context.escape_all_config_variables( value=value_dict, skip_if_substitution_variable=True ) == escaped_value_dict_skip_substitution_variables ) # OrderedDict value_ordered_dict = OrderedDict( [ ("UNCOMMITTED", "uncommitted"), 
("docs_test_folder", "test$folder"), ( "test_db", { "drivername": "$postgresql", "host": "some_host", "port": "5432", "username": "${USERNAME}", "password": "pa$sword1", "database": "postgres", }, ), ] ) escaped_value_ordered_dict = OrderedDict( [ ("UNCOMMITTED", "uncommitted"), ("docs_test_folder", r"test\$folder"), ( "test_db", { "drivername": r"\$postgresql", "host": "some_host", "port": "5432", "username": r"\${USERNAME}", "password": r"pa\$sword1", "database": "postgres", }, ), ] ) escaped_value_ordered_dict_skip_substitution_variables = OrderedDict( [ ("UNCOMMITTED", "uncommitted"), ("docs_test_folder", r"test\$folder"), ( "test_db", { "drivername": "$postgresql", "host": "some_host", "port": "5432", "username": "${USERNAME}", "password": r"pa\$sword1", "database": "postgres", }, ), ] ) assert ( context.escape_all_config_variables( value=value_ordered_dict, skip_if_substitution_variable=False ) == escaped_value_ordered_dict ) assert ( context.escape_all_config_variables( value=value_ordered_dict, skip_if_substitution_variable=True ) == escaped_value_ordered_dict_skip_substitution_variables ) # list value_list = [ "postgresql", os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "5432", "$postgres", "pass$word1", "${POSTGRES}", ] escaped_value_list = [ "postgresql", os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "5432", r"\$postgres", r"pass\$word1", r"\${POSTGRES}", ] escaped_value_list_skip_substitution_variables = [ "postgresql", os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "5432", "$postgres", r"pass\$word1", "${POSTGRES}", ] assert ( context.escape_all_config_variables( value=value_list, skip_if_substitution_variable=False ) == escaped_value_list ) assert ( context.escape_all_config_variables( value=value_list, skip_if_substitution_variable=True ) == escaped_value_list_skip_substitution_variables )
[ "def", "test_escape_all_config_variables_skip_substitution_vars", "(", "empty_data_context_with_config_variables", ",", ")", ":", "context", "=", "empty_data_context_with_config_variables", "# str", "value_str", "=", "\"$VALUE_STR\"", "escaped_value_str", "=", "r\"\\$VALUE_STR\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "value_str", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_value_str", ")", "value_str2", "=", "\"VALUE_$TR\"", "escaped_value_str2", "=", "r\"VALUE_\\$TR\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str2", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "escaped_value_str2", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_str2", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_value_str2", ")", "multi_value_str", "=", "\"${USER}:pas$word@${HOST}:${PORT}/${DATABASE}\"", "escaped_multi_value_str", "=", "r\"\\${USER}:pas\\$word@\\${HOST}:\\${PORT}/\\${DATABASE}\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "multi_value_str", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_multi_value_str", ")", "multi_value_str2", "=", "\"$USER:pas$word@$HOST:${PORT}/${DATABASE}\"", "escaped_multi_value_str2", "=", "r\"\\$USER:pas\\$word@\\$HOST:\\${PORT}/\\${DATABASE}\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str2", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "multi_value_str2", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str2", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_multi_value_str2", ")", "multi_value_str3", "=", "\"USER:pas$word@$HOST:${PORT}/${DATABASE}\"", "escaped_multi_value_str3", "=", "r\"USER:pas\\$word@\\$HOST:\\${PORT}/\\${DATABASE}\"", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str3", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "escaped_multi_value_str3", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "multi_value_str3", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_multi_value_str3", ")", "# dict", "value_dict", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"${HOST}\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "\"pass$word1\"", ",", "\"database\"", ":", "\"$postgres\"", ",", "\"sub_dict\"", ":", "{", "\"test_val_no_escaping\"", ":", "\"test_val\"", ",", "\"test_val_escaping\"", ":", "\"te$t_val\"", ",", "\"test_val_substitution\"", ":", "\"$test_val\"", ",", "\"test_val_substitution_braces\"", ":", "\"${test_val}\"", ",", "}", ",", "}", "escaped_value_dict", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "r\"\\${HOST}\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "r\"pass\\$word1\"", ",", "\"database\"", 
":", "r\"\\$postgres\"", ",", "\"sub_dict\"", ":", "{", "\"test_val_no_escaping\"", ":", "\"test_val\"", ",", "\"test_val_escaping\"", ":", "r\"te\\$t_val\"", ",", "\"test_val_substitution\"", ":", "r\"\\$test_val\"", ",", "\"test_val_substitution_braces\"", ":", "r\"\\${test_val}\"", ",", "}", ",", "}", "escaped_value_dict_skip_substitution_variables", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"${HOST}\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "r\"pass\\$word1\"", ",", "\"database\"", ":", "\"$postgres\"", ",", "\"sub_dict\"", ":", "{", "\"test_val_no_escaping\"", ":", "\"test_val\"", ",", "\"test_val_escaping\"", ":", "r\"te\\$t_val\"", ",", "\"test_val_substitution\"", ":", "\"$test_val\"", ",", "\"test_val_substitution_braces\"", ":", "\"${test_val}\"", ",", "}", ",", "}", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_dict", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_value_dict", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_dict", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "escaped_value_dict_skip_substitution_variables", ")", "# OrderedDict", "value_ordered_dict", "=", "OrderedDict", "(", "[", "(", "\"UNCOMMITTED\"", ",", "\"uncommitted\"", ")", ",", "(", "\"docs_test_folder\"", ",", "\"test$folder\"", ")", ",", "(", "\"test_db\"", ",", "{", "\"drivername\"", ":", "\"$postgresql\"", ",", "\"host\"", ":", "\"some_host\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"${USERNAME}\"", ",", "\"password\"", ":", "\"pa$sword1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", ",", ")", ",", "]", ")", "escaped_value_ordered_dict", "=", "OrderedDict", "(", "[", "(", "\"UNCOMMITTED\"", ",", "\"uncommitted\"", ")", ",", "(", "\"docs_test_folder\"", ",", "r\"test\\$folder\"", ")", ",", "(", "\"test_db\"", ",", "{", "\"drivername\"", ":", "r\"\\$postgresql\"", ",", "\"host\"", ":", "\"some_host\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "r\"\\${USERNAME}\"", ",", "\"password\"", ":", "r\"pa\\$sword1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", ",", ")", ",", "]", ")", "escaped_value_ordered_dict_skip_substitution_variables", "=", "OrderedDict", "(", "[", "(", "\"UNCOMMITTED\"", ",", "\"uncommitted\"", ")", ",", "(", "\"docs_test_folder\"", ",", "r\"test\\$folder\"", ")", ",", "(", "\"test_db\"", ",", "{", "\"drivername\"", ":", "\"$postgresql\"", ",", "\"host\"", ":", "\"some_host\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"${USERNAME}\"", ",", "\"password\"", ":", "r\"pa\\$sword1\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", ",", ")", ",", "]", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_ordered_dict", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_value_ordered_dict", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_ordered_dict", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "escaped_value_ordered_dict_skip_substitution_variables", ")", "# list", "value_list", "=", "[", "\"postgresql\"", ",", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"5432\"", ",", "\"$postgres\"", ",", "\"pass$word1\"", ",", "\"${POSTGRES}\"", ",", "]", "escaped_value_list", "=", "[", "\"postgresql\"", ",", "os", ".", "getenv", 
"(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"5432\"", ",", "r\"\\$postgres\"", ",", "r\"pass\\$word1\"", ",", "r\"\\${POSTGRES}\"", ",", "]", "escaped_value_list_skip_substitution_variables", "=", "[", "\"postgresql\"", ",", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"5432\"", ",", "\"$postgres\"", ",", "r\"pass\\$word1\"", ",", "\"${POSTGRES}\"", ",", "]", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_list", ",", "skip_if_substitution_variable", "=", "False", ")", "==", "escaped_value_list", ")", "assert", "(", "context", ".", "escape_all_config_variables", "(", "value", "=", "value_list", ",", "skip_if_substitution_variable", "=", "True", ")", "==", "escaped_value_list_skip_substitution_variables", ")" ]
[ 496, 0 ]
[ 738, 5 ]
python
en
['en', 'error', 'th']
False
test_create_data_context_and_config_vars_in_code
(tmp_path_factory, monkeypatch)
What does this test and why? Creating a DataContext via .create(), then using .save_config_variable() to save a variable that will eventually be substituted (e.g. ${SOME_VAR}) should result in the proper escaping of $. This is in response to issue #2196
What does this test and why? Creating a DataContext via .create(), then using .save_config_variable() to save a variable that will eventually be substituted (e.g. ${SOME_VAR}) should result in the proper escaping of $. This is in response to issue #2196
def test_create_data_context_and_config_vars_in_code(tmp_path_factory, monkeypatch): """ What does this test and why? Creating a DataContext via .create(), then using .save_config_variable() to save a variable that will eventually be substituted (e.g. ${SOME_VAR}) should result in the proper escaping of $. This is in response to issue #2196 """ project_path = str(tmp_path_factory.mktemp("data_context")) context = ge.DataContext.create( project_root_dir=project_path, usage_statistics_enabled=False, ) CONFIG_VARS = { "DB_HOST": "${DB_HOST_FROM_ENV_VAR}", "DB_NAME": "DB_NAME", "DB_USER": "DB_USER", "DB_PWD": "pas$word", } for k, v in CONFIG_VARS.items(): context.save_config_variable(k, v) config_vars_file_contents = context._load_config_variables_file() # Add escaping for DB_PWD since it is not of the form ${SOMEVAR} or $SOMEVAR CONFIG_VARS_WITH_ESCAPING = CONFIG_VARS.copy() CONFIG_VARS_WITH_ESCAPING["DB_PWD"] = r"pas\$word" # Ensure all config vars saved are in the config_variables.yml file # and that escaping was added for "pas$word" -> "pas\$word" assert all( item in config_vars_file_contents.items() for item in CONFIG_VARS_WITH_ESCAPING.items() ) assert not all( item in config_vars_file_contents.items() for item in CONFIG_VARS.items() ) # Add env var for substitution monkeypatch.setenv("DB_HOST_FROM_ENV_VAR", "DB_HOST_FROM_ENV_VAR_VALUE") datasource_config = DatasourceConfig( class_name="SqlAlchemyDatasource", credentials={ "drivername": "postgresql", "host": "$DB_HOST", "port": "65432", "database": "${DB_NAME}", "username": "${DB_USER}", "password": "${DB_PWD}", }, ) datasource_config_schema = DatasourceConfigSchema() # use context.add_datasource to test this by adding a datasource with values to substitute. context.add_datasource( initialize=False, name="test_datasource", **datasource_config_schema.dump(datasource_config) ) assert context.list_datasources()[0]["credentials"] == { "drivername": "postgresql", "host": "DB_HOST_FROM_ENV_VAR_VALUE", "port": "65432", "database": "DB_NAME", "username": "DB_USER", # Note masking of "password" field "password": "***", } # Check context substitutes escaped variables appropriately data_context_config_schema = DataContextConfigSchema() context_with_variables_substituted_dict = data_context_config_schema.dump( context.get_config_with_variables_substituted() ) test_datasource_credentials = context_with_variables_substituted_dict[ "datasources" ]["test_datasource"]["credentials"] assert test_datasource_credentials["host"] == "DB_HOST_FROM_ENV_VAR_VALUE" assert test_datasource_credentials["username"] == "DB_USER" assert test_datasource_credentials["password"] == "pas$word" assert test_datasource_credentials["database"] == "DB_NAME" # Ensure skip_if_substitution_variable=False works as documented context.save_config_variable( "escaped", "$SOME_VAR", skip_if_substitution_variable=False ) context.save_config_variable( "escaped_curly", "${SOME_VAR}", skip_if_substitution_variable=False ) config_vars_file_contents = context._load_config_variables_file() assert config_vars_file_contents["escaped"] == r"\$SOME_VAR" assert config_vars_file_contents["escaped_curly"] == r"\${SOME_VAR}"
[ "def", "test_create_data_context_and_config_vars_in_code", "(", "tmp_path_factory", ",", "monkeypatch", ")", ":", "project_path", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"data_context\"", ")", ")", "context", "=", "ge", ".", "DataContext", ".", "create", "(", "project_root_dir", "=", "project_path", ",", "usage_statistics_enabled", "=", "False", ",", ")", "CONFIG_VARS", "=", "{", "\"DB_HOST\"", ":", "\"${DB_HOST_FROM_ENV_VAR}\"", ",", "\"DB_NAME\"", ":", "\"DB_NAME\"", ",", "\"DB_USER\"", ":", "\"DB_USER\"", ",", "\"DB_PWD\"", ":", "\"pas$word\"", ",", "}", "for", "k", ",", "v", "in", "CONFIG_VARS", ".", "items", "(", ")", ":", "context", ".", "save_config_variable", "(", "k", ",", "v", ")", "config_vars_file_contents", "=", "context", ".", "_load_config_variables_file", "(", ")", "# Add escaping for DB_PWD since it is not of the form ${SOMEVAR} or $SOMEVAR", "CONFIG_VARS_WITH_ESCAPING", "=", "CONFIG_VARS", ".", "copy", "(", ")", "CONFIG_VARS_WITH_ESCAPING", "[", "\"DB_PWD\"", "]", "=", "r\"pas\\$word\"", "# Ensure all config vars saved are in the config_variables.yml file", "# and that escaping was added for \"pas$word\" -> \"pas\\$word\"", "assert", "all", "(", "item", "in", "config_vars_file_contents", ".", "items", "(", ")", "for", "item", "in", "CONFIG_VARS_WITH_ESCAPING", ".", "items", "(", ")", ")", "assert", "not", "all", "(", "item", "in", "config_vars_file_contents", ".", "items", "(", ")", "for", "item", "in", "CONFIG_VARS", ".", "items", "(", ")", ")", "# Add env var for substitution", "monkeypatch", ".", "setenv", "(", "\"DB_HOST_FROM_ENV_VAR\"", ",", "\"DB_HOST_FROM_ENV_VAR_VALUE\"", ")", "datasource_config", "=", "DatasourceConfig", "(", "class_name", "=", "\"SqlAlchemyDatasource\"", ",", "credentials", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"$DB_HOST\"", ",", "\"port\"", ":", "\"65432\"", ",", "\"database\"", ":", "\"${DB_NAME}\"", ",", "\"username\"", ":", "\"${DB_USER}\"", ",", "\"password\"", ":", "\"${DB_PWD}\"", ",", "}", ",", ")", "datasource_config_schema", "=", "DatasourceConfigSchema", "(", ")", "# use context.add_datasource to test this by adding a datasource with values to substitute.", "context", ".", "add_datasource", "(", "initialize", "=", "False", ",", "name", "=", "\"test_datasource\"", ",", "*", "*", "datasource_config_schema", ".", "dump", "(", "datasource_config", ")", ")", "assert", "context", ".", "list_datasources", "(", ")", "[", "0", "]", "[", "\"credentials\"", "]", "==", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"DB_HOST_FROM_ENV_VAR_VALUE\"", ",", "\"port\"", ":", "\"65432\"", ",", "\"database\"", ":", "\"DB_NAME\"", ",", "\"username\"", ":", "\"DB_USER\"", ",", "# Note masking of \"password\" field", "\"password\"", ":", "\"***\"", ",", "}", "# Check context substitutes escaped variables appropriately", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "context_with_variables_substituted_dict", "=", "data_context_config_schema", ".", "dump", "(", "context", ".", "get_config_with_variables_substituted", "(", ")", ")", "test_datasource_credentials", "=", "context_with_variables_substituted_dict", "[", "\"datasources\"", "]", "[", "\"test_datasource\"", "]", "[", "\"credentials\"", "]", "assert", "test_datasource_credentials", "[", "\"host\"", "]", "==", "\"DB_HOST_FROM_ENV_VAR_VALUE\"", "assert", "test_datasource_credentials", "[", "\"username\"", "]", "==", "\"DB_USER\"", "assert", "test_datasource_credentials", "[", "\"password\"", "]", 
"==", "\"pas$word\"", "assert", "test_datasource_credentials", "[", "\"database\"", "]", "==", "\"DB_NAME\"", "# Ensure skip_if_substitution_variable=False works as documented", "context", ".", "save_config_variable", "(", "\"escaped\"", ",", "\"$SOME_VAR\"", ",", "skip_if_substitution_variable", "=", "False", ")", "context", ".", "save_config_variable", "(", "\"escaped_curly\"", ",", "\"${SOME_VAR}\"", ",", "skip_if_substitution_variable", "=", "False", ")", "config_vars_file_contents", "=", "context", ".", "_load_config_variables_file", "(", ")", "assert", "config_vars_file_contents", "[", "\"escaped\"", "]", "==", "r\"\\$SOME_VAR\"", "assert", "config_vars_file_contents", "[", "\"escaped_curly\"", "]", "==", "r\"\\${SOME_VAR}\"" ]
[ 741, 0 ]
[ 838, 72 ]
python
en
['en', 'error', 'th']
False
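The same behavior from the user's side, as a hedged sketch (the project path and variable names are illustrative):

# Illustrative workflow: plain secrets get escaped on save, while
# ${VAR}/$VAR references are kept verbatim for later substitution.
import great_expectations as ge

context = ge.DataContext.create(
    project_root_dir="/tmp/ge_demo", usage_statistics_enabled=False)
context.save_config_variable("DB_PWD", "pas$word")  # stored as pas\$word
context.save_config_variable("DB_HOST", "${DB_HOST_FROM_ENV_VAR}")  # stored as-is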
create_text_fc
(text)
Creates :class:`parser.file_configuration_t` instance, configured to contain a Python string that contains valid C++ code :param text: C++ code :type text: str :rtype: :class:`parser.file_configuration_t`
Creates :class:`parser.file_configuration_t` instance, configured to contain a Python string that contains valid C++ code
def create_text_fc(text): """ Creates :class:`parser.file_configuration_t` instance, configured to contain a Python string that contains valid C++ code :param text: C++ code :type text: str :rtype: :class:`parser.file_configuration_t` """ return file_configuration_t( data=text, content_type=file_configuration_t.CONTENT_TYPE.TEXT)
[ "def", "create_text_fc", "(", "text", ")", ":", "return", "file_configuration_t", "(", "data", "=", "text", ",", "content_type", "=", "file_configuration_t", ".", "CONTENT_TYPE", ".", "TEXT", ")" ]
[ 98, 0 ]
[ 111, 60 ]
python
en
['en', 'error', 'th']
False
create_source_fc
(header)
Creates :class:`parser.file_configuration_t` instance, configured to contain path to C++ source file :param header: path to C++ source file :type header: str :rtype: :class:`parser.file_configuration_t`
Creates :class:`parser.file_configuration_t` instance, configured to contain path to C++ source file
def create_source_fc(header): """ Creates :class:`parser.file_configuration_t` instance, configured to contain path to C++ source file :param header: path to C++ source file :type header: str :rtype: :class:`parser.file_configuration_t` """ return file_configuration_t( data=header, content_type=file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE)
[ "def", "create_source_fc", "(", "header", ")", ":", "return", "file_configuration_t", "(", "data", "=", "header", ",", "content_type", "=", "file_configuration_t", ".", "CONTENT_TYPE", ".", "STANDARD_SOURCE_FILE", ")" ]
[ 114, 0 ]
[ 127, 76 ]
python
en
['en', 'error', 'th']
False
create_gccxml_fc
(xml_file)
Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file. :param xml_file: path to GCC-XML generated XML file :type xml_file: str :rtype: :class:`parser.file_configuration_t`
Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file.
def create_gccxml_fc(xml_file): """ Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file. :param xml_file: path to GCC-XML generated XML file :type xml_file: str :rtype: :class:`parser.file_configuration_t` """ return file_configuration_t( data=xml_file, content_type=file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE)
[ "def", "create_gccxml_fc", "(", "xml_file", ")", ":", "return", "file_configuration_t", "(", "data", "=", "xml_file", ",", "content_type", "=", "file_configuration_t", ".", "CONTENT_TYPE", ".", "GCCXML_GENERATED_FILE", ")" ]
[ 130, 0 ]
[ 143, 77 ]
python
en
['en', 'error', 'th']
False
create_cached_source_fc
(header, cached_source_file)
Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file and C++ source file. If the XML file does not exist, it will be created and then used for parsing; if it already exists, it will be used directly. :param header: path to C++ source file :type header: str :param cached_source_file: path to GCC-XML generated XML file :type cached_source_file: str :rtype: :class:`parser.file_configuration_t`
Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file and C++ source file. If the XML file does not exist, it will be created and then used for parsing; if it already exists, it will be used directly.
def create_cached_source_fc(header, cached_source_file): """ Creates :class:`parser.file_configuration_t` instance, configured to contain path to GCC-XML generated XML file and C++ source file. If the XML file does not exist, it will be created and then used for parsing; if it already exists, it will be used directly. :param header: path to C++ source file :type header: str :param cached_source_file: path to GCC-XML generated XML file :type cached_source_file: str :rtype: :class:`parser.file_configuration_t` """ return file_configuration_t( data=header, cached_source_file=cached_source_file, content_type=file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE)
[ "def", "create_cached_source_fc", "(", "header", ",", "cached_source_file", ")", ":", "return", "file_configuration_t", "(", "data", "=", "header", ",", "cached_source_file", "=", "cached_source_file", ",", "content_type", "=", "file_configuration_t", ".", "CONTENT_TYPE", ".", "CACHED_SOURCE_FILE", ")" ]
[ 146, 0 ]
[ 165, 74 ]
python
en
['en', 'error', 'th']
False
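The four factory helpers map one-to-one onto the file_configuration_t content types; an illustrative round-up (assuming they are re-exported by pygccxml.parser, as in recent releases; the file names are made up):

# Illustrative: one factory per content type.
from pygccxml import parser

configs = [
    parser.create_text_fc("int add(int a, int b);"),         # in-memory C++ text
    parser.create_source_fc("math.hpp"),                     # header on disk
    parser.create_gccxml_fc("math.xml"),                     # pre-generated XML
    parser.create_cached_source_fc("math.hpp", "math.xml"),  # header + XML cache
]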
project_reader_t.__init__
(self, config, cache=None, decl_factory=None)
:param config: GCCXML configuration :type config: :class:xml_generator_configuration_t :param cache: declaration cache; by default no cache is used :type cache: :class:`cache_base_t` instance or `str` :param decl_factory: declaration factory :type decl_factory: :class:`decl_factory_t`
:param config: GCCXML configuration :type config: :class:xml_generator_configuration_t
def __init__(self, config, cache=None, decl_factory=None): """ :param config: GCCXML configuration :type config: :class:xml_generator_configuration_t :param cache: declaration cache; by default no cache is used :type cache: :class:`cache_base_t` instance or `str` :param decl_factory: declaration factory :type decl_factory: :class:`decl_factory_t` """ self.__config = config self.__dcache = None if isinstance(cache, declarations_cache.cache_base_t): self.__dcache = cache elif utils.is_str(cache): self.__dcache = declarations_cache.file_cache_t(cache) else: self.__dcache = declarations_cache.dummy_cache_t() self.__decl_factory = decl_factory if not decl_factory: self.__decl_factory = pygccxml.declarations.decl_factory_t() self.logger = utils.loggers.cxx_parser self.__xml_generator_from_xml_file = None
[ "def", "__init__", "(", "self", ",", "config", ",", "cache", "=", "None", ",", "decl_factory", "=", "None", ")", ":", "self", ".", "__config", "=", "config", "self", ".", "__dcache", "=", "None", "if", "isinstance", "(", "cache", ",", "declarations_cache", ".", "cache_base_t", ")", ":", "self", ".", "__dcache", "=", "cache", "elif", "utils", ".", "is_str", "(", "cache", ")", ":", "self", ".", "__dcache", "=", "declarations_cache", ".", "file_cache_t", "(", "cache", ")", "else", ":", "self", ".", "__dcache", "=", "declarations_cache", ".", "dummy_cache_t", "(", ")", "self", ".", "__decl_factory", "=", "decl_factory", "if", "not", "decl_factory", ":", "self", ".", "__decl_factory", "=", "pygccxml", ".", "declarations", ".", "decl_factory_t", "(", ")", "self", ".", "logger", "=", "utils", ".", "loggers", ".", "cxx_parser", "self", ".", "__xml_generator_from_xml_file", "=", "None" ]
[ 172, 4 ]
[ 198, 49 ]
python
en
['en', 'error', 'th']
False
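A hedged construction sketch showing the cache flavors the constructor accepts (configuration boilerplate as in the earlier sketches; the cache file name is illustrative):

# Illustrative: a project reader whose parsed declarations are cached on disk.
from pygccxml import parser, utils

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path, xml_generator=generator_name)

# A str is wrapped in file_cache_t; a cache_base_t instance is used as-is;
# None falls back to the no-op dummy_cache_t.
reader = parser.project_reader_t(config, cache="decls.cache")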
project_reader_t.xml_generator_from_xml_file
(self)
Configuration object containing information about the xml generator read from the xml file. Returns: utils.xml_generators: configuration object
Configuration object containing information about the xml generator read from the xml file.
def xml_generator_from_xml_file(self): """ Configuration object containing information about the xml generator read from the xml file. Returns: utils.xml_generators: configuration object """ return self.__xml_generator_from_xml_file
[ "def", "xml_generator_from_xml_file", "(", "self", ")", ":", "return", "self", ".", "__xml_generator_from_xml_file" ]
[ 201, 4 ]
[ 209, 49 ]
python
en
['en', 'error', 'th']
False
project_reader_t.get_os_file_names
(files)
returns file names :param files: list of strings and/or :class:`file_configuration_t` instances. :type files: list
returns file names
def get_os_file_names(files): """ returns file names :param files: list of strings and/or :class:`file_configuration_t` instances. :type files: list """ fnames = [] for f in files: if utils.is_str(f): fnames.append(f) elif isinstance(f, file_configuration_t): if f.content_type in ( file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE, file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE): fnames.append(f.data) return fnames
[ "def", "get_os_file_names", "(", "files", ")", ":", "fnames", "=", "[", "]", "for", "f", "in", "files", ":", "if", "utils", ".", "is_str", "(", "f", ")", ":", "fnames", ".", "append", "(", "f", ")", "elif", "isinstance", "(", "f", ",", "file_configuration_t", ")", ":", "if", "f", ".", "content_type", "in", "(", "file_configuration_t", ".", "CONTENT_TYPE", ".", "STANDARD_SOURCE_FILE", ",", "file_configuration_t", ".", "CONTENT_TYPE", ".", "CACHED_SOURCE_FILE", ")", ":", "fnames", ".", "append", "(", "f", ".", "data", ")", "else", ":", "pass", "return", "fnames" ]
[ 212, 4 ]
[ 233, 21 ]
python
en
['en', 'error', 'th']
False
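Given its self-less signature, get_os_file_names acts as a static filter; an illustrative call (file names are made up):

# Illustrative: only plain paths and file-backed configurations survive.
from pygccxml import parser

files = [
    "math.hpp",                                            # kept (plain str)
    parser.create_text_fc("int f();"),                     # dropped (no OS file)
    parser.create_cached_source_fc("geo.hpp", "geo.xml"),  # kept as "geo.hpp"
]
print(parser.project_reader_t.get_os_file_names(files))
# ['math.hpp', 'geo.hpp']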
project_reader_t.read_files
( self, files, compilation_mode=COMPILATION_MODE.FILE_BY_FILE)
parses a set of files :param files: list of strings and/or :class:`file_configuration_t` instances. :type files: list :param compilation_mode: determines whether the files are parsed individually or as one single chunk :type compilation_mode: :class:`COMPILATION_MODE` :rtype: [:class:`declaration_t`]
parses a set of files
def read_files( self, files, compilation_mode=COMPILATION_MODE.FILE_BY_FILE): """ parses a set of files :param files: list of strings and/or :class:`file_configuration_t` instances. :type files: list :param compilation_mode: determines whether the files are parsed individually or as one single chunk :type compilation_mode: :class:`COMPILATION_MODE` :rtype: [:class:`declaration_t`] """ if compilation_mode == COMPILATION_MODE.ALL_AT_ONCE \ and len(files) == len(self.get_os_file_names(files)): return self.__parse_all_at_once(files) else: if compilation_mode == COMPILATION_MODE.ALL_AT_ONCE: msg = ''.join([ "Unable to parse files using ALL_AT_ONCE mode. ", "Some of the file configurations are not plain files. ", "pygccxml.parser.project_reader_t switches to ", "FILE_BY_FILE mode."]) self.logger.warning(msg) return self.__parse_file_by_file(files)
[ "def", "read_files", "(", "self", ",", "files", ",", "compilation_mode", "=", "COMPILATION_MODE", ".", "FILE_BY_FILE", ")", ":", "if", "compilation_mode", "==", "COMPILATION_MODE", ".", "ALL_AT_ONCE", "and", "len", "(", "files", ")", "==", "len", "(", "self", ".", "get_os_file_names", "(", "files", ")", ")", ":", "return", "self", ".", "__parse_all_at_once", "(", "files", ")", "else", ":", "if", "compilation_mode", "==", "COMPILATION_MODE", ".", "ALL_AT_ONCE", ":", "msg", "=", "''", ".", "join", "(", "[", "\"Unable to parse files using ALL_AT_ONCE mode. \"", ",", "\"There is some file configuration that is not file. \"", ",", "\"pygccxml.parser.project_reader_t switches to \"", ",", "\"FILE_BY_FILE mode.\"", "]", ")", "self", ".", "logger", ".", "warning", "(", "msg", ")", "return", "self", ".", "__parse_file_by_file", "(", "files", ")" ]
[ 235, 4 ]
[ 263, 51 ]
python
en
['en', 'error', 'th']
False
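An illustrative call (reader as constructed in the sketch after __init__ above; the header names are made up):

# Illustrative: ALL_AT_ONCE only sticks if every entry maps to an OS file;
# otherwise the reader logs a warning and parses FILE_BY_FILE instead.
decls = reader.read_files(
    ["math.hpp", "geo.hpp"],
    compilation_mode=parser.COMPILATION_MODE.ALL_AT_ONCE)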
project_reader_t.read_string
(self, content)
Parse a string containing C/C++ source code. :param content: C/C++ source code. :type content: str :rtype: Declarations
Parse a string containing C/C++ source code.
def read_string(self, content): """Parse a string containing C/C++ source code. :param content: C/C++ source code. :type content: str :rtype: Declarations """ reader = source_reader.source_reader_t( self.__config, None, self.__decl_factory) decls = reader.read_string(content) self.__xml_generator_from_xml_file = reader.xml_generator_from_xml_file return decls
[ "def", "read_string", "(", "self", ",", "content", ")", ":", "reader", "=", "source_reader", ".", "source_reader_t", "(", "self", ".", "__config", ",", "None", ",", "self", ".", "__decl_factory", ")", "decls", "=", "reader", ".", "read_string", "(", "content", ")", "self", ".", "__xml_generator_from_xml_file", "=", "reader", ".", "xml_generator_from_xml_file", "return", "decls" ]
[ 357, 4 ]
[ 370, 20 ]
python
en
['en', 'en', 'en']
True
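And the in-memory counterpart, continuing the same hedged sketch:

# Illustrative: parse a snippet without touching the filesystem.
decls = reader.read_string("namespace geo { struct vec3 { double x, y, z; }; }")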
project_reader_t.read_xml
(self, file_configuration)
parses the C++ code defined by the file_configuration and returns the GCC-XML generated file content
parses the C++ code defined by the file_configuration and returns the GCC-XML generated file content
def read_xml(self, file_configuration): """parses the C++ code defined by the file_configuration and returns the GCC-XML generated file content""" xml_file_path = None delete_xml_file = True fc = file_configuration reader = source_reader.source_reader_t( self.__config, None, self.__decl_factory) try: if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE: self.logger.info('Parsing source file "%s" ... ', fc.data) xml_file_path = reader.create_xml_file(fc.data) elif fc.content_type == \ file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE: self.logger.info('Parsing xml file "%s" ... ', fc.data) xml_file_path = fc.data delete_xml_file = False elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE: # TODO: raise error when header file does not exist if not os.path.exists(fc.cached_source_file): dir_ = os.path.split(fc.cached_source_file)[0] if dir_ and not os.path.exists(dir_): os.makedirs(dir_) self.logger.info( 'Creating xml file "%s" from source file "%s" ... ', fc.cached_source_file, fc.data) xml_file_path = reader.create_xml_file( fc.data, fc.cached_source_file) else: xml_file_path = fc.cached_source_file else: xml_file_path = reader.create_xml_file_from_string(fc.data) with open(xml_file_path, "r") as xml_file: xml = xml_file.read() utils.remove_file_no_raise(xml_file_path, self.__config) self.__xml_generator_from_xml_file = \ reader.xml_generator_from_xml_file return xml finally: if xml_file_path and delete_xml_file: utils.remove_file_no_raise(xml_file_path, self.__config)
[ "def", "read_xml", "(", "self", ",", "file_configuration", ")", ":", "xml_file_path", "=", "None", "delete_xml_file", "=", "True", "fc", "=", "file_configuration", "reader", "=", "source_reader", ".", "source_reader_t", "(", "self", ".", "__config", ",", "None", ",", "self", ".", "__decl_factory", ")", "try", ":", "if", "fc", ".", "content_type", "==", "fc", ".", "CONTENT_TYPE", ".", "STANDARD_SOURCE_FILE", ":", "self", ".", "logger", ".", "info", "(", "'Parsing source file \"%s\" ... '", ",", "fc", ".", "data", ")", "xml_file_path", "=", "reader", ".", "create_xml_file", "(", "fc", ".", "data", ")", "elif", "fc", ".", "content_type", "==", "file_configuration_t", ".", "CONTENT_TYPE", ".", "GCCXML_GENERATED_FILE", ":", "self", ".", "logger", ".", "info", "(", "'Parsing xml file \"%s\" ... '", ",", "fc", ".", "data", ")", "xml_file_path", "=", "fc", ".", "data", "delete_xml_file", "=", "False", "elif", "fc", ".", "content_type", "==", "fc", ".", "CONTENT_TYPE", ".", "CACHED_SOURCE_FILE", ":", "# TODO: raise error when header file does not exist", "if", "not", "os", ".", "path", ".", "exists", "(", "fc", ".", "cached_source_file", ")", ":", "dir_", "=", "os", ".", "path", ".", "split", "(", "fc", ".", "cached_source_file", ")", "[", "0", "]", "if", "dir_", "and", "not", "os", ".", "path", ".", "exists", "(", "dir_", ")", ":", "os", ".", "makedirs", "(", "dir_", ")", "self", ".", "logger", ".", "info", "(", "'Creating xml file \"%s\" from source file \"%s\" ... '", ",", "fc", ".", "cached_source_file", ",", "fc", ".", "data", ")", "xml_file_path", "=", "reader", ".", "create_xml_file", "(", "fc", ".", "data", ",", "fc", ".", "cached_source_file", ")", "else", ":", "xml_file_path", "=", "fc", ".", "cached_source_file", "else", ":", "xml_file_path", "=", "reader", ".", "create_xml_file_from_string", "(", "fc", ".", "data", ")", "with", "open", "(", "xml_file_path", ",", "\"r\"", ")", "as", "xml_file", ":", "xml", "=", "xml_file", ".", "read", "(", ")", "utils", ".", "remove_file_no_raise", "(", "xml_file_path", ",", "self", ".", "__config", ")", "self", ".", "__xml_generator_from_xml_file", "=", "reader", ".", "xml_generator_from_xml_file", "return", "xml", "finally", ":", "if", "xml_file_path", "and", "delete_xml_file", ":", "utils", ".", "remove_file_no_raise", "(", "xml_file_path", ",", "self", ".", "__config", ")" ]
[ 372, 4 ]
[ 416, 72 ]
python
en
['en', 'en', 'en']
True
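Usage sketch for read_xml, assuming pygccxml's file-configuration helpers (create_text_fc wraps an in-memory source string in a file_configuration_t; the helper name is taken from pygccxml's parser package, and the struct is illustrative):

from pygccxml import parser, utils

generator_path, generator_name = utils.find_xml_generator()
config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path,
    xml_generator=generator_name,
)

reader = parser.project_reader_t(config)
fc = parser.create_text_fc("struct point_t { int x; int y; };")
xml = reader.read_xml(fc)  # raw generator XML, returned as a string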
column_aggregate_value
( engine: Type[ExecutionEngine], metric_fn_type="value", domain_type="column", **kwargs, )
Return the column aggregate metric decorator for the specified engine. Args: engine: **kwargs: Returns:
Return the column aggregate metric decorator for the specified engine.
def column_aggregate_value( engine: Type[ExecutionEngine], metric_fn_type="value", domain_type="column", **kwargs, ): """Return the column aggregate metric decorator for the specified engine. Args: engine: **kwargs: Returns: """ if issubclass(engine, PandasExecutionEngine): def wrapper(metric_fn: Callable): @metric_value( engine=PandasExecutionEngine, metric_fn_type=metric_fn_type, domain_type=domain_type, ) @wraps(metric_fn) def inner_func( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], runtime_configuration: Dict, ): filter_column_isnull = kwargs.get( "filter_column_isnull", getattr(cls, "filter_column_isnull", False) ) df, _, accessor_domain_kwargs = execution_engine.get_compute_domain( domain_kwargs=metric_domain_kwargs, domain_type=domain_type ) column_name = accessor_domain_kwargs["column"] if column_name not in metrics["table.columns"]: raise ge_exceptions.ExecutionEngineError( message=f'Error: The column "{column_name}" in BatchData does not exist.' ) if filter_column_isnull: df = df[df[column_name].notnull()] return metric_fn( cls, column=df[column_name], **metric_value_kwargs, _metrics=metrics, ) return inner_func return wrapper else: raise ValueError( "column_aggregate_value decorator only supports PandasExecutionEngine" )
[ "def", "column_aggregate_value", "(", "engine", ":", "Type", "[", "ExecutionEngine", "]", ",", "metric_fn_type", "=", "\"value\"", ",", "domain_type", "=", "\"column\"", ",", "*", "*", "kwargs", ",", ")", ":", "if", "issubclass", "(", "engine", ",", "PandasExecutionEngine", ")", ":", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_value", "(", "engine", "=", "PandasExecutionEngine", ",", "metric_fn_type", "=", "metric_fn_type", ",", "domain_type", "=", "domain_type", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "df", ",", "_", ",", "accessor_domain_kwargs", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "return", "metric_fn", "(", "cls", ",", "column", "=", "df", "[", "column_name", "]", ",", "*", "*", "metric_value_kwargs", ",", "_metrics", "=", "metrics", ",", ")", "return", "inner_func", "return", "wrapper", "else", ":", "raise", "ValueError", "(", "\"column_aggregate_value decorator only supports PandasExecutionEngine\"", ")" ]
[ 28, 0 ]
[ 91, 9 ]
python
en
['en', 'en', 'en']
True
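A minimal sketch of a custom pandas metric built with this decorator. The class and metric names are invented; import paths assume a recent Great Expectations layout (older releases name the base class ColumnMetricProvider instead of ColumnAggregateMetricProvider):

from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.metrics import (
    ColumnAggregateMetricProvider,
    column_aggregate_value,
)


class ColumnCustomMax(ColumnAggregateMetricProvider):
    """Hypothetical metric: the maximum value of a column."""

    metric_name = "column.custom_max"

    @column_aggregate_value(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # `column` arrives as a pandas Series; nulls are already dropped
        # when filter_column_isnull is in effect.
        return column.max()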
column_aggregate_partial
(engine: Type[ExecutionEngine], **kwargs)
Return the column aggregate metric decorator for the specified engine. Args: engine: **kwargs: Returns:
Return the column aggregate metric decorator for the specified engine.
def column_aggregate_partial(engine: Type[ExecutionEngine], **kwargs): """Return the column aggregate metric decorator for the specified engine. Args: engine: **kwargs: Returns: """ partial_fn_type = MetricPartialFunctionTypes.AGGREGATE_FN domain_type = MetricDomainTypes.COLUMN if issubclass(engine, SqlAlchemyExecutionEngine): def wrapper(metric_fn: Callable): @metric_partial( engine=SqlAlchemyExecutionEngine, partial_fn_type=partial_fn_type, domain_type=domain_type, ) @wraps(metric_fn) def inner_func( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], runtime_configuration: Dict, ): filter_column_isnull = kwargs.get( "filter_column_isnull", getattr(cls, "filter_column_isnull", False) ) if filter_column_isnull: compute_domain_kwargs = execution_engine.add_column_row_condition( metric_domain_kwargs ) else: # We do not copy here because if compute domain is different, it will be copied by get_compute_domain compute_domain_kwargs = metric_domain_kwargs ( selectable, compute_domain_kwargs, accessor_domain_kwargs, ) = execution_engine.get_compute_domain( compute_domain_kwargs, domain_type=domain_type ) column_name: str = accessor_domain_kwargs["column"] sqlalchemy_engine: sa.engine.Engine = execution_engine.engine if column_name not in metrics["table.columns"]: raise ge_exceptions.ExecutionEngineError( message=f'Error: The column "{column_name}" in BatchData does not exist.' ) dialect = sqlalchemy_engine.dialect metric_aggregate = metric_fn( cls, column=sa.column(column_name), **metric_value_kwargs, _dialect=dialect, _table=selectable, _column_name=column_name, _sqlalchemy_engine=sqlalchemy_engine, _metrics=metrics, ) return metric_aggregate, compute_domain_kwargs, accessor_domain_kwargs return inner_func return wrapper elif issubclass(engine, SparkDFExecutionEngine): def wrapper(metric_fn: Callable): @metric_partial( engine=SparkDFExecutionEngine, partial_fn_type=partial_fn_type, domain_type=domain_type, ) @wraps(metric_fn) def inner_func( cls, execution_engine: SparkDFExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], runtime_configuration: Dict, ): filter_column_isnull = kwargs.get( "filter_column_isnull", getattr(cls, "filter_column_isnull", False) ) if filter_column_isnull: compute_domain_kwargs = execution_engine.add_column_row_condition( metric_domain_kwargs ) else: # We do not copy here because if compute domain is different, it will be copied by get_compute_domain compute_domain_kwargs = metric_domain_kwargs ( data, compute_domain_kwargs, accessor_domain_kwargs, ) = execution_engine.get_compute_domain( domain_kwargs=compute_domain_kwargs, domain_type=domain_type ) column_name = accessor_domain_kwargs["column"] if column_name not in metrics["table.columns"]: raise ge_exceptions.ExecutionEngineError( message=f'Error: The column "{column_name}" in BatchData does not exist.' ) column = data[column_name] metric_aggregate = metric_fn( cls, column=column, **metric_value_kwargs, _table=data, _column_name=column_name, _metrics=metrics, ) return metric_aggregate, compute_domain_kwargs, accessor_domain_kwargs return inner_func return wrapper else: raise ValueError("Unsupported engine for column_aggregate_partial")
[ "def", "column_aggregate_partial", "(", "engine", ":", "Type", "[", "ExecutionEngine", "]", ",", "*", "*", "kwargs", ")", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "AGGREGATE_FN", "domain_type", "=", "MetricDomainTypes", ".", "COLUMN", "if", "issubclass", "(", "engine", ",", "SqlAlchemyExecutionEngine", ")", ":", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "SqlAlchemyExecutionEngine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "filter_column_isnull", ":", "compute_domain_kwargs", "=", "execution_engine", ".", "add_column_row_condition", "(", "metric_domain_kwargs", ")", "else", ":", "# We do not copy here because if compute domain is different, it will be copied by get_compute_domain", "compute_domain_kwargs", "=", "metric_domain_kwargs", "(", "selectable", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "compute_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", ":", "str", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "sqlalchemy_engine", ":", "sa", ".", "engine", ".", "Engine", "=", "execution_engine", ".", "engine", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "dialect", "=", "sqlalchemy_engine", ".", "dialect", "metric_aggregate", "=", "metric_fn", "(", "cls", ",", "column", "=", "sa", ".", "column", "(", "column_name", ")", ",", "*", "*", "metric_value_kwargs", ",", "_dialect", "=", "dialect", ",", "_table", "=", "selectable", ",", "_column_name", "=", "column_name", ",", "_sqlalchemy_engine", "=", "sqlalchemy_engine", ",", "_metrics", "=", "metrics", ",", ")", "return", "metric_aggregate", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "return", "inner_func", "return", "wrapper", "elif", "issubclass", "(", "engine", ",", "SparkDFExecutionEngine", ")", ":", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "SparkDFExecutionEngine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SparkDFExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "filter_column_isnull", ":", "compute_domain_kwargs", "=", "execution_engine", ".", "add_column_row_condition", "(", "metric_domain_kwargs", ")", "else", ":", "# We do not 
copy here because if compute domain is different, it will be copied by get_compute_domain", "compute_domain_kwargs", "=", "metric_domain_kwargs", "(", "data", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "column", "=", "data", "[", "column_name", "]", "metric_aggregate", "=", "metric_fn", "(", "cls", ",", "column", "=", "column", ",", "*", "*", "metric_value_kwargs", ",", "_table", "=", "data", ",", "_column_name", "=", "column_name", ",", "_metrics", "=", "metrics", ",", ")", "return", "metric_aggregate", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "return", "inner_func", "return", "wrapper", "else", ":", "raise", "ValueError", "(", "\"Unsupported engine for column_aggregate_partial\"", ")" ]
[ 94, 0 ]
[ 227, 75 ]
python
en
['en', 'en', 'en']
True
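The partial-function counterpart for SQL backends, reusing the invented metric from the previous sketch: instead of a concrete value, the decorated function returns a deferred SQLAlchemy aggregate that the engine bundles into a single SELECT.

import sqlalchemy as sa

from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.expectations.metrics import (
    ColumnAggregateMetricProvider,
    column_aggregate_partial,
)


class ColumnCustomMax(ColumnAggregateMetricProvider):
    metric_name = "column.custom_max"

    @column_aggregate_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column, **kwargs):
        # Deferred expression, not a value; evaluation happens when the
        # engine resolves all bundled aggregates for the batch.
        return sa.func.max(column)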
ExpectColumnDistinctValuesToEqualSet.validate_configuration
(self, configuration: Optional[ExpectationConfiguration])
Validating that the user has inputted a value set and that the configuration has been initialized
Validating that the user has inputted a value set and that the configuration has been initialized
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): """Validating that the user has inputted a value set and that the configuration has been initialized""" super().validate_configuration(configuration) if configuration is None: configuration = self.configuration try: assert "value_set" in configuration.kwargs, "value_set is required" assert isinstance( configuration.kwargs["value_set"], (list, set, dict) ), "value_set must be a list or a set" if isinstance(configuration.kwargs["value_set"], dict): assert ( "$PARAMETER" in configuration.kwargs["value_set"] ), 'Evaluation Parameter dict for value_set kwarg must have "$PARAMETER" key' except AssertionError as e: raise InvalidExpectationConfigurationError(str(e)) return True
[ "def", "validate_configuration", "(", "self", ",", "configuration", ":", "Optional", "[", "ExpectationConfiguration", "]", ")", ":", "super", "(", ")", ".", "validate_configuration", "(", "configuration", ")", "if", "configuration", "is", "None", ":", "configuration", "=", "self", ".", "configuration", "try", ":", "assert", "\"value_set\"", "in", "configuration", ".", "kwargs", ",", "\"value_set is required\"", "assert", "isinstance", "(", "configuration", ".", "kwargs", "[", "\"value_set\"", "]", ",", "(", "list", ",", "set", ",", "dict", ")", ")", ",", "\"value_set must be a list or a set\"", "if", "isinstance", "(", "configuration", ".", "kwargs", "[", "\"value_set\"", "]", ",", "dict", ")", ":", "assert", "(", "\"$PARAMETER\"", "in", "configuration", ".", "kwargs", "[", "\"value_set\"", "]", ")", ",", "'Evaluation Parameter dict for value_set kwarg must have \"$PARAMETER\" key'", "except", "AssertionError", "as", "e", ":", "raise", "InvalidExpectationConfigurationError", "(", "str", "(", "e", ")", ")", "return", "True" ]
[ 46, 4 ]
[ 62, 19 ]
python
en
['en', 'en', 'en']
True
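For illustration, a configuration that satisfies the checks above, plus one that would not (column and value_set contents are invented):

from great_expectations.core import ExpectationConfiguration

# Passes validation: value_set is a list.
ok = ExpectationConfiguration(
    expectation_type="expect_column_distinct_values_to_equal_set",
    kwargs={"column": "status", "value_set": ["open", "closed"]},
)

# Would raise InvalidExpectationConfigurationError: value_set is a bare
# string, not a list, set, or "$PARAMETER" dict.
bad_kwargs = {"column": "status", "value_set": "open"}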
V2Route.host_constraints
(self, prune_unreachable_routes: bool)
Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to. An empty set means that this route cannot possibly apply to any hostnames. This considers SNI information and (if prune_unreachable_routes) HeaderMatchers that `exact_match` on the `:authority` header. There are other things that could narrow the set down more, but that we don't consider (like regex matches on `:authority`), leading to it possibly returning a set that is too broad. That's OK for correctness, it just means that we'll emit an Envoy config that contains extra work for Envoy.
Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to.
def host_constraints(self, prune_unreachable_routes: bool) -> Set[str]: """Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to. An empty set means that this route cannot possibly apply to any hostnames. This considers SNI information and (if prune_unreachable_routes) HeaderMatchers that `exact_match` on the `:authority` header. There are other things that could narrow the set down more, but that we don't consider (like regex matches on `:authority`), leading to it possibly returning a set that is too broad. That's OK for correctness, it just means that we'll emit an Envoy config that contains extra work for Envoy. """ # Start by grabbing a list of all the SNI host globs for this route. If there aren't any, # default to "*". hostglobs = set(self.get('_sni', {}).get('hosts', ['*'])) # If we're going to do any aggressive pruning here... if prune_unreachable_routes: # Note: We're *pruning*; the hostglobs set will only ever get *smaller*, it will never # grow. If it gets down to the empty set, then we can safely bail early. # Take all the HeaderMatchers... header_matchers = self.get("match", {}).get("headers", []) for header in header_matchers: # ... and look for ones that exact_match on :authority. if header.get("name") == ":authority" and "exact_match" in header: exact_match = header["exact_match"] if "*" in exact_match: # A real :authority header will never contain a "*", so if this route has an # exact_match looking for one, then this route is unreachable. hostglobs = set() break # hostglobs is empty, no point in doing more work elif any(hostglob_matches(glob, exact_match) for glob in hostglobs): # The exact_match that this route is looking for is matched by one or more # of the hostglobs; so this route is reachable (so far). Set hostglobs to # just match that route. Because we already checked if the exact_match # contains a "*", we don't need to worry about it possibly being interpreted # incorrectly as a glob. hostglobs = set([exact_match]) # Don't "break" here--if somehow this route has multiple disagreeing # HeaderMatchers on :authority, then it's unreachable and we want the next # iteration of the loop to trigger the "else" clause and prune hostglobs # down to the empty set. else: # The exact_match that this route is looking for isn't matched by any of the # hostglobs; so this route is unreachable. hostglobs = set() break # hostglobs is empty, no point in doing more work return hostglobs
[ "def", "host_constraints", "(", "self", ",", "prune_unreachable_routes", ":", "bool", ")", "->", "Set", "[", "str", "]", ":", "# Start by grabbing a list of all the SNI host globs for this route. If there aren't any,", "# default to \"*\".", "hostglobs", "=", "set", "(", "self", ".", "get", "(", "'_sni'", ",", "{", "}", ")", ".", "get", "(", "'hosts'", ",", "[", "'*'", "]", ")", ")", "# If we're going to do any aggressive pruning here...", "if", "prune_unreachable_routes", ":", "# Note: We're *pruning*; the hostglobs set will only ever get *smaller*, it will never", "# grow. If it gets down to the empty set, then we can safely bail early.", "# Take all the HeaderMatchers...", "header_matchers", "=", "self", ".", "get", "(", "\"match\"", ",", "{", "}", ")", ".", "get", "(", "\"headers\"", ",", "[", "]", ")", "for", "header", "in", "header_matchers", ":", "# ... and look for ones that exact_match on :authority.", "if", "header", ".", "get", "(", "\"name\"", ")", "==", "\":authority\"", "and", "\"exact_match\"", "in", "header", ":", "exact_match", "=", "header", "[", "\"exact_match\"", "]", "if", "\"*\"", "in", "exact_match", ":", "# A real :authority header will never contain a \"*\", so if this route has an", "# exact_match looking for one, then this route is unreachable.", "hostglobs", "=", "set", "(", ")", "break", "# hostglobs is empty, no point in doing more work", "elif", "any", "(", "hostglob_matches", "(", "glob", ",", "exact_match", ")", "for", "glob", "in", "hostglobs", ")", ":", "# The exact_match that this route is looking for is matched by one or more", "# of the hostglobs; so this route is reachable (so far). Set hostglobs to", "# just match that route. Because we already checked if the exact_match", "# contains a \"*\", we don't need to worry about it possibly being interpreted", "# incorrectly as a glob.", "hostglobs", "=", "set", "(", "[", "exact_match", "]", ")", "# Don't \"break\" here--if somehow this route has multiple disagreeing", "# HeaderMatchers on :authority, then it's unreachable and we want the next", "# iteration of the loop to trigger the \"else\" clause and prune hostglobs", "# down to the empty set.", "else", ":", "# The exact_match that this route is looking for isn't matched by any of the", "# hostglobs; so this route is unreachable.", "hostglobs", "=", "set", "(", ")", "break", "# hostglobs is empty, no point in doing more work", "return", "hostglobs" ]
[ 422, 4 ]
[ 475, 24 ]
python
en
['en', 'en', 'en']
True
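hostglob_matches above is an Ambassador-internal helper. A simplified stand-in using shell-style matching illustrates the pruning semantics; the real implementation is stricter about where "*" may appear:

from fnmatch import fnmatchcase


def hostglob_matches(glob: str, hostname: str) -> bool:
    # Simplified stand-in: case-sensitive shell-style globbing.
    return fnmatchcase(hostname, glob)


assert hostglob_matches("*", "api.example.com")
assert hostglob_matches("*.example.com", "api.example.com")
assert not hostglob_matches("*.example.com", "example.org")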
BaseGenerator.predict
(self, query: str, documents: List[Document], top_k: Optional[int])
Abstract method to generate answers. :param query: Query :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on. :param top_k: Number of returned answers :return: Generated answers plus additional infos in a dict
Abstract method to generate answers.
def predict(self, query: str, documents: List[Document], top_k: Optional[int]) -> Dict: """ Abstract method to generate answers. :param query: Query :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on. :param top_k: Number of returned answers :return: Generated answers plus additional infos in a dict """ pass
[ "def", "predict", "(", "self", ",", "query", ":", "str", ",", "documents", ":", "List", "[", "Document", "]", ",", "top_k", ":", "Optional", "[", "int", "]", ")", "->", "Dict", ":", "pass" ]
[ 14, 4 ]
[ 23, 12 ]
python
en
['en', 'error', 'th']
False
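A toy subclass showing the contract (import paths vary across Haystack releases and are assumptions here; the answer-dict shape is invented for illustration):

from typing import Dict, List, Optional

from haystack import Document  # re-exported at top level in this era of Haystack
from haystack.generator.base import BaseGenerator  # path assumed; it moved in later releases


class EchoGenerator(BaseGenerator):
    """Hypothetical generator that 'answers' by echoing the top documents."""

    def predict(self, query: str, documents: List[Document], top_k: Optional[int]) -> Dict:
        k = top_k or 1
        # Document text lives in .text here (.content in later releases).
        answers = [{"query": query, "answer": doc.text} for doc in documents[:k]]
        return {"query": query, "answers": answers}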
Store.__init__
( self, store_backend=None, runtime_environment=None, store_name="no_store_name" )
Runtime environment may be necessary to instantiate store backend elements. Args: store_backend: runtime_environment: store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
Runtime environment may be necessary to instantiate store backend elements. Args: store_backend: runtime_environment: store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
def __init__( self, store_backend=None, runtime_environment=None, store_name="no_store_name" ): """ Runtime environment may be necessary to instantiate store backend elements. Args: store_backend: runtime_environment: store_name: store name given in the DataContextConfig (via either in-code or yaml configuration) """ if store_backend is None: store_backend = {"class_name": "InMemoryStoreBackend"} self._store_name = store_name logger.debug("Building store_backend.") module_name = "great_expectations.data_context.store" self._store_backend = instantiate_class_from_config( config=store_backend, runtime_environment=runtime_environment or {}, config_defaults={ "module_name": module_name, "store_name": self._store_name, }, ) if not self._store_backend: raise ClassInstantiationError( module_name=module_name, package_name=None, class_name=store_backend ) if not isinstance(self._store_backend, StoreBackend): raise DataContextError( "Invalid StoreBackend configuration: expected a StoreBackend instance." ) self._use_fixed_length_key = self._store_backend.fixed_length_key
[ "def", "__init__", "(", "self", ",", "store_backend", "=", "None", ",", "runtime_environment", "=", "None", ",", "store_name", "=", "\"no_store_name\"", ")", ":", "if", "store_backend", "is", "None", ":", "store_backend", "=", "{", "\"class_name\"", ":", "\"InMemoryStoreBackend\"", "}", "self", ".", "_store_name", "=", "store_name", "logger", ".", "debug", "(", "\"Building store_backend.\"", ")", "module_name", "=", "\"great_expectations.data_context.store\"", "self", ".", "_store_backend", "=", "instantiate_class_from_config", "(", "config", "=", "store_backend", ",", "runtime_environment", "=", "runtime_environment", "or", "{", "}", ",", "config_defaults", "=", "{", "\"module_name\"", ":", "module_name", ",", "\"store_name\"", ":", "self", ".", "_store_name", ",", "}", ",", ")", "if", "not", "self", ".", "_store_backend", ":", "raise", "ClassInstantiationError", "(", "module_name", "=", "module_name", ",", "package_name", "=", "None", ",", "class_name", "=", "store_backend", ")", "if", "not", "isinstance", "(", "self", ".", "_store_backend", ",", "StoreBackend", ")", ":", "raise", "DataContextError", "(", "\"Invalid StoreBackend configuration: expected a StoreBackend instance.\"", ")", "self", ".", "_use_fixed_length_key", "=", "self", ".", "_store_backend", ".", "fixed_length_key" ]
[ 30, 4 ]
[ 61, 73 ]
python
en
['en', 'error', 'th']
False
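Construction sketch (store names and the filesystem path are illustrative; TupleFilesystemStoreBackend is one of the backends shipped in the store module named above):

from great_expectations.data_context.store import Store

# Default: an InMemoryStoreBackend is built automatically.
mem_store = Store(store_name="scratch_store")

# Explicit backend config, resolved via instantiate_class_from_config.
fs_store = Store(
    store_backend={
        "class_name": "TupleFilesystemStoreBackend",
        "base_directory": "/tmp/my_store",
    },
    store_name="fs_store",
)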
Store.ge_cloud_response_json_to_object_dict
(self, response_json: Dict)
This method takes the full json response from GE cloud and outputs a dict appropriate for deserialization into a GE object
This method takes the full json response from GE cloud and outputs a dict appropriate for deserialization into a GE object
def ge_cloud_response_json_to_object_dict(self, response_json: Dict) -> Dict: """ This method takes the full json response from GE cloud and outputs a dict appropriate for deserialization into a GE object """ return response_json
[ "def", "ge_cloud_response_json_to_object_dict", "(", "self", ",", "response_json", ":", "Dict", ")", "->", "Dict", ":", "return", "response_json" ]
[ 63, 4 ]
[ 68, 28 ]
python
en
['en', 'error', 'th']
False
Store.store_backend_id
(self)
Report the store_backend_id of the currently-configured StoreBackend Returns: store_backend_id which is a UUID(version=4)
Report the store_backend_id of the currently-configured StoreBackend Returns: store_backend_id which is a UUID(version=4)
def store_backend_id(self) -> str: """ Report the store_backend_id of the currently-configured StoreBackend Returns: store_backend_id which is a UUID(version=4) """ return self._store_backend.store_backend_id
[ "def", "store_backend_id", "(", "self", ")", "->", "str", ":", "return", "self", ".", "_store_backend", ".", "store_backend_id" ]
[ 89, 4 ]
[ 95, 51 ]
python
en
['en', 'error', 'th']
False
Store.store_backend_id_warnings_suppressed
(self)
Report the store_backend_id of the currently-configured StoreBackend, suppressing warnings for invalid configurations. Returns: store_backend_id which is a UUID(version=4)
Report the store_backend_id of the currently-configured StoreBackend, suppressing warnings for invalid configurations. Returns: store_backend_id which is a UUID(version=4)
def store_backend_id_warnings_suppressed(self): """ Report the store_backend_id of the currently-configured StoreBackend, suppressing warnings for invalid configurations. Returns: store_backend_id which is a UUID(version=4) """ return self._store_backend.store_backend_id_warnings_suppressed
[ "def", "store_backend_id_warnings_suppressed", "(", "self", ")", ":", "return", "self", ".", "_store_backend", ".", "store_backend_id_warnings_suppressed" ]
[ 98, 4 ]
[ 104, 71 ]
python
en
['en', 'error', 'th']
False
docs
()
Data Docs operations
Data Docs operations
def docs(): """Data Docs operations""" pass
[ "def", "docs", "(", ")", ":", "pass" ]
[ 12, 0 ]
[ 14, 8 ]
python
en
['en', 'bg', 'en']
True
docs_build
(directory, site_name, view=True, assume_yes=False)
Build Data Docs for a project.
Build Data Docs for a project.
def docs_build(directory, site_name, view=True, assume_yes=False): """Build Data Docs for a project.""" context = toolkit.load_data_context_with_error_handling(directory) build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes) toolkit.send_usage_message( data_context=context, event="cli.docs.build", success=True )
[ "def", "docs_build", "(", "directory", ",", "site_name", ",", "view", "=", "True", ",", "assume_yes", "=", "False", ")", ":", "context", "=", "toolkit", ".", "load_data_context_with_error_handling", "(", "directory", ")", "build_docs", "(", "context", ",", "site_name", "=", "site_name", ",", "view", "=", "view", ",", "assume_yes", "=", "assume_yes", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.docs.build\"", ",", "success", "=", "True", ")" ]
[ 42, 0 ]
[ 48, 5 ]
python
en
['en', 'en', 'en']
True
docs_list
(directory)
List known Data Docs Sites.
List known Data Docs Sites.
def docs_list(directory): """List known Data Docs Sites.""" context = toolkit.load_data_context_with_error_handling(directory) docs_sites_url_dicts = context.get_docs_sites_urls() docs_sites_strings = [ " - <cyan>{}</cyan>: {}".format( docs_site_dict["site_name"], docs_site_dict.get("site_url") or f"site configured but does not exist. Run the following command to build site: great_expectations " f'docs build --site-name {docs_site_dict["site_name"]}', ) for docs_site_dict in docs_sites_url_dicts ] if len(docs_sites_strings) == 0: cli_message("No Data Docs sites found") else: list_intro_string = _build_intro_string(docs_sites_strings) cli_message_list(docs_sites_strings, list_intro_string) toolkit.send_usage_message( data_context=context, event="cli.docs.list", success=True )
[ "def", "docs_list", "(", "directory", ")", ":", "context", "=", "toolkit", ".", "load_data_context_with_error_handling", "(", "directory", ")", "docs_sites_url_dicts", "=", "context", ".", "get_docs_sites_urls", "(", ")", "docs_sites_strings", "=", "[", "\" - <cyan>{}</cyan>: {}\"", ".", "format", "(", "docs_site_dict", "[", "\"site_name\"", "]", ",", "docs_site_dict", ".", "get", "(", "\"site_url\"", ")", "or", "f\"site configured but does not exist. Run the following command to build site: great_expectations \"", "f'docs build --site-name {docs_site_dict[\"site_name\"]}'", ",", ")", "for", "docs_site_dict", "in", "docs_sites_url_dicts", "]", "if", "len", "(", "docs_sites_strings", ")", "==", "0", ":", "cli_message", "(", "\"No Data Docs sites found\"", ")", "else", ":", "list_intro_string", "=", "_build_intro_string", "(", "docs_sites_strings", ")", "cli_message_list", "(", "docs_sites_strings", ",", "list_intro_string", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.docs.list\"", ",", "success", "=", "True", ")" ]
[ 58, 0 ]
[ 81, 5 ]
python
en
['en', 'fr', 'en']
True
clean_data_docs
(directory, site_name=None, all=None)
Delete data docs
Delete data docs
def clean_data_docs(directory, site_name=None, all=None): """Delete data docs""" context = toolkit.load_data_context_with_error_handling(directory) failed = True if site_name is None and all is None: cli_message( "<red>{}</red>".format( "Please specify --all to remove all sites or specify a specific site using " "--site_name" ) ) sys.exit(1) context.clean_data_docs(site_name=site_name) failed = False if not failed and context is not None: toolkit.send_usage_message( data_context=context, event="cli.docs.clean", success=True ) cli_message("<green>{}</green>".format("Cleaned data docs")) if failed and context is not None: toolkit.send_usage_message( data_context=context, event="cli.docs.clean", success=False )
[ "def", "clean_data_docs", "(", "directory", ",", "site_name", "=", "None", ",", "all", "=", "None", ")", ":", "context", "=", "toolkit", ".", "load_data_context_with_error_handling", "(", "directory", ")", "failed", "=", "True", "if", "site_name", "is", "None", "and", "all", "is", "None", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "\"Please specify --all to remove all sites or specify a specific site using \"", "\"--site_name\"", ")", ")", "sys", ".", "exit", "(", "1", ")", "context", ".", "clean_data_docs", "(", "site_name", "=", "site_name", ")", "failed", "=", "False", "if", "not", "failed", "and", "context", "is", "not", "None", ":", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.docs.clean\"", ",", "success", "=", "True", ")", "cli_message", "(", "\"<green>{}</green>\"", ".", "format", "(", "\"Cleaned data docs\"", ")", ")", "if", "failed", "and", "context", "is", "not", "None", ":", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.docs.clean\"", ",", "success", "=", "False", ")" ]
[ 97, 0 ]
[ 120, 9 ]
python
it
['it', 'sr', 'pt']
False
build_docs
(context, site_name=None, view=True, assume_yes=False)
Build documentation in a context
Build documentation in a context
def build_docs(context, site_name=None, view=True, assume_yes=False): """Build documentation in a context""" logger.debug("Starting cli.datasource.build_docs") if site_name is not None: site_names = [site_name] else: site_names = None index_page_locator_infos = context.build_data_docs( site_names=site_names, dry_run=True ) msg = "\nThe following Data Docs sites will be built:\n\n" for site_name, index_page_locator_info in index_page_locator_infos.items(): msg += " - <cyan>{}:</cyan> ".format(site_name) msg += "{}\n".format(index_page_locator_info) cli_message(msg) if not assume_yes: toolkit.confirm_proceed_or_exit() cli_message("\nBuilding Data Docs...\n") context.build_data_docs(site_names=site_names) cli_message("Done building Data Docs") if view: context.open_data_docs(site_name=site_name, only_if_exists=True)
[ "def", "build_docs", "(", "context", ",", "site_name", "=", "None", ",", "view", "=", "True", ",", "assume_yes", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Starting cli.datasource.build_docs\"", ")", "if", "site_name", "is", "not", "None", ":", "site_names", "=", "[", "site_name", "]", "else", ":", "site_names", "=", "None", "index_page_locator_infos", "=", "context", ".", "build_data_docs", "(", "site_names", "=", "site_names", ",", "dry_run", "=", "True", ")", "msg", "=", "\"\\nThe following Data Docs sites will be built:\\n\\n\"", "for", "site_name", ",", "index_page_locator_info", "in", "index_page_locator_infos", ".", "items", "(", ")", ":", "msg", "+=", "\" - <cyan>{}:</cyan> \"", ".", "format", "(", "site_name", ")", "msg", "+=", "\"{}\\n\"", ".", "format", "(", "index_page_locator_info", ")", "cli_message", "(", "msg", ")", "if", "not", "assume_yes", ":", "toolkit", ".", "confirm_proceed_or_exit", "(", ")", "cli_message", "(", "\"\\nBuilding Data Docs...\\n\"", ")", "context", ".", "build_data_docs", "(", "site_names", "=", "site_names", ")", "cli_message", "(", "\"Done building Data Docs\"", ")", "if", "view", ":", "context", ".", "open_data_docs", "(", "site_name", "=", "site_name", ",", "only_if_exists", "=", "True", ")" ]
[ 132, 0 ]
[ 159, 72 ]
python
en
['en', 'en', 'en']
True
byte_info.byte_size
(self)
Size of this declaration/type in bytes Returns: int: Size of this declaration/type in bytes
Size of this declaration/type in bytes
def byte_size(self): """ Size of this declaration/type in bytes Returns: int: Size of this declaration/type in bytes """ return self._byte_size
[ "def", "byte_size", "(", "self", ")", ":", "return", "self", ".", "_byte_size" ]
[ 19, 4 ]
[ 26, 30 ]
python
en
['en', 'error', 'th']
False
byte_info.byte_size
(self, new_byte_size)
Set size of this declaration/type in bytes Args: new_byte_size (int): Size of this declaration/type in bytes
Set size of this declaration/type in bytes
def byte_size(self, new_byte_size): """ Set size of this declaration/type in bytes Args: new_byte_size (int): Size of this declaration/type in bytes """ self._byte_size = new_byte_size
[ "def", "byte_size", "(", "self", ",", "new_byte_size", ")", ":", "self", ".", "_byte_size", "=", "new_byte_size" ]
[ 29, 4 ]
[ 36, 39 ]
python
en
['en', 'error', 'th']
False
byte_info.byte_align
(self)
Alignment of this declaration/type in bytes Returns: int: Alignment of this declaration/type in bytes
Alignment of this declaration/type in bytes
def byte_align(self): """ Alignment of this declaration/type in bytes Returns: int: Alignment of this declaration/type in bytes """ return self._byte_align
[ "def", "byte_align", "(", "self", ")", ":", "return", "self", ".", "_byte_align" ]
[ 39, 4 ]
[ 46, 31 ]
python
en
['en', 'error', 'th']
False
byte_info.byte_align
(self, new_byte_align)
Set size of alignment of this declaration/type in bytes Args: new_byte_align (int): Alignment of this declaration/type in bytes
Set size of alignment of this declaration/type in bytes
def byte_align(self, new_byte_align): """ Set size of alignment of this declaration/type in bytes Args: new_byte_align (int): Alignment of this declaration/type in bytes """ self._byte_align = new_byte_align
[ "def", "byte_align", "(", "self", ",", "new_byte_align", ")", ":", "self", ".", "_byte_align", "=", "new_byte_align" ]
[ 49, 4 ]
[ 56, 41 ]
python
en
['en', 'error', 'th']
False
BatchKwargsGenerator.get_available_data_asset_names
(self)
Return the list of asset names known by this batch kwargs generator. Returns: A list of available names
Return the list of asset names known by this batch kwargs generator.
def get_available_data_asset_names(self): """Return the list of asset names known by this batch kwargs generator. Returns: A list of available names """ raise NotImplementedError
[ "def", "get_available_data_asset_names", "(", "self", ")", ":", "raise", "NotImplementedError" ]
[ 181, 4 ]
[ 187, 33 ]
python
en
['en', 'en', 'en']
True
BatchKwargsGenerator.get_available_partition_ids
(self, generator_asset=None, data_asset_name=None)
Applies the current _partitioner to the batches available on data_asset_name and returns a list of valid partition_id strings that can be used to identify batches of data. Args: data_asset_name: the data asset whose partitions should be returned. Returns: A list of partition_id strings
Applies the current _partitioner to the batches available on data_asset_name and returns a list of valid partition_id strings that can be used to identify batches of data.
def get_available_partition_ids(self, generator_asset=None, data_asset_name=None): """ Applies the current _partitioner to the batches available on data_asset_name and returns a list of valid partition_id strings that can be used to identify batches of data. Args: data_asset_name: the data asset whose partitions should be returned. Returns: A list of partition_id strings """ raise NotImplementedError
[ "def", "get_available_partition_ids", "(", "self", ",", "generator_asset", "=", "None", ",", "data_asset_name", "=", "None", ")", ":", "raise", "NotImplementedError" ]
[ 190, 4 ]
[ 201, 33 ]
python
en
['en', 'error', 'th']
False
BatchKwargsGenerator.build_batch_kwargs
(self, data_asset_name=None, partition_id=None, **kwargs)
The key workhorse. Docs forthcoming.
The key workhorse. Docs forthcoming.
def build_batch_kwargs(self, data_asset_name=None, partition_id=None, **kwargs): if (not kwargs.get("name") and not data_asset_name) or ( kwargs.get("name") and data_asset_name ): raise ValueError("Please provide either name or data_asset_name.") if kwargs.get("name"): warnings.warn( "The 'name' argument will be deprecated and renamed to 'data_asset_name'. " "Please update code accordingly.", DeprecationWarning, ) data_asset_name = kwargs.pop("name") """The key workhorse. Docs forthcoming.""" if data_asset_name is not None: batch_parameters = {"data_asset_name": data_asset_name} else: batch_parameters = dict() if partition_id is not None: batch_parameters["partition_id"] = partition_id batch_parameters.update(kwargs) param_keys = set(batch_parameters.keys()) recognized_params = ( self.recognized_batch_parameters | self._datasource.recognized_batch_parameters ) if not param_keys <= recognized_params: logger.warning( "Unrecognized batch_parameter(s): %s" % str(param_keys - recognized_params) ) batch_kwargs = self._build_batch_kwargs(batch_parameters) batch_kwargs["data_asset_name"] = data_asset_name # Track the datasource *in batch_kwargs* when building from a context so that the context can easily reuse them. batch_kwargs["datasource"] = self._datasource.name return batch_kwargs
[ "def", "build_batch_kwargs", "(", "self", ",", "data_asset_name", "=", "None", ",", "partition_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "(", "not", "kwargs", ".", "get", "(", "\"name\"", ")", "and", "not", "data_asset_name", ")", "or", "(", "kwargs", ".", "get", "(", "\"name\"", ")", "and", "data_asset_name", ")", ":", "raise", "ValueError", "(", "\"Please provide either name or data_asset_name.\"", ")", "if", "kwargs", ".", "get", "(", "\"name\"", ")", ":", "warnings", ".", "warn", "(", "\"The 'name' argument will be deprecated and renamed to 'data_asset_name'. \"", "\"Please update code accordingly.\"", ",", "DeprecationWarning", ",", ")", "data_asset_name", "=", "kwargs", ".", "pop", "(", "\"name\"", ")", "if", "data_asset_name", "is", "not", "None", ":", "batch_parameters", "=", "{", "\"data_asset_name\"", ":", "data_asset_name", "}", "else", ":", "batch_parameters", "=", "dict", "(", ")", "if", "partition_id", "is", "not", "None", ":", "batch_parameters", "[", "\"partition_id\"", "]", "=", "partition_id", "batch_parameters", ".", "update", "(", "kwargs", ")", "param_keys", "=", "set", "(", "batch_parameters", ".", "keys", "(", ")", ")", "recognized_params", "=", "(", "self", ".", "recognized_batch_parameters", "|", "self", ".", "_datasource", ".", "recognized_batch_parameters", ")", "if", "not", "param_keys", "<=", "recognized_params", ":", "logger", ".", "warning", "(", "\"Unrecognized batch_parameter(s): %s\"", "%", "str", "(", "param_keys", "-", "recognized_params", ")", ")", "batch_kwargs", "=", "self", ".", "_build_batch_kwargs", "(", "batch_parameters", ")", "batch_kwargs", "[", "\"data_asset_name\"", "]", "=", "data_asset_name", "# Track the datasource *in batch_kwargs* when building from a context so that the context can easily reuse them.", "batch_kwargs", "[", "\"datasource\"", "]", "=", "self", ".", "_datasource", ".", "name", "return", "batch_kwargs" ]
[ 251, 4 ]
[ 287, 27 ]
python
en
['en', 'en', 'en']
True
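Call sketch, assuming `generator` is a configured BatchKwargsGenerator subclass obtained from a datasource (all names illustrative):

batch_kwargs = generator.build_batch_kwargs(
    data_asset_name="npi_data",
    partition_id="2021-01-01",
)
# The result always carries the asset name and the owning datasource, e.g.
# {"data_asset_name": "npi_data", "datasource": "my_datasource", ...}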
MemcachedDiscovery._resync
(self)
Check if the list of available nodes has changed. If any change is detected, a new HashClient pointing to all currently available nodes replaces the current one; otherwise the existing client is kept.
Check if the list of available nodes has changed. If any change is detected, a new HashClient pointing to all currently available nodes replaces the current one; otherwise the existing client is kept.
def _resync(self): """ Check if the list of available nodes has changed. If any change is detected, a new HashClient pointing to all currently available nodes replaces the current one; otherwise the existing client is kept. """ # Collect all the Memcached pods' IP addresses try: _, _, ips = socket.gethostbyname_ex(self.host) except socket.gaierror: # The host could not be found. This means that either the service is # down or that no pods are running ips = [] if set(ips) != set(self._ips): # A different list of ips has been detected, so we generate # a new client self._ips = ips if self._ips: servers = [(ip, self.port) for ip in self._ips] self._client = HashClient(servers, use_pooling=True) else: self._client = None
[ "def", "_resync", "(", "self", ")", ":", "# Collect the all Memcached pods' IP addresses", "try", ":", "_", ",", "_", ",", "ips", "=", "socket", ".", "gethostbyname_ex", "(", "self", ".", "host", ")", "except", "socket", ".", "gaierror", ":", "# The host could not be found. This mean that either the service is", "# down or that no pods are running", "ips", "=", "[", "]", "if", "set", "(", "ips", ")", "!=", "set", "(", "self", ".", "_ips", ")", ":", "# A different list of ips has been detected, so we generate", "# a new client", "self", ".", "_ips", "=", "ips", "if", "self", ".", "_ips", ":", "servers", "=", "[", "(", "ip", ",", "self", ".", "port", ")", "for", "ip", "in", "self", ".", "_ips", "]", "self", ".", "_client", "=", "HashClient", "(", "servers", ",", "use_pooling", "=", "True", ")", "else", ":", "self", ".", "_client", "=", "None" ]
[ 35, 4 ]
[ 56, 35 ]
python
en
['en', 'error', 'th']
False
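The discovery primitive behind _resync, runnable on its own: a Kubernetes headless service resolves to one A record per ready pod (the hostname is illustrative and will typically fail outside a cluster, exercising the gaierror path):

import socket

try:
    _, _, ips = socket.gethostbyname_ex("memcached.default.svc.cluster.local")
except socket.gaierror:
    ips = []  # service missing or no pods ready
print(sorted(ips))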
Trading.make_trades
(self, companies)
Executes trades for the specified companies based on sentiment.
Executes trades for the specified companies based on sentiment.
def make_trades(self, companies): """Executes trades for the specified companies based on sentiment.""" # Determine whether the markets are open. market_status = self.get_market_status() if not market_status: self.logs.error('Not trading without market status.') return False # Filter for any strategies resulting in trades. actionable_strategies = [] market_status = self.get_market_status() for company in companies: strategy = self.get_strategy(company, market_status) if strategy['action'] != 'hold': actionable_strategies.append(strategy) else: self.logs.warn('Dropping strategy: %s' % strategy) if not actionable_strategies: self.logs.warn('No actionable strategies for trading.') return False # Calculate the budget per strategy. balance = self.get_balance() budget = self.get_budget(balance, len(actionable_strategies)) if not budget: self.logs.warn('No budget for trading: %s %s %s' % (budget, balance, actionable_strategies)) return False self.logs.debug('Using budget: %s x $%s' % (len(actionable_strategies), budget)) # Handle trades for each strategy. success = True for strategy in actionable_strategies: ticker = strategy['ticker'] action = strategy['action'] # Execute the strategy. if action == 'bull': self.logs.info('Bull: %s %s' % (ticker, budget)) success = success and self.bull(ticker, budget) elif action == 'bear': self.logs.info('Bear: %s %s' % (ticker, budget)) success = success and self.bear(ticker, budget) else: self.logs.error('Unknown strategy: %s' % strategy) return success
[ "def", "make_trades", "(", "self", ",", "companies", ")", ":", "# Determine whether the markets are open.", "market_status", "=", "self", ".", "get_market_status", "(", ")", "if", "not", "market_status", ":", "self", ".", "logs", ".", "error", "(", "'Not trading without market status.'", ")", "return", "False", "# Filter for any strategies resulting in trades.", "actionable_strategies", "=", "[", "]", "market_status", "=", "self", ".", "get_market_status", "(", ")", "for", "company", "in", "companies", ":", "strategy", "=", "self", ".", "get_strategy", "(", "company", ",", "market_status", ")", "if", "strategy", "[", "'action'", "]", "!=", "'hold'", ":", "actionable_strategies", ".", "append", "(", "strategy", ")", "else", ":", "self", ".", "logs", ".", "warn", "(", "'Dropping strategy: %s'", "%", "strategy", ")", "if", "not", "actionable_strategies", ":", "self", ".", "logs", ".", "warn", "(", "'No actionable strategies for trading.'", ")", "return", "False", "# Calculate the budget per strategy.", "balance", "=", "self", ".", "get_balance", "(", ")", "budget", "=", "self", ".", "get_budget", "(", "balance", ",", "len", "(", "actionable_strategies", ")", ")", "if", "not", "budget", ":", "self", ".", "logs", ".", "warn", "(", "'No budget for trading: %s %s %s'", "%", "(", "budget", ",", "balance", ",", "actionable_strategies", ")", ")", "return", "False", "self", ".", "logs", ".", "debug", "(", "'Using budget: %s x $%s'", "%", "(", "len", "(", "actionable_strategies", ")", ",", "budget", ")", ")", "# Handle trades for each strategy.", "success", "=", "True", "for", "strategy", "in", "actionable_strategies", ":", "ticker", "=", "strategy", "[", "'ticker'", "]", "action", "=", "strategy", "[", "'action'", "]", "# Execute the strategy.", "if", "action", "==", "'bull'", ":", "self", ".", "logs", ".", "info", "(", "'Bull: %s %s'", "%", "(", "ticker", ",", "budget", ")", ")", "success", "=", "success", "and", "self", ".", "bull", "(", "ticker", ",", "budget", ")", "elif", "action", "==", "'bear'", ":", "self", ".", "logs", ".", "info", "(", "'Bear: %s %s'", "%", "(", "ticker", ",", "budget", ")", ")", "success", "=", "success", "and", "self", ".", "bear", "(", "ticker", ",", "budget", ")", "else", ":", "self", ".", "logs", ".", "error", "(", "'Unknown strategy: %s'", "%", "strategy", ")", "return", "success" ]
[ 67, 4 ]
[ 118, 22 ]
python
en
['en', 'en', 'en']
True
Trading.get_strategy
(self, company, market_status)
Determines the strategy for trading a company based on sentiment and market status.
Determines the strategy for trading a company based on sentiment and market status.
def get_strategy(self, company, market_status): """Determines the strategy for trading a company based on sentiment and market status. """ ticker = company['ticker'] sentiment = company['sentiment'] strategy = {} strategy['name'] = company['name'] if 'root' in company: strategy['root'] = company['root'] strategy['sentiment'] = company['sentiment'] strategy['ticker'] = ticker strategy['exchange'] = company['exchange'] # Don't do anything with blacklisted stocks. if ticker in TICKER_BLACKLIST: strategy['action'] = 'hold' strategy['reason'] = 'blacklist' return strategy # TODO: Figure out some strategy for the markets closed case. # Don't trade unless the markets are open or are about to open. if market_status != 'open' and market_status != 'pre': strategy['action'] = 'hold' strategy['reason'] = 'market closed' return strategy # Can't trade without sentiment. if sentiment == 0: strategy['action'] = 'hold' strategy['reason'] = 'neutral sentiment' return strategy # Determine bull or bear based on sentiment direction. if sentiment > 0: strategy['action'] = 'bull' strategy['reason'] = 'positive sentiment' return strategy else: # sentiment < 0 strategy['action'] = 'bear' strategy['reason'] = 'negative sentiment' return strategy
[ "def", "get_strategy", "(", "self", ",", "company", ",", "market_status", ")", ":", "ticker", "=", "company", "[", "'ticker'", "]", "sentiment", "=", "company", "[", "'sentiment'", "]", "strategy", "=", "{", "}", "strategy", "[", "'name'", "]", "=", "company", "[", "'name'", "]", "if", "'root'", "in", "company", ":", "strategy", "[", "'root'", "]", "=", "company", "[", "'root'", "]", "strategy", "[", "'sentiment'", "]", "=", "company", "[", "'sentiment'", "]", "strategy", "[", "'ticker'", "]", "=", "ticker", "strategy", "[", "'exchange'", "]", "=", "company", "[", "'exchange'", "]", "# Don't do anything with blacklisted stocks.", "if", "ticker", "in", "TICKER_BLACKLIST", ":", "strategy", "[", "'action'", "]", "=", "'hold'", "strategy", "[", "'reason'", "]", "=", "'blacklist'", "return", "strategy", "# TODO: Figure out some strategy for the markets closed case.", "# Don't trade unless the markets are open or are about to open.", "if", "market_status", "!=", "'open'", "and", "market_status", "!=", "'pre'", ":", "strategy", "[", "'action'", "]", "=", "'hold'", "strategy", "[", "'reason'", "]", "=", "'market closed'", "return", "strategy", "# Can't trade without sentiment.", "if", "sentiment", "==", "0", ":", "strategy", "[", "'action'", "]", "=", "'hold'", "strategy", "[", "'reason'", "]", "=", "'neutral sentiment'", "return", "strategy", "# Determine bull or bear based on sentiment direction.", "if", "sentiment", ">", "0", ":", "strategy", "[", "'action'", "]", "=", "'bull'", "strategy", "[", "'reason'", "]", "=", "'positive sentiment'", "return", "strategy", "else", ":", "# sentiment < 0", "strategy", "[", "'action'", "]", "=", "'bear'", "strategy", "[", "'reason'", "]", "=", "'negative sentiment'", "return", "strategy" ]
[ 120, 4 ]
[ 163, 27 ]
python
en
['en', 'en', 'en']
True
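Worked examples of the decision logic above (company dict abridged; only the fields the method reads are shown):

company = {"name": "Apple", "ticker": "AAPL", "exchange": "NASDAQ", "sentiment": 0.5}
# get_strategy(company, "open")  -> action "bull", reason "positive sentiment"
# get_strategy(company, "close") -> action "hold", reason "market closed"
company["sentiment"] = 0
# get_strategy(company, "open")  -> action "hold", reason "neutral sentiment"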
Trading.get_budget
(self, balance, num_strategies)
Calculates the budget per company based on the available balance.
Calculates the budget per company based on the available balance.
def get_budget(self, balance, num_strategies): """Calculates the budget per company based on the available balance.""" if num_strategies <= 0: self.logs.warn('No budget without strategies.') return 0.0 return round(max(0.0, balance - CASH_HOLD) / num_strategies, 2)
[ "def", "get_budget", "(", "self", ",", "balance", ",", "num_strategies", ")", ":", "if", "num_strategies", "<=", "0", ":", "self", ".", "logs", ".", "warn", "(", "'No budget without strategies.'", ")", "return", "0.0", "return", "round", "(", "max", "(", "0.0", ",", "balance", "-", "CASH_HOLD", ")", "/", "num_strategies", ",", "2", ")" ]
[ 165, 4 ]
[ 171, 71 ]
python
en
['en', 'en', 'en']
True
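Worked arithmetic for the budget rule, assuming CASH_HOLD = 1000.0 (the module constant's actual value may differ):

CASH_HOLD = 1000.0
balance, num_strategies = 5500.0, 3
budget = round(max(0.0, balance - CASH_HOLD) / num_strategies, 2)
assert budget == 1500.0  # everything above the cash cushion, split evenly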
Trading.get_market_status
(self)
Finds out whether the markets are open right now.
Finds out whether the markets are open right now.
def get_market_status(self): """Finds out whether the markets are open right now.""" clock_url = TRADEKING_API_URL % 'market/clock' response = self.make_request(url=clock_url) if not response: self.logs.error('No clock response.') return None try: clock_response = response['response'] current = clock_response['status']['current'] except KeyError: self.logs.error('Malformed clock response: %s' % response) return None if current not in ['pre', 'open', 'after', 'close']: self.logs.error('Unknown market status: %s' % current) return None self.logs.debug('Current market status: %s' % current) return current
[ "def", "get_market_status", "(", "self", ")", ":", "clock_url", "=", "TRADEKING_API_URL", "%", "'market/clock'", "response", "=", "self", ".", "make_request", "(", "url", "=", "clock_url", ")", "if", "not", "response", ":", "self", ".", "logs", ".", "error", "(", "'No clock response.'", ")", "return", "None", "try", ":", "clock_response", "=", "response", "[", "'response'", "]", "current", "=", "clock_response", "[", "'status'", "]", "[", "'current'", "]", "except", "KeyError", ":", "self", ".", "logs", ".", "error", "(", "'Malformed clock response: %s'", "%", "response", ")", "return", "None", "if", "current", "not", "in", "[", "'pre'", ",", "'open'", ",", "'after'", ",", "'close'", "]", ":", "self", ".", "logs", ".", "error", "(", "'Unknown market status: %s'", "%", "current", ")", "return", "None", "self", ".", "logs", ".", "debug", "(", "'Current market status: %s'", "%", "current", ")", "return", "current" ]
[ 173, 4 ]
[ 195, 22 ]
python
en
['en', 'en', 'en']
True
Trading.get_historical_prices
(self, ticker, timestamp)
Finds the last price at or before a timestamp and at EOD.
Finds the last price at or before a timestamp and at EOD.
def get_historical_prices(self, ticker, timestamp): """Finds the last price at or before a timestamp and at EOD.""" # Start with today's quotes. quotes = self.get_day_quotes(ticker, timestamp) if not quotes: self.logs.warn('No quotes for day: %s' % timestamp) return None # Depending on where we land relative to the trading day, pick the # right quote and EOD quote. first_quote = quotes[0] first_quote_time = first_quote['time'] last_quote = quotes[-1] last_quote_time = last_quote['time'] if timestamp < first_quote_time: self.logs.debug('Using previous quote.') previous_day = self.get_previous_day(timestamp) previous_quotes = self.get_day_quotes(ticker, previous_day) if not previous_quotes: self.logs.error('No quotes for previous day: %s' % previous_day) return None quote_at = previous_quotes[-1] quote_eod = last_quote elif timestamp >= first_quote_time and timestamp <= last_quote_time: self.logs.debug('Using closest quote.') # Walk through the quotes until we stepped over the timestamp. previous_quote = first_quote for quote in quotes: quote_time = quote['time'] if quote_time > timestamp: break previous_quote = quote quote_at = previous_quote quote_eod = last_quote else: # timestamp > last_quote_time self.logs.debug('Using last quote.') quote_at = last_quote next_day = self.get_next_day(timestamp) next_quotes = self.get_day_quotes(ticker, next_day) if not next_quotes: self.logs.error('No quotes for next day: %s' % next_day) return None quote_eod = next_quotes[-1] self.logs.debug('Using quotes: %s %s' % (quote_at, quote_eod)) return {'at': quote_at['price'], 'eod': quote_eod['price']}
[ "def", "get_historical_prices", "(", "self", ",", "ticker", ",", "timestamp", ")", ":", "# Start with today's quotes.", "quotes", "=", "self", ".", "get_day_quotes", "(", "ticker", ",", "timestamp", ")", "if", "not", "quotes", ":", "self", ".", "logs", ".", "warn", "(", "'No quotes for day: %s'", "%", "timestamp", ")", "return", "None", "# Depending on where we land relative to the trading day, pick the", "# right quote and EOD quote.", "first_quote", "=", "quotes", "[", "0", "]", "first_quote_time", "=", "first_quote", "[", "'time'", "]", "last_quote", "=", "quotes", "[", "-", "1", "]", "last_quote_time", "=", "last_quote", "[", "'time'", "]", "if", "timestamp", "<", "first_quote_time", ":", "self", ".", "logs", ".", "debug", "(", "'Using previous quote.'", ")", "previous_day", "=", "self", ".", "get_previous_day", "(", "timestamp", ")", "previous_quotes", "=", "self", ".", "get_day_quotes", "(", "ticker", ",", "previous_day", ")", "if", "not", "previous_quotes", ":", "self", ".", "logs", ".", "error", "(", "'No quotes for previous day: %s'", "%", "previous_day", ")", "return", "None", "quote_at", "=", "previous_quotes", "[", "-", "1", "]", "quote_eod", "=", "last_quote", "elif", "timestamp", ">=", "first_quote_time", "and", "timestamp", "<=", "last_quote_time", ":", "self", ".", "logs", ".", "debug", "(", "'Using closest quote.'", ")", "# Walk through the quotes until we stepped over the timestamp.", "previous_quote", "=", "first_quote", "for", "quote", "in", "quotes", ":", "quote_time", "=", "quote", "[", "'time'", "]", "if", "quote_time", ">", "timestamp", ":", "break", "previous_quote", "=", "quote", "quote_at", "=", "previous_quote", "quote_eod", "=", "last_quote", "else", ":", "# timestamp > last_quote_time", "self", ".", "logs", ".", "debug", "(", "'Using last quote.'", ")", "quote_at", "=", "last_quote", "next_day", "=", "self", ".", "get_next_day", "(", "timestamp", ")", "next_quotes", "=", "self", ".", "get_day_quotes", "(", "ticker", ",", "next_day", ")", "if", "not", "next_quotes", ":", "self", ".", "logs", ".", "error", "(", "'No quotes for next day: %s'", "%", "next_day", ")", "return", "None", "quote_eod", "=", "next_quotes", "[", "-", "1", "]", "self", ".", "logs", ".", "debug", "(", "'Using quotes: %s %s'", "%", "(", "quote_at", ",", "quote_eod", ")", ")", "return", "{", "'at'", ":", "quote_at", "[", "'price'", "]", ",", "'eod'", ":", "quote_eod", "[", "'price'", "]", "}" ]
[ 197, 4 ]
[ 244, 67 ]
python
en
['en', 'en', 'en']
True
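The subtle part of get_historical_prices is the middle branch, which keeps the last quote at or before the timestamp. A self-contained sketch of just that walk, with fabricated quotes (all values invented for illustration):

from datetime import datetime

# Fabricated minute quotes for illustration only.
quotes = [
    {'time': datetime(2020, 6, 1, 9, 30), 'price': 100.0},
    {'time': datetime(2020, 6, 1, 9, 31), 'price': 100.5},
    {'time': datetime(2020, 6, 1, 9, 32), 'price': 101.0},
]
timestamp = datetime(2020, 6, 1, 9, 31, 30)

# Same walk as in get_historical_prices: stop once a quote is past
# the timestamp and keep the one just before it.
previous_quote = quotes[0]
for quote in quotes:
    if quote['time'] > timestamp:
        break
    previous_quote = quote

assert previous_quote['price'] == 100.5  # the 9:31 quote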
Trading.get_day_quotes
(self, ticker, timestamp)
Collects all quotes from the day of the market timestamp.
Collects all quotes from the day of the market timestamp.
def get_day_quotes(self, ticker, timestamp):
        """Collects all quotes from the day of the market timestamp."""

        polygon_client = PolygonClient(POLYGON_API_KEY)
        quotes = []

        # The timestamp is expected in market time.
        day_str = timestamp.strftime('%Y-%m-%d')
        try:
            response = polygon_client.stocks_equities_aggregates(
                ticker, 1, 'minute', day_str, day_str)
            results = response.results
        except AttributeError as e:
            self.logs.error(
                'Failed to request historical data for %s on %s: %s' % (
                    ticker, timestamp, e))
            return None

        for result in results:
            try:
                # Parse and convert the current minute's timestamp.
                minute_timestamp = result['t'] / 1000
                minute_market_time = self.utc_to_market_time(
                    datetime.fromtimestamp(minute_timestamp))

                # Use the price at the beginning of the minute.
                price = result['o']
                if not price or price < 0:
                    self.logs.warn('Invalid price: %s' % price)
                    continue

                quote = {'time': minute_market_time, 'price': price}
                quotes.append(quote)
            except (KeyError, TypeError, ValueError) as e:
                self.logs.warn('Failed to parse result: %s' % e)

        return quotes
[ "def", "get_day_quotes", "(", "self", ",", "ticker", ",", "timestamp", ")", ":", "polygon_client", "=", "PolygonClient", "(", "POLYGON_API_KEY", ")", "quotes", "=", "[", "]", "# The timestamp is expected in market time.", "day_str", "=", "timestamp", ".", "strftime", "(", "'%Y-%m-%d'", ")", "try", ":", "response", "=", "polygon_client", ".", "stocks_equities_aggregates", "(", "ticker", ",", "1", ",", "'minute'", ",", "day_str", ",", "day_str", ")", "results", "=", "response", ".", "results", "except", "AttributeError", "as", "e", ":", "self", ".", "logs", ".", "error", "(", "'Failed to request historical data for %s on %s: %s'", "%", "(", "ticker", ",", "timestamp", ",", "e", ")", ")", "return", "None", "for", "result", "in", "results", ":", "try", ":", "# Parse and convert the current minute's timestamp.", "minute_timestamp", "=", "result", "[", "'t'", "]", "/", "1000", "minute_market_time", "=", "self", ".", "utc_to_market_time", "(", "datetime", ".", "fromtimestamp", "(", "minute_timestamp", ")", ")", "# Use the price at the beginning of the minute.", "price", "=", "result", "[", "'o'", "]", "if", "not", "price", "or", "price", "<", "0", ":", "self", ".", "logs", ".", "warn", "(", "'Invalid price: %s'", "%", "price", ")", "continue", "quote", "=", "{", "'time'", ":", "minute_market_time", ",", "'price'", ":", "price", "}", "quotes", ".", "append", "(", "quote", ")", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ")", "as", "e", ":", "self", ".", "logs", ".", "warn", "(", "'Failed to parse result: %s'", "%", "e", ")", "return", "quotes" ]
[ 247, 4 ]
[ 283, 21 ]
python
en
['en', 'en', 'en']
True
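get_day_quotes depends on Polygon's single-letter bar keys: 't' (epoch milliseconds) and 'o' (the minute's opening price). A standalone sketch of that parsing step, using one fabricated bar and skipping the market-time conversion:

from datetime import datetime

# One fabricated Polygon-style minute bar: 't' is epoch milliseconds,
# 'o' is the price at the open of the minute.
result = {'t': 1591018200000, 'o': 317.75}

# Same parsing as get_day_quotes, minus the utc_to_market_time shift.
minute_timestamp = result['t'] / 1000
minute_time = datetime.fromtimestamp(minute_timestamp)
price = result['o']

# Mirrors the validity check in the function: drop missing or
# negative prices.
if price and price >= 0:
    print({'time': minute_time, 'price': price})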
Trading.is_trading_day
(self, timestamp)
Tests whether markets are open on a given day.
Tests whether markets are open on a given day.
def is_trading_day(self, timestamp):
        """Tests whether markets are open on a given day."""

        # Markets are closed on holidays.
        if timestamp in UnitedStates():
            self.logs.debug('Identified holiday: %s' % timestamp)
            return False

        # Markets are closed on weekends.
        if timestamp.weekday() in [5, 6]:
            self.logs.debug('Identified weekend: %s' % timestamp)
            return False

        # Otherwise markets are open.
        return True
[ "def", "is_trading_day", "(", "self", ",", "timestamp", ")", ":", "# Markets are closed on holidays.", "if", "timestamp", "in", "UnitedStates", "(", ")", ":", "self", ".", "logs", ".", "debug", "(", "'Identified holiday: %s'", "%", "timestamp", ")", "return", "False", "# Markets are closed on weekends.", "if", "timestamp", ".", "weekday", "(", ")", "in", "[", "5", ",", "6", "]", ":", "self", ".", "logs", ".", "debug", "(", "'Identified weekend: %s'", "%", "timestamp", ")", "return", "False", "# Otherwise markets are open.", "return", "True" ]
[ 285, 4 ]
[ 299, 19 ]
python
en
['en', 'en', 'en']
True
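is_trading_day leans on the holidays package's date-containment test (timestamp in UnitedStates()). A standalone sketch of the same holiday-then-weekend check on three hard-coded dates:

from datetime import date
from holidays import UnitedStates

us_holidays = UnitedStates()

for day in [date(2020, 7, 4), date(2020, 7, 5), date(2020, 7, 6)]:
    if day in us_holidays:
        print('%s: holiday' % day)
    elif day.weekday() in [5, 6]:  # Saturday or Sunday
        print('%s: weekend' % day)
    else:
        print('%s: trading day' % day)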
Trading.get_previous_day
(self, timestamp)
Finds the previous trading day.
Finds the previous trading day.
def get_previous_day(self, timestamp):
        """Finds the previous trading day."""

        previous_day = timestamp - timedelta(days=1)

        # Walk backwards until we hit a trading day.
        while not self.is_trading_day(previous_day):
            previous_day -= timedelta(days=1)

        self.logs.debug('Previous trading day for %s: %s' %
                        (timestamp, previous_day))
        return previous_day
[ "def", "get_previous_day", "(", "self", ",", "timestamp", ")", ":", "previous_day", "=", "timestamp", "-", "timedelta", "(", "days", "=", "1", ")", "# Walk backwards until we hit a trading day.", "while", "not", "self", ".", "is_trading_day", "(", "previous_day", ")", ":", "previous_day", "-=", "timedelta", "(", "days", "=", "1", ")", "self", ".", "logs", ".", "debug", "(", "'Previous trading day for %s: %s'", "%", "(", "timestamp", ",", "previous_day", ")", ")", "return", "previous_day" ]
[ 301, 4 ]
[ 312, 27 ]
python
en
['en', 'en', 'en']
True
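A quick standalone check of the backward walk in get_previous_day. To keep it runnable without the Trading class, a weekend-only stand-in replaces is_trading_day (dates chosen for illustration):

from datetime import date, timedelta

def is_weekday(day):
    # Stand-in for Trading.is_trading_day, weekends only.
    return day.weekday() not in [5, 6]

# Monday 2020-06-08: the previous trading day should skip the weekend.
previous_day = date(2020, 6, 8) - timedelta(days=1)
while not is_weekday(previous_day):
    previous_day -= timedelta(days=1)

assert previous_day == date(2020, 6, 5)  # the prior Friday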
Trading.get_next_day
(self, timestamp)
Finds the next trading day.
Finds the next trading day.
def get_next_day(self, timestamp):
        """Finds the next trading day."""

        next_day = timestamp + timedelta(days=1)

        # Walk forward until we hit a trading day.
        while not self.is_trading_day(next_day):
            next_day += timedelta(days=1)

        self.logs.debug('Next trading day for %s: %s' %
                        (timestamp, next_day))
        return next_day
[ "def", "get_next_day", "(", "self", ",", "timestamp", ")", ":", "next_day", "=", "timestamp", "+", "timedelta", "(", "days", "=", "1", ")", "# Walk forward until we hit a trading day.", "while", "not", "self", ".", "is_trading_day", "(", "next_day", ")", ":", "next_day", "+=", "timedelta", "(", "days", "=", "1", ")", "self", ".", "logs", ".", "debug", "(", "'Next trading day for %s: %s'", "%", "(", "timestamp", ",", "next_day", ")", ")", "return", "next_day" ]
[ 314, 4 ]
[ 325, 23 ]
python
en
['en', 'en', 'en']
True
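And the mirror-image forward walk from get_next_day, this time stepping over an observed holiday plus the weekend behind it; the one-entry holiday set is a stand-in for the real calendar:

from datetime import date, timedelta

# Stand-in calendar: weekends plus one hard-coded holiday.
HOLIDAYS = {date(2020, 7, 3)}  # Independence Day (observed)

def is_open(day):
    return day not in HOLIDAYS and day.weekday() not in [5, 6]

# Thursday 2020-07-02: the next trading day should skip the observed
# holiday on Friday and the weekend that follows it.
next_day = date(2020, 7, 2) + timedelta(days=1)
while not is_open(next_day):
    next_day += timedelta(days=1)

assert next_day == date(2020, 7, 6)  # the following Monday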