| column | type |
|---|---|
| id | int32 (0 to 252k) |
| repo | string (7 to 55 chars) |
| path | string (4 to 127 chars) |
| func_name | string (1 to 88 chars) |
| original_string | string (75 to 19.8k chars) |
| language | 1 class (value: python) |
| code | string (75 to 19.8k chars) |
| code_tokens | sequence |
| docstring | string (3 to 17.3k chars) |
| docstring_tokens | sequence |
| sha | string (40 chars) |
| url | string (87 to 242 chars) |
3,000 | UDST/urbansim | urbansim/models/regression.py | RegressionModel.predict | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L390-L410

def predict(self, data):
    """
    Predict a new data set based on an estimated model.

    Parameters
    ----------
    data : pandas.DataFrame
        Data to use for prediction. Must contain all the columns
        referenced by the right-hand side of the `model_expression`.

    Returns
    -------
    result : pandas.Series
        Predicted values as a pandas Series. Will have the index of `data`
        after applying filters.

    """
    self.assert_fitted()
    with log_start_finish('predicting model {}'.format(self.name), logger):
        return predict(
            data, self.predict_filters, self.model_fit, self.ytransform)
"""
Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters.
"""
self.assert_fitted()
with log_start_finish('predicting model {}'.format(self.name), logger):
return predict(
data, self.predict_filters, self.model_fit, self.ytransform) | [
"def",
"predict",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"assert_fitted",
"(",
")",
"with",
"log_start_finish",
"(",
"'predicting model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"predict",
"(",
"data",
",",
"self",
".",
"predict_filters",
",",
"self",
".",
"model_fit",
",",
"self",
".",
"ytransform",
")"
] | Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters. | [
"Predict",
"a",
"new",
"data",
"set",
"based",
"on",
"an",
"estimated",
"model",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L390-L410 |
3,001 | UDST/urbansim | urbansim/models/regression.py | RegressionModel.to_dict | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L412-L436

def to_dict(self):
    """
    Returns a dictionary representation of a RegressionModel instance.

    """
    d = {
        'model_type': 'regression',
        'name': self.name,
        'fit_filters': self.fit_filters,
        'predict_filters': self.predict_filters,
        'model_expression': self.model_expression,
        'ytransform': YTRANSFORM_MAPPING[self.ytransform],
        'fitted': self.fitted,
        'fit_parameters': None,
        'fit_rsquared': None,
        'fit_rsquared_adj': None
    }

    if self.fitted:
        d['fit_parameters'] = yamlio.frame_to_yaml_safe(
            self.fit_parameters)
        d['fit_rsquared'] = float(self.model_fit.rsquared)
        d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)

    return d
"""
Returns a dictionary representation of a RegressionModel instance.
"""
d = {
'model_type': 'regression',
'name': self.name,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'model_expression': self.model_expression,
'ytransform': YTRANSFORM_MAPPING[self.ytransform],
'fitted': self.fitted,
'fit_parameters': None,
'fit_rsquared': None,
'fit_rsquared_adj': None
}
if self.fitted:
d['fit_parameters'] = yamlio.frame_to_yaml_safe(
self.fit_parameters)
d['fit_rsquared'] = float(self.model_fit.rsquared)
d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)
return d | [
"def",
"to_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"'model_type'",
":",
"'regression'",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'fit_filters'",
":",
"self",
".",
"fit_filters",
",",
"'predict_filters'",
":",
"self",
".",
"predict_filters",
",",
"'model_expression'",
":",
"self",
".",
"model_expression",
",",
"'ytransform'",
":",
"YTRANSFORM_MAPPING",
"[",
"self",
".",
"ytransform",
"]",
",",
"'fitted'",
":",
"self",
".",
"fitted",
",",
"'fit_parameters'",
":",
"None",
",",
"'fit_rsquared'",
":",
"None",
",",
"'fit_rsquared_adj'",
":",
"None",
"}",
"if",
"self",
".",
"fitted",
":",
"d",
"[",
"'fit_parameters'",
"]",
"=",
"yamlio",
".",
"frame_to_yaml_safe",
"(",
"self",
".",
"fit_parameters",
")",
"d",
"[",
"'fit_rsquared'",
"]",
"=",
"float",
"(",
"self",
".",
"model_fit",
".",
"rsquared",
")",
"d",
"[",
"'fit_rsquared_adj'",
"]",
"=",
"float",
"(",
"self",
".",
"model_fit",
".",
"rsquared_adj",
")",
"return",
"d"
] | Returns a dictionary representation of a RegressionModel instance. | [
"Returns",
"a",
"dictionary",
"representation",
"of",
"a",
"RegressionModel",
"instance",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L412-L436 |
3,002 | UDST/urbansim | urbansim/models/regression.py | RegressionModel.columns_used | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L460-L469

def columns_used(self):
    """
    Returns all the columns used in this model for filtering
    and in the model expression.

    """
    return list(tz.unique(tz.concatv(
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        util.columns_in_formula(self.model_expression))))
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression)))) | [
"def",
"columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"fit_filters",
")",
",",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"predict_filters",
")",
",",
"util",
".",
"columns_in_formula",
"(",
"self",
".",
"model_expression",
")",
")",
")",
")"
] | Returns all the columns used in this model for filtering
and in the model expression. | [
"Returns",
"all",
"the",
"columns",
"used",
"in",
"this",
"model",
"for",
"filtering",
"and",
"in",
"the",
"model",
"expression",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L460-L469 |
3,003 | UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.add_model | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L546-L559

def add_model(self, model):
    """
    Add a `RegressionModel` instance.

    Parameters
    ----------
    model : `RegressionModel`
        Should have a ``.name`` attribute matching one of
        the groupby segments.

    """
    logger.debug(
        'adding model {} to group {}'.format(model.name, self.name))
    self.models[model.name] = model
"""
Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments.
"""
logger.debug(
'adding model {} to group {}'.format(model.name, self.name))
self.models[model.name] = model | [
"def",
"add_model",
"(",
"self",
",",
"model",
")",
":",
"logger",
".",
"debug",
"(",
"'adding model {} to group {}'",
".",
"format",
"(",
"model",
".",
"name",
",",
"self",
".",
"name",
")",
")",
"self",
".",
"models",
"[",
"model",
".",
"name",
"]",
"=",
"model"
] | Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments. | [
"Add",
"a",
"RegressionModel",
"instance",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L546-L559 |
3,004 | UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.add_model_from_params | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L561-L590

def add_model_from_params(self, name, fit_filters, predict_filters,
                          model_expression, ytransform=None):
    """
    Add a model by passing arguments through to `RegressionModel`.

    Parameters
    ----------
    name : any
        Must match a groupby segment name.
    fit_filters : list of str
        Filters applied before fitting the model.
    predict_filters : list of str
        Filters applied before calculating new data points.
    model_expression : str
        A patsy model expression that can be used with statsmodels.
        Should contain both the left- and right-hand sides.
    ytransform : callable, optional
        A function to call on the array of predicted output.
        For example, if the model relation is predicting the log
        of price, you might pass ``ytransform=np.exp`` so that
        the results reflect actual price.
        By default no transformation is applied.

    """
    logger.debug(
        'adding model {} to group {}'.format(name, self.name))
    model = RegressionModel(
        fit_filters, predict_filters, model_expression, ytransform, name)
    self.models[name] = model
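A hypothetical sketch of building a group of segment-specific models, assuming a `RegressionModelGroup(segmentation_col)` constructor; the segment names, filters, expressions, and `buildings` table are invented.

```python
import numpy as np
import pandas as pd
from urbansim.models.regression import RegressionModelGroup

# Invented buildings table; 'building_type' defines the segments.
buildings = pd.DataFrame({
    'building_type': ['sf', 'sf', 'sf', 'mf', 'mf', 'mf'],
    'price': [300000., 340000., 410000., 650000., 900000., 720000.],
    'sqft': [1400, 1700, 2100, 8000, 12000, 9500],
    'lot_size': [5000, 6000, 7500, 0, 0, 0],
    'units': [1, 1, 1, 10, 18, 12],
    'stories': [1, 2, 2, 3, 5, 4]})

group = RegressionModelGroup('building_type')
group.add_model_from_params(
    'sf', ['sqft > 0'], None,
    'np.log1p(price) ~ sqft + lot_size', ytransform=np.expm1)
group.add_model_from_params(
    'mf', ['units > 0'], None,
    'np.log1p(price) ~ sqft + stories', ytransform=np.expm1)

fits = group.fit(buildings)  # dict of statsmodels OLS results keyed by segment
```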
3,005 | UDST/urbansim | urbansim/models/regression.py | RegressionModelGroup.fit | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L612-L633

def fit(self, data, debug=False):
    """
    Fit each of the models in the group.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a column with the same name as `segmentation_col`.
    debug : bool
        If set to true (default false) will pass the debug parameter
        to model estimation.

    Returns
    -------
    fits : dict of statsmodels.regression.linear_model.OLSResults
        Keys are the segment names.

    """
    with log_start_finish(
            'fitting models in group {}'.format(self.name), logger):
        return {name: self.models[name].fit(df, debug=debug)
                for name, df in self._iter_groups(data)}
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)} | [
"def",
"fit",
"(",
"self",
",",
"data",
",",
"debug",
"=",
"False",
")",
":",
"with",
"log_start_finish",
"(",
"'fitting models in group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"{",
"name",
":",
"self",
".",
"models",
"[",
"name",
"]",
".",
"fit",
"(",
"df",
",",
"debug",
"=",
"debug",
")",
"for",
"name",
",",
"df",
"in",
"self",
".",
"_iter_groups",
"(",
"data",
")",
"}"
] | Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names. | [
"Fit",
"each",
"of",
"the",
"models",
"in",
"the",
"group",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L612-L633 |
3,006 | UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.from_yaml | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L726-L768

def from_yaml(cls, yaml_str=None, str_or_buffer=None):
    """
    Create a SegmentedRegressionModel instance from a saved YAML
    configuration. Arguments are mutually exclusive.

    Parameters
    ----------
    yaml_str : str, optional
        A YAML string from which to load model.
    str_or_buffer : str or file like, optional
        File name or buffer from which to load YAML.

    Returns
    -------
    SegmentedRegressionModel

    """
    cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)

    default_model_expr = cfg['default_config']['model_expression']
    default_ytransform = cfg['default_config']['ytransform']

    seg = cls(
        cfg['segmentation_col'], cfg['fit_filters'],
        cfg['predict_filters'], default_model_expr,
        YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
        cfg['name'])

    if "models" not in cfg:
        cfg["models"] = {}

    for name, m in cfg['models'].items():
        m['model_expression'] = m.get(
            'model_expression', default_model_expr)
        m['ytransform'] = m.get('ytransform', default_ytransform)
        m['fit_filters'] = None
        m['predict_filters'] = None
        reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
        seg._group.add_model(reg)

    logger.debug(
        'loaded segmented regression model {} from yaml'.format(seg.name))
    return seg
"""
Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg | [
"def",
"from_yaml",
"(",
"cls",
",",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
")",
":",
"cfg",
"=",
"yamlio",
".",
"yaml_to_dict",
"(",
"yaml_str",
",",
"str_or_buffer",
")",
"default_model_expr",
"=",
"cfg",
"[",
"'default_config'",
"]",
"[",
"'model_expression'",
"]",
"default_ytransform",
"=",
"cfg",
"[",
"'default_config'",
"]",
"[",
"'ytransform'",
"]",
"seg",
"=",
"cls",
"(",
"cfg",
"[",
"'segmentation_col'",
"]",
",",
"cfg",
"[",
"'fit_filters'",
"]",
",",
"cfg",
"[",
"'predict_filters'",
"]",
",",
"default_model_expr",
",",
"YTRANSFORM_MAPPING",
"[",
"default_ytransform",
"]",
",",
"cfg",
"[",
"'min_segment_size'",
"]",
",",
"cfg",
"[",
"'name'",
"]",
")",
"if",
"\"models\"",
"not",
"in",
"cfg",
":",
"cfg",
"[",
"\"models\"",
"]",
"=",
"{",
"}",
"for",
"name",
",",
"m",
"in",
"cfg",
"[",
"'models'",
"]",
".",
"items",
"(",
")",
":",
"m",
"[",
"'model_expression'",
"]",
"=",
"m",
".",
"get",
"(",
"'model_expression'",
",",
"default_model_expr",
")",
"m",
"[",
"'ytransform'",
"]",
"=",
"m",
".",
"get",
"(",
"'ytransform'",
",",
"default_ytransform",
")",
"m",
"[",
"'fit_filters'",
"]",
"=",
"None",
"m",
"[",
"'predict_filters'",
"]",
"=",
"None",
"reg",
"=",
"RegressionModel",
".",
"from_yaml",
"(",
"yamlio",
".",
"convert_to_yaml",
"(",
"m",
",",
"None",
")",
")",
"seg",
".",
"_group",
".",
"add_model",
"(",
"reg",
")",
"logger",
".",
"debug",
"(",
"'loaded segmented regression model {} from yaml'",
".",
"format",
"(",
"seg",
".",
"name",
")",
")",
"return",
"seg"
] | Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutally exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel | [
"Create",
"a",
"SegmentedRegressionModel",
"instance",
"from",
"a",
"saved",
"YAML",
"configuration",
".",
"Arguments",
"are",
"mutally",
"exclusive",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L726-L768 |
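An illustrative configuration for the loader above, limited to keys the code actually reads (`name`, `segmentation_col`, `fit_filters`, `predict_filters`, `min_segment_size`, `default_config`); the column names and values are invented, and the YAML is embedded as a string so the sketch is self-contained.

```python
from urbansim.models.regression import SegmentedRegressionModel

yaml_cfg = """
name: price_by_building_type
segmentation_col: building_type
fit_filters:
- sqft > 0
predict_filters: null
min_segment_size: 10
default_config:
    model_expression: np.log1p(price) ~ sqft + year_built
    ytransform: np.exp
"""

# Segments not listed under an optional `models` section are added
# automatically from the data when fit() is called.
seg = SegmentedRegressionModel.from_yaml(yaml_str=yaml_cfg)
```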
3,007 | UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.add_segment | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L770-L806

def add_segment(self, name, model_expression=None, ytransform='default'):
    """
    Add a new segment with its own model expression and ytransform.

    Parameters
    ----------
    name :
        Segment name. Must match a segment in the groupby of the data.
    model_expression : str or dict, optional
        A patsy model expression that can be used with statsmodels.
        Should contain both the left- and right-hand sides.
        If not given the default model will be used, which must not be
        None.
    ytransform : callable, optional
        A function to call on the array of predicted output.
        For example, if the model relation is predicting the log
        of price, you might pass ``ytransform=np.exp`` so that
        the results reflect actual price.
        If not given the default ytransform will be used.

    """
    if not model_expression:
        if self.default_model_expr is None:
            raise ValueError(
                'No default model available, '
                'you must supply a model expression.')
        model_expression = self.default_model_expr

    if ytransform == 'default':
        ytransform = self.default_ytransform

    # no fit or predict filters, we'll take care of that this side.
    self._group.add_model_from_params(
        name, None, None, model_expression, ytransform)

    logger.debug('added segment {} to model {}'.format(name, self.name))
"""
Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used.
"""
if not model_expression:
if self.default_model_expr is None:
raise ValueError(
'No default model available, '
'you must supply a model experssion.')
model_expression = self.default_model_expr
if ytransform == 'default':
ytransform = self.default_ytransform
# no fit or predict filters, we'll take care of that this side.
self._group.add_model_from_params(
name, None, None, model_expression, ytransform)
logger.debug('added segment {} to model {}'.format(name, self.name)) | [
"def",
"add_segment",
"(",
"self",
",",
"name",
",",
"model_expression",
"=",
"None",
",",
"ytransform",
"=",
"'default'",
")",
":",
"if",
"not",
"model_expression",
":",
"if",
"self",
".",
"default_model_expr",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'No default model available, '",
"'you must supply a model experssion.'",
")",
"model_expression",
"=",
"self",
".",
"default_model_expr",
"if",
"ytransform",
"==",
"'default'",
":",
"ytransform",
"=",
"self",
".",
"default_ytransform",
"# no fit or predict filters, we'll take care of that this side.",
"self",
".",
"_group",
".",
"add_model_from_params",
"(",
"name",
",",
"None",
",",
"None",
",",
"model_expression",
",",
"ytransform",
")",
"logger",
".",
"debug",
"(",
"'added segment {} to model {}'",
".",
"format",
"(",
"name",
",",
"self",
".",
"name",
")",
")"
] | Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used. | [
"Add",
"a",
"new",
"segment",
"with",
"its",
"own",
"model",
"expression",
"and",
"ytransform",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L770-L806 |
3,008 | UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.fit | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L808-L847

def fit(self, data, debug=False):
    """
    Fit each segment. Segments that have not already been explicitly
    added will be automatically added with default model and ytransform.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a column with the same name as `segmentation_col`.
    debug : bool
        If set to true will pass debug to the fit method of each model.

    Returns
    -------
    fits : dict of statsmodels.regression.linear_model.OLSResults
        Keys are the segment names.

    """
    data = util.apply_filter_query(data, self.fit_filters)

    unique = data[self.segmentation_col].unique()
    value_counts = data[self.segmentation_col].value_counts()

    # Remove any existing segments that may no longer have counterparts
    # in the data. This can happen when loading a saved model and then
    # calling this method with data that no longer has segments that
    # were there the last time this was called.
    gone = set(self._group.models) - set(unique)
    for g in gone:
        del self._group.models[g]

    for x in unique:
        if x not in self._group.models and \
                value_counts[x] > self.min_segment_size:
            self.add_segment(x)

    with log_start_finish(
            'fitting models in segmented model {}'.format(self.name),
            logger):
        return self._group.fit(data, debug=debug)
"""
Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
data = util.apply_filter_query(data, self.fit_filters)
unique = data[self.segmentation_col].unique()
value_counts = data[self.segmentation_col].value_counts()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models and \
value_counts[x] > self.min_segment_size:
self.add_segment(x)
with log_start_finish(
'fitting models in segmented model {}'.format(self.name),
logger):
return self._group.fit(data, debug=debug) | [
"def",
"fit",
"(",
"self",
",",
"data",
",",
"debug",
"=",
"False",
")",
":",
"data",
"=",
"util",
".",
"apply_filter_query",
"(",
"data",
",",
"self",
".",
"fit_filters",
")",
"unique",
"=",
"data",
"[",
"self",
".",
"segmentation_col",
"]",
".",
"unique",
"(",
")",
"value_counts",
"=",
"data",
"[",
"self",
".",
"segmentation_col",
"]",
".",
"value_counts",
"(",
")",
"# Remove any existing segments that may no longer have counterparts",
"# in the data. This can happen when loading a saved model and then",
"# calling this method with data that no longer has segments that",
"# were there the last time this was called.",
"gone",
"=",
"set",
"(",
"self",
".",
"_group",
".",
"models",
")",
"-",
"set",
"(",
"unique",
")",
"for",
"g",
"in",
"gone",
":",
"del",
"self",
".",
"_group",
".",
"models",
"[",
"g",
"]",
"for",
"x",
"in",
"unique",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_group",
".",
"models",
"and",
"value_counts",
"[",
"x",
"]",
">",
"self",
".",
"min_segment_size",
":",
"self",
".",
"add_segment",
"(",
"x",
")",
"with",
"log_start_finish",
"(",
"'fitting models in segmented model {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"return",
"self",
".",
"_group",
".",
"fit",
"(",
"data",
",",
"debug",
"=",
"debug",
")"
] | Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names. | [
"Fit",
"each",
"segment",
".",
"Segments",
"that",
"have",
"not",
"already",
"been",
"explicitly",
"added",
"will",
"be",
"automatically",
"added",
"with",
"default",
"model",
"and",
"ytransform",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L808-L847 |
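A hypothetical end-to-end sketch. The constructor arguments are passed positionally in the order used by `from_yaml` above (segmentation column, fit filters, predict filters, default expression, default ytransform, minimum segment size, name); the data and expression are invented.

```python
import numpy as np
import pandas as pd
from urbansim.models.regression import SegmentedRegressionModel

buildings = pd.DataFrame({
    'building_type': ['sf', 'sf', 'sf', 'mf', 'mf', 'mf'],
    'price': [300000., 350000., 410000., 800000., 950000., 700000.],
    'sqft': [1500, 1800, 2200, 9000, 12000, 8000]})

seg = SegmentedRegressionModel(
    'building_type',              # segmentation_col
    ['sqft > 0'],                 # fit_filters
    None,                         # predict_filters
    'np.log1p(price) ~ sqft',     # default model expression
    np.expm1,                     # default ytransform
    1,                            # min_segment_size
    'price_by_type')              # name

fits = seg.fit(buildings)  # a segment is added and fit for 'sf' and 'mf'
```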
3,009 | UDST/urbansim | urbansim/models/regression.py | SegmentedRegressionModel.columns_used | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L956-L967

def columns_used(self):
    """
    Returns all the columns used across all models in the group
    for filtering and in the model expression.

    """
    return list(tz.unique(tz.concatv(
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        util.columns_in_formula(self.default_model_expr),
        self._group.columns_used(),
        [self.segmentation_col])))
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.default_model_expr),
self._group.columns_used(),
[self.segmentation_col]))) | [
"def",
"columns_used",
"(",
"self",
")",
":",
"return",
"list",
"(",
"tz",
".",
"unique",
"(",
"tz",
".",
"concatv",
"(",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"fit_filters",
")",
",",
"util",
".",
"columns_in_filters",
"(",
"self",
".",
"predict_filters",
")",
",",
"util",
".",
"columns_in_formula",
"(",
"self",
".",
"default_model_expr",
")",
",",
"self",
".",
"_group",
".",
"columns_used",
"(",
")",
",",
"[",
"self",
".",
"segmentation_col",
"]",
")",
")",
")"
] | Returns all the columns used across all models in the group
for filtering and in the model expression. | [
"Returns",
"all",
"the",
"columns",
"used",
"across",
"all",
"models",
"in",
"the",
"group",
"for",
"filtering",
"and",
"in",
"the",
"model",
"expression",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L956-L967 |
3,010 | UDST/urbansim | urbansim/models/relocation.py | find_movers | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/relocation.py#L16-L67

def find_movers(choosers, rates, rate_column):
    """
    Returns an array of the indexes of the `choosers` that are slated
    to move.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table of agents from which to find movers.
    rates : pandas.DataFrame
        Table of relocation rates. Index is unused.
        Other columns describe filters on the `choosers`
        table so that different segments can have different relocation
        rates. Columns that end with '_max' will be used to create
        "less than" filters, columns that end with '_min' will be
        used to create "greater than or equal to" filters.
        A column with no suffix will be used to make an 'equal to' filter.

        An example `rates` structure::

            age_of_head_max  age_of_head_min
                        nan               65
                         65               40

        In this example the `choosers` table would need to have an
        'age_of_head' column on which to filter.
        nan should be used to flag filters that do not apply
        in a given row.
    rate_column : object
        Name of column in `rates` table that has relocation rates.

    Returns
    -------
    movers : pandas.Index
        Suitable for indexing `choosers` by index.

    """
    logger.debug('start: find movers for relocation')
    relocation_rates = pd.Series(
        np.zeros(len(choosers)), index=choosers.index)

    for _, row in rates.iterrows():
        indexes = util.filter_table(choosers, row, ignore={rate_column}).index
        relocation_rates.loc[indexes] = row[rate_column]

    movers = relocation_rates.index[
        relocation_rates > np.random.random(len(choosers))]

    logger.debug('picked {} movers for relocation'.format(len(movers)))
    logger.debug('finish: find movers for relocation')

    return movers
"""
Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""
logger.debug('start: find movers for relocation')
relocation_rates = pd.Series(
np.zeros(len(choosers)), index=choosers.index)
for _, row in rates.iterrows():
indexes = util.filter_table(choosers, row, ignore={rate_column}).index
relocation_rates.loc[indexes] = row[rate_column]
movers = relocation_rates.index[
relocation_rates > np.random.random(len(choosers))]
logger.debug('picked {} movers for relocation'.format(len(movers)))
logger.debug('finish: find movers for relocation')
return movers | [
"def",
"find_movers",
"(",
"choosers",
",",
"rates",
",",
"rate_column",
")",
":",
"logger",
".",
"debug",
"(",
"'start: find movers for relocation'",
")",
"relocation_rates",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"zeros",
"(",
"len",
"(",
"choosers",
")",
")",
",",
"index",
"=",
"choosers",
".",
"index",
")",
"for",
"_",
",",
"row",
"in",
"rates",
".",
"iterrows",
"(",
")",
":",
"indexes",
"=",
"util",
".",
"filter_table",
"(",
"choosers",
",",
"row",
",",
"ignore",
"=",
"{",
"rate_column",
"}",
")",
".",
"index",
"relocation_rates",
".",
"loc",
"[",
"indexes",
"]",
"=",
"row",
"[",
"rate_column",
"]",
"movers",
"=",
"relocation_rates",
".",
"index",
"[",
"relocation_rates",
">",
"np",
".",
"random",
".",
"random",
"(",
"len",
"(",
"choosers",
")",
")",
"]",
"logger",
".",
"debug",
"(",
"'picked {} movers for relocation'",
".",
"format",
"(",
"len",
"(",
"movers",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'finish: find movers for relocation'",
")",
"return",
"movers"
] | Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index. | [
"Returns",
"an",
"array",
"of",
"the",
"indexes",
"of",
"the",
"choosers",
"that",
"are",
"slated",
"to",
"move",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/relocation.py#L16-L67 |
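A small, hypothetical example of the rates table described in the docstring above: households headed by someone 65 or older move at 2% per year, those 40-64 at 5%. The column name `probability_of_relocating` and the data are invented for the sketch.

```python
import numpy as np
import pandas as pd
from urbansim.models.relocation import find_movers

households = pd.DataFrame(
    {'age_of_head': [30, 45, 70, 52, 80]},
    index=[10, 11, 12, 13, 14])

rates = pd.DataFrame({
    'age_of_head_min': [65, 40],         # greater-than-or-equal-to filter
    'age_of_head_max': [np.nan, 65],     # less-than filter; nan = not applied
    'probability_of_relocating': [0.02, 0.05]})

movers = find_movers(households, rates, 'probability_of_relocating')
# `movers` is a pandas.Index of household ids picked at random by segment rate
```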
3,011 | UDST/urbansim | urbansim/models/supplydemand.py | _calculate_adjustment | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/supplydemand.py#L15-L81

def _calculate_adjustment(
        lcm, choosers, alternatives, alt_segmenter,
        clip_change_low, clip_change_high, multiplier_func=None):
    """
    Calculate adjustments to prices to compensate for
    supply and demand effects.

    Parameters
    ----------
    lcm : LocationChoiceModel
        Used to calculate the probability of agents choosing among
        alternatives. Must be fully configured and fitted.
    choosers : pandas.DataFrame
    alternatives : pandas.DataFrame
    alt_segmenter : pandas.Series
        Will be used to segment alternatives and probabilities to do
        comparisons of supply and demand by submarket.
    clip_change_low : float
        The minimum amount by which to multiply prices each iteration.
    clip_change_high : float
        The maximum amount by which to multiply prices each iteration.
    multiplier_func : function (returns Series, boolean)
        A function which takes separate demand and supply Series
        and returns a tuple where the first item is a Series with the
        ratio of new price to old price (all indexes should be the same) -
        by default the ratio of demand to supply is the ratio of the new
        price to the old price. The second return value is a
        boolean which when True tells this module to stop looping (that
        convergence has been satisfied)

    Returns
    -------
    alts_muliplier : pandas.Series
        Same index as `alternatives`, values clipped to `clip_change_low`
        and `clip_change_high`.
    submarkets_multiplier : pandas.Series
        Index is unique values from `alt_segmenter`, values are the ratio
        of demand / supply for each segment in `alt_segmenter`.
    finished : boolean
        boolean indicator that this adjustment should be considered the
        final adjustment (if True). If false, the iterative algorithm
        should continue.

    """
    logger.debug('start: calculate supply and demand price adjustment ratio')

    # probabilities of agents choosing * number of agents = demand
    demand = lcm.summed_probabilities(choosers, alternatives)
    # group by submarket
    demand = demand.groupby(alt_segmenter.loc[demand.index].values).sum()

    # number of alternatives
    supply = alt_segmenter.value_counts()

    if multiplier_func is not None:
        multiplier, finished = multiplier_func(demand, supply)
    else:
        multiplier, finished = (demand / supply), False
    multiplier = multiplier.clip(clip_change_low, clip_change_high)

    # broadcast multiplier back to alternatives index
    alts_muliplier = multiplier.loc[alt_segmenter]
    alts_muliplier.index = alt_segmenter.index

    logger.debug(
        ('finish: calculate supply and demand price adjustment multiplier '
         'with mean multiplier {}').format(multiplier.mean()))
    return alts_muliplier, multiplier, finished
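A minimal sketch of a custom `multiplier_func` matching the contract described in the docstring above: it receives per-submarket demand and supply Series and returns a price ratio Series plus a convergence flag. The function name and tolerance are invented.

```python
def close_enough_multiplier(demand, supply, tolerance=0.05):
    """Return (price ratio per submarket, converged?)."""
    ratio = demand / supply
    # declare convergence once every submarket is within `tolerance` of balance
    converged = bool(((ratio - 1.0).abs() < tolerance).all())
    return ratio, converged
```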
3,012 | UDST/urbansim | urbansim/models/supplydemand.py | supply_and_demand | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/supplydemand.py#L84-L173

def supply_and_demand(
        lcm, choosers, alternatives, alt_segmenter, price_col,
        base_multiplier=None, clip_change_low=0.75, clip_change_high=1.25,
        iterations=5, multiplier_func=None):
    """
    Adjust real estate prices to compensate for supply and demand effects.

    Parameters
    ----------
    lcm : LocationChoiceModel
        Used to calculate the probability of agents choosing among
        alternatives. Must be fully configured and fitted.
    choosers : pandas.DataFrame
    alternatives : pandas.DataFrame
    alt_segmenter : str, array, or pandas.Series
        Will be used to segment alternatives and probabilities to do
        comparisons of supply and demand by submarket.
        If a string, it is expected to be the name of a column
        in `alternatives`. If a Series it should have the same index
        as `alternatives`.
    price_col : str
        The name of the column in `alternatives` that corresponds to price.
        This column is what is adjusted by this model.
    base_multiplier : pandas.Series, optional
        A series describing a starting multiplier for submarket prices.
        Index should be submarket IDs.
    clip_change_low : float, optional
        The minimum amount by which to multiply prices each iteration.
    clip_change_high : float, optional
        The maximum amount by which to multiply prices each iteration.
    iterations : int, optional
        Number of times to update prices based on supply/demand comparisons.
    multiplier_func : function (returns Series, boolean)
        A function which takes separate demand and supply Series
        and returns a tuple where the first item is a Series with the
        ratio of new price to old price (all indexes should be the same) -
        by default the ratio of demand to supply is the ratio of the new
        price to the old price. The second return value is a
        boolean which when True tells this module to stop looping (that
        convergence has been satisfied)

    Returns
    -------
    new_prices : pandas.Series
        Equivalent of the `price_col` in `alternatives`.
    submarkets_ratios : pandas.Series
        Price adjustment ratio for each submarket. If `base_multiplier` is
        given this will be a cumulative multiplier including the
        `base_multiplier` and the multipliers calculated for this year.

    """
    logger.debug('start: calculating supply and demand price adjustment')
    # copy alternatives so we don't modify the user's original
    alternatives = alternatives.copy()

    # if alt_segmenter is a string, get the actual column for segmenting demand
    if isinstance(alt_segmenter, str):
        alt_segmenter = alternatives[alt_segmenter]
    elif isinstance(alt_segmenter, np.ndarray):
        alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)

    choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
    alt_segmenter = alt_segmenter.loc[alternatives.index]

    # check base ratio and apply it to prices if given
    if base_multiplier is not None:
        bm = base_multiplier.loc[alt_segmenter]
        bm.index = alt_segmenter.index
        alternatives[price_col] = alternatives[price_col] * bm
        base_multiplier = base_multiplier.copy()

    for _ in range(iterations):
        alts_muliplier, submarkets_multiplier, finished = _calculate_adjustment(
            lcm, choosers, alternatives, alt_segmenter,
            clip_change_low, clip_change_high, multiplier_func=multiplier_func)
        alternatives[price_col] = alternatives[price_col] * alts_muliplier

        # might need to initialize this for holding cumulative multiplier
        if base_multiplier is None:
            base_multiplier = pd.Series(
                np.ones(len(submarkets_multiplier)),
                index=submarkets_multiplier.index)

        base_multiplier *= submarkets_multiplier

        if finished:
            break

    logger.debug('finish: calculating supply and demand price adjustment')
    return alternatives[price_col], base_multiplier
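A hypothetical sketch of calling the price-equilibration routine above. It is wrapped in a function so the placeholders are explicit: `hlcm` is assumed to be an already-fitted location choice model with the interface `supply_and_demand()` expects, and the table and column names (`zone_id`, `residential_price`) are invented. `close_enough_multiplier` is the convergence helper sketched after the previous record.

```python
from urbansim.models import supplydemand

def equilibrate_prices(hlcm, households, buildings):
    """Run the supply/demand adjustment and write prices back (illustrative)."""
    new_prices, submarket_ratios = supplydemand.supply_and_demand(
        hlcm, households, buildings,
        alt_segmenter='zone_id',          # submarket id column on `buildings`
        price_col='residential_price',
        clip_change_low=0.9, clip_change_high=1.1,
        iterations=10,
        multiplier_func=close_enough_multiplier)
    buildings['residential_price'] = new_prices
    return submarket_ratios
```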
3,013 | UDST/urbansim | urbansim/developer/developer.py | Developer._max_form | python | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L23-L44

def _max_form(f, colname):
    """
    Assumes dataframe with hierarchical columns with first index equal to the
    use and second index equal to the attribute.

    e.g. f.columns equal to::

        mixedoffice   building_cost
                      building_revenue
                      building_size
                      max_profit
                      max_profit_far
                      total_cost

        industrial    building_cost
                      building_revenue
                      building_size
                      max_profit
                      max_profit_far
                      total_cost

    """
    df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
    return df.idxmax(axis=1)
"""
Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
"""
df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
return df.idxmax(axis=1) | [
"def",
"_max_form",
"(",
"f",
",",
"colname",
")",
":",
"df",
"=",
"f",
".",
"stack",
"(",
"level",
"=",
"0",
")",
"[",
"[",
"colname",
"]",
"]",
".",
"stack",
"(",
")",
".",
"unstack",
"(",
"level",
"=",
"1",
")",
".",
"reset_index",
"(",
"level",
"=",
"1",
",",
"drop",
"=",
"True",
")",
"return",
"df",
".",
"idxmax",
"(",
"axis",
"=",
"1",
")"
] | Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost | [
"Assumes",
"dataframe",
"with",
"hierarchical",
"columns",
"with",
"first",
"index",
"equal",
"to",
"the",
"use",
"and",
"second",
"index",
"equal",
"to",
"the",
"attribute",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L23-L44 |
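A tiny, hypothetical feasibility frame with (form, attribute) hierarchical columns, showing what the stack/unstack/idxmax chain above produces per parcel. The parcel ids, forms, and profits are invented.

```python
import pandas as pd

feasibility = pd.DataFrame(
    {('mixedoffice', 'max_profit'): [1.0e6, 2.5e5],
     ('industrial', 'max_profit'): [4.0e5, 9.0e5]},
    index=pd.Index([101, 102], name='parcel_id'))

best_form = (feasibility.stack(level=0)[['max_profit']]
             .stack().unstack(level=1)
             .reset_index(level=1, drop=True)
             .idxmax(axis=1))
# parcel 101 -> 'mixedoffice', parcel 102 -> 'industrial'
```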
3,014 | UDST/urbansim | urbansim/developer/developer.py | Developer.keep_form_with_max_profit | def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete with each other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
if len(f) > 0:
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
else:
indexes = []
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df | python | def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete with each other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
if len(f) > 0:
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
else:
indexes = []
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df | [
"def",
"keep_form_with_max_profit",
"(",
"self",
",",
"forms",
"=",
"None",
")",
":",
"f",
"=",
"self",
".",
"feasibility",
"if",
"forms",
"is",
"not",
"None",
":",
"f",
"=",
"f",
"[",
"forms",
"]",
"if",
"len",
"(",
"f",
")",
">",
"0",
":",
"mu",
"=",
"self",
".",
"_max_form",
"(",
"f",
",",
"\"max_profit\"",
")",
"indexes",
"=",
"[",
"tuple",
"(",
"x",
")",
"for",
"x",
"in",
"mu",
".",
"reset_index",
"(",
")",
".",
"values",
"]",
"else",
":",
"indexes",
"=",
"[",
"]",
"df",
"=",
"f",
".",
"stack",
"(",
"level",
"=",
"0",
")",
".",
"loc",
"[",
"indexes",
"]",
"df",
".",
"index",
".",
"names",
"=",
"[",
"\"parcel_id\"",
",",
"\"form\"",
"]",
"df",
"=",
"df",
".",
"reset_index",
"(",
"level",
"=",
"1",
")",
"return",
"df"
] | This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete with each other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form. | [
"This",
"converts",
"the",
"dataframe",
"which",
"shows",
"all",
"profitable",
"forms",
"to",
"the",
"form",
"with",
"the",
"greatest",
"profit",
"so",
"that",
"more",
"profitable",
"forms",
"outcompete",
"less",
"profitable",
"forms",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L46-L75 |
3,015 | UDST/urbansim | urbansim/developer/developer.py | Developer.compute_units_to_build | def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print("Number of agents: {:,}".format(num_agents))
print("Number of agent spaces: {:,}".format(int(num_units)))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("Current vacancy = {:.2f}"
.format(1 - num_agents / float(num_units)))
print("Target vacancy = {:.2f}, target of new units = {:,}"
.format(target_vacancy, target_units))
return target_units | python | def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print("Number of agents: {:,}".format(num_agents))
print("Number of agent spaces: {:,}".format(int(num_units)))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("Current vacancy = {:.2f}"
.format(1 - num_agents / float(num_units)))
print("Target vacancy = {:.2f}, target of new units = {:,}"
.format(target_vacancy, target_units))
return target_units | [
"def",
"compute_units_to_build",
"(",
"num_agents",
",",
"num_units",
",",
"target_vacancy",
")",
":",
"print",
"(",
"\"Number of agents: {:,}\"",
".",
"format",
"(",
"num_agents",
")",
")",
"print",
"(",
"\"Number of agent spaces: {:,}\"",
".",
"format",
"(",
"int",
"(",
"num_units",
")",
")",
")",
"assert",
"target_vacancy",
"<",
"1.0",
"target_units",
"=",
"int",
"(",
"max",
"(",
"num_agents",
"/",
"(",
"1",
"-",
"target_vacancy",
")",
"-",
"num_units",
",",
"0",
")",
")",
"print",
"(",
"\"Current vacancy = {:.2f}\"",
".",
"format",
"(",
"1",
"-",
"num_agents",
"/",
"float",
"(",
"num_units",
")",
")",
")",
"print",
"(",
"\"Target vacancy = {:.2f}, target of new units = {:,}\"",
".",
"format",
"(",
"target_vacancy",
",",
"target_units",
")",
")",
"return",
"target_units"
] | Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built | [
"Compute",
"number",
"of",
"units",
"to",
"build",
"to",
"match",
"target",
"vacancy",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L78-L104 |
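The vacancy arithmetic above reduces to a single expression; a short sketch with invented counts, using only the formula shown in compute_units_to_build.
# target_units = max(num_agents / (1 - target_vacancy) - num_units, 0)
num_agents = 9500       # agents that need units
num_units = 10000       # existing unit supply
target_vacancy = 0.08   # desired vacancy rate

target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("current vacancy = {:.2f}".format(1 - num_agents / float(num_units)))  # 0.05
print("units to build  = {:,}".format(target_units))                         # 326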
3,016 | UDST/urbansim | urbansim/developer/developer.py | Developer.pick | def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
profit_to_prob_func=None):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if there are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
# no feasible buildings, might as well bail
return
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit_far > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM")
return
# print "Describe of net units\n", df.net_units.describe()
print("Sum of net units that are profitable: {:,}"
.format(int(df.net_units.sum())))
if profit_to_prob_func:
p = profit_to_prob_func(df)
else:
df['max_profit_per_size'] = df.max_profit / df.parcel_size
p = df.max_profit_per_size.values / df.max_profit_per_size.sum()
if df.net_units.sum() < target_units:
print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO",
"MATCH DEMAND")
build_idx = df.index.values
elif target_units <= 0:
build_idx = []
else:
# we don't know how many developments we will need, as they differ in net_units.
# If all developments have net_units of 1 than we need target_units of them.
# So we choose the smaller of available developments and target_units.
choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index() | python | def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
profit_to_prob_func=None):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if there are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
# no feasible buildings, might as well bail
return
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit_far > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
print("WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM")
return
# print "Describe of net units\n", df.net_units.describe()
print("Sum of net units that are profitable: {:,}"
.format(int(df.net_units.sum())))
if profit_to_prob_func:
p = profit_to_prob_func(df)
else:
df['max_profit_per_size'] = df.max_profit / df.parcel_size
p = df.max_profit_per_size.values / df.max_profit_per_size.sum()
if df.net_units.sum() < target_units:
print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO",
"MATCH DEMAND")
build_idx = df.index.values
elif target_units <= 0:
build_idx = []
else:
# we don't know how many developments we will need, as they differ in net_units.
# If all developments have net_units of 1 than we need target_units of them.
# So we choose the smaller of available developments and target_units.
choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index() | [
"def",
"pick",
"(",
"self",
",",
"form",
",",
"target_units",
",",
"parcel_size",
",",
"ave_unit_size",
",",
"current_units",
",",
"max_parcel_size",
"=",
"200000",
",",
"min_unit_size",
"=",
"400",
",",
"drop_after_build",
"=",
"True",
",",
"residential",
"=",
"True",
",",
"bldg_sqft_per_job",
"=",
"400.0",
",",
"profit_to_prob_func",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"feasibility",
")",
"==",
"0",
":",
"# no feasible buildings, might as well bail",
"return",
"if",
"form",
"is",
"None",
":",
"df",
"=",
"self",
".",
"feasibility",
"elif",
"isinstance",
"(",
"form",
",",
"list",
")",
":",
"df",
"=",
"self",
".",
"keep_form_with_max_profit",
"(",
"form",
")",
"else",
":",
"df",
"=",
"self",
".",
"feasibility",
"[",
"form",
"]",
"# feasible buildings only for this building type",
"df",
"=",
"df",
"[",
"df",
".",
"max_profit_far",
">",
"0",
"]",
"ave_unit_size",
"[",
"ave_unit_size",
"<",
"min_unit_size",
"]",
"=",
"min_unit_size",
"df",
"[",
"\"ave_unit_size\"",
"]",
"=",
"ave_unit_size",
"df",
"[",
"\"parcel_size\"",
"]",
"=",
"parcel_size",
"df",
"[",
"'current_units'",
"]",
"=",
"current_units",
"df",
"=",
"df",
"[",
"df",
".",
"parcel_size",
"<",
"max_parcel_size",
"]",
"df",
"[",
"'residential_units'",
"]",
"=",
"(",
"df",
".",
"residential_sqft",
"/",
"df",
".",
"ave_unit_size",
")",
".",
"round",
"(",
")",
"df",
"[",
"'job_spaces'",
"]",
"=",
"(",
"df",
".",
"non_residential_sqft",
"/",
"bldg_sqft_per_job",
")",
".",
"round",
"(",
")",
"if",
"residential",
":",
"df",
"[",
"'net_units'",
"]",
"=",
"df",
".",
"residential_units",
"-",
"df",
".",
"current_units",
"else",
":",
"df",
"[",
"'net_units'",
"]",
"=",
"df",
".",
"job_spaces",
"-",
"df",
".",
"current_units",
"df",
"=",
"df",
"[",
"df",
".",
"net_units",
">",
"0",
"]",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"print",
"(",
"\"WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM\"",
")",
"return",
"# print \"Describe of net units\\n\", df.net_units.describe()",
"print",
"(",
"\"Sum of net units that are profitable: {:,}\"",
".",
"format",
"(",
"int",
"(",
"df",
".",
"net_units",
".",
"sum",
"(",
")",
")",
")",
")",
"if",
"profit_to_prob_func",
":",
"p",
"=",
"profit_to_prob_func",
"(",
"df",
")",
"else",
":",
"df",
"[",
"'max_profit_per_size'",
"]",
"=",
"df",
".",
"max_profit",
"/",
"df",
".",
"parcel_size",
"p",
"=",
"df",
".",
"max_profit_per_size",
".",
"values",
"/",
"df",
".",
"max_profit_per_size",
".",
"sum",
"(",
")",
"if",
"df",
".",
"net_units",
".",
"sum",
"(",
")",
"<",
"target_units",
":",
"print",
"(",
"\"WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO\"",
",",
"\"MATCH DEMAND\"",
")",
"build_idx",
"=",
"df",
".",
"index",
".",
"values",
"elif",
"target_units",
"<=",
"0",
":",
"build_idx",
"=",
"[",
"]",
"else",
":",
"# we don't know how many developments we will need, as they differ in net_units.",
"# If all developments have net_units of 1 than we need target_units of them.",
"# So we choose the smaller of available developments and target_units.",
"choices",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"df",
".",
"index",
".",
"values",
",",
"size",
"=",
"min",
"(",
"len",
"(",
"df",
".",
"index",
")",
",",
"target_units",
")",
",",
"replace",
"=",
"False",
",",
"p",
"=",
"p",
")",
"tot_units",
"=",
"df",
".",
"net_units",
".",
"loc",
"[",
"choices",
"]",
".",
"values",
".",
"cumsum",
"(",
")",
"ind",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"tot_units",
",",
"target_units",
",",
"side",
"=",
"\"left\"",
")",
")",
"+",
"1",
"build_idx",
"=",
"choices",
"[",
":",
"ind",
"]",
"if",
"drop_after_build",
":",
"self",
".",
"feasibility",
"=",
"self",
".",
"feasibility",
".",
"drop",
"(",
"build_idx",
")",
"new_df",
"=",
"df",
".",
"loc",
"[",
"build_idx",
"]",
"new_df",
".",
"index",
".",
"name",
"=",
"\"parcel_id\"",
"return",
"new_df",
".",
"reset_index",
"(",
")"
] | Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
None if there are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility. | [
"Choose",
"the",
"buildings",
"from",
"the",
"list",
"that",
"are",
"feasible",
"to",
"build",
"in",
"order",
"to",
"match",
"the",
"specified",
"demand",
"."
] | 79f815a6503e109f50be270cee92d0f4a34f49ef | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/developer/developer.py#L106-L231 |
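A sketch of the sampling step inside pick(), on invented parcels: selection probabilities proportional to profit per unit of parcel size, a draw without replacement, and a cumulative cutoff once net units reach the target. It mirrors the branch shown above rather than calling the Developer class itself.
import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame(
    {"max_profit": [1.0e6, 5.0e5, 2.0e6, 8.0e5],
     "parcel_size": [2.0e4, 1.0e4, 5.0e4, 1.6e4],
     "net_units": [40, 25, 90, 30]},
    index=[1, 2, 3, 4],
)
target_units = 100

# probability of picking a parcel is its profit per square foot, normalized
per_size = df.max_profit / df.parcel_size
p = (per_size / per_size.sum()).values

choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
                           replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
print(build_idx, int(df.net_units.loc[build_idx].sum()))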
3,017 | linkedin/luminol | src/luminol/__init__.py | Luminol._analyze_root_causes | def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes | python | def _analyze_root_causes(self):
"""
Conduct root cause analysis.
The first metric of the list is taken as the root cause right now.
"""
causes = {}
for a in self.anomalies:
try:
causes[a] = self.correlations[a][0]
except IndexError:
raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
self.causes = causes | [
"def",
"_analyze_root_causes",
"(",
"self",
")",
":",
"causes",
"=",
"{",
"}",
"for",
"a",
"in",
"self",
".",
"anomalies",
":",
"try",
":",
"causes",
"[",
"a",
"]",
"=",
"self",
".",
"correlations",
"[",
"a",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"exceptions",
".",
"InvalidDataFormat",
"(",
"'luminol.luminol: dict correlations contains empty list.'",
")",
"self",
".",
"causes",
"=",
"causes"
] | Conduct root cause analysis.
The first metric of the list is taken as the root cause right now. | [
"Conduct",
"root",
"cause",
"analysis",
".",
"The",
"first",
"metric",
"of",
"the",
"list",
"is",
"taken",
"as",
"the",
"root",
"cause",
"right",
"now",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/__init__.py#L32-L43 |
3,018 | linkedin/luminol | src/luminol/correlator.py | Correlator._sanity_check | def _sanity_check(self):
"""
Check if the time series have more than two data points.
"""
if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!') | python | def _sanity_check(self):
"""
Check if the time series have more than two data points.
"""
if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!') | [
"def",
"_sanity_check",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"time_series_a",
")",
"<",
"2",
"or",
"len",
"(",
"self",
".",
"time_series_b",
")",
"<",
"2",
":",
"raise",
"exceptions",
".",
"NotEnoughDataPoints",
"(",
"'luminol.Correlator: Too few data points!'",
")"
] | Check if the time series have more than two data points. | [
"Check",
"if",
"the",
"time",
"series",
"have",
"more",
"than",
"two",
"data",
"points",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L92-L97 |
3,019 | linkedin/luminol | src/luminol/correlator.py | Correlator._correlate | def _correlate(self):
"""
Run correlation algorithm.
"""
a = self.algorithm(**self.algorithm_params)
self.correlation_result = a.run() | python | def _correlate(self):
"""
Run correlation algorithm.
"""
a = self.algorithm(**self.algorithm_params)
self.correlation_result = a.run() | [
"def",
"_correlate",
"(",
"self",
")",
":",
"a",
"=",
"self",
".",
"algorithm",
"(",
"*",
"*",
"self",
".",
"algorithm_params",
")",
"self",
".",
"correlation_result",
"=",
"a",
".",
"run",
"(",
")"
] | Run correlation algorithm. | [
"Run",
"correlation",
"algorithm",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/correlator.py#L99-L104 |
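A hedged usage sketch of the Correlator wrapper whose internals appear above. Passing plain {timestamp: value} dicts is assumed from the package's examples; get_correlation_result() and reading the result's __dict__ match the RCA record that follows, and the series values here are invented.
from luminol.correlator import Correlator

ts_a = {0: 0, 1: 0.5, 2: 1, 3: 1, 4: 1, 5: 0, 6: 0, 7: 0, 8: 0}
ts_b = {0: 0, 1: 0.4, 2: 1, 3: 1, 4: 0.9, 5: 0, 6: 0, 7: 0, 8: 0}

correlator = Correlator(ts_a, ts_b)
result = correlator.get_correlation_result()
print(result.__dict__)  # shift, coefficient and shifted coefficient of the best alignment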
3,020 | linkedin/luminol | demo/src/rca.py | RCA._analyze | def _analyze(self):
"""
Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented.
"""
output = defaultdict(list)
output_by_name = defaultdict(list)
scores = self.anomaly_detector.get_all_scores()
if self.anomalies:
for anomaly in self.anomalies:
metrix_scores = scores
start_t, end_t = anomaly.get_time_window()
t = anomaly.exact_timestamp
# Compute extended start timestamp and extended end timestamp.
room = (end_t - start_t) / 2
if not room:
room = 30
extended_start_t = start_t - room
extended_end_t = end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Adjust the two timestamps if not enough data points are included.
while len(metrix_scores_cropped) < 2:
extended_start_t = extended_start_t - room
extended_end_t = extended_end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Correlate with other metrics
for entry in self.related_metrices:
try:
entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t),
use_anomaly_score=True).get_correlation_result()
record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry
record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__
output[t].append(record)
output_by_name[entry].append(record_by_name)
except exceptions.NotEnoughDataPoints:
pass
self.output = output
self.output_by_name = output_by_name | python | def _analyze(self):
"""
Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented.
"""
output = defaultdict(list)
output_by_name = defaultdict(list)
scores = self.anomaly_detector.get_all_scores()
if self.anomalies:
for anomaly in self.anomalies:
metrix_scores = scores
start_t, end_t = anomaly.get_time_window()
t = anomaly.exact_timestamp
# Compute extended start timestamp and extended end timestamp.
room = (end_t - start_t) / 2
if not room:
room = 30
extended_start_t = start_t - room
extended_end_t = end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Adjust the two timestamps if not enough data points are included.
while len(metrix_scores_cropped) < 2:
extended_start_t = extended_start_t - room
extended_end_t = extended_end_t + room
metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)
# Correlate with other metrics
for entry in self.related_metrices:
try:
entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t),
use_anomaly_score=True).get_correlation_result()
record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry
record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__
output[t].append(record)
output_by_name[entry].append(record_by_name)
except exceptions.NotEnoughDataPoints:
pass
self.output = output
self.output_by_name = output_by_name | [
"def",
"_analyze",
"(",
"self",
")",
":",
"output",
"=",
"defaultdict",
"(",
"list",
")",
"output_by_name",
"=",
"defaultdict",
"(",
"list",
")",
"scores",
"=",
"self",
".",
"anomaly_detector",
".",
"get_all_scores",
"(",
")",
"if",
"self",
".",
"anomalies",
":",
"for",
"anomaly",
"in",
"self",
".",
"anomalies",
":",
"metrix_scores",
"=",
"scores",
"start_t",
",",
"end_t",
"=",
"anomaly",
".",
"get_time_window",
"(",
")",
"t",
"=",
"anomaly",
".",
"exact_timestamp",
"# Compute extended start timestamp and extended end timestamp.",
"room",
"=",
"(",
"end_t",
"-",
"start_t",
")",
"/",
"2",
"if",
"not",
"room",
":",
"room",
"=",
"30",
"extended_start_t",
"=",
"start_t",
"-",
"room",
"extended_end_t",
"=",
"end_t",
"+",
"room",
"metrix_scores_cropped",
"=",
"metrix_scores",
".",
"crop",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
"# Adjust the two timestamps if not enough data points are included.",
"while",
"len",
"(",
"metrix_scores_cropped",
")",
"<",
"2",
":",
"extended_start_t",
"=",
"extended_start_t",
"-",
"room",
"extended_end_t",
"=",
"extended_end_t",
"+",
"room",
"metrix_scores_cropped",
"=",
"metrix_scores",
".",
"crop",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
"# Correlate with other metrics",
"for",
"entry",
"in",
"self",
".",
"related_metrices",
":",
"try",
":",
"entry_correlation_result",
"=",
"Correlator",
"(",
"self",
".",
"metrix",
",",
"entry",
",",
"time_period",
"=",
"(",
"extended_start_t",
",",
"extended_end_t",
")",
",",
"use_anomaly_score",
"=",
"True",
")",
".",
"get_correlation_result",
"(",
")",
"record",
"=",
"extended_start_t",
",",
"extended_end_t",
",",
"entry_correlation_result",
".",
"__dict__",
",",
"entry",
"record_by_name",
"=",
"extended_start_t",
",",
"extended_end_t",
",",
"entry_correlation_result",
".",
"__dict__",
"output",
"[",
"t",
"]",
".",
"append",
"(",
"record",
")",
"output_by_name",
"[",
"entry",
"]",
".",
"append",
"(",
"record_by_name",
")",
"except",
"exceptions",
".",
"NotEnoughDataPoints",
":",
"pass",
"self",
".",
"output",
"=",
"output",
"self",
".",
"output_by_name",
"=",
"output_by_name"
] | Analyzes if a matrix has anomalies.
If any anomaly is found, determine if the matrix correlates with any other matrixes.
To be implemented. | [
"Analyzes",
"if",
"a",
"matrix",
"has",
"anomalies",
".",
"If",
"any",
"anomaly",
"is",
"found",
"determine",
"if",
"the",
"matrix",
"correlates",
"with",
"any",
"other",
"matrixes",
".",
"To",
"be",
"implemented",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/demo/src/rca.py#L49-L92 |
3,021 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py | DefaultDetector._set_scores | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _set_scores(self):
"""
Set anomaly scores using a weighted sum.
"""
anom_scores_ema = self.exp_avg_detector.run()
anom_scores_deri = self.derivative_detector.run()
anom_scores = {}
for timestamp in anom_scores_ema.timestamps:
# Compute a weighted anomaly score.
anom_scores[timestamp] = max(anom_scores_ema[timestamp],
anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT))
# If ema score is significant enough, take the bigger one of the weighted score and deri score.
if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:
anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_set_scores",
"(",
"self",
")",
":",
"anom_scores_ema",
"=",
"self",
".",
"exp_avg_detector",
".",
"run",
"(",
")",
"anom_scores_deri",
"=",
"self",
".",
"derivative_detector",
".",
"run",
"(",
")",
"anom_scores",
"=",
"{",
"}",
"for",
"timestamp",
"in",
"anom_scores_ema",
".",
"timestamps",
":",
"# Compute a weighted anomaly score.",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores_ema",
"[",
"timestamp",
"]",
",",
"anom_scores_ema",
"[",
"timestamp",
"]",
"*",
"DEFAULT_DETECTOR_EMA_WEIGHT",
"+",
"anom_scores_deri",
"[",
"timestamp",
"]",
"*",
"(",
"1",
"-",
"DEFAULT_DETECTOR_EMA_WEIGHT",
")",
")",
"# If ema score is significant enough, take the bigger one of the weighted score and deri score.",
"if",
"anom_scores_ema",
"[",
"timestamp",
"]",
">",
"DEFAULT_DETECTOR_EMA_SIGNIFICANT",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"max",
"(",
"anom_scores",
"[",
"timestamp",
"]",
",",
"anom_scores_deri",
"[",
"timestamp",
"]",
")",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Set anomaly scores using a weighted sum. | [
"Set",
"anomaly",
"scores",
"using",
"a",
"weighted",
"sum",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/default_detector.py#L35-L49 |
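A hedged usage sketch of the top-level AnomalyDetector that drives detectors such as the one above. The {timestamp: value} input and the bare constructor call are assumptions; get_anomalies(), get_time_window(), exact_timestamp and get_all_scores() match the accessors used in the RCA record earlier in this dump. The values are invented.
from luminol.anomaly_detector import AnomalyDetector

ts = {0: 1.0, 1: 1.1, 2: 0.9, 3: 1.0, 4: 6.5, 5: 1.0, 6: 1.1, 7: 0.9}

detector = AnomalyDetector(ts)
for anomaly in detector.get_anomalies():
    print(anomaly.get_time_window(), anomaly.exact_timestamp)
scores = detector.get_all_scores()  # TimeSeries of per-timestamp anomaly scores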
3,022 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/derivative_detector.py | DerivativeDetector._compute_derivatives | def _compute_derivatives(self):
"""
Compute derivatives of the time series.
"""
derivatives = []
for i, (timestamp, value) in enumerate(self.time_series_items):
if i > 0:
pre_item = self.time_series_items[i - 1]
pre_timestamp = pre_item[0]
pre_value = pre_item[1]
td = timestamp - pre_timestamp
derivative = (value - pre_value) / td if td != 0 else value - pre_value
derivative = abs(derivative)
derivatives.append(derivative)
# First timestamp is assigned the same derivative as the second timestamp.
if derivatives:
derivatives.insert(0, derivatives[0])
self.derivatives = derivatives | python | def _compute_derivatives(self):
"""
Compute derivatives of the time series.
"""
derivatives = []
for i, (timestamp, value) in enumerate(self.time_series_items):
if i > 0:
pre_item = self.time_series_items[i - 1]
pre_timestamp = pre_item[0]
pre_value = pre_item[1]
td = timestamp - pre_timestamp
derivative = (value - pre_value) / td if td != 0 else value - pre_value
derivative = abs(derivative)
derivatives.append(derivative)
# First timestamp is assigned the same derivative as the second timestamp.
if derivatives:
derivatives.insert(0, derivatives[0])
self.derivatives = derivatives | [
"def",
"_compute_derivatives",
"(",
"self",
")",
":",
"derivatives",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"if",
"i",
">",
"0",
":",
"pre_item",
"=",
"self",
".",
"time_series_items",
"[",
"i",
"-",
"1",
"]",
"pre_timestamp",
"=",
"pre_item",
"[",
"0",
"]",
"pre_value",
"=",
"pre_item",
"[",
"1",
"]",
"td",
"=",
"timestamp",
"-",
"pre_timestamp",
"derivative",
"=",
"(",
"value",
"-",
"pre_value",
")",
"/",
"td",
"if",
"td",
"!=",
"0",
"else",
"value",
"-",
"pre_value",
"derivative",
"=",
"abs",
"(",
"derivative",
")",
"derivatives",
".",
"append",
"(",
"derivative",
")",
"# First timestamp is assigned the same derivative as the second timestamp.",
"if",
"derivatives",
":",
"derivatives",
".",
"insert",
"(",
"0",
",",
"derivatives",
"[",
"0",
"]",
")",
"self",
".",
"derivatives",
"=",
"derivatives"
] | Compute derivatives of the time series. | [
"Compute",
"derivatives",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/derivative_detector.py#L38-L55 |
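The derivative scoring above in isolation, on invented points: each score is |(value[i] - value[i-1]) / (t[i] - t[i-1])|, and the first point reuses the second point's value.
points = [(0, 1.0), (10, 1.2), (20, 5.0), (30, 5.1)]

derivatives = []
for i in range(1, len(points)):
    (t0, v0), (t1, v1) = points[i - 1], points[i]
    td = t1 - t0
    derivatives.append(abs((v1 - v0) / td) if td != 0 else abs(v1 - v0))
derivatives.insert(0, derivatives[0])  # first timestamp copies the second
print(derivatives)  # roughly [0.02, 0.02, 0.38, 0.01]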
3,023 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._sanity_check | def _sanity_check(self):
"""
Check if there are enough data points.
"""
windows = self.lag_window_size + self.future_window_size
if (not self.lag_window_size or not self.future_window_size or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
raise exceptions.NotEnoughDataPoints
# If window size is too big, too many data points will be assigned a score of 0 in the first lag window
# and the last future window.
if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS | python | def _sanity_check(self):
"""
Check if there are enough data points.
"""
windows = self.lag_window_size + self.future_window_size
if (not self.lag_window_size or not self.future_window_size or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
raise exceptions.NotEnoughDataPoints
# If window size is too big, too many data points will be assigned a score of 0 in the first lag window
# and the last future window.
if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS | [
"def",
"_sanity_check",
"(",
"self",
")",
":",
"windows",
"=",
"self",
".",
"lag_window_size",
"+",
"self",
".",
"future_window_size",
"if",
"(",
"not",
"self",
".",
"lag_window_size",
"or",
"not",
"self",
".",
"future_window_size",
"or",
"self",
".",
"time_series_length",
"<",
"windows",
"or",
"windows",
"<",
"DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS",
")",
":",
"raise",
"exceptions",
".",
"NotEnoughDataPoints",
"# If window size is too big, too many data points will be assigned a score of 0 in the first lag window",
"# and the last future window.",
"if",
"self",
".",
"lag_window_size",
">",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
":",
"self",
".",
"lag_window_size",
"=",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
"if",
"self",
".",
"future_window_size",
">",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS",
":",
"self",
".",
"future_window_size",
"=",
"DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS"
] | Check if there are enough data points. | [
"Check",
"if",
"there",
"are",
"enough",
"data",
"points",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L60-L73 |
3,024 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._generate_SAX | def _generate_SAX(self):
"""
Generate SAX representation for all values of the time series.
"""
sections = {}
self.value_min = self.time_series.min()
self.value_max = self.time_series.max()
# Break the whole value range into different sections.
section_height = (self.value_max - self.value_min) / self.precision
for section_number in range(self.precision):
sections[section_number] = self.value_min + section_number * section_height
# Generate SAX representation.
self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values) | python | def _generate_SAX(self):
"""
Generate SAX representation for all values of the time series.
"""
sections = {}
self.value_min = self.time_series.min()
self.value_max = self.time_series.max()
# Break the whole value range into different sections.
section_height = (self.value_max - self.value_min) / self.precision
for section_number in range(self.precision):
sections[section_number] = self.value_min + section_number * section_height
# Generate SAX representation.
self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values) | [
"def",
"_generate_SAX",
"(",
"self",
")",
":",
"sections",
"=",
"{",
"}",
"self",
".",
"value_min",
"=",
"self",
".",
"time_series",
".",
"min",
"(",
")",
"self",
".",
"value_max",
"=",
"self",
".",
"time_series",
".",
"max",
"(",
")",
"# Break the whole value range into different sections.",
"section_height",
"=",
"(",
"self",
".",
"value_max",
"-",
"self",
".",
"value_min",
")",
"/",
"self",
".",
"precision",
"for",
"section_number",
"in",
"range",
"(",
"self",
".",
"precision",
")",
":",
"sections",
"[",
"section_number",
"]",
"=",
"self",
".",
"value_min",
"+",
"section_number",
"*",
"section_height",
"# Generate SAX representation.",
"self",
".",
"sax",
"=",
"''",
".",
"join",
"(",
"self",
".",
"_generate_SAX_single",
"(",
"sections",
",",
"value",
")",
"for",
"value",
"in",
"self",
".",
"time_series",
".",
"values",
")"
] | Generate SAX representation for all values of the time series. | [
"Generate",
"SAX",
"representation",
"for",
"all",
"values",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L92-L104 |
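A sketch of the SAX discretization with invented values: the value range is cut into precision equal-height sections and each point becomes the label of its section. The per-value helper _generate_SAX_single is not shown in this record, so the symbol lookup below is a plain reimplementation of the idea rather than the module's code.
values = [0.0, 0.2, 0.9, 1.0, 0.5]
precision = 4

vmin, vmax = min(values), max(values)
section_height = (vmax - vmin) / precision

def to_symbol(value):
    # index of the section the value falls into, capped at precision - 1
    if not section_height:
        return "0"
    return str(min(int((value - vmin) // section_height), precision - 1))

sax = "".join(to_symbol(v) for v in values)
print(sax)  # '00332'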
3,025 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._set_scores | def _set_scores(self):
"""
Compute anomaly scores for the time series by sliding both lagging window and future window.
"""
anom_scores = {}
self._generate_SAX()
self._construct_all_SAX_chunk_dict()
length = self.time_series_length
lws = self.lag_window_size
fws = self.future_window_size
for i, timestamp in enumerate(self.time_series.timestamps):
if i < lws or i > length - fws:
anom_scores[timestamp] = 0
else:
anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _set_scores(self):
"""
Compute anomaly scores for the time series by sliding both lagging window and future window.
"""
anom_scores = {}
self._generate_SAX()
self._construct_all_SAX_chunk_dict()
length = self.time_series_length
lws = self.lag_window_size
fws = self.future_window_size
for i, timestamp in enumerate(self.time_series.timestamps):
if i < lws or i > length - fws:
anom_scores[timestamp] = 0
else:
anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_set_scores",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"self",
".",
"_generate_SAX",
"(",
")",
"self",
".",
"_construct_all_SAX_chunk_dict",
"(",
")",
"length",
"=",
"self",
".",
"time_series_length",
"lws",
"=",
"self",
".",
"lag_window_size",
"fws",
"=",
"self",
".",
"future_window_size",
"for",
"i",
",",
"timestamp",
"in",
"enumerate",
"(",
"self",
".",
"time_series",
".",
"timestamps",
")",
":",
"if",
"i",
"<",
"lws",
"or",
"i",
">",
"length",
"-",
"fws",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"0",
"else",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"self",
".",
"_compute_anom_score_between_two_windows",
"(",
"i",
")",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores for the time series by sliding both lagging window and future window. | [
"Compute",
"anomaly",
"scores",
"for",
"the",
"time",
"series",
"by",
"sliding",
"both",
"lagging",
"window",
"and",
"future",
"window",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L196-L212 |
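A hedged sketch of what the per-index comparison works on: chunk-frequency maps for a lagging and a future window of the SAX string. The distance used here (sum of squared count differences) only approximates the detector's own metric, which lives in helpers not shown in this record; the windows and chunk size are invented.
from collections import Counter

def chunk_counts(sax, chunk_size=2):
    return Counter(sax[i:i + chunk_size] for i in range(len(sax) - chunk_size + 1))

lag_window, future_window = "00110011", "00113311"
lag, fut = chunk_counts(lag_window), chunk_counts(future_window)
score = sum((lag[c] - fut[c]) ** 2 for c in set(lag) | set(fut))
print(score)  # larger values mean the two windows look less alike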
3,026 | linkedin/luminol | src/luminol/algorithms/correlator_algorithms/cross_correlator.py | CrossCorrelator._detect_correlation | def _detect_correlation(self):
"""
Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum.
"""
correlations = []
shifted_correlations = []
self.time_series_a.normalize()
self.time_series_b.normalize()
a, b = self.time_series_a.align(self.time_series_b)
a_values, b_values = a.values, b.values
a_avg, b_avg = a.average(), b.average()
a_stdev, b_stdev = a.stdev(), b.stdev()
n = len(a)
denom = a_stdev * b_stdev * n
# Find the maximal shift steps according to the maximal shift seconds.
allowed_shift_step = self._find_allowed_shift(a.timestamps)
if allowed_shift_step:
shift_upper_bound = allowed_shift_step
shift_lower_bound = -allowed_shift_step
else:
shift_upper_bound = 1
shift_lower_bound = 0
for delay in range(shift_lower_bound, shift_upper_bound):
delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
if delay < 0:
delay_in_seconds = -delay_in_seconds
s = 0
for i in range(n):
j = i + delay
if j < 0 or j >= n:
continue
else:
s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
r = s / denom if denom != 0 else s
correlations.append([delay_in_seconds, r])
# Take shift into account to create a "shifted correlation coefficient".
if self.max_shift_milliseconds:
shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
else:
shifted_correlations.append(r)
max_correlation = list(max(correlations, key=lambda k: k[1]))
max_shifted_correlation = max(shifted_correlations)
max_correlation.append(max_shifted_correlation)
self.correlation_result = CorrelationResult(*max_correlation) | python | def _detect_correlation(self):
"""
Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum.
"""
correlations = []
shifted_correlations = []
self.time_series_a.normalize()
self.time_series_b.normalize()
a, b = self.time_series_a.align(self.time_series_b)
a_values, b_values = a.values, b.values
a_avg, b_avg = a.average(), b.average()
a_stdev, b_stdev = a.stdev(), b.stdev()
n = len(a)
denom = a_stdev * b_stdev * n
# Find the maximal shift steps according to the maximal shift seconds.
allowed_shift_step = self._find_allowed_shift(a.timestamps)
if allowed_shift_step:
shift_upper_bound = allowed_shift_step
shift_lower_bound = -allowed_shift_step
else:
shift_upper_bound = 1
shift_lower_bound = 0
for delay in range(shift_lower_bound, shift_upper_bound):
delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
if delay < 0:
delay_in_seconds = -delay_in_seconds
s = 0
for i in range(n):
j = i + delay
if j < 0 or j >= n:
continue
else:
s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
r = s / denom if denom != 0 else s
correlations.append([delay_in_seconds, r])
# Take shift into account to create a "shifted correlation coefficient".
if self.max_shift_milliseconds:
shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
else:
shifted_correlations.append(r)
max_correlation = list(max(correlations, key=lambda k: k[1]))
max_shifted_correlation = max(shifted_correlations)
max_correlation.append(max_shifted_correlation)
self.correlation_result = CorrelationResult(*max_correlation) | [
"def",
"_detect_correlation",
"(",
"self",
")",
":",
"correlations",
"=",
"[",
"]",
"shifted_correlations",
"=",
"[",
"]",
"self",
".",
"time_series_a",
".",
"normalize",
"(",
")",
"self",
".",
"time_series_b",
".",
"normalize",
"(",
")",
"a",
",",
"b",
"=",
"self",
".",
"time_series_a",
".",
"align",
"(",
"self",
".",
"time_series_b",
")",
"a_values",
",",
"b_values",
"=",
"a",
".",
"values",
",",
"b",
".",
"values",
"a_avg",
",",
"b_avg",
"=",
"a",
".",
"average",
"(",
")",
",",
"b",
".",
"average",
"(",
")",
"a_stdev",
",",
"b_stdev",
"=",
"a",
".",
"stdev",
"(",
")",
",",
"b",
".",
"stdev",
"(",
")",
"n",
"=",
"len",
"(",
"a",
")",
"denom",
"=",
"a_stdev",
"*",
"b_stdev",
"*",
"n",
"# Find the maximal shift steps according to the maximal shift seconds.",
"allowed_shift_step",
"=",
"self",
".",
"_find_allowed_shift",
"(",
"a",
".",
"timestamps",
")",
"if",
"allowed_shift_step",
":",
"shift_upper_bound",
"=",
"allowed_shift_step",
"shift_lower_bound",
"=",
"-",
"allowed_shift_step",
"else",
":",
"shift_upper_bound",
"=",
"1",
"shift_lower_bound",
"=",
"0",
"for",
"delay",
"in",
"range",
"(",
"shift_lower_bound",
",",
"shift_upper_bound",
")",
":",
"delay_in_seconds",
"=",
"a",
".",
"timestamps",
"[",
"abs",
"(",
"delay",
")",
"]",
"-",
"a",
".",
"timestamps",
"[",
"0",
"]",
"if",
"delay",
"<",
"0",
":",
"delay_in_seconds",
"=",
"-",
"delay_in_seconds",
"s",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"j",
"=",
"i",
"+",
"delay",
"if",
"j",
"<",
"0",
"or",
"j",
">=",
"n",
":",
"continue",
"else",
":",
"s",
"+=",
"(",
"(",
"a_values",
"[",
"i",
"]",
"-",
"a_avg",
")",
"*",
"(",
"b_values",
"[",
"j",
"]",
"-",
"b_avg",
")",
")",
"r",
"=",
"s",
"/",
"denom",
"if",
"denom",
"!=",
"0",
"else",
"s",
"correlations",
".",
"append",
"(",
"[",
"delay_in_seconds",
",",
"r",
"]",
")",
"# Take shift into account to create a \"shifted correlation coefficient\".",
"if",
"self",
".",
"max_shift_milliseconds",
":",
"shifted_correlations",
".",
"append",
"(",
"r",
"*",
"(",
"1",
"+",
"float",
"(",
"delay_in_seconds",
")",
"/",
"self",
".",
"max_shift_milliseconds",
"*",
"self",
".",
"shift_impact",
")",
")",
"else",
":",
"shifted_correlations",
".",
"append",
"(",
"r",
")",
"max_correlation",
"=",
"list",
"(",
"max",
"(",
"correlations",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"1",
"]",
")",
")",
"max_shifted_correlation",
"=",
"max",
"(",
"shifted_correlations",
")",
"max_correlation",
".",
"append",
"(",
"max_shifted_correlation",
")",
"self",
".",
"correlation_result",
"=",
"CorrelationResult",
"(",
"*",
"max_correlation",
")"
] | Detect correlation by computing correlation coefficients for all allowed shift steps,
then take the maximum. | [
"Detect",
"correlation",
"by",
"computing",
"correlation",
"coefficients",
"for",
"all",
"allowed",
"shift",
"steps",
"then",
"take",
"the",
"maximum",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/correlator_algorithms/cross_correlator.py#L39-L83 |
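A toy illustration of the shifted correlation computed above: slide one series against the other and keep the delay with the largest coefficient. The arrays are invented and the expression mirrors the record's formula (centered products divided by stdev_a * stdev_b * n).
import numpy as np

a = np.array([0.0, 1.0, 3.0, 1.0, 0.0, 0.0])
b = np.array([0.0, 0.0, 1.0, 3.0, 1.0, 0.0])  # roughly a, delayed by one step

n = len(a)
denom = a.std() * b.std() * n
best = max(
    ((delay,
      sum((a[i] - a.mean()) * (b[i + delay] - b.mean())
          for i in range(n) if 0 <= i + delay < n) / denom)
     for delay in range(-2, 3)),
    key=lambda kv: kv[1],
)
print(best)  # a delay of one step gives the largest coefficient (about 0.9)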
3,027 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py | ExpAvgDetector._compute_anom_data_using_window | def _compute_anom_data_using_window(self):
"""
Compute anomaly scores using a lagging window.
"""
anom_scores = {}
values = self.time_series.values
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
if i < self.lag_window_size:
anom_score = self._compute_anom_score(values[:i + 1], value)
else:
anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)
if stdev:
anom_scores[timestamp] = anom_score / stdev
else:
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _compute_anom_data_using_window(self):
"""
Compute anomaly scores using a lagging window.
"""
anom_scores = {}
values = self.time_series.values
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
if i < self.lag_window_size:
anom_score = self._compute_anom_score(values[:i + 1], value)
else:
anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)
if stdev:
anom_scores[timestamp] = anom_score / stdev
else:
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_compute_anom_data_using_window",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"values",
"=",
"self",
".",
"time_series",
".",
"values",
"stdev",
"=",
"numpy",
".",
"std",
"(",
"values",
")",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"if",
"i",
"<",
"self",
".",
"lag_window_size",
":",
"anom_score",
"=",
"self",
".",
"_compute_anom_score",
"(",
"values",
"[",
":",
"i",
"+",
"1",
"]",
",",
"value",
")",
"else",
":",
"anom_score",
"=",
"self",
".",
"_compute_anom_score",
"(",
"values",
"[",
"i",
"-",
"self",
".",
"lag_window_size",
":",
"i",
"+",
"1",
"]",
",",
"value",
")",
"if",
"stdev",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"/",
"stdev",
"else",
":",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores using a lagging window. | [
"Compute",
"anomaly",
"scores",
"using",
"a",
"lagging",
"window",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py#L53-L69 |
3,028 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py | ExpAvgDetector._compute_anom_data_decay_all | def _compute_anom_data_decay_all(self):
"""
Compute anomaly scores using a lagging window covering all the data points before.
"""
anom_scores = {}
values = self.time_series.values
ema = utils.compute_ema(self.smoothing_factor, values)
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
anom_score = abs((value - ema[i]) / stdev) if stdev else value - ema[i]
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | python | def _compute_anom_data_decay_all(self):
"""
Compute anomaly scores using a lagging window covering all the data points before.
"""
anom_scores = {}
values = self.time_series.values
ema = utils.compute_ema(self.smoothing_factor, values)
stdev = numpy.std(values)
for i, (timestamp, value) in enumerate(self.time_series_items):
anom_score = abs((value - ema[i]) / stdev) if stdev else value - ema[i]
anom_scores[timestamp] = anom_score
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores)) | [
"def",
"_compute_anom_data_decay_all",
"(",
"self",
")",
":",
"anom_scores",
"=",
"{",
"}",
"values",
"=",
"self",
".",
"time_series",
".",
"values",
"ema",
"=",
"utils",
".",
"compute_ema",
"(",
"self",
".",
"smoothing_factor",
",",
"values",
")",
"stdev",
"=",
"numpy",
".",
"std",
"(",
"values",
")",
"for",
"i",
",",
"(",
"timestamp",
",",
"value",
")",
"in",
"enumerate",
"(",
"self",
".",
"time_series_items",
")",
":",
"anom_score",
"=",
"abs",
"(",
"(",
"value",
"-",
"ema",
"[",
"i",
"]",
")",
"/",
"stdev",
")",
"if",
"stdev",
"else",
"value",
"-",
"ema",
"[",
"i",
"]",
"anom_scores",
"[",
"timestamp",
"]",
"=",
"anom_score",
"self",
".",
"anom_scores",
"=",
"TimeSeries",
"(",
"self",
".",
"_denoise_scores",
"(",
"anom_scores",
")",
")"
] | Compute anomaly scores using a lagging window covering all the data points before. | [
"Compute",
"anomaly",
"scores",
"using",
"a",
"lagging",
"window",
"covering",
"all",
"the",
"data",
"points",
"before",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/exp_avg_detector.py#L71-L82 |
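A sketch of the decay-all scoring with invented numbers. utils.compute_ema is not shown here, so it is assumed to be the usual recursion ema[i] = s * x[i] + (1 - s) * ema[i-1]; each score is then |x[i] - ema[i]| / stdev of the whole series, as in the loop above.
import numpy

values = [1.0, 1.1, 0.9, 1.0, 6.0, 1.0]
smoothing_factor = 0.2

ema = [values[0]]
for v in values[1:]:
    ema.append(smoothing_factor * v + (1 - smoothing_factor) * ema[-1])

stdev = numpy.std(values)
scores = [abs(v - e) / stdev if stdev else v - e for v, e in zip(values, ema)]
print(["{:.2f}".format(s) for s in scores])  # the spike at index 4 gets by far the top score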
3,029 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries._generic_binary_op | def _generic_binary_op(self, other, op):
"""
Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object.
"""
output = {}
if isinstance(other, TimeSeries):
for key, value in self.items():
if key in other:
try:
result = op(value, other[key])
if result is NotImplemented:
other_type = type(other[key])
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other[key])
else:
output[key] = result
except ZeroDivisionError:
continue
else:
for key, value in self.items():
try:
result = op(value, other)
if result is NotImplemented:
other_type = type(other)
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other)
else:
output[key] = result
except ZeroDivisionError:
continue
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | python | def _generic_binary_op(self, other, op):
"""
Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object.
"""
output = {}
if isinstance(other, TimeSeries):
for key, value in self.items():
if key in other:
try:
result = op(value, other[key])
if result is NotImplemented:
other_type = type(other[key])
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other[key])
else:
output[key] = result
except ZeroDivisionError:
continue
else:
for key, value in self.items():
try:
result = op(value, other)
if result is NotImplemented:
other_type = type(other)
other_op = vars(other_type).get(op.__name__)
if other_op:
output[key] = other_op(other_type(value), other)
else:
output[key] = result
except ZeroDivisionError:
continue
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | [
"def",
"_generic_binary_op",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"output",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"other",
",",
"TimeSeries",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"other",
":",
"try",
":",
"result",
"=",
"op",
"(",
"value",
",",
"other",
"[",
"key",
"]",
")",
"if",
"result",
"is",
"NotImplemented",
":",
"other_type",
"=",
"type",
"(",
"other",
"[",
"key",
"]",
")",
"other_op",
"=",
"vars",
"(",
"other_type",
")",
".",
"get",
"(",
"op",
".",
"__name__",
")",
"if",
"other_op",
":",
"output",
"[",
"key",
"]",
"=",
"other_op",
"(",
"other_type",
"(",
"value",
")",
",",
"other",
"[",
"key",
"]",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"result",
"except",
"ZeroDivisionError",
":",
"continue",
"else",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"try",
":",
"result",
"=",
"op",
"(",
"value",
",",
"other",
")",
"if",
"result",
"is",
"NotImplemented",
":",
"other_type",
"=",
"type",
"(",
"other",
")",
"other_op",
"=",
"vars",
"(",
"other_type",
")",
".",
"get",
"(",
"op",
".",
"__name__",
")",
"if",
"other_op",
":",
"output",
"[",
"key",
"]",
"=",
"other_op",
"(",
"other_type",
"(",
"value",
")",
",",
"other",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"result",
"except",
"ZeroDivisionError",
":",
"continue",
"if",
"output",
":",
"return",
"TimeSeries",
"(",
"output",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'TimeSeries data was empty or invalid.'",
")"
] | Perform the method operation specified in the op parameter on the values
within the instance's time series values and either another time series
or a constant number value.
:param other: Time series of values or a constant number to use in calculations with instance's time series.
:param func op: The method to perform the calculation between the values.
:return: :class:`TimeSeries` object. | [
"Perform",
"the",
"method",
"operation",
"specified",
"in",
"the",
"op",
"parameter",
"on",
"the",
"values",
"within",
"the",
"instance",
"s",
"time",
"series",
"values",
"and",
"either",
"another",
"time",
"series",
"or",
"a",
"constant",
"number",
"value",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L150-L192 |
3,030 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries._get_value_type | def _get_value_type(self, other):
"""
Get the object type of the value within the values portion of the time series.
:return: `type` of object
"""
if self.values:
return type(self.values[0])
elif isinstance(other, TimeSeries) and other.values:
return type(other.values[0])
else:
raise ValueError('Cannot perform arithmetic on empty time series.') | python | def _get_value_type(self, other):
"""
Get the object type of the value within the values portion of the time series.
:return: `type` of object
"""
if self.values:
return type(self.values[0])
elif isinstance(other, TimeSeries) and other.values:
return type(other.values[0])
else:
raise ValueError('Cannot perform arithmetic on empty time series.') | [
"def",
"_get_value_type",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"values",
":",
"return",
"type",
"(",
"self",
".",
"values",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"other",
",",
"TimeSeries",
")",
"and",
"other",
".",
"values",
":",
"return",
"type",
"(",
"other",
".",
"values",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot perform arithmetic on empty time series.'",
")"
] | Get the object type of the value within the values portion of the time series.
:return: `type` of object | [
"Get",
"the",
"object",
"type",
"of",
"the",
"value",
"within",
"the",
"values",
"portion",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L194-L205 |
3,031 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.smooth | def smooth(self, smoothing_factor):
"""
        return a new time series which is an exponentially smoothed version of the original data series.
        smooth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) | python | def smooth(self, smoothing_factor):
"""
        return a new time series which is an exponentially smoothed version of the original data series.
        smooth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object.
"""
forward_smooth = {}
backward_smooth = {}
output = {}
if self:
pre = self.values[0]
next = self.values[-1]
for key, value in self.items():
forward_smooth[key] = smoothing_factor * pre + (1 - smoothing_factor) * value
pre = forward_smooth[key]
for key, value in reversed(self.items()):
backward_smooth[key] = smoothing_factor * next + (1 - smoothing_factor) * value
next = backward_smooth[key]
for key in forward_smooth.keys():
output[key] = (forward_smooth[key] + backward_smooth[key]) / 2
return TimeSeries(output) | [
"def",
"smooth",
"(",
"self",
",",
"smoothing_factor",
")",
":",
"forward_smooth",
"=",
"{",
"}",
"backward_smooth",
"=",
"{",
"}",
"output",
"=",
"{",
"}",
"if",
"self",
":",
"pre",
"=",
"self",
".",
"values",
"[",
"0",
"]",
"next",
"=",
"self",
".",
"values",
"[",
"-",
"1",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"forward_smooth",
"[",
"key",
"]",
"=",
"smoothing_factor",
"*",
"pre",
"+",
"(",
"1",
"-",
"smoothing_factor",
")",
"*",
"value",
"pre",
"=",
"forward_smooth",
"[",
"key",
"]",
"for",
"key",
",",
"value",
"in",
"reversed",
"(",
"self",
".",
"items",
"(",
")",
")",
":",
"backward_smooth",
"[",
"key",
"]",
"=",
"smoothing_factor",
"*",
"next",
"+",
"(",
"1",
"-",
"smoothing_factor",
")",
"*",
"value",
"next",
"=",
"backward_smooth",
"[",
"key",
"]",
"for",
"key",
"in",
"forward_smooth",
".",
"keys",
"(",
")",
":",
"output",
"[",
"key",
"]",
"=",
"(",
"forward_smooth",
"[",
"key",
"]",
"+",
"backward_smooth",
"[",
"key",
"]",
")",
"/",
"2",
"return",
"TimeSeries",
"(",
"output",
")"
] | return a new time series which is an exponentially smoothed version of the original data series.
        smooth forward once, backward once, and then take the average.
:param float smoothing_factor: smoothing factor
:return: :class:`TimeSeries` object. | [
"return",
"a",
"new",
"time",
"series",
"which",
"is",
"a",
"exponential",
"smoothed",
"version",
"of",
"the",
"original",
"data",
"series",
".",
"soomth",
"forward",
"once",
"backward",
"once",
"and",
"then",
"take",
"the",
"average",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L248-L272 |
3,032 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.add_offset | def add_offset(self, offset):
"""
Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None`
"""
self.timestamps = [ts + offset for ts in self.timestamps] | python | def add_offset(self, offset):
"""
Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None`
"""
self.timestamps = [ts + offset for ts in self.timestamps] | [
"def",
"add_offset",
"(",
"self",
",",
"offset",
")",
":",
"self",
".",
"timestamps",
"=",
"[",
"ts",
"+",
"offset",
"for",
"ts",
"in",
"self",
".",
"timestamps",
"]"
] | Return a new time series with all timestamps incremented by some offset.
:param int offset: The number of seconds to offset the time series.
:return: `None` | [
"Return",
"a",
"new",
"time",
"series",
"with",
"all",
"timestamps",
"incremented",
"by",
"some",
"offset",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L274-L281 |
3,033 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.normalize | def normalize(self):
"""
Return a new time series with all values normalized to 0 to 1.
:return: `None`
"""
maximum = self.max()
if maximum:
self.values = [value / maximum for value in self.values] | python | def normalize(self):
"""
Return a new time series with all values normalized to 0 to 1.
:return: `None`
"""
maximum = self.max()
if maximum:
self.values = [value / maximum for value in self.values] | [
"def",
"normalize",
"(",
"self",
")",
":",
"maximum",
"=",
"self",
".",
"max",
"(",
")",
"if",
"maximum",
":",
"self",
".",
"values",
"=",
"[",
"value",
"/",
"maximum",
"for",
"value",
"in",
"self",
".",
"values",
"]"
] | Return a new time series with all values normalized to 0 to 1.
:return: `None` | [
"Return",
"a",
"new",
"time",
"series",
"with",
"all",
"values",
"normalized",
"to",
"0",
"to",
"1",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L283-L291 |
3,034 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.crop | def crop(self, start_timestamp, end_timestamp):
"""
        Return a new TimeSeries object that contains all the timestamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | python | def crop(self, start_timestamp, end_timestamp):
"""
        Return a new TimeSeries object that contains all the timestamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | [
"def",
"crop",
"(",
"self",
",",
"start_timestamp",
",",
"end_timestamp",
")",
":",
"output",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"key",
">=",
"start_timestamp",
"and",
"key",
"<=",
"end_timestamp",
":",
"output",
"[",
"key",
"]",
"=",
"value",
"if",
"output",
":",
"return",
"TimeSeries",
"(",
"output",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'TimeSeries data was empty or invalid.'",
")"
] | Return a new TimeSeries object that contains all the timestamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object. | [
"Return",
"a",
"new",
"TimeSeries",
"object",
"contains",
"all",
"the",
"timstamps",
"and",
"values",
"within",
"the",
"specified",
"range",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L293-L310 |
3,035 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.average | def average(self, default=None):
"""
Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`.
"""
return numpy.asscalar(numpy.average(self.values)) if self.values else default | python | def average(self, default=None):
"""
Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`.
"""
return numpy.asscalar(numpy.average(self.values)) if self.values else default | [
"def",
"average",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"average",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the average value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the average value or `None`. | [
"Calculate",
"the",
"average",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L312-L319 |
3,036 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.median | def median(self, default=None):
"""
Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`.
"""
return numpy.asscalar(numpy.median(self.values)) if self.values else default | python | def median(self, default=None):
"""
Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`.
"""
return numpy.asscalar(numpy.median(self.values)) if self.values else default | [
"def",
"median",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"median",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the median value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the median value or `None`. | [
"Calculate",
"the",
"median",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L321-L328 |
3,037 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.max | def max(self, default=None):
"""
Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.max(self.values)) if self.values else default | python | def max(self, default=None):
"""
Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.max(self.values)) if self.values else default | [
"def",
"max",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"max",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`. | [
"Calculate",
"the",
"maximum",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L330-L337 |
3,038 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.min | def min(self, default=None):
"""
Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
        :return: Float representing the minimum value or `None`.
"""
return numpy.asscalar(numpy.min(self.values)) if self.values else default | python | def min(self, default=None):
"""
Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
        :return: Float representing the minimum value or `None`.
"""
return numpy.asscalar(numpy.min(self.values)) if self.values else default | [
"def",
"min",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"min",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the minimum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
        :return: Float representing the minimum value or `None`. | [
"Calculate",
"the",
"minimum",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L339-L346 |
3,039 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.percentile | def percentile(self, n, default=None):
"""
Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`.
"""
return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default | python | def percentile(self, n, default=None):
"""
Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`.
"""
return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default | [
"def",
"percentile",
"(",
"self",
",",
"n",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"percentile",
"(",
"self",
".",
"values",
",",
"n",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the Nth Percentile value over the time series.
:param int n: Integer value of the percentile to calculate.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the Nth percentile value or `None`. | [
"Calculate",
"the",
"Nth",
"Percentile",
"value",
"over",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L348-L356 |
3,040 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.stdev | def stdev(self, default=None):
"""
Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`.
"""
return numpy.asscalar(numpy.std(self.values)) if self.values else default | python | def stdev(self, default=None):
"""
Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`.
"""
return numpy.asscalar(numpy.std(self.values)) if self.values else default | [
"def",
"stdev",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"std",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the standard deviation of the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the standard deviation value or `None`. | [
"Calculate",
"the",
"standard",
"deviation",
"of",
"the",
"time",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L358-L365 |
3,041 | linkedin/luminol | src/luminol/modules/time_series.py | TimeSeries.sum | def sum(self, default=None):
"""
        Calculate the sum of all the values in the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default | python | def sum(self, default=None):
"""
        Calculate the sum of all the values in the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default | [
"def",
"sum",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"sum",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | Calculate the sum of all the values in the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`. | [
"Calculate",
"the",
"sum",
"of",
"all",
"the",
"values",
"in",
"the",
"times",
"series",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L367-L374 |
3,042 | linkedin/luminol | src/luminol/anomaly_detector.py | AnomalyDetector._detect_anomalies | def _detect_anomalies(self):
"""
Detect anomalies using a threshold on anomaly scores.
"""
anom_scores = self.anom_scores
max_anom_score = anom_scores.max()
anomalies = []
if max_anom_score:
threshold = self.threshold or max_anom_score * self.score_percent_threshold
# Find all the anomaly intervals.
intervals = []
start, end = None, None
for timestamp, value in anom_scores.iteritems():
if value > threshold:
end = timestamp
if not start:
start = timestamp
elif start and end is not None:
intervals.append([start, end])
start = None
end = None
if start is not None:
intervals.append([start, end])
# Locate the exact anomaly point within each anomaly interval.
for interval_start, interval_end in intervals:
interval_series = anom_scores.crop(interval_start, interval_end)
self.refine_algorithm_params['time_series'] = interval_series
refine_algorithm = self.refine_algorithm(**self.refine_algorithm_params)
scores = refine_algorithm.run()
max_refine_score = scores.max()
# Get the timestamp of the maximal score.
max_refine_timestamp = scores.timestamps[scores.values.index(max_refine_score)]
anomaly = Anomaly(interval_start, interval_end, interval_series.max(), max_refine_timestamp)
anomalies.append(anomaly)
self.anomalies = anomalies | python | def _detect_anomalies(self):
"""
Detect anomalies using a threshold on anomaly scores.
"""
anom_scores = self.anom_scores
max_anom_score = anom_scores.max()
anomalies = []
if max_anom_score:
threshold = self.threshold or max_anom_score * self.score_percent_threshold
# Find all the anomaly intervals.
intervals = []
start, end = None, None
for timestamp, value in anom_scores.iteritems():
if value > threshold:
end = timestamp
if not start:
start = timestamp
elif start and end is not None:
intervals.append([start, end])
start = None
end = None
if start is not None:
intervals.append([start, end])
# Locate the exact anomaly point within each anomaly interval.
for interval_start, interval_end in intervals:
interval_series = anom_scores.crop(interval_start, interval_end)
self.refine_algorithm_params['time_series'] = interval_series
refine_algorithm = self.refine_algorithm(**self.refine_algorithm_params)
scores = refine_algorithm.run()
max_refine_score = scores.max()
# Get the timestamp of the maximal score.
max_refine_timestamp = scores.timestamps[scores.values.index(max_refine_score)]
anomaly = Anomaly(interval_start, interval_end, interval_series.max(), max_refine_timestamp)
anomalies.append(anomaly)
self.anomalies = anomalies | [
"def",
"_detect_anomalies",
"(",
"self",
")",
":",
"anom_scores",
"=",
"self",
".",
"anom_scores",
"max_anom_score",
"=",
"anom_scores",
".",
"max",
"(",
")",
"anomalies",
"=",
"[",
"]",
"if",
"max_anom_score",
":",
"threshold",
"=",
"self",
".",
"threshold",
"or",
"max_anom_score",
"*",
"self",
".",
"score_percent_threshold",
"# Find all the anomaly intervals.",
"intervals",
"=",
"[",
"]",
"start",
",",
"end",
"=",
"None",
",",
"None",
"for",
"timestamp",
",",
"value",
"in",
"anom_scores",
".",
"iteritems",
"(",
")",
":",
"if",
"value",
">",
"threshold",
":",
"end",
"=",
"timestamp",
"if",
"not",
"start",
":",
"start",
"=",
"timestamp",
"elif",
"start",
"and",
"end",
"is",
"not",
"None",
":",
"intervals",
".",
"append",
"(",
"[",
"start",
",",
"end",
"]",
")",
"start",
"=",
"None",
"end",
"=",
"None",
"if",
"start",
"is",
"not",
"None",
":",
"intervals",
".",
"append",
"(",
"[",
"start",
",",
"end",
"]",
")",
"# Locate the exact anomaly point within each anomaly interval.",
"for",
"interval_start",
",",
"interval_end",
"in",
"intervals",
":",
"interval_series",
"=",
"anom_scores",
".",
"crop",
"(",
"interval_start",
",",
"interval_end",
")",
"self",
".",
"refine_algorithm_params",
"[",
"'time_series'",
"]",
"=",
"interval_series",
"refine_algorithm",
"=",
"self",
".",
"refine_algorithm",
"(",
"*",
"*",
"self",
".",
"refine_algorithm_params",
")",
"scores",
"=",
"refine_algorithm",
".",
"run",
"(",
")",
"max_refine_score",
"=",
"scores",
".",
"max",
"(",
")",
"# Get the timestamp of the maximal score.",
"max_refine_timestamp",
"=",
"scores",
".",
"timestamps",
"[",
"scores",
".",
"values",
".",
"index",
"(",
"max_refine_score",
")",
"]",
"anomaly",
"=",
"Anomaly",
"(",
"interval_start",
",",
"interval_end",
",",
"interval_series",
".",
"max",
"(",
")",
",",
"max_refine_timestamp",
")",
"anomalies",
".",
"append",
"(",
"anomaly",
")",
"self",
".",
"anomalies",
"=",
"anomalies"
] | Detect anomalies using a threshold on anomaly scores. | [
"Detect",
"anomalies",
"using",
"a",
"threshold",
"on",
"anomaly",
"scores",
"."
] | 42e4ab969b774ff98f902d064cb041556017f635 | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/anomaly_detector.py#L106-L145 |
3,043 | jpoullet2000/atlasclient | atlasclient/exceptions.py | handle_response | def handle_response(response):
"""
Given a requests.Response object, throw the appropriate exception, if applicable.
"""
# ignore valid responses
if response.status_code < 400:
return
cls = _status_to_exception_type.get(response.status_code, HttpError)
kwargs = {
'code': response.status_code,
'method': response.request.method,
'url': response.request.url,
'details': response.text,
}
if response.headers and 'retry-after' in response.headers:
kwargs['retry_after'] = response.headers.get('retry-after')
raise cls(**kwargs) | python | def handle_response(response):
"""
Given a requests.Response object, throw the appropriate exception, if applicable.
"""
# ignore valid responses
if response.status_code < 400:
return
cls = _status_to_exception_type.get(response.status_code, HttpError)
kwargs = {
'code': response.status_code,
'method': response.request.method,
'url': response.request.url,
'details': response.text,
}
if response.headers and 'retry-after' in response.headers:
kwargs['retry_after'] = response.headers.get('retry-after')
raise cls(**kwargs) | [
"def",
"handle_response",
"(",
"response",
")",
":",
"# ignore valid responses",
"if",
"response",
".",
"status_code",
"<",
"400",
":",
"return",
"cls",
"=",
"_status_to_exception_type",
".",
"get",
"(",
"response",
".",
"status_code",
",",
"HttpError",
")",
"kwargs",
"=",
"{",
"'code'",
":",
"response",
".",
"status_code",
",",
"'method'",
":",
"response",
".",
"request",
".",
"method",
",",
"'url'",
":",
"response",
".",
"request",
".",
"url",
",",
"'details'",
":",
"response",
".",
"text",
",",
"}",
"if",
"response",
".",
"headers",
"and",
"'retry-after'",
"in",
"response",
".",
"headers",
":",
"kwargs",
"[",
"'retry_after'",
"]",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'retry-after'",
")",
"raise",
"cls",
"(",
"*",
"*",
"kwargs",
")"
] | Given a requests.Response object, throw the appropriate exception, if applicable. | [
"Given",
"a",
"requests",
".",
"Response",
"object",
"throw",
"the",
"appropriate",
"exception",
"if",
"applicable",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/exceptions.py#L178-L199 |
3,044 | jpoullet2000/atlasclient | atlasclient/models.py | EntityBulkCollection.create | def create(self, data, **kwargs):
"""
        Create classifications for specific entity
"""
self.client.post(self.url, data=data) | python | def create(self, data, **kwargs):
"""
        Create classifications for specific entity
"""
self.client.post(self.url, data=data) | [
"def",
"create",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"client",
".",
"post",
"(",
"self",
".",
"url",
",",
"data",
"=",
"data",
")"
] | Create classifications for specific entity | [
"Create",
"classifitions",
"for",
"specific",
"entity"
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/models.py#L259-L263 |
3,045 | jpoullet2000/atlasclient | atlasclient/models.py | RelationshipGuid.create | def create(self, **kwargs):
"""Raise error since guid cannot be duplicated
"""
raise exceptions.MethodNotImplemented(method=self.create, url=self.url, details='GUID cannot be duplicated, to create a new GUID use the relationship resource') | python | def create(self, **kwargs):
"""Raise error since guid cannot be duplicated
"""
raise exceptions.MethodNotImplemented(method=self.create, url=self.url, details='GUID cannot be duplicated, to create a new GUID use the relationship resource') | [
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"exceptions",
".",
"MethodNotImplemented",
"(",
"method",
"=",
"self",
".",
"create",
",",
"url",
"=",
"self",
".",
"url",
",",
"details",
"=",
"'GUID cannot be duplicated, to create a new GUID use the relationship resource'",
")"
] | Raise error since guid cannot be duplicated | [
"Raise",
"error",
"since",
"guid",
"cannot",
"be",
"duplicated"
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/models.py#L706-L709 |
3,046 | jpoullet2000/atlasclient | atlasclient/utils.py | normalize_underscore_case | def normalize_underscore_case(name):
"""Normalize an underscore-separated descriptor to something more readable.
i.e. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components' becomes
'Host Components'
"""
normalized = name.lower()
normalized = re.sub(r'_(\w)',
lambda match: ' ' + match.group(1).upper(),
normalized)
return normalized[0].upper() + normalized[1:] | python | def normalize_underscore_case(name):
"""Normalize an underscore-separated descriptor to something more readable.
i.e. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components' becomes
'Host Components'
"""
normalized = name.lower()
normalized = re.sub(r'_(\w)',
lambda match: ' ' + match.group(1).upper(),
normalized)
return normalized[0].upper() + normalized[1:] | [
"def",
"normalize_underscore_case",
"(",
"name",
")",
":",
"normalized",
"=",
"name",
".",
"lower",
"(",
")",
"normalized",
"=",
"re",
".",
"sub",
"(",
"r'_(\\w)'",
",",
"lambda",
"match",
":",
"' '",
"+",
"match",
".",
"group",
"(",
"1",
")",
".",
"upper",
"(",
")",
",",
"normalized",
")",
"return",
"normalized",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"normalized",
"[",
"1",
":",
"]"
] | Normalize an underscore-separated descriptor to something more readable.
i.e. 'NAGIOS_SERVER' becomes 'Nagios Server', and 'host_components' becomes
'Host Components' | [
"Normalize",
"an",
"underscore",
"-",
"separated",
"descriptor",
"to",
"something",
"more",
"readable",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/utils.py#L32-L42 |
3,047 | jpoullet2000/atlasclient | atlasclient/utils.py | normalize_camel_case | def normalize_camel_case(name):
"""Normalize a camelCase descriptor to something more readable.
i.e. 'camelCase' or 'CamelCase' becomes 'Camel Case'
"""
normalized = re.sub('([a-z])([A-Z])',
lambda match: ' '.join([match.group(1), match.group(2)]),
name)
return normalized[0].upper() + normalized[1:] | python | def normalize_camel_case(name):
"""Normalize a camelCase descriptor to something more readable.
i.e. 'camelCase' or 'CamelCase' becomes 'Camel Case'
"""
normalized = re.sub('([a-z])([A-Z])',
lambda match: ' '.join([match.group(1), match.group(2)]),
name)
return normalized[0].upper() + normalized[1:] | [
"def",
"normalize_camel_case",
"(",
"name",
")",
":",
"normalized",
"=",
"re",
".",
"sub",
"(",
"'([a-z])([A-Z])'",
",",
"lambda",
"match",
":",
"' '",
".",
"join",
"(",
"[",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
"group",
"(",
"2",
")",
"]",
")",
",",
"name",
")",
"return",
"normalized",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"normalized",
"[",
"1",
":",
"]"
] | Normalize a camelCase descriptor to something more readable.
i.e. 'camelCase' or 'CamelCase' becomes 'Camel Case' | [
"Normalize",
"a",
"camelCase",
"descriptor",
"to",
"something",
"more",
"readable",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/utils.py#L45-L53 |
3,048 | jpoullet2000/atlasclient | atlasclient/utils.py | version_tuple | def version_tuple(version):
"""Convert a version string or tuple to a tuple.
Should be returned in the form: (major, minor, release).
"""
if isinstance(version, str):
return tuple(int(x) for x in version.split('.'))
elif isinstance(version, tuple):
return version
else:
raise ValueError("Invalid version: %s" % version) | python | def version_tuple(version):
"""Convert a version string or tuple to a tuple.
Should be returned in the form: (major, minor, release).
"""
if isinstance(version, str):
return tuple(int(x) for x in version.split('.'))
elif isinstance(version, tuple):
return version
else:
raise ValueError("Invalid version: %s" % version) | [
"def",
"version_tuple",
"(",
"version",
")",
":",
"if",
"isinstance",
"(",
"version",
",",
"str",
")",
":",
"return",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"version",
".",
"split",
"(",
"'.'",
")",
")",
"elif",
"isinstance",
"(",
"version",
",",
"tuple",
")",
":",
"return",
"version",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid version: %s\"",
"%",
"version",
")"
] | Convert a version string or tuple to a tuple.
Should be returned in the form: (major, minor, release). | [
"Convert",
"a",
"version",
"string",
"or",
"tuple",
"to",
"a",
"tuple",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/utils.py#L56-L66 |
3,049 | jpoullet2000/atlasclient | atlasclient/utils.py | version_str | def version_str(version):
"""Convert a version tuple or string to a string.
Should be returned in the form: major.minor.release
"""
if isinstance(version, str):
return version
elif isinstance(version, tuple):
return '.'.join([str(int(x)) for x in version])
else:
raise ValueError("Invalid version: %s" % version) | python | def version_str(version):
"""Convert a version tuple or string to a string.
Should be returned in the form: major.minor.release
"""
if isinstance(version, str):
return version
elif isinstance(version, tuple):
return '.'.join([str(int(x)) for x in version])
else:
raise ValueError("Invalid version: %s" % version) | [
"def",
"version_str",
"(",
"version",
")",
":",
"if",
"isinstance",
"(",
"version",
",",
"str",
")",
":",
"return",
"version",
"elif",
"isinstance",
"(",
"version",
",",
"tuple",
")",
":",
"return",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"int",
"(",
"x",
")",
")",
"for",
"x",
"in",
"version",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid version: %s\"",
"%",
"version",
")"
] | Convert a version tuple or string to a string.
Should be returned in the form: major.minor.release | [
"Convert",
"a",
"version",
"tuple",
"or",
"string",
"to",
"a",
"string",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/utils.py#L69-L79 |
3,050 | jpoullet2000/atlasclient | atlasclient/utils.py | generate_http_basic_token | def generate_http_basic_token(username, password):
"""
Generates a HTTP basic token from username and password
Returns a token string (not a byte)
"""
token = base64.b64encode('{}:{}'.format(username, password).encode('utf-8')).decode('utf-8')
return token | python | def generate_http_basic_token(username, password):
"""
Generates a HTTP basic token from username and password
Returns a token string (not a byte)
"""
token = base64.b64encode('{}:{}'.format(username, password).encode('utf-8')).decode('utf-8')
return token | [
"def",
"generate_http_basic_token",
"(",
"username",
",",
"password",
")",
":",
"token",
"=",
"base64",
".",
"b64encode",
"(",
"'{}:{}'",
".",
"format",
"(",
"username",
",",
"password",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"token"
] | Generates a HTTP basic token from username and password
Returns a token string (not a byte) | [
"Generates",
"a",
"HTTP",
"basic",
"token",
"from",
"username",
"and",
"password"
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/utils.py#L81-L88 |
3,051 | jpoullet2000/atlasclient | atlasclient/base.py | GeneratedIdentifierMixin.identifier | def identifier(self):
"""These models have server-generated identifiers.
If we don't already have it in memory, then assume that it has not
yet been generated.
"""
if self.primary_key not in self._data:
return 'Unknown'
return str(self._data[self.primary_key]) | python | def identifier(self):
"""These models have server-generated identifiers.
If we don't already have it in memory, then assume that it has not
yet been generated.
"""
if self.primary_key not in self._data:
return 'Unknown'
return str(self._data[self.primary_key]) | [
"def",
"identifier",
"(",
"self",
")",
":",
"if",
"self",
".",
"primary_key",
"not",
"in",
"self",
".",
"_data",
":",
"return",
"'Unknown'",
"return",
"str",
"(",
"self",
".",
"_data",
"[",
"self",
".",
"primary_key",
"]",
")"
] | These models have server-generated identifiers.
If we don't already have it in memory, then assume that it has not
yet been generated. | [
"These",
"models",
"have",
"server",
"-",
"generated",
"identifiers",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L79-L87 |
3,052 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.url | def url(self):
"""The url for this collection."""
if self.parent is None:
# TODO: differing API Versions?
pieces = [self.client.base_url, 'api', 'atlas', 'v2']
else:
pieces = [self.parent.url]
pieces.append(self.model_class.path)
return '/'.join(pieces) | python | def url(self):
"""The url for this collection."""
if self.parent is None:
# TODO: differing API Versions?
pieces = [self.client.base_url, 'api', 'atlas', 'v2']
else:
pieces = [self.parent.url]
pieces.append(self.model_class.path)
return '/'.join(pieces) | [
"def",
"url",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent",
"is",
"None",
":",
"# TODO: differing API Versions?",
"pieces",
"=",
"[",
"self",
".",
"client",
".",
"base_url",
",",
"'api'",
",",
"'atlas'",
",",
"'v2'",
"]",
"else",
":",
"pieces",
"=",
"[",
"self",
".",
"parent",
".",
"url",
"]",
"pieces",
".",
"append",
"(",
"self",
".",
"model_class",
".",
"path",
")",
"return",
"'/'",
".",
"join",
"(",
"pieces",
")"
] | The url for this collection. | [
"The",
"url",
"for",
"this",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L230-L239 |
3,053 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.inflate | def inflate(self):
"""Load the collection from the server, if necessary."""
if not self._is_inflated:
self.check_version()
for k, v in self._filter.items():
if '[' in v:
self._filter[k] = ast.literal_eval(v)
self.load(self.client.get(self.url, params=self._filter))
self._is_inflated = True
return self | python | def inflate(self):
"""Load the collection from the server, if necessary."""
if not self._is_inflated:
self.check_version()
for k, v in self._filter.items():
if '[' in v:
self._filter[k] = ast.literal_eval(v)
self.load(self.client.get(self.url, params=self._filter))
self._is_inflated = True
return self | [
"def",
"inflate",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_is_inflated",
":",
"self",
".",
"check_version",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_filter",
".",
"items",
"(",
")",
":",
"if",
"'['",
"in",
"v",
":",
"self",
".",
"_filter",
"[",
"k",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"v",
")",
"self",
".",
"load",
"(",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"url",
",",
"params",
"=",
"self",
".",
"_filter",
")",
")",
"self",
".",
"_is_inflated",
"=",
"True",
"return",
"self"
] | Load the collection from the server, if necessary. | [
"Load",
"the",
"collection",
"from",
"the",
"server",
"if",
"necessary",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L241-L251 |
3,054 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.load | def load(self, response):
"""Parse the GET response for the collection.
This operates as a lazy-loader, meaning that the data are only downloaded
        from the server if they are not already loaded.
Collection items are loaded sequentially.
In some rare cases, a collection can have an asynchronous request
triggered. For those cases, we handle it here.
"""
self._models = []
if isinstance(response, dict):
for key in response.keys():
model = self.model_class(self, href='')
model.load(response[key])
self._models.append(model)
else:
for item in response:
model = self.model_class(self,
href=item.get('href'))
model.load(item)
self._models.append(model) | python | def load(self, response):
"""Parse the GET response for the collection.
This operates as a lazy-loader, meaning that the data are only downloaded
        from the server if they are not already loaded.
Collection items are loaded sequentially.
In some rare cases, a collection can have an asynchronous request
triggered. For those cases, we handle it here.
"""
self._models = []
if isinstance(response, dict):
for key in response.keys():
model = self.model_class(self, href='')
model.load(response[key])
self._models.append(model)
else:
for item in response:
model = self.model_class(self,
href=item.get('href'))
model.load(item)
self._models.append(model) | [
"def",
"load",
"(",
"self",
",",
"response",
")",
":",
"self",
".",
"_models",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"response",
",",
"dict",
")",
":",
"for",
"key",
"in",
"response",
".",
"keys",
"(",
")",
":",
"model",
"=",
"self",
".",
"model_class",
"(",
"self",
",",
"href",
"=",
"''",
")",
"model",
".",
"load",
"(",
"response",
"[",
"key",
"]",
")",
"self",
".",
"_models",
".",
"append",
"(",
"model",
")",
"else",
":",
"for",
"item",
"in",
"response",
":",
"model",
"=",
"self",
".",
"model_class",
"(",
"self",
",",
"href",
"=",
"item",
".",
"get",
"(",
"'href'",
")",
")",
"model",
".",
"load",
"(",
"item",
")",
"self",
".",
"_models",
".",
"append",
"(",
"model",
")"
] | Parse the GET response for the collection.
This operates as a lazy-loader, meaning that the data are only downloaded
        from the server if they are not already loaded.
Collection items are loaded sequentially.
In some rare cases, a collection can have an asynchronous request
triggered. For those cases, we handle it here. | [
"Parse",
"the",
"GET",
"response",
"for",
"the",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L254-L275 |
3,055 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.create | def create(self, *args, **kwargs):
"""Add a resource to this collection."""
href = self.url
if len(args) == 1:
kwargs[self.model_class.primary_key] = args[0]
href = '/'.join([href, args[0]])
model = self.model_class(self,
href=href.replace('classifications/', 'classification/'),
data=kwargs)
model.create(**kwargs)
self._models.append(model)
return model | python | def create(self, *args, **kwargs):
"""Add a resource to this collection."""
href = self.url
if len(args) == 1:
kwargs[self.model_class.primary_key] = args[0]
href = '/'.join([href, args[0]])
model = self.model_class(self,
href=href.replace('classifications/', 'classification/'),
data=kwargs)
model.create(**kwargs)
self._models.append(model)
return model | [
"def",
"create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"href",
"=",
"self",
".",
"url",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"kwargs",
"[",
"self",
".",
"model_class",
".",
"primary_key",
"]",
"=",
"args",
"[",
"0",
"]",
"href",
"=",
"'/'",
".",
"join",
"(",
"[",
"href",
",",
"args",
"[",
"0",
"]",
"]",
")",
"model",
"=",
"self",
".",
"model_class",
"(",
"self",
",",
"href",
"=",
"href",
".",
"replace",
"(",
"'classifications/'",
",",
"'classification/'",
")",
",",
"data",
"=",
"kwargs",
")",
"model",
".",
"create",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"_models",
".",
"append",
"(",
"model",
")",
"return",
"model"
] | Add a resource to this collection. | [
"Add",
"a",
"resource",
"to",
"this",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L277-L288 |
3,056 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.update | def update(self, **kwargs):
"""Update all resources in this collection."""
self.inflate()
for model in self._models:
model.update(**kwargs)
return self | python | def update(self, **kwargs):
"""Update all resources in this collection."""
self.inflate()
for model in self._models:
model.update(**kwargs)
return self | [
"def",
"update",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"inflate",
"(",
")",
"for",
"model",
"in",
"self",
".",
"_models",
":",
"model",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self"
] | Update all resources in this collection. | [
"Update",
"all",
"resources",
"in",
"this",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L290-L295 |
3,057 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.delete | def delete(self, **kwargs):
"""Delete all resources in this collection."""
self.inflate()
for model in self._models:
model.delete(**kwargs)
return | python | def delete(self, **kwargs):
"""Delete all resources in this collection."""
self.inflate()
for model in self._models:
model.delete(**kwargs)
return | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"inflate",
"(",
")",
"for",
"model",
"in",
"self",
".",
"_models",
":",
"model",
".",
"delete",
"(",
"*",
"*",
"kwargs",
")",
"return"
] | Delete all resources in this collection. | [
"Delete",
"all",
"resources",
"in",
"this",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L297-L302 |
3,058 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModelCollection.wait | def wait(self, **kwargs):
"""Wait until any pending asynchronous requests are finished for this collection."""
if self.request:
self.request.wait(**kwargs)
self.request = None
return self.inflate() | python | def wait(self, **kwargs):
"""Wait until any pending asynchronous requests are finished for this collection."""
if self.request:
self.request.wait(**kwargs)
self.request = None
return self.inflate() | [
"def",
"wait",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"request",
":",
"self",
".",
"request",
".",
"wait",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"request",
"=",
"None",
"return",
"self",
".",
"inflate",
"(",
")"
] | Wait until any pending asynchronous requests are finished for this collection. | [
"Wait",
"until",
"any",
"pending",
"asynchronous",
"requests",
"are",
"finished",
"for",
"this",
"collection",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L305-L310 |
3,059 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModel.url | def url(self):
"""Gets the url for the resource this model represents.
It will just use the 'href' passed in to the constructor if that exists.
        Otherwise, it will generate it based on the collection's url and the
model's identifier.
"""
if self._href is not None:
return self._href
if self.identifier:
# for some reason atlas does not use classifications here in the path when considering one classification
path = '/'.join([self.parent.url.replace('classifications/', 'classficiation/'), self.identifier])
return path
raise exceptions.ClientError("Not able to determine object URL") | python | def url(self):
"""Gets the url for the resource this model represents.
It will just use the 'href' passed in to the constructor if that exists.
        Otherwise, it will generate it based on the collection's url and the
model's identifier.
"""
if self._href is not None:
return self._href
if self.identifier:
# for some reason atlas does not use classifications here in the path when considering one classification
path = '/'.join([self.parent.url.replace('classifications/', 'classficiation/'), self.identifier])
return path
raise exceptions.ClientError("Not able to determine object URL") | [
"def",
"url",
"(",
"self",
")",
":",
"if",
"self",
".",
"_href",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_href",
"if",
"self",
".",
"identifier",
":",
"# for some reason atlas does not use classifications here in the path when considering one classification",
"path",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"parent",
".",
"url",
".",
"replace",
"(",
"'classifications/'",
",",
"'classficiation/'",
")",
",",
"self",
".",
"identifier",
"]",
")",
"return",
"path",
"raise",
"exceptions",
".",
"ClientError",
"(",
"\"Not able to determine object URL\"",
")"
] | Gets the url for the resource this model represents.
It will just use the 'href' passed in to the constructor if that exists.
        Otherwise, it will generate it based on the collection's url and the
model's identifier. | [
"Gets",
"the",
"url",
"for",
"the",
"resource",
"this",
"model",
"represents",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L568-L581 |
3,060 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModel.inflate | def inflate(self):
"""Load the resource from the server, if not already loaded."""
if not self._is_inflated:
if self._is_inflating:
# catch infinite recursion when attempting to inflate
# an object that doesn't have enough data to inflate
msg = ("There is not enough data to inflate this object. "
"Need either an href: {} or a {}: {}")
msg = msg.format(self._href, self.primary_key, self._data.get(self.primary_key))
raise exceptions.ClientError(msg)
self._is_inflating = True
try:
params = self.searchParameters if hasattr(self, 'searchParameters') else {}
# To keep the method same as the original request. The default is GET
self.load(self.client.request(self.method, self.url, **params))
except Exception:
self.load(self._data)
self._is_inflated = True
self._is_inflating = False
return self | python | def inflate(self):
"""Load the resource from the server, if not already loaded."""
if not self._is_inflated:
if self._is_inflating:
# catch infinite recursion when attempting to inflate
# an object that doesn't have enough data to inflate
msg = ("There is not enough data to inflate this object. "
"Need either an href: {} or a {}: {}")
msg = msg.format(self._href, self.primary_key, self._data.get(self.primary_key))
raise exceptions.ClientError(msg)
self._is_inflating = True
try:
params = self.searchParameters if hasattr(self, 'searchParameters') else {}
# To keep the method same as the original request. The default is GET
self.load(self.client.request(self.method, self.url, **params))
except Exception:
self.load(self._data)
self._is_inflated = True
self._is_inflating = False
return self | [
"def",
"inflate",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_is_inflated",
":",
"if",
"self",
".",
"_is_inflating",
":",
"# catch infinite recursion when attempting to inflate",
"# an object that doesn't have enough data to inflate",
"msg",
"=",
"(",
"\"There is not enough data to inflate this object. \"",
"\"Need either an href: {} or a {}: {}\"",
")",
"msg",
"=",
"msg",
".",
"format",
"(",
"self",
".",
"_href",
",",
"self",
".",
"primary_key",
",",
"self",
".",
"_data",
".",
"get",
"(",
"self",
".",
"primary_key",
")",
")",
"raise",
"exceptions",
".",
"ClientError",
"(",
"msg",
")",
"self",
".",
"_is_inflating",
"=",
"True",
"try",
":",
"params",
"=",
"self",
".",
"searchParameters",
"if",
"hasattr",
"(",
"self",
",",
"'searchParameters'",
")",
"else",
"{",
"}",
"# To keep the method same as the original request. The default is GET",
"self",
".",
"load",
"(",
"self",
".",
"client",
".",
"request",
"(",
"self",
".",
"method",
",",
"self",
".",
"url",
",",
"*",
"*",
"params",
")",
")",
"except",
"Exception",
":",
"self",
".",
"load",
"(",
"self",
".",
"_data",
")",
"self",
".",
"_is_inflated",
"=",
"True",
"self",
".",
"_is_inflating",
"=",
"False",
"return",
"self"
] | Load the resource from the server, if not already loaded. | [
"Load",
"the",
"resource",
"from",
"the",
"server",
"if",
"not",
"already",
"loaded",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L583-L605 |
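The inflate() record above describes a lazy-load guard: nothing is fetched until the cached data is insufficient, and a flag prevents infinite recursion while a fetch is in flight. A minimal self-contained sketch of that pattern follows; the LazyResource class and its stub _fetch() are illustrative stand-ins, not part of the atlasclient API.
# Illustrative sketch of the inflate() guard pattern; not atlasclient code.
class LazyResource(object):
    def __init__(self, href=None, data=None):
        self._href = href
        self._data = data or {}
        self._is_inflated = False
        self._is_inflating = False

    def _fetch(self):
        # stand-in for client.request(method, url); returns a dict
        return {'name': 'example', 'status': 'ACTIVE'}

    def inflate(self):
        if not self._is_inflated:
            if self._is_inflating:
                raise RuntimeError("not enough data to inflate this object")
            self._is_inflating = True
            try:
                self._data.update(self._fetch())
            except Exception:
                pass  # fall back to whatever local data we already had
            self._is_inflated = True
            self._is_inflating = False
        return self

resource = LazyResource(href='http://example.invalid/entity/1')
print(resource.inflate()._data)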
3,061 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModel.load | def load(self, response):
"""The load method parses the raw JSON response from the server.
Most models are not returned in the main response body, but in a key
such as 'entity', defined by the 'data_key' attribute on the class.
Also, related objects are often returned and can be used to pre-cache
related model objects without having to contact the server again. This
method handles all of those cases.
Also, if a request has triggered a background operation, the request
details are returned in a 'Requests' section. We need to store that
request object so we can poll it until completion.
"""
if 'href' in response:
self._href = response.pop('href')
if self.data_key and self.data_key in response:
self._data.update(response.pop(self.data_key))
# preload related object collections, if received
for rel in [x for x in self.relationships if x in response and response[x]]:
rel_class = self.relationships[rel]
collection = rel_class.collection_class(
self.client, rel_class, parent=self
)
self._relationship_cache[rel] = collection(response[rel])
else:
self._data.update(response) | python | def load(self, response):
"""The load method parses the raw JSON response from the server.
Most models are not returned in the main response body, but in a key
such as 'entity', defined by the 'data_key' attribute on the class.
Also, related objects are often returned and can be used to pre-cache
related model objects without having to contact the server again. This
method handles all of those cases.
Also, if a request has triggered a background operation, the request
details are returned in a 'Requests' section. We need to store that
request object so we can poll it until completion.
"""
if 'href' in response:
self._href = response.pop('href')
if self.data_key and self.data_key in response:
self._data.update(response.pop(self.data_key))
# preload related object collections, if received
for rel in [x for x in self.relationships if x in response and response[x]]:
rel_class = self.relationships[rel]
collection = rel_class.collection_class(
self.client, rel_class, parent=self
)
self._relationship_cache[rel] = collection(response[rel])
else:
self._data.update(response) | [
"def",
"load",
"(",
"self",
",",
"response",
")",
":",
"if",
"'href'",
"in",
"response",
":",
"self",
".",
"_href",
"=",
"response",
".",
"pop",
"(",
"'href'",
")",
"if",
"self",
".",
"data_key",
"and",
"self",
".",
"data_key",
"in",
"response",
":",
"self",
".",
"_data",
".",
"update",
"(",
"response",
".",
"pop",
"(",
"self",
".",
"data_key",
")",
")",
"# preload related object collections, if received",
"for",
"rel",
"in",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"relationships",
"if",
"x",
"in",
"response",
"and",
"response",
"[",
"x",
"]",
"]",
":",
"rel_class",
"=",
"self",
".",
"relationships",
"[",
"rel",
"]",
"collection",
"=",
"rel_class",
".",
"collection_class",
"(",
"self",
".",
"client",
",",
"rel_class",
",",
"parent",
"=",
"self",
")",
"self",
".",
"_relationship_cache",
"[",
"rel",
"]",
"=",
"collection",
"(",
"response",
"[",
"rel",
"]",
")",
"else",
":",
"self",
".",
"_data",
".",
"update",
"(",
"response",
")"
] | The load method parses the raw JSON response from the server.
Most models are not returned in the main response body, but in a key
such as 'entity', defined by the 'data_key' attribute on the class.
Also, related objects are often returned and can be used to pre-cache
related model objects without having to contact the server again. This
method handles all of those cases.
Also, if a request has triggered a background operation, the request
details are returned in a 'Requests' section. We need to store that
request object so we can poll it until completion. | [
"The",
"load",
"method",
"parses",
"the",
"raw",
"JSON",
"response",
"from",
"the",
"server",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L623-L648 |
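load() above unwraps the payload stored under the class's data_key and caches any related collections it finds in the response. A small stand-alone sketch of that unwrapping step is shown below; the response dict and key names are invented for illustration.
# Illustrative sketch of data_key unwrapping; names are hypothetical.
def unwrap(response, data_key='entity', relationships=('classifications',)):
    data, related = {}, {}
    href = response.pop('href', None)
    if data_key in response:
        data.update(response.pop(data_key))
        for rel in relationships:
            if response.get(rel):
                related[rel] = response[rel]
    else:
        data.update(response)
    return href, data, related

resp = {'href': 'http://example.invalid/e/1',
        'entity': {'guid': 'abc', 'typeName': 'hive_table'},
        'classifications': [{'typeName': 'PII'}]}
print(unwrap(resp))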
3,062 | jpoullet2000/atlasclient | atlasclient/base.py | QueryableModel.delete | def delete(self, **kwargs):
"""Delete a resource by issuing a DELETE http request against it."""
self.method = 'delete'
if len(kwargs) > 0:
self.load(self.client.delete(self.url, params=kwargs))
else:
self.load(self.client.delete(self.url))
self.parent.remove(self)
return | python | def delete(self, **kwargs):
"""Delete a resource by issuing a DELETE http request against it."""
self.method = 'delete'
if len(kwargs) > 0:
self.load(self.client.delete(self.url, params=kwargs))
else:
self.load(self.client.delete(self.url))
self.parent.remove(self)
return | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"method",
"=",
"'delete'",
"if",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"self",
".",
"load",
"(",
"self",
".",
"client",
".",
"delete",
"(",
"self",
".",
"url",
",",
"params",
"=",
"kwargs",
")",
")",
"else",
":",
"self",
".",
"load",
"(",
"self",
".",
"client",
".",
"delete",
"(",
"self",
".",
"url",
")",
")",
"self",
".",
"parent",
".",
"remove",
"(",
"self",
")",
"return"
] | Delete a resource by issuing a DELETE http request against it. | [
"Delete",
"a",
"resource",
"by",
"issuing",
"a",
"DELETE",
"http",
"request",
"against",
"it",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L686-L694 |
3,063 | jpoullet2000/atlasclient | atlasclient/events.py | publish | def publish(obj, event, event_state, **kwargs):
"""Publish an event from an object.
This is a really basic pub-sub event system to allow for tracking progress
on methods externally. It fires the events for the first match it finds in
the object hierarchy, going most specific to least. If no match is found
for the exact event+event_state, the most specific event+ANY is fired
instead.
Multiple callbacks can be bound to the event+event_state if desired. All
will be fired in the order they were registered.
"""
# short-circuit if nothing is listening
if len(EVENT_HANDLERS) == 0:
return
if inspect.isclass(obj):
pub_cls = obj
else:
pub_cls = obj.__class__
potential = [x.__name__ for x in inspect.getmro(pub_cls)]
# if we don't find a match for this event/event_state we fire the events
# for this event/ANY instead for the closest match
fallbacks = None
callbacks = []
for cls in potential:
event_key = '.'.join([cls, event, event_state])
backup_key = '.'.join([cls, event, states.ANY])
if event_key in EVENT_HANDLERS:
callbacks = EVENT_HANDLERS[event_key]
break
elif fallbacks is None and backup_key in EVENT_HANDLERS:
fallbacks = EVENT_HANDLERS[backup_key]
if fallbacks is not None:
callbacks = fallbacks
for callback in callbacks:
callback(obj, **kwargs)
return | python | def publish(obj, event, event_state, **kwargs):
"""Publish an event from an object.
This is a really basic pub-sub event system to allow for tracking progress
on methods externally. It fires the events for the first match it finds in
the object hierarchy, going most specific to least. If no match is found
for the exact event+event_state, the most specific event+ANY is fired
instead.
Multiple callbacks can be bound to the event+event_state if desired. All
will be fired in the order they were registered.
"""
# short-circuit if nothing is listening
if len(EVENT_HANDLERS) == 0:
return
if inspect.isclass(obj):
pub_cls = obj
else:
pub_cls = obj.__class__
potential = [x.__name__ for x in inspect.getmro(pub_cls)]
# if we don't find a match for this event/event_state we fire the events
# for this event/ANY instead for the closest match
fallbacks = None
callbacks = []
for cls in potential:
event_key = '.'.join([cls, event, event_state])
backup_key = '.'.join([cls, event, states.ANY])
if event_key in EVENT_HANDLERS:
callbacks = EVENT_HANDLERS[event_key]
break
elif fallbacks is None and backup_key in EVENT_HANDLERS:
fallbacks = EVENT_HANDLERS[backup_key]
if fallbacks is not None:
callbacks = fallbacks
for callback in callbacks:
callback(obj, **kwargs)
return | [
"def",
"publish",
"(",
"obj",
",",
"event",
",",
"event_state",
",",
"*",
"*",
"kwargs",
")",
":",
"# short-circuit if nothing is listening",
"if",
"len",
"(",
"EVENT_HANDLERS",
")",
"==",
"0",
":",
"return",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"pub_cls",
"=",
"obj",
"else",
":",
"pub_cls",
"=",
"obj",
".",
"__class__",
"potential",
"=",
"[",
"x",
".",
"__name__",
"for",
"x",
"in",
"inspect",
".",
"getmro",
"(",
"pub_cls",
")",
"]",
"# if we don't find a match for this event/event_state we fire the events",
"# for this event/ANY instead for the closest match",
"fallbacks",
"=",
"None",
"callbacks",
"=",
"[",
"]",
"for",
"cls",
"in",
"potential",
":",
"event_key",
"=",
"'.'",
".",
"join",
"(",
"[",
"cls",
",",
"event",
",",
"event_state",
"]",
")",
"backup_key",
"=",
"'.'",
".",
"join",
"(",
"[",
"cls",
",",
"event",
",",
"states",
".",
"ANY",
"]",
")",
"if",
"event_key",
"in",
"EVENT_HANDLERS",
":",
"callbacks",
"=",
"EVENT_HANDLERS",
"[",
"event_key",
"]",
"break",
"elif",
"fallbacks",
"is",
"None",
"and",
"backup_key",
"in",
"EVENT_HANDLERS",
":",
"fallbacks",
"=",
"EVENT_HANDLERS",
"[",
"backup_key",
"]",
"if",
"fallbacks",
"is",
"not",
"None",
":",
"callbacks",
"=",
"fallbacks",
"for",
"callback",
"in",
"callbacks",
":",
"callback",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"return"
] | Publish an event from an object.
This is a really basic pub-sub event system to allow for tracking progress
on methods externally. It fires the events for the first match it finds in
the object hierarchy, going most specific to least. If no match is found
for the exact event+event_state, the most specific event+ANY is fired
instead.
Multiple callbacks can be bound to the event+event_state if desired. All
will be fired in the order they were registered. | [
"Publish",
"an",
"event",
"from",
"an",
"object",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/events.py#L41-L81 |
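publish() above walks the class's MRO looking for the most specific '<Class>.<event>.<state>' key that has handlers, falling back to the ANY state for the closest class. The sketch below reproduces just that key-resolution step with a toy handler registry; the class names and states are invented for the example.
import inspect

# Toy registry; in the real module this is EVENT_HANDLERS keyed by
# '<ClassName>.<event>.<event_state>'.
HANDLERS = {'Base.create.ANY': [lambda obj: print('Base ANY handler')],
            'Child.create.done': [lambda obj: print('Child done handler')]}

class Base(object):
    pass

class Child(Base):
    pass

def resolve(obj, event, state):
    fallbacks = None
    for cls in (c.__name__ for c in inspect.getmro(type(obj))):
        exact = '.'.join([cls, event, state])
        backup = '.'.join([cls, event, 'ANY'])
        if exact in HANDLERS:
            return HANDLERS[exact]
        if fallbacks is None and backup in HANDLERS:
            fallbacks = HANDLERS[backup]
    return fallbacks or []

for cb in resolve(Child(), 'create', 'done'):
    cb(None)      # most specific match wins
for cb in resolve(Child(), 'create', 'started'):
    cb(None)      # falls back to the Base ANY handler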
3,064 | jpoullet2000/atlasclient | atlasclient/events.py | subscribe | def subscribe(obj, event, callback, event_state=None):
"""Subscribe an event from an class.
Subclasses of the class/object will also fire events for this class,
unless a more specific event exists.
"""
if inspect.isclass(obj):
cls = obj.__name__
else:
cls = obj.__class__.__name__
if event_state is None:
event_state = states.ANY
event_key = '.'.join([cls, event, event_state])
if event_key not in EVENT_HANDLERS:
EVENT_HANDLERS[event_key] = []
EVENT_HANDLERS[event_key].append(callback)
return | python | def subscribe(obj, event, callback, event_state=None):
"""Subscribe an event from an class.
Subclasses of the class/object will also fire events for this class,
unless a more specific event exists.
"""
if inspect.isclass(obj):
cls = obj.__name__
else:
cls = obj.__class__.__name__
if event_state is None:
event_state = states.ANY
event_key = '.'.join([cls, event, event_state])
if event_key not in EVENT_HANDLERS:
EVENT_HANDLERS[event_key] = []
EVENT_HANDLERS[event_key].append(callback)
return | [
"def",
"subscribe",
"(",
"obj",
",",
"event",
",",
"callback",
",",
"event_state",
"=",
"None",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"cls",
"=",
"obj",
".",
"__name__",
"else",
":",
"cls",
"=",
"obj",
".",
"__class__",
".",
"__name__",
"if",
"event_state",
"is",
"None",
":",
"event_state",
"=",
"states",
".",
"ANY",
"event_key",
"=",
"'.'",
".",
"join",
"(",
"[",
"cls",
",",
"event",
",",
"event_state",
"]",
")",
"if",
"event_key",
"not",
"in",
"EVENT_HANDLERS",
":",
"EVENT_HANDLERS",
"[",
"event_key",
"]",
"=",
"[",
"]",
"EVENT_HANDLERS",
"[",
"event_key",
"]",
".",
"append",
"(",
"callback",
")",
"return"
] | Subscribe an event from a class.
Subclasses of the class/object will also fire events for this class,
unless a more specific event exists. | [
"Subscribe",
"an",
"event",
"from",
"an",
"class",
"."
] | 4548b441143ebf7fc4075d113db5ca5a23e0eed2 | https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/events.py#L84-L103 |
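Taken together, subscribe() and publish() give a minimal pub-sub flow: register a callback for a class and event (optionally a specific state), then fire it. The snippet below sketches that flow against a simplified in-module registry rather than importing atlasclient, so all names here are illustrative only.
# Simplified stand-in for atlasclient.events; illustrative only.
EVENT_HANDLERS = {}
ANY = 'ANY'

def subscribe(cls, event, callback, event_state=None):
    key = '.'.join([cls.__name__, event, event_state or ANY])
    EVENT_HANDLERS.setdefault(key, []).append(callback)

def publish(obj, event, event_state):
    exact = '.'.join([type(obj).__name__, event, event_state])
    fallback = '.'.join([type(obj).__name__, event, ANY])
    for key in (exact, fallback):
        if key in EVENT_HANDLERS:
            for cb in EVENT_HANDLERS[key]:
                cb(obj)
            break

class Cluster(object):
    pass

subscribe(Cluster, 'restart', lambda obj: print('restart seen'))
publish(Cluster(), 'restart', 'started')   # fires the ANY-state callback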
3,065 | psolin/cleanco | cleanco.py | cleanco.clean_name | def clean_name(self, suffix=True, prefix=False, middle=False, multi=False):
"return cleared version of the business name"
name = self.business_name
# Run it through the string_stripper once more
name = self.string_stripper(name)
loname = name.lower()
# return name without suffixed/prefixed/middle type term(s)
for item in suffix_sort:
if suffix:
if loname.endswith(" " + item):
start = loname.find(item)
end = len(item)
name = name[0:-end-1]
name = self.string_stripper(name)
if multi==False:
break
if prefix:
if loname.startswith(item+' '):
name = name[len(item)+1:]
if multi==False:
break
if middle:
term = ' ' + item + ' '
if term in loname:
start = loname.find(term)
end = start + len(term)
name = name[:start] + " " + name[end:]
if multi==False:
break
return self.string_stripper(name) | python | def clean_name(self, suffix=True, prefix=False, middle=False, multi=False):
"return cleared version of the business name"
name = self.business_name
# Run it through the string_stripper once more
name = self.string_stripper(name)
loname = name.lower()
# return name without suffixed/prefixed/middle type term(s)
for item in suffix_sort:
if suffix:
if loname.endswith(" " + item):
start = loname.find(item)
end = len(item)
name = name[0:-end-1]
name = self.string_stripper(name)
if multi==False:
break
if prefix:
if loname.startswith(item+' '):
name = name[len(item)+1:]
if multi==False:
break
if middle:
term = ' ' + item + ' '
if term in loname:
start = loname.find(term)
end = start + len(term)
name = name[:start] + " " + name[end:]
if multi==False:
break
return self.string_stripper(name) | [
"def",
"clean_name",
"(",
"self",
",",
"suffix",
"=",
"True",
",",
"prefix",
"=",
"False",
",",
"middle",
"=",
"False",
",",
"multi",
"=",
"False",
")",
":",
"name",
"=",
"self",
".",
"business_name",
"# Run it through the string_stripper once more",
"name",
"=",
"self",
".",
"string_stripper",
"(",
"name",
")",
"loname",
"=",
"name",
".",
"lower",
"(",
")",
"# return name without suffixed/prefixed/middle type term(s)",
"for",
"item",
"in",
"suffix_sort",
":",
"if",
"suffix",
":",
"if",
"loname",
".",
"endswith",
"(",
"\" \"",
"+",
"item",
")",
":",
"start",
"=",
"loname",
".",
"find",
"(",
"item",
")",
"end",
"=",
"len",
"(",
"item",
")",
"name",
"=",
"name",
"[",
"0",
":",
"-",
"end",
"-",
"1",
"]",
"name",
"=",
"self",
".",
"string_stripper",
"(",
"name",
")",
"if",
"multi",
"==",
"False",
":",
"break",
"if",
"prefix",
":",
"if",
"loname",
".",
"startswith",
"(",
"item",
"+",
"' '",
")",
":",
"name",
"=",
"name",
"[",
"len",
"(",
"item",
")",
"+",
"1",
":",
"]",
"if",
"multi",
"==",
"False",
":",
"break",
"if",
"middle",
":",
"term",
"=",
"' '",
"+",
"item",
"+",
"' '",
"if",
"term",
"in",
"loname",
":",
"start",
"=",
"loname",
".",
"find",
"(",
"term",
")",
"end",
"=",
"start",
"+",
"len",
"(",
"term",
")",
"name",
"=",
"name",
"[",
":",
"start",
"]",
"+",
"\" \"",
"+",
"name",
"[",
"end",
":",
"]",
"if",
"multi",
"==",
"False",
":",
"break",
"return",
"self",
".",
"string_stripper",
"(",
"name",
")"
] | return cleared version of the business name | [
"return",
"cleared",
"version",
"of",
"the",
"business",
"name"
] | 56ff6542c339df625adcaf7f4ed4c81035fd575a | https://github.com/psolin/cleanco/blob/56ff6542c339df625adcaf7f4ed4c81035fd575a/cleanco.py#L70-L104 |
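The clean_name() record above strips legal suffixes (and optionally prefixes or middle terms) from a company name. Assuming the class-based cleanco API shown in this record, usage looks roughly like the following; the example company name and the exact cleaned output are illustrative.
from cleanco import cleanco  # assumes the class-based cleanco API from this record

raw = "Some Big Pharma, Ltd."
x = cleanco(raw)
print(x.clean_name())                                      # expected to drop the trailing "Ltd"
print(x.clean_name(prefix=True, middle=True, multi=True))  # strip matching terms anywhere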
3,066 | mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph._add_nodes | def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n | python | def _add_nodes(self):
"""Add all atoms in the SMARTS string as nodes in the graph."""
for n, atom in enumerate(self.ast.select('atom')):
self.add_node(n, atom=atom)
self._atom_indices[id(atom)] = n | [
"def",
"_add_nodes",
"(",
"self",
")",
":",
"for",
"n",
",",
"atom",
"in",
"enumerate",
"(",
"self",
".",
"ast",
".",
"select",
"(",
"'atom'",
")",
")",
":",
"self",
".",
"add_node",
"(",
"n",
",",
"atom",
"=",
"atom",
")",
"self",
".",
"_atom_indices",
"[",
"id",
"(",
"atom",
")",
"]",
"=",
"n"
] | Add all atoms in the SMARTS string as nodes in the graph. | [
"Add",
"all",
"atoms",
"in",
"the",
"SMARTS",
"string",
"as",
"nodes",
"in",
"the",
"graph",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L51-L55 |
3,067 | mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph._add_edges | def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk) | python | def _add_edges(self, ast_node, trunk=None):
""""Add all bonds in the SMARTS string as edges in the graph."""
atom_indices = self._atom_indices
for atom in ast_node.tail:
if atom.head == 'atom':
atom_idx = atom_indices[id(atom)]
if atom.is_first_kid and atom.parent().head == 'branch':
trunk_idx = atom_indices[id(trunk)]
self.add_edge(atom_idx, trunk_idx)
if not atom.is_last_kid:
if atom.next_kid.head == 'atom':
next_idx = atom_indices[id(atom.next_kid)]
self.add_edge(atom_idx, next_idx)
elif atom.next_kid.head == 'branch':
trunk = atom
else: # We traveled through the whole branch.
return
elif atom.head == 'branch':
self._add_edges(atom, trunk) | [
"def",
"_add_edges",
"(",
"self",
",",
"ast_node",
",",
"trunk",
"=",
"None",
")",
":",
"atom_indices",
"=",
"self",
".",
"_atom_indices",
"for",
"atom",
"in",
"ast_node",
".",
"tail",
":",
"if",
"atom",
".",
"head",
"==",
"'atom'",
":",
"atom_idx",
"=",
"atom_indices",
"[",
"id",
"(",
"atom",
")",
"]",
"if",
"atom",
".",
"is_first_kid",
"and",
"atom",
".",
"parent",
"(",
")",
".",
"head",
"==",
"'branch'",
":",
"trunk_idx",
"=",
"atom_indices",
"[",
"id",
"(",
"trunk",
")",
"]",
"self",
".",
"add_edge",
"(",
"atom_idx",
",",
"trunk_idx",
")",
"if",
"not",
"atom",
".",
"is_last_kid",
":",
"if",
"atom",
".",
"next_kid",
".",
"head",
"==",
"'atom'",
":",
"next_idx",
"=",
"atom_indices",
"[",
"id",
"(",
"atom",
".",
"next_kid",
")",
"]",
"self",
".",
"add_edge",
"(",
"atom_idx",
",",
"next_idx",
")",
"elif",
"atom",
".",
"next_kid",
".",
"head",
"==",
"'branch'",
":",
"trunk",
"=",
"atom",
"else",
":",
"# We traveled through the whole branch.",
"return",
"elif",
"atom",
".",
"head",
"==",
"'branch'",
":",
"self",
".",
"_add_edges",
"(",
"atom",
",",
"trunk",
")"
] | Add all bonds in the SMARTS string as edges in the graph. | [
"Add",
"all",
"bonds",
"in",
"the",
"SMARTS",
"string",
"as",
"edges",
"in",
"the",
"graph",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L57-L75 |
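_add_nodes() and _add_edges() above build a pattern graph whose nodes are SMARTS atoms and whose edges are bonds, with branch atoms reconnected back to their trunk atom. A tiny stand-alone networkx sketch of the same idea, using plain element strings instead of AST nodes, is shown below.
import networkx as nx

# Hypothetical mini-pattern: a carbon with a branch that rejoins the trunk atom.
pattern = nx.Graph()
for idx, symbol in enumerate(['C', 'H', 'O', 'H']):
    pattern.add_node(idx, atom=symbol)
pattern.add_edge(0, 1)   # trunk bond
pattern.add_edge(0, 2)   # branch atom reattached to the trunk (index 0)
pattern.add_edge(2, 3)
print(pattern.nodes(data=True))
print(list(pattern.edges()))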
3,068 | mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph._add_label_edges | def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx) | python | def _add_label_edges(self):
"""Add edges between all atoms with the same atom_label in rings."""
labels = self.ast.select('atom_label')
if not labels:
return
# We need each individual label and atoms with multiple ring labels
# would yield e.g. the string '12' so split those up.
label_digits = defaultdict(list)
for label in labels:
digits = list(label.tail[0])
for digit in digits:
label_digits[digit].append(label.parent())
for label, (atom1, atom2) in label_digits.items():
atom1_idx = self._atom_indices[id(atom1)]
atom2_idx = self._atom_indices[id(atom2)]
self.add_edge(atom1_idx, atom2_idx) | [
"def",
"_add_label_edges",
"(",
"self",
")",
":",
"labels",
"=",
"self",
".",
"ast",
".",
"select",
"(",
"'atom_label'",
")",
"if",
"not",
"labels",
":",
"return",
"# We need each individual label and atoms with multiple ring labels",
"# would yield e.g. the string '12' so split those up.",
"label_digits",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"label",
"in",
"labels",
":",
"digits",
"=",
"list",
"(",
"label",
".",
"tail",
"[",
"0",
"]",
")",
"for",
"digit",
"in",
"digits",
":",
"label_digits",
"[",
"digit",
"]",
".",
"append",
"(",
"label",
".",
"parent",
"(",
")",
")",
"for",
"label",
",",
"(",
"atom1",
",",
"atom2",
")",
"in",
"label_digits",
".",
"items",
"(",
")",
":",
"atom1_idx",
"=",
"self",
".",
"_atom_indices",
"[",
"id",
"(",
"atom1",
")",
"]",
"atom2_idx",
"=",
"self",
".",
"_atom_indices",
"[",
"id",
"(",
"atom2",
")",
"]",
"self",
".",
"add_edge",
"(",
"atom1_idx",
",",
"atom2_idx",
")"
] | Add edges between all atoms with the same atom_label in rings. | [
"Add",
"edges",
"between",
"all",
"atoms",
"with",
"the",
"same",
"atom_label",
"in",
"rings",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L77-L94 |
3,069 | mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSGraph.find_matches | def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edge cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index | python | def find_matches(self, topology):
"""Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edge cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`).
"""
# Note: Needs to be updated in sync with the grammar in `smarts.py`.
ring_tokens = ['ring_size', 'ring_count']
has_ring_rules = any(self.ast.select(token)
for token in ring_tokens)
_prepare_atoms(topology, compute_cycles=has_ring_rules)
top_graph = nx.Graph()
top_graph.add_nodes_from(((a.index, {'atom': a})
for a in topology.atoms()))
top_graph.add_edges_from(((b[0].index, b[1].index)
for b in topology.bonds()))
if self._graph_matcher is None:
atom = nx.get_node_attributes(self, name='atom')[0]
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
self._graph_matcher = SMARTSMatcher(top_graph, self,
node_match=self._node_match,
element=element)
matched_atoms = set()
for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
# The first node in the smarts graph always corresponds to the atom
# that we are trying to match.
atom_index = mapping[0]
# Don't yield duplicate matches found via matching the pattern in a
# different order.
if atom_index not in matched_atoms:
matched_atoms.add(atom_index)
yield atom_index | [
"def",
"find_matches",
"(",
"self",
",",
"topology",
")",
":",
"# Note: Needs to be updated in sync with the grammar in `smarts.py`.",
"ring_tokens",
"=",
"[",
"'ring_size'",
",",
"'ring_count'",
"]",
"has_ring_rules",
"=",
"any",
"(",
"self",
".",
"ast",
".",
"select",
"(",
"token",
")",
"for",
"token",
"in",
"ring_tokens",
")",
"_prepare_atoms",
"(",
"topology",
",",
"compute_cycles",
"=",
"has_ring_rules",
")",
"top_graph",
"=",
"nx",
".",
"Graph",
"(",
")",
"top_graph",
".",
"add_nodes_from",
"(",
"(",
"(",
"a",
".",
"index",
",",
"{",
"'atom'",
":",
"a",
"}",
")",
"for",
"a",
"in",
"topology",
".",
"atoms",
"(",
")",
")",
")",
"top_graph",
".",
"add_edges_from",
"(",
"(",
"(",
"b",
"[",
"0",
"]",
".",
"index",
",",
"b",
"[",
"1",
"]",
".",
"index",
")",
"for",
"b",
"in",
"topology",
".",
"bonds",
"(",
")",
")",
")",
"if",
"self",
".",
"_graph_matcher",
"is",
"None",
":",
"atom",
"=",
"nx",
".",
"get_node_attributes",
"(",
"self",
",",
"name",
"=",
"'atom'",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"atom",
".",
"select",
"(",
"'atom_symbol'",
")",
")",
"==",
"1",
"and",
"not",
"atom",
".",
"select",
"(",
"'not_expression'",
")",
":",
"try",
":",
"element",
"=",
"atom",
".",
"select",
"(",
"'atom_symbol'",
")",
".",
"strees",
"[",
"0",
"]",
".",
"tail",
"[",
"0",
"]",
"except",
"IndexError",
":",
"try",
":",
"atomic_num",
"=",
"atom",
".",
"select",
"(",
"'atomic_num'",
")",
".",
"strees",
"[",
"0",
"]",
".",
"tail",
"[",
"0",
"]",
"element",
"=",
"pt",
".",
"Element",
"[",
"int",
"(",
"atomic_num",
")",
"]",
"except",
"IndexError",
":",
"element",
"=",
"None",
"else",
":",
"element",
"=",
"None",
"self",
".",
"_graph_matcher",
"=",
"SMARTSMatcher",
"(",
"top_graph",
",",
"self",
",",
"node_match",
"=",
"self",
".",
"_node_match",
",",
"element",
"=",
"element",
")",
"matched_atoms",
"=",
"set",
"(",
")",
"for",
"mapping",
"in",
"self",
".",
"_graph_matcher",
".",
"subgraph_isomorphisms_iter",
"(",
")",
":",
"mapping",
"=",
"{",
"node_id",
":",
"atom_id",
"for",
"atom_id",
",",
"node_id",
"in",
"mapping",
".",
"items",
"(",
")",
"}",
"# The first node in the smarts graph always corresponds to the atom",
"# that we are trying to match.",
"atom_index",
"=",
"mapping",
"[",
"0",
"]",
"# Don't yield duplicate matches found via matching the pattern in a",
"# different order.",
"if",
"atom_index",
"not",
"in",
"matched_atoms",
":",
"matched_atoms",
".",
"add",
"(",
"atom_index",
")",
"yield",
"atom_index"
] | Return sets of atoms that match this SMARTS pattern in a topology.
Notes:
------
When this function gets used in atomtyper.py, we actively modify the
white- and blacklists of the atoms in `topology` after finding a match.
This means that between every successive call of
`subgraph_isomorphisms_iter()`, the topology against which we are
matching may have actually changed. Currently, we take advantage of this
behavior in some edge cases (e.g. see `test_hexa_coordinated` in
`test_smarts.py`). | [
"Return",
"sets",
"of",
"atoms",
"that",
"match",
"this",
"SMARTS",
"pattern",
"in",
"a",
"topology",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L150-L203 |
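find_matches() above ultimately relies on subgraph isomorphism: the SMARTS pattern graph is matched against the molecule graph, and the first pattern node of each mapping identifies the atom being typed. The generic networkx version of that machinery is sketched below with element labels as node attributes; it is a simplified illustration, not the SMARTSMatcher subclass itself.
import networkx as nx
from networkx.algorithms.isomorphism import GraphMatcher

# Host "molecule": an ethanol-like toy graph with element labels.
mol = nx.Graph()
for idx, el in enumerate(['C', 'C', 'O', 'H']):
    mol.add_node(idx, element=el)
mol.add_edges_from([(0, 1), (1, 2), (2, 3)])

# Pattern: an oxygen bonded to a carbon.
pat = nx.Graph()
pat.add_node(0, element='O')
pat.add_node(1, element='C')
pat.add_edge(0, 1)

gm = GraphMatcher(mol, pat,
                  node_match=lambda host, pattern: host['element'] == pattern['element'])
for mapping in gm.subgraph_isomorphisms_iter():
    print(mapping)   # host index -> pattern index; pattern node 0 marks the matched atom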
3,070 | mosdef-hub/foyer | foyer/smarts_graph.py | SMARTSMatcher.candidate_pairs_iter | def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G2_nodes = self.G2_nodes
# First we compute the inout-terminal sets.
T1_inout = set(self.inout_1.keys()) - set(self.core_1.keys())
T2_inout = set(self.inout_2.keys()) - set(self.core_2.keys())
# If T1_inout and T2_inout are both nonempty.
# P(s) = T1_inout x {min T2_inout}
if T1_inout and T2_inout:
for node in T1_inout:
yield node, min(T2_inout)
else:
# First we determine the candidate node for G2
other_node = min(G2_nodes - set(self.core_2))
host_nodes = self.valid_nodes if other_node == 0 else self.G1.nodes()
for node in host_nodes:
if node not in self.core_1:
yield node, other_node | python | def candidate_pairs_iter(self):
"""Iterator over candidate pairs of nodes in G1 and G2."""
# All computations are done using the current state!
G2_nodes = self.G2_nodes
# First we compute the inout-terminal sets.
T1_inout = set(self.inout_1.keys()) - set(self.core_1.keys())
T2_inout = set(self.inout_2.keys()) - set(self.core_2.keys())
# If T1_inout and T2_inout are both nonempty.
# P(s) = T1_inout x {min T2_inout}
if T1_inout and T2_inout:
for node in T1_inout:
yield node, min(T2_inout)
else:
# First we determine the candidate node for G2
other_node = min(G2_nodes - set(self.core_2))
host_nodes = self.valid_nodes if other_node == 0 else self.G1.nodes()
for node in host_nodes:
if node not in self.core_1:
yield node, other_node | [
"def",
"candidate_pairs_iter",
"(",
"self",
")",
":",
"# All computations are done using the current state!",
"G2_nodes",
"=",
"self",
".",
"G2_nodes",
"# First we compute the inout-terminal sets.",
"T1_inout",
"=",
"set",
"(",
"self",
".",
"inout_1",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"self",
".",
"core_1",
".",
"keys",
"(",
")",
")",
"T2_inout",
"=",
"set",
"(",
"self",
".",
"inout_2",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"self",
".",
"core_2",
".",
"keys",
"(",
")",
")",
"# If T1_inout and T2_inout are both nonempty.",
"# P(s) = T1_inout x {min T2_inout}",
"if",
"T1_inout",
"and",
"T2_inout",
":",
"for",
"node",
"in",
"T1_inout",
":",
"yield",
"node",
",",
"min",
"(",
"T2_inout",
")",
"else",
":",
"# First we determine the candidate node for G2",
"other_node",
"=",
"min",
"(",
"G2_nodes",
"-",
"set",
"(",
"self",
".",
"core_2",
")",
")",
"host_nodes",
"=",
"self",
".",
"valid_nodes",
"if",
"other_node",
"==",
"0",
"else",
"self",
".",
"G1",
".",
"nodes",
"(",
")",
"for",
"node",
"in",
"host_nodes",
":",
"if",
"node",
"not",
"in",
"self",
".",
"core_1",
":",
"yield",
"node",
",",
"other_node"
] | Iterator over candidate pairs of nodes in G1 and G2. | [
"Iterator",
"over",
"candidate",
"pairs",
"of",
"nodes",
"in",
"G1",
"and",
"G2",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/smarts_graph.py#L216-L236 |
3,071 | mosdef-hub/foyer | foyer/atomtyper.py | find_atomtypes | def find_atomtypes(topology, forcefield, max_iter=10):
"""Determine atomtypes for all atoms.
Parameters
----------
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
forcefield : foyer.Forcefield
The forcefield object.
max_iter : int, optional, default=10
The maximum number of iterations.
"""
rules = _load_rules(forcefield)
# Only consider rules for elements found in topology
subrules = dict()
system_elements = {a.element.symbol for a in topology.atoms()}
for key,val in rules.items():
atom = val.node[0]['atom']
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
if element is None or element in system_elements:
subrules[key] = val
rules = subrules
_iterate_rules(rules, topology, max_iter=max_iter)
_resolve_atomtypes(topology) | python | def find_atomtypes(topology, forcefield, max_iter=10):
"""Determine atomtypes for all atoms.
Parameters
----------
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
forcefield : foyer.Forcefield
The forcefield object.
max_iter : int, optional, default=10
The maximum number of iterations.
"""
rules = _load_rules(forcefield)
# Only consider rules for elements found in topology
subrules = dict()
system_elements = {a.element.symbol for a in topology.atoms()}
for key,val in rules.items():
atom = val.node[0]['atom']
if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
try:
element = atom.select('atom_symbol').strees[0].tail[0]
except IndexError:
try:
atomic_num = atom.select('atomic_num').strees[0].tail[0]
element = pt.Element[int(atomic_num)]
except IndexError:
element = None
else:
element = None
if element is None or element in system_elements:
subrules[key] = val
rules = subrules
_iterate_rules(rules, topology, max_iter=max_iter)
_resolve_atomtypes(topology) | [
"def",
"find_atomtypes",
"(",
"topology",
",",
"forcefield",
",",
"max_iter",
"=",
"10",
")",
":",
"rules",
"=",
"_load_rules",
"(",
"forcefield",
")",
"# Only consider rules for elements found in topology",
"subrules",
"=",
"dict",
"(",
")",
"system_elements",
"=",
"{",
"a",
".",
"element",
".",
"symbol",
"for",
"a",
"in",
"topology",
".",
"atoms",
"(",
")",
"}",
"for",
"key",
",",
"val",
"in",
"rules",
".",
"items",
"(",
")",
":",
"atom",
"=",
"val",
".",
"node",
"[",
"0",
"]",
"[",
"'atom'",
"]",
"if",
"len",
"(",
"atom",
".",
"select",
"(",
"'atom_symbol'",
")",
")",
"==",
"1",
"and",
"not",
"atom",
".",
"select",
"(",
"'not_expression'",
")",
":",
"try",
":",
"element",
"=",
"atom",
".",
"select",
"(",
"'atom_symbol'",
")",
".",
"strees",
"[",
"0",
"]",
".",
"tail",
"[",
"0",
"]",
"except",
"IndexError",
":",
"try",
":",
"atomic_num",
"=",
"atom",
".",
"select",
"(",
"'atomic_num'",
")",
".",
"strees",
"[",
"0",
"]",
".",
"tail",
"[",
"0",
"]",
"element",
"=",
"pt",
".",
"Element",
"[",
"int",
"(",
"atomic_num",
")",
"]",
"except",
"IndexError",
":",
"element",
"=",
"None",
"else",
":",
"element",
"=",
"None",
"if",
"element",
"is",
"None",
"or",
"element",
"in",
"system_elements",
":",
"subrules",
"[",
"key",
"]",
"=",
"val",
"rules",
"=",
"subrules",
"_iterate_rules",
"(",
"rules",
",",
"topology",
",",
"max_iter",
"=",
"max_iter",
")",
"_resolve_atomtypes",
"(",
"topology",
")"
] | Determine atomtypes for all atoms.
Parameters
----------
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
forcefield : foyer.Forcefield
The forcefield object.
max_iter : int, optional, default=10
The maximum number of iterations. | [
"Determine",
"atomtypes",
"for",
"all",
"atoms",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/atomtyper.py#L7-L43 |
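find_atomtypes() above first discards SMARTS rules whose leading atom names an element that does not occur in the system, which keeps the expensive graph matching to a minimum. The filtering idea on its own is tiny, as this sketch shows with made-up rule and element data.
# Illustrative element pre-filter; rule names and elements are invented.
rules = {'opls_135': 'C', 'opls_140': 'H', 'opls_154': 'O', 'generic': None}
system_elements = {'C', 'H'}

subrules = {name: element for name, element in rules.items()
            if element is None or element in system_elements}
print(subrules)   # keeps C, H and the element-agnostic rule; drops the O rule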
3,072 | mosdef-hub/foyer | foyer/atomtyper.py | _load_rules | def _load_rules(forcefield):
"""Load atomtyping rules from a forcefield into SMARTSGraphs. """
rules = dict()
for rule_name, smarts in forcefield.atomTypeDefinitions.items():
overrides = forcefield.atomTypeOverrides.get(rule_name)
if overrides is not None:
overrides = set(overrides)
else:
overrides = set()
rules[rule_name] = SMARTSGraph(smarts_string=smarts,
parser=forcefield.parser,
name=rule_name,
overrides=overrides)
return rules | python | def _load_rules(forcefield):
"""Load atomtyping rules from a forcefield into SMARTSGraphs. """
rules = dict()
for rule_name, smarts in forcefield.atomTypeDefinitions.items():
overrides = forcefield.atomTypeOverrides.get(rule_name)
if overrides is not None:
overrides = set(overrides)
else:
overrides = set()
rules[rule_name] = SMARTSGraph(smarts_string=smarts,
parser=forcefield.parser,
name=rule_name,
overrides=overrides)
return rules | [
"def",
"_load_rules",
"(",
"forcefield",
")",
":",
"rules",
"=",
"dict",
"(",
")",
"for",
"rule_name",
",",
"smarts",
"in",
"forcefield",
".",
"atomTypeDefinitions",
".",
"items",
"(",
")",
":",
"overrides",
"=",
"forcefield",
".",
"atomTypeOverrides",
".",
"get",
"(",
"rule_name",
")",
"if",
"overrides",
"is",
"not",
"None",
":",
"overrides",
"=",
"set",
"(",
"overrides",
")",
"else",
":",
"overrides",
"=",
"set",
"(",
")",
"rules",
"[",
"rule_name",
"]",
"=",
"SMARTSGraph",
"(",
"smarts_string",
"=",
"smarts",
",",
"parser",
"=",
"forcefield",
".",
"parser",
",",
"name",
"=",
"rule_name",
",",
"overrides",
"=",
"overrides",
")",
"return",
"rules"
] | Load atomtyping rules from a forcefield into SMARTSGraphs. | [
"Load",
"atomtyping",
"rules",
"from",
"a",
"forcefield",
"into",
"SMARTSGraphs",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/atomtyper.py#L46-L59 |
3,073 | mosdef-hub/foyer | foyer/atomtyper.py | _iterate_rules | def _iterate_rules(rules, topology, max_iter):
"""Iteratively run all the rules until the white- and backlists converge.
Parameters
----------
rules : dict
A dictionary mapping rule names (typically atomtype names) to
SMARTSGraphs that evaluate those rules.
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
max_iter : int
The maximum number of iterations.
"""
atoms = list(topology.atoms())
for _ in range(max_iter):
max_iter -= 1
found_something = False
for rule in rules.values():
for match_index in rule.find_matches(topology):
atom = atoms[match_index]
if rule.name not in atom.whitelist:
atom.whitelist.add(rule.name)
atom.blacklist |= rule.overrides
found_something = True
if not found_something:
break
else:
warn("Reached maximum iterations. Something probably went wrong.") | python | def _iterate_rules(rules, topology, max_iter):
"""Iteratively run all the rules until the white- and backlists converge.
Parameters
----------
rules : dict
A dictionary mapping rule names (typically atomtype names) to
SMARTSGraphs that evaluate those rules.
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
max_iter : int
The maximum number of iterations.
"""
atoms = list(topology.atoms())
for _ in range(max_iter):
max_iter -= 1
found_something = False
for rule in rules.values():
for match_index in rule.find_matches(topology):
atom = atoms[match_index]
if rule.name not in atom.whitelist:
atom.whitelist.add(rule.name)
atom.blacklist |= rule.overrides
found_something = True
if not found_something:
break
else:
warn("Reached maximum iterations. Something probably went wrong.") | [
"def",
"_iterate_rules",
"(",
"rules",
",",
"topology",
",",
"max_iter",
")",
":",
"atoms",
"=",
"list",
"(",
"topology",
".",
"atoms",
"(",
")",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"max_iter",
"-=",
"1",
"found_something",
"=",
"False",
"for",
"rule",
"in",
"rules",
".",
"values",
"(",
")",
":",
"for",
"match_index",
"in",
"rule",
".",
"find_matches",
"(",
"topology",
")",
":",
"atom",
"=",
"atoms",
"[",
"match_index",
"]",
"if",
"rule",
".",
"name",
"not",
"in",
"atom",
".",
"whitelist",
":",
"atom",
".",
"whitelist",
".",
"add",
"(",
"rule",
".",
"name",
")",
"atom",
".",
"blacklist",
"|=",
"rule",
".",
"overrides",
"found_something",
"=",
"True",
"if",
"not",
"found_something",
":",
"break",
"else",
":",
"warn",
"(",
"\"Reached maximum iterations. Something probably went wrong.\"",
")"
] | Iteratively run all the rules until the white- and blacklists converge.
Parameters
----------
rules : dict
A dictionary mapping rule names (typically atomtype names) to
SMARTSGraphs that evaluate those rules.
topology : simtk.openmm.app.Topology
The topology that we are trying to atomtype.
max_iter : int
The maximum number of iterations. | [
"Iteratively",
"run",
"all",
"the",
"rules",
"until",
"the",
"white",
"-",
"and",
"backlists",
"converge",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/atomtyper.py#L62-L90 |
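_iterate_rules() above keeps applying every rule, adding each match to the atom's whitelist and the rule's overridden types to its blacklist, until a full pass changes nothing or max_iter passes elapse. The convergence loop looks roughly like this stand-alone sketch, where atoms are plain dicts and the rule matches are stubbed out.
# Stand-alone sketch of the whitelist/blacklist convergence loop; data is invented.
atoms = [{'whitelist': set(), 'blacklist': set()} for _ in range(3)]
rules = [('HC', {'matches': [0, 1], 'overrides': set()}),
         ('H_water', {'matches': [1], 'overrides': {'HC'}})]

max_iter = 10
for _ in range(max_iter):
    found_something = False
    for name, rule in rules:
        for idx in rule['matches']:
            if name not in atoms[idx]['whitelist']:
                atoms[idx]['whitelist'].add(name)
                atoms[idx]['blacklist'] |= rule['overrides']
                found_something = True
    if not found_something:
        break
else:
    print("Reached maximum iterations.")
print(atoms)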
3,074 | mosdef-hub/foyer | foyer/atomtyper.py | _resolve_atomtypes | def _resolve_atomtypes(topology):
"""Determine the final atomtypes from the white- and blacklists. """
for atom in topology.atoms():
atomtype = [rule_name for rule_name in atom.whitelist - atom.blacklist]
if len(atomtype) == 1:
atom.id = atomtype[0]
elif len(atomtype) > 1:
raise FoyerError("Found multiple types for atom {} ({}): {}.".format(
atom.index, atom.element.name, atomtype))
else:
raise FoyerError("Found no types for atom {} ({}).".format(
atom.index, atom.element.name)) | python | def _resolve_atomtypes(topology):
"""Determine the final atomtypes from the white- and blacklists. """
for atom in topology.atoms():
atomtype = [rule_name for rule_name in atom.whitelist - atom.blacklist]
if len(atomtype) == 1:
atom.id = atomtype[0]
elif len(atomtype) > 1:
raise FoyerError("Found multiple types for atom {} ({}): {}.".format(
atom.index, atom.element.name, atomtype))
else:
raise FoyerError("Found no types for atom {} ({}).".format(
atom.index, atom.element.name)) | [
"def",
"_resolve_atomtypes",
"(",
"topology",
")",
":",
"for",
"atom",
"in",
"topology",
".",
"atoms",
"(",
")",
":",
"atomtype",
"=",
"[",
"rule_name",
"for",
"rule_name",
"in",
"atom",
".",
"whitelist",
"-",
"atom",
".",
"blacklist",
"]",
"if",
"len",
"(",
"atomtype",
")",
"==",
"1",
":",
"atom",
".",
"id",
"=",
"atomtype",
"[",
"0",
"]",
"elif",
"len",
"(",
"atomtype",
")",
">",
"1",
":",
"raise",
"FoyerError",
"(",
"\"Found multiple types for atom {} ({}): {}.\"",
".",
"format",
"(",
"atom",
".",
"index",
",",
"atom",
".",
"element",
".",
"name",
",",
"atomtype",
")",
")",
"else",
":",
"raise",
"FoyerError",
"(",
"\"Found no types for atom {} ({}).\"",
".",
"format",
"(",
"atom",
".",
"index",
",",
"atom",
".",
"element",
".",
"name",
")",
")"
] | Determine the final atomtypes from the white- and blacklists. | [
"Determine",
"the",
"final",
"atomtypes",
"from",
"the",
"white",
"-",
"and",
"blacklists",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/atomtyper.py#L93-L104 |
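_resolve_atomtypes() above reduces each atom's candidates to whitelist minus blacklist and insists on exactly one survivor. That final set difference is simple enough to show inline; the example sets are invented.
# Resolution step in isolation; candidate type names are invented.
whitelist = {'HC', 'H_water'}
blacklist = {'HC'}
surviving = whitelist - blacklist
if len(surviving) == 1:
    atomtype = surviving.pop()
    print(atomtype)            # 'H_water'
else:
    raise ValueError("expected exactly one atom type, got %s" % sorted(surviving))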
3,075 | mosdef-hub/foyer | foyer/forcefield.py | generate_topology | def generate_topology(non_omm_topology, non_element_types=None,
residues=None):
"""Create an OpenMM Topology from another supported topology structure."""
if non_element_types is None:
non_element_types = set()
if isinstance(non_omm_topology, pmd.Structure):
return _topology_from_parmed(non_omm_topology, non_element_types)
elif has_mbuild:
mb = import_('mbuild')
if isinstance(non_omm_topology, mb.Compound):
pmdCompoundStructure = non_omm_topology.to_parmed(residues=residues)
return _topology_from_parmed(pmdCompoundStructure, non_element_types)
else:
raise FoyerError('Unknown topology format: {}\n'
'Supported formats are: '
'"parmed.Structure", '
'"mbuild.Compound", '
'"openmm.app.Topology"'.format(non_omm_topology)) | python | def generate_topology(non_omm_topology, non_element_types=None,
residues=None):
"""Create an OpenMM Topology from another supported topology structure."""
if non_element_types is None:
non_element_types = set()
if isinstance(non_omm_topology, pmd.Structure):
return _topology_from_parmed(non_omm_topology, non_element_types)
elif has_mbuild:
mb = import_('mbuild')
if isinstance(non_omm_topology, mb.Compound):
pmdCompoundStructure = non_omm_topology.to_parmed(residues=residues)
return _topology_from_parmed(pmdCompoundStructure, non_element_types)
else:
raise FoyerError('Unknown topology format: {}\n'
'Supported formats are: '
'"parmed.Structure", '
'"mbuild.Compound", '
'"openmm.app.Topology"'.format(non_omm_topology)) | [
"def",
"generate_topology",
"(",
"non_omm_topology",
",",
"non_element_types",
"=",
"None",
",",
"residues",
"=",
"None",
")",
":",
"if",
"non_element_types",
"is",
"None",
":",
"non_element_types",
"=",
"set",
"(",
")",
"if",
"isinstance",
"(",
"non_omm_topology",
",",
"pmd",
".",
"Structure",
")",
":",
"return",
"_topology_from_parmed",
"(",
"non_omm_topology",
",",
"non_element_types",
")",
"elif",
"has_mbuild",
":",
"mb",
"=",
"import_",
"(",
"'mbuild'",
")",
"if",
"(",
"non_omm_topology",
",",
"mb",
".",
"Compound",
")",
":",
"pmdCompoundStructure",
"=",
"non_omm_topology",
".",
"to_parmed",
"(",
"residues",
"=",
"residues",
")",
"return",
"_topology_from_parmed",
"(",
"pmdCompoundStructure",
",",
"non_element_types",
")",
"else",
":",
"raise",
"FoyerError",
"(",
"'Unknown topology format: {}\\n'",
"'Supported formats are: '",
"'\"parmed.Structure\", '",
"'\"mbuild.Compound\", '",
"'\"openmm.app.Topology\"'",
".",
"format",
"(",
"non_omm_topology",
")",
")"
] | Create an OpenMM Topology from another supported topology structure. | [
"Create",
"an",
"OpenMM",
"Topology",
"from",
"another",
"supported",
"topology",
"structure",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L87-L105 |
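generate_topology() above dispatches on the input type: ParmEd structures convert directly, mBuild compounds go through to_parmed() first. A hedged outline of calling it with a ParmEd structure follows; the PDB file name is illustrative and assumed to exist locally.
import parmed as pmd
from foyer.forcefield import generate_topology   # module-level helper from this record

structure = pmd.load_file('water_box.pdb')       # illustrative input file
topology, positions = generate_topology(structure)
print(topology.getNumAtoms(), 'atoms in the generated OpenMM topology')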
3,076 | mosdef-hub/foyer | foyer/forcefield.py | _topology_from_parmed | def _topology_from_parmed(structure, non_element_types):
"""Convert a ParmEd Structure to an OpenMM Topology."""
topology = app.Topology()
residues = dict()
for pmd_residue in structure.residues:
chain = topology.addChain()
omm_residue = topology.addResidue(pmd_residue.name, chain)
residues[pmd_residue] = omm_residue
atoms = dict() # pmd.Atom: omm.Atom
for pmd_atom in structure.atoms:
name = pmd_atom.name
if pmd_atom.name in non_element_types:
element = non_element_types[pmd_atom.name]
else:
if (isinstance(pmd_atom.atomic_number, int) and
pmd_atom.atomic_number != 0):
element = elem.Element.getByAtomicNumber(pmd_atom.atomic_number)
else:
element = elem.Element.getBySymbol(pmd_atom.name)
omm_atom = topology.addAtom(name, element, residues[pmd_atom.residue])
atoms[pmd_atom] = omm_atom
omm_atom.bond_partners = []
for bond in structure.bonds:
atom1 = atoms[bond.atom1]
atom2 = atoms[bond.atom2]
topology.addBond(atom1, atom2)
atom1.bond_partners.append(atom2)
atom2.bond_partners.append(atom1)
if structure.box_vectors and np.any([x._value for x in structure.box_vectors]):
topology.setPeriodicBoxVectors(structure.box_vectors)
positions = structure.positions
return topology, positions | python | def _topology_from_parmed(structure, non_element_types):
"""Convert a ParmEd Structure to an OpenMM Topology."""
topology = app.Topology()
residues = dict()
for pmd_residue in structure.residues:
chain = topology.addChain()
omm_residue = topology.addResidue(pmd_residue.name, chain)
residues[pmd_residue] = omm_residue
atoms = dict() # pmd.Atom: omm.Atom
for pmd_atom in structure.atoms:
name = pmd_atom.name
if pmd_atom.name in non_element_types:
element = non_element_types[pmd_atom.name]
else:
if (isinstance(pmd_atom.atomic_number, int) and
pmd_atom.atomic_number != 0):
element = elem.Element.getByAtomicNumber(pmd_atom.atomic_number)
else:
element = elem.Element.getBySymbol(pmd_atom.name)
omm_atom = topology.addAtom(name, element, residues[pmd_atom.residue])
atoms[pmd_atom] = omm_atom
omm_atom.bond_partners = []
for bond in structure.bonds:
atom1 = atoms[bond.atom1]
atom2 = atoms[bond.atom2]
topology.addBond(atom1, atom2)
atom1.bond_partners.append(atom2)
atom2.bond_partners.append(atom1)
if structure.box_vectors and np.any([x._value for x in structure.box_vectors]):
topology.setPeriodicBoxVectors(structure.box_vectors)
positions = structure.positions
return topology, positions | [
"def",
"_topology_from_parmed",
"(",
"structure",
",",
"non_element_types",
")",
":",
"topology",
"=",
"app",
".",
"Topology",
"(",
")",
"residues",
"=",
"dict",
"(",
")",
"for",
"pmd_residue",
"in",
"structure",
".",
"residues",
":",
"chain",
"=",
"topology",
".",
"addChain",
"(",
")",
"omm_residue",
"=",
"topology",
".",
"addResidue",
"(",
"pmd_residue",
".",
"name",
",",
"chain",
")",
"residues",
"[",
"pmd_residue",
"]",
"=",
"omm_residue",
"atoms",
"=",
"dict",
"(",
")",
"# pmd.Atom: omm.Atom",
"for",
"pmd_atom",
"in",
"structure",
".",
"atoms",
":",
"name",
"=",
"pmd_atom",
".",
"name",
"if",
"pmd_atom",
".",
"name",
"in",
"non_element_types",
":",
"element",
"=",
"non_element_types",
"[",
"pmd_atom",
".",
"name",
"]",
"else",
":",
"if",
"(",
"isinstance",
"(",
"pmd_atom",
".",
"atomic_number",
",",
"int",
")",
"and",
"pmd_atom",
".",
"atomic_number",
"!=",
"0",
")",
":",
"element",
"=",
"elem",
".",
"Element",
".",
"getByAtomicNumber",
"(",
"pmd_atom",
".",
"atomic_number",
")",
"else",
":",
"element",
"=",
"elem",
".",
"Element",
".",
"getBySymbol",
"(",
"pmd_atom",
".",
"name",
")",
"omm_atom",
"=",
"topology",
".",
"addAtom",
"(",
"name",
",",
"element",
",",
"residues",
"[",
"pmd_atom",
".",
"residue",
"]",
")",
"atoms",
"[",
"pmd_atom",
"]",
"=",
"omm_atom",
"omm_atom",
".",
"bond_partners",
"=",
"[",
"]",
"for",
"bond",
"in",
"structure",
".",
"bonds",
":",
"atom1",
"=",
"atoms",
"[",
"bond",
".",
"atom1",
"]",
"atom2",
"=",
"atoms",
"[",
"bond",
".",
"atom2",
"]",
"topology",
".",
"addBond",
"(",
"atom1",
",",
"atom2",
")",
"atom1",
".",
"bond_partners",
".",
"append",
"(",
"atom2",
")",
"atom2",
".",
"bond_partners",
".",
"append",
"(",
"atom1",
")",
"if",
"structure",
".",
"box_vectors",
"and",
"np",
".",
"any",
"(",
"[",
"x",
".",
"_value",
"for",
"x",
"in",
"structure",
".",
"box_vectors",
"]",
")",
":",
"topology",
".",
"setPeriodicBoxVectors",
"(",
"structure",
".",
"box_vectors",
")",
"positions",
"=",
"structure",
".",
"positions",
"return",
"topology",
",",
"positions"
] | Convert a ParmEd Structure to an OpenMM Topology. | [
"Convert",
"a",
"ParmEd",
"Structure",
"to",
"an",
"OpenMM",
"Topology",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L108-L143 |
3,077 | mosdef-hub/foyer | foyer/forcefield.py | _topology_from_residue | def _topology_from_residue(res):
"""Converts a openmm.app.Topology.Residue to openmm.app.Topology.
Parameters
----------
res : openmm.app.Topology.Residue
An individual residue in an openmm.app.Topology
Returns
-------
topology : openmm.app.Topology
The generated topology
"""
topology = app.Topology()
chain = topology.addChain()
new_res = topology.addResidue(res.name, chain)
atoms = dict() # { omm.Atom in res : omm.Atom in *new* topology }
for res_atom in res.atoms():
topology_atom = topology.addAtom(name=res_atom.name,
element=res_atom.element,
residue=new_res)
atoms[res_atom] = topology_atom
topology_atom.bond_partners = []
for bond in res.bonds():
atom1 = atoms[bond.atom1]
atom2 = atoms[bond.atom2]
topology.addBond(atom1, atom2)
atom1.bond_partners.append(atom2)
atom2.bond_partners.append(atom1)
return topology | python | def _topology_from_residue(res):
"""Converts a openmm.app.Topology.Residue to openmm.app.Topology.
Parameters
----------
res : openmm.app.Topology.Residue
An individual residue in an openmm.app.Topology
Returns
-------
topology : openmm.app.Topology
The generated topology
"""
topology = app.Topology()
chain = topology.addChain()
new_res = topology.addResidue(res.name, chain)
atoms = dict() # { omm.Atom in res : omm.Atom in *new* topology }
for res_atom in res.atoms():
topology_atom = topology.addAtom(name=res_atom.name,
element=res_atom.element,
residue=new_res)
atoms[res_atom] = topology_atom
topology_atom.bond_partners = []
for bond in res.bonds():
atom1 = atoms[bond.atom1]
atom2 = atoms[bond.atom2]
topology.addBond(atom1, atom2)
atom1.bond_partners.append(atom2)
atom2.bond_partners.append(atom1)
return topology | [
"def",
"_topology_from_residue",
"(",
"res",
")",
":",
"topology",
"=",
"app",
".",
"Topology",
"(",
")",
"chain",
"=",
"topology",
".",
"addChain",
"(",
")",
"new_res",
"=",
"topology",
".",
"addResidue",
"(",
"res",
".",
"name",
",",
"chain",
")",
"atoms",
"=",
"dict",
"(",
")",
"# { omm.Atom in res : omm.Atom in *new* topology }",
"for",
"res_atom",
"in",
"res",
".",
"atoms",
"(",
")",
":",
"topology_atom",
"=",
"topology",
".",
"addAtom",
"(",
"name",
"=",
"res_atom",
".",
"name",
",",
"element",
"=",
"res_atom",
".",
"element",
",",
"residue",
"=",
"new_res",
")",
"atoms",
"[",
"res_atom",
"]",
"=",
"topology_atom",
"topology_atom",
".",
"bond_partners",
"=",
"[",
"]",
"for",
"bond",
"in",
"res",
".",
"bonds",
"(",
")",
":",
"atom1",
"=",
"atoms",
"[",
"bond",
".",
"atom1",
"]",
"atom2",
"=",
"atoms",
"[",
"bond",
".",
"atom2",
"]",
"topology",
".",
"addBond",
"(",
"atom1",
",",
"atom2",
")",
"atom1",
".",
"bond_partners",
".",
"append",
"(",
"atom2",
")",
"atom2",
".",
"bond_partners",
".",
"append",
"(",
"atom1",
")",
"return",
"topology"
] | Converts an openmm.app.Topology.Residue to openmm.app.Topology.
Parameters
----------
res : openmm.app.Topology.Residue
An individual residue in an openmm.app.Topology
Returns
-------
topology : openmm.app.Topology
The generated topology | [
"Converts",
"a",
"openmm",
".",
"app",
".",
"Topology",
".",
"Residue",
"to",
"openmm",
".",
"app",
".",
"Topology",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L146-L180 |
3,078 | mosdef-hub/foyer | foyer/forcefield.py | _check_independent_residues | def _check_independent_residues(topology):
"""Check to see if residues will constitute independent graphs."""
for res in topology.residues():
atoms_in_residue = set([atom for atom in res.atoms()])
bond_partners_in_residue = [item for sublist in [atom.bond_partners for atom in res.atoms()] for item in sublist]
# Handle the case of a 'residue' with no neighbors
if not bond_partners_in_residue:
continue
if set(atoms_in_residue) != set(bond_partners_in_residue):
return False
return True | python | def _check_independent_residues(topology):
"""Check to see if residues will constitute independent graphs."""
for res in topology.residues():
atoms_in_residue = set([atom for atom in res.atoms()])
bond_partners_in_residue = [item for sublist in [atom.bond_partners for atom in res.atoms()] for item in sublist]
# Handle the case of a 'residue' with no neighbors
if not bond_partners_in_residue:
continue
if set(atoms_in_residue) != set(bond_partners_in_residue):
return False
return True | [
"def",
"_check_independent_residues",
"(",
"topology",
")",
":",
"for",
"res",
"in",
"topology",
".",
"residues",
"(",
")",
":",
"atoms_in_residue",
"=",
"set",
"(",
"[",
"atom",
"for",
"atom",
"in",
"res",
".",
"atoms",
"(",
")",
"]",
")",
"bond_partners_in_residue",
"=",
"[",
"item",
"for",
"sublist",
"in",
"[",
"atom",
".",
"bond_partners",
"for",
"atom",
"in",
"res",
".",
"atoms",
"(",
")",
"]",
"for",
"item",
"in",
"sublist",
"]",
"# Handle the case of a 'residue' with no neighbors",
"if",
"not",
"bond_partners_in_residue",
":",
"continue",
"if",
"set",
"(",
"atoms_in_residue",
")",
"!=",
"set",
"(",
"bond_partners_in_residue",
")",
":",
"return",
"False",
"return",
"True"
] | Check to see if residues will constitute independent graphs. | [
"Check",
"to",
"see",
"if",
"residues",
"will",
"constitute",
"independent",
"graphs",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L183-L193 |
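_check_independent_residues() above asks whether every residue's bond partners stay inside that residue, i.e. whether residues form disconnected subgraphs that can be atomtyped once and copied. A toy version with residues as plain atom-index sets is sketched below.
# Toy residues: each maps to its atom indices; bonds are index pairs. Data is invented.
residues = [{0, 1, 2}, {3, 4, 5}]
bonds = [(0, 1), (1, 2), (3, 4), (4, 5)]      # no bond crosses a residue boundary

def residues_independent(residues, bonds):
    for res in residues:
        partners = {b for bond in bonds if set(bond) & res for b in bond}
        # Handle the case of a 'residue' with no bonds at all
        if partners and partners != res:
            return False
    return True

print(residues_independent(residues, bonds))              # True
print(residues_independent(residues, bonds + [(2, 3)]))   # False: inter-residue bond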
3,079 | mosdef-hub/foyer | foyer/forcefield.py | _update_atomtypes | def _update_atomtypes(unatomtyped_topology, res_name, prototype):
"""Update atomtypes in residues in a topology using a prototype topology.
Atomtypes are updated when residues in each topology have matching names.
Parameters
----------
unatomtyped_topology : openmm.app.Topology
Topology lacking atomtypes defined by `find_atomtypes`.
prototype : openmm.app.Topology
Prototype topology with atomtypes defined by `find_atomtypes`.
"""
for res in unatomtyped_topology.residues():
if res.name == res_name:
for old_atom, new_atom_id in zip([atom for atom in res.atoms()], [atom.id for atom in prototype.atoms()]):
old_atom.id = new_atom_id | python | def _update_atomtypes(unatomtyped_topology, res_name, prototype):
"""Update atomtypes in residues in a topology using a prototype topology.
Atomtypes are updated when residues in each topology have matching names.
Parameters
----------
unatomtyped_topology : openmm.app.Topology
Topology lacking atomtypes defined by `find_atomtypes`.
prototype : openmm.app.Topology
Prototype topology with atomtypes defined by `find_atomtypes`.
"""
for res in unatomtyped_topology.residues():
if res.name == res_name:
for old_atom, new_atom_id in zip([atom for atom in res.atoms()], [atom.id for atom in prototype.atoms()]):
old_atom.id = new_atom_id | [
"def",
"_update_atomtypes",
"(",
"unatomtyped_topology",
",",
"res_name",
",",
"prototype",
")",
":",
"for",
"res",
"in",
"unatomtyped_topology",
".",
"residues",
"(",
")",
":",
"if",
"res",
".",
"name",
"==",
"res_name",
":",
"for",
"old_atom",
",",
"new_atom_id",
"in",
"zip",
"(",
"[",
"atom",
"for",
"atom",
"in",
"res",
".",
"atoms",
"(",
")",
"]",
",",
"[",
"atom",
".",
"id",
"for",
"atom",
"in",
"prototype",
".",
"atoms",
"(",
")",
"]",
")",
":",
"old_atom",
".",
"id",
"=",
"new_atom_id"
] | Update atomtypes in residues in a topology using a prototype topology.
Atomtypes are updated when residues in each topology have matching names.
Parameters
----------
unatomtyped_topology : openmm.app.Topology
Topology lacking atomtypes defined by `find_atomtypes`.
prototype : openmm.app.Topology
Prototype topology with atomtypes defined by `find_atomtypes`. | [
"Update",
"atomtypes",
"in",
"residues",
"in",
"a",
"topology",
"using",
"a",
"prototype",
"topology",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L196-L212 |
3,080 | mosdef-hub/foyer | foyer/forcefield.py | Forcefield.registerAtomType | def registerAtomType(self, parameters):
"""Register a new atom type. """
name = parameters['name']
if name in self._atomTypes:
raise ValueError('Found multiple definitions for atom type: ' + name)
atom_class = parameters['class']
mass = _convertParameterToNumber(parameters['mass'])
element = None
if 'element' in parameters:
element, custom = self._create_element(parameters['element'], mass)
if custom:
self.non_element_types[element.symbol] = element
self._atomTypes[name] = self.__class__._AtomType(name, atom_class, mass, element)
if atom_class in self._atomClasses:
type_set = self._atomClasses[atom_class]
else:
type_set = set()
self._atomClasses[atom_class] = type_set
type_set.add(name)
self._atomClasses[''].add(name)
name = parameters['name']
if 'def' in parameters:
self.atomTypeDefinitions[name] = parameters['def']
if 'overrides' in parameters:
overrides = set(atype.strip() for atype
in parameters['overrides'].split(","))
if overrides:
self.atomTypeOverrides[name] = overrides
if 'desc' in parameters:
self.atomTypeDesc[name] = parameters['desc']
if 'doi' in parameters:
dois = set(doi.strip() for doi in parameters['doi'].split(','))
self.atomTypeRefs[name] = dois | python | def registerAtomType(self, parameters):
"""Register a new atom type. """
name = parameters['name']
if name in self._atomTypes:
raise ValueError('Found multiple definitions for atom type: ' + name)
atom_class = parameters['class']
mass = _convertParameterToNumber(parameters['mass'])
element = None
if 'element' in parameters:
element, custom = self._create_element(parameters['element'], mass)
if custom:
self.non_element_types[element.symbol] = element
self._atomTypes[name] = self.__class__._AtomType(name, atom_class, mass, element)
if atom_class in self._atomClasses:
type_set = self._atomClasses[atom_class]
else:
type_set = set()
self._atomClasses[atom_class] = type_set
type_set.add(name)
self._atomClasses[''].add(name)
name = parameters['name']
if 'def' in parameters:
self.atomTypeDefinitions[name] = parameters['def']
if 'overrides' in parameters:
overrides = set(atype.strip() for atype
in parameters['overrides'].split(","))
if overrides:
self.atomTypeOverrides[name] = overrides
if 'desc' in parameters:
self.atomTypeDesc[name] = parameters['desc']
if 'doi' in parameters:
dois = set(doi.strip() for doi in parameters['doi'].split(','))
self.atomTypeRefs[name] = dois | [
"def",
"registerAtomType",
"(",
"self",
",",
"parameters",
")",
":",
"name",
"=",
"parameters",
"[",
"'name'",
"]",
"if",
"name",
"in",
"self",
".",
"_atomTypes",
":",
"raise",
"ValueError",
"(",
"'Found multiple definitions for atom type: '",
"+",
"name",
")",
"atom_class",
"=",
"parameters",
"[",
"'class'",
"]",
"mass",
"=",
"_convertParameterToNumber",
"(",
"parameters",
"[",
"'mass'",
"]",
")",
"element",
"=",
"None",
"if",
"'element'",
"in",
"parameters",
":",
"element",
",",
"custom",
"=",
"self",
".",
"_create_element",
"(",
"parameters",
"[",
"'element'",
"]",
",",
"mass",
")",
"if",
"custom",
":",
"self",
".",
"non_element_types",
"[",
"element",
".",
"symbol",
"]",
"=",
"element",
"self",
".",
"_atomTypes",
"[",
"name",
"]",
"=",
"self",
".",
"__class__",
".",
"_AtomType",
"(",
"name",
",",
"atom_class",
",",
"mass",
",",
"element",
")",
"if",
"atom_class",
"in",
"self",
".",
"_atomClasses",
":",
"type_set",
"=",
"self",
".",
"_atomClasses",
"[",
"atom_class",
"]",
"else",
":",
"type_set",
"=",
"set",
"(",
")",
"self",
".",
"_atomClasses",
"[",
"atom_class",
"]",
"=",
"type_set",
"type_set",
".",
"add",
"(",
"name",
")",
"self",
".",
"_atomClasses",
"[",
"''",
"]",
".",
"add",
"(",
"name",
")",
"name",
"=",
"parameters",
"[",
"'name'",
"]",
"if",
"'def'",
"in",
"parameters",
":",
"self",
".",
"atomTypeDefinitions",
"[",
"name",
"]",
"=",
"parameters",
"[",
"'def'",
"]",
"if",
"'overrides'",
"in",
"parameters",
":",
"overrides",
"=",
"set",
"(",
"atype",
".",
"strip",
"(",
")",
"for",
"atype",
"in",
"parameters",
"[",
"'overrides'",
"]",
".",
"split",
"(",
"\",\"",
")",
")",
"if",
"overrides",
":",
"self",
".",
"atomTypeOverrides",
"[",
"name",
"]",
"=",
"overrides",
"if",
"'des'",
"in",
"parameters",
":",
"self",
".",
"atomTypeDesc",
"[",
"name",
"]",
"=",
"parameters",
"[",
"'desc'",
"]",
"if",
"'doi'",
"in",
"parameters",
":",
"dois",
"=",
"set",
"(",
"doi",
".",
"strip",
"(",
")",
"for",
"doi",
"in",
"parameters",
"[",
"'doi'",
"]",
".",
"split",
"(",
"','",
")",
")",
"self",
".",
"atomTypeRefs",
"[",
"name",
"]",
"=",
"dois"
] | Register a new atom type. | [
"Register",
"a",
"new",
"atom",
"type",
"."
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L307-L341 |
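An illustrative call showing the string-valued parameter dictionary registerAtomType expects (the same attributes the XML parser hands it). The 'oplsaa' forcefield name and the new atom type below are assumptions made for this sketch, not part of the record:

```python
from foyer import Forcefield

ff = Forcefield(name='oplsaa')        # assumes the bundled OPLS-AA XML ships with foyer

ff.registerAtomType({
    'name': 'demo_CT',                # hypothetical type; must not collide with an existing name
    'class': 'CT',
    'mass': '12.011',                 # strings, converted by _convertParameterToNumber
    'element': 'C',
    'def': '[C;X4]',                  # SMARTS definition stored in atomTypeDefinitions
    'desc': 'illustrative sp3 carbon',
    'doi': '10.1021/ja9621760',
})

print('demo_CT' in ff.atomTypeDefinitions)   # True
```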
3,081 | mosdef-hub/foyer | foyer/forcefield.py | Forcefield.run_atomtyping | def run_atomtyping(self, topology, use_residue_map=True):
"""Atomtype the topology
Parameters
----------
topology : openmm.app.Topology
Molecular structure to find atom types of
use_residue_map : boolean, optional, default=True
Store atomtyped topologies of residues to a dictionary that maps
them to residue names. Each topology, including atomtypes, will be
copied to other residues with the same name. This avoids repeatedly
calling the subgraph isomorphism on identical residues and should
result in better performance for systems with many identical
residues, e.g. a box of water. Note that for this to be applied to
independent molecules, they must each be saved as different
residues in the topology.
"""
if use_residue_map:
independent_residues = _check_independent_residues(topology)
if independent_residues:
residue_map = dict()
for res in topology.residues():
if res.name not in residue_map.keys():
residue = _topology_from_residue(res)
find_atomtypes(residue, forcefield=self)
residue_map[res.name] = residue
for key, val in residue_map.items():
_update_atomtypes(topology, key, val)
else:
find_atomtypes(topology, forcefield=self)
else:
find_atomtypes(topology, forcefield=self)
if not all([a.id for a in topology.atoms()]):
raise ValueError('Not all atoms in topology have atom types')
return topology | python | def run_atomtyping(self, topology, use_residue_map=True):
"""Atomtype the topology
Parameters
----------
topology : openmm.app.Topology
Molecular structure to find atom types of
use_residue_map : boolean, optional, default=True
Store atomtyped topologies of residues to a dictionary that maps
them to residue names. Each topology, including atomtypes, will be
copied to other residues with the same name. This avoids repeatedly
calling the subgraph isomorphism on identical residues and should
result in better performance for systems with many identical
residues, e.g. a box of water. Note that for this to be applied to
independent molecules, they must each be saved as different
residues in the topology.
"""
if use_residue_map:
independent_residues = _check_independent_residues(topology)
if independent_residues:
residue_map = dict()
for res in topology.residues():
if res.name not in residue_map.keys():
residue = _topology_from_residue(res)
find_atomtypes(residue, forcefield=self)
residue_map[res.name] = residue
for key, val in residue_map.items():
_update_atomtypes(topology, key, val)
else:
find_atomtypes(topology, forcefield=self)
else:
find_atomtypes(topology, forcefield=self)
if not all([a.id for a in topology.atoms()]):
raise ValueError('Not all atoms in topology have atom types')
return topology | [
"def",
"run_atomtyping",
"(",
"self",
",",
"topology",
",",
"use_residue_map",
"=",
"True",
")",
":",
"if",
"use_residue_map",
":",
"independent_residues",
"=",
"_check_independent_residues",
"(",
"topology",
")",
"if",
"independent_residues",
":",
"residue_map",
"=",
"dict",
"(",
")",
"for",
"res",
"in",
"topology",
".",
"residues",
"(",
")",
":",
"if",
"res",
".",
"name",
"not",
"in",
"residue_map",
".",
"keys",
"(",
")",
":",
"residue",
"=",
"_topology_from_residue",
"(",
"res",
")",
"find_atomtypes",
"(",
"residue",
",",
"forcefield",
"=",
"self",
")",
"residue_map",
"[",
"res",
".",
"name",
"]",
"=",
"residue",
"for",
"key",
",",
"val",
"in",
"residue_map",
".",
"items",
"(",
")",
":",
"_update_atomtypes",
"(",
"topology",
",",
"key",
",",
"val",
")",
"else",
":",
"find_atomtypes",
"(",
"topology",
",",
"forcefield",
"=",
"self",
")",
"else",
":",
"find_atomtypes",
"(",
"topology",
",",
"forcefield",
"=",
"self",
")",
"if",
"not",
"all",
"(",
"[",
"a",
".",
"id",
"for",
"a",
"in",
"topology",
".",
"atoms",
"(",
")",
"]",
"[",
"0",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Not all atoms in topology have atom types'",
")",
"return",
"topology"
] | Atomtype the topology
Parameters
----------
topology : openmm.app.Topology
Molecular structure to find atom types of
use_residue_map : boolean, optional, default=True
Store atomtyped topologies of residues to a dictionary that maps
them to residue names. Each topology, including atomtypes, will be
copied to other residues with the same name. This avoids repeatedly
calling the subgraph isomorphism on identical residues and should
result in better performance for systems with many identical
residues, e.g. a box of water. Note that for this to be applied to
independent molecules, they must each be saved as different
residues in the topology. | [
"Atomtype",
"the",
"topology"
] | 9e39c71208fc01a6cc7b7cbe5a533c56830681d3 | https://github.com/mosdef-hub/foyer/blob/9e39c71208fc01a6cc7b7cbe5a533c56830681d3/foyer/forcefield.py#L452-L493 |
3,082 | mgedmin/check-manifest | check_manifest.py | cd | def cd(directory):
"""Change the current working directory, temporarily.
Use as a context manager: with cd(d): ...
"""
old_dir = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(old_dir) | python | def cd(directory):
"""Change the current working directory, temporarily.
Use as a context manager: with cd(d): ...
"""
old_dir = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(old_dir) | [
"def",
"cd",
"(",
"directory",
")",
":",
"old_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"directory",
")",
"yield",
"finally",
":",
"os",
".",
"chdir",
"(",
"old_dir",
")"
] | Change the current working directory, temporarily.
Use as a context manager: with cd(d): ... | [
"Change",
"the",
"current",
"working",
"directory",
"temporarily",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L164-L174 |
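A small usage sketch for the context manager above; it assumes the function is importable from a check_manifest module, as the path column suggests:

```python
import os
import tempfile

from check_manifest import cd

workdir = tempfile.mkdtemp()
print(os.getcwd())           # original working directory
with cd(workdir):
    print(os.getcwd())       # now inside workdir
print(os.getcwd())           # restored afterwards, even if the body raises
```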
3,083 | mgedmin/check-manifest | check_manifest.py | mkdtemp | def mkdtemp(hint=''):
"""Create a temporary directory, then clean it up.
Use as a context manager: with mkdtemp('-purpose'): ...
"""
dirname = tempfile.mkdtemp(prefix='check-manifest-', suffix=hint)
try:
yield dirname
finally:
rmtree(dirname) | python | def mkdtemp(hint=''):
"""Create a temporary directory, then clean it up.
Use as a context manager: with mkdtemp('-purpose'): ...
"""
dirname = tempfile.mkdtemp(prefix='check-manifest-', suffix=hint)
try:
yield dirname
finally:
rmtree(dirname) | [
"def",
"mkdtemp",
"(",
"hint",
"=",
"''",
")",
":",
"dirname",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'check-manifest-'",
",",
"suffix",
"=",
"hint",
")",
"try",
":",
"yield",
"dirname",
"finally",
":",
"rmtree",
"(",
"dirname",
")"
] | Create a temporary directory, then clean it up.
Use as a context manager: with mkdtemp('-purpose'): ... | [
"Create",
"a",
"temporary",
"directory",
"then",
"clean",
"it",
"up",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L178-L187 |
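A usage sketch for the temporary-directory helper above (again assuming a check_manifest import):

```python
import os

from check_manifest import mkdtemp

with mkdtemp('-demo') as d:          # directory name looks like check-manifest-XXXXXX-demo
    print(os.path.isdir(d))          # True while the block runs
print(os.path.isdir(d))              # False: the directory was removed on exit
```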
3,084 | mgedmin/check-manifest | check_manifest.py | chmod_plus | def chmod_plus(path, add_bits=stat.S_IWUSR):
"""Change a file's mode by adding a few bits.
Like chmod +<bits> <path> in a Unix shell.
"""
try:
os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | add_bits)
except OSError: # pragma: nocover
pass | python | def chmod_plus(path, add_bits=stat.S_IWUSR):
"""Change a file's mode by adding a few bits.
Like chmod +<bits> <path> in a Unix shell.
"""
try:
os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | add_bits)
except OSError: # pragma: nocover
pass | [
"def",
"chmod_plus",
"(",
"path",
",",
"add_bits",
"=",
"stat",
".",
"S_IWUSR",
")",
":",
"try",
":",
"os",
".",
"chmod",
"(",
"path",
",",
"stat",
".",
"S_IMODE",
"(",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
")",
"|",
"add_bits",
")",
"except",
"OSError",
":",
"# pragma: nocover",
"pass"
] | Change a file's mode by adding a few bits.
Like chmod +<bits> <path> in a Unix shell. | [
"Change",
"a",
"file",
"s",
"mode",
"by",
"adding",
"a",
"few",
"bits",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L190-L198 |
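A sketch of the default behaviour (adding the owner write bit, like `chmod u+w` in a shell); it assumes a POSIX-style permission model:

```python
import os
import stat
import tempfile

from check_manifest import chmod_plus

fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, stat.S_IRUSR)                        # make the file read-only
chmod_plus(path)                                    # default add_bits is stat.S_IWUSR
print(bool(os.stat(path).st_mode & stat.S_IWUSR))   # True
os.remove(path)
```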
3,085 | mgedmin/check-manifest | check_manifest.py | rmtree | def rmtree(path):
"""A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix.
"""
def onerror(func, path, exc_info):
# Did you know that on Python 3.3 on Windows os.remove() and
# os.unlink() are distinct functions?
if func is os.remove or func is os.unlink or func is os.rmdir:
if sys.platform != 'win32':
chmod_plus(os.path.dirname(path), stat.S_IWUSR | stat.S_IXUSR)
chmod_plus(path)
func(path)
else:
raise
shutil.rmtree(path, onerror=onerror) | python | def rmtree(path):
"""A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix.
"""
def onerror(func, path, exc_info):
# Did you know that on Python 3.3 on Windows os.remove() and
# os.unlink() are distinct functions?
if func is os.remove or func is os.unlink or func is os.rmdir:
if sys.platform != 'win32':
chmod_plus(os.path.dirname(path), stat.S_IWUSR | stat.S_IXUSR)
chmod_plus(path)
func(path)
else:
raise
shutil.rmtree(path, onerror=onerror) | [
"def",
"rmtree",
"(",
"path",
")",
":",
"def",
"onerror",
"(",
"func",
",",
"path",
",",
"exc_info",
")",
":",
"# Did you know what on Python 3.3 on Windows os.remove() and",
"# os.unlink() are distinct functions?",
"if",
"func",
"is",
"os",
".",
"remove",
"or",
"func",
"is",
"os",
".",
"unlink",
"or",
"func",
"is",
"os",
".",
"rmdir",
":",
"if",
"sys",
".",
"platform",
"!=",
"'win32'",
":",
"chmod_plus",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"stat",
".",
"S_IWUSR",
"|",
"stat",
".",
"S_IXUSR",
")",
"chmod_plus",
"(",
"path",
")",
"func",
"(",
"path",
")",
"else",
":",
"raise",
"shutil",
".",
"rmtree",
"(",
"path",
",",
"onerror",
"=",
"onerror",
")"
] | A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix. | [
"A",
"version",
"of",
"rmtree",
"that",
"can",
"deal",
"with",
"read",
"-",
"only",
"files",
"and",
"directories",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L201-L218 |
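A sketch showing why the onerror hook above matters: a tree containing a read-only file is still removed:

```python
import os
import stat
import tempfile

from check_manifest import rmtree

d = tempfile.mkdtemp()
locked = os.path.join(d, 'readonly.txt')
with open(locked, 'w') as f:
    f.write('locked\n')
os.chmod(locked, stat.S_IRUSR)      # read-only file; plain shutil.rmtree can fail on Windows
rmtree(d)                           # onerror restores the write bits and retries
print(os.path.exists(d))            # False
```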
3,086 | mgedmin/check-manifest | check_manifest.py | copy_files | def copy_files(filelist, destdir):
"""Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory.
"""
for filename in filelist:
destfile = os.path.join(destdir, filename)
# filename should not be absolute, but let's double-check
assert destfile.startswith(destdir + os.path.sep)
destfiledir = os.path.dirname(destfile)
if not os.path.isdir(destfiledir):
os.makedirs(destfiledir)
if os.path.isdir(filename):
os.mkdir(destfile)
else:
shutil.copy2(filename, destfile) | python | def copy_files(filelist, destdir):
"""Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory.
"""
for filename in filelist:
destfile = os.path.join(destdir, filename)
# filename should not be absolute, but let's double-check
assert destfile.startswith(destdir + os.path.sep)
destfiledir = os.path.dirname(destfile)
if not os.path.isdir(destfiledir):
os.makedirs(destfiledir)
if os.path.isdir(filename):
os.mkdir(destfile)
else:
shutil.copy2(filename, destfile) | [
"def",
"copy_files",
"(",
"filelist",
",",
"destdir",
")",
":",
"for",
"filename",
"in",
"filelist",
":",
"destfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destdir",
",",
"filename",
")",
"# filename should not be absolute, but let's double-check",
"assert",
"destfile",
".",
"startswith",
"(",
"destdir",
"+",
"os",
".",
"path",
".",
"sep",
")",
"destfiledir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"destfile",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"destfiledir",
")",
":",
"os",
".",
"makedirs",
"(",
"destfiledir",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
":",
"os",
".",
"mkdir",
"(",
"destfile",
")",
"else",
":",
"shutil",
".",
"copy2",
"(",
"filename",
",",
"destfile",
")"
] | Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory. | [
"Copy",
"a",
"list",
"of",
"files",
"to",
"destdir",
"preserving",
"directory",
"structure",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L221-L236 |
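A usage sketch for the structure-preserving copy above; file names are given relative to the current directory, so the sketch chdirs into the source tree first, reusing the cd helper from the same module:

```python
import os
import tempfile

from check_manifest import cd, copy_files

src = tempfile.mkdtemp()
dest = tempfile.mkdtemp()
with cd(src):
    os.makedirs(os.path.join('pkg', 'data'))
    open('setup.py', 'w').close()
    open(os.path.join('pkg', 'data', 'table.csv'), 'w').close()
    copy_files(['setup.py', os.path.join('pkg', 'data', 'table.csv')], dest)

print(sorted(os.listdir(dest)))                                        # ['pkg', 'setup.py']
print(os.path.exists(os.path.join(dest, 'pkg', 'data', 'table.csv')))  # True
```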
3,087 | mgedmin/check-manifest | check_manifest.py | get_one_file_in | def get_one_file_in(dirname):
"""Return the pathname of the one file in a directory.
Raises if the directory has no files or more than one file.
"""
files = os.listdir(dirname)
if len(files) > 1:
raise Failure('More than one file exists in %s:\n%s' %
(dirname, '\n'.join(sorted(files))))
elif not files:
raise Failure('No files found in %s' % dirname)
return os.path.join(dirname, files[0]) | python | def get_one_file_in(dirname):
"""Return the pathname of the one file in a directory.
Raises if the directory has no files or more than one file.
"""
files = os.listdir(dirname)
if len(files) > 1:
raise Failure('More than one file exists in %s:\n%s' %
(dirname, '\n'.join(sorted(files))))
elif not files:
raise Failure('No files found in %s' % dirname)
return os.path.join(dirname, files[0]) | [
"def",
"get_one_file_in",
"(",
"dirname",
")",
":",
"files",
"=",
"os",
".",
"listdir",
"(",
"dirname",
")",
"if",
"len",
"(",
"files",
")",
">",
"1",
":",
"raise",
"Failure",
"(",
"'More than one file exists in %s:\\n%s'",
"%",
"(",
"dirname",
",",
"'\\n'",
".",
"join",
"(",
"sorted",
"(",
"files",
")",
")",
")",
")",
"elif",
"not",
"files",
":",
"raise",
"Failure",
"(",
"'No files found in %s'",
"%",
"dirname",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"files",
"[",
"0",
"]",
")"
] | Return the pathname of the one file in a directory.
Raises if the directory has no files or more than one file. | [
"Return",
"the",
"pathname",
"of",
"the",
"one",
"file",
"in",
"a",
"directory",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L239-L250 |
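A sketch of the single-file lookup above (it raises Failure for zero files or more than one):

```python
import os
import tempfile

from check_manifest import get_one_file_in

d = tempfile.mkdtemp()
open(os.path.join(d, 'dist-0.1.tar.gz'), 'wb').close()
print(get_one_file_in(d))     # .../dist-0.1.tar.gz
```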
3,088 | mgedmin/check-manifest | check_manifest.py | unicodify | def unicodify(filename):
"""Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
"""
if isinstance(filename, bytes):
return filename.decode(locale.getpreferredencoding())
else:
return filename | python | def unicodify(filename):
"""Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode.
"""
if isinstance(filename, bytes):
return filename.decode(locale.getpreferredencoding())
else:
return filename | [
"def",
"unicodify",
"(",
"filename",
")",
":",
"if",
"isinstance",
"(",
"filename",
",",
"bytes",
")",
":",
"return",
"filename",
".",
"decode",
"(",
"locale",
".",
"getpreferredencoding",
"(",
")",
")",
"else",
":",
"return",
"filename"
] | Make sure filename is Unicode.
Because the tarfile module on Python 2 doesn't return Unicode. | [
"Make",
"sure",
"filename",
"is",
"Unicode",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L253-L261 |
3,089 | mgedmin/check-manifest | check_manifest.py | get_archive_file_list | def get_archive_file_list(archive_filename):
"""Return the list of files in an archive.
Supports .zip, .tar.gz, .tar.bz2, and plain .tar.
"""
if archive_filename.endswith('.zip'):
with closing(zipfile.ZipFile(archive_filename)) as zf:
return add_directories(zf.namelist())
elif archive_filename.endswith(('.tar.gz', '.tar.bz2', '.tar')):
with closing(tarfile.open(archive_filename)) as tf:
return add_directories(list(map(unicodify, tf.getnames())))
else:
ext = os.path.splitext(archive_filename)[-1]
raise Failure('Unrecognized archive type: %s' % ext) | python | def get_archive_file_list(archive_filename):
"""Return the list of files in an archive.
Supports .zip, .tar.gz, .tar.bz2, and plain .tar.
"""
if archive_filename.endswith('.zip'):
with closing(zipfile.ZipFile(archive_filename)) as zf:
return add_directories(zf.namelist())
elif archive_filename.endswith(('.tar.gz', '.tar.bz2', '.tar')):
with closing(tarfile.open(archive_filename)) as tf:
return add_directories(list(map(unicodify, tf.getnames())))
else:
ext = os.path.splitext(archive_filename)[-1]
raise Failure('Unrecognized archive type: %s' % ext) | [
"def",
"get_archive_file_list",
"(",
"archive_filename",
")",
":",
"if",
"archive_filename",
".",
"endswith",
"(",
"'.zip'",
")",
":",
"with",
"closing",
"(",
"zipfile",
".",
"ZipFile",
"(",
"archive_filename",
")",
")",
"as",
"zf",
":",
"return",
"add_directories",
"(",
"zf",
".",
"namelist",
"(",
")",
")",
"elif",
"archive_filename",
".",
"endswith",
"(",
"(",
"'.tar.gz'",
",",
"'.tar.bz2'",
",",
"'.tar'",
")",
")",
":",
"with",
"closing",
"(",
"tarfile",
".",
"open",
"(",
"archive_filename",
")",
")",
"as",
"tf",
":",
"return",
"add_directories",
"(",
"list",
"(",
"map",
"(",
"unicodify",
",",
"tf",
".",
"getnames",
"(",
")",
")",
")",
")",
"else",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"archive_filename",
")",
"[",
"-",
"1",
"]",
"raise",
"Failure",
"(",
"'Unrecognized archive type: %s'",
"%",
"ext",
")"
] | Return the list of files in an archive.
Supports .zip, .tar.gz, .tar.bz2, and plain .tar. | [
"Return",
"the",
"list",
"of",
"files",
"in",
"an",
"archive",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L264-L277 |
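A sketch that builds a tiny zip archive and lists it; the expected output assumes the module's add_directories() helper fills in parent directories, as the code above relies on:

```python
import os
import tempfile
import zipfile

from check_manifest import get_archive_file_list

archive = os.path.join(tempfile.mkdtemp(), 'sample.zip')
with zipfile.ZipFile(archive, 'w') as zf:
    zf.writestr('pkg/__init__.py', '')
    zf.writestr('setup.py', '# demo\n')

print(sorted(get_archive_file_list(archive)))
# ['pkg', 'pkg/__init__.py', 'setup.py']
```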
3,090 | mgedmin/check-manifest | check_manifest.py | strip_toplevel_name | def strip_toplevel_name(filelist):
"""Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
"""
if not filelist:
return filelist
prefix = filelist[0]
if '/' in prefix:
prefix = prefix.partition('/')[0] + '/'
names = filelist
else:
prefix = prefix + '/'
names = filelist[1:]
for name in names:
if not name.startswith(prefix):
raise Failure("File doesn't have the common prefix (%s): %s"
% (name, prefix))
return [name[len(prefix):] for name in names] | python | def strip_toplevel_name(filelist):
"""Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
"""
if not filelist:
return filelist
prefix = filelist[0]
if '/' in prefix:
prefix = prefix.partition('/')[0] + '/'
names = filelist
else:
prefix = prefix + '/'
names = filelist[1:]
for name in names:
if not name.startswith(prefix):
raise Failure("File doesn't have the common prefix (%s): %s"
% (name, prefix))
return [name[len(prefix):] for name in names] | [
"def",
"strip_toplevel_name",
"(",
"filelist",
")",
":",
"if",
"not",
"filelist",
":",
"return",
"filelist",
"prefix",
"=",
"filelist",
"[",
"0",
"]",
"if",
"'/'",
"in",
"prefix",
":",
"prefix",
"=",
"prefix",
".",
"partition",
"(",
"'/'",
")",
"[",
"0",
"]",
"+",
"'/'",
"names",
"=",
"filelist",
"else",
":",
"prefix",
"=",
"prefix",
"+",
"'/'",
"names",
"=",
"filelist",
"[",
"1",
":",
"]",
"for",
"name",
"in",
"names",
":",
"if",
"not",
"name",
".",
"startswith",
"(",
"prefix",
")",
":",
"raise",
"Failure",
"(",
"\"File doesn't have the common prefix (%s): %s\"",
"%",
"(",
"name",
",",
"prefix",
")",
")",
"return",
"[",
"name",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"for",
"name",
"in",
"names",
"]"
] | Strip toplevel name from a file list.
>>> strip_toplevel_name(['a', 'a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d']
>>> strip_toplevel_name(['a/b', 'a/c', 'a/c/d'])
['b', 'c', 'c/d'] | [
"Strip",
"toplevel",
"name",
"from",
"a",
"file",
"list",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L280-L303 |
3,091 | mgedmin/check-manifest | check_manifest.py | detect_vcs | def detect_vcs():
"""Detect the version control system used for the current directory."""
location = os.path.abspath('.')
while True:
for vcs in Git, Mercurial, Bazaar, Subversion:
if vcs.detect(location):
return vcs
parent = os.path.dirname(location)
if parent == location:
raise Failure("Couldn't find version control data"
" (git/hg/bzr/svn supported)")
location = parent | python | def detect_vcs():
"""Detect the version control system used for the current directory."""
location = os.path.abspath('.')
while True:
for vcs in Git, Mercurial, Bazaar, Subversion:
if vcs.detect(location):
return vcs
parent = os.path.dirname(location)
if parent == location:
raise Failure("Couldn't find version control data"
" (git/hg/bzr/svn supported)")
location = parent | [
"def",
"detect_vcs",
"(",
")",
":",
"location",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
"while",
"True",
":",
"for",
"vcs",
"in",
"Git",
",",
"Mercurial",
",",
"Bazaar",
",",
"Subversion",
":",
"if",
"vcs",
".",
"detect",
"(",
"location",
")",
":",
"return",
"vcs",
"parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"location",
")",
"if",
"parent",
"==",
"location",
":",
"raise",
"Failure",
"(",
"\"Couldn't find version control data\"",
"\" (git/hg/bzr/svn supported)\"",
")",
"location",
"=",
"parent"
] | Detect the version control system used for the current directory. | [
"Detect",
"the",
"version",
"control",
"system",
"used",
"for",
"the",
"current",
"directory",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L465-L476 |
3,092 | mgedmin/check-manifest | check_manifest.py | normalize_name | def normalize_name(name):
"""Some VCS print directory names with trailing slashes. Strip them.
Easiest is to normalize the path.
And encodings may trip us up too, especially when comparing lists
of files. Plus maybe lowercase versus uppercase.
"""
name = os.path.normpath(name)
name = unicodify(name)
if sys.platform == 'darwin':
# Mac OSX may have problems comparing non-ascii filenames, so
# we convert them.
name = unicodedata.normalize('NFC', name)
return name | python | def normalize_name(name):
"""Some VCS print directory names with trailing slashes. Strip them.
Easiest is to normalize the path.
And encodings may trip us up too, especially when comparing lists
of files. Plus maybe lowercase versus uppercase.
"""
name = os.path.normpath(name)
name = unicodify(name)
if sys.platform == 'darwin':
# Mac OSX may have problems comparing non-ascii filenames, so
# we convert them.
name = unicodedata.normalize('NFC', name)
return name | [
"def",
"normalize_name",
"(",
"name",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"name",
")",
"name",
"=",
"unicodify",
"(",
"name",
")",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# Mac OSX may have problems comparing non-ascii filenames, so",
"# we convert them.",
"name",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFC'",
",",
"name",
")",
"return",
"name"
] | Some VCS print directory names with trailing slashes. Strip them.
Easiest is to normalize the path.
And encodings may trip us up too, especially when comparing lists
of files. Plus maybe lowercase versus uppercase. | [
"Some",
"VCS",
"print",
"directory",
"names",
"with",
"trailing",
"slashes",
".",
"Strip",
"them",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L490-L504 |
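Two quick illustrations of the normalisation above:

```python
from check_manifest import normalize_name

print(normalize_name('docs/'))          # 'docs' -- trailing slash removed by normpath
print(normalize_name(b'README.rst'))    # 'README.rst' -- bytes decoded to text
```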
3,093 | mgedmin/check-manifest | check_manifest.py | read_config | def read_config():
"""Read configuration from file if possible."""
# XXX modifies global state, which is kind of evil
config = _load_config()
if config.get(CFG_IGNORE_DEFAULT_RULES[1], False):
del IGNORE[:]
if CFG_IGNORE[1] in config:
IGNORE.extend(p for p in config[CFG_IGNORE[1]] if p)
if CFG_IGNORE_BAD_IDEAS[1] in config:
IGNORE_BAD_IDEAS.extend(p for p in config[CFG_IGNORE_BAD_IDEAS[1]] if p) | python | def read_config():
"""Read configuration from file if possible."""
# XXX modifies global state, which is kind of evil
config = _load_config()
if config.get(CFG_IGNORE_DEFAULT_RULES[1], False):
del IGNORE[:]
if CFG_IGNORE[1] in config:
IGNORE.extend(p for p in config[CFG_IGNORE[1]] if p)
if CFG_IGNORE_BAD_IDEAS[1] in config:
IGNORE_BAD_IDEAS.extend(p for p in config[CFG_IGNORE_BAD_IDEAS[1]] if p) | [
"def",
"read_config",
"(",
")",
":",
"# XXX modifies global state, which is kind of evil",
"config",
"=",
"_load_config",
"(",
")",
"if",
"config",
".",
"get",
"(",
"CFG_IGNORE_DEFAULT_RULES",
"[",
"1",
"]",
",",
"False",
")",
":",
"del",
"IGNORE",
"[",
":",
"]",
"if",
"CFG_IGNORE",
"[",
"1",
"]",
"in",
"config",
":",
"IGNORE",
".",
"extend",
"(",
"p",
"for",
"p",
"in",
"config",
"[",
"CFG_IGNORE",
"[",
"1",
"]",
"]",
"if",
"p",
")",
"if",
"CFG_IGNORE_BAD_IDEAS",
"[",
"1",
"]",
"in",
"config",
":",
"IGNORE_BAD_IDEAS",
".",
"extend",
"(",
"p",
"for",
"p",
"in",
"config",
"[",
"CFG_IGNORE_BAD_IDEAS",
"[",
"1",
"]",
"]",
"if",
"p",
")"
] | Read configuration from file if possible. | [
"Read",
"configuration",
"from",
"file",
"if",
"possible",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L593-L602 |
3,094 | mgedmin/check-manifest | check_manifest.py | _load_config | def _load_config():
"""Searches for config files, reads them and returns a dictionary
Looks for a ``check-manifest`` section in ``pyproject.toml``,
``setup.cfg``, and ``tox.ini``, in that order. The first file
that exists and has that section will be loaded and returned as a
dictionary.
"""
if os.path.exists("pyproject.toml"):
config = toml.load("pyproject.toml")
if CFG_SECTION_CHECK_MANIFEST in config.get("tool", {}):
return config["tool"][CFG_SECTION_CHECK_MANIFEST]
search_files = ['setup.cfg', 'tox.ini']
config_parser = ConfigParser.ConfigParser()
for filename in search_files:
if (config_parser.read([filename])
and config_parser.has_section(CFG_SECTION_CHECK_MANIFEST)):
config = {}
if config_parser.has_option(*CFG_IGNORE_DEFAULT_RULES):
ignore_defaults = config_parser.getboolean(*CFG_IGNORE_DEFAULT_RULES)
config[CFG_IGNORE_DEFAULT_RULES[1]] = ignore_defaults
if config_parser.has_option(*CFG_IGNORE):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE).splitlines()
]
config[CFG_IGNORE[1]] = patterns
if config_parser.has_option(*CFG_IGNORE_BAD_IDEAS):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE_BAD_IDEAS).splitlines()
]
config[CFG_IGNORE_BAD_IDEAS[1]] = patterns
return config
return {} | python | def _load_config():
"""Searches for config files, reads them and returns a dictionary
Looks for a ``check-manifest`` section in ``pyproject.toml``,
``setup.cfg``, and ``tox.ini``, in that order. The first file
that exists and has that section will be loaded and returned as a
dictionary.
"""
if os.path.exists("pyproject.toml"):
config = toml.load("pyproject.toml")
if CFG_SECTION_CHECK_MANIFEST in config.get("tool", {}):
return config["tool"][CFG_SECTION_CHECK_MANIFEST]
search_files = ['setup.cfg', 'tox.ini']
config_parser = ConfigParser.ConfigParser()
for filename in search_files:
if (config_parser.read([filename])
and config_parser.has_section(CFG_SECTION_CHECK_MANIFEST)):
config = {}
if config_parser.has_option(*CFG_IGNORE_DEFAULT_RULES):
ignore_defaults = config_parser.getboolean(*CFG_IGNORE_DEFAULT_RULES)
config[CFG_IGNORE_DEFAULT_RULES[1]] = ignore_defaults
if config_parser.has_option(*CFG_IGNORE):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE).splitlines()
]
config[CFG_IGNORE[1]] = patterns
if config_parser.has_option(*CFG_IGNORE_BAD_IDEAS):
patterns = [
p.strip()
for p in config_parser.get(*CFG_IGNORE_BAD_IDEAS).splitlines()
]
config[CFG_IGNORE_BAD_IDEAS[1]] = patterns
return config
return {} | [
"def",
"_load_config",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"pyproject.toml\"",
")",
":",
"config",
"=",
"toml",
".",
"load",
"(",
"\"pyproject.toml\"",
")",
"if",
"CFG_SECTION_CHECK_MANIFEST",
"in",
"config",
".",
"get",
"(",
"\"tool\"",
",",
"{",
"}",
")",
":",
"return",
"config",
"[",
"\"tool\"",
"]",
"[",
"CFG_SECTION_CHECK_MANIFEST",
"]",
"search_files",
"=",
"[",
"'setup.cfg'",
",",
"'tox.ini'",
"]",
"config_parser",
"=",
"ConfigParser",
".",
"ConfigParser",
"(",
")",
"for",
"filename",
"in",
"search_files",
":",
"if",
"(",
"config_parser",
".",
"read",
"(",
"[",
"filename",
"]",
")",
"and",
"config_parser",
".",
"has_section",
"(",
"CFG_SECTION_CHECK_MANIFEST",
")",
")",
":",
"config",
"=",
"{",
"}",
"if",
"config_parser",
".",
"has_option",
"(",
"*",
"CFG_IGNORE_DEFAULT_RULES",
")",
":",
"ignore_defaults",
"=",
"config_parser",
".",
"getboolean",
"(",
"*",
"CFG_IGNORE_DEFAULT_RULES",
")",
"config",
"[",
"CFG_IGNORE_DEFAULT_RULES",
"[",
"1",
"]",
"]",
"=",
"ignore_defaults",
"if",
"config_parser",
".",
"has_option",
"(",
"*",
"CFG_IGNORE",
")",
":",
"patterns",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"config_parser",
".",
"get",
"(",
"*",
"CFG_IGNORE",
")",
".",
"splitlines",
"(",
")",
"]",
"config",
"[",
"CFG_IGNORE",
"[",
"1",
"]",
"]",
"=",
"patterns",
"if",
"config_parser",
".",
"has_option",
"(",
"*",
"CFG_IGNORE_BAD_IDEAS",
")",
":",
"patterns",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"config_parser",
".",
"get",
"(",
"*",
"CFG_IGNORE_BAD_IDEAS",
")",
".",
"splitlines",
"(",
")",
"]",
"config",
"[",
"CFG_IGNORE_BAD_IDEAS",
"[",
"1",
"]",
"]",
"=",
"patterns",
"return",
"config",
"return",
"{",
"}"
] | Searches for config files, reads them and returns a dictionary
Looks for a ``check-manifest`` section in ``pyproject.toml``,
``setup.cfg``, and ``tox.ini``, in that order. The first file
that exists and has that section will be loaded and returned as a
dictionary. | [
"Searches",
"for",
"config",
"files",
"reads",
"them",
"and",
"returns",
"a",
"dictionary"
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L605-L646 |
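An illustrative setup.cfg that the loader above would pick up when run from a project root. The option names follow check-manifest's documented [check-manifest] section; the exact dictionary keys are an assumption based on the CFG_* constants used in the code:

```python
import tempfile

from check_manifest import cd, _load_config

SETUP_CFG = """\
[check-manifest]
ignore = .travis.yml
    docs/_build/*
ignore-bad-ideas = *.mo
"""

with cd(tempfile.mkdtemp()):
    with open('setup.cfg', 'w') as f:
        f.write(SETUP_CFG)
    config = _load_config()

print(config)
# expected shape: {'ignore': ['.travis.yml', 'docs/_build/*'], 'ignore-bad-ideas': ['*.mo']}
```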
3,095 | mgedmin/check-manifest | check_manifest.py | read_manifest | def read_manifest():
"""Read existing configuration from MANIFEST.in.
We use that to ignore anything the MANIFEST.in ignores.
"""
# XXX modifies global state, which is kind of evil
if not os.path.isfile('MANIFEST.in'):
return
ignore, ignore_regexps = _get_ignore_from_manifest('MANIFEST.in')
IGNORE.extend(ignore)
IGNORE_REGEXPS.extend(ignore_regexps) | python | def read_manifest():
"""Read existing configuration from MANIFEST.in.
We use that to ignore anything the MANIFEST.in ignores.
"""
# XXX modifies global state, which is kind of evil
if not os.path.isfile('MANIFEST.in'):
return
ignore, ignore_regexps = _get_ignore_from_manifest('MANIFEST.in')
IGNORE.extend(ignore)
IGNORE_REGEXPS.extend(ignore_regexps) | [
"def",
"read_manifest",
"(",
")",
":",
"# XXX modifies global state, which is kind of evil",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"'MANIFEST.in'",
")",
":",
"return",
"ignore",
",",
"ignore_regexps",
"=",
"_get_ignore_from_manifest",
"(",
"'MANIFEST.in'",
")",
"IGNORE",
".",
"extend",
"(",
"ignore",
")",
"IGNORE_REGEXPS",
".",
"extend",
"(",
"ignore_regexps",
")"
] | Read existing configuration from MANIFEST.in.
We use that to ignore anything the MANIFEST.in ignores. | [
"Read",
"existing",
"configuration",
"from",
"MANIFEST",
".",
"in",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L649-L659 |
3,096 | mgedmin/check-manifest | check_manifest.py | file_matches | def file_matches(filename, patterns):
"""Does this filename match any of the patterns?"""
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns) | python | def file_matches(filename, patterns):
"""Does this filename match any of the patterns?"""
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns) | [
"def",
"file_matches",
"(",
"filename",
",",
"patterns",
")",
":",
"return",
"any",
"(",
"fnmatch",
".",
"fnmatch",
"(",
"filename",
",",
"pat",
")",
"or",
"fnmatch",
".",
"fnmatch",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
",",
"pat",
")",
"for",
"pat",
"in",
"patterns",
")"
] | Does this filename match any of the patterns? | [
"Does",
"this",
"filename",
"match",
"any",
"of",
"the",
"patterns?"
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L774-L778 |
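Quick checks of the glob matching above; note that a pattern matches either the whole relative path or just the basename:

```python
from check_manifest import file_matches

print(file_matches('docs/Makefile', ['Makefile']))         # True: basename match
print(file_matches('pkg/data/logo.png', ['*.png']))        # True: full-path fnmatch
print(file_matches('pkg/module.py', ['*.png', '*.pyc']))   # False
```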
3,097 | mgedmin/check-manifest | check_manifest.py | file_matches_regexps | def file_matches_regexps(filename, patterns):
"""Does this filename match any of the regular expressions?"""
return any(re.match(pat, filename) for pat in patterns) | python | def file_matches_regexps(filename, patterns):
"""Does this filename match any of the regular expressions?"""
return any(re.match(pat, filename) for pat in patterns) | [
"def",
"file_matches_regexps",
"(",
"filename",
",",
"patterns",
")",
":",
"return",
"any",
"(",
"re",
".",
"match",
"(",
"pat",
",",
"filename",
")",
"for",
"pat",
"in",
"patterns",
")"
] | Does this filename match any of the regular expressions? | [
"Does",
"this",
"filename",
"match",
"any",
"of",
"the",
"regular",
"expressions?"
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L781-L783 |
3,098 | mgedmin/check-manifest | check_manifest.py | strip_sdist_extras | def strip_sdist_extras(filelist):
"""Strip generated files that are only present in source distributions.
We also strip files that are ignored for other reasons, like
command line arguments, setup.cfg rules or MANIFEST.in rules.
"""
return [name for name in filelist
if not file_matches(name, IGNORE)
and not file_matches_regexps(name, IGNORE_REGEXPS)] | python | def strip_sdist_extras(filelist):
"""Strip generated files that are only present in source distributions.
We also strip files that are ignored for other reasons, like
command line arguments, setup.cfg rules or MANIFEST.in rules.
"""
return [name for name in filelist
if not file_matches(name, IGNORE)
and not file_matches_regexps(name, IGNORE_REGEXPS)] | [
"def",
"strip_sdist_extras",
"(",
"filelist",
")",
":",
"return",
"[",
"name",
"for",
"name",
"in",
"filelist",
"if",
"not",
"file_matches",
"(",
"name",
",",
"IGNORE",
")",
"and",
"not",
"file_matches_regexps",
"(",
"name",
",",
"IGNORE_REGEXPS",
")",
"]"
] | Strip generated files that are only present in source distributions.
We also strip files that are ignored for other reasons, like
command line arguments, setup.cfg rules or MANIFEST.in rules. | [
"Strip",
"generated",
"files",
"that",
"are",
"only",
"present",
"in",
"source",
"distributions",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L786-L794 |
3,099 | mgedmin/check-manifest | check_manifest.py | find_suggestions | def find_suggestions(filelist):
"""Suggest MANIFEST.in patterns for missing files."""
suggestions = set()
unknowns = []
for filename in filelist:
if os.path.isdir(filename):
# it's impossible to add empty directories via MANIFEST.in anyway,
# and non-empty directories will be added automatically when we
# specify patterns for files inside them
continue
for pattern, suggestion in SUGGESTIONS:
m = pattern.match(filename)
if m is not None:
suggestions.add(pattern.sub(suggestion, filename))
break
else:
unknowns.append(filename)
return sorted(suggestions), unknowns | python | def find_suggestions(filelist):
"""Suggest MANIFEST.in patterns for missing files."""
suggestions = set()
unknowns = []
for filename in filelist:
if os.path.isdir(filename):
# it's impossible to add empty directories via MANIFEST.in anyway,
# and non-empty directories will be added automatically when we
# specify patterns for files inside them
continue
for pattern, suggestion in SUGGESTIONS:
m = pattern.match(filename)
if m is not None:
suggestions.add(pattern.sub(suggestion, filename))
break
else:
unknowns.append(filename)
return sorted(suggestions), unknowns | [
"def",
"find_suggestions",
"(",
"filelist",
")",
":",
"suggestions",
"=",
"set",
"(",
")",
"unknowns",
"=",
"[",
"]",
"for",
"filename",
"in",
"filelist",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
":",
"# it's impossible to add empty directories via MANIFEST.in anyway,",
"# and non-empty directories will be added automatically when we",
"# specify patterns for files inside them",
"continue",
"for",
"pattern",
",",
"suggestion",
"in",
"SUGGESTIONS",
":",
"m",
"=",
"pattern",
".",
"match",
"(",
"filename",
")",
"if",
"m",
"is",
"not",
"None",
":",
"suggestions",
".",
"add",
"(",
"pattern",
".",
"sub",
"(",
"suggestion",
",",
"filename",
")",
")",
"break",
"else",
":",
"unknowns",
".",
"append",
"(",
"filename",
")",
"return",
"sorted",
"(",
"suggestions",
")",
",",
"unknowns"
] | Suggest MANIFEST.in patterns for missing files. | [
"Suggest",
"MANIFEST",
".",
"in",
"patterns",
"for",
"missing",
"files",
"."
] | 7f787e8272f56c5750670bfb3223509e0df72708 | https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L803-L820 |