identifier (string, lengths 1–155) | parameters (string, 2–6.09k) | docstring (string, 11–63.4k) | docstring_summary (string, 0–63.4k) | function (string, 29–99.8k) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (stringclasses, 1 value) | docstring_language (string, 2–7) | docstring_language_predictions (string, 18–23) | is_langid_reliable (stringclasses, 2 values)
---|---|---|---|---|---|---|---|---|---|---|---
Layer._make_feature | (self, feat_id) |
Helper routine for __getitem__ that constructs a Feature from the given
Feature ID. If the OGR Layer does not support random-access reading,
then the layer's features are iterated through until a Feature matching
the given feature ID is found.
|
Helper routine for __getitem__ that constructs a Feature from the given
Feature ID. If the OGR Layer does not support random-access reading,
then the layer's features are iterated through until a Feature matching
the given feature ID is found.
| def _make_feature(self, feat_id):
"""
Helper routine for __getitem__ that constructs a Feature from the given
Feature ID. If the OGR Layer does not support random-access reading,
then the layer's features are iterated through until a Feature matching
the given feature ID is found.
"""
if self._random_read:
# If the Layer supports random reading, return.
try:
return Feature(capi.get_feature(self.ptr, feat_id), self)
except GDALException:
pass
else:
# Random access isn't supported, have to increment through
# each feature until the given feature ID is encountered.
for feat in self:
if feat.fid == feat_id:
return feat
# Should have returned a Feature, raise an IndexError.
raise IndexError('Invalid feature id: %s.' % feat_id) | [
"def",
"_make_feature",
"(",
"self",
",",
"feat_id",
")",
":",
"if",
"self",
".",
"_random_read",
":",
"# If the Layer supports random reading, return.",
"try",
":",
"return",
"Feature",
"(",
"capi",
".",
"get_feature",
"(",
"self",
".",
"ptr",
",",
"feat_id",
")",
",",
"self",
")",
"except",
"GDALException",
":",
"pass",
"else",
":",
"# Random access isn't supported, have to increment through",
"# each feature until the given feature ID is encountered.",
"for",
"feat",
"in",
"self",
":",
"if",
"feat",
".",
"fid",
"==",
"feat_id",
":",
"return",
"feat",
"# Should have returned a Feature, raise an IndexError.",
"raise",
"IndexError",
"(",
"'Invalid feature id: %s.'",
"%",
"feat_id",
")"
] | [
69,
4
] | [
89,
61
] | python | en | ['en', 'error', 'th'] | False |
Layer.extent | (self) | Return the extent (an Envelope) of this layer. | Return the extent (an Envelope) of this layer. | def extent(self):
"Return the extent (an Envelope) of this layer."
env = OGREnvelope()
capi.get_extent(self.ptr, byref(env), 1)
return Envelope(env) | [
"def",
"extent",
"(",
"self",
")",
":",
"env",
"=",
"OGREnvelope",
"(",
")",
"capi",
".",
"get_extent",
"(",
"self",
".",
"ptr",
",",
"byref",
"(",
"env",
")",
",",
"1",
")",
"return",
"Envelope",
"(",
"env",
")"
] | [
93,
4
] | [
97,
28
] | python | en | ['en', 'en', 'en'] | True |
Layer.name | (self) | Return the name of this layer in the Data Source. | Return the name of this layer in the Data Source. | def name(self):
"Return the name of this layer in the Data Source."
name = capi.get_fd_name(self._ldefn)
return force_str(name, self._ds.encoding, strings_only=True) | [
"def",
"name",
"(",
"self",
")",
":",
"name",
"=",
"capi",
".",
"get_fd_name",
"(",
"self",
".",
"_ldefn",
")",
"return",
"force_str",
"(",
"name",
",",
"self",
".",
"_ds",
".",
"encoding",
",",
"strings_only",
"=",
"True",
")"
] | [
100,
4
] | [
103,
68
] | python | en | ['en', 'en', 'en'] | True |
Layer.num_feat | (self, force=1) | Return the number of features in the Layer. | Return the number of features in the Layer. | def num_feat(self, force=1):
"Return the number of features in the Layer."
return capi.get_feature_count(self.ptr, force) | [
"def",
"num_feat",
"(",
"self",
",",
"force",
"=",
"1",
")",
":",
"return",
"capi",
".",
"get_feature_count",
"(",
"self",
".",
"ptr",
",",
"force",
")"
] | [
106,
4
] | [
108,
54
] | python | en | ['en', 'en', 'en'] | True |
Layer.num_fields | (self) | Return the number of fields in the Layer. | Return the number of fields in the Layer. | def num_fields(self):
"Return the number of fields in the Layer."
return capi.get_field_count(self._ldefn) | [
"def",
"num_fields",
"(",
"self",
")",
":",
"return",
"capi",
".",
"get_field_count",
"(",
"self",
".",
"_ldefn",
")"
] | [
111,
4
] | [
113,
48
] | python | en | ['en', 'en', 'en'] | True |
Layer.geom_type | (self) | Return the geometry type (OGRGeomType) of the Layer. | Return the geometry type (OGRGeomType) of the Layer. | def geom_type(self):
"Return the geometry type (OGRGeomType) of the Layer."
return OGRGeomType(capi.get_fd_geom_type(self._ldefn)) | [
"def",
"geom_type",
"(",
"self",
")",
":",
"return",
"OGRGeomType",
"(",
"capi",
".",
"get_fd_geom_type",
"(",
"self",
".",
"_ldefn",
")",
")"
] | [
116,
4
] | [
118,
62
] | python | en | ['en', 'en', 'en'] | True |
Layer.srs | (self) | Return the Spatial Reference used in this Layer. | Return the Spatial Reference used in this Layer. | def srs(self):
"Return the Spatial Reference used in this Layer."
try:
ptr = capi.get_layer_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(ptr))
except SRSException:
return None | [
"def",
"srs",
"(",
"self",
")",
":",
"try",
":",
"ptr",
"=",
"capi",
".",
"get_layer_srs",
"(",
"self",
".",
"ptr",
")",
"return",
"SpatialReference",
"(",
"srs_api",
".",
"clone_srs",
"(",
"ptr",
")",
")",
"except",
"SRSException",
":",
"return",
"None"
] | [
121,
4
] | [
127,
23
] | python | en | ['en', 'en', 'en'] | True |
Layer.fields | (self) |
Return a list of string names corresponding to each of the Fields
available in this Layer.
|
Return a list of string names corresponding to each of the Fields
available in this Layer.
| def fields(self):
"""
Return a list of string names corresponding to each of the Fields
available in this Layer.
"""
return [force_str(
capi.get_field_name(capi.get_field_defn(self._ldefn, i)),
self._ds.encoding, strings_only=True,
) for i in range(self.num_fields)] | [
"def",
"fields",
"(",
"self",
")",
":",
"return",
"[",
"force_str",
"(",
"capi",
".",
"get_field_name",
"(",
"capi",
".",
"get_field_defn",
"(",
"self",
".",
"_ldefn",
",",
"i",
")",
")",
",",
"self",
".",
"_ds",
".",
"encoding",
",",
"strings_only",
"=",
"True",
",",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_fields",
")",
"]"
] | [
130,
4
] | [
138,
42
] | python | en | ['en', 'error', 'th'] | False |
Layer.field_types | (self) |
Return a list of the types of fields in this Layer. For example,
return the list [OFTInteger, OFTReal, OFTString] for an OGR layer that
has integer, floating-point, and string fields.
|
Return a list of the types of fields in this Layer. For example,
return the list [OFTInteger, OFTReal, OFTString] for an OGR layer that
has integer, floating-point, and string fields.
| def field_types(self):
"""
Return a list of the types of fields in this Layer. For example,
return the list [OFTInteger, OFTReal, OFTString] for an OGR layer that
has integer, floating-point, and string fields.
"""
return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))]
for i in range(self.num_fields)] | [
"def",
"field_types",
"(",
"self",
")",
":",
"return",
"[",
"OGRFieldTypes",
"[",
"capi",
".",
"get_field_type",
"(",
"capi",
".",
"get_field_defn",
"(",
"self",
".",
"_ldefn",
",",
"i",
")",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_fields",
")",
"]"
] | [
141,
4
] | [
148,
48
] | python | en | ['en', 'error', 'th'] | False |
Layer.field_widths | (self) | Return a list of the maximum field widths for the features. | Return a list of the maximum field widths for the features. | def field_widths(self):
"Return a list of the maximum field widths for the features."
return [capi.get_field_width(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)] | [
"def",
"field_widths",
"(",
"self",
")",
":",
"return",
"[",
"capi",
".",
"get_field_width",
"(",
"capi",
".",
"get_field_defn",
"(",
"self",
".",
"_ldefn",
",",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_fields",
")",
"]"
] | [
151,
4
] | [
154,
48
] | python | en | ['en', 'en', 'en'] | True |
Layer.field_precisions | (self) | Return the field precisions for the features. | Return the field precisions for the features. | def field_precisions(self):
"Return the field precisions for the features."
return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i))
for i in range(self.num_fields)] | [
"def",
"field_precisions",
"(",
"self",
")",
":",
"return",
"[",
"capi",
".",
"get_field_precision",
"(",
"capi",
".",
"get_field_defn",
"(",
"self",
".",
"_ldefn",
",",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_fields",
")",
"]"
] | [
157,
4
] | [
160,
48
] | python | en | ['en', 'en', 'en'] | True |
Layer.get_fields | (self, field_name) |
Return a list containing the given field name for every Feature
in the Layer.
|
Return a list containing the given field name for every Feature
in the Layer.
| def get_fields(self, field_name):
"""
Return a list containing the given field name for every Feature
in the Layer.
"""
if field_name not in self.fields:
raise GDALException('invalid field name: %s' % field_name)
return [feat.get(field_name) for feat in self] | [
"def",
"get_fields",
"(",
"self",
",",
"field_name",
")",
":",
"if",
"field_name",
"not",
"in",
"self",
".",
"fields",
":",
"raise",
"GDALException",
"(",
"'invalid field name: %s'",
"%",
"field_name",
")",
"return",
"[",
"feat",
".",
"get",
"(",
"field_name",
")",
"for",
"feat",
"in",
"self",
"]"
] | [
186,
4
] | [
193,
54
] | python | en | ['en', 'error', 'th'] | False |
Layer.get_geoms | (self, geos=False) |
Return a list containing the OGRGeometry for every Feature in
the Layer.
|
Return a list containing the OGRGeometry for every Feature in
the Layer.
| def get_geoms(self, geos=False):
"""
Return a list containing the OGRGeometry for every Feature in
the Layer.
"""
if geos:
from django.contrib.gis.geos import GEOSGeometry
return [GEOSGeometry(feat.geom.wkb) for feat in self]
else:
return [feat.geom for feat in self] | [
"def",
"get_geoms",
"(",
"self",
",",
"geos",
"=",
"False",
")",
":",
"if",
"geos",
":",
"from",
"django",
".",
"contrib",
".",
"gis",
".",
"geos",
"import",
"GEOSGeometry",
"return",
"[",
"GEOSGeometry",
"(",
"feat",
".",
"geom",
".",
"wkb",
")",
"for",
"feat",
"in",
"self",
"]",
"else",
":",
"return",
"[",
"feat",
".",
"geom",
"for",
"feat",
"in",
"self",
"]"
] | [
195,
4
] | [
204,
47
] | python | en | ['en', 'error', 'th'] | False |
Layer.test_capability | (self, capability) |
Return a bool indicating whether this Layer supports the given
capability (a string). Valid capability strings include:
'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
'DeleteFeature', and 'FastSetNextByIndex'.
|
Return a bool indicating whether this Layer supports the given
capability (a string). Valid capability strings include:
'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
'DeleteFeature', and 'FastSetNextByIndex'.
| def test_capability(self, capability):
"""
Return a bool indicating whether this Layer supports the given
capability (a string). Valid capability strings include:
'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
'DeleteFeature', and 'FastSetNextByIndex'.
"""
return bool(capi.test_capability(self.ptr, force_bytes(capability))) | [
"def",
"test_capability",
"(",
"self",
",",
"capability",
")",
":",
"return",
"bool",
"(",
"capi",
".",
"test_capability",
"(",
"self",
".",
"ptr",
",",
"force_bytes",
"(",
"capability",
")",
")",
")"
] | [
206,
4
] | [
214,
76
] | python | en | ['en', 'error', 'th'] | False |
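The Layer accessors collected above are normally reached through a GDAL DataSource. A minimal usage sketch (the shapefile path and printed values are hypothetical, not from this dataset) exercising those methods:

```python
from django.contrib.gis.gdal import DataSource

ds = DataSource("/data/cities.shp")  # hypothetical path
layer = ds[0]  # first Layer in the data source

print(layer.name, layer.geom_type, layer.num_feat)
print(layer.fields, layer.field_types, layer.field_widths, layer.field_precisions)
print(layer.extent.tuple, layer.srs)

# Indexing goes through Layer._make_feature, which falls back to a linear
# scan when the OGR driver does not support random reads.
if layer.test_capability("RandomRead"):
    first = layer[0]

names = layer.get_fields(layer.fields[0])   # one column value per feature
geoms = layer.get_geoms(geos=True)          # geometries as GEOSGeometry objects
```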
update_record_count | (count_a, count_b) | Updates the running number of records processed.
Given previous running total and current batch size, return new running total.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
count_b: tf.int64 scalar tensor of current batch size.
Returns:
A tf.int64 scalar tensor of new running total of records.
| Updates the running number of records processed. | def update_record_count(count_a, count_b):
"""Updates the running number of records processed.
Given previous running total and current batch size, return new running total.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
count_b: tf.int64 scalar tensor of current batch size.
Returns:
A tf.int64 scalar tensor of new running total of records.
"""
return count_a + count_b | [
"def",
"update_record_count",
"(",
"count_a",
",",
"count_b",
")",
":",
"return",
"count_a",
"+",
"count_b"
] | [
4,
0
] | [
16,
26
] | python | en | ['en', 'en', 'en'] | True |
update_mean_incremental | (count_a, mean_a, value_b) | Updates the running mean vector incrementally.
Given previous running total, running column means, and single example's
column values, return new running column means.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
value_b: tf.float64 vector tensor of single example's column values.
Returns:
A tf.float64 vector tensor of new running column means.
| Updates the running mean vector incrementally. | def update_mean_incremental(count_a, mean_a, value_b):
"""Updates the running mean vector incrementally.
Given previous running total, running column means, and single example's
column values, return new running column means.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
value_b: tf.float64 vector tensor of single example's column values.
Returns:
A tf.float64 vector tensor of new running column means.
"""
umean_a = mean_a * tf.cast(x=count_a, dtype=tf.float64)
mean_ab_num = umean_a + tf.squeeze(input=value_b, axis=0)
mean_ab = mean_ab_num / tf.cast(x=count_a + 1, dtype=tf.float64)
return mean_ab | [
"def",
"update_mean_incremental",
"(",
"count_a",
",",
"mean_a",
",",
"value_b",
")",
":",
"umean_a",
"=",
"mean_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"mean_ab_num",
"=",
"umean_a",
"+",
"tf",
".",
"squeeze",
"(",
"input",
"=",
"value_b",
",",
"axis",
"=",
"0",
")",
"mean_ab",
"=",
"mean_ab_num",
"/",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"return",
"mean_ab"
] | [
22,
0
] | [
40,
16
] | python | en | ['en', 'en', 'en'] | True |
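For reference, the single-example update implemented by update_mean_incremental above is, writing $n_a$ for the prior record count, $\mu_a$ for the prior running mean, and $x_b$ for the new example:

$$\mu_{ab} = \frac{n_a\,\mu_a + x_b}{n_a + 1}$$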
update_cov_incremental | (
count_a, mean_a, cov_a, value_b, mean_ab, sample_cov) | Updates the running covariance matrix incrementally.
Given previous running total, running column means, running covariance matrix,
single example's column values, new running column means, and whether to use
sample covariance or not, return new running covariance matrix.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
cov_a: tf.float64 matrix tensor of previous running covariance matrix.
value_b: tf.float64 vector tensor of single example's column values.
mean_ab: tf.float64 vector tensor of new running column means.
sample_cov: Bool flag on whether sample or population covariance is used.
Returns:
A tf.float64 matrix tensor of new covariance matrix.
| Updates the running covariance matrix incrementally. | def update_cov_incremental(
count_a, mean_a, cov_a, value_b, mean_ab, sample_cov):
"""Updates the running covariance matrix incrementally.
Given previous running total, running column means, running covariance matrix,
single example's column values, new running column means, and whether to use
sample covariance or not, return new running covariance matrix.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
cov_a: tf.float64 matrix tensor of previous running covariance matrix.
value_b: tf.float64 vector tensor of single example's column values.
mean_ab: tf.float64 vector tensor of new running column means.
sample_cov: Bool flag on whether sample or population covariance is used.
Returns:
A tf.float64 matrix tensor of new covariance matrix.
"""
mean_diff = tf.matmul(
a=value_b - mean_a, b=value_b - mean_ab, transpose_a=True)
if sample_cov:
ucov_a = cov_a * tf.cast(x=count_a - 1, dtype=tf.float64)
cov_ab = (ucov_a + mean_diff) / tf.cast(x=count_a, dtype=tf.float64)
else:
ucov_a = cov_a * tf.cast(x=count_a, dtype=tf.float64)
cov_ab = (ucov_a + mean_diff) / tf.cast(x=count_a + 1, dtype=tf.float64)
return cov_ab | [
"def",
"update_cov_incremental",
"(",
"count_a",
",",
"mean_a",
",",
"cov_a",
",",
"value_b",
",",
"mean_ab",
",",
"sample_cov",
")",
":",
"mean_diff",
"=",
"tf",
".",
"matmul",
"(",
"a",
"=",
"value_b",
"-",
"mean_a",
",",
"b",
"=",
"value_b",
"-",
"mean_ab",
",",
"transpose_a",
"=",
"True",
")",
"if",
"sample_cov",
":",
"ucov_a",
"=",
"cov_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"cov_ab",
"=",
"(",
"ucov_a",
"+",
"mean_diff",
")",
"/",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"ucov_a",
"=",
"cov_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"cov_ab",
"=",
"(",
"ucov_a",
"+",
"mean_diff",
")",
"/",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"return",
"cov_ab"
] | [
44,
0
] | [
73,
15
] | python | en | ['en', 'de', 'en'] | True |
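The covariance update implemented by update_cov_incremental above corresponds to, with $\mu_{ab}$ the updated mean from update_mean_incremental:

$$\Sigma_{ab} = \frac{(n_a - 1)\,\Sigma_a + (x_b - \mu_a)^{\mathsf{T}}(x_b - \mu_{ab})}{n_a} \quad (\text{sample\_cov=True}), \qquad \Sigma_{ab} = \frac{n_a\,\Sigma_a + (x_b - \mu_a)^{\mathsf{T}}(x_b - \mu_{ab})}{n_a + 1} \quad (\text{sample\_cov=False})$$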
singleton_batch_cov_variable_updating | (
inner_size, X, count_variable, mean_variable, cov_variable) | Updates mahalanobis variables incrementally when number_of_rows equals 1.
Given the inner size of the matrix, the data vector X, the variable tracking
running record counts, the variable tracking running column means, and the
variable tracking running covariance matrix, returns updated running
covariance matrix, running column means, and running record count variables.
Args:
inner_size: Inner size of matrix X.
X: tf.float64 matrix tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 vector variable tracking running column means.
cov_variable: tf.float64 matrix variable tracking running covariance matrix.
Returns:
Updated running covariance matrix, running column means, and running record
count variables.
| Updates mahalanobis variables incrementally when number_of_rows equals 1. | def singleton_batch_cov_variable_updating(
inner_size, X, count_variable, mean_variable, cov_variable):
"""Updates mahalanobis variables incrementally when number_of_rows equals 1.
Given the inner size of the matrix, the data vector X, the variable tracking
running record counts, the variable tracking running column means, and the
variable tracking running covariance matrix, returns updated running
covariance matrix, running column means, and running record count variables.
Args:
inner_size: Inner size of matrix X.
X: tf.float64 matrix tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 vector variable tracking running column means.
cov_variable: tf.float64 matrix variable tracking running covariance matrix.
Returns:
Updated running covariance matrix, running column means, and running record
count variables.
"""
# Calculate new combined mean for incremental covariance matrix calculation
# time_shape = (num_feat,), features_shape = (seq_len,)
mean_ab = update_mean_incremental(
count_a=count_variable, mean_a=mean_variable, value_b=X)
# Update running variables from single example
# time_shape = (), features_shape = ()
count_tensor = update_record_count(count_a=count_variable, count_b=1)
# time_shape = (num_feat,), features_shape = (seq_len,)
mean_tensor = mean_ab
# Check if inner dimension is greater than 1 to calculate covariance matrix
if inner_size == 1:
cov_tensor = tf.zeros_like(tensor=cov_variable, dtype=tf.float64)
else:
# time_shape = (num_feat, num_feat)
# features_shape = (seq_len, seq_len)
cov_tensor = update_cov_incremental(
count_a=count_variable,
mean_a=mean_variable,
cov_a=cov_variable,
value_b=X,
mean_ab=mean_ab,
sample_cov=True)
# Assign values to variables, use control dependencies around return to
# enforce the mahalanobis variables to be assigned, the control order matters,
# hence the separate contexts.
with tf.control_dependencies(
control_inputs=[tf.assign(ref=cov_variable, value=cov_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):
return (tf.identity(input=cov_variable),
tf.identity(input=mean_variable),
tf.identity(input=count_variable)) | [
"def",
"singleton_batch_cov_variable_updating",
"(",
"inner_size",
",",
"X",
",",
"count_variable",
",",
"mean_variable",
",",
"cov_variable",
")",
":",
"# Calculate new combined mean for incremental covariance matrix calculation",
"# time_shape = (num_feat,), features_shape = (seq_len,)",
"mean_ab",
"=",
"update_mean_incremental",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"value_b",
"=",
"X",
")",
"# Update running variables from single example",
"# time_shape = (), features_shape = ()",
"count_tensor",
"=",
"update_record_count",
"(",
"count_a",
"=",
"count_variable",
",",
"count_b",
"=",
"1",
")",
"# time_shape = (num_feat,), features_shape = (seq_len,)",
"mean_tensor",
"=",
"mean_ab",
"# Check if inner dimension is greater than 1 to calculate covariance matrix",
"if",
"inner_size",
"==",
"1",
":",
"cov_tensor",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"cov_variable",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"# time_shape = (num_feat, num_feat)",
"# features_shape = (seq_len, seq_len)",
"cov_tensor",
"=",
"update_cov_incremental",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"cov_a",
"=",
"cov_variable",
",",
"value_b",
"=",
"X",
",",
"mean_ab",
"=",
"mean_ab",
",",
"sample_cov",
"=",
"True",
")",
"# Assign values to variables, use control dependencies around return to",
"# enforce the mahalanobis variables to be assigned, the control order matters,",
"# hence the separate contexts.",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"cov_variable",
",",
"value",
"=",
"cov_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"mean_variable",
",",
"value",
"=",
"mean_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"count_variable",
",",
"value",
"=",
"count_tensor",
")",
"]",
")",
":",
"return",
"(",
"tf",
".",
"identity",
"(",
"input",
"=",
"cov_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"mean_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"count_variable",
")",
")"
] | [
76,
0
] | [
134,
50
] | python | en | ['en', 'en', 'en'] | True |
singleton_batch_var_variable_updating | (
inner_size, x, count_variable, mean_variable, var_variable) | Updates mahalanobis thresh vars incrementally when number_of_rows equals 1.
Given the inner size of the matrix, the data scalar x, the variable tracking
running record counts, the variable tracking the running mean, and the
variable tracking the running variance, returns updated running variance,
running mean, and running record count variables.
Args:
inner_size: Inner size of matrix X.
x: tf.float64 scalar tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 scalar variable tracking running mean.
var_variable: tf.float64 scalar variable tracking running variance.
Returns:
Updated running variance, running mean, and running record count variables.
| Updates mahalanobis thresh vars incrementally when number_of_rows equals 1. | def singleton_batch_var_variable_updating(
inner_size, x, count_variable, mean_variable, var_variable):
"""Updates mahalanobis thresh vars incrementally when number_of_rows equals 1.
Given the inner size of the matrix, the data scalar x, the variable tracking
running record counts, the variable tracking the running mean, and the
variable tracking the running variance, returns updated running variance,
running mean, and running record count variables.
Args:
inner_size: Inner size of matrix X.
x: tf.float64 scalar tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 scalar variable tracking running mean.
var_variable: tf.float64 scalar variable tracking running variance.
Returns:
Updated running variance, running mean, and running record count variables.
"""
# Calculate new combined mean for incremental covariance matrix calculation
# time_shape = (), features_shape = ()
mean_ab = update_mean_incremental(
count_a=count_variable, mean_a=mean_variable, value_b=x)
# Update running variables from single example
# time_shape = (), features_shape = ()
count_tensor = update_record_count(count_a=count_variable, count_b=1)
# time_shape = (), features_shape = ()
mean_tensor = mean_ab
# Check if inner dimension is greater than 1 to calculate covariance matrix
if inner_size == 1:
var_tensor = tf.zeros_like(tensor=var_variable, dtype=tf.float64)
else:
# time_shape = (), features_shape = ()
var_tensor = update_cov_incremental(
count_a=count_variable,
mean_a=tf.reshape(tensor=mean_variable, shape=[1]),
cov_a=tf.reshape(tensor=var_variable, shape=[1, 1]),
value_b=tf.reshape(tensor=x, shape=[1, 1]),
mean_ab=tf.reshape(tensor=mean_ab, shape=[1]),
sample_cov=True)
var_tensor = tf.squeeze(input=var_tensor)
# Assign values to variables, use control dependencies around return to
# enforce the mahalanobis variables to be assigned, the control order matters,
# hence the separate contexts.
with tf.control_dependencies(
control_inputs=[tf.assign(ref=var_variable, value=var_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):
return (tf.identity(input=var_variable),
tf.identity(input=mean_variable),
tf.identity(input=count_variable)) | [
"def",
"singleton_batch_var_variable_updating",
"(",
"inner_size",
",",
"x",
",",
"count_variable",
",",
"mean_variable",
",",
"var_variable",
")",
":",
"# Calculate new combined mean for incremental covariance matrix calculation",
"# time_shape = (), features_shape = ()",
"mean_ab",
"=",
"update_mean_incremental",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"value_b",
"=",
"x",
")",
"# Update running variables from single example",
"# time_shape = (), features_shape = ()",
"count_tensor",
"=",
"update_record_count",
"(",
"count_a",
"=",
"count_variable",
",",
"count_b",
"=",
"1",
")",
"# time_shape = (), features_shape = ()",
"mean_tensor",
"=",
"mean_ab",
"# Check if inner dimension is greater than 1 to calculate covariance matrix",
"if",
"inner_size",
"==",
"1",
":",
"var_tensor",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"var_variable",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"# time_shape = (), features_shape = ()",
"var_tensor",
"=",
"update_cov_incremental",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"mean_variable",
",",
"shape",
"=",
"[",
"1",
"]",
")",
",",
"cov_a",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"var_variable",
",",
"shape",
"=",
"[",
"1",
",",
"1",
"]",
")",
",",
"value_b",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"x",
",",
"shape",
"=",
"[",
"1",
",",
"1",
"]",
")",
",",
"mean_ab",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"mean_ab",
",",
"shape",
"=",
"[",
"1",
"]",
")",
",",
"sample_cov",
"=",
"True",
")",
"var_tensor",
"=",
"tf",
".",
"squeeze",
"(",
"input",
"=",
"var_tensor",
")",
"# Assign values to variables, use control dependencies around return to",
"# enforce the mahalanobis variables to be assigned, the control order matters,",
"# hence the separate contexts.",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"var_variable",
",",
"value",
"=",
"var_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"mean_variable",
",",
"value",
"=",
"mean_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"count_variable",
",",
"value",
"=",
"count_tensor",
")",
"]",
")",
":",
"return",
"(",
"tf",
".",
"identity",
"(",
"input",
"=",
"var_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"mean_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"count_variable",
")",
")"
] | [
137,
0
] | [
195,
50
] | python | en | ['en', 'en', 'en'] | True |
update_mean_batch | (count_a, mean_a, count_b, mean_b) | Updates the running mean vector with a batch of data.
Given previous running total, running column means, current batch size, and
batch's column means, return new running column means.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
count_b: tf.int64 scalar tensor of current batch size.
mean_b: tf.float64 vector tensor of batch's column means.
Returns:
A tf.float64 vector tensor of new running column means.
| Updates the running mean vector with a batch of data. | def update_mean_batch(count_a, mean_a, count_b, mean_b):
"""Updates the running mean vector with a batch of data.
Given previous running total, running column means, current batch size, and
batch's column means, return new running column means.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
count_b: tf.int64 scalar tensor of current batch size.
mean_b: tf.float64 vector tensor of batch's column means.
Returns:
A tf.float64 vector tensor of new running column means.
"""
sum_a = mean_a * tf.cast(x=count_a, dtype=tf.float64)
sum_b = mean_b * tf.cast(x=count_b, dtype=tf.float64)
mean_ab = (sum_a + sum_b) / tf.cast(x=count_a + count_b, dtype=tf.float64)
return mean_ab | [
"def",
"update_mean_batch",
"(",
"count_a",
",",
"mean_a",
",",
"count_b",
",",
"mean_b",
")",
":",
"sum_a",
"=",
"mean_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"sum_b",
"=",
"mean_b",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"mean_ab",
"=",
"(",
"sum_a",
"+",
"sum_b",
")",
"/",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"return",
"mean_ab"
] | [
201,
0
] | [
220,
16
] | python | en | ['en', 'en', 'en'] | True |
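update_mean_batch above combines the two partitions' means as the count-weighted average:

$$\mu_{ab} = \frac{n_a\,\mu_a + n_b\,\mu_b}{n_a + n_b}$$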
update_cov_batch | (
count_a, mean_a, cov_a, count_b, mean_b, cov_b, sample_cov) | Updates the running covariance matrix with batch of data.
Given previous running total, running column means, running covariance matrix,
current batch size, batch's column means, batch's covariance matrix, and
whether to use sample covariance or not, return new running covariance matrix.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
cov_a: tf.float64 matrix tensor of previous running covariance matrix.
count_b: tf.int64 scalar tensor of current batch size.
mean_b: tf.float64 vector tensor of batch's column means.
cov_b: tf.float64 matrix tensor of batch's covariance matrix.
sample_cov: Bool flag on whether sample or population covariance is used.
Returns:
A tf.float64 matrix tensor of new running covariance matrix.
| Updates the running covariance matrix with batch of data. | def update_cov_batch(
count_a, mean_a, cov_a, count_b, mean_b, cov_b, sample_cov):
"""Updates the running covariance matrix with batch of data.
Given previous running total, running column means, running covariance matrix,
current batch size, batch's column means, batch's covariance matrix, and
whether to use sample covariance or not, return new running covariance matrix.
Args:
count_a: tf.int64 scalar tensor of previous running total of records.
mean_a: tf.float64 vector tensor of previous running column means.
cov_a: tf.float64 matrix tensor of previous running covariance matrix.
count_b: tf.int64 scalar tensor of current batch size.
mean_b: tf.float64 vector tensor of batch's column means.
cov_b: tf.float64 matrix tensor of batch's covariance matrix.
sample_cov: Bool flag on whether sample or population covariance is used.
Returns:
A tf.float64 matrix tensor of new running covariance matrix.
"""
mean_diff = tf.expand_dims(input=mean_a - mean_b, axis=0)
if sample_cov:
ucov_a = cov_a * tf.cast(x=count_a - 1, dtype=tf.float64)
ucov_b = cov_b * tf.cast(x=count_b - 1, dtype=tf.float64)
den = tf.cast(x=count_a + count_b - 1, dtype=tf.float64)
else:
ucov_a = cov_a * tf.cast(x=count_a, dtype=tf.float64)
ucov_b = cov_b * tf.cast(x=count_b, dtype=tf.float64)
den = tf.cast(x=count_a + count_b, dtype=tf.float64)
mean_diff = tf.matmul(a=mean_diff, b=mean_diff, transpose_a=True)
mean_scaling_num = tf.cast(x=count_a * count_b, dtype=tf.float64)
mean_scaling_den = tf.cast(x=count_a + count_b, dtype=tf.float64)
mean_scaling = mean_scaling_num / mean_scaling_den
cov_ab = (ucov_a + ucov_b + mean_diff * mean_scaling) / den
return cov_ab | [
"def",
"update_cov_batch",
"(",
"count_a",
",",
"mean_a",
",",
"cov_a",
",",
"count_b",
",",
"mean_b",
",",
"cov_b",
",",
"sample_cov",
")",
":",
"mean_diff",
"=",
"tf",
".",
"expand_dims",
"(",
"input",
"=",
"mean_a",
"-",
"mean_b",
",",
"axis",
"=",
"0",
")",
"if",
"sample_cov",
":",
"ucov_a",
"=",
"cov_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"ucov_b",
"=",
"cov_b",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_b",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"den",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"count_b",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"ucov_a",
"=",
"cov_a",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"ucov_b",
"=",
"cov_b",
"*",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"den",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"mean_diff",
"=",
"tf",
".",
"matmul",
"(",
"a",
"=",
"mean_diff",
",",
"b",
"=",
"mean_diff",
",",
"transpose_a",
"=",
"True",
")",
"mean_scaling_num",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"*",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"mean_scaling_den",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"count_a",
"+",
"count_b",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"mean_scaling",
"=",
"mean_scaling_num",
"/",
"mean_scaling_den",
"cov_ab",
"=",
"(",
"ucov_a",
"+",
"ucov_b",
"+",
"mean_diff",
"*",
"mean_scaling",
")",
"/",
"den",
"return",
"cov_ab"
] | [
223,
0
] | [
260,
15
] | python | en | ['en', 'en', 'en'] | True |
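update_cov_batch above implements the standard pairwise combination of covariance matrices from two partitions. A minimal NumPy sketch (illustrative random data, sample-covariance branch) checking that rule against statistics computed directly over the concatenated data:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(50, 3))   # partition A
b = rng.normal(size=(30, 3))   # partition B
n_a, n_b = len(a), len(b)

mean_a, mean_b = a.mean(axis=0), b.mean(axis=0)
cov_a = np.cov(a, rowvar=False)   # sample covariance (ddof=1)
cov_b = np.cov(b, rowvar=False)

mean_ab = (n_a * mean_a + n_b * mean_b) / (n_a + n_b)
diff = (mean_a - mean_b)[None, :]
cov_ab = ((n_a - 1) * cov_a + (n_b - 1) * cov_b
          + diff.T @ diff * (n_a * n_b / (n_a + n_b))) / (n_a + n_b - 1)

both = np.concatenate([a, b])
assert np.allclose(mean_ab, both.mean(axis=0))
assert np.allclose(cov_ab, np.cov(both, rowvar=False))
```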
non_singleton_batch_cov_variable_updating | (
cur_batch_size, inner_size, X, count_variable, mean_variable, cov_variable) | Updates mahalanobis variables when number_of_rows does NOT equal 1.
Given the current batch size, inner size of the matrix, the data matrix X,
the variable tracking running record counts, the variable tracking running
column means, and the variable tracking running covariance matrix, returns
updated running covariance matrix, running column means, and running record
count variables.
Args:
cur_batch_size: Number of examples in current batch (could be partial).
inner_size: Inner size of matrix X.
X: tf.float64 matrix tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 vector variable tracking running column means.
cov_variable: tf.float64 matrix variable tracking running covariance matrix.
Returns:
Updated running covariance matrix, running column means, and running record
count variables.
| Updates mahalanobis variables when number_of_rows does NOT equal 1. | def non_singleton_batch_cov_variable_updating(
cur_batch_size, inner_size, X, count_variable, mean_variable, cov_variable):
"""Updates mahalanobis variables when number_of_rows does NOT equal 1.
Given the current batch size, inner size of the matrix, the data matrix X,
the variable tracking running record counts, the variable tracking running
column means, and the variable tracking running covariance matrix, returns
updated running covariance matrix, running column means, and running record
count variables.
Args:
cur_batch_size: Number of examples in current batch (could be partial).
inner_size: Inner size of matrix X.
X: tf.float64 matrix tensor of input data.
count_variable: tf.int64 scalar variable tracking running record counts.
mean_variable: tf.float64 vector variable tracking running column means.
cov_variable: tf.float64 matrix variable tracking running covariance matrix.
Returns:
Updated running covariance matrix, running column means, and running record
count variables.
"""
# Find statistics of batch
number_of_rows = cur_batch_size * inner_size
# time_shape = (num_feat,), features_shape = (seq_len,)
X_mean = tf.reduce_mean(input_tensor=X, axis=0)
# time_shape = (cur_batch_size * seq_len, num_feat)
# features_shape = (cur_batch_size * num_feat, seq_len)
X_centered = X - X_mean
if inner_size > 1:
# time_shape = (num_feat, num_feat)
# features_shape = (seq_len, seq_len)
X_cov = tf.matmul(
a=X_centered,
b=X_centered,
transpose_a=True) / tf.cast(x=number_of_rows - 1, dtype=tf.float64)
# Update running variables from batch statistics
# time_shape = (), features_shape = ()
count_tensor = update_record_count(
count_a=count_variable, count_b=number_of_rows)
# time_shape = (num_feat,), features_shape = (seq_len,)
mean_tensor = update_mean_batch(
count_a=count_variable,
mean_a=mean_variable,
count_b=number_of_rows,
mean_b=X_mean)
# Check if inner dimension is greater than 1 to calculate covariance matrix
if inner_size == 1:
cov_tensor = tf.zeros_like(tensor=cov_variable, dtype=tf.float64)
else:
# time_shape = (num_feat, num_feat)
# features_shape = (seq_len, seq_len)
cov_tensor = update_cov_batch(
count_a=count_variable,
mean_a=mean_variable,
cov_a=cov_variable,
count_b=number_of_rows,
mean_b=X_mean,
cov_b=X_cov,
sample_cov=True)
# Assign values to variables, use control dependencies around return to
# enforce the mahalanobis variables to be assigned, the control order matters,
# hence the separate contexts.
with tf.control_dependencies(
control_inputs=[tf.assign(ref=cov_variable, value=cov_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):
return (tf.identity(input=cov_variable),
tf.identity(input=mean_variable),
tf.identity(input=count_variable)) | [
"def",
"non_singleton_batch_cov_variable_updating",
"(",
"cur_batch_size",
",",
"inner_size",
",",
"X",
",",
"count_variable",
",",
"mean_variable",
",",
"cov_variable",
")",
":",
"# Find statistics of batch",
"number_of_rows",
"=",
"cur_batch_size",
"*",
"inner_size",
"# time_shape = (num_feat,), features_shape = (seq_len,)",
"X_mean",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"X",
",",
"axis",
"=",
"0",
")",
"# time_shape = (cur_batch_size * seq_len, num_feat)",
"# features_shape = (cur_batch_size * num_feat, seq_len)",
"X_centered",
"=",
"X",
"-",
"X_mean",
"if",
"inner_size",
">",
"1",
":",
"# time_shape = (num_feat, num_feat)",
"# features_shape = (seq_len, seq_len)",
"X_cov",
"=",
"tf",
".",
"matmul",
"(",
"a",
"=",
"X_centered",
",",
"b",
"=",
"X_centered",
",",
"transpose_a",
"=",
"True",
")",
"/",
"tf",
".",
"cast",
"(",
"x",
"=",
"number_of_rows",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"# Update running variables from batch statistics",
"# time_shape = (), features_shape = ()",
"count_tensor",
"=",
"update_record_count",
"(",
"count_a",
"=",
"count_variable",
",",
"count_b",
"=",
"number_of_rows",
")",
"# time_shape = (num_feat,), features_shape = (seq_len,)",
"mean_tensor",
"=",
"update_mean_batch",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"count_b",
"=",
"number_of_rows",
",",
"mean_b",
"=",
"X_mean",
")",
"# Check if inner dimension is greater than 1 to calculate covariance matrix",
"if",
"inner_size",
"==",
"1",
":",
"cov_tensor",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"cov_variable",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"# time_shape = (num_feat, num_feat)",
"# features_shape = (seq_len, seq_len)",
"cov_tensor",
"=",
"update_cov_batch",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"cov_a",
"=",
"cov_variable",
",",
"count_b",
"=",
"number_of_rows",
",",
"mean_b",
"=",
"X_mean",
",",
"cov_b",
"=",
"X_cov",
",",
"sample_cov",
"=",
"True",
")",
"# Assign values to variables, use control dependencies around return to",
"# enforce the mahalanobis variables to be assigned, the control order matters,",
"# hence the separate contexts.",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"cov_variable",
",",
"value",
"=",
"cov_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"mean_variable",
",",
"value",
"=",
"mean_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"count_variable",
",",
"value",
"=",
"count_tensor",
")",
"]",
")",
":",
"return",
"(",
"tf",
".",
"identity",
"(",
"input",
"=",
"cov_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"mean_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"count_variable",
")",
")"
] | [
263,
0
] | [
342,
50
] | python | en | ['en', 'en', 'en'] | True |
non_singleton_batch_var_variable_updating | (
cur_batch_size, inner_size, x, count_variable, mean_variable, var_variable) | Updates mahalanobis thresh variables when number_of_rows does NOT equal 1.
Given the current batch size, inner size of the matrix, the data vector x,
the variable tracking the running record count, the variable tracking the
running mean, and the variable tracking the running variance, returns
updated running variance, running mean, and running record count variables.
Args:
cur_batch_size: Number of examples in current batch (could be partial).
inner_size: Inner size of matrix X.
x: tf.float64 vector tensor of mahalanobis distance.
count_variable: tf.int64 scalar variable tracking running record count.
mean_variable: tf.float64 scalar variable tracking running mean.
var_variable: tf.float64 scalar variable tracking running variance.
Returns:
Updated running variance, running mean, and running record count variables.
| Updates mahalanobis thresh variables when number_of_rows does NOT equal 1. | def non_singleton_batch_var_variable_updating(
cur_batch_size, inner_size, x, count_variable, mean_variable, var_variable):
"""Updates mahalanobis thresh variables when number_of_rows does NOT equal 1.
Given the current batch size, inner size of the matrix, the data vector x,
the variable tracking the running record count, the variable tracking the
running mean, and the variable tracking the running variance, returns
updated running variance, running mean, and running record count variables.
Args:
cur_batch_size: Number of examples in current batch (could be partial).
inner_size: Inner size of matrix X.
x: tf.float64 vector tensor of mahalanobis distance.
count_variable: tf.int64 scalar variable tracking running record count.
mean_variable: tf.float64 scalar variable tracking running mean.
var_variable: tf.float64 scalar variable tracking running variance.
Returns:
Updated running variance, running mean, and running record count variables.
"""
# Find statistics of batch
number_of_rows = cur_batch_size * inner_size
# time_shape = (), features_shape = ()
x_mean = tf.reduce_mean(input_tensor=x)
# time_shape = (cur_batch_size * seq_len,)
# features_shape = (cur_batch_size * num_feat,)
x_centered = x - x_mean
if inner_size > 1:
# time_shape = (), features_shape = ()
x_var = tf.reduce_sum(input_tensor=tf.square(x=x_centered))
x_var /= tf.cast(x=number_of_rows - 1, dtype=tf.float64)
# Update running variables from batch statistics
# time_shape = (), features_shape = ()
count_tensor = update_record_count(
count_a=count_variable, count_b=number_of_rows)
# time_shape = (), features_shape = ()
mean_tensor = update_mean_batch(
count_a=count_variable,
mean_a=mean_variable,
count_b=number_of_rows,
mean_b=x_mean)
# Check if inner dimension is greater than 1 to calculate covariance matrix
if inner_size == 1:
var_tensor = tf.zeros_like(tensor=var_variable, dtype=tf.float64)
else:
# time_shape = (num_feat, num_feat)
# features_shape = (seq_len, seq_len)
var_tensor = update_cov_batch(
count_a=count_variable,
mean_a=mean_variable,
cov_a=var_variable,
count_b=number_of_rows,
mean_b=tf.expand_dims(input=x_mean, axis=0),
cov_b=tf.reshape(tensor=x_var, shape=[1, 1]),
sample_cov=True)
var_tensor = tf.squeeze(input=var_tensor)
# Assign values to variables, use control dependencies around return to
# enforce the mahalanobis thresh variables to be assigned, the control order
# matters, hence the separate contexts.
with tf.control_dependencies(
control_inputs=[tf.assign(ref=var_variable, value=var_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):
with tf.control_dependencies(
control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):
return (tf.identity(input=var_variable),
tf.identity(input=mean_variable),
tf.identity(input=count_variable)) | [
"def",
"non_singleton_batch_var_variable_updating",
"(",
"cur_batch_size",
",",
"inner_size",
",",
"x",
",",
"count_variable",
",",
"mean_variable",
",",
"var_variable",
")",
":",
"# Find statistics of batch",
"number_of_rows",
"=",
"cur_batch_size",
"*",
"inner_size",
"# time_shape = (), features_shape = ()",
"x_mean",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"x",
")",
"# time_shape = (cur_batch_size * seq_len,)",
"# features_shape = (cur_batch_size * num_feat,)",
"x_centered",
"=",
"x",
"-",
"x_mean",
"if",
"inner_size",
">",
"1",
":",
"# time_shape = (), features_shape = ()",
"x_var",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"square",
"(",
"x",
"=",
"x_centered",
")",
")",
"x_var",
"/=",
"tf",
".",
"cast",
"(",
"x",
"=",
"number_of_rows",
"-",
"1",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"# Update running variables from batch statistics",
"# time_shape = (), features_shape = ()",
"count_tensor",
"=",
"update_record_count",
"(",
"count_a",
"=",
"count_variable",
",",
"count_b",
"=",
"number_of_rows",
")",
"# time_shape = (), features_shape = ()",
"mean_tensor",
"=",
"update_mean_batch",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"count_b",
"=",
"number_of_rows",
",",
"mean_b",
"=",
"x_mean",
")",
"# Check if inner dimension is greater than 1 to calculate covariance matrix",
"if",
"inner_size",
"==",
"1",
":",
"var_tensor",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"var_variable",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"else",
":",
"# time_shape = (num_feat, num_feat)",
"# features_shape = (seq_len, seq_len)",
"var_tensor",
"=",
"update_cov_batch",
"(",
"count_a",
"=",
"count_variable",
",",
"mean_a",
"=",
"mean_variable",
",",
"cov_a",
"=",
"var_variable",
",",
"count_b",
"=",
"number_of_rows",
",",
"mean_b",
"=",
"tf",
".",
"expand_dims",
"(",
"input",
"=",
"x_mean",
",",
"axis",
"=",
"0",
")",
",",
"cov_b",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"x_var",
",",
"shape",
"=",
"[",
"1",
",",
"1",
"]",
")",
",",
"sample_cov",
"=",
"True",
")",
"var_tensor",
"=",
"tf",
".",
"squeeze",
"(",
"input",
"=",
"var_tensor",
")",
"# Assign values to variables, use control dependencies around return to",
"# enforce the mahalanobis thresh variables to be assigned, the control order",
"# matters, hence the separate contexts.",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"var_variable",
",",
"value",
"=",
"var_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"mean_variable",
",",
"value",
"=",
"mean_tensor",
")",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"count_variable",
",",
"value",
"=",
"count_tensor",
")",
"]",
")",
":",
"return",
"(",
"tf",
".",
"identity",
"(",
"input",
"=",
"var_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"mean_variable",
")",
",",
"tf",
".",
"identity",
"(",
"input",
"=",
"count_variable",
")",
")"
] | [
345,
0
] | [
421,
50
] | python | en | ['en', 'en', 'en'] | True |
mahalanobis_dist | (err_vec, mean_vec, inv_cov, final_shape) | Calculates mahalanobis distance from MLE.
Given reconstruction error vector, mean reconstruction error vector, inverse
covariance of reconstruction error, and mahalanobis distance tensor's final
shape, return mahalanobis distance.
Args:
err_vec: tf.float64 matrix tensor of reconstruction errors.
mean_vec: tf.float64 vector variable tracking running column means of
reconstruction errors.
inv_cov: tf.float64 matrix variable tracking running covariance matrix of
reconstruction errors.
final_shape: Final shape of mahalanobis distance tensor.
Returns:
tf.float64 matrix tensor of mahalanobis distance.
| Calculates mahalanobis distance from MLE. | def mahalanobis_dist(err_vec, mean_vec, inv_cov, final_shape):
"""Calculates mahalanobis distance from MLE.
Given reconstruction error vector, mean reconstruction error vector, inverse
covariance of reconstruction error, and mahalanobis distance tensor's final
shape, return mahalanobis distance.
Args:
err_vec: tf.float64 matrix tensor of reconstruction errors.
mean_vec: tf.float64 vector variable tracking running column means of
reconstruction errors.
inv_cov: tf.float64 matrix variable tracking running covariance matrix of
reconstruction errors.
final_shape: Final shape of mahalanobis distance tensor.
Returns:
tf.float64 matrix tensor of mahalanobis distance.
"""
# time_shape = (cur_batch_size * seq_len, num_feat)
# features_shape = (cur_batch_size * num_feat, seq_len)
err_vec_cen = err_vec - mean_vec
# time_shape = (num_feat, cur_batch_size * seq_len)
# features_shape = (seq_len, cur_batch_size * num_feat)
mahalanobis_right_product = tf.matmul(
a=inv_cov, b=err_vec_cen, transpose_b=True)
# time_shape = (cur_batch_size * seq_len, cur_batch_size * seq_len)
# features_shape = (cur_batch_size * num_feat, cur_batch_size * num_feat)
mahalanobis_dist_vectorized = tf.matmul(
a=err_vec_cen, b=mahalanobis_right_product)
# time_shape = (cur_batch_size * seq_len,)
# features_shape = (cur_batch_size * num_feat,)
mahalanobis_dist_flat = tf.diag_part(input=mahalanobis_dist_vectorized)
# time_shape = (cur_batch_size, seq_len)
# features_shape = (cur_batch_size, num_feat)
mahalanobis_dist_final_shaped = tf.reshape(
tensor=mahalanobis_dist_flat, shape=[-1, final_shape])
# time_shape = (cur_batch_size, seq_len)
# features_shape = (cur_batch_size, num_feat)
mahalanobis_dist_final_shaped_sqrt = tf.sqrt(x=mahalanobis_dist_final_shaped)
return mahalanobis_dist_final_shaped_sqrt | [
"def",
"mahalanobis_dist",
"(",
"err_vec",
",",
"mean_vec",
",",
"inv_cov",
",",
"final_shape",
")",
":",
"# time_shape = (cur_batch_size * seq_len, num_feat)",
"# features_shape = (cur_batch_size * num_feat, seq_len)",
"err_vec_cen",
"=",
"err_vec",
"-",
"mean_vec",
"# time_shape = (num_feat, cur_batch_size * seq_len)",
"# features_shape = (seq_len, cur_batch_size * num_feat)",
"mahalanobis_right_product",
"=",
"tf",
".",
"matmul",
"(",
"a",
"=",
"inv_cov",
",",
"b",
"=",
"err_vec_cen",
",",
"transpose_b",
"=",
"True",
")",
"# time_shape = (cur_batch_size * seq_len, cur_batch_size * seq_len)",
"# features_shape = (cur_batch_size * num_feat, cur_batch_size * num_feat)",
"mahalanobis_dist_vectorized",
"=",
"tf",
".",
"matmul",
"(",
"a",
"=",
"err_vec_cen",
",",
"b",
"=",
"mahalanobis_right_product",
")",
"# time_shape = (cur_batch_size * seq_len,)",
"# features_shape = (cur_batch_size * num_feat,)",
"mahalanobis_dist_flat",
"=",
"tf",
".",
"diag_part",
"(",
"input",
"=",
"mahalanobis_dist_vectorized",
")",
"# time_shape = (cur_batch_size, seq_len)",
"# features_shape = (cur_batch_size, num_feat)",
"mahalanobis_dist_final_shaped",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"mahalanobis_dist_flat",
",",
"shape",
"=",
"[",
"-",
"1",
",",
"final_shape",
"]",
")",
"# time_shape = (cur_batch_size, seq_len)",
"# features_shape = (cur_batch_size, num_feat)",
"mahalanobis_dist_final_shaped_sqrt",
"=",
"tf",
".",
"sqrt",
"(",
"x",
"=",
"mahalanobis_dist_final_shaped",
")",
"return",
"mahalanobis_dist_final_shaped_sqrt"
] | [
424,
0
] | [
469,
43
] | python | en | ['en', 'su', 'en'] | True |
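mahalanobis_dist above forms the full (rows, rows) product and keeps only its diagonal. A minimal NumPy sketch (illustrative names, not the graph implementation) of the same per-row Mahalanobis distance that avoids materializing that matrix:

```python
import numpy as np

def mahalanobis_rows(err_vec, mean_vec, inv_cov):
    """Per-row distance sqrt((e - mu) S^-1 (e - mu)^T) for e in err_vec."""
    centered = err_vec - mean_vec                       # (n, d)
    # einsum computes only the diagonal of centered @ inv_cov @ centered.T
    sq = np.einsum("nd,de,ne->n", centered, inv_cov, centered)
    return np.sqrt(sq)
```

Reshaping the result to (-1, final_shape) reproduces the tensor returned by the graph version.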
calculate_error_distribution_statistics_training | (
cur_batch_size,
X_time_abs_recon_err,
abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var,
abs_err_inv_cov_time_var,
X_feat_abs_recon_err,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var,
abs_err_inv_cov_feat_var,
params,
dummy_var) | Calculates error distribution statistics during training mode.
Given dimensions of inputs, reconstructed inputs' absolute errors, and
variables tracking counts, means, and covariances of error distribution,
returns loss and train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
X_time_abs_recon_err: Time major reconstructed input data's absolute
reconstruction error.
abs_err_count_time_var: Time major running count of number of records.
abs_err_mean_time_var: Time major running column means of absolute error.
abs_err_cov_time_var: Time major running covariance matrix of absolute
error.
abs_err_inv_cov_time_var: Time major running inverse covariance matrix of
absolute error.
X_feat_abs_recon_err: Feature major reconstructed input data's absolute
reconstruction error.
abs_err_count_feat_var: Feature major running count of number of records.
abs_err_mean_feat_var: Feature major running column means of absolute error.
abs_err_cov_feat_var: Feature major running covariance matrix of absolute
error.
abs_err_inv_cov_feat_var: Feature major running inverse covariance matrix of
absolute error.
params: Dictionary of parameters.
dummy_var: Dummy variable used to allow training mode to happen since it
requires a gradient to tie back to the graph dependency.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
train_op: The train operation to tie our updates back to Estimator graph.
| Calculates error distribution statistics during training mode. | def calculate_error_distribution_statistics_training(
cur_batch_size,
X_time_abs_recon_err,
abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var,
abs_err_inv_cov_time_var,
X_feat_abs_recon_err,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var,
abs_err_inv_cov_feat_var,
params,
dummy_var):
"""Calculates error distribution statistics during training mode.
Given dimensions of inputs, reconstructed inputs' absolute errors, and
variables tracking counts, means, and covariances of error distribution,
returns loss and train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
X_time_abs_recon_err: Time major reconstructed input data's absolute
reconstruction error.
abs_err_count_time_var: Time major running count of number of records.
abs_err_mean_time_var: Time major running column means of absolute error.
abs_err_cov_time_var: Time major running covariance matrix of absolute
error.
abs_err_inv_cov_time_var: Time major running inverse covariance matrix of
absolute error.
X_feat_abs_recon_err: Feature major reconstructed input data's absolute
reconstruction error.
abs_err_count_feat_var: Feature major running count of number of records.
abs_err_mean_feat_var: Feature major running column means of absolute error.
abs_err_cov_feat_var: Feature major running covariance matrix of absolute
error.
abs_err_inv_cov_feat_var: Feature major running inverse covariance matrix of
absolute error.
params: Dictionary of parameters.
dummy_var: Dummy variable used to allow training mode to happen since it
requires a gradient to tie back to the graph dependency.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
train_op: The train operation to tie our updates back to Estimator graph.
"""
with tf.variable_scope(
name_or_scope="mahalanobis_dist_vars", reuse=tf.AUTO_REUSE):
# Time based
singleton_time_condition = tf.equal(
x=cur_batch_size * params["seq_len"], y=1)
cov_time_var, mean_time_var, count_time_var = tf.cond(
pred=singleton_time_condition,
true_fn=lambda: singleton_batch_cov_variable_updating(
params["seq_len"],
X_time_abs_recon_err,
abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var),
false_fn=lambda: non_singleton_batch_cov_variable_updating(
cur_batch_size,
params["seq_len"],
X_time_abs_recon_err,
abs_err_count_time_var,
abs_err_mean_time_var,
abs_err_cov_time_var))
# Features based
singleton_feat_condition = tf.equal(
x=cur_batch_size * params["num_feat"], y=1)
cov_feat_var, mean_feat_var, count_feat_var = tf.cond(
pred=singleton_feat_condition,
true_fn=lambda: singleton_batch_cov_variable_updating(
params["num_feat"],
X_feat_abs_recon_err,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var),
false_fn=lambda: non_singleton_batch_cov_variable_updating(
cur_batch_size,
params["num_feat"],
X_feat_abs_recon_err,
abs_err_count_feat_var,
abs_err_mean_feat_var,
abs_err_cov_feat_var))
# Lastly use control dependencies around loss to enforce the mahalanobis
# variables to be assigned, the control order matters, hence the separate
# contexts
with tf.control_dependencies(
control_inputs=[cov_time_var, cov_feat_var]):
with tf.control_dependencies(
control_inputs=[mean_time_var, mean_feat_var]):
with tf.control_dependencies(
control_inputs=[count_time_var, count_feat_var]):
# Time based
# shape = (num_feat, num_feat)
abs_err_inv_cov_time_tensor = \
tf.matrix_inverse(input=cov_time_var + \
tf.eye(num_rows=tf.shape(input=cov_time_var)[0],
dtype=tf.float64) * params["eps"])
# Features based
# shape = (seq_len, seq_len)
abs_err_inv_cov_feat_tensor = \
tf.matrix_inverse(input=cov_feat_var + \
tf.eye(num_rows=tf.shape(input=cov_feat_var)[0],
dtype=tf.float64) * params["eps"])
with tf.control_dependencies(
control_inputs=[tf.assign(ref=abs_err_inv_cov_time_var,
value=abs_err_inv_cov_time_tensor),
tf.assign(ref=abs_err_inv_cov_feat_var,
value=abs_err_inv_cov_feat_tensor)]):
loss = tf.reduce_sum(
input_tensor=tf.zeros(shape=(), dtype=tf.float64) * dummy_var)
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="SGD")
return loss, train_op | [
"def",
"calculate_error_distribution_statistics_training",
"(",
"cur_batch_size",
",",
"X_time_abs_recon_err",
",",
"abs_err_count_time_var",
",",
"abs_err_mean_time_var",
",",
"abs_err_cov_time_var",
",",
"abs_err_inv_cov_time_var",
",",
"X_feat_abs_recon_err",
",",
"abs_err_count_feat_var",
",",
"abs_err_mean_feat_var",
",",
"abs_err_cov_feat_var",
",",
"abs_err_inv_cov_feat_var",
",",
"params",
",",
"dummy_var",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name_or_scope",
"=",
"\"mahalanobis_dist_vars\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"# Time based",
"singleton_time_condition",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"cur_batch_size",
"*",
"params",
"[",
"\"seq_len\"",
"]",
",",
"y",
"=",
"1",
")",
"cov_time_var",
",",
"mean_time_var",
",",
"count_time_var",
"=",
"tf",
".",
"cond",
"(",
"pred",
"=",
"singleton_time_condition",
",",
"true_fn",
"=",
"lambda",
":",
"singleton_batch_cov_variable_updating",
"(",
"params",
"[",
"\"seq_len\"",
"]",
",",
"X_time_abs_recon_err",
",",
"abs_err_count_time_var",
",",
"abs_err_mean_time_var",
",",
"abs_err_cov_time_var",
")",
",",
"false_fn",
"=",
"lambda",
":",
"non_singleton_batch_cov_variable_updating",
"(",
"cur_batch_size",
",",
"params",
"[",
"\"seq_len\"",
"]",
",",
"X_time_abs_recon_err",
",",
"abs_err_count_time_var",
",",
"abs_err_mean_time_var",
",",
"abs_err_cov_time_var",
")",
")",
"# Features based",
"singleton_feat_condition",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"cur_batch_size",
"*",
"params",
"[",
"\"num_feat\"",
"]",
",",
"y",
"=",
"1",
")",
"cov_feat_var",
",",
"mean_feat_var",
",",
"count_feat_var",
"=",
"tf",
".",
"cond",
"(",
"pred",
"=",
"singleton_feat_condition",
",",
"true_fn",
"=",
"lambda",
":",
"singleton_batch_cov_variable_updating",
"(",
"params",
"[",
"\"num_feat\"",
"]",
",",
"X_feat_abs_recon_err",
",",
"abs_err_count_feat_var",
",",
"abs_err_mean_feat_var",
",",
"abs_err_cov_feat_var",
")",
",",
"false_fn",
"=",
"lambda",
":",
"non_singleton_batch_cov_variable_updating",
"(",
"cur_batch_size",
",",
"params",
"[",
"\"num_feat\"",
"]",
",",
"X_feat_abs_recon_err",
",",
"abs_err_count_feat_var",
",",
"abs_err_mean_feat_var",
",",
"abs_err_cov_feat_var",
")",
")",
"# Lastly use control dependencies around loss to enforce the mahalanobis",
"# variables to be assigned, the control order matters, hence the separate",
"# contexts",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"cov_time_var",
",",
"cov_feat_var",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"mean_time_var",
",",
"mean_feat_var",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"count_time_var",
",",
"count_feat_var",
"]",
")",
":",
"# Time based",
"# shape = (num_feat, num_feat)",
"abs_err_inv_cov_time_tensor",
"=",
"tf",
".",
"matrix_inverse",
"(",
"input",
"=",
"cov_time_var",
"+",
"tf",
".",
"eye",
"(",
"num_rows",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"cov_time_var",
")",
"[",
"0",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"*",
"params",
"[",
"\"eps\"",
"]",
")",
"# Features based",
"# shape = (seq_len, seq_len)",
"abs_err_inv_cov_feat_tensor",
"=",
"tf",
".",
"matrix_inverse",
"(",
"input",
"=",
"cov_feat_var",
"+",
"tf",
".",
"eye",
"(",
"num_rows",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"cov_feat_var",
")",
"[",
"0",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"*",
"params",
"[",
"\"eps\"",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"abs_err_inv_cov_time_var",
",",
"value",
"=",
"abs_err_inv_cov_time_tensor",
")",
",",
"tf",
".",
"assign",
"(",
"ref",
"=",
"abs_err_inv_cov_feat_var",
",",
"value",
"=",
"abs_err_inv_cov_feat_tensor",
")",
"]",
")",
":",
"loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"*",
"dummy_var",
")",
"train_op",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"optimize_loss",
"(",
"loss",
"=",
"loss",
",",
"global_step",
"=",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
",",
"learning_rate",
"=",
"params",
"[",
"\"learning_rate\"",
"]",
",",
"optimizer",
"=",
"\"SGD\"",
")",
"return",
"loss",
",",
"train_op"
] | [
472,
0
] | [
596,
23
] | python | da | ['da', 'la', 'en'] | False |
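The function above maintains running count, mean, and covariance variables in TensorFlow 1.x and refreshes the inverse covariances under nested control dependencies. As a minimal sketch of the underlying statistics, here is a NumPy version of a batched running-moment update; the function name, the population normalization, and the placement of the eps jitter are my assumptions, not taken from the record above.

import numpy as np

def update_running_stats(count, mean, m2, batch):
    # count: scalar; mean: (d,); m2: (d, d) running sum of outer products of deviations.
    b_count = batch.shape[0]
    b_mean = batch.mean(axis=0)
    b_dev = batch - b_mean
    b_m2 = b_dev.T @ b_dev
    delta = b_mean - mean
    new_count = count + b_count
    new_mean = mean + delta * (b_count / new_count)
    # Chan et al. pairwise combination of second moments.
    new_m2 = m2 + b_m2 + np.outer(delta, delta) * (count * b_count / new_count)
    return new_count, new_mean, new_m2

# Population covariance and a jittered inverse, mirroring the eps * I term above:
# cov = new_m2 / new_count
# inv_cov = np.linalg.inv(cov + np.eye(cov.shape[0]) * eps)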
XFrameOptionsMiddleware.get_xframe_options_value | (self, request, response) |
Get the value to set for the X_FRAME_OPTIONS header. Use the value from
the X_FRAME_OPTIONS setting, or 'DENY' if not set.
This method can be overridden if needed, allowing it to vary based on
the request or response.
|
Get the value to set for the X_FRAME_OPTIONS header. Use the value from
the X_FRAME_OPTIONS setting, or 'DENY' if not set. | def get_xframe_options_value(self, request, response):
"""
Get the value to set for the X_FRAME_OPTIONS header. Use the value from
the X_FRAME_OPTIONS setting, or 'DENY' if not set.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, 'X_FRAME_OPTIONS', 'DENY').upper() | [
"def",
"get_xframe_options_value",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"return",
"getattr",
"(",
"settings",
",",
"'X_FRAME_OPTIONS'",
",",
"'DENY'",
")",
".",
"upper",
"(",
")"
] | [
38,
4
] | [
46,
67
] | python | en | ['en', 'error', 'th'] | False |
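Because get_xframe_options_value receives both the request and the response, a subclass can vary the header per request. A small hedged sketch; the subclass name and the /embed/ path prefix are illustrative, not part of Django.

from django.middleware.clickjacking import XFrameOptionsMiddleware

class PerPathXFrameOptionsMiddleware(XFrameOptionsMiddleware):
    def get_xframe_options_value(self, request, response):
        # Allow same-origin framing for embeddable pages, use the configured default elsewhere.
        if request.path.startswith('/embed/'):
            return 'SAMEORIGIN'
        return super().get_xframe_options_value(request, response)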
_parse_arguments | (argv) | Parses command-line arguments. | Parses command-line arguments. | def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--game',
help='Which open ai gym game to play',
type=str,
default='CartPole-v0')
parser.add_argument(
'--episodes',
help='The number of episodes to simulate',
type=int,
default=200)
parser.add_argument(
'--learning_rate',
        help='Learning rate for the neural network',
type=float,
default=0.2)
parser.add_argument(
'--hidden_neurons',
        help='The number of neurons to use per layer',
type=int,
default=30)
parser.add_argument(
'--gamma',
help='The gamma or "discount" factor to discount future states',
type=float,
default=0.5)
parser.add_argument(
'--explore_decay',
help='The rate at which to decay the probability of a random action',
type=float,
default=0.1)
parser.add_argument(
'--memory_size',
help='Size of the memory buffer',
type=int,
default=100000)
parser.add_argument(
'--memory_batch_size',
help='The amount of memories to sample from the buffer while training',
type=int,
default=8)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str,
default='models/')
parser.add_argument(
'--print_rate',
help='How often to print the score, 0 if never',
type=int,
default=0)
parser.add_argument(
'--eval_rate',
help="""While training, perform an on-policy simulation and record
metrics to tensorboard every <record_rate> steps, 0 if never. Use
higher values to avoid hyperparameter tuning "too many metrics"
error""",
type=int,
default=20)
return parser.parse_known_args(argv) | [
"def",
"_parse_arguments",
"(",
"argv",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--game'",
",",
"help",
"=",
"'Which open ai gym game to play'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'CartPole-v0'",
")",
"parser",
".",
"add_argument",
"(",
"'--episodes'",
",",
"help",
"=",
"'The number of episodes to simulate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"200",
")",
"parser",
".",
"add_argument",
"(",
"'--learning_rate'",
",",
"help",
"=",
"'Learning rate for the nueral network'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.2",
")",
"parser",
".",
"add_argument",
"(",
"'--hidden_neurons'",
",",
"help",
"=",
"'The number of nuerons to use per layer'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"30",
")",
"parser",
".",
"add_argument",
"(",
"'--gamma'",
",",
"help",
"=",
"'The gamma or \"discount\" factor to discount future states'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.5",
")",
"parser",
".",
"add_argument",
"(",
"'--explore_decay'",
",",
"help",
"=",
"'The rate at which to decay the probability of a random action'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.1",
")",
"parser",
".",
"add_argument",
"(",
"'--memory_size'",
",",
"help",
"=",
"'Size of the memory buffer'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100000",
")",
"parser",
".",
"add_argument",
"(",
"'--memory_batch_size'",
",",
"help",
"=",
"'The amount of memories to sample from the buffer while training'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"8",
")",
"parser",
".",
"add_argument",
"(",
"'--job-dir'",
",",
"help",
"=",
"'Directory where to save the given model'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'models/'",
")",
"parser",
".",
"add_argument",
"(",
"'--print_rate'",
",",
"help",
"=",
"'How often to print the score, 0 if never'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
")",
"parser",
".",
"add_argument",
"(",
"'--eval_rate'",
",",
"help",
"=",
"\"\"\"While training, perform an on-policy simulation and record\n metrics to tensorboard every <record_rate> steps, 0 if never. Use\n higher values to avoid hyperparameter tuning \"too many metrics\"\n error\"\"\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"20",
")",
"return",
"parser",
".",
"parse_known_args",
"(",
"argv",
")"
] | [
33,
0
] | [
94,
40
] | python | en | ['en', 'fr', 'en'] | True |
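Since the parser above is finished with parse_known_args, it returns a (namespace, remaining) tuple rather than raising on unrecognized flags. A small usage sketch with made-up flag values:

known_args, unknown_args = _parse_arguments(
    ['--game', 'CartPole-v0', '--episodes', '50', '--job-dir', 'models/demo/'])
print(known_args.game, known_args.episodes, known_args.job_dir)  # CartPole-v0 50 models/demo/
# Any flags the parser does not know about are passed through in unknown_args.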
_run | (game, network_params, memory_params, explore_decay, ops) | Sets up and runs the gaming simulation.
Initializes TensorFlow, the training agent, and the game environment.
The agent plays the game from the starting state for a number of
episodes set by the user.
Args:
args: The arguments from the command line parsed by _parse_arguments.
| Sets up and runs the gaming simulation. | def _run(game, network_params, memory_params, explore_decay, ops):
"""Sets up and runs the gaming simulation.
Initializes TensorFlow, the training agent, and the game environment.
The agent plays the game from the starting state for a number of
episodes set by the user.
Args:
        args: The arguments from the command line parsed by _parse_arguments.
"""
# Setup TensorBoard Writer.
trial_id = json.loads(
os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')
output_path = ops.job_dir if not trial_id else ops.job_dir + '/'
tensorboard = TensorBoard(log_dir=output_path)
hpt = hypertune.HyperTune()
graph = tf.Graph()
with graph.as_default():
env = gym.make(game)
agent = _create_agent(
env, network_params, memory_params, explore_decay)
tensorboard.set_model(agent.network)
def _train_or_evaluate(print_score, training=False):
"""Runs a gaming simulation and writes results for tensorboard.
Args:
print_score (bool): True to print a score to the console.
training (bool): True if the agent is training, False to eval.
Returns:
loss if training, else reward for evaluating.
"""
reward = _play(agent, env, training)
if print_score:
print(
'Training - ' if training else 'Evaluating - ',
'Episode: {}'.format(episode),
'Total reward: {}'.format(reward),
)
if training:
return agent.learn()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='episode_reward',
metric_value=reward,
global_step=episode)
return reward
for episode in range(1, ops.episodes+1):
print_score = ops.print_rate and episode % ops.print_rate == 0
get_summary = ops.eval_rate and episode % ops.eval_rate == 0
loss = _train_or_evaluate(print_score, training=True)
if get_summary:
reward = _train_or_evaluate(print_score)
            # No loss if there are not enough samples in memory to learn.
if loss:
summary = {'loss': loss, 'eval_reward': reward}
tensorboard.on_epoch_end(episode, summary)
tensorboard.on_train_end(None)
_record_video(env, agent, output_path)
agent.network.save(output_path, save_format='tf') | [
"def",
"_run",
"(",
"game",
",",
"network_params",
",",
"memory_params",
",",
"explore_decay",
",",
"ops",
")",
":",
"# Setup TensorBoard Writer.",
"trial_id",
"=",
"json",
".",
"loads",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'TF_CONFIG'",
",",
"'{}'",
")",
")",
".",
"get",
"(",
"'task'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'trial'",
",",
"''",
")",
"output_path",
"=",
"ops",
".",
"job_dir",
"if",
"not",
"trial_id",
"else",
"ops",
".",
"job_dir",
"+",
"'/'",
"tensorboard",
"=",
"TensorBoard",
"(",
"log_dir",
"=",
"output_path",
")",
"hpt",
"=",
"hypertune",
".",
"HyperTune",
"(",
")",
"graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"env",
"=",
"gym",
".",
"make",
"(",
"game",
")",
"agent",
"=",
"_create_agent",
"(",
"env",
",",
"network_params",
",",
"memory_params",
",",
"explore_decay",
")",
"tensorboard",
".",
"set_model",
"(",
"agent",
".",
"network",
")",
"def",
"_train_or_evaluate",
"(",
"print_score",
",",
"training",
"=",
"False",
")",
":",
"\"\"\"Runs a gaming simulation and writes results for tensorboard.\n\n Args:\n print_score (bool): True to print a score to the console.\n training (bool): True if the agent is training, False to eval.\n\n Returns:\n loss if training, else reward for evaluating.\n \"\"\"",
"reward",
"=",
"_play",
"(",
"agent",
",",
"env",
",",
"training",
")",
"if",
"print_score",
":",
"print",
"(",
"'Training - '",
"if",
"training",
"else",
"'Evaluating - '",
",",
"'Episode: {}'",
".",
"format",
"(",
"episode",
")",
",",
"'Total reward: {}'",
".",
"format",
"(",
"reward",
")",
",",
")",
"if",
"training",
":",
"return",
"agent",
".",
"learn",
"(",
")",
"hpt",
".",
"report_hyperparameter_tuning_metric",
"(",
"hyperparameter_metric_tag",
"=",
"'episode_reward'",
",",
"metric_value",
"=",
"reward",
",",
"global_step",
"=",
"episode",
")",
"return",
"reward",
"for",
"episode",
"in",
"range",
"(",
"1",
",",
"ops",
".",
"episodes",
"+",
"1",
")",
":",
"print_score",
"=",
"ops",
".",
"print_rate",
"and",
"episode",
"%",
"ops",
".",
"print_rate",
"==",
"0",
"get_summary",
"=",
"ops",
".",
"eval_rate",
"and",
"episode",
"%",
"ops",
".",
"eval_rate",
"==",
"0",
"loss",
"=",
"_train_or_evaluate",
"(",
"print_score",
",",
"training",
"=",
"True",
")",
"if",
"get_summary",
":",
"reward",
"=",
"_train_or_evaluate",
"(",
"print_score",
")",
"# No loss if there is not enough samples in memory to learn.",
"if",
"loss",
":",
"summary",
"=",
"{",
"'loss'",
":",
"loss",
",",
"'eval_reward'",
":",
"reward",
"}",
"tensorboard",
".",
"on_epoch_end",
"(",
"episode",
",",
"summary",
")",
"tensorboard",
".",
"on_train_end",
"(",
"None",
")",
"_record_video",
"(",
"env",
",",
"agent",
",",
"output_path",
")",
"agent",
".",
"network",
".",
"save",
"(",
"output_path",
",",
"save_format",
"=",
"'tf'",
")"
] | [
97,
0
] | [
162,
57
] | python | en | ['en', 'fil', 'en'] | True |
_play | (agent, env, training, recorder=None) | Plays through one episode of the game.
The agent acts in the environment from its starting state until the episode
terminates, storing each transition in the agent's replay memory.
Args:
agent: The actor learning to play in the given environment.
env: The environment for the agent to act in. This code is intended for
use with OpenAI Gym, but the user can alter the code to provide their
own environment.
training (bool): True if the agent is training.
recorder (optional): A gym video recorder object to save the simulation
to a movie.
| Plays through one episode of the game. | def _play(agent, env, training, recorder=None):
"""Plays through one episode of the game.
    The agent acts in the environment from its starting state until the episode
    terminates, storing each transition in the agent's replay memory.
Args:
agent: The actor learning to play in the given environment.
env: The environment for the agent to act in. This code is intended for
use with OpenAI Gym, but the user can alter the code to provide their
own environment.
        training (bool): True if the agent is training.
recorder (optional): A gym video recorder object to save the simulation
to a movie.
"""
episode_reward = 0 # The total reward for an episode.
state = env.reset() # Set up Environment and get start state.
done = False
if recorder:
recorder.capture_frame()
while not done:
action = agent.act(state, training)
state_prime, reward, done, _ = env.step(action)
episode_reward += reward
agent.memory.add((state, action, reward, state_prime, done))
if recorder:
recorder.capture_frame()
state = state_prime # st+1 is now our current state.
return episode_reward | [
"def",
"_play",
"(",
"agent",
",",
"env",
",",
"training",
",",
"recorder",
"=",
"None",
")",
":",
"episode_reward",
"=",
"0",
"# The total reward for an episode.",
"state",
"=",
"env",
".",
"reset",
"(",
")",
"# Set up Environment and get start state.",
"done",
"=",
"False",
"if",
"recorder",
":",
"recorder",
".",
"capture_frame",
"(",
")",
"while",
"not",
"done",
":",
"action",
"=",
"agent",
".",
"act",
"(",
"state",
",",
"training",
")",
"state_prime",
",",
"reward",
",",
"done",
",",
"_",
"=",
"env",
".",
"step",
"(",
"action",
")",
"episode_reward",
"+=",
"reward",
"agent",
".",
"memory",
".",
"add",
"(",
"(",
"state",
",",
"action",
",",
"reward",
",",
"state_prime",
",",
"done",
")",
")",
"if",
"recorder",
":",
"recorder",
".",
"capture_frame",
"(",
")",
"state",
"=",
"state_prime",
"# st+1 is now our current state.",
"return",
"episode_reward"
] | [
165,
0
] | [
197,
25
] | python | en | ['en', 'en', 'en'] | True |
_create_agent | (env, network_params, memory_params, explore_decay) | Creates a Reinforcement Learning agent.
Args:
env: The environment for the agent to act in.
args: The arguments from the command line parsed by _parse_arguments.
Returns:
An RL agent.
| Creates a Reinforcement Learning agent. | def _create_agent(env, network_params, memory_params, explore_decay):
"""Creates a Reinforcement Learning agent.
Args:
env: The environment for the agent to act in.
        args: The arguments from the command line parsed by _parse_arguments.
Returns:
An RL agent.
"""
space_shape = env.observation_space.shape
action_size = env.action_space.n
network = model.deep_q_network(
space_shape, action_size, *network_params)
memory = model.Memory(*memory_params)
return model.Agent(network, memory, explore_decay, action_size) | [
"def",
"_create_agent",
"(",
"env",
",",
"network_params",
",",
"memory_params",
",",
"explore_decay",
")",
":",
"space_shape",
"=",
"env",
".",
"observation_space",
".",
"shape",
"action_size",
"=",
"env",
".",
"action_space",
".",
"n",
"network",
"=",
"model",
".",
"deep_q_network",
"(",
"space_shape",
",",
"action_size",
",",
"*",
"network_params",
")",
"memory",
"=",
"model",
".",
"Memory",
"(",
"*",
"memory_params",
")",
"return",
"model",
".",
"Agent",
"(",
"network",
",",
"memory",
",",
"explore_decay",
",",
"action_size",
")"
] | [
200,
0
] | [
215,
67
] | python | en | ['en', 'en', 'en'] | True |
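A minimal end-to-end sketch of wiring the pieces above together. The hyperparameter values simply echo the argparse defaults, and model.deep_q_network, model.Memory, and model.Agent are this project's own classes, so the exact call shapes are assumptions rather than a documented API.

import gym

env = gym.make('CartPole-v0')
agent = _create_agent(
    env,
    network_params=(0.2, 30),        # (learning_rate, hidden_neurons)
    memory_params=(100000, 8, 0.5),  # (memory_size, batch_size, gamma)
    explore_decay=0.1)
episode_reward = _play(agent, env, training=True)
loss = agent.learn()  # may be empty until the memory holds enough samples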
_record_video | (env, agent, output_path) | Records a video of an agent playing a gaming simulation.
Args:
env: The environment for the agent to act in.
agent: An RL agent created by _create_agent.
output_path (str): The directory path of where to save the recording.
| Records a video of an agent playing a gaming simulation. | def _record_video(env, agent, output_path):
"""Records a video of an agent playing a gaming simulation.
Args:
env: The environment for the agent to act in.
agent: An RL agent created by _create_agent.
output_path (str): The directory path of where to save the recording.
"""
video_recorder = VideoRecorder(env, RECORDING_NAME)
_play(agent, env, False, recorder=video_recorder)
video_recorder.close()
env.close()
# Check if output directory is google cloud and save there if so.
if output_path.startswith("gs://"):
[bucket_name, blob_path] = output_path[5:].split("/", 1)
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_path + RECORDING_NAME)
blob.upload_from_filename(RECORDING_NAME) | [
"def",
"_record_video",
"(",
"env",
",",
"agent",
",",
"output_path",
")",
":",
"video_recorder",
"=",
"VideoRecorder",
"(",
"env",
",",
"RECORDING_NAME",
")",
"_play",
"(",
"agent",
",",
"env",
",",
"False",
",",
"recorder",
"=",
"video_recorder",
")",
"video_recorder",
".",
"close",
"(",
")",
"env",
".",
"close",
"(",
")",
"# Check if output directory is google cloud and save there if so.",
"if",
"output_path",
".",
"startswith",
"(",
"\"gs://\"",
")",
":",
"[",
"bucket_name",
",",
"blob_path",
"]",
"=",
"output_path",
"[",
"5",
":",
"]",
".",
"split",
"(",
"\"/\"",
",",
"1",
")",
"storage_client",
"=",
"storage",
".",
"Client",
"(",
")",
"bucket",
"=",
"storage_client",
".",
"get_bucket",
"(",
"bucket_name",
")",
"blob",
"=",
"bucket",
".",
"blob",
"(",
"blob_path",
"+",
"RECORDING_NAME",
")",
"blob",
".",
"upload_from_filename",
"(",
"RECORDING_NAME",
")"
] | [
218,
0
] | [
237,
49
] | python | en | ['en', 'en', 'en'] | True |
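A worked example of the gs:// handling above; the bucket and run names are made up.

output_path = 'gs://my-bucket/runs/7/'
bucket_name, blob_path = output_path[5:].split('/', 1)
# bucket_name == 'my-bucket', blob_path == 'runs/7/'
# The recording is therefore uploaded as the object 'runs/7/' + RECORDING_NAME.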
main | () | Parses command line arguments and kicks off the gaming simulation. | Parses command line arguments and kicks off the gaming simulation. | def main():
"""Parses command line arguments and kicks off the gaming simulation."""
args = _parse_arguments(sys.argv[1:])[0]
network_params = (args.learning_rate, args.hidden_neurons)
memory_params = (args.memory_size, args.memory_batch_size, args.gamma)
ops = argparse.Namespace(**{
'job_dir': args.job_dir,
'episodes': args.episodes,
'print_rate': args.print_rate,
'eval_rate': args.eval_rate
})
_run(args.game, network_params, memory_params, args.explore_decay, ops) | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"_parse_arguments",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"[",
"0",
"]",
"network_params",
"=",
"(",
"args",
".",
"learning_rate",
",",
"args",
".",
"hidden_neurons",
")",
"memory_params",
"=",
"(",
"args",
".",
"memory_size",
",",
"args",
".",
"memory_batch_size",
",",
"args",
".",
"gamma",
")",
"ops",
"=",
"argparse",
".",
"Namespace",
"(",
"*",
"*",
"{",
"'job_dir'",
":",
"args",
".",
"job_dir",
",",
"'episodes'",
":",
"args",
".",
"episodes",
",",
"'print_rate'",
":",
"args",
".",
"print_rate",
",",
"'eval_rate'",
":",
"args",
".",
"eval_rate",
"}",
")",
"_run",
"(",
"args",
".",
"game",
",",
"network_params",
",",
"memory_params",
",",
"args",
".",
"explore_decay",
",",
"ops",
")"
] | [
240,
0
] | [
251,
75
] | python | en | ['en', 'en', 'en'] | True |
parse_requirements | (
filename: str,
session: PipSession,
finder: Optional["PackageFinder"] = None,
options: Optional[optparse.Values] = None,
constraint: bool = False,
) | Parse a requirements file and yield ParsedRequirement instances.
:param filename: Path or url of requirements file.
:param session: PipSession instance.
:param finder: Instance of pip.index.PackageFinder.
:param options: cli options.
:param constraint: If true, parsing a constraint file rather than
requirements file.
| Parse a requirements file and yield ParsedRequirement instances. | def parse_requirements(
filename: str,
session: PipSession,
finder: Optional["PackageFinder"] = None,
options: Optional[optparse.Values] = None,
constraint: bool = False,
) -> Iterator[ParsedRequirement]:
"""Parse a requirements file and yield ParsedRequirement instances.
:param filename: Path or url of requirements file.
:param session: PipSession instance.
:param finder: Instance of pip.index.PackageFinder.
:param options: cli options.
:param constraint: If true, parsing a constraint file rather than
requirements file.
"""
line_parser = get_line_parser(finder)
parser = RequirementsFileParser(session, line_parser)
for parsed_line in parser.parse(filename, constraint):
parsed_req = handle_line(
parsed_line,
options=options,
finder=finder,
session=session
)
if parsed_req is not None:
yield parsed_req | [
"def",
"parse_requirements",
"(",
"filename",
":",
"str",
",",
"session",
":",
"PipSession",
",",
"finder",
":",
"Optional",
"[",
"\"PackageFinder\"",
"]",
"=",
"None",
",",
"options",
":",
"Optional",
"[",
"optparse",
".",
"Values",
"]",
"=",
"None",
",",
"constraint",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Iterator",
"[",
"ParsedRequirement",
"]",
":",
"line_parser",
"=",
"get_line_parser",
"(",
"finder",
")",
"parser",
"=",
"RequirementsFileParser",
"(",
"session",
",",
"line_parser",
")",
"for",
"parsed_line",
"in",
"parser",
".",
"parse",
"(",
"filename",
",",
"constraint",
")",
":",
"parsed_req",
"=",
"handle_line",
"(",
"parsed_line",
",",
"options",
"=",
"options",
",",
"finder",
"=",
"finder",
",",
"session",
"=",
"session",
")",
"if",
"parsed_req",
"is",
"not",
"None",
":",
"yield",
"parsed_req"
] | [
115,
0
] | [
142,
28
] | python | en | ['en', 'en', 'en'] | True |
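A hedged usage sketch of the generator above. parse_requirements lives in pip's internal requirements-file module, so this is not a stable public API, and the PipSession import path below is my assumption based on current pip layouts.

from pip._internal.network.session import PipSession

session = PipSession()
for parsed_req in parse_requirements('requirements.txt', session=session):
    print(parsed_req.requirement, parsed_req.options)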
preprocess | (content: str) | Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
| Split, filter, and join lines, and return a line iterator | def preprocess(content: str) -> ReqFileLines:
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
"""
lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1)
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = expand_env_variables(lines_enum)
return lines_enum | [
"def",
"preprocess",
"(",
"content",
":",
"str",
")",
"->",
"ReqFileLines",
":",
"lines_enum",
":",
"ReqFileLines",
"=",
"enumerate",
"(",
"content",
".",
"splitlines",
"(",
")",
",",
"start",
"=",
"1",
")",
"lines_enum",
"=",
"join_lines",
"(",
"lines_enum",
")",
"lines_enum",
"=",
"ignore_comments",
"(",
"lines_enum",
")",
"lines_enum",
"=",
"expand_env_variables",
"(",
"lines_enum",
")",
"return",
"lines_enum"
] | [
145,
0
] | [
154,
21
] | python | en | ['en', 'en', 'en'] | True |
handle_line | (
line: ParsedLine,
options: Optional[optparse.Values] = None,
finder: Optional["PackageFinder"] = None,
session: Optional[PipSession] = None,
) | Handle a single parsed requirements line; This can result in
creating/yielding requirements, or updating the finder.
:param line: The parsed line to be processed.
:param options: CLI options.
:param finder: The finder - updated by non-requirement lines.
:param session: The session - updated by non-requirement lines.
Returns a ParsedRequirement object if the line is a requirement line,
otherwise returns None.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
(although our docs imply only one is supported), and all are parsed and
affect the finder.
| Handle a single parsed requirements line; This can result in
creating/yielding requirements, or updating the finder. | def handle_line(
line: ParsedLine,
options: Optional[optparse.Values] = None,
finder: Optional["PackageFinder"] = None,
session: Optional[PipSession] = None,
) -> Optional[ParsedRequirement]:
"""Handle a single parsed requirements line; This can result in
creating/yielding requirements, or updating the finder.
:param line: The parsed line to be processed.
:param options: CLI options.
:param finder: The finder - updated by non-requirement lines.
:param session: The session - updated by non-requirement lines.
Returns a ParsedRequirement object if the line is a requirement line,
otherwise returns None.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
"""
if line.is_requirement:
parsed_req = handle_requirement_line(line, options)
return parsed_req
else:
handle_option_line(
line.opts,
line.filename,
line.lineno,
finder,
options,
session,
)
return None | [
"def",
"handle_line",
"(",
"line",
":",
"ParsedLine",
",",
"options",
":",
"Optional",
"[",
"optparse",
".",
"Values",
"]",
"=",
"None",
",",
"finder",
":",
"Optional",
"[",
"\"PackageFinder\"",
"]",
"=",
"None",
",",
"session",
":",
"Optional",
"[",
"PipSession",
"]",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"ParsedRequirement",
"]",
":",
"if",
"line",
".",
"is_requirement",
":",
"parsed_req",
"=",
"handle_requirement_line",
"(",
"line",
",",
"options",
")",
"return",
"parsed_req",
"else",
":",
"handle_option_line",
"(",
"line",
".",
"opts",
",",
"line",
".",
"filename",
",",
"line",
".",
"lineno",
",",
"finder",
",",
"options",
",",
"session",
",",
")",
"return",
"None"
] | [
262,
0
] | [
303,
19
] | python | en | ['en', 'en', 'en'] | True |
break_args_options | (line: str) | Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
| Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
| def break_args_options(line: str) -> Tuple[str, str]:
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options) | [
"def",
"break_args_options",
"(",
"line",
":",
"str",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"tokens",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"args",
"=",
"[",
"]",
"options",
"=",
"tokens",
"[",
":",
"]",
"for",
"token",
"in",
"tokens",
":",
"if",
"token",
".",
"startswith",
"(",
"'-'",
")",
"or",
"token",
".",
"startswith",
"(",
"'--'",
")",
":",
"break",
"else",
":",
"args",
".",
"append",
"(",
"token",
")",
"options",
".",
"pop",
"(",
"0",
")",
"return",
"' '",
".",
"join",
"(",
"args",
")",
",",
"' '",
".",
"join",
"(",
"options",
")"
] | [
392,
0
] | [
406,
44
] | python | en | ['en', 'en', 'en'] | True |
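A worked example of the split performed above; the requirement text is illustrative.

args, options = break_args_options('SomeProject==1.3 --hash=sha256:abcd')
# args    -> 'SomeProject==1.3'
# options -> '--hash=sha256:abcd'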
build_parser | () |
Return a parser for parsing requirement lines
|
Return a parser for parsing requirement lines
| def build_parser() -> optparse.OptionParser:
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self: Any, msg: str) -> "NoReturn":
raise OptionParsingError(msg)
# NOTE: mypy disallows assigning to a method
# https://github.com/python/mypy/issues/2427
parser.exit = parser_exit # type: ignore
return parser | [
"def",
"build_parser",
"(",
")",
"->",
"optparse",
".",
"OptionParser",
":",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"add_help_option",
"=",
"False",
")",
"option_factories",
"=",
"SUPPORTED_OPTIONS",
"+",
"SUPPORTED_OPTIONS_REQ",
"for",
"option_factory",
"in",
"option_factories",
":",
"option",
"=",
"option_factory",
"(",
")",
"parser",
".",
"add_option",
"(",
"option",
")",
"# By default optparse sys.exits on parsing errors. We want to wrap",
"# that in our own exception.",
"def",
"parser_exit",
"(",
"self",
":",
"Any",
",",
"msg",
":",
"str",
")",
"->",
"\"NoReturn\"",
":",
"raise",
"OptionParsingError",
"(",
"msg",
")",
"# NOTE: mypy disallows assigning to a method",
"# https://github.com/python/mypy/issues/2427",
"parser",
".",
"exit",
"=",
"parser_exit",
"# type: ignore",
"return",
"parser"
] | [
414,
0
] | [
433,
17
] | python | en | ['en', 'error', 'th'] | False |
join_lines | (lines_enum: ReqFileLines) | Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
| Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
| def join_lines(lines_enum: ReqFileLines) -> ReqFileLines:
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line: List[str] = []
for line_number, line in lines_enum:
if not line.endswith('\\') or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = ' ' + line
if new_line:
new_line.append(line)
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip('\\'))
# last line contains \
if new_line:
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line) | [
"def",
"join_lines",
"(",
"lines_enum",
":",
"ReqFileLines",
")",
"->",
"ReqFileLines",
":",
"primary_line_number",
"=",
"None",
"new_line",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"for",
"line_number",
",",
"line",
"in",
"lines_enum",
":",
"if",
"not",
"line",
".",
"endswith",
"(",
"'\\\\'",
")",
"or",
"COMMENT_RE",
".",
"match",
"(",
"line",
")",
":",
"if",
"COMMENT_RE",
".",
"match",
"(",
"line",
")",
":",
"# this ensures comments are always matched later",
"line",
"=",
"' '",
"+",
"line",
"if",
"new_line",
":",
"new_line",
".",
"append",
"(",
"line",
")",
"assert",
"primary_line_number",
"is",
"not",
"None",
"yield",
"primary_line_number",
",",
"''",
".",
"join",
"(",
"new_line",
")",
"new_line",
"=",
"[",
"]",
"else",
":",
"yield",
"line_number",
",",
"line",
"else",
":",
"if",
"not",
"new_line",
":",
"primary_line_number",
"=",
"line_number",
"new_line",
".",
"append",
"(",
"line",
".",
"strip",
"(",
"'\\\\'",
")",
")",
"# last line contains \\",
"if",
"new_line",
":",
"assert",
"primary_line_number",
"is",
"not",
"None",
"yield",
"primary_line_number",
",",
"''",
".",
"join",
"(",
"new_line",
")"
] | [
436,
0
] | [
462,
52
] | python | en | ['en', 'en', 'en'] | True |
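A worked example of the continuation handling above; the requirement names are made up, and the joined line keeps the index of its first physical line.

lines = enumerate(['SomeProject \\', '    >=1.0', 'OtherProject'], start=1)
print(list(join_lines(lines)))
# -> [(1, 'SomeProject     >=1.0'), (3, 'OtherProject')]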
ignore_comments | (lines_enum: ReqFileLines) |
Strips comments and filter empty lines.
|
Strips comments and filter empty lines.
| def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines:
"""
Strips comments and filter empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line_number, line | [
"def",
"ignore_comments",
"(",
"lines_enum",
":",
"ReqFileLines",
")",
"->",
"ReqFileLines",
":",
"for",
"line_number",
",",
"line",
"in",
"lines_enum",
":",
"line",
"=",
"COMMENT_RE",
".",
"sub",
"(",
"''",
",",
"line",
")",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
":",
"yield",
"line_number",
",",
"line"
] | [
467,
0
] | [
475,
35
] | python | en | ['en', 'error', 'th'] | False |
expand_env_variables | (lines_enum: ReqFileLines) | Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
| Replace all environment variables that can be retrieved via `os.getenv`. | def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines:
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line | [
"def",
"expand_env_variables",
"(",
"lines_enum",
":",
"ReqFileLines",
")",
"->",
"ReqFileLines",
":",
"for",
"line_number",
",",
"line",
"in",
"lines_enum",
":",
"for",
"env_var",
",",
"var_name",
"in",
"ENV_VAR_RE",
".",
"findall",
"(",
"line",
")",
":",
"value",
"=",
"os",
".",
"getenv",
"(",
"var_name",
")",
"if",
"not",
"value",
":",
"continue",
"line",
"=",
"line",
".",
"replace",
"(",
"env_var",
",",
"value",
")",
"yield",
"line_number",
",",
"line"
] | [
478,
0
] | [
502,
31
] | python | en | ['en', 'en', 'en'] | True |
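A worked example of the ${VAR} expansion above, assuming ENV_VAR_RE matches the uppercase ${NAME} form described in the docstring; the token value is made up.

import os

os.environ['PRIVATE_TOKEN'] = 'abc123'
lines = [(1, 'https://${PRIVATE_TOKEN}@example.com/simple/')]
print(list(expand_env_variables(lines)))
# -> [(1, 'https://abc123@example.com/simple/')]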
get_file_content | (url: str, session: PipSession) | Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files.
:param url: File path or url.
:param session: PipSession instance.
| Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files. | def get_file_content(url: str, session: PipSession) -> Tuple[str, str]:
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files.
:param url: File path or url.
:param session: PipSession instance.
"""
scheme = get_url_scheme(url)
# Pip has special support for file:// URLs (LocalFSAdapter).
if scheme in ['http', 'https', 'file']:
resp = session.get(url)
raise_for_status(resp)
return resp.url, resp.text
# Assume this is a bare path.
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except OSError as exc:
raise InstallationError(f'Could not open requirements file: {exc}')
return url, content | [
"def",
"get_file_content",
"(",
"url",
":",
"str",
",",
"session",
":",
"PipSession",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"scheme",
"=",
"get_url_scheme",
"(",
"url",
")",
"# Pip has special support for file:// URLs (LocalFSAdapter).",
"if",
"scheme",
"in",
"[",
"'http'",
",",
"'https'",
",",
"'file'",
"]",
":",
"resp",
"=",
"session",
".",
"get",
"(",
"url",
")",
"raise_for_status",
"(",
"resp",
")",
"return",
"resp",
".",
"url",
",",
"resp",
".",
"text",
"# Assume this is a bare path.",
"try",
":",
"with",
"open",
"(",
"url",
",",
"'rb'",
")",
"as",
"f",
":",
"content",
"=",
"auto_decode",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"OSError",
"as",
"exc",
":",
"raise",
"InstallationError",
"(",
"f'Could not open requirements file: {exc}'",
")",
"return",
"url",
",",
"content"
] | [
505,
0
] | [
527,
23
] | python | en | ['en', 'en', 'en'] | True |
RequirementsFileParser.parse | (self, filename: str, constraint: bool) | Parse a given file, yielding parsed lines.
| Parse a given file, yielding parsed lines.
| def parse(self, filename: str, constraint: bool) -> Iterator[ParsedLine]:
"""Parse a given file, yielding parsed lines.
"""
yield from self._parse_and_recurse(filename, constraint) | [
"def",
"parse",
"(",
"self",
",",
"filename",
":",
"str",
",",
"constraint",
":",
"bool",
")",
"->",
"Iterator",
"[",
"ParsedLine",
"]",
":",
"yield",
"from",
"self",
".",
"_parse_and_recurse",
"(",
"filename",
",",
"constraint",
")"
] | [
315,
4
] | [
318,
64
] | python | en | ['en', 'en', 'en'] | True |
DashboardsServiceAsyncClient.__init__ | (
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, DashboardsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
) | Instantiate the dashboards service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DashboardsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint, this is the default value for
the environment variable) and "auto" (auto switch to the default
mTLS endpoint if client SSL credentials is present). However,
the ``api_endpoint`` property takes precedence if provided.
(2) The ``client_cert_source`` property is used to provide client
SSL credentials for mutual TLS transport. If not provided, the
default SSL credentials will be used if present.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
| Instantiate the dashboards service client. | def __init__(
self,
*,
credentials: credentials.Credentials = None,
transport: Union[str, DashboardsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
) -> None:
"""Instantiate the dashboards service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DashboardsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint, this is the default value for
the environment variable) and "auto" (auto switch to the default
mTLS endpoint if client SSL credentials is present). However,
the ``api_endpoint`` property takes precedence if provided.
(2) The ``client_cert_source`` property is used to provide client
SSL credentials for mutual TLS transport. If not provided, the
default SSL credentials will be used if present.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DashboardsServiceClient(
credentials=credentials, transport=transport, client_options=client_options,
) | [
"def",
"__init__",
"(",
"self",
",",
"*",
",",
"credentials",
":",
"credentials",
".",
"Credentials",
"=",
"None",
",",
"transport",
":",
"Union",
"[",
"str",
",",
"DashboardsServiceTransport",
"]",
"=",
"\"grpc_asyncio\"",
",",
"client_options",
":",
"ClientOptions",
"=",
"None",
",",
")",
"->",
"None",
":",
"self",
".",
"_client",
"=",
"DashboardsServiceClient",
"(",
"credentials",
"=",
"credentials",
",",
"transport",
"=",
"transport",
",",
"client_options",
"=",
"client_options",
",",
")"
] | [
59,
4
] | [
98,
9
] | python | en | ['en', 'en', 'en'] | True |
DashboardsServiceAsyncClient.create_dashboard | (
self,
request: dashboards_service.CreateDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | r"""Creates a new custom dashboard.
This method requires the ``monitoring.dashboards.create``
permission on the specified project. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.CreateDashboardRequest`):
The request object. The `CreateDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
| r"""Creates a new custom dashboard. | async def create_dashboard(
self,
request: dashboards_service.CreateDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dashboard.Dashboard:
r"""Creates a new custom dashboard.
This method requires the ``monitoring.dashboards.create``
permission on the specified project. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.CreateDashboardRequest`):
The request object. The `CreateDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
"""
# Create or coerce a protobuf request object.
request = dashboards_service.CreateDashboardRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_dashboard,
default_timeout=30.0,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response | [
"async",
"def",
"create_dashboard",
"(",
"self",
",",
"request",
":",
"dashboards_service",
".",
"CreateDashboardRequest",
"=",
"None",
",",
"*",
",",
"retry",
":",
"retries",
".",
"Retry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"dashboard",
".",
"Dashboard",
":",
"# Create or coerce a protobuf request object.",
"request",
"=",
"dashboards_service",
".",
"CreateDashboardRequest",
"(",
"request",
")",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"gapic_v1",
".",
"method_async",
".",
"wrap_method",
"(",
"self",
".",
"_client",
".",
"_transport",
".",
"create_dashboard",
",",
"default_timeout",
"=",
"30.0",
",",
"client_info",
"=",
"_client_info",
",",
")",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"parent\"",
",",
"request",
".",
"parent",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"response",
"=",
"await",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"# Done; return the response.",
"return",
"response"
] | [
100,
4
] | [
154,
23
] | python | cy | ['pt', 'cy', 'en'] | False |
DashboardsServiceAsyncClient.list_dashboards | (
self,
request: dashboards_service.ListDashboardsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | r"""Lists the existing dashboards.
This method requires the ``monitoring.dashboards.list``
permission on the specified project. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.ListDashboardsRequest`):
The request object. The `ListDashboards` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListDashboardsAsyncPager:
The ``ListDashboards`` request.
Iterating over this object will yield results and
resolve additional pages automatically.
| r"""Lists the existing dashboards. | async def list_dashboards(
self,
request: dashboards_service.ListDashboardsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDashboardsAsyncPager:
r"""Lists the existing dashboards.
This method requires the ``monitoring.dashboards.list``
permission on the specified project. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.ListDashboardsRequest`):
The request object. The `ListDashboards` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListDashboardsAsyncPager:
The ``ListDashboards`` request.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = dashboards_service.ListDashboardsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_dashboards,
default_timeout=None,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDashboardsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response | [
"async",
"def",
"list_dashboards",
"(",
"self",
",",
"request",
":",
"dashboards_service",
".",
"ListDashboardsRequest",
"=",
"None",
",",
"*",
",",
"retry",
":",
"retries",
".",
"Retry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"pagers",
".",
"ListDashboardsAsyncPager",
":",
"# Create or coerce a protobuf request object.",
"request",
"=",
"dashboards_service",
".",
"ListDashboardsRequest",
"(",
"request",
")",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"gapic_v1",
".",
"method_async",
".",
"wrap_method",
"(",
"self",
".",
"_client",
".",
"_transport",
".",
"list_dashboards",
",",
"default_timeout",
"=",
"None",
",",
"client_info",
"=",
"_client_info",
",",
")",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"parent\"",
",",
"request",
".",
"parent",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"response",
"=",
"await",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"# This method is paged; wrap the response in a pager, which provides",
"# an `__aiter__` convenience method.",
"response",
"=",
"pagers",
".",
"ListDashboardsAsyncPager",
"(",
"method",
"=",
"rpc",
",",
"request",
"=",
"request",
",",
"response",
"=",
"response",
",",
"metadata",
"=",
"metadata",
",",
")",
"# Done; return the response.",
"return",
"response"
] | [
156,
4
] | [
216,
23
] | python | en | ['en', 'en', 'en'] | True |
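A hedged usage sketch for the async pager above. The import path follows the google-cloud-monitoring-dashboards package layout and the project ID is a placeholder; both are assumptions, not taken from this record.

import asyncio
from google.cloud import monitoring_dashboard_v1

async def show_dashboards():
    client = monitoring_dashboard_v1.DashboardsServiceAsyncClient()
    pager = await client.list_dashboards(request={"parent": "projects/my-project"})
    async for dashboard in pager:  # additional pages are fetched automatically
        print(dashboard.name)

asyncio.run(show_dashboards())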
DashboardsServiceAsyncClient.get_dashboard | (
self,
request: dashboards_service.GetDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | r"""Fetches a specific dashboard.
This method requires the ``monitoring.dashboards.get``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.GetDashboardRequest`):
The request object. The `GetDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
| r"""Fetches a specific dashboard. | async def get_dashboard(
self,
request: dashboards_service.GetDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dashboard.Dashboard:
r"""Fetches a specific dashboard.
This method requires the ``monitoring.dashboards.get``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.GetDashboardRequest`):
The request object. The `GetDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
"""
# Create or coerce a protobuf request object.
request = dashboards_service.GetDashboardRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_dashboard,
default_timeout=None,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response | [
"async",
"def",
"get_dashboard",
"(",
"self",
",",
"request",
":",
"dashboards_service",
".",
"GetDashboardRequest",
"=",
"None",
",",
"*",
",",
"retry",
":",
"retries",
".",
"Retry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"dashboard",
".",
"Dashboard",
":",
"# Create or coerce a protobuf request object.",
"request",
"=",
"dashboards_service",
".",
"GetDashboardRequest",
"(",
"request",
")",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"gapic_v1",
".",
"method_async",
".",
"wrap_method",
"(",
"self",
".",
"_client",
".",
"_transport",
".",
"get_dashboard",
",",
"default_timeout",
"=",
"None",
",",
"client_info",
"=",
"_client_info",
",",
")",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"name\"",
",",
"request",
".",
"name",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"response",
"=",
"await",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"# Done; return the response.",
"return",
"response"
] | [
218,
4
] | [
272,
23
] | python | en | ['en', 'en', 'en'] | True |
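A similarly hedged sketch for get_dashboard; the dashboard resource name is a placeholder and the same import-path and credentials assumptions apply.

import asyncio
from google.cloud import monitoring_dashboard_v1  # assumed import path


async def show_dashboard() -> None:
    client = monitoring_dashboard_v1.DashboardsServiceAsyncClient()
    # The "name" field feeds the routing header added in the method body above.
    request = monitoring_dashboard_v1.GetDashboardRequest(
        name="projects/my-project-id/dashboards/my-dashboard-id",  # hypothetical
    )
    dashboard = await client.get_dashboard(request=request)
    print(dashboard.display_name)


asyncio.run(show_dashboard())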
DashboardsServiceAsyncClient.delete_dashboard | (
self,
request: dashboards_service.DeleteDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | r"""Deletes an existing custom dashboard.
This method requires the ``monitoring.dashboards.delete``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.DeleteDashboardRequest`):
The request object. The `DeleteDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
| r"""Deletes an existing custom dashboard. | async def delete_dashboard(
self,
request: dashboards_service.DeleteDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an existing custom dashboard.
This method requires the ``monitoring.dashboards.delete``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.DeleteDashboardRequest`):
The request object. The `DeleteDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
request = dashboards_service.DeleteDashboardRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_dashboard,
default_timeout=30.0,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
) | [
"async",
"def",
"delete_dashboard",
"(",
"self",
",",
"request",
":",
"dashboards_service",
".",
"DeleteDashboardRequest",
"=",
"None",
",",
"*",
",",
"retry",
":",
"retries",
".",
"Retry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"None",
":",
"# Create or coerce a protobuf request object.",
"request",
"=",
"dashboards_service",
".",
"DeleteDashboardRequest",
"(",
"request",
")",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"gapic_v1",
".",
"method_async",
".",
"wrap_method",
"(",
"self",
".",
"_client",
".",
"_transport",
".",
"delete_dashboard",
",",
"default_timeout",
"=",
"30.0",
",",
"client_info",
"=",
"_client_info",
",",
")",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"name\"",
",",
"request",
".",
"name",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"await",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")"
] | [
274,
4
] | [
319,
9
] | python | en | ['en', 'cy', 'en'] | True |
DashboardsServiceAsyncClient.update_dashboard | (
self,
request: dashboards_service.UpdateDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) | r"""Replaces an existing custom dashboard with a new definition.
This method requires the ``monitoring.dashboards.update``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.UpdateDashboardRequest`):
The request object. The `UpdateDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
| r"""Replaces an existing custom dashboard with a new definition. | async def update_dashboard(
self,
request: dashboards_service.UpdateDashboardRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dashboard.Dashboard:
r"""Replaces an existing custom dashboard with a new definition.
This method requires the ``monitoring.dashboards.update``
permission on the specified dashboard. For more information, see
`Google Cloud IAM <https://cloud.google.com/iam>`__.
Args:
request (:class:`~.dashboards_service.UpdateDashboardRequest`):
The request object. The `UpdateDashboard` request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.dashboard.Dashboard:
A Google Stackdriver dashboard.
Dashboards define the content and layout
of pages in the Stackdriver web
application.
"""
# Create or coerce a protobuf request object.
request = dashboards_service.UpdateDashboardRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_dashboard,
default_timeout=30.0,
client_info=_client_info,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("dashboard.name", request.dashboard.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response | [
"async",
"def",
"update_dashboard",
"(",
"self",
",",
"request",
":",
"dashboards_service",
".",
"UpdateDashboardRequest",
"=",
"None",
",",
"*",
",",
"retry",
":",
"retries",
".",
"Retry",
"=",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
":",
"float",
"=",
"None",
",",
"metadata",
":",
"Sequence",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"(",
")",
",",
")",
"->",
"dashboard",
".",
"Dashboard",
":",
"# Create or coerce a protobuf request object.",
"request",
"=",
"dashboards_service",
".",
"UpdateDashboardRequest",
"(",
"request",
")",
"# Wrap the RPC method; this adds retry and timeout information,",
"# and friendly error handling.",
"rpc",
"=",
"gapic_v1",
".",
"method_async",
".",
"wrap_method",
"(",
"self",
".",
"_client",
".",
"_transport",
".",
"update_dashboard",
",",
"default_timeout",
"=",
"30.0",
",",
"client_info",
"=",
"_client_info",
",",
")",
"# Certain fields should be provided within the metadata header;",
"# add these here.",
"metadata",
"=",
"tuple",
"(",
"metadata",
")",
"+",
"(",
"gapic_v1",
".",
"routing_header",
".",
"to_grpc_metadata",
"(",
"(",
"(",
"\"dashboard.name\"",
",",
"request",
".",
"dashboard",
".",
"name",
")",
",",
")",
")",
",",
")",
"# Send the request.",
"response",
"=",
"await",
"rpc",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
",",
")",
"# Done; return the response.",
"return",
"response"
] | [
321,
4
] | [
377,
23
] | python | en | ['en', 'en', 'en'] | True |
ContactList.search | (self, name) | Return all contacts that contain the search value in their name | Return all contacts that contain the search value in their name | def search(self, name):
"Return all contacts that contain the search value in their name"
matching_contacts = []
for contact in self:
if name in contact.name:
matching_contacts.append(contact)
return matching_contacts | [
"def",
"search",
"(",
"self",
",",
"name",
")",
":",
"matching_contacts",
"=",
"[",
"]",
"for",
"contact",
"in",
"self",
":",
"if",
"name",
"in",
"contact",
".",
"name",
":",
"matching_contacts",
".",
"append",
"(",
"contact",
")",
"return",
"matching_contacts"
] | [
13,
4
] | [
19,
32
] | python | en | ['en', 'en', 'en'] | True |
Field.clean | (self, value) |
Validate the given value and return its "cleaned" value as an
appropriate Python object. Raise ValidationError for any errors.
|
Validate the given value and return its "cleaned" value as an
appropriate Python object. Raise ValidationError for any errors.
| def clean(self, value):
"""
Validate the given value and return its "cleaned" value as an
appropriate Python object. Raise ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value | [
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"self",
".",
"to_python",
"(",
"value",
")",
"self",
".",
"validate",
"(",
"value",
")",
"self",
".",
"run_validators",
"(",
"value",
")",
"return",
"value"
] | [
143,
4
] | [
151,
20
] | python | en | ['en', 'error', 'th'] | False |
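The clean() method above fixes the order of the validation pipeline: to_python(), then validate(), then run_validators(). A minimal sketch with a hypothetical subclass makes that order visible; the settings.configure() lines are only a shim so the snippet runs outside a Django project.

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim; real projects already have settings
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError


class EvenNumberField(forms.IntegerField):
    def validate(self, value):
        super().validate(value)          # runs after to_python() has coerced the input
        if value is not None and value % 2:
            raise ValidationError("Expected an even number.")


field = EvenNumberField(required=False)
print(field.clean("42"))   # -> 42: to_python() coerces, validate() passes
try:
    field.clean("7")
except ValidationError as exc:
    print(exc.messages)    # -> ['Expected an even number.']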
Field.bound_data | (self, data, initial) |
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
|
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any. | def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
if self.disabled:
return initial
return data | [
"def",
"bound_data",
"(",
"self",
",",
"data",
",",
"initial",
")",
":",
"if",
"self",
".",
"disabled",
":",
"return",
"initial",
"return",
"data"
] | [
153,
4
] | [
164,
19
] | python | en | ['en', 'error', 'th'] | False |
Field.widget_attrs | (self, widget) |
Given a Widget instance (*not* a Widget class), return a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
|
Given a Widget instance (*not* a Widget class), return a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
| def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), return a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {} | [
"def",
"widget_attrs",
"(",
"self",
",",
"widget",
")",
":",
"return",
"{",
"}"
] | [
166,
4
] | [
172,
17
] | python | en | ['en', 'error', 'th'] | False |
Field.has_changed | (self, initial, data) | Return True if data differs from initial. | Return True if data differs from initial. | def has_changed(self, initial, data):
"""Return True if data differs from initial."""
# Always return False if the field is disabled since self.bound_data
# always uses the initial value in this case.
if self.disabled:
return False
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
return self._coerce(data) != self._coerce(initial)
except ValidationError:
return True
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it with ''.
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return initial_value != data_value | [
"def",
"has_changed",
"(",
"self",
",",
"initial",
",",
"data",
")",
":",
"# Always return False if the field is disabled since self.bound_data",
"# always uses the initial value in this case.",
"if",
"self",
".",
"disabled",
":",
"return",
"False",
"try",
":",
"data",
"=",
"self",
".",
"to_python",
"(",
"data",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_coerce'",
")",
":",
"return",
"self",
".",
"_coerce",
"(",
"data",
")",
"!=",
"self",
".",
"_coerce",
"(",
"initial",
")",
"except",
"ValidationError",
":",
"return",
"True",
"# For purposes of seeing whether something has changed, None is",
"# the same as an empty string, if the data or initial value we get",
"# is None, replace it with ''.",
"initial_value",
"=",
"initial",
"if",
"initial",
"is",
"not",
"None",
"else",
"''",
"data_value",
"=",
"data",
"if",
"data",
"is",
"not",
"None",
"else",
"''",
"return",
"initial_value",
"!=",
"data_value"
] | [
174,
4
] | [
191,
42
] | python | en | ['en', 'en', 'en'] | True |
Field.get_bound_field | (self, form, field_name) |
Return a BoundField instance that will be used when accessing the form
field in a template.
|
Return a BoundField instance that will be used when accessing the form
field in a template.
| def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name) | [
"def",
"get_bound_field",
"(",
"self",
",",
"form",
",",
"field_name",
")",
":",
"return",
"BoundField",
"(",
"form",
",",
"self",
",",
"field_name",
")"
] | [
193,
4
] | [
198,
49
] | python | en | ['en', 'error', 'th'] | False |
CharField.to_python | (self, value) | Return a string. | Return a string. | def to_python(self, value):
"""Return a string."""
if value not in self.empty_values:
value = str(value)
if self.strip:
value = value.strip()
if value in self.empty_values:
return self.empty_value
return value | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"not",
"in",
"self",
".",
"empty_values",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"self",
".",
"strip",
":",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"self",
".",
"empty_value",
"return",
"value"
] | [
222,
4
] | [
230,
20
] | python | en | ['en', 'cy', 'en'] | True |
IntegerField.to_python | (self, value) |
Validate that int() can be called on the input. Return the result
of int() or None for empty values.
|
Validate that int() can be called on the input. Return the result
of int() or None for empty values.
| def to_python(self, value):
"""
Validate that int() can be called on the input. Return the result
of int() or None for empty values.
"""
value = super().to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"super",
"(",
")",
".",
"to_python",
"(",
"value",
")",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"self",
".",
"localize",
":",
"value",
"=",
"formats",
".",
"sanitize_separators",
"(",
"value",
")",
"# Strip trailing decimal and zeros.",
"try",
":",
"value",
"=",
"int",
"(",
"self",
".",
"re_decimal",
".",
"sub",
"(",
"''",
",",
"str",
"(",
"value",
")",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid'",
"]",
",",
"code",
"=",
"'invalid'",
")",
"return",
"value"
] | [
262,
4
] | [
277,
20
] | python | en | ['en', 'error', 'th'] | False |
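A quick illustration of the IntegerField.to_python() behaviour documented above (string coercion and stripping of a trailing ".0"); same standalone settings shim as before.

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.IntegerField()
print(field.clean("42"))     # -> 42
print(field.clean("42.0"))   # -> 42, the trailing ".0" is stripped by re_decimal
try:
    field.clean("forty-two")
except ValidationError as exc:
    print(exc.messages)      # coercion failed inside to_python()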
FloatField.to_python | (self, value) |
Validate that float() can be called on the input. Return the result
of float() or None for empty values.
|
Validate that float() can be called on the input. Return the result
of float() or None for empty values.
| def to_python(self, value):
"""
Validate that float() can be called on the input. Return the result
of float() or None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"super",
"(",
"IntegerField",
",",
"self",
")",
".",
"to_python",
"(",
"value",
")",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"self",
".",
"localize",
":",
"value",
"=",
"formats",
".",
"sanitize_separators",
"(",
"value",
")",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid'",
"]",
",",
"code",
"=",
"'invalid'",
")",
"return",
"value"
] | [
294,
4
] | [
308,
20
] | python | en | ['en', 'error', 'th'] | False |
DecimalField.to_python | (self, value) |
Validate that the input is a decimal number. Return a Decimal
instance or None for empty values. Ensure that there are no more
than max_digits in the number and no more than decimal_places digits
after the decimal point.
|
Validate that the input is a decimal number. Return a Decimal
instance or None for empty values. Ensure that there are no more
than max_digits in the number and no more than decimal_places digits
after the decimal point.
| def to_python(self, value):
"""
Validate that the input is a decimal number. Return a Decimal
instance or None for empty values. Ensure that there are no more
than max_digits in the number and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"self",
".",
"localize",
":",
"value",
"=",
"formats",
".",
"sanitize_separators",
"(",
"value",
")",
"value",
"=",
"str",
"(",
"value",
")",
".",
"strip",
"(",
")",
"try",
":",
"value",
"=",
"Decimal",
"(",
"value",
")",
"except",
"DecimalException",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid'",
"]",
",",
"code",
"=",
"'invalid'",
")",
"return",
"value"
] | [
334,
4
] | [
350,
20
] | python | en | ['en', 'error', 'th'] | False |
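A short sketch of DecimalField in use; max_digits/decimal_places checks run in the validators after the to_python() conversion shown above.

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.DecimalField(max_digits=5, decimal_places=2)
print(field.clean("123.45"))     # -> Decimal('123.45')
try:
    field.clean("1234.567")      # too many digits for max_digits=5
except ValidationError as exc:
    print(exc.messages)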
DateField.to_python | (self, value) |
Validate that the input can be converted to a date. Return a Python
datetime.date object.
|
Validate that the input can be converted to a date. Return a Python
datetime.date object.
| def to_python(self, value):
"""
Validate that the input can be converted to a date. Return a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super().to_python(value) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"value",
".",
"date",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
":",
"return",
"value",
"return",
"super",
"(",
")",
".",
"to_python",
"(",
"value",
")"
] | [
404,
4
] | [
415,
39
] | python | en | ['en', 'error', 'th'] | False |
TimeField.to_python | (self, value) |
Validate that the input can be converted to a time. Return a Python
datetime.time object.
|
Validate that the input can be converted to a time. Return a Python
datetime.time object.
| def to_python(self, value):
"""
Validate that the input can be converted to a time. Return a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super().to_python(value) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"time",
")",
":",
"return",
"value",
"return",
"super",
"(",
")",
".",
"to_python",
"(",
"value",
")"
] | [
428,
4
] | [
437,
39
] | python | en | ['en', 'error', 'th'] | False |
DateTimeField.to_python | (self, value) |
Validate that the input can be converted to a datetime. Return a
Python datetime.datetime object.
|
Validate that the input can be converted to a datetime. Return a
Python datetime.datetime object.
| def to_python(self, value):
"""
Validate that the input can be converted to a datetime. Return a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
try:
result = parse_datetime(value.strip())
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not result:
result = super().to_python(value)
return from_current_timezone(result) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"from_current_timezone",
"(",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
":",
"result",
"=",
"datetime",
".",
"datetime",
"(",
"value",
".",
"year",
",",
"value",
".",
"month",
",",
"value",
".",
"day",
")",
"return",
"from_current_timezone",
"(",
"result",
")",
"try",
":",
"result",
"=",
"parse_datetime",
"(",
"value",
".",
"strip",
"(",
")",
")",
"except",
"ValueError",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid'",
"]",
",",
"code",
"=",
"'invalid'",
")",
"if",
"not",
"result",
":",
"result",
"=",
"super",
"(",
")",
".",
"to_python",
"(",
"value",
")",
"return",
"from_current_timezone",
"(",
"result",
")"
] | [
461,
4
] | [
479,
44
] | python | en | ['en', 'error', 'th'] | False |
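A sketch of DateTimeField.to_python() as documented above: strings are parsed, and a bare date is promoted to a datetime at midnight. USE_TZ=False keeps the result naive so from_current_timezone() is a no-op.

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False, USE_TZ=False)
    django.setup()
import datetime
from django import forms

field = forms.DateTimeField()
print(field.clean("2021-06-01 13:30:00"))     # -> datetime.datetime(2021, 6, 1, 13, 30)
print(field.clean(datetime.date(2021, 6, 1))) # date promoted to midnight datetime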
RegexField.__init__ | (self, regex, **kwargs) |
regex can be either a string or a compiled regular expression object.
|
regex can be either a string or a compiled regular expression object.
| def __init__(self, regex, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
"""
kwargs.setdefault('strip', False)
super().__init__(**kwargs)
self._set_regex(regex) | [
"def",
"__init__",
"(",
"self",
",",
"regex",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'strip'",
",",
"False",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"_set_regex",
"(",
"regex",
")"
] | [
514,
4
] | [
520,
30
] | python | en | ['en', 'error', 'th'] | False |
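Because RegexField sets strip to False by default (the setdefault call above), surrounding whitespace is preserved and can fail the pattern; a small sketch with a hypothetical pattern:

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.RegexField(regex=r"^[a-z]{3}-\d{2}$")   # hypothetical pattern
print(field.clean("abc-42"))
try:
    field.clean("abc-42 ")   # trailing space is not stripped, so the anchor fails
except ValidationError as exc:
    print(exc.messages)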
ImageField.to_python | (self, data) |
Check that the file-upload field data contains a valid image (GIF, JPG,
PNG, etc. -- whatever Pillow supports).
|
Check that the file-upload field data contains a valid image (GIF, JPG,
PNG, etc. -- whatever Pillow supports).
| def to_python(self, data):
"""
Check that the file-upload field data contains a valid image (GIF, JPG,
PNG, etc. -- whatever Pillow supports).
"""
f = super().to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception as exc:
# Pillow doesn't recognize it as an image.
raise ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
) from exc
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f | [
"def",
"to_python",
"(",
"self",
",",
"data",
")",
":",
"f",
"=",
"super",
"(",
")",
".",
"to_python",
"(",
"data",
")",
"if",
"f",
"is",
"None",
":",
"return",
"None",
"from",
"PIL",
"import",
"Image",
"# We need to get a file object for Pillow. We might have a path or we might",
"# have to read the data into memory.",
"if",
"hasattr",
"(",
"data",
",",
"'temporary_file_path'",
")",
":",
"file",
"=",
"data",
".",
"temporary_file_path",
"(",
")",
"else",
":",
"if",
"hasattr",
"(",
"data",
",",
"'read'",
")",
":",
"file",
"=",
"BytesIO",
"(",
"data",
".",
"read",
"(",
")",
")",
"else",
":",
"file",
"=",
"BytesIO",
"(",
"data",
"[",
"'content'",
"]",
")",
"try",
":",
"# load() could spot a truncated JPEG, but it loads the entire",
"# image in memory, which is a DoS vector. See #3848 and #18520.",
"image",
"=",
"Image",
".",
"open",
"(",
"file",
")",
"# verify() must be called immediately after the constructor.",
"image",
".",
"verify",
"(",
")",
"# Annotating so subclasses can reuse it for their own validation",
"f",
".",
"image",
"=",
"image",
"# Pillow doesn't detect the MIME type of all formats. In those",
"# cases, content_type will be None.",
"f",
".",
"content_type",
"=",
"Image",
".",
"MIME",
".",
"get",
"(",
"image",
".",
"format",
")",
"except",
"Exception",
"as",
"exc",
":",
"# Pillow doesn't recognize it as an image.",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_image'",
"]",
",",
"code",
"=",
"'invalid_image'",
",",
")",
"from",
"exc",
"if",
"hasattr",
"(",
"f",
",",
"'seek'",
")",
"and",
"callable",
"(",
"f",
".",
"seek",
")",
":",
"f",
".",
"seek",
"(",
"0",
")",
"return",
"f"
] | [
621,
4
] | [
662,
16
] | python | en | ['en', 'error', 'th'] | False |
BooleanField.to_python | (self, value) | Return a Python boolean object. | Return a Python boolean object. | def to_python(self, value):
"""Return a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, str) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super().to_python(value) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"# Explicitly check for the string 'False', which is what a hidden field",
"# will submit for False. Also check for '0', since this is what",
"# RadioSelect will provide. Because bool(\"True\") == bool('1') == True,",
"# we don't need to handle that explicitly.",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
"and",
"value",
".",
"lower",
"(",
")",
"in",
"(",
"'false'",
",",
"'0'",
")",
":",
"value",
"=",
"False",
"else",
":",
"value",
"=",
"bool",
"(",
"value",
")",
"return",
"super",
"(",
")",
".",
"to_python",
"(",
"value",
")"
] | [
716,
4
] | [
726,
39
] | python | en | ['en', 'cy', 'en'] | True |
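The string handling described in the BooleanField.to_python() comments above, shown directly (required=False so a False result does not raise):

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms

field = forms.BooleanField(required=False)
print(field.clean("False"))                       # -> False, caught by the string check
print(field.clean("0"))                           # -> False
print(field.clean("any other non-empty string"))  # -> True via bool()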
NullBooleanField.to_python | (self, value) |
Explicitly check for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
the BooleanField, this field must check for True because it doesn't
use the bool() function.
|
Explicitly check for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
the BooleanField, this field must check for True because it doesn't
use the bool() function.
| def to_python(self, value):
"""
Explicitly check for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
the BooleanField, this field must check for True because it doesn't
use the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"(",
"True",
",",
"'True'",
",",
"'true'",
",",
"'1'",
")",
":",
"return",
"True",
"elif",
"value",
"in",
"(",
"False",
",",
"'False'",
",",
"'false'",
",",
"'0'",
")",
":",
"return",
"False",
"else",
":",
"return",
"None"
] | [
747,
4
] | [
761,
23
] | python | en | ['en', 'error', 'th'] | False |
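NullBooleanField maps the recognised strings to True/False and everything else to None, as the docstring above spells out; a brief sketch:

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms

field = forms.NullBooleanField()
print(field.clean("true"))    # -> True
print(field.clean("0"))       # -> False
print(field.clean("maybe"))   # -> None: anything unrecognised maps to None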
ChoiceField.to_python | (self, value) | Return a string. | Return a string. | def to_python(self, value):
"""Return a string."""
if value in self.empty_values:
return ''
return str(value) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"''",
"return",
"str",
"(",
"value",
")"
] | [
806,
4
] | [
810,
25
] | python | en | ['en', 'cy', 'en'] | True |
ChoiceField.validate | (self, value) | Validate that the input is in self.choices. | Validate that the input is in self.choices. | def validate(self, value):
"""Validate that the input is in self.choices."""
super().validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"super",
"(",
")",
".",
"validate",
"(",
"value",
")",
"if",
"value",
"and",
"not",
"self",
".",
"valid_value",
"(",
"value",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_choice'",
"]",
",",
"code",
"=",
"'invalid_choice'",
",",
"params",
"=",
"{",
"'value'",
":",
"value",
"}",
",",
")"
] | [
812,
4
] | [
820,
13
] | python | en | ['en', 'en', 'en'] | True |
ChoiceField.valid_value | (self, value) | Check to see if the provided value is a valid choice. | Check to see if the provided value is a valid choice. | def valid_value(self, value):
"""Check to see if the provided value is a valid choice."""
text_value = str(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == str(k2):
return True
else:
if value == k or text_value == str(k):
return True
return False | [
"def",
"valid_value",
"(",
"self",
",",
"value",
")",
":",
"text_value",
"=",
"str",
"(",
"value",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"choices",
":",
"if",
"isinstance",
"(",
"v",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# This is an optgroup, so look inside the group for options",
"for",
"k2",
",",
"v2",
"in",
"v",
":",
"if",
"value",
"==",
"k2",
"or",
"text_value",
"==",
"str",
"(",
"k2",
")",
":",
"return",
"True",
"else",
":",
"if",
"value",
"==",
"k",
"or",
"text_value",
"==",
"str",
"(",
"k",
")",
":",
"return",
"True",
"return",
"False"
] | [
822,
4
] | [
834,
20
] | python | en | ['en', 'en', 'en'] | True |
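valid_value() above also walks nested (optgroup) choices, which the following sketch exercises with a hypothetical choice list:

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.ChoiceField(choices=[
    ("Audio", [("vinyl", "Vinyl"), ("cd", "CD")]),   # an optgroup
    ("unknown", "Unknown"),
])
print(field.clean("cd"))        # found inside the optgroup
try:
    field.clean("cassette")
except ValidationError as exc:
    print(exc.messages)         # the invalid_choice error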
TypedChoiceField._coerce | (self, value) |
Validate that the value can be coerced to the right type (if not empty).
|
Validate that the value can be coerced to the right type (if not empty).
| def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value | [
"def",
"_coerce",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"self",
".",
"empty_value",
"or",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"self",
".",
"empty_value",
"try",
":",
"value",
"=",
"self",
".",
"coerce",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"ValidationError",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_choice'",
"]",
",",
"code",
"=",
"'invalid_choice'",
",",
"params",
"=",
"{",
"'value'",
":",
"value",
"}",
",",
")",
"return",
"value"
] | [
843,
4
] | [
857,
20
] | python | en | ['en', 'error', 'th'] | False |
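TypedChoiceField applies the _coerce() step above after the usual choice validation, so the cleaned value has the coerced type rather than the submitted string:

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms

field = forms.TypedChoiceField(
    choices=[("1", "One"), ("2", "Two")],
    coerce=int,          # applied by _coerce() once the choice is accepted
    empty_value=None,
    required=False,
)
print(field.clean("2"))   # -> 2 (an int, not the submitted string)
print(field.clean(""))    # -> None, the configured empty_value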
MultipleChoiceField.validate | (self, value) | Validate that the input is a list or tuple. | Validate that the input is a list or tuple. | def validate(self, value):
"""Validate that the input is a list or tuple."""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
) | [
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"required",
"and",
"not",
"value",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'required'",
"]",
",",
"code",
"=",
"'required'",
")",
"# Validate that each value in the value list is in self.choices.",
"for",
"val",
"in",
"value",
":",
"if",
"not",
"self",
".",
"valid_value",
"(",
"val",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_choice'",
"]",
",",
"code",
"=",
"'invalid_choice'",
",",
"params",
"=",
"{",
"'value'",
":",
"val",
"}",
",",
")"
] | [
879,
4
] | [
890,
17
] | python | en | ['en', 'en', 'en'] | True |
TypedMultipleChoiceField._coerce | (self, value) |
Validate that the values are in self.choices and can be coerced to the
right type.
|
Validate that the values are in self.choices and can be coerced to the
right type.
| def _coerce(self, value):
"""
Validate that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value | [
"def",
"_coerce",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"self",
".",
"empty_value",
"or",
"value",
"in",
"self",
".",
"empty_values",
":",
"return",
"self",
".",
"empty_value",
"new_value",
"=",
"[",
"]",
"for",
"choice",
"in",
"value",
":",
"try",
":",
"new_value",
".",
"append",
"(",
"self",
".",
"coerce",
"(",
"choice",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"ValidationError",
")",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid_choice'",
"]",
",",
"code",
"=",
"'invalid_choice'",
",",
"params",
"=",
"{",
"'value'",
":",
"choice",
"}",
",",
")",
"return",
"new_value"
] | [
912,
4
] | [
929,
24
] | python | en | ['en', 'error', 'th'] | False |
ComboField.clean | (self, value) |
Validate the given value against all of self.fields, which is a
list of Field instances.
|
Validate the given value against all of self.fields, which is a
list of Field instances.
| def clean(self, value):
"""
Validate the given value against all of self.fields, which is a
list of Field instances.
"""
super().clean(value)
for field in self.fields:
value = field.clean(value)
return value | [
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"super",
"(",
")",
".",
"clean",
"(",
"value",
")",
"for",
"field",
"in",
"self",
".",
"fields",
":",
"value",
"=",
"field",
".",
"clean",
"(",
"value",
")",
"return",
"value"
] | [
955,
4
] | [
963,
20
] | python | en | ['en', 'error', 'th'] | False |
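ComboField.clean() above runs the value through each sub-field in turn, so every field's constraints must hold; a classic sketch combining a length limit with an email check:

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.ComboField(fields=[forms.CharField(max_length=20), forms.EmailField()])
print(field.clean("bob@example.com"))
try:
    field.clean("this-address-is-far-too-long@example.com")
except ValidationError as exc:
    print(exc.messages)   # the max_length check fails before the email check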
MultiValueField.clean | (self, value) |
Validate every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
|
Validate every value in the given list. A value is validated against
the corresponding Field in self.fields. | def clean(self, value):
"""
Validate every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if self.disabled and not isinstance(value, list):
value = self.widget.decompress(value)
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out | [
"def",
"clean",
"(",
"self",
",",
"value",
")",
":",
"clean_data",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"if",
"self",
".",
"disabled",
"and",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"self",
".",
"widget",
".",
"decompress",
"(",
"value",
")",
"if",
"not",
"value",
"or",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"not",
"value",
"or",
"not",
"[",
"v",
"for",
"v",
"in",
"value",
"if",
"v",
"not",
"in",
"self",
".",
"empty_values",
"]",
":",
"if",
"self",
".",
"required",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'required'",
"]",
",",
"code",
"=",
"'required'",
")",
"else",
":",
"return",
"self",
".",
"compress",
"(",
"[",
"]",
")",
"else",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'invalid'",
"]",
",",
"code",
"=",
"'invalid'",
")",
"for",
"i",
",",
"field",
"in",
"enumerate",
"(",
"self",
".",
"fields",
")",
":",
"try",
":",
"field_value",
"=",
"value",
"[",
"i",
"]",
"except",
"IndexError",
":",
"field_value",
"=",
"None",
"if",
"field_value",
"in",
"self",
".",
"empty_values",
":",
"if",
"self",
".",
"require_all_fields",
":",
"# Raise a 'required' error if the MultiValueField is",
"# required and any field is empty.",
"if",
"self",
".",
"required",
":",
"raise",
"ValidationError",
"(",
"self",
".",
"error_messages",
"[",
"'required'",
"]",
",",
"code",
"=",
"'required'",
")",
"elif",
"field",
".",
"required",
":",
"# Otherwise, add an 'incomplete' error to the list of",
"# collected errors and skip field cleaning, if a required",
"# field is empty.",
"if",
"field",
".",
"error_messages",
"[",
"'incomplete'",
"]",
"not",
"in",
"errors",
":",
"errors",
".",
"append",
"(",
"field",
".",
"error_messages",
"[",
"'incomplete'",
"]",
")",
"continue",
"try",
":",
"clean_data",
".",
"append",
"(",
"field",
".",
"clean",
"(",
"field_value",
")",
")",
"except",
"ValidationError",
"as",
"e",
":",
"# Collect all validation errors in a single list, which we'll",
"# raise at the end of clean(), rather than raising a single",
"# exception for the first error we encounter. Skip duplicates.",
"errors",
".",
"extend",
"(",
"m",
"for",
"m",
"in",
"e",
".",
"error_list",
"if",
"m",
"not",
"in",
"errors",
")",
"if",
"errors",
":",
"raise",
"ValidationError",
"(",
"errors",
")",
"out",
"=",
"self",
".",
"compress",
"(",
"clean_data",
")",
"self",
".",
"validate",
"(",
"out",
")",
"self",
".",
"run_validators",
"(",
"out",
")",
"return",
"out"
] | [
1011,
4
] | [
1063,
18
] | python | en | ['en', 'error', 'th'] | False |
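The easiest way to see the MultiValueField.clean() flow above is through SplitDateTimeField, a stock subclass built from a DateField and a TimeField: each part is cleaned, then compress() merges the results.

import django
from django.conf import settings
if not settings.configured:  # standalone-run shim
    settings.configure(USE_I18N=False, USE_TZ=False)
    django.setup()
from django import forms
from django.core.exceptions import ValidationError

field = forms.SplitDateTimeField()
print(field.clean(["2021-06-01", "13:30"]))   # -> datetime.datetime(2021, 6, 1, 13, 30)
try:
    field.clean(["2021-06-01", ""])           # empty time part, so clean() raises
except ValidationError as exc:
    print(exc.messages)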
MultiValueField.compress | (self, data_list) |
Return a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
|
Return a single value for the given list of values. The values can be
assumed to be valid. | def compress(self, data_list):
"""
Return a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.') | [
"def",
"compress",
"(",
"self",
",",
"data_list",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Subclasses must implement this method.'",
")"
] | [
1065,
4
] | [
1074,
75
] | python | en | ['en', 'error', 'th'] | False |
SVDPlusPlus.__init__ | (self, train_file=None, test_file=None, output_file=None, factors=10, learn_rate=0.01, epochs=10,
delta=0.015, init_mean=0.1, init_stdev=0.1, bias_learn_rate=0.005, delta_bias=0.002,
stop_criteria=0.009, sep='\t', output_sep='\t', random_seed=None, update_delta=False) |
SVD++ for rating prediction
The SVD++ algorithm, an extension of SVD taking into account implicit ratings. Just as for SVD, the parameters
are learned using a SGD on the regularized squared error objective.
Usage::
>> SVDPlusPlus(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param factors: Number of latent factors per user/item
:type factors: int, default 10
:param learn_rate: Learning rate (alpha)
:type learn_rate: float, default 0.05
:param epochs: Number of epochs over the training data
:type epochs: int, default 30
:param delta: Regularization value
:type delta: float, default 0.015
:param init_mean: Mean of the normal distribution used to initialize the latent factors
:type init_mean: float, default 0
:param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
:type init_stdev: float, default 0.1
:param bias_learn_rate: Learning rate for baselines
:type bias_learn_rate: float, default 0.005
:param delta_bias: Regularization value for baselines
:type delta_bias: float, default 0.002
:param stop_criteria: Difference between errors for stopping criteria
:type stop_criteria: float, default 0.009
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
:type random_seed: int, default None
|
SVD++ for rating prediction | def __init__(self, train_file=None, test_file=None, output_file=None, factors=10, learn_rate=0.01, epochs=10,
delta=0.015, init_mean=0.1, init_stdev=0.1, bias_learn_rate=0.005, delta_bias=0.002,
stop_criteria=0.009, sep='\t', output_sep='\t', random_seed=None, update_delta=False):
"""
SVD++ for rating prediction
The SVD++ algorithm, an extension of SVD taking into account implicit ratings. Just as for SVD, the parameters
are learned using a SGD on the regularized squared error objective.
Usage::
>> SVDPlusPlus(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param factors: Number of latent factors per user/item
:type factors: int, default 10
:param learn_rate: Learning rate (alpha)
:type learn_rate: float, default 0.05
:param epochs: Number of epochs over the training data
:type epochs: int, default 30
:param delta: Regularization value
:type delta: float, default 0.015
:param init_mean: Mean of the normal distribution used to initialize the latent factors
:type init_mean: float, default 0
:param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
:type init_stdev: float, default 0.1
:param bias_learn_rate: Learning rate for baselines
:type bias_learn_rate: float, default 0.005
:param delta_bias: Regularization value for baselines
:type delta_bias: float, default 0.002
:param stop_criteria: Difference between errors for stopping criteria
:type stop_criteria: float, default 0.009
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
:type random_seed: int, default None
"""
super(SVDPlusPlus, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
factors=factors, learn_rate=learn_rate, epochs=epochs, delta=delta,
init_mean=init_mean, init_stdev=init_stdev, baseline=True,
bias_learn_rate=bias_learn_rate, delta_bias=delta_bias,
stop_criteria=stop_criteria, sep=sep, output_sep=output_sep,
random_seed=random_seed)
self.recommender_name = 'SVDPlusPlus'
self.update_delta = update_delta
self.y = None
self.n_u = None
self.items_id_seen_by_user = None | [
"def",
"__init__",
"(",
"self",
",",
"train_file",
"=",
"None",
",",
"test_file",
"=",
"None",
",",
"output_file",
"=",
"None",
",",
"factors",
"=",
"10",
",",
"learn_rate",
"=",
"0.01",
",",
"epochs",
"=",
"10",
",",
"delta",
"=",
"0.015",
",",
"init_mean",
"=",
"0.1",
",",
"init_stdev",
"=",
"0.1",
",",
"bias_learn_rate",
"=",
"0.005",
",",
"delta_bias",
"=",
"0.002",
",",
"stop_criteria",
"=",
"0.009",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
",",
"random_seed",
"=",
"None",
",",
"update_delta",
"=",
"False",
")",
":",
"super",
"(",
"SVDPlusPlus",
",",
"self",
")",
".",
"__init__",
"(",
"train_file",
"=",
"train_file",
",",
"test_file",
"=",
"test_file",
",",
"output_file",
"=",
"output_file",
",",
"factors",
"=",
"factors",
",",
"learn_rate",
"=",
"learn_rate",
",",
"epochs",
"=",
"epochs",
",",
"delta",
"=",
"delta",
",",
"init_mean",
"=",
"init_mean",
",",
"init_stdev",
"=",
"init_stdev",
",",
"baseline",
"=",
"True",
",",
"bias_learn_rate",
"=",
"bias_learn_rate",
",",
"delta_bias",
"=",
"delta_bias",
",",
"stop_criteria",
"=",
"stop_criteria",
",",
"sep",
"=",
"sep",
",",
"output_sep",
"=",
"output_sep",
",",
"random_seed",
"=",
"random_seed",
")",
"self",
".",
"recommender_name",
"=",
"'SVDPlusPlus'",
"self",
".",
"update_delta",
"=",
"update_delta",
"self",
".",
"y",
"=",
"None",
"self",
".",
"n_u",
"=",
"None",
"self",
".",
"items_id_seen_by_user",
"=",
"None"
] | [
23,
4
] | [
97,
41
] | python | en | ['en', 'error', 'th'] | False |
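The constructor row above comes from the CaseRecommender project, and its docstring already hints at the intended usage. The sketch below expands that hint; the module path caserec.recommenders.rating_prediction.svdplusplus and the file names are assumptions based on the project's usual layout, not facts taken from the row.

# Assumed module path; check the installed CaseRecommender package.
from caserec.recommenders.rating_prediction.svdplusplus import SVDPlusPlus

# Tab-separated files with "user item rating" triples (placeholder paths).
train_file = "u1.base"
test_file = "u1.test"

SVDPlusPlus(
    train_file=train_file,
    test_file=test_file,
    output_file="predictions.dat",  # hypothetical output location
    factors=10,
    learn_rate=0.01,
    epochs=10,
).compute()   # reads the data, fits the model and writes/evaluates predictions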
SVDPlusPlus.init_model | (self) |
Method to treat and initialize the model. Extends init_model from MatrixFactorization
|
Method to treat and initialize the model. Extends init_model from MatrixFactorization | def init_model(self):
"""
Method to treat and initialize the model. Extends init_model from MatrixFactorization
"""
super(SVDPlusPlus, self).init_model()
self.n_u = {}
self.items_id_seen_by_user = {}
for user in self.train_set['users']:
for item in self.train_set['items_seen_by_user'][user]:
self.items_id_seen_by_user.setdefault(self.user_to_user_id[user], []).append(self.item_to_item_id[item])
# |N(u)|^(-1/2)
self.n_u[self.user_to_user_id[user]] = np.sqrt(len(self.train_set['items_seen_by_user'][user])) | [
"def",
"init_model",
"(",
"self",
")",
":",
"super",
"(",
"SVDPlusPlus",
",",
"self",
")",
".",
"init_model",
"(",
")",
"self",
".",
"n_u",
"=",
"{",
"}",
"self",
".",
"items_id_seen_by_user",
"=",
"{",
"}",
"for",
"user",
"in",
"self",
".",
"train_set",
"[",
"'users'",
"]",
":",
"for",
"item",
"in",
"self",
".",
"train_set",
"[",
"'items_seen_by_user'",
"]",
"[",
"user",
"]",
":",
"self",
".",
"items_id_seen_by_user",
".",
"setdefault",
"(",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
",",
"[",
"]",
")",
".",
"append",
"(",
"self",
".",
"item_to_item_id",
"[",
"item",
"]",
")",
"# |N(u)|^(-1/2)",
"self",
".",
"n_u",
"[",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
"]",
"=",
"np",
".",
"sqrt",
"(",
"len",
"(",
"self",
".",
"train_set",
"[",
"'items_seen_by_user'",
"]",
"[",
"user",
"]",
")",
")"
] | [
99,
4
] | [
114,
107
] | python | en | ['en', 'error', 'th'] | False |
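A note on the quantity cached above: init_model() stores |N(u)|^(-1/2) per user because the SVD++ score combines explicit and implicit factors. Using the code's own symbols (p, q, y, bu, bi and the global mean), the prediction later computed in _predict_svd_plus_plus_score can be written in LaTeX as

\hat{r}_{ui} = \mu + b_u + b_i + q_i^{\top}\left(p_u + |N(u)|^{-1/2} \sum_{j \in N(u)} y_j\right)

where N(u) is the set of items user u interacted with in the training data.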
SVDPlusPlus.fit | (self) |
This method performs iterations of stochastic gradient descent over the training data.
|
This method performs iterations of stochastic gradient descent over the training data. | def fit(self):
"""
This method performs iterations of stochastic gradient descent over the training data.
"""
rmse_old = .0
for epoch in range(self.epochs):
error_final = .0
for user, item, feedback in self.feedback_triples:
pu = self.p[user] + self.y_sum_rows(user)
# Calculate error
eui = feedback - self._predict_svd_plus_plus_score(user, item, pu, False)
error_final += (eui ** 2.0)
# update bu and bi
self.bu[user] += self.bias_learn_rate * (eui - self.delta_bias * self.bu[user])
self.bi[item] += self.bias_learn_rate * (eui - self.delta_bias * self.bi[item])
# Adjust the factors
norm_eui = eui / self.n_u[user]
i_f = self.q[item]
# Compute factor updates
delta_u = np.subtract(np.multiply(eui, i_f), np.multiply(self.delta, self.p[user]))
self.p[user] += np.multiply(self.learn_rate, delta_u)
delta_i = np.subtract(np.multiply(eui, pu), np.multiply(self.delta, i_f))
self.q[item] += np.multiply(self.learn_rate, delta_i)
# update y (implicit factor)
common_update = norm_eui * i_f
for j in self.items_id_seen_by_user[user]:
delta_y = np.subtract(common_update, self.delta * self.y[j])
self.y[j] += self.learn_rate * delta_y
rmse_new = np.sqrt(error_final / self.train_set["number_interactions"])
if np.fabs(rmse_new - rmse_old) <= self.stop_criteria:
break
else:
rmse_old = rmse_new | [
"def",
"fit",
"(",
"self",
")",
":",
"rmse_old",
"=",
".0",
"for",
"epoch",
"in",
"range",
"(",
"self",
".",
"epochs",
")",
":",
"error_final",
"=",
".0",
"for",
"user",
",",
"item",
",",
"feedback",
"in",
"self",
".",
"feedback_triples",
":",
"pu",
"=",
"self",
".",
"p",
"[",
"user",
"]",
"+",
"self",
".",
"y_sum_rows",
"(",
"user",
")",
"# Calculate error",
"eui",
"=",
"feedback",
"-",
"self",
".",
"_predict_svd_plus_plus_score",
"(",
"user",
",",
"item",
",",
"pu",
",",
"False",
")",
"error_final",
"+=",
"(",
"eui",
"**",
"2.0",
")",
"# update bu and bi",
"self",
".",
"bu",
"[",
"user",
"]",
"+=",
"self",
".",
"bias_learn_rate",
"*",
"(",
"eui",
"-",
"self",
".",
"delta_bias",
"*",
"self",
".",
"bu",
"[",
"user",
"]",
")",
"self",
".",
"bi",
"[",
"item",
"]",
"+=",
"self",
".",
"bias_learn_rate",
"*",
"(",
"eui",
"-",
"self",
".",
"delta_bias",
"*",
"self",
".",
"bi",
"[",
"item",
"]",
")",
"# Adjust the factors",
"norm_eui",
"=",
"eui",
"/",
"self",
".",
"n_u",
"[",
"user",
"]",
"i_f",
"=",
"self",
".",
"q",
"[",
"item",
"]",
"# Compute factor updates",
"delta_u",
"=",
"np",
".",
"subtract",
"(",
"np",
".",
"multiply",
"(",
"eui",
",",
"i_f",
")",
",",
"np",
".",
"multiply",
"(",
"self",
".",
"delta",
",",
"self",
".",
"p",
"[",
"user",
"]",
")",
")",
"self",
".",
"p",
"[",
"user",
"]",
"+=",
"np",
".",
"multiply",
"(",
"self",
".",
"learn_rate",
",",
"delta_u",
")",
"delta_i",
"=",
"np",
".",
"subtract",
"(",
"np",
".",
"multiply",
"(",
"eui",
",",
"pu",
")",
",",
"np",
".",
"multiply",
"(",
"self",
".",
"delta",
",",
"i_f",
")",
")",
"self",
".",
"q",
"[",
"item",
"]",
"+=",
"np",
".",
"multiply",
"(",
"self",
".",
"learn_rate",
",",
"delta_i",
")",
"# update y (implicit factor)",
"common_update",
"=",
"norm_eui",
"*",
"i_f",
"for",
"j",
"in",
"self",
".",
"items_id_seen_by_user",
"[",
"user",
"]",
":",
"delta_y",
"=",
"np",
".",
"subtract",
"(",
"common_update",
",",
"self",
".",
"delta",
"*",
"self",
".",
"y",
"[",
"j",
"]",
")",
"self",
".",
"y",
"[",
"j",
"]",
"+=",
"self",
".",
"learn_rate",
"*",
"delta_y",
"rmse_new",
"=",
"np",
".",
"sqrt",
"(",
"error_final",
"/",
"self",
".",
"train_set",
"[",
"\"number_interactions\"",
"]",
")",
"if",
"np",
".",
"fabs",
"(",
"rmse_new",
"-",
"rmse_old",
")",
"<=",
"self",
".",
"stop_criteria",
":",
"break",
"else",
":",
"rmse_old",
"=",
"rmse_new"
] | [
116,
4
] | [
161,
35
] | python | en | ['en', 'error', 'th'] | False |
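Written out, the per-interaction step inside the epoch loop above is the standard SVD++ stochastic gradient update. Symbols follow the attribute names in the code: N(u) is items_id_seen_by_user[user], n_u is the per-user normalizer stored in self.n_u (defined elsewhere in the class and assumed here to play the role of the usual sqrt(|N(u)|) term), gamma = learn_rate, gamma_b = bias_learn_rate, lambda = delta, lambda_b = delta_bias.

$$
\tilde p_u = p_u + \frac{1}{n_u}\sum_{j\in N(u)} y_j,
\qquad
e_{ui} = r_{ui} - \hat r_{ui}
$$

$$
b_u \leftarrow b_u + \gamma_b\,(e_{ui} - \lambda_b\, b_u),
\qquad
b_i \leftarrow b_i + \gamma_b\,(e_{ui} - \lambda_b\, b_i)
$$

$$
p_u \leftarrow p_u + \gamma\,(e_{ui}\, q_i - \lambda\, p_u),
\qquad
q_i \leftarrow q_i + \gamma\,(e_{ui}\, \tilde p_u - \lambda\, q_i),
\qquad
y_j \leftarrow y_j + \gamma\Big(\tfrac{e_{ui}}{n_u}\, q_i - \lambda\, y_j\Big)\ \text{for all } j\in N(u)
$$

The epoch loop stops early as soon as the change in training RMSE between consecutive epochs drops to stop_criteria or below.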
SVDPlusPlus.create_factors | (self) |
This method extends create_factors from Matrix Factorization, adding y factors
|
This method extends create_factors from Matrix Factorization, adding y factors | def create_factors(self):
"""
This method extends create_factors from Matrix Factorization, adding y factors
"""
super(SVDPlusPlus, self).create_factors()
self.y = np.random.normal(self.init_mean, self.init_stdev, (len(self.items), self.factors)) | [
"def",
"create_factors",
"(",
"self",
")",
":",
"super",
"(",
"SVDPlusPlus",
",",
"self",
")",
".",
"create_factors",
"(",
")",
"self",
".",
"y",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"self",
".",
"init_mean",
",",
"self",
".",
"init_stdev",
",",
"(",
"len",
"(",
"self",
".",
"items",
")",
",",
"self",
".",
"factors",
")",
")"
] | [
163,
4
] | [
170,
99
] | python | en | ['en', 'error', 'th'] | False |
SVDPlusPlus._predict_svd_plus_plus_score | (self, u, i, pu, cond=True) |
:param u: User ID (from self.users)
:type u: int
:param i: Item ID (from self.items)
:type i: int
:param pu: Updated user vector (p_u plus the normalized sum of y_j vectors)
:type pu: list or np.array
:param cond: Use max and min values of train set to limit score
:type cond: bool, default True
:return: prediction for user u and item i
:rtype: float
| def _predict_svd_plus_plus_score(self, u, i, pu, cond=True):
"""
:param u: User ID (from self.users)
:type u: int
:param i: Item ID (from self.items)
:type i: int
:param pu: Updated user vector (p_u plus the normalized sum of y_j vectors)
:type pu: list or np.array
:param cond: Use max and min values of train set to limit score
:type cond: bool, default True
:return: prediction for user u and item i
:rtype: float
"""
rui = self.train_set["mean_value"] + self.bu[u] + self.bi[i] + np.dot(pu, self.q[i])
if cond:
if rui > self.train_set["max_value"]:
rui = self.train_set["max_value"]
elif rui < self.train_set["min_value"]:
rui = self.train_set["min_value"]
return rui | [
"def",
"_predict_svd_plus_plus_score",
"(",
"self",
",",
"u",
",",
"i",
",",
"pu",
",",
"cond",
"=",
"True",
")",
":",
"rui",
"=",
"self",
".",
"train_set",
"[",
"\"mean_value\"",
"]",
"+",
"self",
".",
"bu",
"[",
"u",
"]",
"+",
"self",
".",
"bi",
"[",
"i",
"]",
"+",
"np",
".",
"dot",
"(",
"pu",
",",
"self",
".",
"q",
"[",
"i",
"]",
")",
"if",
"cond",
":",
"if",
"rui",
">",
"self",
".",
"train_set",
"[",
"\"max_value\"",
"]",
":",
"rui",
"=",
"self",
".",
"train_set",
"[",
"\"max_value\"",
"]",
"elif",
"rui",
"<",
"self",
".",
"train_set",
"[",
"\"min_value\"",
"]",
":",
"rui",
"=",
"self",
".",
"train_set",
"[",
"\"min_value\"",
"]",
"return",
"rui"
] | [
172,
4
] | [
198,
18
] | python | en | ['en', 'error', 'th'] | False |
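For reference, the score assembled above is the usual SVD++ prediction; with mu = train_set['mean_value'] and pu the user vector already combined with the implicit y factors (see y_sum_rows below), it reads

$$
\hat r_{ui} = \mu + b_u + b_i + q_i^{\top}\Big(p_u + \frac{1}{n_u}\sum_{j\in N(u)} y_j\Big)
$$

and, when cond is True, the result is clipped into the training set's [min_value, max_value] range. The normalizer n_u comes from self.n_u and is assumed to correspond to the |N(u)|-based term of the canonical formulation.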
|
SVDPlusPlus.y_sum_rows | (self, user) |
Incorporating implicit feedback in the SVD: sum of y_j over all items j in N(u)
:param user: User ID
:type user: int
:return: Sum of y vectors for seen items of user
|
Incorporating implicit feedback in the SVD: sum of y_j over all items j in N(u) | def y_sum_rows(self, user):
"""
Incorporating implicit feedback in the SVD: sum of y_j over all items j in N(u)
:param user: User ID
:type user: int
:return: Sum of y vectors for seen items of user
"""
sum_imp = np.zeros(self.factors)
for ui in self.items_id_seen_by_user[user]:
sum_imp += self.y[ui]
return sum_imp / self.n_u[user] | [
"def",
"y_sum_rows",
"(",
"self",
",",
"user",
")",
":",
"sum_imp",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"factors",
")",
"for",
"ui",
"in",
"self",
".",
"items_id_seen_by_user",
"[",
"user",
"]",
":",
"sum_imp",
"+=",
"self",
".",
"y",
"[",
"ui",
"]",
"return",
"sum_imp",
"/",
"self",
".",
"n_u",
"[",
"user",
"]"
] | [
200,
4
] | [
214,
39
] | python | en | ['en', 'error', 'th'] | False |
SVDPlusPlus.predict | (self) |
This method computes a final rating for unknown pairs (user, item)
|
This method computes a final rating for unknown pairs (user, item) | def predict(self):
"""
This method computes a final rating for unknown pairs (user, item)
"""
if self.test_file is not None:
for user in self.test_set['users']:
pu = self.p[self.user_to_user_id[user]] + self.y_sum_rows(self.user_to_user_id[user])
for item in self.test_set['feedback'][user]:
self.predictions.append(
(user, item, self._predict_svd_plus_plus_score(self.user_to_user_id[user],
self.item_to_item_id[item], pu, True)))
else:
raise NotImplementedError | [
"def",
"predict",
"(",
"self",
")",
":",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"for",
"user",
"in",
"self",
".",
"test_set",
"[",
"'users'",
"]",
":",
"pu",
"=",
"self",
".",
"p",
"[",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
"]",
"+",
"self",
".",
"y_sum_rows",
"(",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
")",
"for",
"item",
"in",
"self",
".",
"test_set",
"[",
"'feedback'",
"]",
"[",
"user",
"]",
":",
"self",
".",
"predictions",
".",
"append",
"(",
"(",
"user",
",",
"item",
",",
"self",
".",
"_predict_svd_plus_plus_score",
"(",
"self",
".",
"user_to_user_id",
"[",
"user",
"]",
",",
"self",
".",
"item_to_item_id",
"[",
"item",
"]",
",",
"pu",
",",
"True",
")",
")",
")",
"else",
":",
"raise",
"NotImplemented"
] | [
216,
4
] | [
231,
32
] | python | en | ['en', 'error', 'th'] | False |
reset_format_cache | () | Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
| Clear any cached formats. | def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {} | [
"def",
"reset_format_cache",
"(",
")",
":",
"global",
"_format_cache",
",",
"_format_modules_cache",
"_format_cache",
"=",
"{",
"}",
"_format_modules_cache",
"=",
"{",
"}"
] | [
48,
0
] | [
56,
30
] | python | en | ['en', 'en', 'en'] | True |
iter_format_modules | (lang, format_module_path=None) | Find format modules. | Find format modules. | def iter_format_modules(lang, format_module_path=None):
"""Find format modules."""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, str):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + '.%s')
format_locations.append('django.conf.locale.%s')
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('%s.formats' % (location % loc))
except ImportError:
pass | [
"def",
"iter_format_modules",
"(",
"lang",
",",
"format_module_path",
"=",
"None",
")",
":",
"if",
"not",
"check_for_language",
"(",
"lang",
")",
":",
"return",
"if",
"format_module_path",
"is",
"None",
":",
"format_module_path",
"=",
"settings",
".",
"FORMAT_MODULE_PATH",
"format_locations",
"=",
"[",
"]",
"if",
"format_module_path",
":",
"if",
"isinstance",
"(",
"format_module_path",
",",
"str",
")",
":",
"format_module_path",
"=",
"[",
"format_module_path",
"]",
"for",
"path",
"in",
"format_module_path",
":",
"format_locations",
".",
"append",
"(",
"path",
"+",
"'.%s'",
")",
"format_locations",
".",
"append",
"(",
"'django.conf.locale.%s'",
")",
"locale",
"=",
"to_locale",
"(",
"lang",
")",
"locales",
"=",
"[",
"locale",
"]",
"if",
"'_'",
"in",
"locale",
":",
"locales",
".",
"append",
"(",
"locale",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
")",
"for",
"location",
"in",
"format_locations",
":",
"for",
"loc",
"in",
"locales",
":",
"try",
":",
"yield",
"import_module",
"(",
"'%s.formats'",
"%",
"(",
"location",
"%",
"loc",
")",
")",
"except",
"ImportError",
":",
"pass"
] | [
59,
0
] | [
83,
20
] | python | en | ['en', 'co', 'en'] | True |
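A small offline illustration of the lookup order the generator above yields. The project package name myproject.formats is hypothetical; the nested loops simply mirror the function's iteration order -- custom FORMAT_MODULE_PATH entries before django.conf.locale, and the full locale before the bare language:

```python
# Candidate module order for lang="pt-br" when FORMAT_MODULE_PATH = "myproject.formats"
# ("myproject" is a made-up package name used only for illustration).
format_locations = ["myproject.formats.%s", "django.conf.locale.%s"]
locales = ["pt_BR", "pt"]  # to_locale("pt-br") plus its base language

for location in format_locations:
    for loc in locales:
        print("%s.formats" % (location % loc))

# myproject.formats.pt_BR.formats
# myproject.formats.pt.formats
# django.conf.locale.pt_BR.formats
# django.conf.locale.pt.formats
```

Modules that fail to import are silently skipped, so only the candidates that actually exist end up in the returned iterator.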
get_format_modules | (lang=None, reverse=False) | Return a list of the format modules found. | Return a list of the format modules found. | def get_format_modules(lang=None, reverse=False):
"""Return a list of the format modules found."""
if lang is None:
lang = get_language()
if lang not in _format_modules_cache:
_format_modules_cache[lang] = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))
modules = _format_modules_cache[lang]
if reverse:
return list(reversed(modules))
return modules | [
"def",
"get_format_modules",
"(",
"lang",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"if",
"lang",
"is",
"None",
":",
"lang",
"=",
"get_language",
"(",
")",
"if",
"lang",
"not",
"in",
"_format_modules_cache",
":",
"_format_modules_cache",
"[",
"lang",
"]",
"=",
"list",
"(",
"iter_format_modules",
"(",
"lang",
",",
"settings",
".",
"FORMAT_MODULE_PATH",
")",
")",
"modules",
"=",
"_format_modules_cache",
"[",
"lang",
"]",
"if",
"reverse",
":",
"return",
"list",
"(",
"reversed",
"(",
"modules",
")",
")",
"return",
"modules"
] | [
86,
0
] | [
95,
18
] | python | en | ['en', 'en', 'en'] | True |
get_format | (format_type, lang=None, use_l10n=None) |
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
|
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'. | def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
use_l10n = use_l10n or (use_l10n is None and settings.USE_L10N)
if use_l10n and lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
return _format_cache[cache_key]
except KeyError:
pass
# The requested format_type has not been cached yet. Try to find it in any
# of the format_modules for the given lang if l10n is enabled. If it's not
# there or if l10n is disabled, fall back to the project settings.
val = None
if use_l10n:
for module in get_format_modules(lang):
val = getattr(module, format_type, None)
if val is not None:
break
if val is None:
if format_type not in FORMAT_SETTINGS:
return format_type
val = getattr(settings, format_type)
elif format_type in ISO_INPUT_FORMATS:
# If a list of input formats from one of the format_modules was
# retrieved, make sure the ISO_INPUT_FORMATS are in this list.
val = list(val)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
val.append(iso_input)
_format_cache[cache_key] = val
return val | [
"def",
"get_format",
"(",
"format_type",
",",
"lang",
"=",
"None",
",",
"use_l10n",
"=",
"None",
")",
":",
"use_l10n",
"=",
"use_l10n",
"or",
"(",
"use_l10n",
"is",
"None",
"and",
"settings",
".",
"USE_L10N",
")",
"if",
"use_l10n",
"and",
"lang",
"is",
"None",
":",
"lang",
"=",
"get_language",
"(",
")",
"cache_key",
"=",
"(",
"format_type",
",",
"lang",
")",
"try",
":",
"return",
"_format_cache",
"[",
"cache_key",
"]",
"except",
"KeyError",
":",
"pass",
"# The requested format_type has not been cached yet. Try to find it in any",
"# of the format_modules for the given lang if l10n is enabled. If it's not",
"# there or if l10n is disabled, fall back to the project settings.",
"val",
"=",
"None",
"if",
"use_l10n",
":",
"for",
"module",
"in",
"get_format_modules",
"(",
"lang",
")",
":",
"val",
"=",
"getattr",
"(",
"module",
",",
"format_type",
",",
"None",
")",
"if",
"val",
"is",
"not",
"None",
":",
"break",
"if",
"val",
"is",
"None",
":",
"if",
"format_type",
"not",
"in",
"FORMAT_SETTINGS",
":",
"return",
"format_type",
"val",
"=",
"getattr",
"(",
"settings",
",",
"format_type",
")",
"elif",
"format_type",
"in",
"ISO_INPUT_FORMATS",
":",
"# If a list of input formats from one of the format_modules was",
"# retrieved, make sure the ISO_INPUT_FORMATS are in this list.",
"val",
"=",
"list",
"(",
"val",
")",
"for",
"iso_input",
"in",
"ISO_INPUT_FORMATS",
".",
"get",
"(",
"format_type",
",",
"(",
")",
")",
":",
"if",
"iso_input",
"not",
"in",
"val",
":",
"val",
".",
"append",
"(",
"iso_input",
")",
"_format_cache",
"[",
"cache_key",
"]",
"=",
"val",
"return",
"val"
] | [
98,
0
] | [
137,
14
] | python | en | ['en', 'error', 'th'] | False |
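A minimal standalone sketch of get_format in action, assuming Django is installed, that these helpers live in django.utils.formats as in stock Django, and a Django version that still exposes USE_L10N (as the code above does). The outputs in the comments are what the bundled German locale data typically provides and are illustrative rather than guaranteed:

```python
import django
from django.conf import settings

# Just enough settings to use the formats machinery outside a project.
settings.configure(USE_I18N=True, USE_L10N=True, LANGUAGE_CODE="de")
django.setup()

from django.utils.formats import get_format

print(get_format("DECIMAL_SEPARATOR"))   # ',' -- found in django.conf.locale.de.formats
print(get_format("DATE_FORMAT"))         # e.g. 'j. F Y'
print(get_format("NOT_A_REAL_FORMAT"))   # unknown keys fall through and are returned as-is
```

Each successful lookup also warms _format_cache, so repeated calls for the same (format_type, lang) pair skip the module scan.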
date_format | (value, format=None, use_l10n=None) |
Format a datetime.date or datetime.datetime object using a
localizable format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
|
Format a datetime.date or datetime.datetime object using a
localizable format. | def date_format(value, format=None, use_l10n=None):
"""
Format a datetime.date or datetime.datetime object using a
localizable format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) | [
"def",
"date_format",
"(",
"value",
",",
"format",
"=",
"None",
",",
"use_l10n",
"=",
"None",
")",
":",
"return",
"dateformat",
".",
"format",
"(",
"value",
",",
"get_format",
"(",
"format",
"or",
"'DATE_FORMAT'",
",",
"use_l10n",
"=",
"use_l10n",
")",
")"
] | [
143,
0
] | [
151,
91
] | python | en | ['en', 'error', 'th'] | False |
time_format | (value, format=None, use_l10n=None) |
Format a datetime.time object using a localizable format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
|
Format a datetime.time object using a localizable format. | def time_format(value, format=None, use_l10n=None):
"""
Format a datetime.time object using a localizable format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) | [
"def",
"time_format",
"(",
"value",
",",
"format",
"=",
"None",
",",
"use_l10n",
"=",
"None",
")",
":",
"return",
"dateformat",
".",
"time_format",
"(",
"value",
",",
"get_format",
"(",
"format",
"or",
"'TIME_FORMAT'",
",",
"use_l10n",
"=",
"use_l10n",
")",
")"
] | [
154,
0
] | [
161,
96
] | python | en | ['en', 'error', 'th'] | False |
number_format | (value, decimal_pos=None, use_l10n=None, force_grouping=False) |
Format a numeric value using localization settings.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
|
Format a numeric value using localization settings. | def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Format a numeric value using localization settings.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
force_grouping=force_grouping,
use_l10n=use_l10n,
) | [
"def",
"number_format",
"(",
"value",
",",
"decimal_pos",
"=",
"None",
",",
"use_l10n",
"=",
"None",
",",
"force_grouping",
"=",
"False",
")",
":",
"if",
"use_l10n",
"or",
"(",
"use_l10n",
"is",
"None",
"and",
"settings",
".",
"USE_L10N",
")",
":",
"lang",
"=",
"get_language",
"(",
")",
"else",
":",
"lang",
"=",
"None",
"return",
"numberformat",
".",
"format",
"(",
"value",
",",
"get_format",
"(",
"'DECIMAL_SEPARATOR'",
",",
"lang",
",",
"use_l10n",
"=",
"use_l10n",
")",
",",
"decimal_pos",
",",
"get_format",
"(",
"'NUMBER_GROUPING'",
",",
"lang",
",",
"use_l10n",
"=",
"use_l10n",
")",
",",
"get_format",
"(",
"'THOUSAND_SEPARATOR'",
",",
"lang",
",",
"use_l10n",
"=",
"use_l10n",
")",
",",
"force_grouping",
"=",
"force_grouping",
",",
"use_l10n",
"=",
"use_l10n",
",",
")"
] | [
164,
0
] | [
183,
5
] | python | en | ['en', 'error', 'th'] | False |
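Continuing the same assumptions (stock django.utils.formats, bundled German locale data), a short sketch of number_format; the expected outputs are hedged as "likely" because grouping behavior has shifted slightly across Django versions:

```python
import django
from django.conf import settings

settings.configure(USE_I18N=True, USE_L10N=True, USE_THOUSAND_SEPARATOR=True,
                   LANGUAGE_CODE="de")
django.setup()

from django.utils.formats import number_format

# German locale: ',' as decimal separator, '.' as thousand separator.
print(number_format(1234567.891, decimal_pos=2, force_grouping=True))  # likely '1.234.567,89'
print(number_format(1234567.891, decimal_pos=2, use_l10n=False))       # likely '1234567.89'
```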
localize | (value, use_l10n=None) |
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
|
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format. | def localize(value, use_l10n=None):
"""
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Make sure booleans don't get treated as numbers
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
if use_l10n is False:
return str(value)
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
return value | [
"def",
"localize",
"(",
"value",
",",
"use_l10n",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# Handle strings first for performance reasons.",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"# Make sure booleans don't get treated as numbers",
"return",
"str",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"decimal",
".",
"Decimal",
",",
"float",
",",
"int",
")",
")",
":",
"if",
"use_l10n",
"is",
"False",
":",
"return",
"str",
"(",
"value",
")",
"return",
"number_format",
"(",
"value",
",",
"use_l10n",
"=",
"use_l10n",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"date_format",
"(",
"value",
",",
"'DATETIME_FORMAT'",
",",
"use_l10n",
"=",
"use_l10n",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
":",
"return",
"date_format",
"(",
"value",
",",
"use_l10n",
"=",
"use_l10n",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"time",
")",
":",
"return",
"time_format",
"(",
"value",
",",
"'TIME_FORMAT'",
",",
"use_l10n",
"=",
"use_l10n",
")",
"return",
"value"
] | [
186,
0
] | [
208,
16
] | python | en | ['en', 'error', 'th'] | False |
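A brief sketch of the type dispatch in localize, under the same assumptions as the previous examples. Strings and booleans pass straight through; numbers and dates are routed to the formatters defined above:

```python
import datetime
import decimal

import django
from django.conf import settings

settings.configure(USE_I18N=True, USE_L10N=True, LANGUAGE_CODE="de")
django.setup()

from django.utils.formats import localize

print(localize("already a string"))         # unchanged
print(localize(True))                       # 'True' -- booleans are not treated as numbers
print(localize(decimal.Decimal("1234.5")))  # likely '1234,5' via number_format
print(localize(datetime.date(2024, 3, 1)))  # rendered with the locale's DATE_FORMAT
```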
localize_input | (value, default=None) |
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
|
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
| def localize_input(value, default=None):
"""
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Don't treat booleans as numbers.
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = default or get_format('DATETIME_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = default or get_format('DATE_INPUT_FORMATS')[0]
return value.strftime(format)
elif isinstance(value, datetime.time):
format = default or get_format('TIME_INPUT_FORMATS')[0]
return value.strftime(format)
return value | [
"def",
"localize_input",
"(",
"value",
",",
"default",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# Handle strings first for performance reasons.",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"# Don't treat booleans as numbers.",
"return",
"str",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"decimal",
".",
"Decimal",
",",
"float",
",",
"int",
")",
")",
":",
"return",
"number_format",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
":",
"value",
"=",
"datetime_safe",
".",
"new_datetime",
"(",
"value",
")",
"format",
"=",
"default",
"or",
"get_format",
"(",
"'DATETIME_INPUT_FORMATS'",
")",
"[",
"0",
"]",
"return",
"value",
".",
"strftime",
"(",
"format",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
":",
"value",
"=",
"datetime_safe",
".",
"new_date",
"(",
"value",
")",
"format",
"=",
"default",
"or",
"get_format",
"(",
"'DATE_INPUT_FORMATS'",
")",
"[",
"0",
"]",
"return",
"value",
".",
"strftime",
"(",
"format",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"time",
")",
":",
"format",
"=",
"default",
"or",
"get_format",
"(",
"'TIME_INPUT_FORMATS'",
")",
"[",
"0",
"]",
"return",
"value",
".",
"strftime",
"(",
"format",
")",
"return",
"value"
] | [
211,
0
] | [
233,
16
] | python | en | ['en', 'error', 'th'] | False |
sanitize_separators | (value) |
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
|
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
| def sanitize_separators(value):
"""
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if isinstance(value, str):
parts = []
decimal_separator = get_format('DECIMAL_SEPARATOR')
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
thousand_sep = get_format('THOUSAND_SEPARATOR')
if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3:
# Special case where we suspect a dot meant decimal separator (see #22171)
pass
else:
for replacement in {
thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
value = value.replace(replacement, '')
parts.append(value)
value = '.'.join(reversed(parts))
return value | [
"def",
"sanitize_separators",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"parts",
"=",
"[",
"]",
"decimal_separator",
"=",
"get_format",
"(",
"'DECIMAL_SEPARATOR'",
")",
"if",
"decimal_separator",
"in",
"value",
":",
"value",
",",
"decimals",
"=",
"value",
".",
"split",
"(",
"decimal_separator",
",",
"1",
")",
"parts",
".",
"append",
"(",
"decimals",
")",
"if",
"settings",
".",
"USE_THOUSAND_SEPARATOR",
":",
"thousand_sep",
"=",
"get_format",
"(",
"'THOUSAND_SEPARATOR'",
")",
"if",
"thousand_sep",
"==",
"'.'",
"and",
"value",
".",
"count",
"(",
"'.'",
")",
"==",
"1",
"and",
"len",
"(",
"value",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")",
"!=",
"3",
":",
"# Special case where we suspect a dot meant decimal separator (see #22171)",
"pass",
"else",
":",
"for",
"replacement",
"in",
"{",
"thousand_sep",
",",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"thousand_sep",
")",
"}",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"replacement",
",",
"''",
")",
"parts",
".",
"append",
"(",
"value",
")",
"value",
"=",
"'.'",
".",
"join",
"(",
"reversed",
"(",
"parts",
")",
")",
"return",
"value"
] | [
236,
0
] | [
258,
16
] | python | en | ['en', 'error', 'th'] | False |
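And the inverse direction: sanitize_separators turns locale-formatted user input back into a string that Decimal or float can parse. Same assumptions as above; USE_THOUSAND_SEPARATOR must be enabled for the thousand separator to be stripped at all:

```python
import django
from django.conf import settings

settings.configure(USE_I18N=True, USE_L10N=True, USE_THOUSAND_SEPARATOR=True,
                   LANGUAGE_CODE="de")
django.setup()

from django.utils.formats import sanitize_separators

print(sanitize_separators("1.234.567,89"))  # likely '1234567.89'
print(sanitize_separators("1.23"))          # likely '1.23' -- a single dot not followed by
                                            # exactly three digits is suspected to be a
                                            # decimal point and left alone (see #22171)
```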
jsonp_loader | (url, prefix_regex=r'^(.*\()', suffix_regex=r'(\);)$', sub_d=None, sub_by='') | jsonp_loader is to request (JSON) data from a server in a different domain (JSONP)
and convert it to Python-readable data.
1. url is the url (https) where data is located
2. "prefix_regex" and "suffix_regex" are regex patterns used to
remove JSONP specific prefix and suffix, such as callback header: "callback(" and end: ");",
3. "sub_d" is regex patterns for any unwanted string in loaded json data (will be replaced by sub_by).
4. "sub_by" is the string to replace any unwanted string defined by sub_d
For function conversion, such as Date.UTC to datetime.datetime, please check JSONPDecoder
| jsonp_loader is to request (JSON) data from a server in a different domain (JSONP)
and convert it to Python-readable data.
1. url is the url (https) where data is located
2. "prefix_regex" and "suffix_regex" are regex patterns used to
remove JSONP specific prefix and suffix, such as callback header: "callback(" and end: ");",
3. "sub_d" is regex patterns for any unwanted string in loaded json data (will be replaced by sub_by).
4. "sub_by" is the string to replace any unwanted string defined by sub_d
For function conversion, such as Date.UTC to datetime.datetime, please check JSONPDecoder
| def jsonp_loader(url, prefix_regex=r'^(.*\()', suffix_regex=r'(\);)$', sub_d=None, sub_by=''):
"""jsonp_loader is to request (JSON) data from a server in a different domain (JSONP)
and convert it to Python-readable data.
1. url is the url (https) where data is located
2. "prefix_regex" and "suffix_regex" are regex patterns used to
remove JSONP specific prefix and suffix, such as callback header: "callback(" and end: ");",
3. "sub_d" is regex patterns for any unwanted string in loaded json data (will be replaced by sub_by).
4. "sub_by" is the string to replace any unwanted string defined by sub_d
For function conversion, such as Date.UTC to datetime.datetime, please check JSONPDecoder
"""
hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.77 Safari/535.7'}
req = urllib.request.Request(url, headers=hdr)
page = urlopen(req)
result = page.read().decode("utf-8")
# replace all the redundant info with sub_by
if sub_d:
result = re.sub(sub_d, sub_by, result)
prefix = re.search(prefix_regex, result).group()
suffix = re.search(suffix_regex, result).group()
if result.startswith(prefix) and result.endswith(suffix):
result = result[len(prefix):-len(suffix)]
return json.loads(result, encoding='utf8', cls=JSONPDecoder) | [
"def",
"jsonp_loader",
"(",
"url",
",",
"prefix_regex",
"=",
"r'^(.*\\()'",
",",
"suffix_regex",
"=",
"r'(\\);)$'",
",",
"sub_d",
"=",
"None",
",",
"sub_by",
"=",
"''",
")",
":",
"hdr",
"=",
"{",
"'User-Agent'",
":",
"'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.77 Safari/535.7'",
"}",
"req",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
",",
"headers",
"=",
"hdr",
")",
"page",
"=",
"urlopen",
"(",
"req",
")",
"result",
"=",
"page",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# replace all the redundant info with sub_by ",
"if",
"sub_d",
":",
"result",
"=",
"re",
".",
"sub",
"(",
"sub_d",
",",
"sub_by",
",",
"result",
")",
"prefix",
"=",
"re",
".",
"search",
"(",
"prefix_regex",
",",
"result",
")",
".",
"group",
"(",
")",
"suffix",
"=",
"re",
".",
"search",
"(",
"suffix_regex",
",",
"result",
")",
".",
"group",
"(",
")",
"if",
"result",
".",
"startswith",
"(",
"prefix",
")",
"and",
"result",
".",
"endswith",
"(",
"suffix",
")",
":",
"result",
"=",
"result",
"[",
"len",
"(",
"prefix",
")",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"return",
"json",
".",
"loads",
"(",
"result",
",",
"encoding",
"=",
"'utf8'",
",",
"cls",
"=",
"JSONPDecoder",
")"
] | [
13,
0
] | [
36,
64
] | python | en | ['en', 'en', 'en'] | True |
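The network fetch makes the function above awkward to demonstrate offline, but its core -- stripping the JSONP callback wrapper with the two default regexes -- can be shown on a literal string. This sketch calls plain json.loads, leaving out the encoding keyword used above, which recent Python versions have dropped from json.loads:

```python
import json
import re

raw = 'callback({"points": [[1, 2], [3, 4]], "name": "demo"});'

prefix = re.search(r'^(.*\()', raw).group()   # 'callback('
suffix = re.search(r'(\);)$', raw).group()    # ');'

payload = raw
if raw.startswith(prefix) and raw.endswith(suffix):
    payload = raw[len(prefix):-len(suffix)]

print(json.loads(payload))   # {'points': [[1, 2], [3, 4]], 'name': 'demo'}
```

Note that the default prefix pattern is greedy, so a payload that itself contains a '(' would need a tighter prefix_regex.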
JSONPDecoder.decode | (self, json_string) |
json_string is basically the string that you give to the json.loads method
|
json_string is basically the string that you give to the json.loads method
| def decode(self, json_string):
"""
json_string is basically the string that you give to the json.loads method
"""
default_obj = super(JSONPDecoder, self).decode(json_string)
return list(self._iterdecode(default_obj))[0] | [
"def",
"decode",
"(",
"self",
",",
"json_string",
")",
":",
"default_obj",
"=",
"super",
"(",
"JSONPDecoder",
",",
"self",
")",
".",
"decode",
"(",
"json_string",
")",
"return",
"list",
"(",
"self",
".",
"_iterdecode",
"(",
"default_obj",
")",
")",
"[",
"0",
"]"
] | [
58,
4
] | [
65,
53
] | python | en | ['en', 'error', 'th'] | False |
JSONPDecoder.is_js_date_utc | (json) | Check if the string contains Date.UTC function
and return the match group(s) if there is one
| Check if the string contains Date.UTC function
and return the match group(s) if there is one
| def is_js_date_utc(json):
"""Check if the string contains Date.UTC function
and return the match group(s) if there is one
"""
JS_date_utc_pattern = r'Date\.UTC\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\)'
re_date = re.compile(JS_date_utc_pattern, re.M)
if re_date.search(json):
return re_date.search(json).group(0)
else:
return False | [
"def",
"is_js_date_utc",
"(",
"json",
")",
":",
"JS_date_utc_pattern",
"=",
"r'Date\\.UTC\\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\\)'",
"re_date",
"=",
"re",
".",
"compile",
"(",
"JS_date_utc_pattern",
",",
"re",
".",
"M",
")",
"if",
"re_date",
".",
"search",
"(",
"json",
")",
":",
"return",
"re_date",
".",
"search",
"(",
"json",
")",
".",
"group",
"(",
"0",
")",
"else",
":",
"return",
"False"
] | [
98,
4
] | [
109,
24
] | python | en | ['en', 'en', 'en'] | True |
JSONPDecoder.json2datetime | (json) | Convert JSON representation to date or datetime object depending on
the argument count. Requires UTC datetime representation.
Raises ValueError if the string cannot be parsed.
| Convert JSON representation to date or datetime object depending on
the argument count. Requires UTC datetime representation.
Raises ValueError if the string cannot be parsed.
| def json2datetime(json):
"""Convert JSON representation to date or datetime object depending on
the argument count. Requires UTC datetime representation.
Raises ValueError if the string cannot be parsed.
"""
json_m = re.search(r'([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?', json)
args=json_m.group(0).split(',')
try:
args = list(map(int, args))
except ValueError:
raise ValueError('Invalid arguments: %s'%json)
if len(args)==3:
return datetime.datetime(args[0], args[1]+1, args[2])
elif len(args)==6:
return datetime.datetime(args[0], args[1]+1, args[2],
args[3], args[4], args[5], tzinfo=UTC())
elif len(args)==7:
args[6]*=1000
return datetime.datetime(args[0], args[1]+1, args[2],
args[3], args[4], args[5], args[6], tzinfo=UTC())
raise ValueError('Invalid number of arguments: %s'%json) | [
"def",
"json2datetime",
"(",
"json",
")",
":",
"json_m",
"=",
"re",
".",
"search",
"(",
"r'([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?'",
",",
"json",
")",
"args",
"=",
"json_m",
".",
"group",
"(",
"0",
")",
".",
"split",
"(",
"','",
")",
"try",
":",
"args",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"args",
")",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Invalid arguments: %s'",
"%",
"json",
")",
"if",
"len",
"(",
"args",
")",
"==",
"3",
":",
"return",
"datetime",
".",
"datetime",
"(",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
"+",
"1",
",",
"args",
"[",
"2",
"]",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"6",
":",
"return",
"datetime",
".",
"datetime",
"(",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
"+",
"1",
",",
"args",
"[",
"2",
"]",
",",
"args",
"[",
"3",
"]",
",",
"args",
"[",
"4",
"]",
",",
"args",
"[",
"5",
"]",
",",
"tzinfo",
"=",
"UTC",
"(",
")",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"7",
":",
"args",
"[",
"6",
"]",
"*=",
"1000",
"return",
"datetime",
".",
"datetime",
"(",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
"+",
"1",
",",
"args",
"[",
"2",
"]",
",",
"args",
"[",
"3",
"]",
",",
"args",
"[",
"4",
"]",
",",
"args",
"[",
"5",
"]",
",",
"args",
"[",
"6",
"]",
",",
"tzinfo",
"=",
"UTC",
"(",
")",
")",
"raise",
"ValueError",
"(",
"'Invalid number of arguments: %s'",
"%",
"json",
")"
] | [
112,
4
] | [
135,
64
] | python | en | ['en', 'en', 'en'] | True |
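The only subtle part of the conversion above is the month offset: JavaScript's Date.UTC counts months from 0, hence the args[1] + 1. A self-contained re-implementation of just the three-argument case, for illustration:

```python
import datetime
import re

def js_date_utc_to_datetime(expr):
    """Convert a 'Date.UTC(yyyy,mm,dd)' literal; JS months are 0-based."""
    match = re.search(r'Date\.UTC\(([0-9]+),([0-9]+),([0-9]+)\)', expr)
    if match is None:
        raise ValueError('Not a Date.UTC(year,month,day) literal: %s' % expr)
    year, month, day = (int(g) for g in match.groups())
    return datetime.datetime(year, month + 1, day)

print(js_date_utc_to_datetime("Date.UTC(2013,5,1)"))   # 2013-06-01 00:00:00
```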
__deepcopy__ | (self, memo) | Don't populate the QuerySet's cache. | Don't populate the QuerySet's cache. | def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj | [
"def",
"__deepcopy__",
"(",
"self",
",",
"memo",
")",
":",
"obj",
"=",
"self",
".",
"__class__",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'_result_cache'",
":",
"obj",
".",
"__dict__",
"[",
"k",
"]",
"=",
"None",
"else",
":",
"obj",
".",
"__dict__",
"[",
"k",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"v",
",",
"memo",
")",
"return",
"obj"
] | [
220,
4
] | [
228,
18
] | python | en | ['en', 'en', 'en'] | True |
__iter__ | (self) |
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
- Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
- Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
|
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
- Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
- Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
| def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
- Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
- Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache) | [
"def",
"__iter__",
"(",
"self",
")",
":",
"self",
".",
"_fetch_all",
"(",
")",
"return",
"iter",
"(",
"self",
".",
"_result_cache",
")"
] | [
264,
4
] | [
280,
39
] | python | en | ['en', 'error', 'th'] | False |
__getitem__ | (self, k) | Retrieve an item or slice from the set of results. | Retrieve an item or slice from the set of results. | def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
'QuerySet indices must be integers or slices, not %s.'
% type(k).__name__
)
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0] | [
"def",
"__getitem__",
"(",
"self",
",",
"k",
")",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"(",
"int",
",",
"slice",
")",
")",
":",
"raise",
"TypeError",
"(",
"'QuerySet indices must be integers or slices, not %s.'",
"%",
"type",
"(",
"k",
")",
".",
"__name__",
")",
"assert",
"(",
"(",
"not",
"isinstance",
"(",
"k",
",",
"slice",
")",
"and",
"(",
"k",
">=",
"0",
")",
")",
"or",
"(",
"isinstance",
"(",
"k",
",",
"slice",
")",
"and",
"(",
"k",
".",
"start",
"is",
"None",
"or",
"k",
".",
"start",
">=",
"0",
")",
"and",
"(",
"k",
".",
"stop",
"is",
"None",
"or",
"k",
".",
"stop",
">=",
"0",
")",
")",
")",
",",
"\"Negative indexing is not supported.\"",
"if",
"self",
".",
"_result_cache",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_result_cache",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"k",
",",
"slice",
")",
":",
"qs",
"=",
"self",
".",
"_chain",
"(",
")",
"if",
"k",
".",
"start",
"is",
"not",
"None",
":",
"start",
"=",
"int",
"(",
"k",
".",
"start",
")",
"else",
":",
"start",
"=",
"None",
"if",
"k",
".",
"stop",
"is",
"not",
"None",
":",
"stop",
"=",
"int",
"(",
"k",
".",
"stop",
")",
"else",
":",
"stop",
"=",
"None",
"qs",
".",
"query",
".",
"set_limits",
"(",
"start",
",",
"stop",
")",
"return",
"list",
"(",
"qs",
")",
"[",
":",
":",
"k",
".",
"step",
"]",
"if",
"k",
".",
"step",
"else",
"qs",
"qs",
"=",
"self",
".",
"_chain",
"(",
")",
"qs",
".",
"query",
".",
"set_limits",
"(",
"k",
",",
"k",
"+",
"1",
")",
"qs",
".",
"_fetch_all",
"(",
")",
"return",
"qs",
".",
"_result_cache",
"[",
"0",
"]"
] | [
286,
4
] | [
317,
34
] | python | en | ['en', 'en', 'en'] | True |
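A runnable sketch of how the branches above behave, assuming Django is installed. It leans on the bundled django.contrib.auth User model purely as a convenient concrete model and an in-memory SQLite database, so nothing here is specific to any real project; the exact exception raised for negative indexing varies by Django version, hence the broad except:

```python
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=["django.contrib.contenttypes", "django.contrib.auth"],
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
)
django.setup()

from django.contrib.auth.models import User
from django.core.management import call_command

call_command("migrate", run_syncdb=True, verbosity=0)  # build tables in the in-memory DB
for name in ["ann", "bob", "cai"]:
    User.objects.create(username=name)

qs = User.objects.order_by("username")
print(qs[0].username)                    # 'ann' -- LIMIT 1, fetched immediately
print([u.username for u in qs[1:3]])     # lazy slice -- LIMIT 2 OFFSET 1
print([u.username for u in qs[::2]])     # a step forces evaluation: list(qs)[::2]
try:
    qs[-1]
except (AssertionError, ValueError) as exc:
    print(exc)                           # negative indexing is not supported
```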
iterator | (self, chunk_size=2000) |
An iterator over the results from applying this QuerySet to the
database.
|
An iterator over the results from applying this QuerySet to the
database.
| def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size) | [
"def",
"iterator",
"(",
"self",
",",
"chunk_size",
"=",
"2000",
")",
":",
"if",
"chunk_size",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Chunk size must be strictly positive.'",
")",
"use_chunked_fetch",
"=",
"not",
"connections",
"[",
"self",
".",
"db",
"]",
".",
"settings_dict",
".",
"get",
"(",
"'DISABLE_SERVER_SIDE_CURSORS'",
")",
"return",
"self",
".",
"_iterator",
"(",
"use_chunked_fetch",
",",
"chunk_size",
")"
] | [
354,
4
] | [
362,
60
] | python | en | ['en', 'error', 'th'] | False |
decide_user_install | (
use_user_site: Optional[bool],
prefix_path: Optional[str] = None,
target_dir: Optional[str] = None,
root_path: Optional[str] = None,
isolated_mode: bool = False,
) | Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
| Determine whether to do a user install based on the input options. | def decide_user_install(
use_user_site: Optional[bool],
prefix_path: Optional[str] = None,
target_dir: Optional[str] = None,
root_path: Optional[str] = None,
isolated_mode: bool = False,
) -> bool:
"""Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
"""
# In some cases (config from tox), use_user_site can be set to an integer
# rather than a bool, which 'use_user_site is False' wouldn't catch.
if (use_user_site is not None) and (not use_user_site):
logger.debug("Non-user install by explicit request")
return False
if use_user_site:
if prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
logger.debug("User install by explicit request")
return True
# If we are here, user installs have not been explicitly requested/avoided
assert use_user_site is None
# user install incompatible with --prefix/--target
if prefix_path or target_dir:
logger.debug("Non-user install due to --prefix or --target option")
return False
# If user installs are not enabled, choose a non-user install
if not site.ENABLE_USER_SITE:
logger.debug("Non-user install because user site-packages disabled")
return False
# If we have permission for a non-user install, do that,
# otherwise do a user install.
if site_packages_writable(root=root_path, isolated=isolated_mode):
logger.debug("Non-user install because site-packages writeable")
return False
logger.info("Defaulting to user installation because normal site-packages "
"is not writeable")
return True | [
"def",
"decide_user_install",
"(",
"use_user_site",
":",
"Optional",
"[",
"bool",
"]",
",",
"prefix_path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"target_dir",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"root_path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"isolated_mode",
":",
"bool",
"=",
"False",
",",
")",
"->",
"bool",
":",
"# In some cases (config from tox), use_user_site can be set to an integer",
"# rather than a bool, which 'use_user_site is False' wouldn't catch.",
"if",
"(",
"use_user_site",
"is",
"not",
"None",
")",
"and",
"(",
"not",
"use_user_site",
")",
":",
"logger",
".",
"debug",
"(",
"\"Non-user install by explicit request\"",
")",
"return",
"False",
"if",
"use_user_site",
":",
"if",
"prefix_path",
":",
"raise",
"CommandError",
"(",
"\"Can not combine '--user' and '--prefix' as they imply \"",
"\"different installation locations\"",
")",
"if",
"virtualenv_no_global",
"(",
")",
":",
"raise",
"InstallationError",
"(",
"\"Can not perform a '--user' install. User site-packages \"",
"\"are not visible in this virtualenv.\"",
")",
"logger",
".",
"debug",
"(",
"\"User install by explicit request\"",
")",
"return",
"True",
"# If we are here, user installs have not been explicitly requested/avoided",
"assert",
"use_user_site",
"is",
"None",
"# user install incompatible with --prefix/--target",
"if",
"prefix_path",
"or",
"target_dir",
":",
"logger",
".",
"debug",
"(",
"\"Non-user install due to --prefix or --target option\"",
")",
"return",
"False",
"# If user installs are not enabled, choose a non-user install",
"if",
"not",
"site",
".",
"ENABLE_USER_SITE",
":",
"logger",
".",
"debug",
"(",
"\"Non-user install because user site-packages disabled\"",
")",
"return",
"False",
"# If we have permission for a non-user install, do that,",
"# otherwise do a user install.",
"if",
"site_packages_writable",
"(",
"root",
"=",
"root_path",
",",
"isolated",
"=",
"isolated_mode",
")",
":",
"logger",
".",
"debug",
"(",
"\"Non-user install because site-packages writeable\"",
")",
"return",
"False",
"logger",
".",
"info",
"(",
"\"Defaulting to user installation because normal site-packages \"",
"\"is not writeable\"",
")",
"return",
"True"
] | [
601,
0
] | [
657,
15
] | python | en | ['en', 'en', 'en'] | True |
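The precedence above is easy to lose among the branches, so here is a small pure-Python mirror of just the decision order. It deliberately drops the --user/--prefix conflict and virtualenv checks and reduces writability to a plain flag, so it is an illustration of the ordering rather than a substitute for the real function:

```python
def decide(use_user_site, prefix=None, target=None,
           user_site_enabled=True, site_packages_writable=True):
    """Mirror of the decision order in decide_user_install (illustration only)."""
    if use_user_site is not None and not use_user_site:
        return False                      # explicit non-user request always wins
    if use_user_site:
        return True                       # explicit --user request
    if prefix or target:
        return False                      # --prefix / --target imply a non-user install
    if not user_site_enabled:
        return False                      # user site-packages disabled
    return not site_packages_writable     # default: user install only when needed

print(decide(None))                                 # False -- site-packages writable
print(decide(None, site_packages_writable=False))   # True  -- fall back to user install
print(decide(False, site_packages_writable=False))  # False -- explicit request still wins
```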