identifier (string, 1-155 chars) | parameters (string, 2-6.09k chars) | docstring (string, 11-63.4k chars) | docstring_summary (string, 0-63.4k chars) | function (string, 29-99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (string, 1 distinct value) | docstring_language (string, 2-7 chars) | docstring_language_predictions (string, 18-23 chars) | is_langid_reliable (string, 2 distinct values) |
---|---|---|---|---|---|---|---|---|---|---|---|
DatabaseIntrospection.get_table_list | (self, cursor) | Return a list of table and view names in the current database. | Return a list of table and view names in the current database. | def get_table_list(self, cursor):
    """Return a list of table and view names in the current database."""
    cursor.execute("SHOW FULL TABLES")
    return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
            for row in cursor.fetchall()] | [
"def",
"get_table_list",
"(",
"self",
",",
"cursor",
")",
":",
"cursor",
".",
"execute",
"(",
"\"SHOW FULL TABLES\"",
")",
"return",
"[",
"TableInfo",
"(",
"row",
"[",
"0",
"]",
",",
"{",
"'BASE TABLE'",
":",
"'t'",
",",
"'VIEW'",
":",
"'v'",
"}",
".",
"get",
"(",
"row",
"[",
"1",
"]",
")",
")",
"for",
"row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]"
] | [
66,
4
] | [
70,
45
] | python | en | ['en', 'en', 'en'] | True |
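The row above documents a MySQL introspection hook from Django. As a hedged illustration (not part of the dataset row), a minimal sketch of how such a method is normally reached through Django's connection wrapper; it assumes a configured project with a MySQL database and is typically run from `manage.py shell`:

# Sketch: listing tables through Django's introspection API (assumptions noted above).
from django.db import connection

with connection.cursor() as cursor:
    # get_table_list() returns TableInfo(name, type) tuples, where type is
    # 't' for base tables and 'v' for views, as built in the row above.
    for table in connection.introspection.get_table_list(cursor):
        print(table.name, table.type)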
DatabaseIntrospection.get_table_description | (self, cursor, table_name) |
Return a description of the table with the DB-API cursor.description
interface."
|
Return a description of the table with the DB-API cursor.description
interface."
| def get_table_description(self, cursor, table_name):
    """
    Return a description of the table with the DB-API cursor.description
    interface."
    """
    json_constraints = {}
    if self.connection.mysql_is_mariadb and self.connection.features.can_introspect_json_field:
        # JSON data type is an alias for LONGTEXT in MariaDB, select
        # JSON_VALID() constraints to introspect JSONField.
        cursor.execute("""
            SELECT c.constraint_name AS column_name
            FROM information_schema.check_constraints AS c
            WHERE
                c.table_name = %s AND
                LOWER(c.check_clause) = 'json_valid(`' + LOWER(c.constraint_name) + '`)' AND
                c.constraint_schema = DATABASE()
        """, [table_name])
        json_constraints = {row[0] for row in cursor.fetchall()}
    # A default collation for the given table.
    cursor.execute("""
        SELECT table_collation
        FROM information_schema.tables
        WHERE table_schema = DATABASE()
        AND table_name = %s
    """, [table_name])
    row = cursor.fetchone()
    default_column_collation = row[0] if row else ''
    # information_schema database gives more accurate results for some figures:
    # - varchar length returned by cursor.description is an internal length,
    #   not visible length (#5725)
    # - precision and scale (for decimal fields) (#5014)
    # - auto_increment is not available in cursor.description
    cursor.execute("""
        SELECT
            column_name, data_type, character_maximum_length,
            numeric_precision, numeric_scale, extra, column_default,
            CASE
                WHEN collation_name = %s THEN NULL
                ELSE collation_name
            END AS collation_name,
            CASE
                WHEN column_type LIKE '%% unsigned' THEN 1
                ELSE 0
            END AS is_unsigned
        FROM information_schema.columns
        WHERE table_name = %s AND table_schema = DATABASE()
    """, [default_column_collation, table_name])
    field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
    cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
    def to_int(i):
        return int(i) if i is not None else i
    fields = []
    for line in cursor.description:
        info = field_info[line[0]]
        fields.append(FieldInfo(
            *line[:3],
            to_int(info.max_len) or line[3],
            to_int(info.num_prec) or line[4],
            to_int(info.num_scale) or line[5],
            line[6],
            info.column_default,
            info.collation,
            info.extra,
            info.is_unsigned,
            line[0] in json_constraints,
        ))
    return fields | [
"def",
"get_table_description",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"json_constraints",
"=",
"{",
"}",
"if",
"self",
".",
"connection",
".",
"mysql_is_mariadb",
"and",
"self",
".",
"connection",
".",
"features",
".",
"can_introspect_json_field",
":",
"# JSON data type is an alias for LONGTEXT in MariaDB, select",
"# JSON_VALID() constraints to introspect JSONField.",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT c.constraint_name AS column_name\n FROM information_schema.check_constraints AS c\n WHERE\n c.table_name = %s AND\n LOWER(c.check_clause) = 'json_valid(`' + LOWER(c.constraint_name) + '`)' AND\n c.constraint_schema = DATABASE()\n \"\"\"",
",",
"[",
"table_name",
"]",
")",
"json_constraints",
"=",
"{",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"}",
"# A default collation for the given table.",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT table_collation\n FROM information_schema.tables\n WHERE table_schema = DATABASE()\n AND table_name = %s\n \"\"\"",
",",
"[",
"table_name",
"]",
")",
"row",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"default_column_collation",
"=",
"row",
"[",
"0",
"]",
"if",
"row",
"else",
"''",
"# information_schema database gives more accurate results for some figures:",
"# - varchar length returned by cursor.description is an internal length,",
"# not visible length (#5725)",
"# - precision and scale (for decimal fields) (#5014)",
"# - auto_increment is not available in cursor.description",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT\n column_name, data_type, character_maximum_length,\n numeric_precision, numeric_scale, extra, column_default,\n CASE\n WHEN collation_name = %s THEN NULL\n ELSE collation_name\n END AS collation_name,\n CASE\n WHEN column_type LIKE '%% unsigned' THEN 1\n ELSE 0\n END AS is_unsigned\n FROM information_schema.columns\n WHERE table_name = %s AND table_schema = DATABASE()\n \"\"\"",
",",
"[",
"default_column_collation",
",",
"table_name",
"]",
")",
"field_info",
"=",
"{",
"line",
"[",
"0",
"]",
":",
"InfoLine",
"(",
"*",
"line",
")",
"for",
"line",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"}",
"cursor",
".",
"execute",
"(",
"\"SELECT * FROM %s LIMIT 1\"",
"%",
"self",
".",
"connection",
".",
"ops",
".",
"quote_name",
"(",
"table_name",
")",
")",
"def",
"to_int",
"(",
"i",
")",
":",
"return",
"int",
"(",
"i",
")",
"if",
"i",
"is",
"not",
"None",
"else",
"i",
"fields",
"=",
"[",
"]",
"for",
"line",
"in",
"cursor",
".",
"description",
":",
"info",
"=",
"field_info",
"[",
"line",
"[",
"0",
"]",
"]",
"fields",
".",
"append",
"(",
"FieldInfo",
"(",
"*",
"line",
"[",
":",
"3",
"]",
",",
"to_int",
"(",
"info",
".",
"max_len",
")",
"or",
"line",
"[",
"3",
"]",
",",
"to_int",
"(",
"info",
".",
"num_prec",
")",
"or",
"line",
"[",
"4",
"]",
",",
"to_int",
"(",
"info",
".",
"num_scale",
")",
"or",
"line",
"[",
"5",
"]",
",",
"line",
"[",
"6",
"]",
",",
"info",
".",
"column_default",
",",
"info",
".",
"collation",
",",
"info",
".",
"extra",
",",
"info",
".",
"is_unsigned",
",",
"line",
"[",
"0",
"]",
"in",
"json_constraints",
",",
")",
")",
"return",
"fields"
] | [
72,
4
] | [
141,
21
] | python | en | ['en', 'error', 'th'] | False |
DatabaseIntrospection.get_relations | (self, cursor, table_name) |
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
|
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
| def get_relations(self, cursor, table_name):
    """
    Return a dictionary of {field_name: (field_name_other_table, other_table)}
    representing all relationships to the given table.
    """
    constraints = self.get_key_columns(cursor, table_name)
    relations = {}
    for my_fieldname, other_table, other_field in constraints:
        relations[my_fieldname] = (other_field, other_table)
    return relations | [
"def",
"get_relations",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"constraints",
"=",
"self",
".",
"get_key_columns",
"(",
"cursor",
",",
"table_name",
")",
"relations",
"=",
"{",
"}",
"for",
"my_fieldname",
",",
"other_table",
",",
"other_field",
"in",
"constraints",
":",
"relations",
"[",
"my_fieldname",
"]",
"=",
"(",
"other_field",
",",
"other_table",
")",
"return",
"relations"
] | [
150,
4
] | [
159,
24
] | python | en | ['en', 'error', 'th'] | False |
DatabaseIntrospection.get_key_columns | (self, cursor, table_name) |
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
|
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
| def get_key_columns(self, cursor, table_name):
    """
    Return a list of (column_name, referenced_table_name, referenced_column_name)
    for all key columns in the given table.
    """
    key_columns = []
    cursor.execute("""
        SELECT column_name, referenced_table_name, referenced_column_name
        FROM information_schema.key_column_usage
        WHERE table_name = %s
            AND table_schema = DATABASE()
            AND referenced_table_name IS NOT NULL
            AND referenced_column_name IS NOT NULL""", [table_name])
    key_columns.extend(cursor.fetchall())
    return key_columns | [
"def",
"get_key_columns",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"key_columns",
"=",
"[",
"]",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT column_name, referenced_table_name, referenced_column_name\n FROM information_schema.key_column_usage\n WHERE table_name = %s\n AND table_schema = DATABASE()\n AND referenced_table_name IS NOT NULL\n AND referenced_column_name IS NOT NULL\"\"\"",
",",
"[",
"table_name",
"]",
")",
"key_columns",
".",
"extend",
"(",
"cursor",
".",
"fetchall",
"(",
")",
")",
"return",
"key_columns"
] | [
161,
4
] | [
175,
26
] | python | en | ['en', 'error', 'th'] | False |
DatabaseIntrospection.get_storage_engine | (self, cursor, table_name) |
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
|
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
| def get_storage_engine(self, cursor, table_name):
    """
    Retrieve the storage engine for a given table. Return the default
    storage engine if the table doesn't exist.
    """
    cursor.execute(
        "SELECT engine "
        "FROM information_schema.tables "
        "WHERE table_name = %s", [table_name])
    result = cursor.fetchone()
    if not result:
        return self.connection.features._mysql_storage_engine
    return result[0] | [
"def",
"get_storage_engine",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"cursor",
".",
"execute",
"(",
"\"SELECT engine \"",
"\"FROM information_schema.tables \"",
"\"WHERE table_name = %s\"",
",",
"[",
"table_name",
"]",
")",
"result",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"not",
"result",
":",
"return",
"self",
".",
"connection",
".",
"features",
".",
"_mysql_storage_engine",
"return",
"result",
"[",
"0",
"]"
] | [
177,
4
] | [
189,
24
] | python | en | ['en', 'error', 'th'] | False |
DatabaseIntrospection.get_constraints | (self, cursor, table_name) |
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
|
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
| def get_constraints(self, cursor, table_name):
    """
    Retrieve any constraints or keys (unique, pk, fk, check, index) across
    one or more columns.
    """
    constraints = {}
    # Get the actual constraint names and columns
    name_query = """
        SELECT kc.`constraint_name`, kc.`column_name`,
            kc.`referenced_table_name`, kc.`referenced_column_name`
        FROM information_schema.key_column_usage AS kc
        WHERE
            kc.table_schema = DATABASE() AND
            kc.table_name = %s
        ORDER BY kc.`ordinal_position`
    """
    cursor.execute(name_query, [table_name])
    for constraint, column, ref_table, ref_column in cursor.fetchall():
        if constraint not in constraints:
            constraints[constraint] = {
                'columns': OrderedSet(),
                'primary_key': False,
                'unique': False,
                'index': False,
                'check': False,
                'foreign_key': (ref_table, ref_column) if ref_column else None,
            }
            if self.connection.features.supports_index_column_ordering:
                constraints[constraint]['orders'] = []
        constraints[constraint]['columns'].add(column)
    # Now get the constraint types
    type_query = """
        SELECT c.constraint_name, c.constraint_type
        FROM information_schema.table_constraints AS c
        WHERE
            c.table_schema = DATABASE() AND
            c.table_name = %s
    """
    cursor.execute(type_query, [table_name])
    for constraint, kind in cursor.fetchall():
        if kind.lower() == "primary key":
            constraints[constraint]['primary_key'] = True
            constraints[constraint]['unique'] = True
        elif kind.lower() == "unique":
            constraints[constraint]['unique'] = True
    # Add check constraints.
    if self.connection.features.can_introspect_check_constraints:
        unnamed_constraints_index = 0
        columns = {info.name for info in self.get_table_description(cursor, table_name)}
        if self.connection.mysql_is_mariadb:
            type_query = """
                SELECT c.constraint_name, c.check_clause
                FROM information_schema.check_constraints AS c
                WHERE
                    c.constraint_schema = DATABASE() AND
                    c.table_name = %s
            """
        else:
            type_query = """
                SELECT cc.constraint_name, cc.check_clause
                FROM
                    information_schema.check_constraints AS cc,
                    information_schema.table_constraints AS tc
                WHERE
                    cc.constraint_schema = DATABASE() AND
                    tc.table_schema = cc.constraint_schema AND
                    cc.constraint_name = tc.constraint_name AND
                    tc.constraint_type = 'CHECK' AND
                    tc.table_name = %s
            """
        cursor.execute(type_query, [table_name])
        for constraint, check_clause in cursor.fetchall():
            constraint_columns = self._parse_constraint_columns(check_clause, columns)
            # Ensure uniqueness of unnamed constraints. Unnamed unique
            # and check columns constraints have the same name as
            # a column.
            if set(constraint_columns) == {constraint}:
                unnamed_constraints_index += 1
                constraint = '__unnamed_constraint_%s__' % unnamed_constraints_index
            constraints[constraint] = {
                'columns': constraint_columns,
                'primary_key': False,
                'unique': False,
                'index': False,
                'check': True,
                'foreign_key': None,
            }
    # Now add in the indexes
    cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
    for table, non_unique, index, colseq, column, order, type_ in [
        x[:6] + (x[10],) for x in cursor.fetchall()
    ]:
        if index not in constraints:
            constraints[index] = {
                'columns': OrderedSet(),
                'primary_key': False,
                'unique': False,
                'check': False,
                'foreign_key': None,
            }
            if self.connection.features.supports_index_column_ordering:
                constraints[index]['orders'] = []
        constraints[index]['index'] = True
        constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
        constraints[index]['columns'].add(column)
        if self.connection.features.supports_index_column_ordering:
            constraints[index]['orders'].append('DESC' if order == 'D' else 'ASC')
    # Convert the sorted sets to lists
    for constraint in constraints.values():
        constraint['columns'] = list(constraint['columns'])
    return constraints | [
"def",
"get_constraints",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"constraints",
"=",
"{",
"}",
"# Get the actual constraint names and columns",
"name_query",
"=",
"\"\"\"\n SELECT kc.`constraint_name`, kc.`column_name`,\n kc.`referenced_table_name`, kc.`referenced_column_name`\n FROM information_schema.key_column_usage AS kc\n WHERE\n kc.table_schema = DATABASE() AND\n kc.table_name = %s\n ORDER BY kc.`ordinal_position`\n \"\"\"",
"cursor",
".",
"execute",
"(",
"name_query",
",",
"[",
"table_name",
"]",
")",
"for",
"constraint",
",",
"column",
",",
"ref_table",
",",
"ref_column",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"if",
"constraint",
"not",
"in",
"constraints",
":",
"constraints",
"[",
"constraint",
"]",
"=",
"{",
"'columns'",
":",
"OrderedSet",
"(",
")",
",",
"'primary_key'",
":",
"False",
",",
"'unique'",
":",
"False",
",",
"'index'",
":",
"False",
",",
"'check'",
":",
"False",
",",
"'foreign_key'",
":",
"(",
"ref_table",
",",
"ref_column",
")",
"if",
"ref_column",
"else",
"None",
",",
"}",
"if",
"self",
".",
"connection",
".",
"features",
".",
"supports_index_column_ordering",
":",
"constraints",
"[",
"constraint",
"]",
"[",
"'orders'",
"]",
"=",
"[",
"]",
"constraints",
"[",
"constraint",
"]",
"[",
"'columns'",
"]",
".",
"add",
"(",
"column",
")",
"# Now get the constraint types",
"type_query",
"=",
"\"\"\"\n SELECT c.constraint_name, c.constraint_type\n FROM information_schema.table_constraints AS c\n WHERE\n c.table_schema = DATABASE() AND\n c.table_name = %s\n \"\"\"",
"cursor",
".",
"execute",
"(",
"type_query",
",",
"[",
"table_name",
"]",
")",
"for",
"constraint",
",",
"kind",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"if",
"kind",
".",
"lower",
"(",
")",
"==",
"\"primary key\"",
":",
"constraints",
"[",
"constraint",
"]",
"[",
"'primary_key'",
"]",
"=",
"True",
"constraints",
"[",
"constraint",
"]",
"[",
"'unique'",
"]",
"=",
"True",
"elif",
"kind",
".",
"lower",
"(",
")",
"==",
"\"unique\"",
":",
"constraints",
"[",
"constraint",
"]",
"[",
"'unique'",
"]",
"=",
"True",
"# Add check constraints.",
"if",
"self",
".",
"connection",
".",
"features",
".",
"can_introspect_check_constraints",
":",
"unnamed_constraints_index",
"=",
"0",
"columns",
"=",
"{",
"info",
".",
"name",
"for",
"info",
"in",
"self",
".",
"get_table_description",
"(",
"cursor",
",",
"table_name",
")",
"}",
"if",
"self",
".",
"connection",
".",
"mysql_is_mariadb",
":",
"type_query",
"=",
"\"\"\"\n SELECT c.constraint_name, c.check_clause\n FROM information_schema.check_constraints AS c\n WHERE\n c.constraint_schema = DATABASE() AND\n c.table_name = %s\n \"\"\"",
"else",
":",
"type_query",
"=",
"\"\"\"\n SELECT cc.constraint_name, cc.check_clause\n FROM\n information_schema.check_constraints AS cc,\n information_schema.table_constraints AS tc\n WHERE\n cc.constraint_schema = DATABASE() AND\n tc.table_schema = cc.constraint_schema AND\n cc.constraint_name = tc.constraint_name AND\n tc.constraint_type = 'CHECK' AND\n tc.table_name = %s\n \"\"\"",
"cursor",
".",
"execute",
"(",
"type_query",
",",
"[",
"table_name",
"]",
")",
"for",
"constraint",
",",
"check_clause",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"constraint_columns",
"=",
"self",
".",
"_parse_constraint_columns",
"(",
"check_clause",
",",
"columns",
")",
"# Ensure uniqueness of unnamed constraints. Unnamed unique",
"# and check columns constraints have the same name as",
"# a column.",
"if",
"set",
"(",
"constraint_columns",
")",
"==",
"{",
"constraint",
"}",
":",
"unnamed_constraints_index",
"+=",
"1",
"constraint",
"=",
"'__unnamed_constraint_%s__'",
"%",
"unnamed_constraints_index",
"constraints",
"[",
"constraint",
"]",
"=",
"{",
"'columns'",
":",
"constraint_columns",
",",
"'primary_key'",
":",
"False",
",",
"'unique'",
":",
"False",
",",
"'index'",
":",
"False",
",",
"'check'",
":",
"True",
",",
"'foreign_key'",
":",
"None",
",",
"}",
"# Now add in the indexes",
"cursor",
".",
"execute",
"(",
"\"SHOW INDEX FROM %s\"",
"%",
"self",
".",
"connection",
".",
"ops",
".",
"quote_name",
"(",
"table_name",
")",
")",
"for",
"table",
",",
"non_unique",
",",
"index",
",",
"colseq",
",",
"column",
",",
"order",
",",
"type_",
"in",
"[",
"x",
"[",
":",
"6",
"]",
"+",
"(",
"x",
"[",
"10",
"]",
",",
")",
"for",
"x",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
":",
"if",
"index",
"not",
"in",
"constraints",
":",
"constraints",
"[",
"index",
"]",
"=",
"{",
"'columns'",
":",
"OrderedSet",
"(",
")",
",",
"'primary_key'",
":",
"False",
",",
"'unique'",
":",
"False",
",",
"'check'",
":",
"False",
",",
"'foreign_key'",
":",
"None",
",",
"}",
"if",
"self",
".",
"connection",
".",
"features",
".",
"supports_index_column_ordering",
":",
"constraints",
"[",
"index",
"]",
"[",
"'orders'",
"]",
"=",
"[",
"]",
"constraints",
"[",
"index",
"]",
"[",
"'index'",
"]",
"=",
"True",
"constraints",
"[",
"index",
"]",
"[",
"'type'",
"]",
"=",
"Index",
".",
"suffix",
"if",
"type_",
"==",
"'BTREE'",
"else",
"type_",
".",
"lower",
"(",
")",
"constraints",
"[",
"index",
"]",
"[",
"'columns'",
"]",
".",
"add",
"(",
"column",
")",
"if",
"self",
".",
"connection",
".",
"features",
".",
"supports_index_column_ordering",
":",
"constraints",
"[",
"index",
"]",
"[",
"'orders'",
"]",
".",
"append",
"(",
"'DESC'",
"if",
"order",
"==",
"'D'",
"else",
"'ASC'",
")",
"# Convert the sorted sets to lists",
"for",
"constraint",
"in",
"constraints",
".",
"values",
"(",
")",
":",
"constraint",
"[",
"'columns'",
"]",
"=",
"list",
"(",
"constraint",
"[",
"'columns'",
"]",
")",
"return",
"constraints"
] | [
204,
4
] | [
314,
26
] | python | en | ['en', 'error', 'th'] | False |
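Because get_constraints() returns a nested dict keyed by constraint name, a short hedged sketch of consuming its output may help; the table name below is a placeholder and the snippet assumes a working Django/MySQL connection:

# Sketch: inspecting constraints of a hypothetical table 'auth_user'.
from django.db import connection

with connection.cursor() as cursor:
    constraints = connection.introspection.get_constraints(cursor, 'auth_user')

for name, info in constraints.items():
    # Each entry carries its column list plus the boolean flags set in the function above.
    print(name, info['columns'], info['primary_key'], info['unique'], info['index'])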
reconstruction_evaluation | (X_time_orig, X_time_recon, training_mode) | Reconstruction loss on evaluation set.
Given time major original and reconstructed features data and the training
mode, return loss and eval_metrics_ops.
Args:
X_time_orig: Time major original features data.
X_time_recon: Time major reconstructed features data.
training_mode: Current training mode.
Returns:
loss: Scalar reconstruction loss.
eval_metric_ops: Evaluation metrics of reconstruction.
| Reconstruction loss on evaluation set. | def reconstruction_evaluation(X_time_orig, X_time_recon, training_mode):
"""Reconstruction loss on evaluation set.
Given time major original and reconstructed features data and the training
mode, return loss and eval_metrics_ops.
Args:
X_time_orig: Time major original features data.
X_time_recon: Time major reconstructed features data.
training_mode: Current training mode.
Returns:
loss: Scalar reconstruction loss.
eval_metric_ops: Evaluation metrics of reconstruction.
"""
loss = tf.losses.mean_squared_error(
labels=X_time_orig, predictions=X_time_recon)
eval_metric_ops = None
if training_mode == "reconstruction":
# Reconstruction eval metrics
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
labels=X_time_orig, predictions=X_time_recon),
"mae": tf.metrics.mean_absolute_error(
labels=X_time_orig, predictions=X_time_recon)
}
return loss, eval_metric_ops | [
"def",
"reconstruction_evaluation",
"(",
"X_time_orig",
",",
"X_time_recon",
",",
"training_mode",
")",
":",
"loss",
"=",
"tf",
".",
"losses",
".",
"mean_squared_error",
"(",
"labels",
"=",
"X_time_orig",
",",
"predictions",
"=",
"X_time_recon",
")",
"eval_metric_ops",
"=",
"None",
"if",
"training_mode",
"==",
"\"reconstruction\"",
":",
"# Reconstruction eval metrics",
"eval_metric_ops",
"=",
"{",
"\"rmse\"",
":",
"tf",
".",
"metrics",
".",
"root_mean_squared_error",
"(",
"labels",
"=",
"X_time_orig",
",",
"predictions",
"=",
"X_time_recon",
")",
",",
"\"mae\"",
":",
"tf",
".",
"metrics",
".",
"mean_absolute_error",
"(",
"labels",
"=",
"X_time_orig",
",",
"predictions",
"=",
"X_time_recon",
")",
"}",
"return",
"loss",
",",
"eval_metric_ops"
] | [
3,
0
] | [
32,
30
] | python | en | ['en', 'en', 'en'] | True |
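As a hedged sketch of how this helper could be wired into a TensorFlow 1.x graph (the tensor shapes and import context are assumptions, and reconstruction_evaluation is assumed importable from the surrounding module):

# Sketch (TF 1.x graph mode): wiring the helper into a toy graph.
import tensorflow as tf  # assumes TensorFlow 1.x

# Time-major inputs; the exact layout depends on the surrounding model.
x_orig = tf.placeholder(dtype=tf.float64, shape=[None, None, 5])
x_recon = tf.placeholder(dtype=tf.float64, shape=[None, None, 5])

loss, eval_metric_ops = reconstruction_evaluation(
    X_time_orig=x_orig, X_time_recon=x_recon, training_mode="reconstruction")
# eval_metric_ops now holds "rmse" and "mae" metric ops suitable for an Estimator eval spec.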
RandomRec.__init__ | (self, train_file=None, test_file=None, output_file=None, rank_length=10, sep='\t', output_sep='\t') |
Random Recommender for Item Recommendation
This algorithm predicts a rank for each user using the count of number of feedback of users and items
Usage::
>> RandomRec(train).compute()
>> RandomRec(train, test, ranking).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
|
Random Recommender for Item Recommendation | def __init__(self, train_file=None, test_file=None, output_file=None, rank_length=10, sep='\t', output_sep='\t'):
"""
Random Recommender for Item Recommendation
This algorithm predicts a rank for each user using the count of number of feedback of users and items
Usage::
>> RandomRec(train).compute()
>> RandomRec(train, test, ranking).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
:type rank_length: int, default 10
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(RandomRec, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
rank_length=rank_length, sep=sep, output_sep=output_sep)
self.recommender_name = 'Random Recommender' | [
"def",
"__init__",
"(",
"self",
",",
"train_file",
"=",
"None",
",",
"test_file",
"=",
"None",
",",
"output_file",
"=",
"None",
",",
"rank_length",
"=",
"10",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
")",
":",
"super",
"(",
"RandomRec",
",",
"self",
")",
".",
"__init__",
"(",
"train_file",
"=",
"train_file",
",",
"test_file",
"=",
"test_file",
",",
"output_file",
"=",
"output_file",
",",
"rank_length",
"=",
"rank_length",
",",
"sep",
"=",
"sep",
",",
"output_sep",
"=",
"output_sep",
")",
"self",
".",
"recommender_name",
"=",
"'Random Recommender'"
] | [
20,
4
] | [
56,
52
] | python | en | ['en', 'error', 'th'] | False |
RandomRec.predict | (self) |
Method to predict a rank for each user.
For each pair out of train set, predict a random score for it.
|
Method to predict a rank for each user. | def predict(self):
"""
Method to predict a rank for each user.
For each pair out of train set, predict a random score for it.
"""
for user in set(self.users):
predictions = list()
for item in self.train_set['items_unobserved'].get(user, []):
predictions.append((user, item, random.uniform(0, 1)))
predictions = sorted(predictions, key=lambda x: -x[2])
self.ranking += predictions[:self.rank_length] | [
"def",
"predict",
"(",
"self",
")",
":",
"for",
"user",
"in",
"set",
"(",
"self",
".",
"users",
")",
":",
"predictions",
"=",
"list",
"(",
")",
"for",
"item",
"in",
"self",
".",
"train_set",
"[",
"'items_unobserved'",
"]",
".",
"get",
"(",
"user",
",",
"[",
"]",
")",
":",
"predictions",
".",
"append",
"(",
"(",
"user",
",",
"item",
",",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
")",
")",
"predictions",
"=",
"sorted",
"(",
"predictions",
",",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"2",
"]",
")",
"self",
".",
"ranking",
"+=",
"predictions",
"[",
":",
"self",
".",
"rank_length",
"]"
] | [
58,
4
] | [
71,
58
] | python | en | ['en', 'error', 'th'] | False |
RandomRec.compute | (self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t') |
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation metrics
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
|
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm | def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation metrics
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(RandomRec, self).compute(verbose=verbose)
if verbose:
print("prediction_time:: %4f sec" % timed(self.predict))
print('\n')
else:
self.predict()
self.write_ranking()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep) | [
"def",
"compute",
"(",
"self",
",",
"verbose",
"=",
"True",
",",
"metrics",
"=",
"None",
",",
"verbose_evaluation",
"=",
"True",
",",
"as_table",
"=",
"False",
",",
"table_sep",
"=",
"'\\t'",
")",
":",
"super",
"(",
"RandomRec",
",",
"self",
")",
".",
"compute",
"(",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"\"prediction_time:: %4f sec\"",
"%",
"timed",
"(",
"self",
".",
"predict",
")",
")",
"print",
"(",
"'\\n'",
")",
"else",
":",
"self",
".",
"predict",
"(",
")",
"self",
".",
"write_ranking",
"(",
")",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"self",
".",
"evaluate",
"(",
"metrics",
",",
"verbose_evaluation",
",",
"as_table",
"=",
"as_table",
",",
"table_sep",
"=",
"table_sep",
")"
] | [
73,
4
] | [
106,
94
] | python | en | ['en', 'error', 'th'] | False |
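Following the Usage note in the constructor docstring above, a hedged end-to-end sketch; the file paths are placeholders, the import path is an assumption about the CaseRecommender package layout, and input files are expected as "user item feedback" triples:

# Sketch: running the random recommender end to end.
from caserec.recommenders.item_recommendation.random_rec import RandomRec  # import path assumed

RandomRec(
    train_file='train.dat',      # placeholder path
    test_file='test.dat',        # placeholder path; enables evaluate() at the end of compute()
    output_file='ranking.dat',   # placeholder path for the written ranking
    rank_length=10,
).compute()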
tune_anomaly_thresholds_unsupervised_training | (
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var,
feat_anom_thresh_var,
mahalanobis_dist_feat,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var,
params,
dummy_var) | Tunes anomaly thresholds during unsupervised training mode.
Given dimensions of inputs, mahalanobis distances, and variables tracking
counts, means, and variances of mahalanobis distance, returns loss and
train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
time_anom_thresh_var: Time anomaly threshold variable.
mahalanobis_dist_time: Time major mahalanobis distance.
count_thresh_time_var: Time major running count of number of records.
mean_thresh_time_var: Time major running mean of mahalanobis distance.
var_thresh_time_var: Time major running variance of mahalanobis distance.
feat_anom_thresh_var: Feature anomaly threshold variable.
mahalanobis_dist_feat: Feature major mahalanobis distance.
count_thresh_feat_var: Feature major running count of number of records.
mean_thresh_feat_var: Feature major running mean of mahalanobis distance.
var_thresh_feat_var: Feature major running variance of mahalanobis distance.
params: Dictionary of parameters.
dummy_var: Dummy variable used to allow training mode to happen since it
requires a gradient to tie back to the graph dependency.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
train_op: The train operation to tie our updates back to Estimator graph.
| Tunes anomaly thresholds during unsupervised training mode. | def tune_anomaly_thresholds_unsupervised_training(
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var,
feat_anom_thresh_var,
mahalanobis_dist_feat,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var,
params,
dummy_var):
"""Tunes anomaly thresholds during unsupervised training mode.
Given dimensions of inputs, mahalanobis distances, and variables tracking
counts, means, and variances of mahalanobis distance, returns loss and
train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
time_anom_thresh_var: Time anomaly threshold variable.
mahalanobis_dist_time: Time major mahalanobis distance.
count_thresh_time_var: Time major running count of number of records.
mean_thresh_time_var: Time major running mean of mahalanobis distance.
var_thresh_time_var: Time major running variance of mahalanobis distance.
feat_anom_thresh_var: Feature anomaly threshold variable.
mahalanobis_dist_feat: Feature major mahalanobis distance.
count_thresh_feat_var: Feature major running count of number of records.
mean_thresh_feat_var: Feature major running mean of mahalanobis distance.
var_thresh_feat_var: Feature major running variance of mahalanobis distance.
params: Dictionary of parameters.
dummy_var: Dummy variable used to allow training mode to happen since it
requires a gradient to tie back to the graph dependency.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
train_op: The train operation to tie our updates back to Estimator graph.
"""
with tf.variable_scope(
name_or_scope="mahalanobis_dist_thresh_vars", reuse=tf.AUTO_REUSE):
# Time based
mahalanobis_dist_time_flat = tf.reshape(
tensor=mahalanobis_dist_time,
shape=[cur_batch_size * params["seq_len"]])
singleton_time_condition = tf.equal(
x=cur_batch_size * params["seq_len"], y=1)
var_time_var, mean_time_var, count_time_var = tf.cond(
pred=singleton_time_condition,
true_fn=lambda: singleton_batch_var_variable_updating(
params["seq_len"],
mahalanobis_dist_time_flat,
count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var),
false_fn=lambda: non_singleton_batch_var_variable_updating(
cur_batch_size,
params["seq_len"],
mahalanobis_dist_time_flat,
count_thresh_time_var,
mean_thresh_time_var,
var_thresh_time_var))
# Features based
mahalanobis_dist_feat_flat = tf.reshape(
tensor=mahalanobis_dist_feat,
shape=[cur_batch_size * params["num_feat"]])
singleton_feat_condition = tf.equal(
x=cur_batch_size * params["num_feat"], y=1)
var_feat_var, mean_feat_var, count_feat_var = tf.cond(
pred=singleton_feat_condition,
true_fn=lambda: singleton_batch_var_variable_updating(
params["num_feat"],
mahalanobis_dist_feat_flat,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var),
false_fn=lambda: non_singleton_batch_var_variable_updating(
cur_batch_size,
params["num_feat"],
mahalanobis_dist_feat_flat,
count_thresh_feat_var,
mean_thresh_feat_var,
var_thresh_feat_var))
# Lastly use control dependencies around loss to enforce the mahalanobis
# variables to be assigned, the control order matters, hence the separate
# contexts.
with tf.control_dependencies(
control_inputs=[var_time_var, var_feat_var]):
with tf.control_dependencies(
control_inputs=[mean_time_var, mean_feat_var]):
with tf.control_dependencies(
control_inputs=[count_time_var, count_feat_var]):
time_out = mean_time_var
time_out += params["time_thresh_scl"] * tf.sqrt(x=var_time_var)
feat_out = mean_feat_var
feat_out += params["feat_thresh_scl"] * tf.sqrt(x=var_feat_var)
with tf.control_dependencies(
control_inputs=[tf.assign(ref=time_anom_thresh_var,
value=time_out),
tf.assign(ref=feat_anom_thresh_var,
value=feat_out)]):
loss = tf.reduce_sum(
input_tensor=tf.zeros(shape=(), dtype=tf.float64) * dummy_var)
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
learning_rate=params["learning_rate"],
optimizer="SGD")
return loss, train_op | [
"def",
"tune_anomaly_thresholds_unsupervised_training",
"(",
"cur_batch_size",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_time",
",",
"count_thresh_time_var",
",",
"mean_thresh_time_var",
",",
"var_thresh_time_var",
",",
"feat_anom_thresh_var",
",",
"mahalanobis_dist_feat",
",",
"count_thresh_feat_var",
",",
"mean_thresh_feat_var",
",",
"var_thresh_feat_var",
",",
"params",
",",
"dummy_var",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name_or_scope",
"=",
"\"mahalanobis_dist_thresh_vars\"",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"# Time based",
"mahalanobis_dist_time_flat",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"mahalanobis_dist_time",
",",
"shape",
"=",
"[",
"cur_batch_size",
"*",
"params",
"[",
"\"seq_len\"",
"]",
"]",
")",
"singleton_time_condition",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"cur_batch_size",
"*",
"params",
"[",
"\"seq_len\"",
"]",
",",
"y",
"=",
"1",
")",
"var_time_var",
",",
"mean_time_var",
",",
"count_time_var",
"=",
"tf",
".",
"cond",
"(",
"pred",
"=",
"singleton_time_condition",
",",
"true_fn",
"=",
"lambda",
":",
"singleton_batch_var_variable_updating",
"(",
"params",
"[",
"\"seq_len\"",
"]",
",",
"mahalanobis_dist_time_flat",
",",
"count_thresh_time_var",
",",
"mean_thresh_time_var",
",",
"var_thresh_time_var",
")",
",",
"false_fn",
"=",
"lambda",
":",
"non_singleton_batch_var_variable_updating",
"(",
"cur_batch_size",
",",
"params",
"[",
"\"seq_len\"",
"]",
",",
"mahalanobis_dist_time_flat",
",",
"count_thresh_time_var",
",",
"mean_thresh_time_var",
",",
"var_thresh_time_var",
")",
")",
"# Features based",
"mahalanobis_dist_feat_flat",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"mahalanobis_dist_feat",
",",
"shape",
"=",
"[",
"cur_batch_size",
"*",
"params",
"[",
"\"num_feat\"",
"]",
"]",
")",
"singleton_feat_condition",
"=",
"tf",
".",
"equal",
"(",
"x",
"=",
"cur_batch_size",
"*",
"params",
"[",
"\"num_feat\"",
"]",
",",
"y",
"=",
"1",
")",
"var_feat_var",
",",
"mean_feat_var",
",",
"count_feat_var",
"=",
"tf",
".",
"cond",
"(",
"pred",
"=",
"singleton_feat_condition",
",",
"true_fn",
"=",
"lambda",
":",
"singleton_batch_var_variable_updating",
"(",
"params",
"[",
"\"num_feat\"",
"]",
",",
"mahalanobis_dist_feat_flat",
",",
"count_thresh_feat_var",
",",
"mean_thresh_feat_var",
",",
"var_thresh_feat_var",
")",
",",
"false_fn",
"=",
"lambda",
":",
"non_singleton_batch_var_variable_updating",
"(",
"cur_batch_size",
",",
"params",
"[",
"\"num_feat\"",
"]",
",",
"mahalanobis_dist_feat_flat",
",",
"count_thresh_feat_var",
",",
"mean_thresh_feat_var",
",",
"var_thresh_feat_var",
")",
")",
"# Lastly use control dependencies around loss to enforce the mahalanobis",
"# variables to be assigned, the control order matters, hence the separate",
"# contexts.",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"var_time_var",
",",
"var_feat_var",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"mean_time_var",
",",
"mean_feat_var",
"]",
")",
":",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"count_time_var",
",",
"count_feat_var",
"]",
")",
":",
"time_out",
"=",
"mean_time_var",
"time_out",
"+=",
"params",
"[",
"\"time_thresh_scl\"",
"]",
"*",
"tf",
".",
"sqrt",
"(",
"x",
"=",
"var_time_var",
")",
"feat_out",
"=",
"mean_feat_var",
"feat_out",
"+=",
"params",
"[",
"\"feat_thresh_scl\"",
"]",
"*",
"tf",
".",
"sqrt",
"(",
"x",
"=",
"var_feat_var",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"control_inputs",
"=",
"[",
"tf",
".",
"assign",
"(",
"ref",
"=",
"time_anom_thresh_var",
",",
"value",
"=",
"time_out",
")",
",",
"tf",
".",
"assign",
"(",
"ref",
"=",
"feat_anom_thresh_var",
",",
"value",
"=",
"feat_out",
")",
"]",
")",
":",
"loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"*",
"dummy_var",
")",
"train_op",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"optimize_loss",
"(",
"loss",
"=",
"loss",
",",
"global_step",
"=",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
",",
"learning_rate",
"=",
"params",
"[",
"\"learning_rate\"",
"]",
",",
"optimizer",
"=",
"\"SGD\"",
")",
"return",
"loss",
",",
"train_op"
] | [
7,
0
] | [
125,
23
] | python | en | ['en', 'zu', 'en'] | True |
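In plain terms, the assignments above set each anomaly threshold from the running statistics of the Mahalanobis distances: threshold = mean + scale * sqrt(variance), with the scale taken from params["time_thresh_scl"] or params["feat_thresh_scl"]. For example, with a running mean of 4.0, a running variance of 2.25 and a scale of 2, the tuned threshold becomes 4.0 + 2 * 1.5 = 7.0.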
tune_anomaly_thresholds_unsupervised_eval | (
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
feat_anom_thresh_var,
mahalanobis_dist_feat) | Checks tuned anomaly thresholds during supervised evaluation mode.
Given dimensions of inputs, mahalanobis distances, and variables tracking
counts, means, and variances of mahalanobis distance, returns loss and
train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
time_anom_thresh_var: Time anomaly threshold variable.
mahalanobis_dist_time: Time major mahalanobis distance.
feat_anom_thresh_var: Feature anomaly threshold variable.
mahalanobis_dist_feat: Feature major mahalanobis distance.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
eval_metric_ops: Evaluation metrics of threshold tuning.
| Checks tuned anomaly thresholds during supervised evaluation mode. | def tune_anomaly_thresholds_unsupervised_eval(
cur_batch_size,
time_anom_thresh_var,
mahalanobis_dist_time,
feat_anom_thresh_var,
mahalanobis_dist_feat):
"""Checks tuned anomaly thresholds during supervised evaluation mode.
Given dimensions of inputs, mahalanobis distances, and variables tracking
counts, means, and variances of mahalanobis distance, returns loss and
train_op.
Args:
cur_batch_size: Current batch size, could be partially filled.
time_anom_thresh_var: Time anomaly threshold variable.
mahalanobis_dist_time: Time major mahalanobis distance.
feat_anom_thresh_var: Feature anomaly threshold variable.
mahalanobis_dist_feat: Feature major mahalanobis distance.
Returns:
loss: The scalar loss to tie our updates back to Estimator graph.
eval_metric_ops: Evaluation metrics of threshold tuning.
"""
loss = tf.zeros(shape=[], dtype=tf.float64)
# Flag predictions as either normal or anomalous
# shape = (cur_batch_size,)
time_anom_flags = flag_anomalies_by_thresholding(
cur_batch_size, mahalanobis_dist_time, time_anom_thresh_var)
# shape = (cur_batch_size,)
feat_anom_flags = flag_anomalies_by_thresholding(
cur_batch_size, mahalanobis_dist_feat, feat_anom_thresh_var)
# Anomaly detection eval metrics
eval_metric_ops = {
# Time based
"time_anom_tp": tf.metrics.mean(values=time_anom_flags),
# Features based
"feat_anom_tp": tf.metrics.mean(values=feat_anom_flags)
}
return loss, eval_metric_ops | [
"def",
"tune_anomaly_thresholds_unsupervised_eval",
"(",
"cur_batch_size",
",",
"time_anom_thresh_var",
",",
"mahalanobis_dist_time",
",",
"feat_anom_thresh_var",
",",
"mahalanobis_dist_feat",
")",
":",
"loss",
"=",
"tf",
".",
"zeros",
"(",
"shape",
"=",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"float64",
")",
"# Flag predictions as either normal or anomalous",
"# shape = (cur_batch_size,)",
"time_anom_flags",
"=",
"flag_anomalies_by_thresholding",
"(",
"cur_batch_size",
",",
"mahalanobis_dist_time",
",",
"time_anom_thresh_var",
")",
"# shape = (cur_batch_size,)",
"feat_anom_flags",
"=",
"flag_anomalies_by_thresholding",
"(",
"cur_batch_size",
",",
"mahalanobis_dist_feat",
",",
"feat_anom_thresh_var",
")",
"# Anomaly detection eval metrics",
"eval_metric_ops",
"=",
"{",
"# Time based",
"\"time_anom_tp\"",
":",
"tf",
".",
"metrics",
".",
"mean",
"(",
"values",
"=",
"time_anom_flags",
")",
",",
"# Features based",
"\"feat_anom_tp\"",
":",
"tf",
".",
"metrics",
".",
"mean",
"(",
"values",
"=",
"feat_anom_flags",
")",
"}",
"return",
"loss",
",",
"eval_metric_ops"
] | [
128,
0
] | [
171,
30
] | python | en | ['en', 'en', 'en'] | True |
BaseManager.__str__ | (self) | Return "app_label.model_label.manager_name". | Return "app_label.model_label.manager_name". | def __str__(self):
    """Return "app_label.model_label.manager_name"."""
    return '%s.%s' % (self.model._meta.label, self.name) | [
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"'%s.%s'",
"%",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"label",
",",
"self",
".",
"name",
")"
] | [
33,
4
] | [
35,
60
] | python | en | ['en', 'da', 'en'] | False |
BaseManager.deconstruct | (self) |
Return a 5-tuple of the form (as_manager (True), manager_class,
queryset_class, args, kwargs).
Raise a ValueError if the manager is dynamically generated.
|
Return a 5-tuple of the form (as_manager (True), manager_class,
queryset_class, args, kwargs). | def deconstruct(self):
"""
Return a 5-tuple of the form (as_manager (True), manager_class,
queryset_class, args, kwargs).
Raise a ValueError if the manager is dynamically generated.
"""
qs_class = self._queryset_class
if getattr(self, '_built_with_as_manager', False):
# using MyQuerySet.as_manager()
return (
True, # as_manager
None, # manager_class
'%s.%s' % (qs_class.__module__, qs_class.__name__), # qs_class
None, # args
None, # kwargs
)
else:
module_name = self.__module__
name = self.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find manager %s in %s.\n"
"Please note that you need to inherit from managers you "
"dynamically generated with 'from_queryset()'."
% (name, module_name)
)
return (
False, # as_manager
'%s.%s' % (module_name, name), # manager_class
None, # qs_class
self._constructor_args[0], # args
self._constructor_args[1], # kwargs
) | [
"def",
"deconstruct",
"(",
"self",
")",
":",
"qs_class",
"=",
"self",
".",
"_queryset_class",
"if",
"getattr",
"(",
"self",
",",
"'_built_with_as_manager'",
",",
"False",
")",
":",
"# using MyQuerySet.as_manager()",
"return",
"(",
"True",
",",
"# as_manager",
"None",
",",
"# manager_class",
"'%s.%s'",
"%",
"(",
"qs_class",
".",
"__module__",
",",
"qs_class",
".",
"__name__",
")",
",",
"# qs_class",
"None",
",",
"# args",
"None",
",",
"# kwargs",
")",
"else",
":",
"module_name",
"=",
"self",
".",
"__module__",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"# Make sure it's actually there and not an inner class",
"module",
"=",
"import_module",
"(",
"module_name",
")",
"if",
"not",
"hasattr",
"(",
"module",
",",
"name",
")",
":",
"raise",
"ValueError",
"(",
"\"Could not find manager %s in %s.\\n\"",
"\"Please note that you need to inherit from managers you \"",
"\"dynamically generated with 'from_queryset()'.\"",
"%",
"(",
"name",
",",
"module_name",
")",
")",
"return",
"(",
"False",
",",
"# as_manager",
"'%s.%s'",
"%",
"(",
"module_name",
",",
"name",
")",
",",
"# manager_class",
"None",
",",
"# qs_class",
"self",
".",
"_constructor_args",
"[",
"0",
"]",
",",
"# args",
"self",
".",
"_constructor_args",
"[",
"1",
"]",
",",
"# kwargs",
")"
] | [
40,
4
] | [
75,
13
] | python | en | ['en', 'error', 'th'] | False |
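The ValueError branch above points at the from_queryset() pattern, so a hedged sketch of the manager layout that deconstruct() expects may be useful; the model and field names are illustrative, not from the dataset:

# Sketch: a manager built with from_queryset(), defined at module level so
# deconstruct() can serialize it by import path during migrations.
from django.db import models

class BookQuerySet(models.QuerySet):
    def published(self):
        return self.filter(is_published=True)

# Inheriting from the generated class keeps the manager importable by name,
# which is exactly what the hasattr(module, name) check above verifies.
class BookManager(models.Manager.from_queryset(BookQuerySet)):
    pass

class Book(models.Model):
    title = models.CharField(max_length=200)
    is_published = models.BooleanField(default=False)

    objects = BookManager()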
BaseManager._set_creation_counter | (self) |
Set the creation counter value for this instance and increment the
class-level copy.
|
Set the creation counter value for this instance and increment the
class-level copy.
| def _set_creation_counter(self):
    """
    Set the creation counter value for this instance and increment the
    class-level copy.
    """
    self.creation_counter = BaseManager.creation_counter
    BaseManager.creation_counter += 1 | [
"def",
"_set_creation_counter",
"(",
"self",
")",
":",
"self",
".",
"creation_counter",
"=",
"BaseManager",
".",
"creation_counter",
"BaseManager",
".",
"creation_counter",
"+=",
"1"
] | [
119,
4
] | [
125,
41
] | python | en | ['en', 'error', 'th'] | False |
BaseManager.get_queryset | (self) |
Return a new QuerySet object. Subclasses can override this method to
customize the behavior of the Manager.
|
Return a new QuerySet object. Subclasses can override this method to
customize the behavior of the Manager.
| def get_queryset(self):
    """
    Return a new QuerySet object. Subclasses can override this method to
    customize the behavior of the Manager.
    """
    return self._queryset_class(model=self.model, using=self._db, hints=self._hints) | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"return",
"self",
".",
"_queryset_class",
"(",
"model",
"=",
"self",
".",
"model",
",",
"using",
"=",
"self",
".",
"_db",
",",
"hints",
"=",
"self",
".",
"_hints",
")"
] | [
141,
4
] | [
146,
88
] | python | en | ['en', 'error', 'th'] | False |
task_log_audit | (self: Task, user_id: int, verb: AuditVerbEnum, app_label: str, model: str, object_id: int) | log changes into audit | log changes into audit | def task_log_audit(self: Task, user_id: int, verb: AuditVerbEnum, app_label: str, model: str, object_id: int):
    """ log changes into audit """
    try:
        # get object using content type
        obj_type = ContentType.objects.get(app_label=app_label, model=model)
        user = User.objects.get(id=user_id)
        # save audit
        Audit.objects.create(action=verb, user=user, content_type=obj_type, object_id=object_id)
    except Exception as ex:
        wait = backoff(self.request.retries)
        self.retry(countdown=wait, exc=ex) | [
"def",
"task_log_audit",
"(",
"self",
":",
"Task",
",",
"user_id",
":",
"int",
",",
"verb",
":",
"AuditVerbEnum",
",",
"app_label",
":",
"str",
",",
"model",
":",
"str",
",",
"object_id",
":",
"int",
")",
":",
"try",
":",
"# get object using content type",
"obj_type",
"=",
"ContentType",
".",
"objects",
".",
"get",
"(",
"app_label",
"=",
"app_label",
",",
"model",
"=",
"model",
")",
"user",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"user_id",
")",
"# save audit",
"Audit",
".",
"objects",
".",
"create",
"(",
"action",
"=",
"verb",
",",
"user",
"=",
"user",
",",
"content_type",
"=",
"obj_type",
",",
"object_id",
"=",
"object_id",
")",
"except",
"Exception",
"as",
"ex",
":",
"wait",
"=",
"backoff",
"(",
"self",
".",
"request",
".",
"retries",
")",
"self",
".",
"retry",
"(",
"countdown",
"=",
"wait",
",",
"exc",
"=",
"ex",
")"
] | [
13,
0
] | [
26,
42
] | python | en | ['en', 'da', 'en'] | True |
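Because this row is a bound Celery task, a hedged sketch of enqueueing it follows; the import path, app label, model name, ids and the enum member name are all assumptions about the surrounding project:

# Sketch: enqueueing the audit task asynchronously.
# from myproject.tasks import task_log_audit, AuditVerbEnum  # import path depends on the project
task_log_audit.delay(
    user_id=42,                  # placeholder user id
    verb=AuditVerbEnum.UPDATE,   # enum member name assumed; source only shows the type
    app_label='billing',         # placeholder app label
    model='invoice',             # placeholder model name
    object_id=7,                 # placeholder object id
)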
optimize | (node, environment) | The context hint can be used to perform an static optimization
based on the context given. | The context hint can be used to perform an static optimization
based on the context given. | def optimize(node, environment):
    """The context hint can be used to perform an static optimization
    based on the context given."""
    optimizer = Optimizer(environment)
    return optimizer.visit(node) | [
"def",
"optimize",
"(",
"node",
",",
"environment",
")",
":",
"optimizer",
"=",
"Optimizer",
"(",
"environment",
")",
"return",
"optimizer",
".",
"visit",
"(",
"node",
")"
] | [
22,
0
] | [
26,
32
] | python | en | ['en', 'en', 'en'] | True |
Optimizer.fold | (self, node, eval_ctx=None) | Do constant folding. | Do constant folding. | def fold(self, node, eval_ctx=None):
    """Do constant folding."""
    node = self.generic_visit(node)
    try:
        return nodes.Const.from_untrusted(node.as_const(eval_ctx),
                                          lineno=node.lineno,
                                          environment=self.environment)
    except nodes.Impossible:
        return node | [
"def",
"fold",
"(",
"self",
",",
"node",
",",
"eval_ctx",
"=",
"None",
")",
":",
"node",
"=",
"self",
".",
"generic_visit",
"(",
"node",
")",
"try",
":",
"return",
"nodes",
".",
"Const",
".",
"from_untrusted",
"(",
"node",
".",
"as_const",
"(",
"eval_ctx",
")",
",",
"lineno",
"=",
"node",
".",
"lineno",
",",
"environment",
"=",
"self",
".",
"environment",
")",
"except",
"nodes",
".",
"Impossible",
":",
"return",
"node"
] | [
34,
4
] | [
42,
23
] | python | en | ['en', 'pt', 'en'] | True |
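These two rows come from Jinja2's constant-folding optimizer. A hedged sketch of exercising it on a parsed template follows; optimize() is an internal API, so the import path and behavior may differ between Jinja2 versions:

# Sketch: constant-folding a parsed Jinja2 template AST.
from jinja2 import Environment
from jinja2.optimizer import optimize  # internal module; path assumed

env = Environment()
ast = env.parse("{{ 1 + 2 }} items")  # parse to an AST without compiling
folded = optimize(ast, env)           # Add(Const(1), Const(2)) should fold to Const(3)
print(folded)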
MultipleObjectMixin.get_queryset | (self) |
Return the list of items for this view.
The return value must be an iterable and may be an instance of
`QuerySet` in which case `QuerySet` specific behavior will be enabled.
|
Return the list of items for this view. | def get_queryset(self):
"""
Return the list of items for this view.
The return value must be an iterable and may be an instance of
`QuerySet` in which case `QuerySet` specific behavior will be enabled.
"""
if self.queryset is not None:
queryset = self.queryset
if isinstance(queryset, QuerySet):
queryset = queryset.all()
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
ordering = self.get_ordering()
if ordering:
if isinstance(ordering, str):
ordering = (ordering,)
queryset = queryset.order_by(*ordering)
return queryset | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"if",
"self",
".",
"queryset",
"is",
"not",
"None",
":",
"queryset",
"=",
"self",
".",
"queryset",
"if",
"isinstance",
"(",
"queryset",
",",
"QuerySet",
")",
":",
"queryset",
"=",
"queryset",
".",
"all",
"(",
")",
"elif",
"self",
".",
"model",
"is",
"not",
"None",
":",
"queryset",
"=",
"self",
".",
"model",
".",
"_default_manager",
".",
"all",
"(",
")",
"else",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"%(cls)s is missing a QuerySet. Define \"",
"\"%(cls)s.model, %(cls)s.queryset, or override \"",
"\"%(cls)s.get_queryset().\"",
"%",
"{",
"'cls'",
":",
"self",
".",
"__class__",
".",
"__name__",
"}",
")",
"ordering",
"=",
"self",
".",
"get_ordering",
"(",
")",
"if",
"ordering",
":",
"if",
"isinstance",
"(",
"ordering",
",",
"str",
")",
":",
"ordering",
"=",
"(",
"ordering",
",",
")",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"*",
"ordering",
")",
"return",
"queryset"
] | [
20,
4
] | [
47,
23
] | python | en | ['en', 'error', 'th'] | False |
MultipleObjectMixin.get_ordering | (self) | Return the field or fields to use for ordering the queryset. | Return the field or fields to use for ordering the queryset. | def get_ordering(self):
    """Return the field or fields to use for ordering the queryset."""
    return self.ordering | [
"def",
"get_ordering",
"(",
"self",
")",
":",
"return",
"self",
".",
"ordering"
] | [
49,
4
] | [
51,
28
] | python | en | ['en', 'en', 'en'] | True |
MultipleObjectMixin.paginate_queryset | (self, queryset, page_size) | Paginate the queryset, if needed. | Paginate the queryset, if needed. | def paginate_queryset(self, queryset, page_size):
    """Paginate the queryset, if needed."""
    paginator = self.get_paginator(
        queryset, page_size, orphans=self.get_paginate_orphans(),
        allow_empty_first_page=self.get_allow_empty())
    page_kwarg = self.page_kwarg
    page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
    try:
        page_number = int(page)
    except ValueError:
        if page == 'last':
            page_number = paginator.num_pages
        else:
            raise Http404(_('Page is not “last”, nor can it be converted to an int.'))
    try:
        page = paginator.page(page_number)
        return (paginator, page, page.object_list, page.has_other_pages())
    except InvalidPage as e:
        raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
            'page_number': page_number,
            'message': str(e)
        }) | [
"def",
"paginate_queryset",
"(",
"self",
",",
"queryset",
",",
"page_size",
")",
":",
"paginator",
"=",
"self",
".",
"get_paginator",
"(",
"queryset",
",",
"page_size",
",",
"orphans",
"=",
"self",
".",
"get_paginate_orphans",
"(",
")",
",",
"allow_empty_first_page",
"=",
"self",
".",
"get_allow_empty",
"(",
")",
")",
"page_kwarg",
"=",
"self",
".",
"page_kwarg",
"page",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"page_kwarg",
")",
"or",
"self",
".",
"request",
".",
"GET",
".",
"get",
"(",
"page_kwarg",
")",
"or",
"1",
"try",
":",
"page_number",
"=",
"int",
"(",
"page",
")",
"except",
"ValueError",
":",
"if",
"page",
"==",
"'last'",
":",
"page_number",
"=",
"paginator",
".",
"num_pages",
"else",
":",
"raise",
"Http404",
"(",
"_",
"(",
"'Page is not “last”, nor can it be converted to an int.'))",
"",
"",
"try",
":",
"page",
"=",
"paginator",
".",
"page",
"(",
"page_number",
")",
"return",
"(",
"paginator",
",",
"page",
",",
"page",
".",
"object_list",
",",
"page",
".",
"has_other_pages",
"(",
")",
")",
"except",
"InvalidPage",
"as",
"e",
":",
"raise",
"Http404",
"(",
"_",
"(",
"'Invalid page (%(page_number)s): %(message)s'",
")",
"%",
"{",
"'page_number'",
":",
"page_number",
",",
"'message'",
":",
"str",
"(",
"e",
")",
"}",
")"
] | [
53,
4
] | [
74,
14
] | python | en | ['en', 'en', 'en'] | True |
MultipleObjectMixin.get_paginate_by | (self, queryset) |
Get the number of items to paginate by, or ``None`` for no pagination.
|
Get the number of items to paginate by, or ``None`` for no pagination.
| def get_paginate_by(self, queryset):
    """
    Get the number of items to paginate by, or ``None`` for no pagination.
    """
    return self.paginate_by | [
"def",
"get_paginate_by",
"(",
"self",
",",
"queryset",
")",
":",
"return",
"self",
".",
"paginate_by"
] | [
76,
4
] | [
80,
31
] | python | en | ['en', 'error', 'th'] | False |
MultipleObjectMixin.get_paginator | (self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs) | Return an instance of the paginator for this view. | Return an instance of the paginator for this view. | def get_paginator(self, queryset, per_page, orphans=0,
                  allow_empty_first_page=True, **kwargs):
    """Return an instance of the paginator for this view."""
    return self.paginator_class(
        queryset, per_page, orphans=orphans,
        allow_empty_first_page=allow_empty_first_page, **kwargs) | [
"def",
"get_paginator",
"(",
"self",
",",
"queryset",
",",
"per_page",
",",
"orphans",
"=",
"0",
",",
"allow_empty_first_page",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"paginator_class",
"(",
"queryset",
",",
"per_page",
",",
"orphans",
"=",
"orphans",
",",
"allow_empty_first_page",
"=",
"allow_empty_first_page",
",",
"*",
"*",
"kwargs",
")"
] | [
82,
4
] | [
87,
68
] | python | en | ['en', 'en', 'en'] | True |
MultipleObjectMixin.get_paginate_orphans | (self) |
Return the maximum number of orphans extend the last page by when
paginating.
|
Return the maximum number of orphans extend the last page by when
paginating.
| def get_paginate_orphans(self):
    """
    Return the maximum number of orphans extend the last page by when
    paginating.
    """
    return self.paginate_orphans | [
"def",
"get_paginate_orphans",
"(",
"self",
")",
":",
"return",
"self",
".",
"paginate_orphans"
] | [
89,
4
] | [
94,
36
] | python | en | ['en', 'error', 'th'] | False |
MultipleObjectMixin.get_allow_empty | (self) |
Return ``True`` if the view should display empty lists and ``False``
if a 404 should be raised instead.
|
Return ``True`` if the view should display empty lists and ``False``
if a 404 should be raised instead.
| def get_allow_empty(self):
"""
Return ``True`` if the view should display empty lists and ``False``
if a 404 should be raised instead.
"""
return self.allow_empty | [
"def",
"get_allow_empty",
"(",
"self",
")",
":",
"return",
"self",
".",
"allow_empty"
] | [
96,
4
] | [
101,
31
] | python | en | ['en', 'error', 'th'] | False |
MultipleObjectMixin.get_context_object_name | (self, object_list) | Get the name of the item to be used in the context. | Get the name of the item to be used in the context. | def get_context_object_name(self, object_list):
"""Get the name of the item to be used in the context."""
if self.context_object_name:
return self.context_object_name
elif hasattr(object_list, 'model'):
return '%s_list' % object_list.model._meta.model_name
else:
return None | [
"def",
"get_context_object_name",
"(",
"self",
",",
"object_list",
")",
":",
"if",
"self",
".",
"context_object_name",
":",
"return",
"self",
".",
"context_object_name",
"elif",
"hasattr",
"(",
"object_list",
",",
"'model'",
")",
":",
"return",
"'%s_list'",
"%",
"object_list",
".",
"model",
".",
"_meta",
".",
"model_name",
"else",
":",
"return",
"None"
] | [
103,
4
] | [
110,
23
] | python | en | ['en', 'en', 'en'] | True |
MultipleObjectMixin.get_context_data | (self, *, object_list=None, **kwargs) | Get the context for this view. | Get the context for this view. | def get_context_data(self, *, object_list=None, **kwargs):
"""Get the context for this view."""
queryset = object_list if object_list is not None else self.object_list
page_size = self.get_paginate_by(queryset)
context_object_name = self.get_context_object_name(queryset)
if page_size:
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context = {
'paginator': paginator,
'page_obj': page,
'is_paginated': is_paginated,
'object_list': queryset
}
else:
context = {
'paginator': None,
'page_obj': None,
'is_paginated': False,
'object_list': queryset
}
if context_object_name is not None:
context[context_object_name] = queryset
context.update(kwargs)
return super().get_context_data(**context) | [
"def",
"get_context_data",
"(",
"self",
",",
"*",
",",
"object_list",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"queryset",
"=",
"object_list",
"if",
"object_list",
"is",
"not",
"None",
"else",
"self",
".",
"object_list",
"page_size",
"=",
"self",
".",
"get_paginate_by",
"(",
"queryset",
")",
"context_object_name",
"=",
"self",
".",
"get_context_object_name",
"(",
"queryset",
")",
"if",
"page_size",
":",
"paginator",
",",
"page",
",",
"queryset",
",",
"is_paginated",
"=",
"self",
".",
"paginate_queryset",
"(",
"queryset",
",",
"page_size",
")",
"context",
"=",
"{",
"'paginator'",
":",
"paginator",
",",
"'page_obj'",
":",
"page",
",",
"'is_paginated'",
":",
"is_paginated",
",",
"'object_list'",
":",
"queryset",
"}",
"else",
":",
"context",
"=",
"{",
"'paginator'",
":",
"None",
",",
"'page_obj'",
":",
"None",
",",
"'is_paginated'",
":",
"False",
",",
"'object_list'",
":",
"queryset",
"}",
"if",
"context_object_name",
"is",
"not",
"None",
":",
"context",
"[",
"context_object_name",
"]",
"=",
"queryset",
"context",
".",
"update",
"(",
"kwargs",
")",
"return",
"super",
"(",
")",
".",
"get_context_data",
"(",
"*",
"*",
"context",
")"
] | [
112,
4
] | [
135,
50
] | python | en | ['en', 'en', 'en'] | True |
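Illustrative usage sketch (not part of the dataset rows): a minimal ListView relying on this mixin. The `articles` app, `Article` model, and template path are assumptions; with `paginate_by` set, `get_context_data()` calls `paginate_queryset()` and exposes `paginator`, `page_obj`, `is_paginated`, `object_list`, and the model-derived name from `get_context_object_name()`.

from django.views.generic import ListView
from articles.models import Article  # hypothetical app and model

class ArticleListView(ListView):
    model = Article                  # object_list becomes an Article queryset
    paginate_by = 25                 # enables paginate_queryset() in get_context_data()
    paginate_orphans = 3             # the last page may absorb up to 3 extra items
    allow_empty = True               # empty result lists render instead of raising 404
    template_name = "articles/article_list.html"

# The template context then contains: paginator, page_obj, is_paginated,
# object_list and article_list (from '%s_list' % model_name).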
MultipleObjectTemplateResponseMixin.get_template_names | (self) |
Return a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
|
Return a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
| def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
try:
names = super().get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, 'model'):
opts = self.object_list.model._meta
names.append("%s/%s%s.html" % (opts.app_label, opts.model_name, self.template_name_suffix))
elif not names:
raise ImproperlyConfigured(
"%(cls)s requires either a 'template_name' attribute "
"or a get_queryset() method that returns a QuerySet." % {
'cls': self.__class__.__name__,
}
)
return names | [
"def",
"get_template_names",
"(",
"self",
")",
":",
"try",
":",
"names",
"=",
"super",
"(",
")",
".",
"get_template_names",
"(",
")",
"except",
"ImproperlyConfigured",
":",
"# If template_name isn't specified, it's not a problem --",
"# we just start with an empty list.",
"names",
"=",
"[",
"]",
"# If the list is a queryset, we'll invent a template name based on the",
"# app and model name. This name gets put at the end of the template",
"# name list so that user-supplied names override the automatically-",
"# generated ones.",
"if",
"hasattr",
"(",
"self",
".",
"object_list",
",",
"'model'",
")",
":",
"opts",
"=",
"self",
".",
"object_list",
".",
"model",
".",
"_meta",
"names",
".",
"append",
"(",
"\"%s/%s%s.html\"",
"%",
"(",
"opts",
".",
"app_label",
",",
"opts",
".",
"model_name",
",",
"self",
".",
"template_name_suffix",
")",
")",
"elif",
"not",
"names",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"%(cls)s requires either a 'template_name' attribute \"",
"\"or a get_queryset() method that returns a QuerySet.\"",
"%",
"{",
"'cls'",
":",
"self",
".",
"__class__",
".",
"__name__",
",",
"}",
")",
"return",
"names"
] | [
164,
4
] | [
190,
20
] | python | en | ['en', 'error', 'th'] | False |
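A small, hypothetical illustration of the lookup order this method produces; the `blog` app, `Post` model, and the default `template_name_suffix = '_list'` are assumptions.

from django.views.generic import ListView
from blog.models import Post  # hypothetical app and model

class PostListView(ListView):
    model = Post
    template_name = "blog/custom_posts.html"  # optional explicit name

# get_template_names() would return approximately:
#   ['blog/custom_posts.html', 'blog/post_list.html']
# i.e. the auto-generated '<app_label>/<model_name><suffix>.html' name is appended
# last, so user-supplied names take precedence.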
image_resize2square | (image, desired_size=None) |
Resize an image to a square with a specific resolution (desired_size).
|
Resize an image to a square with a specific resolution (desired_size).
| def image_resize2square(image, desired_size=None):
'''
Resize an image to a square with a specific resolution (desired_size).
'''
assert (image is not None), 'Image cannot be None.'
# Initialize the dimensions of the image to be resized and
# grab the size of image
old_size = image.shape[:2]
# if the desired size is None or zero, then return the
# original image
if desired_size is None or desired_size == 0:
return image
# calculate the ratio of the height and construct the new
# dimensions
ratio = float(desired_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
resized = cv2.resize(image, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
# Assign background color for padding areas. Default is Black.
bg_color = BACKGROUND_COLOR
new_image = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=bg_color)
# return the resized image
return new_image | [
"def",
"image_resize2square",
"(",
"image",
",",
"desired_size",
"=",
"None",
")",
":",
"assert",
"(",
"image",
"is",
"not",
"None",
")",
",",
"'Image cannot be None.'",
"# Initialize the dimensions of the image to be resized and",
"# grab the size of image",
"old_size",
"=",
"image",
".",
"shape",
"[",
":",
"2",
"]",
"# if both the width and height are None, then return the",
"# original image",
"if",
"desired_size",
"is",
"None",
"or",
"desired_size",
"==",
"0",
":",
"return",
"image",
"# calculate the ratio of the height and construct theima",
"# dimensions",
"ratio",
"=",
"float",
"(",
"desired_size",
")",
"/",
"max",
"(",
"old_size",
")",
"new_size",
"=",
"tuple",
"(",
"[",
"int",
"(",
"x",
"*",
"ratio",
")",
"for",
"x",
"in",
"old_size",
"]",
")",
"# new_size should be in (width, height) format",
"resized",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"(",
"new_size",
"[",
"1",
"]",
",",
"new_size",
"[",
"0",
"]",
")",
")",
"delta_w",
"=",
"desired_size",
"-",
"new_size",
"[",
"1",
"]",
"delta_h",
"=",
"desired_size",
"-",
"new_size",
"[",
"0",
"]",
"top",
",",
"bottom",
"=",
"delta_h",
"//",
"2",
",",
"delta_h",
"-",
"(",
"delta_h",
"//",
"2",
")",
"left",
",",
"right",
"=",
"delta_w",
"//",
"2",
",",
"delta_w",
"-",
"(",
"delta_w",
"//",
"2",
")",
"# Assign background color for padding areas. Default is Black.",
"bg_color",
"=",
"BACKGROUND_COLOR",
"new_image",
"=",
"cv2",
".",
"copyMakeBorder",
"(",
"resized",
",",
"top",
",",
"bottom",
",",
"left",
",",
"right",
",",
"cv2",
".",
"BORDER_CONSTANT",
",",
"value",
"=",
"bg_color",
")",
"# return the resized image",
"return",
"new_image"
] | [
49,
0
] | [
82,
20
] | python | en | ['en', 'error', 'th'] | False |
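A minimal usage sketch, assuming OpenCV is importable, that BACKGROUND_COLOR is defined at module level as in the source above, and that 'sample.jpg' is only a placeholder path.

import cv2

img = cv2.imread("sample.jpg")           # placeholder input image
square = image_resize2square(img, 256)   # letterbox-pad to 256x256, aspect ratio preserved
print(square.shape)                      # expected: (256, 256, 3)
cv2.imwrite("sample_256.png", square)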
create_path | (data_dir) |
Create a specific path to store result images.
- Under the data directory, two separate folders will store image and mask files
- Example:
- data_dir-
|- IMAGE_FOLDER
|- MASK_FOLDER
|
Create a specific path to store result images.
- Under the data directory, two separate folders will store image and mask files
- Example:
- data_dir-
|- IMAGE_FOLDER
|- MASK_FOLDER
| def create_path(data_dir):
'''
Create a specific path to store result images.
- Under the data directory, two separate folders will store image and mask files
- Example:
- data_dir-
|- IMAGE_FOLDER
|- MASK_FOLDER
'''
try:
output_image_path = join(data_dir, IMAGE_FOLDER)
if not os.path.isdir(output_image_path):
os.makedirs(output_image_path)
output_mask_path = join(data_dir, MASK_FOLDER)
if not os.path.isdir(output_mask_path):
os.makedirs(output_mask_path)
return output_image_path, output_mask_path
except Exception as e:
logging.error('\nCreate folders error! Message: %s' % (str(e)))
exit(0) | [
"def",
"create_path",
"(",
"data_dir",
")",
":",
"try",
":",
"output_image_path",
"=",
"join",
"(",
"data_dir",
",",
"IMAGE_FOLDER",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_image_path",
")",
":",
"os",
".",
"makedirs",
"(",
"output_image_path",
")",
"output_mask_path",
"=",
"join",
"(",
"data_dir",
",",
"MASK_FOLDER",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_mask_path",
")",
":",
"os",
".",
"makedirs",
"(",
"output_mask_path",
")",
"return",
"output_image_path",
",",
"output_mask_path",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"'\\nCreate folders error! Message: %s'",
"%",
"(",
"str",
"(",
"e",
")",
")",
")",
"exit",
"(",
"0",
")"
] | [
85,
0
] | [
105,
15
] | python | en | ['en', 'error', 'th'] | False |
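A short usage sketch; IMAGE_FOLDER and MASK_FOLDER are module-level constants referenced by the function but defined elsewhere in the source file, so the printed names below are only indicative.

image_dir, mask_dir = create_path("./coco_subset")  # placeholder output root
print(image_dir)  # <./coco_subset>/<IMAGE_FOLDER>
print(mask_dir)   # <./coco_subset>/<MASK_FOLDER>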
main | (args) |
The main entry point of the program
- This program will download images from the MS COCO 2017 (Microsoft Common Objects in Context) repo
and generate annotations for the specified object classes.
|
The main entry point of the program
- This program will download images from the MS COCO 2017 (Microsoft Common Objects in Context) repo
and generate annotations for the specified object classes.
| def main(args):
'''
The main entry point of the program
- This program will download images from the MS COCO 2017 (Microsoft Common Objects in Context) repo
and generate annotations for the specified object classes.
'''
plt.ioff()
data_dir = args.data_root_dir
category_list = list(args.category)
annFile = args.annotation_file
num = args.number
file_name = ''
# Create path for output
output_image_path, output_mask_path = create_path(data_dir)
# initialize COCO API for instance annotations
coco = COCO(annFile)
# get all images containing given categories, select one at random
catIds = coco.getCatIds(catNms=category_list)
if args.id is not None:
imgIds = list(args.id)
num = len(imgIds)
else:
# Get image id list from categories.
imgIds = coco.getImgIds(catIds=catIds)
print('\nImage Generating...')
for i in tqdm(range(num)):
try:
if args.id is not None:
img = coco.loadImgs(imgIds[i])[0] # Load imgs with the specified ids.
else:
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
except Exception as e:
print('\nError: Image ID: %s cannot be found in the annotation file.' % (e))
continue
# use url to load image
I = io.imread(img['coco_url'])
resolution = args.resolution
if resolution != 0:
I = image_resize2square(I, args.resolution)
else:
pass
plt.axis('off')
file_name = join(output_image_path, FILE_MIDDLE_NAME + str(i) + '.png')
plt.imsave(file_name, I)
# Get annotation ids that satisfy the given filter conditions.
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds) # load anns with the specified ids
mask = coco.annToMask(anns[0])
# Generate mask
for j in range(len(anns)):
mask += coco.annToMask(anns[j])
# Background color = (R,G,B)=[68, 1, 84] for MS COCO 2017
# save the mask image
mask = image_resize2square(mask, args.resolution)
file_name = join(output_mask_path, FILE_MIDDLE_NAME + str(i) + '.png')
plt.imsave(file_name, mask)
print('\nProgram finished !')
return True | [
"def",
"main",
"(",
"args",
")",
":",
"plt",
".",
"ioff",
"(",
")",
"data_dir",
"=",
"args",
".",
"data_root_dir",
"category_list",
"=",
"list",
"(",
"args",
".",
"category",
")",
"annFile",
"=",
"args",
".",
"annotation_file",
"num",
"=",
"args",
".",
"number",
"file_name",
"=",
"''",
"# Create path for output",
"output_image_path",
",",
"output_mask_path",
"=",
"create_path",
"(",
"data_dir",
")",
"# initialize COCO API for instance annotations",
"coco",
"=",
"COCO",
"(",
"annFile",
")",
"# get all images containing given categories, select one at random",
"catIds",
"=",
"coco",
".",
"getCatIds",
"(",
"catNms",
"=",
"category_list",
")",
"if",
"args",
".",
"id",
"is",
"not",
"None",
":",
"imgIds",
"=",
"list",
"(",
"args",
".",
"id",
")",
"num",
"=",
"len",
"(",
"imgIds",
")",
"else",
":",
"# Get image id list from categories.",
"imgIds",
"=",
"coco",
".",
"getImgIds",
"(",
"catIds",
"=",
"catIds",
")",
"print",
"(",
"'\\nImage Generating...'",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"num",
")",
")",
":",
"try",
":",
"if",
"args",
".",
"id",
"is",
"not",
"None",
":",
"img",
"=",
"coco",
".",
"loadImgs",
"(",
"imgIds",
"[",
"i",
"]",
")",
"[",
"0",
"]",
"# Load imgs with the specified ids.",
"else",
":",
"img",
"=",
"coco",
".",
"loadImgs",
"(",
"imgIds",
"[",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"imgIds",
")",
")",
"]",
")",
"[",
"0",
"]",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'\\nError: Image ID: %s cannot be found in the annotation file.'",
"%",
"(",
"e",
")",
")",
"continue",
"# use url to load image",
"I",
"=",
"io",
".",
"imread",
"(",
"img",
"[",
"'coco_url'",
"]",
")",
"resolution",
"=",
"args",
".",
"resolution",
"if",
"resolution",
"!=",
"0",
":",
"I",
"=",
"image_resize2square",
"(",
"I",
",",
"args",
".",
"resolution",
")",
"else",
":",
"pass",
"plt",
".",
"axis",
"(",
"'off'",
")",
"file_name",
"=",
"join",
"(",
"output_image_path",
",",
"FILE_MIDDLE_NAME",
"+",
"str",
"(",
"i",
")",
"+",
"'.png'",
")",
"plt",
".",
"imsave",
"(",
"file_name",
",",
"I",
")",
"# Get annotation ids that satisfy the given filter conditions.",
"annIds",
"=",
"coco",
".",
"getAnnIds",
"(",
"imgIds",
"=",
"img",
"[",
"'id'",
"]",
",",
"catIds",
"=",
"catIds",
",",
"iscrowd",
"=",
"None",
")",
"anns",
"=",
"coco",
".",
"loadAnns",
"(",
"annIds",
")",
"# load anns with the specified ids",
"mask",
"=",
"coco",
".",
"annToMask",
"(",
"anns",
"[",
"0",
"]",
")",
"# Generate mask",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"anns",
")",
")",
":",
"mask",
"+=",
"coco",
".",
"annToMask",
"(",
"anns",
"[",
"j",
"]",
")",
"# Background color = (R,G,B)=[68, 1, 84] for MS COCO 2017",
"# save the mask image",
"mask",
"=",
"image_resize2square",
"(",
"mask",
",",
"args",
".",
"resolution",
")",
"file_name",
"=",
"join",
"(",
"output_mask_path",
",",
"FILE_MIDDLE_NAME",
"+",
"str",
"(",
"i",
")",
"+",
"'.png'",
")",
"plt",
".",
"imsave",
"(",
"file_name",
",",
"mask",
")",
"print",
"(",
"'\\nProgram finished !'",
")",
"return",
"True"
] | [
108,
0
] | [
177,
15
] | python | en | ['en', 'error', 'th'] | False |
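A hedged driver sketch for main(): the attribute names follow how `args` is accessed in the function body, while the paths, category, and count are placeholders; pycocotools and the COCO 2017 instance annotation file are assumed to be available.

from argparse import Namespace

args = Namespace(
    data_root_dir="./coco_person",                          # output root (placeholder)
    category=["person"],                                    # COCO category names to keep
    annotation_file="annotations/instances_val2017.json",   # placeholder annotation path
    number=10,                                              # how many images to sample
    id=None,                                                # or a list of explicit image ids
    resolution=256,                                         # 0 keeps the original resolution
)
main(args)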
PostGISAdapter.__init__ | (self, obj, geography=False) |
Initialize on the spatial object.
|
Initialize on the spatial object.
| def __init__(self, obj, geography=False):
"""
Initialize on the spatial object.
"""
self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))
# Getting the WKB (in string form, to allow easy pickling of
# the adaptor) and the SRID from the geometry or raster.
if self.is_geometry:
self.ewkb = bytes(obj.ewkb)
self._adapter = Binary(self.ewkb)
else:
self.ewkb = to_pgraster(obj)
self.srid = obj.srid
self.geography = geography | [
"def",
"__init__",
"(",
"self",
",",
"obj",
",",
"geography",
"=",
"False",
")",
":",
"self",
".",
"is_geometry",
"=",
"isinstance",
"(",
"obj",
",",
"(",
"GEOSGeometry",
",",
"PostGISAdapter",
")",
")",
"# Getting the WKB (in string form, to allow easy pickling of",
"# the adaptor) and the SRID from the geometry or raster.",
"if",
"self",
".",
"is_geometry",
":",
"self",
".",
"ewkb",
"=",
"bytes",
"(",
"obj",
".",
"ewkb",
")",
"self",
".",
"_adapter",
"=",
"Binary",
"(",
"self",
".",
"ewkb",
")",
"else",
":",
"self",
".",
"ewkb",
"=",
"to_pgraster",
"(",
"obj",
")",
"self",
".",
"srid",
"=",
"obj",
".",
"srid",
"self",
".",
"geography",
"=",
"geography"
] | [
11,
4
] | [
26,
34
] | python | en | ['en', 'error', 'th'] | False |
PostGISAdapter.__conform__ | (self, proto) | Does the given protocol conform to what Psycopg2 expects? | Does the given protocol conform to what Psycopg2 expects? | def __conform__(self, proto):
"""Does the given protocol conform to what Psycopg2 expects?"""
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?') | [
"def",
"__conform__",
"(",
"self",
",",
"proto",
")",
":",
"if",
"proto",
"==",
"ISQLQuote",
":",
"return",
"self",
"else",
":",
"raise",
"Exception",
"(",
"'Error implementing psycopg2 protocol. Is psycopg2 installed?'",
")"
] | [
28,
4
] | [
33,
91
] | python | en | ['en', 'en', 'en'] | True |
PostGISAdapter.prepare | (self, conn) |
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
|
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
| def prepare(self, conn):
"""
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
"""
if self.is_geometry:
self._adapter.prepare(conn) | [
"def",
"prepare",
"(",
"self",
",",
"conn",
")",
":",
"if",
"self",
".",
"is_geometry",
":",
"self",
".",
"_adapter",
".",
"prepare",
"(",
"conn",
")"
] | [
48,
4
] | [
54,
39
] | python | en | ['en', 'error', 'th'] | False |
PostGISAdapter.getquoted | (self) |
Return a properly quoted string for use in PostgreSQL/PostGIS.
|
Return a properly quoted string for use in PostgreSQL/PostGIS.
| def getquoted(self):
"""
Return a properly quoted string for use in PostgreSQL/PostGIS.
"""
if self.is_geometry:
# Psycopg will figure out whether to use E'\\000' or '\000'.
return '%s(%s)' % (
'ST_GeogFromWKB' if self.geography else 'ST_GeomFromEWKB',
self._adapter.getquoted().decode()
)
else:
# For rasters, add explicit type cast to WKB string.
return "'%s'::raster" % self.ewkb | [
"def",
"getquoted",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_geometry",
":",
"# Psycopg will figure out whether to use E'\\\\000' or '\\000'.",
"return",
"'%s(%s)'",
"%",
"(",
"'ST_GeogFromWKB'",
"if",
"self",
".",
"geography",
"else",
"'ST_GeomFromEWKB'",
",",
"self",
".",
"_adapter",
".",
"getquoted",
"(",
")",
".",
"decode",
"(",
")",
")",
"else",
":",
"# For rasters, add explicit type cast to WKB string.",
"return",
"\"'%s'::raster\"",
"%",
"self",
".",
"ewkb"
] | [
56,
4
] | [
68,
45
] | python | en | ['en', 'error', 'th'] | False |
upsample | (base_model, x, layers, mode: XModelMode) |
Feature extraction upsampling/generation before regression and heatmap heads.
:param base_model: tf.keras.Model object
:param x: output from last layer of base_model
:param layers: layer names from backbone
:param mode: upsampling mode for features
:return: base_model and features
|
Feature extraction upsampling/generation before regression and heatmap heads.
:param base_model: tf.keras.Model object
:param x: output from last layer of base_model
:param layers: layer names from backbone
:param mode: upsampling mode for features
:return: base_model and features
| def upsample(base_model, x, layers, mode: XModelMode):
"""
Feature extraction upsampling/generation before regression and heatmap heads.
:param base_model: tf.keras.Model object
:param x: output from last layer of base_model
:param layers: layer names from backbone
:param mode: upsampling mode for features
:return: base_model and features
"""
with tf.name_scope("upsample"):
layers = [base_model.get_layer(layer_name) for layer_name in layers]
model_multi_output = tf.keras.Model(
inputs=base_model.input, outputs=[layer.output for layer in layers], name="multioutput"
)
c2, c3, c4, c5 = model_multi_output(x)
c5 = tf.keras.layers.Dropout(rate=0.5)(c5)
c4 = tf.keras.layers.Dropout(rate=0.4)(c4)
c3 = tf.keras.layers.Dropout(rate=0.3)(c3)
c2 = tf.keras.layers.Dropout(rate=0.2)(c2)
# You can add your own upsampling variant here, for example an FPN;
# if you do, also add the new mode to the XModelMode enum
if mode == XModelMode.SIMPLE:
deconv = deConv2DBatchNorm(c5, filters=256, kernel_size=(4, 4), name="deconv1")
deconv = deConv2DBatchNorm(deconv, filters=128, kernel_size=(4, 4), name="deconv2")
features = deConv2DBatchNorm(deconv, filters=64, kernel_size=(4, 4), name="features")
return base_model, features
if mode == XModelMode.DCN:
dcn = deformConv2D(c5, 256, name="up1")
dcn = deformConv2D(dcn, 128, name="up2")
features = deformConv2D(dcn, 64, name="features")
return base_model, features
if mode == XModelMode.DCNSHORTCUT or mode == XModelMode.DCNSHORTCUTCONCAT:
ratio = 2 if mode == XModelMode.DCNSHORTCUTCONCAT else 1
dcn = deformConv2DShortcut(c5, c4, 1, filters=256 / ratio, mode=mode, name="deformconv1")
dcn = deformConv2DShortcut(dcn, c3, 2, filters=128 / ratio, mode=mode, name="deformconv2")
features = deformConv2DShortcut(dcn, c2, 3, filters=64 / ratio, mode=mode, name="features")
return base_model, features
if mode == XModelMode.CONCAT:
x = upsampleConcat(256, 256, c5, c4, name="up1")
x = upsampleConcat(128, 128, x, c3, name="up2")
features = upsampleConcat(64, 64, x, c2, name="features")
return base_model, features
if mode == XModelMode.SUM:
p5 = conv2DBatchNorm(c5, filters=128, kernel_size=(1, 1), name="conv2D_up5")
p4 = upsampleSum(p5, c4, name="up4")
p3 = upsampleSum(p4, c3, name="up3")
p2 = upsampleSum(p3, c2, name="up2")
features = conv2DBatchNorm(p2, filters=128, kernel_size=(3, 3), name="features")
return base_model, features
raise ValueError(f"Unsupported mode {mode}.") | [
"def",
"upsample",
"(",
"base_model",
",",
"x",
",",
"layers",
",",
"mode",
":",
"XModelMode",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"\"upsample\"",
")",
":",
"layers",
"=",
"[",
"base_model",
".",
"get_layer",
"(",
"layer_name",
")",
"for",
"layer_name",
"in",
"layers",
"]",
"model_multi_output",
"=",
"tf",
".",
"keras",
".",
"Model",
"(",
"inputs",
"=",
"base_model",
".",
"input",
",",
"outputs",
"=",
"[",
"layer",
".",
"output",
"for",
"layer",
"in",
"layers",
"]",
",",
"name",
"=",
"\"multioutput\"",
")",
"c2",
",",
"c3",
",",
"c4",
",",
"c5",
"=",
"model_multi_output",
"(",
"x",
")",
"c5",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"0.5",
")",
"(",
"c5",
")",
"c4",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"0.4",
")",
"(",
"c4",
")",
"c3",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"0.3",
")",
"(",
"c3",
")",
"c2",
"=",
"tf",
".",
"keras",
".",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"0.2",
")",
"(",
"c2",
")",
"# You can create your own upsample layer for example FPN",
"# if you need here, you need to also add this mode to the XmodelMode enum",
"if",
"mode",
"==",
"XModelMode",
".",
"SIMPLE",
":",
"deconv",
"=",
"deConv2DBatchNorm",
"(",
"c5",
",",
"filters",
"=",
"256",
",",
"kernel_size",
"=",
"(",
"4",
",",
"4",
")",
",",
"name",
"=",
"\"deconv1\"",
")",
"deconv",
"=",
"deConv2DBatchNorm",
"(",
"deconv",
",",
"filters",
"=",
"128",
",",
"kernel_size",
"=",
"(",
"4",
",",
"4",
")",
",",
"name",
"=",
"\"deconv2\"",
")",
"features",
"=",
"deConv2DBatchNorm",
"(",
"deconv",
",",
"filters",
"=",
"64",
",",
"kernel_size",
"=",
"(",
"4",
",",
"4",
")",
",",
"name",
"=",
"\"features\"",
")",
"return",
"base_model",
",",
"features",
"if",
"mode",
"==",
"XModelMode",
".",
"DCN",
":",
"dcn",
"=",
"deformConv2D",
"(",
"c5",
",",
"256",
",",
"name",
"=",
"\"up1\"",
")",
"dcn",
"=",
"deformConv2D",
"(",
"dcn",
",",
"128",
",",
"name",
"=",
"\"up2\"",
")",
"features",
"=",
"deformConv2D",
"(",
"dcn",
",",
"64",
",",
"name",
"=",
"\"features\"",
")",
"return",
"base_model",
",",
"features",
"if",
"mode",
"==",
"XModelMode",
".",
"DCNSHORTCUT",
"or",
"mode",
"==",
"XModelMode",
".",
"DCNSHORTCUTCONCAT",
":",
"ratio",
"=",
"2",
"if",
"mode",
"==",
"XModelMode",
".",
"DCNSHORTCUTCONCAT",
"else",
"1",
"dcn",
"=",
"deformConv2DShortcut",
"(",
"c5",
",",
"c4",
",",
"1",
",",
"filters",
"=",
"256",
"/",
"ratio",
",",
"mode",
"=",
"mode",
",",
"name",
"=",
"\"deformconv1\"",
")",
"dcn",
"=",
"deformConv2DShortcut",
"(",
"dcn",
",",
"c3",
",",
"2",
",",
"filters",
"=",
"128",
"/",
"ratio",
",",
"mode",
"=",
"mode",
",",
"name",
"=",
"\"deformconv2\"",
")",
"features",
"=",
"deformConv2DShortcut",
"(",
"dcn",
",",
"c2",
",",
"3",
",",
"filters",
"=",
"64",
"/",
"ratio",
",",
"mode",
"=",
"mode",
",",
"name",
"=",
"\"features\"",
")",
"return",
"base_model",
",",
"features",
"if",
"mode",
"==",
"XModelMode",
".",
"CONCAT",
":",
"x",
"=",
"upsampleConcat",
"(",
"256",
",",
"256",
",",
"c5",
",",
"c4",
",",
"name",
"=",
"\"up1\"",
")",
"x",
"=",
"upsampleConcat",
"(",
"128",
",",
"128",
",",
"x",
",",
"c3",
",",
"name",
"=",
"\"up2\"",
")",
"features",
"=",
"upsampleConcat",
"(",
"64",
",",
"64",
",",
"x",
",",
"c2",
",",
"name",
"=",
"\"features\"",
")",
"return",
"base_model",
",",
"features",
"if",
"mode",
"==",
"XModelMode",
".",
"SUM",
":",
"p5",
"=",
"conv2DBatchNorm",
"(",
"c5",
",",
"filters",
"=",
"128",
",",
"kernel_size",
"=",
"(",
"1",
",",
"1",
")",
",",
"name",
"=",
"\"conv2D_up5\"",
")",
"p4",
"=",
"upsampleSum",
"(",
"p5",
",",
"c4",
",",
"name",
"=",
"\"up4\"",
")",
"p3",
"=",
"upsampleSum",
"(",
"p4",
",",
"c3",
",",
"name",
"=",
"\"up3\"",
")",
"p2",
"=",
"upsampleSum",
"(",
"p3",
",",
"c2",
",",
"name",
"=",
"\"up2\"",
")",
"features",
"=",
"conv2DBatchNorm",
"(",
"p2",
",",
"filters",
"=",
"128",
",",
"kernel_size",
"=",
"(",
"3",
",",
"3",
")",
",",
"name",
"=",
"\"features\"",
")",
"return",
"base_model",
",",
"features",
"raise",
"ValueError",
"(",
"f\"Unsupported mode {mode}.\"",
")"
] | [
13,
0
] | [
70,
53
] | python | en | ['en', 'error', 'th'] | False |
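A rough wiring sketch for upsample(): note that `x` is the backbone's input tensor, which is what the function feeds to its multi-output submodel. The ResNet50 layer names are assumptions for this particular backbone (verify with backbone.summary()), and XModelMode plus the deconvolution helpers are assumed importable from the same module.

import tensorflow as tf

num_classes = 80  # example value
inputs = tf.keras.Input(shape=(512, 512, 3))
backbone = tf.keras.applications.ResNet50(include_top=False, input_tensor=inputs)
layer_names = ["conv2_block3_out", "conv3_block4_out",
               "conv4_block6_out", "conv5_block3_out"]  # c2..c5 (assumed names)
backbone, features = upsample(backbone, inputs, layer_names, XModelMode.SIMPLE)
heatmap = tf.keras.layers.Conv2D(num_classes, 1, activation="sigmoid")(features)
model = tf.keras.Model(inputs=inputs, outputs=heatmap)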
ApplicationCommunicator.wait | (self, timeout=1) |
Waits for the application to stop itself and raises any exceptions.
|
Waits for the application to stop itself and raises any exceptions.
| async def wait(self, timeout=1):
"""
Waits for the application to stop itself and raises any exceptions.
"""
try:
async with async_timeout(timeout):
try:
await self.future
self.future.result()
except asyncio.CancelledError:
pass
finally:
if not self.future.done():
self.future.cancel()
try:
await self.future
except asyncio.CancelledError:
pass | [
"async",
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"try",
":",
"async",
"with",
"async_timeout",
"(",
"timeout",
")",
":",
"try",
":",
"await",
"self",
".",
"future",
"self",
".",
"future",
".",
"result",
"(",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"finally",
":",
"if",
"not",
"self",
".",
"future",
".",
"done",
"(",
")",
":",
"self",
".",
"future",
".",
"cancel",
"(",
")",
"try",
":",
"await",
"self",
".",
"future",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass"
] | [
22,
4
] | [
39,
24
] | python | en | ['en', 'error', 'th'] | False |
ApplicationCommunicator.send_input | (self, message) |
Sends a single message to the application
|
Sends a single message to the application
| async def send_input(self, message):
"""
Sends a single message to the application
"""
# Give it the message
await self.input_queue.put(message) | [
"async",
"def",
"send_input",
"(",
"self",
",",
"message",
")",
":",
"# Give it the message",
"await",
"self",
".",
"input_queue",
".",
"put",
"(",
"message",
")"
] | [
56,
4
] | [
61,
43
] | python | en | ['en', 'error', 'th'] | False |
ApplicationCommunicator.receive_output | (self, timeout=1) |
Receives a single message from the application, with optional timeout.
|
Receives a single message from the application, with optional timeout.
| async def receive_output(self, timeout=1):
"""
Receives a single message from the application, with optional timeout.
"""
# Make sure there's not an exception to raise from the task
if self.future.done():
self.future.result()
# Wait and receive the message
try:
async with async_timeout(timeout):
return await self.output_queue.get()
except asyncio.TimeoutError as e:
# See if we have another error to raise inside
if self.future.done():
self.future.result()
else:
self.future.cancel()
try:
await self.future
except asyncio.CancelledError:
pass
raise e | [
"async",
"def",
"receive_output",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"# Make sure there's not an exception to raise from the task",
"if",
"self",
".",
"future",
".",
"done",
"(",
")",
":",
"self",
".",
"future",
".",
"result",
"(",
")",
"# Wait and receive the message",
"try",
":",
"async",
"with",
"async_timeout",
"(",
"timeout",
")",
":",
"return",
"await",
"self",
".",
"output_queue",
".",
"get",
"(",
")",
"except",
"asyncio",
".",
"TimeoutError",
"as",
"e",
":",
"# See if we have another error to raise inside",
"if",
"self",
".",
"future",
".",
"done",
"(",
")",
":",
"self",
".",
"future",
".",
"result",
"(",
")",
"else",
":",
"self",
".",
"future",
".",
"cancel",
"(",
")",
"try",
":",
"await",
"self",
".",
"future",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"raise",
"e"
] | [
63,
4
] | [
84,
19
] | python | en | ['en', 'error', 'th'] | False |
ApplicationCommunicator.receive_nothing | (self, timeout=0.1, interval=0.01) |
Checks that there is no message to receive in the given time.
|
Checks that there is no message to receive in the given time.
| async def receive_nothing(self, timeout=0.1, interval=0.01):
"""
Checks that there is no message to receive in the given time.
"""
# `interval` has precedence over `timeout`
start = time.monotonic()
while time.monotonic() - start < timeout:
if not self.output_queue.empty():
return False
await asyncio.sleep(interval)
return self.output_queue.empty() | [
"async",
"def",
"receive_nothing",
"(",
"self",
",",
"timeout",
"=",
"0.1",
",",
"interval",
"=",
"0.01",
")",
":",
"# `interval` has precedence over `timeout`",
"start",
"=",
"time",
".",
"monotonic",
"(",
")",
"while",
"time",
".",
"monotonic",
"(",
")",
"-",
"start",
"<",
"timeout",
":",
"if",
"not",
"self",
".",
"output_queue",
".",
"empty",
"(",
")",
":",
"return",
"False",
"await",
"asyncio",
".",
"sleep",
"(",
"interval",
")",
"return",
"self",
".",
"output_queue",
".",
"empty",
"(",
")"
] | [
86,
4
] | [
96,
40
] | python | en | ['en', 'error', 'th'] | False |
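A compact test sketch showing the intended interaction pattern (instantiate, send_input, receive_output, receive_nothing, wait). The constructor signature (application, scope) is taken from asgiref's testing utility and is not shown in the rows above; the echo application and the use of pytest-asyncio are this example's assumptions.

import pytest

async def echo_app(scope, receive, send):
    # toy single-callable ASGI-style app: echo one message back, then exit
    message = await receive()
    await send({"type": "echo", "text": message["text"]})

@pytest.mark.asyncio
async def test_echo_roundtrip():
    communicator = ApplicationCommunicator(echo_app, {"type": "test"})
    await communicator.send_input({"type": "hello", "text": "hi"})
    reply = await communicator.receive_output(timeout=1)
    assert reply["text"] == "hi"
    assert await communicator.receive_nothing()
    await communicator.wait()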
video_and_frame_level_model | (features, labels, mode, params) |
features['video_id'].shape = (batch_size, 1)
features['mean_rgb'].shape = (batch_size, 1024)
features['mean_audio'].shape = (batch_size, 128)
features['rgb'].shape = (batch_size, MAX_FRAMES, 1024)
features['audio'].shape = (batch_size, MAX_FRAMES, 128)
|
features['video_id'].shape = (batch_size, 1)
features['mean_rgb'].shape = (batch_size, 1024)
features['mean_audio'].shape = (batch_size, 128)
features['rgb'].shape = (batch_size, MAX_FRAMES, 1024)
features['audio'].shape = (batch_size, MAX_FRAMES, 128)
| def video_and_frame_level_model(features, labels, mode, params):
'''
features['video_id'].shape = (batch_size, 1)
features['mean_rgb'].shape = (batch_size, 1024)
features['mean_audio'].shape = (batch_size, 128)
features['rgb'].shape = (batch_size, MAX_FRAMES, 1024)
features['audio'].shape = (batch_size, MAX_FRAMES, 128)
'''
print("\nvideo_and_frame_level_model: features = {}".format(features))
print("video_and_frame_level_model: labels = {}".format(labels))
print("video_and_frame_level_model: mode = {}".format(mode))
# 0. Initial setup
# Get the current batch size in case this is a partially full batch
current_batch_size = tf.shape(features['video_id'])[0]
print("video_and_frame_level_model: current_batch_size = {}".format(current_batch_size))
# 1. Configure networks
################################################################################
# VIDEO NETWORK
# Concatenate the mean_rgb and mean_audio features together along columns in order to flatten them into one vector per example
combined_video_features = tf.concat(values = [features['mean_rgb'], features['mean_audio']], axis = 1) # shape = (current_batch_size, 1024 + 128)
print("video_and_frame_level_model: combined_video_features = {}".format(combined_video_features))
# Create the input layer to our video DNN
video_network = combined_video_features # shape = (current_batch_size, 1024 + 128)
print("video_and_frame_level_model: video_network = combined_video_features = {}".format(video_network))
# Add video hidden layers with the given number of units/neurons per layer
for units in params['video_hidden_units']:
video_network = tf.layers.dense(inputs = video_network, units = units, activation = tf.nn.relu) # shape = (current_batch_size, units)
print("video_and_frame_level_model: video_network = {}, units = {}".format(video_network, units))
# Connect the final hidden layer to a dense layer with no activation to get a video logit for each class
video_logits = tf.layers.dense(inputs = video_network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size, NUM_CLASSES)
print("video_and_frame_level_model: video_logits = {}".format(video_logits))
# Find the video probabilities of all video logits
video_probabilities = tf.sigmoid(video_logits) # shape = (current_batch_size, NUM_CLASSES)
print("video_and_frame_level_model: video_probabilities = {}".format(video_probabilities))
################################################################################
# FRAME NETWORK
# Concatenate the rgb and audio features together along columns in order to flatten them into one vector per example per frame
combined_frame_features = tf.concat(values = [features['rgb'], features['audio']], axis = 2) # shape = (current_batch_size, MAX_FRAMES, 1024 + 128)
print("video_and_frame_level_model: combined_frame_features = {}".format(combined_frame_features))
# Reshape feature vectors per frame so that we have instead batch_size * MAX_FRAMES examples to go through our frame DNN
reshaped_combined_frame_features = tf.reshape(tensor = combined_frame_features, shape = [current_batch_size * MAX_FRAMES, 1024 + 128]) # shape = (current_batch_size * MAX_FRAMES, 1024 + 128)
print("video_and_frame_level_model: reshaped_combined_frame_features = {}".format(reshaped_combined_frame_features))
# Create the input layer to our frame DNN
frame_network = reshaped_combined_frame_features # shape = (current_batch_size * MAX_FRAMES, 1024 + 128)
print("video_and_frame_level_model: frame_network = reshaped_combined_frame_features = {}".format(frame_network))
# Add frame hidden layers with the given number of units/neurons per layer
for units in params['frame_hidden_units']:
frame_network = tf.layers.dense(inputs = frame_network, units = units, activation = tf.nn.relu) # shape = (current_batch_size * MAX_FRAMES, units)
print("video_and_frame_level_model: frame_network = {}, units = {}".format(frame_network, units))
# Connect the final hidden layer to a dense layer with no activation to get a frame logit for each class
frame_logits = tf.layers.dense(inputs = frame_network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)
print("video_and_frame_level_model: frame_logits = {}".format(frame_logits))
# Find the frame probabilities of all frame logits using sigmoid since this is multi-class, multi-label
frame_probabilities = tf.sigmoid(frame_logits) # shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)
print("video_and_frame_level_model: frame_probabilities = {}".format(frame_probabilities))
# Reshape the frame probability tensor so that each frame has its own matrix
reshaped_frame_probabilities = tf.reshape(tensor = frame_probabilities, shape = [current_batch_size, MAX_FRAMES, NUM_CLASSES]) # shape = (current_batch_size, MAX_FRAMES, NUM_CLASSES)
print("video_and_frame_level_model: reshaped_frame_probabilities = {}".format(reshaped_frame_probabilities))
# Average the frame probabilities over all frames per each example and each class
average_probabilities_over_frames = tf.reduce_mean(input_tensor = reshaped_frame_probabilities, axis = 1) # shape = (current_batch_size, NUM_CLASSES)
print("video_and_frame_level_model: average_probabilities_over_frames = {}".format(average_probabilities_over_frames))
################################################################################
# Average together the video and frame probabilities
combined_probabilities = (video_probabilities + average_probabilities_over_frames) * 0.5 # shape = (current_batch_size, NUM_CLASSES)
print("video_and_frame_level_model: combined_probabilities = {}".format(combined_probabilities))
# Select the top k probabilities in descending order
top_k_probabilities = tf.nn.top_k(input = combined_probabilities, k = params['top_k'], sorted = True) # shape = (current_batch_size, top_k)
print("video_and_frame_level_model: top_k_probabilities = {}".format(top_k_probabilities))
# Find the inverse of all of the average probabilities to get all of the average logits
inverse_probabilities_logits = tf.log(combined_probabilities + 0.00000001) - tf.log(1.0 - combined_probabilities + 0.00000001) # shape = (current_batch_size, NUM_CLASSES)
print("video_and_frame_level_model: inverse_probabilities_logits = {}".format(inverse_probabilities_logits))
# Select the top k logits using the indices of the top k probabilities in descending order
top_k_logits = tf.map_fn(fn = lambda x: tf.gather(params = inverse_probabilities_logits[x], indices = top_k_probabilities.indices[x]),
elems = tf.range(start = 0, limit = current_batch_size),
dtype = tf.float32) # shape = (current_batch_size, 1, top_k)
print("frame_level_model: top_k_logits = {}".format(top_k_logits))
# The top k classes in descending order of likelihood
top_k_classes = top_k_probabilities.indices # shape = (current_batch_size, top_k)
print("video_and_frame_level_model: top_k_classes = {}".format(top_k_classes))
# The 0/1 predictions based on a threshold, in this case the threshold is whether the probability is greater than random chance
predictions = tf.where(
condition = average_probabilities_over_frames > 1.0 / NUM_CLASSES, # shape = (current_batch_size, NUM_CLASSES)
x = tf.ones_like(tensor = average_probabilities_over_frames, dtype = tf.float32),
y = tf.zeros_like(tensor = average_probabilities_over_frames, dtype = tf.float32))
print("video_and_frame_level_model: predictions = {}".format(predictions))
# The 0/1 top k predictions based on a threshold, in this case the threshold is whether the probability is greater than random chance
top_k_predictions = tf.where(
condition = top_k_probabilities.values > 1.0 / NUM_CLASSES, # shape = (current_batch_size, top_k)
x = tf.ones_like(tensor = top_k_probabilities.values, dtype = tf.float32),
y = tf.zeros_like(tensor = top_k_probabilities.values, dtype = tf.float32))
print("video_and_frame_level_model: top_k_predictions = {}\n".format(top_k_predictions))
# 2. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss
# We already have the probabilities we can use the cross entropy formula directly to calculate the loss
loss = tf.reduce_mean(input_tensor = -tf.reduce_sum(input_tensor = labels * tf.log(x = average_probabilities_over_frames + 0.00000001), axis = 1))
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "Adam")
eval_metric_ops = {
"accuracy": tf.metrics.mean_per_class_accuracy(labels = labels, predictions = predictions, num_classes = NUM_CLASSES)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"logits": top_k_logits,
"probabilities": top_k_probabilities.values,
"predictions": top_k_predictions,
"classes": top_k_classes}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
# 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs) | [
"def",
"video_and_frame_level_model",
"(",
"features",
",",
"labels",
",",
"mode",
",",
"params",
")",
":",
"print",
"(",
"\"\\nvideo_and_frame_level_model: features = {}\"",
".",
"format",
"(",
"features",
")",
")",
"print",
"(",
"\"video_and_frame_level_model: labels = {}\"",
".",
"format",
"(",
"labels",
")",
")",
"print",
"(",
"\"video_and_frame_level_model: mode = {}\"",
".",
"format",
"(",
"mode",
")",
")",
"# 0. Initial setup",
"# Get the current batch size in case this is a partially full batch",
"current_batch_size",
"=",
"tf",
".",
"shape",
"(",
"features",
"[",
"'video_id'",
"]",
")",
"[",
"0",
"]",
"print",
"(",
"\"video_and_frame_level_model: current_batch_size = {}\"",
".",
"format",
"(",
"current_batch_size",
")",
")",
"# 1. Configure networks",
"################################################################################",
"# VIDEO NETWORK",
"# Concatenate the mean_rgb and mean_audio features together along columns in order to flatten them into one vector per example",
"combined_video_features",
"=",
"tf",
".",
"concat",
"(",
"values",
"=",
"[",
"features",
"[",
"'mean_rgb'",
"]",
",",
"features",
"[",
"'mean_audio'",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"# shape = (current_batch_size, 1024 + 128)",
"print",
"(",
"\"video_and_frame_level_model: combined_video_features = {}\"",
".",
"format",
"(",
"combined_video_features",
")",
")",
"# Create the input layer to our video DNN",
"video_network",
"=",
"combined_video_features",
"# shape = (current_batch_size, 1024 + 128)",
"print",
"(",
"\"video_and_frame_level_model: video_network = combined_video_features = {}\"",
".",
"format",
"(",
"video_network",
")",
")",
"# Add video hidden layers with the given number of units/neurons per layer",
"for",
"units",
"in",
"params",
"[",
"'video_hidden_units'",
"]",
":",
"video_network",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"video_network",
",",
"units",
"=",
"units",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"# shape = (current_batch_size, units)",
"print",
"(",
"\"video_and_frame_level_model: video_network = {}, units = {}\"",
".",
"format",
"(",
"video_network",
",",
"units",
")",
")",
"# Connect the final hidden layer to a dense layer with no activation to get a video logit for each class",
"video_logits",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"video_network",
",",
"units",
"=",
"NUM_CLASSES",
",",
"activation",
"=",
"None",
")",
"# shape = (current_batch_size, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: video_logits = {}\"",
".",
"format",
"(",
"video_logits",
")",
")",
"# Find the video probabilities of all video logits",
"video_probabilities",
"=",
"tf",
".",
"sigmoid",
"(",
"video_logits",
")",
"# shape = (current_batch_size, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: video_probabilities = {}\"",
".",
"format",
"(",
"video_probabilities",
")",
")",
"################################################################################",
"# FRAME NETWORK",
"# Concatenate the rgb and audio features together along columns in order to flatten them into one vector per example per frame",
"combined_frame_features",
"=",
"tf",
".",
"concat",
"(",
"values",
"=",
"[",
"features",
"[",
"'rgb'",
"]",
",",
"features",
"[",
"'audio'",
"]",
"]",
",",
"axis",
"=",
"2",
")",
"# shape = (current_batch_size, MAX_FRAMES, 1024 + 128)",
"print",
"(",
"\"video_and_frame_level_model: combined_frame_features = {}\"",
".",
"format",
"(",
"combined_frame_features",
")",
")",
"# Reshape feature vectors per frame so that we have instead batch_size * MAX_FRAMES examples to go through our frame DNN",
"reshaped_combined_frame_features",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"combined_frame_features",
",",
"shape",
"=",
"[",
"current_batch_size",
"*",
"MAX_FRAMES",
",",
"1024",
"+",
"128",
"]",
")",
"# shape = (current_batch_size * MAX_FRAMES, 1024 + 128)",
"print",
"(",
"\"video_and_frame_level_model: reshaped_combined_frame_features = {}\"",
".",
"format",
"(",
"reshaped_combined_frame_features",
")",
")",
"# Create the input layer to our frame DNN",
"frame_network",
"=",
"reshaped_combined_frame_features",
"# shape = (current_batch_size * MAX_FRAMES, 1024 + 128)",
"print",
"(",
"\"video_and_frame_level_model: frame_network = reshaped_combined_frame_features = {}\"",
".",
"format",
"(",
"frame_network",
")",
")",
"# Add frame hidden layers with the given number of units/neurons per layer",
"for",
"units",
"in",
"params",
"[",
"'frame_hidden_units'",
"]",
":",
"frame_network",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"frame_network",
",",
"units",
"=",
"units",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
")",
"# shape = (current_batch_size * MAX_FRAMES, units)",
"print",
"(",
"\"video_and_frame_level_model: frame_network = {}, units = {}\"",
".",
"format",
"(",
"frame_network",
",",
"units",
")",
")",
"# Connect the final hidden layer to a dense layer with no activation to get a frame logit for each class",
"frame_logits",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"inputs",
"=",
"frame_network",
",",
"units",
"=",
"NUM_CLASSES",
",",
"activation",
"=",
"None",
")",
"# shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: frame_logits = {}\"",
".",
"format",
"(",
"frame_logits",
")",
")",
"# Find the frame probabilities of all frame logits using sigmoid since this is multi-class, multi-label",
"frame_probabilities",
"=",
"tf",
".",
"sigmoid",
"(",
"frame_logits",
")",
"# shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: frame_probabilities = {}\"",
".",
"format",
"(",
"frame_probabilities",
")",
")",
"# Reshape the frame probability tensor so that each frame has its own matrix",
"reshaped_frame_probabilities",
"=",
"tf",
".",
"reshape",
"(",
"tensor",
"=",
"frame_probabilities",
",",
"shape",
"=",
"[",
"current_batch_size",
",",
"MAX_FRAMES",
",",
"NUM_CLASSES",
"]",
")",
"# shape = (current_batch_size, MAX_FRAMES, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: reshaped_frame_probabilities = {}\"",
".",
"format",
"(",
"reshaped_frame_probabilities",
")",
")",
"# Average the frame probabilities over all frames per each example and each class",
"average_probabilities_over_frames",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"reshaped_frame_probabilities",
",",
"axis",
"=",
"1",
")",
"# shape = (current_batch_size, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: average_probabilities_over_frames = {}\"",
".",
"format",
"(",
"average_probabilities_over_frames",
")",
")",
"################################################################################",
"# Average together the video and frame probabilities",
"combined_probabilities",
"=",
"(",
"video_probabilities",
"+",
"average_probabilities_over_frames",
")",
"*",
"0.5",
"# shape = (current_batch_size, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: combined_probabilities = {}\"",
".",
"format",
"(",
"combined_probabilities",
")",
")",
"# Select the top k probablities in descending order",
"top_k_probabilities",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"input",
"=",
"combined_probabilities",
",",
"k",
"=",
"params",
"[",
"'top_k'",
"]",
",",
"sorted",
"=",
"True",
")",
"# shape = (current_batch_size, top_k)",
"print",
"(",
"\"video_and_frame_level_model: top_k_probabilities = {}\"",
".",
"format",
"(",
"top_k_probabilities",
")",
")",
"# Find the inverse of all of the average probabilities to get all of the average logits",
"inverse_probabilities_logits",
"=",
"tf",
".",
"log",
"(",
"combined_probabilities",
"+",
"0.00000001",
")",
"-",
"tf",
".",
"log",
"(",
"1.0",
"-",
"combined_probabilities",
"+",
"0.00000001",
")",
"# shape = (current_batch_size, NUM_CLASSES)",
"print",
"(",
"\"video_and_frame_level_model: inverse_probabilities_logits = {}\"",
".",
"format",
"(",
"inverse_probabilities_logits",
")",
")",
"# Select the top k logits using the indices of the top k probabilities in descending order",
"top_k_logits",
"=",
"tf",
".",
"map_fn",
"(",
"fn",
"=",
"lambda",
"x",
":",
"tf",
".",
"gather",
"(",
"params",
"=",
"inverse_probabilities_logits",
"[",
"x",
"]",
",",
"indices",
"=",
"top_k_probabilities",
".",
"indices",
"[",
"x",
"]",
")",
",",
"elems",
"=",
"tf",
".",
"range",
"(",
"start",
"=",
"0",
",",
"limit",
"=",
"current_batch_size",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# shape = (current_batch_size, 1, top_k)",
"print",
"(",
"\"frame_level_model: top_k_logits = {}\"",
".",
"format",
"(",
"top_k_logits",
")",
")",
"# The top k classes in descending order of likelihood",
"top_k_classes",
"=",
"top_k_probabilities",
".",
"indices",
"# shape = (current_batch_size, top_k)",
"print",
"(",
"\"video_and_frame_level_model: top_k_classes = {}\"",
".",
"format",
"(",
"top_k_classes",
")",
")",
"# The 0/1 predictions based on a threshold, in this case the threshold is if the probability it greater than random chance",
"predictions",
"=",
"tf",
".",
"where",
"(",
"condition",
"=",
"average_probabilities_over_frames",
">",
"1.0",
"/",
"NUM_CLASSES",
",",
"# shape = (current_batch_size, NUM_CLASSES)",
"x",
"=",
"tf",
".",
"ones_like",
"(",
"tensor",
"=",
"average_probabilities_over_frames",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"y",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"average_probabilities_over_frames",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"print",
"(",
"\"video_and_frame_level_model: predictions = {}\"",
".",
"format",
"(",
"predictions",
")",
")",
"# The 0/1 top k predictions based on a threshold, in this case the threshold is if the probability it greater than random chance",
"top_k_predictions",
"=",
"tf",
".",
"where",
"(",
"condition",
"=",
"top_k_probabilities",
".",
"values",
">",
"1.0",
"/",
"NUM_CLASSES",
",",
"# shape = (current_batch_size, top_k)",
"x",
"=",
"tf",
".",
"ones_like",
"(",
"tensor",
"=",
"top_k_probabilities",
".",
"values",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"y",
"=",
"tf",
".",
"zeros_like",
"(",
"tensor",
"=",
"top_k_probabilities",
".",
"values",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"print",
"(",
"\"video_and_frame_level_model: top_k_predictions = {}\\n\"",
".",
"format",
"(",
"top_k_predictions",
")",
")",
"# 2. Loss function, training/eval ops ",
"if",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
"or",
"mode",
"==",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
":",
"# Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss",
"# We already have the probabilities we can use the cross entropy formula directly to calculate the loss",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"-",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"labels",
"*",
"tf",
".",
"log",
"(",
"x",
"=",
"average_probabilities_over_frames",
"+",
"0.00000001",
")",
",",
"axis",
"=",
"1",
")",
")",
"train_op",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"optimize_loss",
"(",
"loss",
"=",
"loss",
",",
"global_step",
"=",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
",",
"learning_rate",
"=",
"0.01",
",",
"optimizer",
"=",
"\"Adam\"",
")",
"eval_metric_ops",
"=",
"{",
"\"accuracy\"",
":",
"tf",
".",
"metrics",
".",
"mean_per_class_accuracy",
"(",
"labels",
"=",
"labels",
",",
"predictions",
"=",
"predictions",
",",
"num_classes",
"=",
"NUM_CLASSES",
")",
"}",
"else",
":",
"loss",
"=",
"None",
"train_op",
"=",
"None",
"eval_metric_ops",
"=",
"None",
"# 3. Create predictions",
"predictions_dict",
"=",
"{",
"\"logits\"",
":",
"top_k_logits",
",",
"\"probabilities\"",
":",
"top_k_probabilities",
".",
"values",
",",
"\"predictions\"",
":",
"top_k_predictions",
",",
"\"classes\"",
":",
"top_k_classes",
"}",
"# 4. Create export outputs",
"export_outputs",
"=",
"{",
"\"predict_export_outputs\"",
":",
"tf",
".",
"estimator",
".",
"export",
".",
"PredictOutput",
"(",
"outputs",
"=",
"predictions_dict",
")",
"}",
"# 5. Return EstimatorSpec",
"return",
"tf",
".",
"estimator",
".",
"EstimatorSpec",
"(",
"mode",
"=",
"mode",
",",
"predictions",
"=",
"predictions_dict",
",",
"loss",
"=",
"loss",
",",
"train_op",
"=",
"train_op",
",",
"eval_metric_ops",
"=",
"eval_metric_ops",
",",
"export_outputs",
"=",
"export_outputs",
")"
] | [
197,
0
] | [
350,
40
] | python | en | ['en', 'error', 'th'] | False |
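A hedged sketch of plugging this model_fn into the TF 1.x Estimator API (the body relies on tf.layers and tf.contrib, so TensorFlow 1.x is assumed); the hidden-unit sizes, top_k, model_dir, and input_fn are placeholders.

import tensorflow as tf

estimator = tf.estimator.Estimator(
    model_fn=video_and_frame_level_model,
    model_dir="./yt8m_model",               # placeholder checkpoint directory
    params={
        "video_hidden_units": [512, 256],   # example sizes
        "frame_hidden_units": [512, 256],
        "top_k": 5,
    },
)
# train_input_fn must yield the feature dict described in the docstring
# ('video_id', 'mean_rgb', 'mean_audio', 'rgb', 'audio') plus multi-hot labels:
# estimator.train(input_fn=train_input_fn, steps=1000)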
FileSystemKeyValueStorage.__init__ | (self, root_path) |
Create a new Storage object.
All files will be stored under the root_path location
:param root_path: The base folder for all storage files
|
Create a new Storage object. | def __init__(self, root_path):
"""
Create a new Storage object.
All files will be stored under the root_path location
:param root_path: The base folder for all storage files
"""
if root_path is None:
root_path = gettempdir()
if not os.path.isdir(root_path):
os.makedirs(root_path)
self._root_path = root_path | [
"def",
"__init__",
"(",
"self",
",",
"root_path",
")",
":",
"if",
"root_path",
"is",
"None",
":",
"root_path",
"=",
"gettempdir",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"root_path",
")",
":",
"os",
".",
"makedirs",
"(",
"root_path",
")",
"self",
".",
"_root_path",
"=",
"root_path"
] | [
14,
4
] | [
28,
35
] | python | en | ['en', 'error', 'th'] | False |
FileSystemKeyValueStorage._get_key_full_path | (self, key) |
Generate the file path for the key
:param key: The key
:return: The absolute path of the value file associated with the key
|
Generate the file path for the key | def _get_key_full_path(self, key):
"""
Generate the file path for the key
:param key: The key
:return: The absolute path of the value file associated with the key
"""
return os.path.join(self._root_path, key + self._item_ext) | [
"def",
"_get_key_full_path",
"(",
"self",
",",
"key",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_root_path",
",",
"key",
"+",
"self",
".",
"_item_ext",
")"
] | [
60,
4
] | [
68,
66
] | python | en | ['en', 'error', 'th'] | False |
FileSystemKeyValueStorage._exists | (self, key) |
Indicate whether key exists
:param key: The key
:return: bool True if the file for the given key exists
|
Indicate whether key exists | def _exists(self, key):
"""
Indicate whether key exists
:param key: The key
:return: bool True if the file for the given key exists
"""
return os.path.isfile(self._get_key_full_path(key)) | [
"def",
"_exists",
"(",
"self",
",",
"key",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_get_key_full_path",
"(",
"key",
")",
")"
] | [
70,
4
] | [
78,
59
] | python | en | ['en', 'error', 'th'] | False |
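Only the constructor and two private helpers appear in this excerpt, so the sketch below exercises just those; `_item_ext` is a class attribute assumed to be defined elsewhere in the class, and the key name is a placeholder.

storage = FileSystemKeyValueStorage(root_path=None)    # falls back to the system temp dir
print(storage._root_path)                              # e.g. /tmp on Linux
print(storage._get_key_full_path("session-token"))     # <root>/session-token<_item_ext>
print(storage._exists("session-token"))                # False until a value file is written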
api_client | () | api client without authentication | api client without authentication | def api_client() -> Generator[APIClient, None, None]:
""" api client without authentication """
from rest_framework.test import APIClient
yield APIClient() | [
"def",
"api_client",
"(",
")",
"->",
"Generator",
"[",
"APIClient",
",",
"None",
",",
"None",
"]",
":",
"from",
"rest_framework",
".",
"test",
"import",
"APIClient",
"yield",
"APIClient",
"(",
")"
] | [
14,
0
] | [
17,
21
] | python | en | ['en', 'fr', 'en'] | True |
auth_api_client | (db, api_client) | APIClient with authentication factory | APIClient with authentication factory | def auth_api_client(db, api_client):
""" APIClient with authentication factory """
def make_auth_client(user: AbstractUser) -> APIClient:
# api_client.force_login(user=user)
# login
api_client.force_authenticate(user=user)
return api_client
yield make_auth_client
# logout on tear down
api_client.force_authenticate(user=None) | [
"def",
"auth_api_client",
"(",
"db",
",",
"api_client",
")",
":",
"def",
"make_auth_client",
"(",
"user",
":",
"AbstractUser",
")",
"->",
"APIClient",
":",
"# api_client.force_login(user=user)",
"# login",
"api_client",
".",
"force_authenticate",
"(",
"user",
"=",
"user",
")",
"return",
"api_client",
"yield",
"make_auth_client",
"# logout on tear down",
"api_client",
".",
"force_authenticate",
"(",
"user",
"=",
"None",
")"
] | [
21,
0
] | [
34,
44
] | python | en | ['en', 'en', 'en'] | True |
create_user | (db, django_user_model: AbstractUser, test_password: str) | factory for creating users | factory for creating users | def create_user(db, django_user_model: AbstractUser, test_password: str):
""" factory for creating users """
def make_user(username, email, first_name, last_name) -> AbstractUser:
new_user: AbstractUser = User.objects.create(username=username, email=email,
first_name=first_name, last_name=last_name)
# assign password
new_user.set_password(test_password)
new_user.save()
return new_user
yield make_user | [
"def",
"create_user",
"(",
"db",
",",
"django_user_model",
":",
"AbstractUser",
",",
"test_password",
":",
"str",
")",
":",
"def",
"make_user",
"(",
"username",
",",
"email",
",",
"first_name",
",",
"last_name",
")",
"->",
"AbstractUser",
":",
"new_user",
":",
"AbstractUser",
"=",
"User",
".",
"objects",
".",
"create",
"(",
"username",
"=",
"username",
",",
"email",
"=",
"email",
",",
"first_name",
"=",
"first_name",
",",
"last_name",
"=",
"last_name",
")",
"# assign password",
"new_user",
".",
"set_password",
"(",
"test_password",
")",
"new_user",
".",
"save",
"(",
")",
"return",
"new_user",
"yield",
"make_user"
] | [
43,
0
] | [
54,
19
] | python | en | ['en', 'en', 'en'] | True |
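
The three fixtures above (api_client, auth_api_client, create_user) are meant to compose; a minimal pytest sketch of how they might be used together — the /api/profile/ endpoint and the user details are hypothetical:

def test_profile_requires_authentication(api_client, auth_api_client, create_user):
    # Anonymous request against a hypothetical protected endpoint.
    assert api_client.get("/api/profile/").status_code in (401, 403)

    # Build a user through the factory fixture, then authenticate the client.
    user = create_user("alice", "alice@example.com", "Alice", "Smith")
    client = auth_api_client(user)
    assert client.get("/api/profile/").status_code == 200
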
create_image | () | image file factory | image file factory | def create_image():
""" image file factory """
with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
image = Image.new("RGB", (100, 100), color="#ddd")
image.save(tf, format="JPEG")
tf.close()
yield tf
# clean up
image.close() | [
"def",
"create_image",
"(",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"\".jpg\"",
",",
"delete",
"=",
"False",
")",
"as",
"tf",
":",
"image",
"=",
"Image",
".",
"new",
"(",
"\"RGB\"",
",",
"(",
"100",
",",
"100",
")",
",",
"color",
"=",
"\"#ddd\"",
")",
"image",
".",
"save",
"(",
"tf",
",",
"format",
"=",
"\"JPEG\"",
")",
"tf",
".",
"close",
"(",
")",
"yield",
"tf",
"# clean up",
"image",
".",
"close",
"(",
")"
] | [
66,
0
] | [
76,
17
] | python | en | ['en', 'en', 'en'] | True |
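
A sketch of feeding the create_image fixture into an upload test; the /api/photos/ endpoint and the multipart field name are assumptions:

def test_photo_upload(create_user, auth_api_client, create_image):
    client = auth_api_client(create_user("bob", "bob@example.com", "Bob", "Jones"))

    # Re-open the temporary JPEG written by the fixture and post it.
    with open(create_image.name, "rb") as fh:
        response = client.post("/api/photos/", {"image": fh}, format="multipart")

    assert response.status_code in (200, 201)
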
delete_selected | (modeladmin, request, queryset) |
Default action which deletes the selected objects.
This action first displays a confirmation page which shows all the
deletable objects, or, if the user has no permission one of the related
childs (foreignkeys), a "permission denied" message.
Next, it deletes all selected objects and redirects back to the change list.
|
Default action which deletes the selected objects. | def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
This action first displays a confirmation page which shows all the
deletable objects, or, if the user has no permission one of the related
childs (foreignkeys), a "permission denied" message.
Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, model_count, perms_needed, protected = modeladmin.get_deleted_objects(queryset, request)
# The user has already confirmed the deletion.
# Do the deletion and return None to display the change list view again.
if request.POST.get('post') and not protected:
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = str(obj)
modeladmin.log_deletion(request, obj, obj_display)
modeladmin.delete_queryset(request, queryset)
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
}, messages.SUCCESS)
# Return None to display the change list page again.
return None
objects_name = model_ngettext(queryset)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
**modeladmin.admin_site.each_context(request),
'title': title,
'objects_name': str(objects_name),
'deletable_objects': [deletable_objects],
'model_count': dict(model_count).items(),
'queryset': queryset,
'perms_lacking': perms_needed,
'protected': protected,
'opts': opts,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'media': modeladmin.media,
}
request.current_app = modeladmin.admin_site.name
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context) | [
"def",
"delete_selected",
"(",
"modeladmin",
",",
"request",
",",
"queryset",
")",
":",
"opts",
"=",
"modeladmin",
".",
"model",
".",
"_meta",
"app_label",
"=",
"opts",
".",
"app_label",
"# Populate deletable_objects, a data structure of all related objects that",
"# will also be deleted.",
"deletable_objects",
",",
"model_count",
",",
"perms_needed",
",",
"protected",
"=",
"modeladmin",
".",
"get_deleted_objects",
"(",
"queryset",
",",
"request",
")",
"# The user has already confirmed the deletion.",
"# Do the deletion and return None to display the change list view again.",
"if",
"request",
".",
"POST",
".",
"get",
"(",
"'post'",
")",
"and",
"not",
"protected",
":",
"if",
"perms_needed",
":",
"raise",
"PermissionDenied",
"n",
"=",
"queryset",
".",
"count",
"(",
")",
"if",
"n",
":",
"for",
"obj",
"in",
"queryset",
":",
"obj_display",
"=",
"str",
"(",
"obj",
")",
"modeladmin",
".",
"log_deletion",
"(",
"request",
",",
"obj",
",",
"obj_display",
")",
"modeladmin",
".",
"delete_queryset",
"(",
"request",
",",
"queryset",
")",
"modeladmin",
".",
"message_user",
"(",
"request",
",",
"_",
"(",
"\"Successfully deleted %(count)d %(items)s.\"",
")",
"%",
"{",
"\"count\"",
":",
"n",
",",
"\"items\"",
":",
"model_ngettext",
"(",
"modeladmin",
".",
"opts",
",",
"n",
")",
"}",
",",
"messages",
".",
"SUCCESS",
")",
"# Return None to display the change list page again.",
"return",
"None",
"objects_name",
"=",
"model_ngettext",
"(",
"queryset",
")",
"if",
"perms_needed",
"or",
"protected",
":",
"title",
"=",
"_",
"(",
"\"Cannot delete %(name)s\"",
")",
"%",
"{",
"\"name\"",
":",
"objects_name",
"}",
"else",
":",
"title",
"=",
"_",
"(",
"\"Are you sure?\"",
")",
"context",
"=",
"{",
"*",
"*",
"modeladmin",
".",
"admin_site",
".",
"each_context",
"(",
"request",
")",
",",
"'title'",
":",
"title",
",",
"'objects_name'",
":",
"str",
"(",
"objects_name",
")",
",",
"'deletable_objects'",
":",
"[",
"deletable_objects",
"]",
",",
"'model_count'",
":",
"dict",
"(",
"model_count",
")",
".",
"items",
"(",
")",
",",
"'queryset'",
":",
"queryset",
",",
"'perms_lacking'",
":",
"perms_needed",
",",
"'protected'",
":",
"protected",
",",
"'opts'",
":",
"opts",
",",
"'action_checkbox_name'",
":",
"helpers",
".",
"ACTION_CHECKBOX_NAME",
",",
"'media'",
":",
"modeladmin",
".",
"media",
",",
"}",
"request",
".",
"current_app",
"=",
"modeladmin",
".",
"admin_site",
".",
"name",
"# Display the confirmation page",
"return",
"TemplateResponse",
"(",
"request",
",",
"modeladmin",
".",
"delete_selected_confirmation_template",
"or",
"[",
"\"admin/%s/%s/delete_selected_confirmation.html\"",
"%",
"(",
"app_label",
",",
"opts",
".",
"model_name",
")",
",",
"\"admin/%s/delete_selected_confirmation.html\"",
"%",
"app_label",
",",
"\"admin/delete_selected_confirmation.html\"",
"]",
",",
"context",
")"
] | [
17,
0
] | [
79,
15
] | python | en | ['en', 'error', 'th'] | False |
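
delete_selected delegates the actual removal to ModelAdmin.delete_queryset, so a site-specific deletion policy only needs to override that hook. A hedged sketch with a hypothetical Article model:

from django.contrib import admin

from myapp.models import Article  # hypothetical model


@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    actions = ["delete_selected"]  # the bulk action shown above

    def delete_queryset(self, request, queryset):
        # Soft delete instead of removing rows; delete_selected() still
        # drives confirmation, permissions and messaging.
        queryset.update(is_removed=True)  # is_removed is a hypothetical field
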
GroupBasedRecommender.__init__ | (self, train_files=None, test_file=None, output_file=None, similarity_metric="cosine", rank_length=10,
k_groups=3, recommender='UserKNN', as_binary=False, sep='\t', output_sep='\t', max_int_kmedoids=1000,
parser='', user_weights=False) |
Group-Based for Item Recommendation
This algorithm predicts a rank for each user using a co-clustering algorithm
Usage::
>> GroupBasedRecommender([train_history], test).compute()
>> GroupBasedRecommender([train_history, train_rating], test, as_binary=True).compute()
:param train_files: List of train files
:type train_files: list
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param similarity_metric: Pairwise metric to compute the similarity between the users. Reference about
distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
:type similarity_metric: str, default cosine
:param as_binary: If True, the explicit feedback will be transformed to binary
:type as_binary: bool, default False
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
|
Group-Based for Item Recommendation | def __init__(self, train_files=None, test_file=None, output_file=None, similarity_metric="cosine", rank_length=10,
k_groups=3, recommender='UserKNN', as_binary=False, sep='\t', output_sep='\t', max_int_kmedoids=1000,
parser='', user_weights=False):
"""
Group-Based for Item Recommendation
This algorithm predicts a rank for each user using a co-clustering algorithm
Usage::
>> GroupBasedRecommender([train_history], test).compute()
>> GroupBasedRecommender([train_history, train_rating], test, as_binary=True).compute()
:param train_files: List of train files
:type train_files: list
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param similarity_metric: Pairwise metric to compute the similarity between the users. Reference about
distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
:type similarity_metric: str, default cosine
:param as_binary: If True, the explicit feedback will be transformed to binary
:type as_binary: bool, default False
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(GroupBasedRecommender, self).__init__(train_file='', test_file=test_file,
output_file=output_file, as_binary=as_binary,
rank_length=rank_length, similarity_metric=similarity_metric,
sep=sep, output_sep=output_sep)
self.recommender_name = 'Group Based Recommender Algorithm'
self.train_files = train_files
self.k_groups = k_groups
self.recommender = recommender
self.max_int_kmedoids = max_int_kmedoids
self.parser = parser
self.user_weights = user_weights
# internal vars
self.n_files = 0
self.train_set_list = []
self.distance_matrix = None
self.dir_name = None
self.gb_train_files = []
self.weighted_matrices = []
self.k_users_in_cluster = [] | [
"def",
"__init__",
"(",
"self",
",",
"train_files",
"=",
"None",
",",
"test_file",
"=",
"None",
",",
"output_file",
"=",
"None",
",",
"similarity_metric",
"=",
"\"cosine\"",
",",
"rank_length",
"=",
"10",
",",
"k_groups",
"=",
"3",
",",
"recommender",
"=",
"'UserKNN'",
",",
"as_binary",
"=",
"False",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
",",
"max_int_kmedoids",
"=",
"1000",
",",
"parser",
"=",
"''",
",",
"user_weights",
"=",
"False",
")",
":",
"super",
"(",
"GroupBasedRecommender",
",",
"self",
")",
".",
"__init__",
"(",
"train_file",
"=",
"''",
",",
"test_file",
"=",
"test_file",
",",
"output_file",
"=",
"output_file",
",",
"as_binary",
"=",
"as_binary",
",",
"rank_length",
"=",
"rank_length",
",",
"similarity_metric",
"=",
"similarity_metric",
",",
"sep",
"=",
"sep",
",",
"output_sep",
"=",
"output_sep",
")",
"self",
".",
"recommender_name",
"=",
"'Group Based Recommender Algorithm'",
"self",
".",
"train_files",
"=",
"train_files",
"self",
".",
"k_groups",
"=",
"k_groups",
"self",
".",
"recommender",
"=",
"recommender",
"self",
".",
"max_int_kmedoids",
"=",
"max_int_kmedoids",
"self",
".",
"parser",
"=",
"parser",
"self",
".",
"user_weights",
"=",
"user_weights",
"# internal vars",
"self",
".",
"n_files",
"=",
"0",
"self",
".",
"train_set_list",
"=",
"[",
"]",
"self",
".",
"distance_matrix",
"=",
"None",
"self",
".",
"dir_name",
"=",
"None",
"self",
".",
"gb_train_files",
"=",
"[",
"]",
"self",
".",
"weighted_matrices",
"=",
"[",
"]",
"self",
".",
"k_users_in_cluster",
"=",
"[",
"]"
] | [
31,
4
] | [
89,
36
] | python | en | ['en', 'error', 'th'] | False |
GroupBasedRecommender.read_files | (self) |
Method to initialize recommender algorithm.
|
Method to initialize recommender algorithm. | def read_files(self):
"""
Method to initialize recommender algorithm.
"""
self.n_files = len(self.train_files)
self.users = []
self.items = []
for train_file in self.train_files:
train_set = ReadFile(train_file, sep=self.sep, as_binary=self.as_binary).read()
self.users += train_set['users']
self.items += train_set['items']
self.train_set_list.append(train_set)
self.dir_name = os.path.dirname(train_file)
self.users = set(self.users)
self.items = set(self.items)
if self.test_file is not None:
self.test_set = ReadFile(self.test_file).read()
self.users = sorted(set(list(self.users) + list(self.test_set['users'])))
self.items = sorted(set(list(self.items) + list(self.test_set['items'])))
for i, item in enumerate(self.items):
self.item_to_item_id.update({item: i})
self.item_id_to_item.update({i: item})
for u, user in enumerate(self.users):
self.user_to_user_id.update({user: u})
self.user_id_to_user.update({u: user}) | [
"def",
"read_files",
"(",
"self",
")",
":",
"self",
".",
"n_files",
"=",
"len",
"(",
"self",
".",
"train_files",
")",
"self",
".",
"users",
"=",
"[",
"]",
"self",
".",
"items",
"=",
"[",
"]",
"for",
"train_file",
"in",
"self",
".",
"train_files",
":",
"train_set",
"=",
"ReadFile",
"(",
"train_file",
",",
"sep",
"=",
"self",
".",
"sep",
",",
"as_binary",
"=",
"self",
".",
"as_binary",
")",
".",
"read",
"(",
")",
"self",
".",
"users",
"+=",
"train_set",
"[",
"'users'",
"]",
"self",
".",
"items",
"+=",
"train_set",
"[",
"'items'",
"]",
"self",
".",
"train_set_list",
".",
"append",
"(",
"train_set",
")",
"self",
".",
"dir_name",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"train_file",
")",
"self",
".",
"users",
"=",
"set",
"(",
"self",
".",
"users",
")",
"self",
".",
"items",
"=",
"set",
"(",
"self",
".",
"items",
")",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"self",
".",
"test_set",
"=",
"ReadFile",
"(",
"self",
".",
"test_file",
")",
".",
"read",
"(",
")",
"self",
".",
"users",
"=",
"sorted",
"(",
"set",
"(",
"list",
"(",
"self",
".",
"users",
")",
"+",
"list",
"(",
"self",
".",
"test_set",
"[",
"'users'",
"]",
")",
")",
")",
"self",
".",
"items",
"=",
"sorted",
"(",
"set",
"(",
"list",
"(",
"self",
".",
"items",
")",
"+",
"list",
"(",
"self",
".",
"test_set",
"[",
"'items'",
"]",
")",
")",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"self",
".",
"items",
")",
":",
"self",
".",
"item_to_item_id",
".",
"update",
"(",
"{",
"item",
":",
"i",
"}",
")",
"self",
".",
"item_id_to_item",
".",
"update",
"(",
"{",
"i",
":",
"item",
"}",
")",
"for",
"u",
",",
"user",
"in",
"enumerate",
"(",
"self",
".",
"users",
")",
":",
"self",
".",
"user_to_user_id",
".",
"update",
"(",
"{",
"user",
":",
"u",
"}",
")",
"self",
".",
"user_id_to_user",
".",
"update",
"(",
"{",
"u",
":",
"user",
"}",
")"
] | [
91,
4
] | [
122,
50
] | python | en | ['en', 'error', 'th'] | False |
GroupBasedRecommender.compute_distance | (self) |
Method to compute a distance matrix from train set
|
Method to compute a distance matrix from train set | def compute_distance(self):
"""
Method to compute a distance matrix from train set
"""
# Calculate distance matrix
distance_matrix = np.float32(squareform(pdist(self.matrix, self.similarity_metric)))
# Remove NaNs
distance_matrix[np.isnan(distance_matrix)] = 1.0
return distance_matrix | [
"def",
"compute_distance",
"(",
"self",
")",
":",
"# Calculate distance matrix",
"distance_matrix",
"=",
"np",
".",
"float32",
"(",
"squareform",
"(",
"pdist",
"(",
"self",
".",
"matrix",
",",
"self",
".",
"similarity_metric",
")",
")",
")",
"# Remove NaNs",
"distance_matrix",
"[",
"np",
".",
"isnan",
"(",
"distance_matrix",
")",
"]",
"=",
"1.0",
"return",
"distance_matrix"
] | [
124,
4
] | [
135,
30
] | python | en | ['en', 'error', 'th'] | False |
SelectionPreferences.__init__ | (
self,
allow_yanked: bool,
allow_all_prereleases: bool = False,
format_control: Optional[FormatControl] = None,
prefer_binary: bool = False,
ignore_requires_python: Optional[bool] = None,
) | Create a SelectionPreferences object.
:param allow_yanked: Whether files marked as yanked (in the sense
of PEP 592) are permitted to be candidates for install.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
:param prefer_binary: Whether to prefer an old, but valid, binary
dist over a new source dist.
:param ignore_requires_python: Whether to ignore incompatible
"Requires-Python" values in links. Defaults to False.
| Create a SelectionPreferences object. | def __init__(
self,
allow_yanked: bool,
allow_all_prereleases: bool = False,
format_control: Optional[FormatControl] = None,
prefer_binary: bool = False,
ignore_requires_python: Optional[bool] = None,
) -> None:
"""Create a SelectionPreferences object.
:param allow_yanked: Whether files marked as yanked (in the sense
of PEP 592) are permitted to be candidates for install.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
:param prefer_binary: Whether to prefer an old, but valid, binary
dist over a new source dist.
:param ignore_requires_python: Whether to ignore incompatible
"Requires-Python" values in links. Defaults to False.
"""
if ignore_requires_python is None:
ignore_requires_python = False
self.allow_yanked = allow_yanked
self.allow_all_prereleases = allow_all_prereleases
self.format_control = format_control
self.prefer_binary = prefer_binary
self.ignore_requires_python = ignore_requires_python | [
"def",
"__init__",
"(",
"self",
",",
"allow_yanked",
":",
"bool",
",",
"allow_all_prereleases",
":",
"bool",
"=",
"False",
",",
"format_control",
":",
"Optional",
"[",
"FormatControl",
"]",
"=",
"None",
",",
"prefer_binary",
":",
"bool",
"=",
"False",
",",
"ignore_requires_python",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"if",
"ignore_requires_python",
"is",
"None",
":",
"ignore_requires_python",
"=",
"False",
"self",
".",
"allow_yanked",
"=",
"allow_yanked",
"self",
".",
"allow_all_prereleases",
"=",
"allow_all_prereleases",
"self",
".",
"format_control",
"=",
"format_control",
"self",
".",
"prefer_binary",
"=",
"prefer_binary",
"self",
".",
"ignore_requires_python",
"=",
"ignore_requires_python"
] | [
18,
4
] | [
45,
60
] | python | en | ['en', 'en', 'en'] | True |
ValidationError.__init__ | (self, message, code=None, params=None) |
The `message` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set, and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
|
The `message` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set, and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
| def __init__(self, message, code=None, params=None):
"""
The `message` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set, and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
"""
super().__init__(message, code, params)
if isinstance(message, ValidationError):
if hasattr(message, 'error_dict'):
message = message.error_dict
elif not hasattr(message, 'message'):
message = message.error_list
else:
message, code, params = message.message, message.code, message.params
if isinstance(message, dict):
self.error_dict = {}
for field, messages in message.items():
if not isinstance(messages, ValidationError):
messages = ValidationError(messages)
self.error_dict[field] = messages.error_list
elif isinstance(message, list):
self.error_list = []
for message in message:
# Normalize plain strings to instances of ValidationError.
if not isinstance(message, ValidationError):
message = ValidationError(message)
if hasattr(message, 'error_dict'):
self.error_list.extend(sum(message.error_dict.values(), []))
else:
self.error_list.extend(message.error_list)
else:
self.message = message
self.code = code
self.params = params
self.error_list = [self] | [
"def",
"__init__",
"(",
"self",
",",
"message",
",",
"code",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"message",
",",
"code",
",",
"params",
")",
"if",
"isinstance",
"(",
"message",
",",
"ValidationError",
")",
":",
"if",
"hasattr",
"(",
"message",
",",
"'error_dict'",
")",
":",
"message",
"=",
"message",
".",
"error_dict",
"elif",
"not",
"hasattr",
"(",
"message",
",",
"'message'",
")",
":",
"message",
"=",
"message",
".",
"error_list",
"else",
":",
"message",
",",
"code",
",",
"params",
"=",
"message",
".",
"message",
",",
"message",
".",
"code",
",",
"message",
".",
"params",
"if",
"isinstance",
"(",
"message",
",",
"dict",
")",
":",
"self",
".",
"error_dict",
"=",
"{",
"}",
"for",
"field",
",",
"messages",
"in",
"message",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"messages",
",",
"ValidationError",
")",
":",
"messages",
"=",
"ValidationError",
"(",
"messages",
")",
"self",
".",
"error_dict",
"[",
"field",
"]",
"=",
"messages",
".",
"error_list",
"elif",
"isinstance",
"(",
"message",
",",
"list",
")",
":",
"self",
".",
"error_list",
"=",
"[",
"]",
"for",
"message",
"in",
"message",
":",
"# Normalize plain strings to instances of ValidationError.",
"if",
"not",
"isinstance",
"(",
"message",
",",
"ValidationError",
")",
":",
"message",
"=",
"ValidationError",
"(",
"message",
")",
"if",
"hasattr",
"(",
"message",
",",
"'error_dict'",
")",
":",
"self",
".",
"error_list",
".",
"extend",
"(",
"sum",
"(",
"message",
".",
"error_dict",
".",
"values",
"(",
")",
",",
"[",
"]",
")",
")",
"else",
":",
"self",
".",
"error_list",
".",
"extend",
"(",
"message",
".",
"error_list",
")",
"else",
":",
"self",
".",
"message",
"=",
"message",
"self",
".",
"code",
"=",
"code",
"self",
".",
"params",
"=",
"params",
"self",
".",
"error_list",
"=",
"[",
"self",
"]"
] | [
108,
4
] | [
149,
36
] | python | en | ['en', 'error', 'th'] | False |
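
The constructor above accepts three shapes of input — a single message, a list, or a dict keyed by field name. A short illustration using only Django itself:

from django.core.exceptions import ValidationError

# Single message: error_list holds the instance itself.
single = ValidationError("Invalid value", code="invalid")
assert single.error_list == [single]

# List of messages: each item is normalized to its own ValidationError.
several = ValidationError(["Too short", "Must contain a digit"])
assert len(several.error_list) == 2

# Dict of field -> messages: exposed through error_dict.
per_field = ValidationError({"email": ["Enter a valid address"]})
assert "email" in per_field.error_dict
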
convert_parquet_to_iceberg | (parquet_file: ParquetFile) |
Given two Iceberg schema's returns a list of column_names for all id's in the
file schema that are projected in the expected schema
Parameters
----------
parquet_file : pyarrow.parquet.ParquetFile
The Parquet File to use to extract the iceberg schema
Returns
-------
iceberg.api.Schema
returns an equivalent iceberg Schema based on the arrow schema read from the file
|
Given two Iceberg schema's returns a list of column_names for all id's in the
file schema that are projected in the expected schema | def convert_parquet_to_iceberg(parquet_file: ParquetFile) -> Schema: # noqa: ignore=C901
"""
Given two Iceberg schema's returns a list of column_names for all id's in the
file schema that are projected in the expected schema
Parameters
----------
parquet_file : pyarrow.parquet.ParquetFile
The Parquet File to use to extract the iceberg schema
Returns
-------
iceberg.api.Schema
returns an equivalent iceberg Schema based on the arrow schema read from the file
"""
return arrow_to_iceberg(parquet_file.schema_arrow) | [
"def",
"convert_parquet_to_iceberg",
"(",
"parquet_file",
":",
"ParquetFile",
")",
"->",
"Schema",
":",
"# noqa: ignore=C901",
"return",
"arrow_to_iceberg",
"(",
"parquet_file",
".",
"schema_arrow",
")"
] | [
119,
0
] | [
134,
54
] | python | en | ['en', 'error', 'th'] | False |
arrow_to_iceberg | (arrow_schema: pa.Schema) |
Use an arrow schema, which contains the field_id metadata, to create an equivalent iceberg Schema
Parameters
----------
arrow_schema : pyarrow.Schema
An Arrow schema with the parquet field_id metadata
Returns
-------
iceberg.api.Schema
returns an equivalent iceberg Schema based on the arrow schema read from the file
|
Use an arrow schema, which contains the field_id metadata, to create an equivalent iceberg Schema | def arrow_to_iceberg(arrow_schema: pa.Schema) -> Schema:
"""
Use an arrow schema, which contains the field_id metadata, to create an equivalent iceberg Schema
Parameters
----------
arrow_schema : pyarrow.Schema
An Arrow schema with the parquet field_id metadata
Returns
-------
iceberg.api.Schema
returns an equivalent iceberg Schema based on the arrow schema read from the file
"""
return Schema([get_field(col) for col in arrow_schema]) | [
"def",
"arrow_to_iceberg",
"(",
"arrow_schema",
":",
"pa",
".",
"Schema",
")",
"->",
"Schema",
":",
"return",
"Schema",
"(",
"[",
"get_field",
"(",
"col",
")",
"for",
"col",
"in",
"arrow_schema",
"]",
")"
] | [
137,
0
] | [
151,
59
] | python | en | ['en', 'error', 'th'] | False |
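
Both helpers above lean on the Parquet field_id metadata that pyarrow keeps on each Arrow field. A sketch of inspecting that metadata directly (the file path is a placeholder; Schema and get_field come from the iceberg package assumed by the snippet above):

import pyarrow.parquet as pq

parquet_file = pq.ParquetFile("data/part-00000.parquet")  # placeholder path
arrow_schema = parquet_file.schema_arrow

for field in arrow_schema:
    # pyarrow stores the embedded Parquet field id under this metadata key.
    field_id = (field.metadata or {}).get(b"PARQUET:field_id")
    print(field.name, field.type, field_id)
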
train_and_evaluate | (args) | Train and evaluate custom Estimator with three training modes.
Given the dictionary of parameters, create custom Estimator and run up to
three training modes then return Estimator object.
Args:
args: Dictionary of parameters.
Returns:
Estimator object.
| Train and evaluate custom Estimator with three training modes. | def train_and_evaluate(args):
"""Train and evaluate custom Estimator with three training modes.
Given the dictionary of parameters, create custom Estimator and run up to
three training modes then return Estimator object.
Args:
args: Dictionary of parameters.
Returns:
Estimator object.
"""
# Create our custom estimator using our model function
estimator = tf.estimator.Estimator(
model_fn=anomaly_detection,
model_dir=args["output_dir"],
params={key: val for key, val in args.items()})
if args["training_mode"] == "reconstruction":
# Calculate max_steps
max_steps = int(args["reconstruction_epochs"] * args["train_examples"])
max_steps = max_steps // args["train_batch_size"]
max_steps += args["previous_train_steps"]
# Create eval spec to read in our validation data
eval_spec = tf.estimator.EvalSpec(
input_fn=read_dataset(
filename=args["eval_file_pattern"],
mode=tf.estimator.ModeKeys.EVAL,
batch_size=args["eval_batch_size"],
params=args),
steps=None,
start_delay_secs=args["start_delay_secs"], # start eval after N secs
throttle_secs=args["throttle_secs"]) # evaluate every N secs
if args["model_type"] == "pca":
# Create train spec to read in our training data
train_spec = tf.estimator.TrainSpec(
input_fn=read_dataset(
filename=args["train_file_pattern"],
mode=tf.estimator.ModeKeys.EVAL, # read through train data once
batch_size=args["train_batch_size"],
params=args),
max_steps=max_steps)
# Check to see if we need to additionally tune principal components
if not args["autotune_principal_components"]:
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
else:
if (args["k_principal_components_time"] is None or
args["k_principal_components_feat"] is None):
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
else: # dense_autoencoder or lstm_enc_dec_autoencoder
# Create early stopping hook to help reduce overfitting
early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
estimator=estimator,
metric_name="rmse",
max_steps_without_decrease=100,
min_steps=1000,
run_every_secs=60,
run_every_steps=None)
# Create train spec to read in our training data
train_spec = tf.estimator.TrainSpec(
input_fn=read_dataset(
filename=args["train_file_pattern"],
mode=tf.estimator.ModeKeys.TRAIN,
batch_size=args["train_batch_size"],
params=args),
max_steps=max_steps,
hooks=[early_stopping_hook])
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
else:
# Calculate max_steps
max_steps = args["train_examples"] // args["train_batch_size"]
max_steps += args["previous_train_steps"]
# if args["training_mode"] == "calculate_error_distribution_statistics"
# Get final mahalanobis statistics over the entire val_1 dataset
# if args["training_mode"] == "tune_anomaly_thresholds"
# Tune anomaly thresholds using val_2 and val_anom datasets
train_spec = tf.estimator.TrainSpec(
input_fn=read_dataset(
filename=args["train_file_pattern"],
mode=tf.estimator.ModeKeys.EVAL, # read through val data once
batch_size=args["train_batch_size"],
params=args),
max_steps=max_steps)
if args["training_mode"] == "calculate_error_distribution_statistics":
# Don't create exporter for serving yet since anomaly thresholds
# aren't trained yet
exporter = None
elif args["training_mode"] == "tune_anomaly_thresholds":
# Create exporter that uses serving_input_fn to create saved_model
# for serving
exporter = tf.estimator.LatestExporter(
name="exporter",
serving_input_receiver_fn=lambda: serving_input_fn(
args["feat_names"], args["seq_len"]))
else:
print("{0} isn't a valid training mode!".format(args["training_mode"]))
# Create eval spec to read in our validation data and export our model
eval_spec = tf.estimator.EvalSpec(
input_fn=read_dataset(
filename=args["eval_file_pattern"],
mode=tf.estimator.ModeKeys.EVAL,
batch_size=args["eval_batch_size"],
params=args),
steps=None,
exporters=exporter,
start_delay_secs=args["start_delay_secs"], # start eval after N secs
throttle_secs=args["throttle_secs"]) # evaluate every N secs
if (args["training_mode"] == "calculate_error_distribution_statistics" or
args["training_mode"] == "tune_anomaly_thresholds"):
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
return | [
"def",
"train_and_evaluate",
"(",
"args",
")",
":",
"# Create our custom estimator using our model function",
"estimator",
"=",
"tf",
".",
"estimator",
".",
"Estimator",
"(",
"model_fn",
"=",
"anomaly_detection",
",",
"model_dir",
"=",
"args",
"[",
"\"output_dir\"",
"]",
",",
"params",
"=",
"{",
"key",
":",
"val",
"for",
"key",
",",
"val",
"in",
"args",
".",
"items",
"(",
")",
"}",
")",
"if",
"args",
"[",
"\"training_mode\"",
"]",
"==",
"\"reconstruction\"",
":",
"# Calculate max_steps",
"max_steps",
"=",
"int",
"(",
"args",
"[",
"\"reconstruction_epochs\"",
"]",
"*",
"args",
"[",
"\"train_examples\"",
"]",
")",
"max_steps",
"=",
"max_steps",
"//",
"args",
"[",
"\"train_batch_size\"",
"]",
"max_steps",
"+=",
"args",
"[",
"\"previous_train_steps\"",
"]",
"# Create eval spec to read in our validation data",
"eval_spec",
"=",
"tf",
".",
"estimator",
".",
"EvalSpec",
"(",
"input_fn",
"=",
"read_dataset",
"(",
"filename",
"=",
"args",
"[",
"\"eval_file_pattern\"",
"]",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"batch_size",
"=",
"args",
"[",
"\"eval_batch_size\"",
"]",
",",
"params",
"=",
"args",
")",
",",
"steps",
"=",
"None",
",",
"start_delay_secs",
"=",
"args",
"[",
"\"start_delay_secs\"",
"]",
",",
"# start eval after N secs",
"throttle_secs",
"=",
"args",
"[",
"\"throttle_secs\"",
"]",
")",
"# evaluate every N secs",
"if",
"args",
"[",
"\"model_type\"",
"]",
"==",
"\"pca\"",
":",
"# Create train spec to read in our training data",
"train_spec",
"=",
"tf",
".",
"estimator",
".",
"TrainSpec",
"(",
"input_fn",
"=",
"read_dataset",
"(",
"filename",
"=",
"args",
"[",
"\"train_file_pattern\"",
"]",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"# read through train data once",
"batch_size",
"=",
"args",
"[",
"\"train_batch_size\"",
"]",
",",
"params",
"=",
"args",
")",
",",
"max_steps",
"=",
"max_steps",
")",
"# Check to see if we need to additionally tune principal components",
"if",
"not",
"args",
"[",
"\"autotune_principal_components\"",
"]",
":",
"# Create train and evaluate loop to train and evaluate our estimator",
"tf",
".",
"estimator",
".",
"train_and_evaluate",
"(",
"estimator",
"=",
"estimator",
",",
"train_spec",
"=",
"train_spec",
",",
"eval_spec",
"=",
"eval_spec",
")",
"else",
":",
"if",
"(",
"args",
"[",
"\"k_principal_components_time\"",
"]",
"is",
"None",
"or",
"args",
"[",
"\"k_principal_components_feat\"",
"]",
"is",
"None",
")",
":",
"# Create train and evaluate loop to train and evaluate our estimator",
"tf",
".",
"estimator",
".",
"train_and_evaluate",
"(",
"estimator",
"=",
"estimator",
",",
"train_spec",
"=",
"train_spec",
",",
"eval_spec",
"=",
"eval_spec",
")",
"else",
":",
"# dense_autoencoder or lstm_enc_dec_autoencoder",
"# Create early stopping hook to help reduce overfitting",
"early_stopping_hook",
"=",
"tf",
".",
"contrib",
".",
"estimator",
".",
"stop_if_no_decrease_hook",
"(",
"estimator",
"=",
"estimator",
",",
"metric_name",
"=",
"\"rmse\"",
",",
"max_steps_without_decrease",
"=",
"100",
",",
"min_steps",
"=",
"1000",
",",
"run_every_secs",
"=",
"60",
",",
"run_every_steps",
"=",
"None",
")",
"# Create train spec to read in our training data",
"train_spec",
"=",
"tf",
".",
"estimator",
".",
"TrainSpec",
"(",
"input_fn",
"=",
"read_dataset",
"(",
"filename",
"=",
"args",
"[",
"\"train_file_pattern\"",
"]",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
",",
"batch_size",
"=",
"args",
"[",
"\"train_batch_size\"",
"]",
",",
"params",
"=",
"args",
")",
",",
"max_steps",
"=",
"max_steps",
",",
"hooks",
"=",
"[",
"early_stopping_hook",
"]",
")",
"# Create train and evaluate loop to train and evaluate our estimator",
"tf",
".",
"estimator",
".",
"train_and_evaluate",
"(",
"estimator",
"=",
"estimator",
",",
"train_spec",
"=",
"train_spec",
",",
"eval_spec",
"=",
"eval_spec",
")",
"else",
":",
"# Calculate max_steps",
"max_steps",
"=",
"args",
"[",
"\"train_examples\"",
"]",
"//",
"args",
"[",
"\"train_batch_size\"",
"]",
"max_steps",
"+=",
"args",
"[",
"\"previous_train_steps\"",
"]",
"# if args[\"training_mode\"] == \"calculate_error_distribution_statistics\"",
"# Get final mahalanobis statistics over the entire val_1 dataset",
"# if args[\"training_mode\"] == \"tune_anomaly_thresholds\"",
"# Tune anomaly thresholds using val_2 and val_anom datasets",
"train_spec",
"=",
"tf",
".",
"estimator",
".",
"TrainSpec",
"(",
"input_fn",
"=",
"read_dataset",
"(",
"filename",
"=",
"args",
"[",
"\"train_file_pattern\"",
"]",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"# read through val data once",
"batch_size",
"=",
"args",
"[",
"\"train_batch_size\"",
"]",
",",
"params",
"=",
"args",
")",
",",
"max_steps",
"=",
"max_steps",
")",
"if",
"args",
"[",
"\"training_mode\"",
"]",
"==",
"\"calculate_error_distribution_statistics\"",
":",
"# Don't create exporter for serving yet since anomaly thresholds",
"# aren't trained yet",
"exporter",
"=",
"None",
"elif",
"args",
"[",
"\"training_mode\"",
"]",
"==",
"\"tune_anomaly_thresholds\"",
":",
"# Create exporter that uses serving_input_fn to create saved_model",
"# for serving",
"exporter",
"=",
"tf",
".",
"estimator",
".",
"LatestExporter",
"(",
"name",
"=",
"\"exporter\"",
",",
"serving_input_receiver_fn",
"=",
"lambda",
":",
"serving_input_fn",
"(",
"args",
"[",
"\"feat_names\"",
"]",
",",
"args",
"[",
"\"seq_len\"",
"]",
")",
")",
"else",
":",
"print",
"(",
"\"{0} isn't a valid training mode!\"",
".",
"format",
"(",
"args",
"[",
"\"training_mode\"",
"]",
")",
")",
"# Create eval spec to read in our validation data and export our model",
"eval_spec",
"=",
"tf",
".",
"estimator",
".",
"EvalSpec",
"(",
"input_fn",
"=",
"read_dataset",
"(",
"filename",
"=",
"args",
"[",
"\"eval_file_pattern\"",
"]",
",",
"mode",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"EVAL",
",",
"batch_size",
"=",
"args",
"[",
"\"eval_batch_size\"",
"]",
",",
"params",
"=",
"args",
")",
",",
"steps",
"=",
"None",
",",
"exporters",
"=",
"exporter",
",",
"start_delay_secs",
"=",
"args",
"[",
"\"start_delay_secs\"",
"]",
",",
"# start eval after N secs",
"throttle_secs",
"=",
"args",
"[",
"\"throttle_secs\"",
"]",
")",
"# evaluate every N secs",
"if",
"(",
"args",
"[",
"\"training_mode\"",
"]",
"==",
"\"calculate_error_distribution_statistics\"",
"or",
"args",
"[",
"\"training_mode\"",
"]",
"==",
"\"tune_anomaly_thresholds\"",
")",
":",
"# Create train and evaluate loop to train and evaluate our estimator",
"tf",
".",
"estimator",
".",
"train_and_evaluate",
"(",
"estimator",
"=",
"estimator",
",",
"train_spec",
"=",
"train_spec",
",",
"eval_spec",
"=",
"eval_spec",
")",
"return"
] | [
10,
0
] | [
138,
8
] | python | en | ['en', 'en', 'en'] | True |
ImagePalette.getdata | (self) |
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive.
.. warning:: This method is experimental.
|
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive. | def getdata(self):
"""
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive.
.. warning:: This method is experimental.
"""
if self.rawmode:
return self.rawmode, self.palette
return self.mode, self.tobytes() | [
"def",
"getdata",
"(",
"self",
")",
":",
"if",
"self",
".",
"rawmode",
":",
"return",
"self",
".",
"rawmode",
",",
"self",
".",
"palette",
"return",
"self",
".",
"mode",
",",
"self",
".",
"tobytes",
"(",
")"
] | [
72,
4
] | [
81,
40
] | python | en | ['en', 'error', 'th'] | False |
ImagePalette.tobytes | (self) | Convert palette to bytes.
.. warning:: This method is experimental.
| Convert palette to bytes. | def tobytes(self):
"""Convert palette to bytes.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(self.palette, bytes):
return self.palette
arr = array.array("B", self.palette)
return arr.tobytes() | [
"def",
"tobytes",
"(",
"self",
")",
":",
"if",
"self",
".",
"rawmode",
":",
"raise",
"ValueError",
"(",
"\"palette contains raw palette data\"",
")",
"if",
"isinstance",
"(",
"self",
".",
"palette",
",",
"bytes",
")",
":",
"return",
"self",
".",
"palette",
"arr",
"=",
"array",
".",
"array",
"(",
"\"B\"",
",",
"self",
".",
"palette",
")",
"return",
"arr",
".",
"tobytes",
"(",
")"
] | [
83,
4
] | [
93,
28
] | python | en | ['en', 'en', 'en'] | True |
ImagePalette.getcolor | (self, color, image=None) | Given an rgb tuple, allocate palette entry.
.. warning:: This method is experimental.
| Given an rgb tuple, allocate palette entry. | def getcolor(self, color, image=None):
"""Given an rgb tuple, allocate palette entry.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(color, tuple):
if self.mode == "RGB":
if len(color) == 4 and color[3] == 255:
color = color[:3]
elif self.mode == "RGBA":
if len(color) == 3:
color += (255,)
try:
return self.colors[color]
except KeyError as e:
# allocate new color slot
if not isinstance(self.palette, bytearray):
self._palette = bytearray(self.palette)
index = len(self.palette) // 3
special_colors = ()
if image:
special_colors = (
image.info.get("background"),
image.info.get("transparency"),
)
while index in special_colors:
index += 1
if index >= 256:
if image:
# Search for an unused index
for i, count in reversed(list(enumerate(image.histogram()))):
if count == 0 and i not in special_colors:
index = i
break
if index >= 256:
raise ValueError("cannot allocate more than 256 colors") from e
self.colors[color] = index
if index * 3 < len(self.palette):
self._palette = (
self.palette[: index * 3]
+ bytes(color)
+ self.palette[index * 3 + 3 :]
)
else:
self._palette += bytes(color)
self.dirty = 1
return index
else:
raise ValueError(f"unknown color specifier: {repr(color)}") | [
"def",
"getcolor",
"(",
"self",
",",
"color",
",",
"image",
"=",
"None",
")",
":",
"if",
"self",
".",
"rawmode",
":",
"raise",
"ValueError",
"(",
"\"palette contains raw palette data\"",
")",
"if",
"isinstance",
"(",
"color",
",",
"tuple",
")",
":",
"if",
"self",
".",
"mode",
"==",
"\"RGB\"",
":",
"if",
"len",
"(",
"color",
")",
"==",
"4",
"and",
"color",
"[",
"3",
"]",
"==",
"255",
":",
"color",
"=",
"color",
"[",
":",
"3",
"]",
"elif",
"self",
".",
"mode",
"==",
"\"RGBA\"",
":",
"if",
"len",
"(",
"color",
")",
"==",
"3",
":",
"color",
"+=",
"(",
"255",
",",
")",
"try",
":",
"return",
"self",
".",
"colors",
"[",
"color",
"]",
"except",
"KeyError",
"as",
"e",
":",
"# allocate new color slot",
"if",
"not",
"isinstance",
"(",
"self",
".",
"palette",
",",
"bytearray",
")",
":",
"self",
".",
"_palette",
"=",
"bytearray",
"(",
"self",
".",
"palette",
")",
"index",
"=",
"len",
"(",
"self",
".",
"palette",
")",
"//",
"3",
"special_colors",
"=",
"(",
")",
"if",
"image",
":",
"special_colors",
"=",
"(",
"image",
".",
"info",
".",
"get",
"(",
"\"background\"",
")",
",",
"image",
".",
"info",
".",
"get",
"(",
"\"transparency\"",
")",
",",
")",
"while",
"index",
"in",
"special_colors",
":",
"index",
"+=",
"1",
"if",
"index",
">=",
"256",
":",
"if",
"image",
":",
"# Search for an unused index",
"for",
"i",
",",
"count",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"image",
".",
"histogram",
"(",
")",
")",
")",
")",
":",
"if",
"count",
"==",
"0",
"and",
"i",
"not",
"in",
"special_colors",
":",
"index",
"=",
"i",
"break",
"if",
"index",
">=",
"256",
":",
"raise",
"ValueError",
"(",
"\"cannot allocate more than 256 colors\"",
")",
"from",
"e",
"self",
".",
"colors",
"[",
"color",
"]",
"=",
"index",
"if",
"index",
"*",
"3",
"<",
"len",
"(",
"self",
".",
"palette",
")",
":",
"self",
".",
"_palette",
"=",
"(",
"self",
".",
"palette",
"[",
":",
"index",
"*",
"3",
"]",
"+",
"bytes",
"(",
"color",
")",
"+",
"self",
".",
"palette",
"[",
"index",
"*",
"3",
"+",
"3",
":",
"]",
")",
"else",
":",
"self",
".",
"_palette",
"+=",
"bytes",
"(",
"color",
")",
"self",
".",
"dirty",
"=",
"1",
"return",
"index",
"else",
":",
"raise",
"ValueError",
"(",
"f\"unknown color specifier: {repr(color)}\"",
")"
] | [
98,
4
] | [
148,
71
] | python | en | ['en', 'en', 'it'] | True |
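
getcolor is usually reached through a "P"-mode image rather than called directly; a minimal sketch assuming only Pillow:

from PIL import Image, ImagePalette

palette = ImagePalette.ImagePalette("RGB")
im = Image.new("P", (4, 4))

# Allocate (or reuse) a palette slot for an RGB tuple; repeated lookups are cached.
idx = palette.getcolor((255, 0, 0), im)
assert palette.getcolor((255, 0, 0), im) == idx
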
ImagePalette.save | (self, fp) | Save palette to text file.
.. warning:: This method is experimental.
| Save palette to text file. | def save(self, fp):
"""Save palette to text file.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(fp, str):
fp = open(fp, "w")
fp.write("# Palette\n")
fp.write(f"# Mode: {self.mode}\n")
for i in range(256):
fp.write(f"{i}")
for j in range(i * len(self.mode), (i + 1) * len(self.mode)):
try:
fp.write(f" {self.palette[j]}")
except IndexError:
fp.write(" 0")
fp.write("\n")
fp.close() | [
"def",
"save",
"(",
"self",
",",
"fp",
")",
":",
"if",
"self",
".",
"rawmode",
":",
"raise",
"ValueError",
"(",
"\"palette contains raw palette data\"",
")",
"if",
"isinstance",
"(",
"fp",
",",
"str",
")",
":",
"fp",
"=",
"open",
"(",
"fp",
",",
"\"w\"",
")",
"fp",
".",
"write",
"(",
"\"# Palette\\n\"",
")",
"fp",
".",
"write",
"(",
"f\"# Mode: {self.mode}\\n\"",
")",
"for",
"i",
"in",
"range",
"(",
"256",
")",
":",
"fp",
".",
"write",
"(",
"f\"{i}\"",
")",
"for",
"j",
"in",
"range",
"(",
"i",
"*",
"len",
"(",
"self",
".",
"mode",
")",
",",
"(",
"i",
"+",
"1",
")",
"*",
"len",
"(",
"self",
".",
"mode",
")",
")",
":",
"try",
":",
"fp",
".",
"write",
"(",
"f\" {self.palette[j]}\"",
")",
"except",
"IndexError",
":",
"fp",
".",
"write",
"(",
"\" 0\"",
")",
"fp",
".",
"write",
"(",
"\"\\n\"",
")",
"fp",
".",
"close",
"(",
")"
] | [
150,
4
] | [
169,
18
] | python | en | ['en', 'en', 'en'] | True |
load_coco_classes | (coco) | Loads the class to label mapping (and inverse) for COCO.
| Loads the class to label mapping (and inverse) for COCO.
| def load_coco_classes(coco):
""" Loads the class to label mapping (and inverse) for COCO.
"""
# load class names (name -> label)
categories = coco.loadCats(coco.getCatIds())
categories.sort(key=lambda x: x["id"])
classes = {}
coco_labels = {}
coco_labels_inverse = {}
for c in categories:
coco_labels[len(classes)] = c["id"]
coco_labels_inverse[c["id"]] = len(classes)
classes[c["name"]] = len(classes)
return classes, coco_labels, coco_labels_inverse | [
"def",
"load_coco_classes",
"(",
"coco",
")",
":",
"# load class names (name -> label)",
"categories",
"=",
"coco",
".",
"loadCats",
"(",
"coco",
".",
"getCatIds",
"(",
")",
")",
"categories",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"id\"",
"]",
")",
"classes",
"=",
"{",
"}",
"coco_labels",
"=",
"{",
"}",
"coco_labels_inverse",
"=",
"{",
"}",
"for",
"c",
"in",
"categories",
":",
"coco_labels",
"[",
"len",
"(",
"classes",
")",
"]",
"=",
"c",
"[",
"\"id\"",
"]",
"coco_labels_inverse",
"[",
"c",
"[",
"\"id\"",
"]",
"]",
"=",
"len",
"(",
"classes",
")",
"classes",
"[",
"c",
"[",
"\"name\"",
"]",
"]",
"=",
"len",
"(",
"classes",
")",
"return",
"classes",
",",
"coco_labels",
",",
"coco_labels_inverse"
] | [
21,
0
] | [
36,
52
] | python | en | ['en', 'en', 'en'] | True |
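
A sketch of driving the helper above with pycocotools; the annotation path is a placeholder:

from pycocotools.coco import COCO

coco = COCO("annotations/instances_val2017.json")  # placeholder path
classes, coco_labels, coco_labels_inverse = load_coco_classes(coco)

# Contiguous 0-based label for a category name, plus the reverse mappings.
person_label = classes["person"]
category_id = coco_labels[person_label]
assert coco_labels_inverse[category_id] == person_label
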
_execfile | (filename, globals, locals=None) |
Python 3 implementation of execfile.
|
Python 3 implementation of execfile.
| def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals) | [
"def",
"_execfile",
"(",
"filename",
",",
"globals",
",",
"locals",
"=",
"None",
")",
":",
"mode",
"=",
"'rb'",
"with",
"open",
"(",
"filename",
",",
"mode",
")",
"as",
"stream",
":",
"script",
"=",
"stream",
".",
"read",
"(",
")",
"if",
"locals",
"is",
"None",
":",
"locals",
"=",
"globals",
"code",
"=",
"compile",
"(",
"script",
",",
"filename",
",",
"'exec'",
")",
"exec",
"(",
"code",
",",
"globals",
",",
"locals",
")"
] | [
34,
0
] | [
44,
31
] | python | en | ['en', 'error', 'th'] | False |
override_temp | (replacement) |
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
|
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
| def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
pkg_resources.py31compat.makedirs(replacement, exist_ok=True)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved | [
"def",
"override_temp",
"(",
"replacement",
")",
":",
"pkg_resources",
".",
"py31compat",
".",
"makedirs",
"(",
"replacement",
",",
"exist_ok",
"=",
"True",
")",
"saved",
"=",
"tempfile",
".",
"tempdir",
"tempfile",
".",
"tempdir",
"=",
"replacement",
"try",
":",
"yield",
"finally",
":",
"tempfile",
".",
"tempdir",
"=",
"saved"
] | [
68,
0
] | [
81,
32
] | python | en | ['en', 'error', 'th'] | False |
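
override_temp is wired up as a context manager in setuptools.sandbox; a small sketch of using it with a throwaway directory:

import tempfile

from setuptools.sandbox import override_temp

with override_temp("/tmp/sandbox-scratch"):  # created if missing
    with tempfile.NamedTemporaryFile() as tf:
        print(tf.name)  # lands under /tmp/sandbox-scratch
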
save_modules | () |
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
|
Context in which imported modules are saved. | def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume() | [
"def",
"save_modules",
"(",
")",
":",
"saved",
"=",
"sys",
".",
"modules",
".",
"copy",
"(",
")",
"with",
"ExceptionSaver",
"(",
")",
"as",
"saved_exc",
":",
"yield",
"saved",
"sys",
".",
"modules",
".",
"update",
"(",
"saved",
")",
"# remove any modules imported since",
"del_modules",
"=",
"(",
"mod_name",
"for",
"mod_name",
"in",
"sys",
".",
"modules",
"if",
"mod_name",
"not",
"in",
"saved",
"# exclude any encodings modules. See #285",
"and",
"not",
"mod_name",
".",
"startswith",
"(",
"'encodings.'",
")",
")",
"_clear_modules",
"(",
"del_modules",
")",
"saved_exc",
".",
"resume",
"(",
")"
] | [
144,
0
] | [
165,
22
] | python | en | ['en', 'error', 'th'] | False |
_needs_hiding | (mod_name) |
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
|
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
| def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name)) | [
"def",
"_needs_hiding",
"(",
"mod_name",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'(setuptools|pkg_resources|distutils|Cython)(\\.|$)'",
")",
"return",
"bool",
"(",
"pattern",
".",
"match",
"(",
"mod_name",
")",
")"
] | [
197,
0
] | [
215,
40
] | python | en | ['en', 'error', 'th'] | False |
hide_setuptools | () |
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
|
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
| def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules) | [
"def",
"hide_setuptools",
"(",
")",
":",
"modules",
"=",
"filter",
"(",
"_needs_hiding",
",",
"sys",
".",
"modules",
")",
"_clear_modules",
"(",
"modules",
")"
] | [
218,
0
] | [
226,
27
] | python | en | ['en', 'error', 'th'] | False |
run_setup | (setup_script, args) | Run a distutils setup script, sandboxed in its directory | Run a distutils setup script, sandboxed in its directory | def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
dunder_file = (
setup_script
if isinstance(setup_script, str) else
setup_script.encode(sys.getfilesystemencoding())
)
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise | [
"def",
"run_setup",
"(",
"setup_script",
",",
"args",
")",
":",
"setup_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"setup_script",
")",
")",
"with",
"setup_context",
"(",
"setup_dir",
")",
":",
"try",
":",
"sys",
".",
"argv",
"[",
":",
"]",
"=",
"[",
"setup_script",
"]",
"+",
"list",
"(",
"args",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"setup_dir",
")",
"# reset to include setup dir, w/clean callback list",
"working_set",
".",
"__init__",
"(",
")",
"working_set",
".",
"callbacks",
".",
"append",
"(",
"lambda",
"dist",
":",
"dist",
".",
"activate",
"(",
")",
")",
"# __file__ should be a byte string on Python 2 (#712)",
"dunder_file",
"=",
"(",
"setup_script",
"if",
"isinstance",
"(",
"setup_script",
",",
"str",
")",
"else",
"setup_script",
".",
"encode",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
")",
"with",
"DirectorySandbox",
"(",
"setup_dir",
")",
":",
"ns",
"=",
"dict",
"(",
"__file__",
"=",
"dunder_file",
",",
"__name__",
"=",
"'__main__'",
")",
"_execfile",
"(",
"setup_script",
",",
"ns",
")",
"except",
"SystemExit",
"as",
"v",
":",
"if",
"v",
".",
"args",
"and",
"v",
".",
"args",
"[",
"0",
"]",
":",
"raise"
] | [
229,
0
] | [
252,
21
] | python | en | ['en', 'lb', 'en'] | True |
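
run_setup is the public entry point of setuptools.sandbox; a minimal sketch with a placeholder path:

from setuptools.sandbox import run_setup

# Execute a third-party setup script from its own directory, with writes
# outside that directory blocked by DirectorySandbox.
run_setup("/tmp/somepkg-1.0/setup.py", ["--version"])
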
UnpickleableException.dump | (type, exc) |
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
|
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
| def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc))) | [
"def",
"dump",
"(",
"type",
",",
"exc",
")",
":",
"try",
":",
"return",
"pickle",
".",
"dumps",
"(",
"type",
")",
",",
"pickle",
".",
"dumps",
"(",
"exc",
")",
"except",
"Exception",
":",
"# get UnpickleableException inside the sandbox",
"from",
"setuptools",
".",
"sandbox",
"import",
"UnpickleableException",
"as",
"cls",
"return",
"cls",
".",
"dump",
"(",
"cls",
",",
"cls",
"(",
"repr",
"(",
"exc",
")",
")",
")"
] | [
100,
4
] | [
110,
48
] | python | en | ['en', 'error', 'th'] | False |
ExceptionSaver.resume | (self) | restore and re-raise any exception | restore and re-raise any exception | def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb) | [
"def",
"resume",
"(",
"self",
")",
":",
"if",
"'_saved'",
"not",
"in",
"vars",
"(",
"self",
")",
":",
"return",
"type",
",",
"exc",
"=",
"map",
"(",
"pickle",
".",
"loads",
",",
"self",
".",
"_saved",
")",
"six",
".",
"reraise",
"(",
"type",
",",
"exc",
",",
"self",
".",
"_tb",
")"
] | [
133,
4
] | [
140,
40
] | python | en | ['en', 'en', 'en'] | True |
AbstractSandbox.run | (self, func) | Run 'func' under os sandboxing | Run 'func' under os sandboxing | def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func() | [
"def",
"run",
"(",
"self",
",",
"func",
")",
":",
"with",
"self",
":",
"return",
"func",
"(",
")"
] | [
285,
4
] | [
288,
25
] | python | es | ['es', 'jv', 'pt'] | False |
AbstractSandbox._validate_path | (self, path) | Called to remap or validate any path, whether input or output | Called to remap or validate any path, whether input or output | def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path | [
"def",
"_validate_path",
"(",
"self",
",",
"path",
")",
":",
"return",
"path"
] | [ 355, 4 ] | [ 357, 19 ] | python | en | ['en', 'en', 'en'] | True
AbstractSandbox._remap_input | (self, operation, path, *args, **kw) | Called for path inputs | Called for path inputs | def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path) | [
"def",
"_remap_input",
"(",
"self",
",",
"operation",
",",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"self",
".",
"_validate_path",
"(",
"path",
")"
] | [ 359, 4 ] | [ 361, 40 ] | python | en | ['en', 'en', 'en'] | True
AbstractSandbox._remap_output | (self, operation, path) | Called for path outputs | Called for path outputs | def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path) | [
"def",
"_remap_output",
"(",
"self",
",",
"operation",
",",
"path",
")",
":",
"return",
"self",
".",
"_validate_path",
"(",
"path",
")"
] | [ 363, 4 ] | [ 365, 40 ] | python | en | ['en', 'en', 'en'] | True
AbstractSandbox._remap_pair | (self, operation, src, dst, *args, **kw) | Called for path pairs like rename, link, and symlink operations | Called for path pairs like rename, link, and symlink operations | def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
) | [
"def",
"_remap_pair",
"(",
"self",
",",
"operation",
",",
"src",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"(",
"self",
".",
"_remap_input",
"(",
"operation",
"+",
"'-from'",
",",
"src",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
",",
"self",
".",
"_remap_input",
"(",
"operation",
"+",
"'-to'",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
")"
] | [ 367, 4 ] | [ 372, 9 ] | python | en | ['en', 'en', 'en'] | True
DirectorySandbox._remap_input | (self, operation, path, *args, **kw) | Called for path inputs | Called for path inputs | def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path | [
"def",
"_remap_input",
"(",
"self",
",",
"operation",
",",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"operation",
"in",
"self",
".",
"write_ops",
"and",
"not",
"self",
".",
"_ok",
"(",
"path",
")",
":",
"self",
".",
"_violation",
"(",
"operation",
",",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"path"
] | [ 448, 4 ] | [ 452, 19 ] | python | en | ['en', 'en', 'en'] | True
DirectorySandbox._remap_pair | (self, operation, src, dst, *args, **kw) | Called for path pairs like rename, link, and symlink operations | Called for path pairs like rename, link, and symlink operations | def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst) | [
"def",
"_remap_pair",
"(",
"self",
",",
"operation",
",",
"src",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"self",
".",
"_ok",
"(",
"src",
")",
"or",
"not",
"self",
".",
"_ok",
"(",
"dst",
")",
":",
"self",
".",
"_violation",
"(",
"operation",
",",
"src",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"(",
"src",
",",
"dst",
")"
] | [ 454, 4 ] | [ 458, 25 ] | python | en | ['en', 'en', 'en'] | True
DirectorySandbox.open | (self, file, flags, mode=0o777, *args, **kw) | Called for low-level os.open() | Called for low-level os.open() | def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw) | [
"def",
"open",
"(",
"self",
",",
"file",
",",
"flags",
",",
"mode",
"=",
"0o777",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"flags",
"&",
"WRITE_FLAGS",
"and",
"not",
"self",
".",
"_ok",
"(",
"file",
")",
":",
"self",
".",
"_violation",
"(",
"\"os.open\"",
",",
"file",
",",
"flags",
",",
"mode",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"_os",
".",
"open",
"(",
"file",
",",
"flags",
",",
"mode",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | [ 460, 4 ] | [ 464, 55 ] | python | en | ['en', 'en', 'en'] | True
transform | (data, ops=[]) | transform | transform | def transform(data, ops=[]):
""" transform """
for op in ops:
data = op(data)
return data | [
"def",
"transform",
"(",
"data",
",",
"ops",
"=",
"[",
"]",
")",
":",
"for",
"op",
"in",
"ops",
":",
"data",
"=",
"op",
"(",
"data",
")",
"return",
"data"
] | [ 39, 0 ] | [ 43, 15 ] | python | en | ['en', 'ru', 'en'] | False
wsgi_to_bytes | (data) | coerce wsgi unicode represented bytes to real ones | coerce wsgi unicode represented bytes to real ones | def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones"""
if isinstance(data, bytes):
return data
return data.encode("latin1") | [
"def",
"wsgi_to_bytes",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"return",
"data",
"return",
"data",
".",
"encode",
"(",
"\"latin1\"",
")"
] | [ 199, 0 ] | [ 203, 32 ] | python | en | ['en', 'sn', 'en'] | True
quote_header_value | (value, extra_chars="", allow_token=True) | Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
| Quote a header value if necessary. | def quote_header_value(value, extra_chars="", allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"') | [
"def",
"quote_header_value",
"(",
"value",
",",
"extra_chars",
"=",
"\"\"",
",",
"allow_token",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"value",
"=",
"bytes_to_wsgi",
"(",
"value",
")",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"allow_token",
":",
"token_chars",
"=",
"_token_chars",
"|",
"set",
"(",
"extra_chars",
")",
"if",
"set",
"(",
"value",
")",
".",
"issubset",
"(",
"token_chars",
")",
":",
"return",
"value",
"return",
"'\"%s\"'",
"%",
"value",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")"
] | [ 214, 0 ] | [ 231, 67 ] | python | en | ['en', 'en', 'en'] | True
unquote_header_value | (value, is_filename=False) | r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
| r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting. | def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != "\\\\":
return value.replace("\\\\", "\\").replace('\\"', '"')
return value | [
"def",
"unquote_header_value",
"(",
"value",
",",
"is_filename",
"=",
"False",
")",
":",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"==",
"value",
"[",
"-",
"1",
"]",
"==",
"'\"'",
":",
"# this is not the real unquoting, but fixing this so that the",
"# RFC is met will result in bugs with internet explorer and",
"# probably some other browsers as well. IE for example is",
"# uploading files with \"C:\\foo\\bar.txt\" as filename",
"value",
"=",
"value",
"[",
"1",
":",
"-",
"1",
"]",
"# if this is a filename and the starting characters look like",
"# a UNC path, then just return the value without quotes. Using the",
"# replace sequence below on a UNC path has the effect of turning",
"# the leading double slash into a single slash and then",
"# _fix_ie_filename() doesn't work correctly. See #458.",
"if",
"not",
"is_filename",
"or",
"value",
"[",
":",
"2",
"]",
"!=",
"\"\\\\\\\\\"",
":",
"return",
"value",
".",
"replace",
"(",
"\"\\\\\\\\\"",
",",
"\"\\\\\"",
")",
".",
"replace",
"(",
"'\\\\\"'",
",",
"'\"'",
")",
"return",
"value"
] | [ 234, 0 ] | [ 257, 16 ] | python | en | ['en', 'en', 'en'] | True
dump_options_header | (header, options) | The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
| The reverse function to :func:`parse_options_header`. | def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append("%s=%s" % (key, quote_header_value(value)))
return "; ".join(segments) | [
"def",
"dump_options_header",
"(",
"header",
",",
"options",
")",
":",
"segments",
"=",
"[",
"]",
"if",
"header",
"is",
"not",
"None",
":",
"segments",
".",
"append",
"(",
"header",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"options",
")",
":",
"if",
"value",
"is",
"None",
":",
"segments",
".",
"append",
"(",
"key",
")",
"else",
":",
"segments",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"key",
",",
"quote_header_value",
"(",
"value",
")",
")",
")",
"return",
"\"; \"",
".",
"join",
"(",
"segments",
")"
] | [ 260, 0 ] | [ 274, 30 ] | python | en | ['en', 'en', 'en'] | True
dump_header | (iterable, allow_token=True) | Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
| Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs. | def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append(
"%s=%s" % (key, quote_header_value(value, allow_token=allow_token))
)
else:
items = [quote_header_value(x, allow_token=allow_token) for x in iterable]
return ", ".join(items) | [
"def",
"dump_header",
"(",
"iterable",
",",
"allow_token",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"iterable",
",",
"dict",
")",
":",
"items",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"iterable",
")",
":",
"if",
"value",
"is",
"None",
":",
"items",
".",
"append",
"(",
"key",
")",
"else",
":",
"items",
".",
"append",
"(",
"\"%s=%s\"",
"%",
"(",
"key",
",",
"quote_header_value",
"(",
"value",
",",
"allow_token",
"=",
"allow_token",
")",
")",
")",
"else",
":",
"items",
"=",
"[",
"quote_header_value",
"(",
"x",
",",
"allow_token",
"=",
"allow_token",
")",
"for",
"x",
"in",
"iterable",
"]",
"return",
"\", \"",
".",
"join",
"(",
"items",
")"
] | [ 277, 0 ] | [ 303, 27 ] | python | en | ['en', 'en', 'en'] | True
parse_list_header | (value) | Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
| Parse lists as described by RFC 2068 Section 2. | def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result | [
"def",
"parse_list_header",
"(",
"value",
")",
":",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"_parse_list_header",
"(",
"value",
")",
":",
"if",
"item",
"[",
":",
"1",
"]",
"==",
"item",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"item",
"=",
"unquote_header_value",
"(",
"item",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
".",
"append",
"(",
"item",
")",
"return",
"result"
] | [ 306, 0 ] | [ 333, 17 ] | python | en | ['en', 'en', 'en'] | True
parse_dict_header | (value, cls=dict) | Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
| Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict like interface provided by the `cls` argument): | def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
# XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if "=" not in item:
result[item] = None
continue
name, value = item.split("=", 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result | [
"def",
"parse_dict_header",
"(",
"value",
",",
"cls",
"=",
"dict",
")",
":",
"result",
"=",
"cls",
"(",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"text_type",
")",
":",
"# XXX: validate",
"value",
"=",
"bytes_to_wsgi",
"(",
"value",
")",
"for",
"item",
"in",
"_parse_list_header",
"(",
"value",
")",
":",
"if",
"\"=\"",
"not",
"in",
"item",
":",
"result",
"[",
"item",
"]",
"=",
"None",
"continue",
"name",
",",
"value",
"=",
"item",
".",
"split",
"(",
"\"=\"",
",",
"1",
")",
"if",
"value",
"[",
":",
"1",
"]",
"==",
"value",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"value",
"=",
"unquote_header_value",
"(",
"value",
"[",
"1",
":",
"-",
"1",
"]",
")",
"result",
"[",
"name",
"]",
"=",
"value",
"return",
"result"
] | [ 336, 0 ] | [ 374, 17 ] | python | en | ['en', 'en', 'en'] | True
parse_options_header | (value, multiple=False) | Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True
| Parse a ``Content-Type`` like header into a tuple with the content
type and the options: | def parse_options_header(value, multiple=False):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True
"""
if not value:
return "", {}
result = []
value = "," + value.replace("\n", ",")
while value:
match = _option_header_start_mime_type.match(value)
if not match:
break
result.append(match.group(1)) # mimetype
options = {}
# Parse options
rest = match.group(2)
continued_encoding = None
while rest:
optmatch = _option_header_piece_re.match(rest)
if not optmatch:
break
option, count, encoding, language, option_value = optmatch.groups()
# Continuations don't have to supply the encoding after the
# first line. If we're in a continuation, track the current
# encoding to use for subsequent lines. Reset it when the
# continuation ends.
if not count:
continued_encoding = None
else:
if not encoding:
encoding = continued_encoding
continued_encoding = encoding
option = unquote_header_value(option)
if option_value is not None:
option_value = unquote_header_value(option_value, option == "filename")
if encoding is not None:
option_value = _unquote(option_value).decode(encoding)
if count:
# Continuations append to the existing value. For
# simplicity, this ignores the possibility of
# out-of-order indices, which shouldn't happen anyway.
options[option] = options.get(option, "") + option_value
else:
options[option] = option_value
rest = rest[optmatch.end() :]
result.append(options)
if multiple is False:
return tuple(result)
value = rest
return tuple(result) if result else ("", {}) | [
"def",
"parse_options_header",
"(",
"value",
",",
"multiple",
"=",
"False",
")",
":",
"if",
"not",
"value",
":",
"return",
"\"\"",
",",
"{",
"}",
"result",
"=",
"[",
"]",
"value",
"=",
"\",\"",
"+",
"value",
".",
"replace",
"(",
"\"\\n\"",
",",
"\",\"",
")",
"while",
"value",
":",
"match",
"=",
"_option_header_start_mime_type",
".",
"match",
"(",
"value",
")",
"if",
"not",
"match",
":",
"break",
"result",
".",
"append",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"# mimetype",
"options",
"=",
"{",
"}",
"# Parse options",
"rest",
"=",
"match",
".",
"group",
"(",
"2",
")",
"continued_encoding",
"=",
"None",
"while",
"rest",
":",
"optmatch",
"=",
"_option_header_piece_re",
".",
"match",
"(",
"rest",
")",
"if",
"not",
"optmatch",
":",
"break",
"option",
",",
"count",
",",
"encoding",
",",
"language",
",",
"option_value",
"=",
"optmatch",
".",
"groups",
"(",
")",
"# Continuations don't have to supply the encoding after the",
"# first line. If we're in a continuation, track the current",
"# encoding to use for subsequent lines. Reset it when the",
"# continuation ends.",
"if",
"not",
"count",
":",
"continued_encoding",
"=",
"None",
"else",
":",
"if",
"not",
"encoding",
":",
"encoding",
"=",
"continued_encoding",
"continued_encoding",
"=",
"encoding",
"option",
"=",
"unquote_header_value",
"(",
"option",
")",
"if",
"option_value",
"is",
"not",
"None",
":",
"option_value",
"=",
"unquote_header_value",
"(",
"option_value",
",",
"option",
"==",
"\"filename\"",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"option_value",
"=",
"_unquote",
"(",
"option_value",
")",
".",
"decode",
"(",
"encoding",
")",
"if",
"count",
":",
"# Continuations append to the existing value. For",
"# simplicity, this ignores the possibility of",
"# out-of-order indices, which shouldn't happen anyway.",
"options",
"[",
"option",
"]",
"=",
"options",
".",
"get",
"(",
"option",
",",
"\"\"",
")",
"+",
"option_value",
"else",
":",
"options",
"[",
"option",
"]",
"=",
"option_value",
"rest",
"=",
"rest",
"[",
"optmatch",
".",
"end",
"(",
")",
":",
"]",
"result",
".",
"append",
"(",
"options",
")",
"if",
"multiple",
"is",
"False",
":",
"return",
"tuple",
"(",
"result",
")",
"value",
"=",
"rest",
"return",
"tuple",
"(",
"result",
")",
"if",
"result",
"else",
"(",
"\"\"",
",",
"{",
"}",
")"
] | [ 377, 0 ] | [ 446, 48 ] | python | en | ['en', 'en', 'en'] | True
parse_accept_header | (value, cls=None) | Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
| Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction. | def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result) | [
"def",
"parse_accept_header",
"(",
"value",
",",
"cls",
"=",
"None",
")",
":",
"if",
"cls",
"is",
"None",
":",
"cls",
"=",
"Accept",
"if",
"not",
"value",
":",
"return",
"cls",
"(",
"None",
")",
"result",
"=",
"[",
"]",
"for",
"match",
"in",
"_accept_re",
".",
"finditer",
"(",
"value",
")",
":",
"quality",
"=",
"match",
".",
"group",
"(",
"2",
")",
"if",
"not",
"quality",
":",
"quality",
"=",
"1",
"else",
":",
"quality",
"=",
"max",
"(",
"min",
"(",
"float",
"(",
"quality",
")",
",",
"1",
")",
",",
"0",
")",
"result",
".",
"append",
"(",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"quality",
")",
")",
"return",
"cls",
"(",
"result",
")"
] | [ 449, 0 ] | [ 479, 22 ] | python | en | ['en', 'en', 'en'] | True
parse_cache_control_header | (value, on_update=None, cls=None) | Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
| Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements. | def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update) | [
"def",
"parse_cache_control_header",
"(",
"value",
",",
"on_update",
"=",
"None",
",",
"cls",
"=",
"None",
")",
":",
"if",
"cls",
"is",
"None",
":",
"cls",
"=",
"RequestCacheControl",
"if",
"not",
"value",
":",
"return",
"cls",
"(",
"None",
",",
"on_update",
")",
"return",
"cls",
"(",
"parse_dict_header",
"(",
"value",
")",
",",
"on_update",
")"
] | [ 482, 0 ] | [ 503, 51 ] | python | en | ['en', 'en', 'en'] | True
parse_set_header | (value, on_update=None) | Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
| Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object: | def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update) | [
"def",
"parse_set_header",
"(",
"value",
",",
"on_update",
"=",
"None",
")",
":",
"if",
"not",
"value",
":",
"return",
"HeaderSet",
"(",
"None",
",",
"on_update",
")",
"return",
"HeaderSet",
"(",
"parse_list_header",
"(",
"value",
")",
",",
"on_update",
")"
] | [ 506, 0 ] | [ 533, 57 ] | python | en | ['en', 'haw', 'en'] | True
parse_authorization_header | (value) | Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
| Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object. | def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b"basic":
try:
username, password = base64.b64decode(auth_info).split(b":", 1)
except Exception:
return
return Authorization(
"basic",
{
"username": to_unicode(username, _basic_auth_charset),
"password": to_unicode(password, _basic_auth_charset),
},
)
elif auth_type == b"digest":
auth_map = parse_dict_header(auth_info)
for key in "username", "realm", "nonce", "uri", "response":
if key not in auth_map:
return
if "qop" in auth_map:
if not auth_map.get("nc") or not auth_map.get("cnonce"):
return
return Authorization("digest", auth_map) | [
"def",
"parse_authorization_header",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"value",
"=",
"wsgi_to_bytes",
"(",
"value",
")",
"try",
":",
"auth_type",
",",
"auth_info",
"=",
"value",
".",
"split",
"(",
"None",
",",
"1",
")",
"auth_type",
"=",
"auth_type",
".",
"lower",
"(",
")",
"except",
"ValueError",
":",
"return",
"if",
"auth_type",
"==",
"b\"basic\"",
":",
"try",
":",
"username",
",",
"password",
"=",
"base64",
".",
"b64decode",
"(",
"auth_info",
")",
".",
"split",
"(",
"b\":\"",
",",
"1",
")",
"except",
"Exception",
":",
"return",
"return",
"Authorization",
"(",
"\"basic\"",
",",
"{",
"\"username\"",
":",
"to_unicode",
"(",
"username",
",",
"_basic_auth_charset",
")",
",",
"\"password\"",
":",
"to_unicode",
"(",
"password",
",",
"_basic_auth_charset",
")",
",",
"}",
",",
")",
"elif",
"auth_type",
"==",
"b\"digest\"",
":",
"auth_map",
"=",
"parse_dict_header",
"(",
"auth_info",
")",
"for",
"key",
"in",
"\"username\"",
",",
"\"realm\"",
",",
"\"nonce\"",
",",
"\"uri\"",
",",
"\"response\"",
":",
"if",
"key",
"not",
"in",
"auth_map",
":",
"return",
"if",
"\"qop\"",
"in",
"auth_map",
":",
"if",
"not",
"auth_map",
".",
"get",
"(",
"\"nc\"",
")",
"or",
"not",
"auth_map",
".",
"get",
"(",
"\"cnonce\"",
")",
":",
"return",
"return",
"Authorization",
"(",
"\"digest\"",
",",
"auth_map",
")"
] | [ 536, 0 ] | [ 573, 48 ] | python | en | ['en', 'en', 'en'] | True
parse_www_authenticate_header | (value, on_update=None) | Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
| Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object. | def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) | [
"def",
"parse_www_authenticate_header",
"(",
"value",
",",
"on_update",
"=",
"None",
")",
":",
"if",
"not",
"value",
":",
"return",
"WWWAuthenticate",
"(",
"on_update",
"=",
"on_update",
")",
"try",
":",
"auth_type",
",",
"auth_info",
"=",
"value",
".",
"split",
"(",
"None",
",",
"1",
")",
"auth_type",
"=",
"auth_type",
".",
"lower",
"(",
")",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"return",
"WWWAuthenticate",
"(",
"value",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
",",
"on_update",
"=",
"on_update",
")",
"return",
"WWWAuthenticate",
"(",
"auth_type",
",",
"parse_dict_header",
"(",
"auth_info",
")",
",",
"on_update",
")"
] | [ 576, 0 ] | [ 593, 78 ] | python | en | ['en', 'en', 'en'] | True
parse_if_range_header | (value) | Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
| Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object. | def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0]) | [
"def",
"parse_if_range_header",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"IfRange",
"(",
")",
"date",
"=",
"parse_date",
"(",
"value",
")",
"if",
"date",
"is",
"not",
"None",
":",
"return",
"IfRange",
"(",
"date",
"=",
"date",
")",
"# drop weakness information",
"return",
"IfRange",
"(",
"unquote_etag",
"(",
"value",
")",
"[",
"0",
"]",
")"
] | [ 596, 0 ] | [ 608, 42 ] | python | en | ['en', 'en', 'en'] | True
parse_range_header | (value, make_inclusive=True) | Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
| Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive. | def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or "=" not in value:
return None
ranges = []
last_end = 0
units, rng = value.split("=", 1)
units = units.strip().lower()
for item in rng.split(","):
item = item.strip()
if "-" not in item:
return None
if item.startswith("-"):
if last_end < 0:
return None
try:
begin = int(item)
except ValueError:
return None
end = None
last_end = -1
elif "-" in item:
begin, end = item.split("-", 1)
begin = begin.strip()
end = end.strip()
if not begin.isdigit():
return None
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
if not end.isdigit():
return None
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges) | [
"def",
"parse_range_header",
"(",
"value",
",",
"make_inclusive",
"=",
"True",
")",
":",
"if",
"not",
"value",
"or",
"\"=\"",
"not",
"in",
"value",
":",
"return",
"None",
"ranges",
"=",
"[",
"]",
"last_end",
"=",
"0",
"units",
",",
"rng",
"=",
"value",
".",
"split",
"(",
"\"=\"",
",",
"1",
")",
"units",
"=",
"units",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"item",
"in",
"rng",
".",
"split",
"(",
"\",\"",
")",
":",
"item",
"=",
"item",
".",
"strip",
"(",
")",
"if",
"\"-\"",
"not",
"in",
"item",
":",
"return",
"None",
"if",
"item",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"if",
"last_end",
"<",
"0",
":",
"return",
"None",
"try",
":",
"begin",
"=",
"int",
"(",
"item",
")",
"except",
"ValueError",
":",
"return",
"None",
"end",
"=",
"None",
"last_end",
"=",
"-",
"1",
"elif",
"\"-\"",
"in",
"item",
":",
"begin",
",",
"end",
"=",
"item",
".",
"split",
"(",
"\"-\"",
",",
"1",
")",
"begin",
"=",
"begin",
".",
"strip",
"(",
")",
"end",
"=",
"end",
".",
"strip",
"(",
")",
"if",
"not",
"begin",
".",
"isdigit",
"(",
")",
":",
"return",
"None",
"begin",
"=",
"int",
"(",
"begin",
")",
"if",
"begin",
"<",
"last_end",
"or",
"last_end",
"<",
"0",
":",
"return",
"None",
"if",
"end",
":",
"if",
"not",
"end",
".",
"isdigit",
"(",
")",
":",
"return",
"None",
"end",
"=",
"int",
"(",
"end",
")",
"+",
"1",
"if",
"begin",
">=",
"end",
":",
"return",
"None",
"else",
":",
"end",
"=",
"None",
"last_end",
"=",
"end",
"ranges",
".",
"append",
"(",
"(",
"begin",
",",
"end",
")",
")",
"return",
"Range",
"(",
"units",
",",
"ranges",
")"
] | [ 611, 0 ] | [ 660, 31 ] | python | en | ['en', 'de', 'en'] | True
parse_content_range_header | (value, on_update=None) | Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
| Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible. | def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or "").strip().split(None, 1)
except ValueError:
return None
if "/" not in rangedef:
return None
rng, length = rangedef.split("/", 1)
if length == "*":
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == "*":
return ContentRange(units, None, None, length, on_update=on_update)
elif "-" not in rng:
return None
start, stop = rng.split("-", 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update) | [
"def",
"parse_content_range_header",
"(",
"value",
",",
"on_update",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"try",
":",
"units",
",",
"rangedef",
"=",
"(",
"value",
"or",
"\"\"",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"return",
"None",
"if",
"\"/\"",
"not",
"in",
"rangedef",
":",
"return",
"None",
"rng",
",",
"length",
"=",
"rangedef",
".",
"split",
"(",
"\"/\"",
",",
"1",
")",
"if",
"length",
"==",
"\"*\"",
":",
"length",
"=",
"None",
"elif",
"length",
".",
"isdigit",
"(",
")",
":",
"length",
"=",
"int",
"(",
"length",
")",
"else",
":",
"return",
"None",
"if",
"rng",
"==",
"\"*\"",
":",
"return",
"ContentRange",
"(",
"units",
",",
"None",
",",
"None",
",",
"length",
",",
"on_update",
"=",
"on_update",
")",
"elif",
"\"-\"",
"not",
"in",
"rng",
":",
"return",
"None",
"start",
",",
"stop",
"=",
"rng",
".",
"split",
"(",
"\"-\"",
",",
"1",
")",
"try",
":",
"start",
"=",
"int",
"(",
"start",
")",
"stop",
"=",
"int",
"(",
"stop",
")",
"+",
"1",
"except",
"ValueError",
":",
"return",
"None",
"if",
"is_byte_range_valid",
"(",
"start",
",",
"stop",
",",
"length",
")",
":",
"return",
"ContentRange",
"(",
"units",
",",
"start",
",",
"stop",
",",
"length",
",",
"on_update",
"=",
"on_update",
")"
] | [ 663, 0 ] | [ 705, 76 ] | python | en | ['en', 'en', 'en'] | True
quote_etag | (etag, weak=False) | Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
| Quote an etag. | def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError("invalid etag")
etag = '"%s"' % etag
if weak:
etag = "W/" + etag
return etag | [
"def",
"quote_etag",
"(",
"etag",
",",
"weak",
"=",
"False",
")",
":",
"if",
"'\"'",
"in",
"etag",
":",
"raise",
"ValueError",
"(",
"\"invalid etag\"",
")",
"etag",
"=",
"'\"%s\"'",
"%",
"etag",
"if",
"weak",
":",
"etag",
"=",
"\"W/\"",
"+",
"etag",
"return",
"etag"
] | [ 708, 0 ] | [ 719, 15 ] | python | en | ['en', 'en', 'es'] | True
unquote_etag | (etag) | Unquote a single etag:
>>> unquote_etag('W/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
| Unquote a single etag: | def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('W/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag.startswith(("W/", "w/")):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak | [
"def",
"unquote_etag",
"(",
"etag",
")",
":",
"if",
"not",
"etag",
":",
"return",
"None",
",",
"None",
"etag",
"=",
"etag",
".",
"strip",
"(",
")",
"weak",
"=",
"False",
"if",
"etag",
".",
"startswith",
"(",
"(",
"\"W/\"",
",",
"\"w/\"",
")",
")",
":",
"weak",
"=",
"True",
"etag",
"=",
"etag",
"[",
"2",
":",
"]",
"if",
"etag",
"[",
":",
"1",
"]",
"==",
"etag",
"[",
"-",
"1",
":",
"]",
"==",
"'\"'",
":",
"etag",
"=",
"etag",
"[",
"1",
":",
"-",
"1",
"]",
"return",
"etag",
",",
"weak"
] | [ 722, 0 ] | [ 742, 21 ] | python | fr | ['fr', 'it', 'pt'] | False
parse_etags | (value) | Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
| Parse an etag header. | def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == "*":
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak) | [
"def",
"parse_etags",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"ETags",
"(",
")",
"strong",
"=",
"[",
"]",
"weak",
"=",
"[",
"]",
"end",
"=",
"len",
"(",
"value",
")",
"pos",
"=",
"0",
"while",
"pos",
"<",
"end",
":",
"match",
"=",
"_etag_re",
".",
"match",
"(",
"value",
",",
"pos",
")",
"if",
"match",
"is",
"None",
":",
"break",
"is_weak",
",",
"quoted",
",",
"raw",
"=",
"match",
".",
"groups",
"(",
")",
"if",
"raw",
"==",
"\"*\"",
":",
"return",
"ETags",
"(",
"star_tag",
"=",
"True",
")",
"elif",
"quoted",
":",
"raw",
"=",
"quoted",
"if",
"is_weak",
":",
"weak",
".",
"append",
"(",
"raw",
")",
"else",
":",
"strong",
".",
"append",
"(",
"raw",
")",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"ETags",
"(",
"strong",
",",
"weak",
")"
] | [ 745, 0 ] | [ 771, 30 ] | python | br | ['br', 'gd', 'es'] | False
generate_etag | (data) | Generate an etag for some data. | Generate an etag for some data. | def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest() | [
"def",
"generate_etag",
"(",
"data",
")",
":",
"return",
"md5",
"(",
"data",
")",
".",
"hexdigest",
"(",
")"
] | [ 774, 0 ] | [ 776, 32 ] | python | en | ['no', 'en', 'es'] | False