identifier (string, 1-155 chars) | parameters (string, 2-6.09k chars) | docstring (string, 11-63.4k chars) | docstring_summary (string, 0-63.4k chars) | function (string, 29-99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2-7 chars) | docstring_language_predictions (string, 18-23 chars) | is_langid_reliable (2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
_init_worker | (counter) |
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
|
Switch to databases dedicated to this worker. | def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close() | [
"def",
"_init_worker",
"(",
"counter",
")",
":",
"global",
"_worker_id",
"with",
"counter",
".",
"get_lock",
"(",
")",
":",
"counter",
".",
"value",
"+=",
"1",
"_worker_id",
"=",
"counter",
".",
"value",
"for",
"alias",
"in",
"connections",
":",
"connection",
"=",
"connections",
"[",
"alias",
"]",
"settings_dict",
"=",
"connection",
".",
"creation",
".",
"get_test_db_clone_settings",
"(",
"str",
"(",
"_worker_id",
")",
")",
"# connection.settings_dict must be updated in place for changes to be",
"# reflected in django.db.connections. If the following line assigned",
"# connection.settings_dict = settings_dict, new threads would connect",
"# to the default database instead of the appropriate clone.",
"connection",
".",
"settings_dict",
".",
"update",
"(",
"settings_dict",
")",
"connection",
".",
"close",
"(",
")"
] | [
312,
0
] | [
334,
26
] | python | en | ['en', 'error', 'th'] | False |
_run_subsuite | (args) |
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
|
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. | def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events | [
"def",
"_run_subsuite",
"(",
"args",
")",
":",
"runner_class",
",",
"subsuite_index",
",",
"subsuite",
",",
"failfast",
"=",
"args",
"runner",
"=",
"runner_class",
"(",
"failfast",
"=",
"failfast",
")",
"result",
"=",
"runner",
".",
"run",
"(",
"subsuite",
")",
"return",
"subsuite_index",
",",
"result",
".",
"events"
] | [
337,
0
] | [
347,
40
] | python | en | ['en', 'error', 'th'] | False |
is_discoverable | (label) |
Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
|
Check if a test label points to a Python package or file directory. | def is_discoverable(label):
"""
Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label)) | [
"def",
"is_discoverable",
"(",
"label",
")",
":",
"try",
":",
"mod",
"=",
"import_module",
"(",
"label",
")",
"except",
"(",
"ImportError",
",",
"TypeError",
")",
":",
"pass",
"else",
":",
"return",
"hasattr",
"(",
"mod",
",",
"'__path__'",
")",
"return",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"label",
")",
")"
] | [
746,
0
] | [
759,
48
] | python | en | ['en', 'error', 'th'] | False |
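A quick, hedged illustration of the behaviour described above; it assumes `is_discoverable` is importable from `django.test.runner`, where it lives in the Django version this code comes from:

```python
# Hedged usage sketch; requires Django installed.
from django.test.runner import is_discoverable

print(is_discoverable('.'))     # True  - relative labels count as directories
print(is_discoverable('json'))  # True  - json imports as a package (has __path__)
print(is_discoverable('os'))    # False - plain module, and no ./os directory (usually)
```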
reorder_suite | (suite, classes, reverse=False) |
Reorder a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
|
Reorder a test suite by test type. | def reorder_suite(suite, classes, reverse=False):
"""
Reorder a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite | [
"def",
"reorder_suite",
"(",
"suite",
",",
"classes",
",",
"reverse",
"=",
"False",
")",
":",
"class_count",
"=",
"len",
"(",
"classes",
")",
"suite_class",
"=",
"type",
"(",
"suite",
")",
"bins",
"=",
"[",
"OrderedSet",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"class_count",
"+",
"1",
")",
"]",
"partition_suite_by_type",
"(",
"suite",
",",
"classes",
",",
"bins",
",",
"reverse",
"=",
"reverse",
")",
"reordered_suite",
"=",
"suite_class",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"class_count",
"+",
"1",
")",
":",
"reordered_suite",
".",
"addTests",
"(",
"bins",
"[",
"i",
"]",
")",
"return",
"reordered_suite"
] | [
762,
0
] | [
781,
26
] | python | en | ['en', 'error', 'th'] | False |
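A hedged usage sketch of `reorder_suite` (a module-level helper in `django.test.runner` in Django 3.x and earlier); it shows `TestCase` subclasses being moved ahead of `SimpleTestCase`-only tests. The test classes are placeholders:

```python
import unittest
from django.test import SimpleTestCase, TestCase
from django.test.runner import reorder_suite  # present in Django <= 3.2

class FastTests(SimpleTestCase):
    def test_ok(self): pass

class DbTests(TestCase):
    def test_ok(self): pass

loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(FastTests))
suite.addTests(loader.loadTestsFromTestCase(DbTests))

# Tests of classes[0] (TestCase) come first, then classes[1] (SimpleTestCase).
reordered = reorder_suite(suite, classes=(TestCase, SimpleTestCase))
print([type(t).__name__ for t in reordered])  # ['DbTests', 'FastTests']
```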
partition_suite_by_type | (suite, classes, bins, reverse=False) |
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are place in bins[-1]
|
Partition a test suite by test type. Also prevent duplicated tests. | def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are place in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test) | [
"def",
"partition_suite_by_type",
"(",
"suite",
",",
"classes",
",",
"bins",
",",
"reverse",
"=",
"False",
")",
":",
"suite_class",
"=",
"type",
"(",
"suite",
")",
"if",
"reverse",
":",
"suite",
"=",
"reversed",
"(",
"tuple",
"(",
"suite",
")",
")",
"for",
"test",
"in",
"suite",
":",
"if",
"isinstance",
"(",
"test",
",",
"suite_class",
")",
":",
"partition_suite_by_type",
"(",
"test",
",",
"classes",
",",
"bins",
",",
"reverse",
"=",
"reverse",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"classes",
")",
")",
":",
"if",
"isinstance",
"(",
"test",
",",
"classes",
"[",
"i",
"]",
")",
":",
"bins",
"[",
"i",
"]",
".",
"add",
"(",
"test",
")",
"break",
"else",
":",
"bins",
"[",
"-",
"1",
"]",
".",
"add",
"(",
"test",
")"
] | [
784,
0
] | [
807,
34
] | python | en | ['en', 'error', 'th'] | False |
partition_suite_by_case | (suite) | Partition a test suite by test case, preserving the order of tests. | Partition a test suite by test case, preserving the order of tests. | def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups | [
"def",
"partition_suite_by_case",
"(",
"suite",
")",
":",
"groups",
"=",
"[",
"]",
"suite_class",
"=",
"type",
"(",
"suite",
")",
"for",
"test_type",
",",
"test_group",
"in",
"itertools",
".",
"groupby",
"(",
"suite",
",",
"type",
")",
":",
"if",
"issubclass",
"(",
"test_type",
",",
"unittest",
".",
"TestCase",
")",
":",
"groups",
".",
"append",
"(",
"suite_class",
"(",
"test_group",
")",
")",
"else",
":",
"for",
"item",
"in",
"test_group",
":",
"groups",
".",
"extend",
"(",
"partition_suite_by_case",
"(",
"item",
")",
")",
"return",
"groups"
] | [
810,
0
] | [
820,
17
] | python | en | ['en', 'en', 'en'] | True |
RemoteTestResult._confirm_picklable | (self, obj) |
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
|
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
| def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj)) | [
"def",
"_confirm_picklable",
"(",
"self",
",",
"obj",
")",
":",
"pickle",
".",
"loads",
"(",
"pickle",
".",
"dumps",
"(",
"obj",
")",
")"
] | [
135,
4
] | [
141,
39
] | python | en | ['en', 'error', 'th'] | False |
ParallelTestSuite.run | (self, result) |
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
|
Distribute test cases across workers. | def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter],
)
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result | [
"def",
"run",
"(",
"self",
",",
"result",
")",
":",
"counter",
"=",
"multiprocessing",
".",
"Value",
"(",
"ctypes",
".",
"c_int",
",",
"0",
")",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"self",
".",
"processes",
",",
"initializer",
"=",
"self",
".",
"init_worker",
".",
"__func__",
",",
"initargs",
"=",
"[",
"counter",
"]",
",",
")",
"args",
"=",
"[",
"(",
"self",
".",
"runner_class",
",",
"index",
",",
"subsuite",
",",
"self",
".",
"failfast",
")",
"for",
"index",
",",
"subsuite",
"in",
"enumerate",
"(",
"self",
".",
"subsuites",
")",
"]",
"test_results",
"=",
"pool",
".",
"imap_unordered",
"(",
"self",
".",
"run_subsuite",
".",
"__func__",
",",
"args",
")",
"while",
"True",
":",
"if",
"result",
".",
"shouldStop",
":",
"pool",
".",
"terminate",
"(",
")",
"break",
"try",
":",
"subsuite_index",
",",
"events",
"=",
"test_results",
".",
"next",
"(",
"timeout",
"=",
"0.1",
")",
"except",
"multiprocessing",
".",
"TimeoutError",
":",
"continue",
"except",
"StopIteration",
":",
"pool",
".",
"close",
"(",
")",
"break",
"tests",
"=",
"list",
"(",
"self",
".",
"subsuites",
"[",
"subsuite_index",
"]",
")",
"for",
"event",
"in",
"events",
":",
"event_name",
"=",
"event",
"[",
"0",
"]",
"handler",
"=",
"getattr",
"(",
"result",
",",
"event_name",
",",
"None",
")",
"if",
"handler",
"is",
"None",
":",
"continue",
"test",
"=",
"tests",
"[",
"event",
"[",
"1",
"]",
"]",
"args",
"=",
"event",
"[",
"2",
":",
"]",
"handler",
"(",
"test",
",",
"*",
"args",
")",
"pool",
".",
"join",
"(",
")",
"return",
"result"
] | [
377,
4
] | [
429,
21
] | python | en | ['en', 'error', 'th'] | False |
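The pickling trick this docstring describes (each worker receives a numeric subsuite index and returns it together with its picklable results, so `imap_unordered` output can be matched back to subsuites) can be shown with a minimal, self-contained sketch that is not Django code; the squaring work is a stand-in for running a subsuite:

```python
import multiprocessing

def run_chunk(args):
    index, chunk = args                      # numeric index travels with the work item
    return index, [x * x for x in chunk]     # stand-in for the list of result "events"

if __name__ == '__main__':
    chunks = [[1, 2], [3, 4], [5, 6]]
    with multiprocessing.Pool(processes=2) as pool:
        for index, events in pool.imap_unordered(run_chunk, enumerate(chunks)):
            print(index, events)             # results are handled as soon as they arrive
```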
DiscoverRunner.teardown_databases | (self, old_config, **kwargs) | Destroy all the non-mirror databases. | Destroy all the non-mirror databases. | def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
) | [
"def",
"teardown_databases",
"(",
"self",
",",
"old_config",
",",
"*",
"*",
"kwargs",
")",
":",
"_teardown_databases",
"(",
"old_config",
",",
"verbosity",
"=",
"self",
".",
"verbosity",
",",
"parallel",
"=",
"self",
".",
"parallel",
",",
"keepdb",
"=",
"self",
".",
"keepdb",
",",
")"
] | [
671,
4
] | [
678,
9
] | python | en | ['en', 'en', 'en'] | True |
DiscoverRunner.run_tests | (self, test_labels, extra_tests=None, **kwargs) |
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
|
Run the unit tests for all the test labels in the provided list. | def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
with self.time_keeper.timed('Total database setup'):
old_config = self.setup_databases(aliases=databases)
run_failed = False
try:
self.run_checks(databases)
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
with self.time_keeper.timed('Total database teardown'):
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
# Silence teardown exceptions if an exception was raised during
# runs to avoid shadowing it.
if not run_failed:
raise
self.time_keeper.print_results()
return self.suite_result(suite, result) | [
"def",
"run_tests",
"(",
"self",
",",
"test_labels",
",",
"extra_tests",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"setup_test_environment",
"(",
")",
"suite",
"=",
"self",
".",
"build_suite",
"(",
"test_labels",
",",
"extra_tests",
")",
"databases",
"=",
"self",
".",
"get_databases",
"(",
"suite",
")",
"with",
"self",
".",
"time_keeper",
".",
"timed",
"(",
"'Total database setup'",
")",
":",
"old_config",
"=",
"self",
".",
"setup_databases",
"(",
"aliases",
"=",
"databases",
")",
"run_failed",
"=",
"False",
"try",
":",
"self",
".",
"run_checks",
"(",
"databases",
")",
"result",
"=",
"self",
".",
"run_suite",
"(",
"suite",
")",
"except",
"Exception",
":",
"run_failed",
"=",
"True",
"raise",
"finally",
":",
"try",
":",
"with",
"self",
".",
"time_keeper",
".",
"timed",
"(",
"'Total database teardown'",
")",
":",
"self",
".",
"teardown_databases",
"(",
"old_config",
")",
"self",
".",
"teardown_test_environment",
"(",
")",
"except",
"Exception",
":",
"# Silence teardown exceptions if an exception was raised during",
"# runs to avoid shadowing it.",
"if",
"not",
"run_failed",
":",
"raise",
"self",
".",
"time_keeper",
".",
"print_results",
"(",
")",
"return",
"self",
".",
"suite_result",
"(",
"suite",
",",
"result",
")"
] | [
708,
4
] | [
743,
47
] | python | en | ['en', 'error', 'th'] | False |
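A hedged sketch of driving this method directly (normally `manage.py test` does it for you); `myproject.settings` and `myapp.tests` are placeholders for a real project and the call only succeeds inside one:

```python
import os
import django
from django.test.runner import DiscoverRunner

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')  # placeholder
django.setup()

failures = DiscoverRunner(verbosity=1).run_tests(['myapp.tests'])      # placeholder label
print('failed tests:', failures)   # run_tests returns the number of failures
```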
DatabaseOperations.fetch_returned_insert_rows | (self, cursor) |
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the tuple of returned data.
|
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the tuple of returned data.
| def fetch_returned_insert_rows(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the tuple of returned data.
"""
return cursor.fetchall() | [
"def",
"fetch_returned_insert_rows",
"(",
"self",
",",
"cursor",
")",
":",
"return",
"cursor",
".",
"fetchall",
"(",
")"
] | [
144,
4
] | [
149,
32
] | python | en | ['en', 'error', 'th'] | False |
DatabaseOperations.force_no_ordering | (self) |
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
|
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
| def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))] | [
"def",
"force_no_ordering",
"(",
"self",
")",
":",
"return",
"[",
"(",
"None",
",",
"(",
"\"NULL\"",
",",
"[",
"]",
",",
"False",
")",
")",
"]"
] | [
154,
4
] | [
160,
44
] | python | en | ['en', 'error', 'th'] | False |
MyPredictor.__init__ | (self, model) | Stores artifacts for prediction. Only initialized via `from_path`.
| Stores artifacts for prediction. Only initialized via `from_path`.
| def __init__(self, model):
"""Stores artifacts for prediction. Only initialized via `from_path`.
"""
self._model = model | [
"def",
"__init__",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_model",
"=",
"model"
] | [
7,
4
] | [
10,
27
] | python | en | ['en', 'en', 'en'] | True |
MyPredictor.predict | (self, instances, **kwargs) | Performs custom prediction.
Preprocesses inputs, then performs prediction using the trained
scikit-learn model.
Args:
instances: A list of prediction input instances.
**kwargs: A dictionary of keyword args provided as additional
fields on the predict request body.
Returns:
A list of outputs containing the prediction results.
| Performs custom prediction. | def predict(self, instances, **kwargs):
"""Performs custom prediction.
Preprocesses inputs, then performs prediction using the trained
scikit-learn model.
Args:
instances: A list of prediction input instances.
**kwargs: A dictionary of keyword args provided as additional
fields on the predict request body.
Returns:
A list of outputs containing the prediction results.
"""
inputs = np.asarray(instances)
outputs = self._model.predict_proba(inputs)
return outputs.tolist() | [
"def",
"predict",
"(",
"self",
",",
"instances",
",",
"*",
"*",
"kwargs",
")",
":",
"inputs",
"=",
"np",
".",
"asarray",
"(",
"instances",
")",
"outputs",
"=",
"self",
".",
"_model",
".",
"predict_proba",
"(",
"inputs",
")",
"return",
"outputs",
".",
"tolist",
"(",
")"
] | [
12,
4
] | [
28,
31
] | python | en | ['sl', 'sr', 'en'] | False |
MyPredictor.from_path | (cls, model_dir) | Creates an instance of MyPredictor using the given path.
This loads artifacts that have been copied from your model directory in
Cloud Storage. MyPredictor uses them during prediction.
Args:
model_dir: The local directory that contains the trained
scikit-learn model and the pickled preprocessor instance. These
are copied from the Cloud Storage model directory you provide
when you deploy a version resource.
Returns:
An instance of `MyPredictor`.
| Creates an instance of MyPredictor using the given path. | def from_path(cls, model_dir):
"""Creates an instance of MyPredictor using the given path.
This loads artifacts that have been copied from your model directory in
Cloud Storage. MyPredictor uses them during prediction.
Args:
model_dir: The local directory that contains the trained
scikit-learn model and the pickled preprocessor instance. These
are copied from the Cloud Storage model directory you provide
when you deploy a version resource.
Returns:
An instance of `MyPredictor`.
"""
model_path = os.path.join(model_dir, 'model.pkl')
with open(model_path, 'rb') as f:
model = pickle.load(f)
return cls(model) | [
"def",
"from_path",
"(",
"cls",
",",
"model_dir",
")",
":",
"model_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_dir",
",",
"'model.pkl'",
")",
"with",
"open",
"(",
"model_path",
",",
"'rb'",
")",
"as",
"f",
":",
"model",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"return",
"cls",
"(",
"model",
")"
] | [
31,
4
] | [
51,
25
] | python | en | ['en', 'en', 'en'] | True |
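A hedged sketch of the predictor lifecycle: the serving layer calls `from_path()` once with the model directory and then `predict()` per request. The module name, directory, and input values below are illustrative only:

```python
from predictor import MyPredictor   # hypothetical module name for the class above

predictor = MyPredictor.from_path('/tmp/model_dir')        # expects model.pkl inside
probabilities = predictor.predict([[5.1, 3.5, 1.4, 0.2]])  # one 4-feature instance
print(probabilities)                                       # list of class probabilities
```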
Command.sync_apps | (self, connection, app_labels) | Run the old syncdb-style operation on a list of app_labels. | Run the old syncdb-style operation on a list of app_labels. | def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(' Creating tables...')
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
' Processing %s.%s model' % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(' Creating table %s' % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(' Running deferred SQL...') | [
"def",
"sync_apps",
"(",
"self",
",",
"connection",
",",
"app_labels",
")",
":",
"with",
"connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"tables",
"=",
"connection",
".",
"introspection",
".",
"table_names",
"(",
"cursor",
")",
"# Build the manifest of apps and models that are to be synchronized.",
"all_models",
"=",
"[",
"(",
"app_config",
".",
"label",
",",
"router",
".",
"get_migratable_models",
"(",
"app_config",
",",
"connection",
".",
"alias",
",",
"include_auto_created",
"=",
"False",
")",
",",
")",
"for",
"app_config",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
"if",
"app_config",
".",
"models_module",
"is",
"not",
"None",
"and",
"app_config",
".",
"label",
"in",
"app_labels",
"]",
"def",
"model_installed",
"(",
"model",
")",
":",
"opts",
"=",
"model",
".",
"_meta",
"converter",
"=",
"connection",
".",
"introspection",
".",
"identifier_converter",
"return",
"not",
"(",
"(",
"converter",
"(",
"opts",
".",
"db_table",
")",
"in",
"tables",
")",
"or",
"(",
"opts",
".",
"auto_created",
"and",
"converter",
"(",
"opts",
".",
"auto_created",
".",
"_meta",
".",
"db_table",
")",
"in",
"tables",
")",
")",
"manifest",
"=",
"{",
"app_name",
":",
"list",
"(",
"filter",
"(",
"model_installed",
",",
"model_list",
")",
")",
"for",
"app_name",
",",
"model_list",
"in",
"all_models",
"}",
"# Create the tables for each model",
"if",
"self",
".",
"verbosity",
">=",
"1",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"' Creating tables...'",
")",
"with",
"connection",
".",
"schema_editor",
"(",
")",
"as",
"editor",
":",
"for",
"app_name",
",",
"model_list",
"in",
"manifest",
".",
"items",
"(",
")",
":",
"for",
"model",
"in",
"model_list",
":",
"# Never install unmanaged models, etc.",
"if",
"not",
"model",
".",
"_meta",
".",
"can_migrate",
"(",
"connection",
")",
":",
"continue",
"if",
"self",
".",
"verbosity",
">=",
"3",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"' Processing %s.%s model'",
"%",
"(",
"app_name",
",",
"model",
".",
"_meta",
".",
"object_name",
")",
")",
"if",
"self",
".",
"verbosity",
">=",
"1",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"' Creating table %s'",
"%",
"model",
".",
"_meta",
".",
"db_table",
")",
"editor",
".",
"create_model",
"(",
"model",
")",
"# Deferred SQL is executed when exiting the editor's context.",
"if",
"self",
".",
"verbosity",
">=",
"1",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"' Running deferred SQL...'",
")"
] | [
305,
4
] | [
352,
64
] | python | en | ['en', 'en', 'en'] | True |
Command.describe_operation | (operation, backwards) | Return a string that describes a migration operation for --plan. | Return a string that describes a migration operation for --plan. | def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ''
is_error = False
if hasattr(operation, 'code'):
code = operation.reverse_code if backwards else operation.code
action = (code.__doc__ or '') if code else None
elif hasattr(operation, 'sql'):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ''
if backwards:
prefix = 'Undo '
if action is not None:
action = str(action).replace('\n', '')
elif backwards:
action = 'IRREVERSIBLE'
is_error = True
if action:
action = ' -> ' + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error | [
"def",
"describe_operation",
"(",
"operation",
",",
"backwards",
")",
":",
"prefix",
"=",
"''",
"is_error",
"=",
"False",
"if",
"hasattr",
"(",
"operation",
",",
"'code'",
")",
":",
"code",
"=",
"operation",
".",
"reverse_code",
"if",
"backwards",
"else",
"operation",
".",
"code",
"action",
"=",
"(",
"code",
".",
"__doc__",
"or",
"''",
")",
"if",
"code",
"else",
"None",
"elif",
"hasattr",
"(",
"operation",
",",
"'sql'",
")",
":",
"action",
"=",
"operation",
".",
"reverse_sql",
"if",
"backwards",
"else",
"operation",
".",
"sql",
"else",
":",
"action",
"=",
"''",
"if",
"backwards",
":",
"prefix",
"=",
"'Undo '",
"if",
"action",
"is",
"not",
"None",
":",
"action",
"=",
"str",
"(",
"action",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"elif",
"backwards",
":",
"action",
"=",
"'IRREVERSIBLE'",
"is_error",
"=",
"True",
"if",
"action",
":",
"action",
"=",
"' -> '",
"+",
"action",
"truncated",
"=",
"Truncator",
"(",
"action",
")",
"return",
"prefix",
"+",
"operation",
".",
"describe",
"(",
")",
"+",
"truncated",
".",
"chars",
"(",
"40",
")",
",",
"is_error"
] | [
355,
4
] | [
376,
76
] | python | en | ['en', 'en', 'en'] | True |
run | (argv=None) | The main function which creates the pipeline and runs it. | The main function which creates the pipeline and runs it. | def run(argv=None):
"""The main function which creates the pipeline and runs it."""
parser = argparse.ArgumentParser()
# Add the arguments needed for this specific Dataflow job.
parser.add_argument(
'--input', dest='input', required=True,
help='Input file to read. This can be a local file or '
'a file in a Google Storage Bucket.')
parser.add_argument('--output', dest='output', required=True,
help='Output BQ table to write results to.')
parser.add_argument('--delimiter', dest='delimiter', required=False,
help='Delimiter to split input records.',
default=',')
parser.add_argument('--fields', dest='fields', required=True,
help='Comma separated list of field names.')
parser.add_argument('--load_dt', dest='load_dt', required=True,
help='Load date in YYYY-MM-DD format.')
known_args, pipeline_args = parser.parse_known_args(argv)
row_transformer = RowTransformer(delimiter=known_args.delimiter,
header=known_args.fields,
filename=ntpath.basename(known_args.input),
load_dt=known_args.load_dt)
p_opts = pipeline_options.PipelineOptions(pipeline_args)
# Initiate the pipeline using the pipeline arguments passed in from the
# command line. This includes information including where Dataflow should
# store temp files, and what the project id is.
with beam.Pipeline(options=p_opts) as pipeline:
# Read the file. This is the source of the pipeline. All further
# processing starts with lines read from the file. We use the input
# argument from the command line.
rows = pipeline | "Read from text file" >> beam.io.ReadFromText(known_args.input)
# This stage of the pipeline translates from a delimited single row
# input to a dictionary object consumable by BigQuery.
# It refers to a function we have written. This function will
# be run in parallel on different workers using input from the
# previous stage of the pipeline.
dict_records = rows | "Convert to BigQuery row" >> beam.Map(
lambda r: row_transformer.parse(r))
# This stage of the pipeline writes the dictionary records into
# an existing BigQuery table.
dict_records | "Write to BigQuery" >> beam.io.Write(
beam.io.BigQuerySink(known_args.output,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)) | [
"def",
"run",
"(",
"argv",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"# Add the arguments needed for this specific Dataflow job.",
"parser",
".",
"add_argument",
"(",
"'--input'",
",",
"dest",
"=",
"'input'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Input file to read. This can be a local file or '",
"'a file in a Google Storage Bucket.'",
")",
"parser",
".",
"add_argument",
"(",
"'--output'",
",",
"dest",
"=",
"'output'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Output BQ table to write results to.'",
")",
"parser",
".",
"add_argument",
"(",
"'--delimiter'",
",",
"dest",
"=",
"'delimiter'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'Delimiter to split input records.'",
",",
"default",
"=",
"','",
")",
"parser",
".",
"add_argument",
"(",
"'--fields'",
",",
"dest",
"=",
"'fields'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Comma separated list of field names.'",
")",
"parser",
".",
"add_argument",
"(",
"'--load_dt'",
",",
"dest",
"=",
"'load_dt'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Load date in YYYY-MM-DD format.'",
")",
"known_args",
",",
"pipeline_args",
"=",
"parser",
".",
"parse_known_args",
"(",
"argv",
")",
"row_transformer",
"=",
"RowTransformer",
"(",
"delimiter",
"=",
"known_args",
".",
"delimiter",
",",
"header",
"=",
"known_args",
".",
"fields",
",",
"filename",
"=",
"ntpath",
".",
"basename",
"(",
"known_args",
".",
"input",
")",
",",
"load_dt",
"=",
"known_args",
".",
"load_dt",
")",
"p_opts",
"=",
"pipeline_options",
".",
"PipelineOptions",
"(",
"pipeline_args",
")",
"# Initiate the pipeline using the pipeline arguments passed in from the",
"# command line. This includes information including where Dataflow should",
"# store temp files, and what the project id is.",
"with",
"beam",
".",
"Pipeline",
"(",
"options",
"=",
"p_opts",
")",
"as",
"pipeline",
":",
"# Read the file. This is the source of the pipeline. All further",
"# processing starts with lines read from the file. We use the input",
"# argument from the command line.",
"rows",
"=",
"pipeline",
"|",
"\"Read from text file\"",
">>",
"beam",
".",
"io",
".",
"ReadFromText",
"(",
"known_args",
".",
"input",
")",
"# This stage of the pipeline translates from a delimited single row",
"# input to a dictionary object consumable by BigQuery.",
"# It refers to a function we have written. This function will",
"# be run in parallel on different workers using input from the",
"# previous stage of the pipeline.",
"dict_records",
"=",
"rows",
"|",
"\"Convert to BigQuery row\"",
">>",
"beam",
".",
"Map",
"(",
"lambda",
"r",
":",
"row_transformer",
".",
"parse",
"(",
"r",
")",
")",
"# This stage of the pipeline writes the dictionary records into",
"# an existing BigQuery table. ",
"dict_records",
"|",
"\"Write to BigQuery\"",
">>",
"beam",
".",
"io",
".",
"Write",
"(",
"beam",
".",
"io",
".",
"BigQuerySink",
"(",
"known_args",
".",
"output",
",",
"create_disposition",
"=",
"beam",
".",
"io",
".",
"BigQueryDisposition",
".",
"CREATE_NEVER",
",",
"write_disposition",
"=",
"beam",
".",
"io",
".",
"BigQueryDisposition",
".",
"WRITE_APPEND",
")",
")"
] | [
61,
0
] | [
114,
93
] | python | en | ['en', 'en', 'en'] | True |
RowTransformer.parse | (self, row) | This method translates a single delimited record into a dictionary
which can be loaded into BigQuery. It also adds filename and load_dt
fields to the dictionary. | This method translates a single delimited record into a dictionary
which can be loaded into BigQuery. It also adds filename and load_dt
fields to the dictionary. | def parse(self, row):
"""This method translates a single delimited record into a dictionary
which can be loaded into BigQuery. It also adds filename and load_dt
fields to the dictionary."""
# Strip out the return characters and quote characters.
values = re.split(self.delimiter, re.sub(r'[\r\n"]', '', row))
row = dict(zip(self.keys, values))
# Add an additional filename field.
row['filename'] = self.filename
# Add an additional load_dt field.
row['load_dt'] = self.load_dt
return row | [
"def",
"parse",
"(",
"self",
",",
"row",
")",
":",
"# Strip out the return characters and quote characters.",
"values",
"=",
"re",
".",
"split",
"(",
"self",
".",
"delimiter",
",",
"re",
".",
"sub",
"(",
"r'[\\r\\n\"]'",
",",
"''",
",",
"row",
")",
")",
"row",
"=",
"dict",
"(",
"zip",
"(",
"self",
".",
"keys",
",",
"values",
")",
")",
"# Add an additional filename field.",
"row",
"[",
"'filename'",
"]",
"=",
"self",
".",
"filename",
"# Add an additional load_dt field.",
"row",
"[",
"'load_dt'",
"]",
"=",
"self",
".",
"load_dt",
"return",
"row"
] | [
42,
4
] | [
58,
18
] | python | en | ['en', 'en', 'en'] | True |
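A small worked example of what `parse()` produces for one delimited record, reproducing the same regex-based cleanup standalone; the field names and values are made up:

```python
import re

delimiter = ','
keys = ['state', 'gender', 'year', 'name']
row = 'KS,F,1923,"Dorothy"\r\n'

# Strip carriage returns, newlines, and quotes, then split on the delimiter.
values = re.split(delimiter, re.sub(r'[\r\n"]', '', row))
record = dict(zip(keys, values))
record['filename'] = 'usa_names.csv'   # added exactly as parse() does
record['load_dt'] = '2024-01-01'
print(record)
# {'state': 'KS', 'gender': 'F', 'year': '1923', 'name': 'Dorothy',
#  'filename': 'usa_names.csv', 'load_dt': '2024-01-01'}
```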
RandomRec.__init__ | (self, train_file, test_file, uniform=True, output_file=None, sep='\t', output_sep='\t', random_seed=None) |
Random recommendation for Rating Prediction
This algorithm predicts ratings for each user-item
Usage::
>> RandomRec(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param uniform: Indicates whether the ratings are drawn from a uniform sample or not
if False, the ratings are drawn from a normal distribution with the same mean and standard deviation
as the feedback provided in train
:type uniform: bool, default True
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Seed number. Locks random numbers for reproducibility of experiments.
:type random_seed: int, default None
|
Random recommendation for Rating Prediction
This algorithm predicts ratings for each user-item
Usage::
>> RandomRec(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param uniform: Indicates whether the ratings are drawn from a uniform sample or not
if False, the ratings are drawn from a normal distribution with the same mean and standard deviation
as the feedback provided in train
:type uniform: bool, default True
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Seed number. Locks random numbers for reproducibility of experiments.
:type random_seed: int, default None
| def __init__(self, train_file, test_file, uniform=True, output_file=None, sep='\t', output_sep='\t', random_seed=None):
"""
Random recommendation for Rating Prediction
This algorithm predicts ratings for each user-item
Usage::
>> RandomRec(train, test).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param uniform: Indicates whether the ratings are drawn from a uniform sample or not
if False, the ratings are drawn from a normal distribution with the same mean and standard deviation
as the feedback provided in train
:type uniform: bool, default True
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
:param random_seed: Seed number. Locks random numbers for reproducibility of experiments.
:type random_seed: int, default None
"""
super(RandomRec, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
sep=sep, output_sep=output_sep)
if random_seed is not None:
np.random.seed(random_seed)
self.uniform = uniform
self.recommender_name = 'Random Recommender' | [
"def",
"__init__",
"(",
"self",
",",
"train_file",
",",
"test_file",
",",
"uniform",
"=",
"True",
",",
"output_file",
"=",
"None",
",",
"sep",
"=",
"'\\t'",
",",
"output_sep",
"=",
"'\\t'",
",",
"random_seed",
"=",
"None",
")",
":",
"super",
"(",
"RandomRec",
",",
"self",
")",
".",
"__init__",
"(",
"train_file",
"=",
"train_file",
",",
"test_file",
"=",
"test_file",
",",
"output_file",
"=",
"output_file",
",",
"sep",
"=",
"sep",
",",
"output_sep",
"=",
"output_sep",
")",
"if",
"random_seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"random_seed",
")",
"self",
".",
"uniform",
"=",
"uniform",
"self",
".",
"recommender_name",
"=",
"'Random Recommender'"
] | [
19,
4
] | [
66,
52
] | python | en | ['en', 'ja', 'th'] | False |
RandomRec.compute | (self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t') |
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
|
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
| def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseRatingPrediction. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation measures
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only works with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(RandomRec, self).compute(verbose=verbose)
if verbose:
print("prediction_time:: %4f sec" % timed(self.predict))
print('\n')
else:
self.predict()
self.write_predictions()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep) | [
"def",
"compute",
"(",
"self",
",",
"verbose",
"=",
"True",
",",
"metrics",
"=",
"None",
",",
"verbose_evaluation",
"=",
"True",
",",
"as_table",
"=",
"False",
",",
"table_sep",
"=",
"'\\t'",
")",
":",
"super",
"(",
"RandomRec",
",",
"self",
")",
".",
"compute",
"(",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"\"prediction_time:: %4f sec\"",
"%",
"timed",
"(",
"self",
".",
"predict",
")",
")",
"print",
"(",
"'\\n'",
")",
"else",
":",
"self",
".",
"predict",
"(",
")",
"self",
".",
"write_predictions",
"(",
")",
"if",
"self",
".",
"test_file",
"is",
"not",
"None",
":",
"self",
".",
"evaluate",
"(",
"metrics",
",",
"verbose_evaluation",
",",
"as_table",
"=",
"as_table",
",",
"table_sep",
"=",
"table_sep",
")"
] | [
90,
4
] | [
123,
94
] | python | en | ['en', 'ja', 'th'] | False |
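A hedged usage sketch built from the constructor signature shown above. The import path is assumed from the CaseRecommender project layout, and the file paths are placeholders for tab-separated `user item feedback_value` files:

```python
from caserec.recommenders.rating_prediction.random_rec import RandomRec  # assumed path

RandomRec(
    train_file='train.dat',          # placeholder path
    test_file='test.dat',            # placeholder path
    output_file='predictions.dat',   # placeholder path
    random_seed=42,
).compute(as_table=True)             # prints evaluation results as a table
```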
abort | (status, *args, **kwargs) | Raises an :py:exc:`HTTPException` for the given status code or WSGI
application::
abort(404) # 404 Not Found
abort(Response('Hello World'))
Can be passed a WSGI application or a status code. If a status code is
given it's looked up in the list of exceptions and will raise that
exception, if passed a WSGI application it will wrap it in a proxy WSGI
exception and raise that::
abort(404)
abort(Response('Hello World'))
| Raises an :py:exc:`HTTPException` for the given status code or WSGI
application:: | def abort(status, *args, **kwargs):
"""Raises an :py:exc:`HTTPException` for the given status code or WSGI
application::
abort(404) # 404 Not Found
abort(Response('Hello World'))
Can be passed a WSGI application or a status code. If a status code is
given it's looked up in the list of exceptions and will raise that
exception, if passed a WSGI application it will wrap it in a proxy WSGI
exception and raise that::
abort(404)
abort(Response('Hello World'))
"""
return _aborter(status, *args, **kwargs) | [
"def",
"abort",
"(",
"status",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aborter",
"(",
"status",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
755,
0
] | [
771,
44
] | python | en | ['en', 'en', 'en'] | True |
MethodNotAllowed.__init__ | (self, valid_methods=None, description=None) | Takes an optional list of valid HTTP methods;
starting with Werkzeug 0.3 the list will be mandatory. | Takes an optional list of valid HTTP methods;
starting with Werkzeug 0.3 the list will be mandatory. | def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid HTTP methods;
starting with Werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods | [
"def",
"__init__",
"(",
"self",
",",
"valid_methods",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"HTTPException",
".",
"__init__",
"(",
"self",
",",
"description",
")",
"self",
".",
"valid_methods",
"=",
"valid_methods"
] | [
347,
4
] | [
351,
42
] | python | en | ['en', 'en', 'en'] | True |
RequestedRangeNotSatisfiable.__init__ | (self, length=None, units="bytes", description=None) | Takes an optional `Content-Range` header value based on ``length``
parameter.
| Takes an optional `Content-Range` header value based on ``length``
parameter.
| def __init__(self, length=None, units="bytes", description=None):
"""Takes an optional `Content-Range` header value based on ``length``
parameter.
"""
HTTPException.__init__(self, description)
self.length = length
self.units = units | [
"def",
"__init__",
"(",
"self",
",",
"length",
"=",
"None",
",",
"units",
"=",
"\"bytes\"",
",",
"description",
"=",
"None",
")",
":",
"HTTPException",
".",
"__init__",
"(",
"self",
",",
"description",
")",
"self",
".",
"length",
"=",
"length",
"self",
".",
"units",
"=",
"units"
] | [
496,
4
] | [
502,
26
] | python | en | ['en', 'en', 'en'] | True |
before_nothing | (retry_state: "RetryCallState") | Before call strategy that does nothing. | Before call strategy that does nothing. | def before_nothing(retry_state: "RetryCallState") -> None:
"""Before call strategy that does nothing.""" | [
"def",
"before_nothing",
"(",
"retry_state",
":",
"\"RetryCallState\"",
")",
"->",
"None",
":"
] | [
26,
0
] | [
27,
49
] | python | en | ['en', 'en', 'en'] | True |
before_log | (logger: "logging.Logger", log_level: int) | Before call strategy that logs to some logger the attempt. | Before call strategy that logs to some logger the attempt. | def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]:
"""Before call strategy that logs to some logger the attempt."""
def log_it(retry_state: "RetryCallState") -> None:
logger.log(
log_level,
f"Starting call to '{_utils.get_callback_name(retry_state.fn)}', "
f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
)
return log_it | [
"def",
"before_log",
"(",
"logger",
":",
"\"logging.Logger\"",
",",
"log_level",
":",
"int",
")",
"->",
"typing",
".",
"Callable",
"[",
"[",
"\"RetryCallState\"",
"]",
",",
"None",
"]",
":",
"def",
"log_it",
"(",
"retry_state",
":",
"\"RetryCallState\"",
")",
"->",
"None",
":",
"logger",
".",
"log",
"(",
"log_level",
",",
"f\"Starting call to '{_utils.get_callback_name(retry_state.fn)}', \"",
"f\"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.\"",
",",
")",
"return",
"log_it"
] | [
30,
0
] | [
40,
17
] | python | en | ['en', 'en', 'en'] | True |
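A usage sketch showing `before_log` attached to a tenacity `retry` decorator, so each attempt is logged before the wrapped call runs; the `flaky()` body is illustrative only and requires the `tenacity` package:

```python
import logging
from tenacity import retry, stop_after_attempt, before_log

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

@retry(stop=stop_after_attempt(3), before=before_log(logger, logging.DEBUG))
def flaky():
    raise RuntimeError("transient failure")   # always fails, to show the log lines

if __name__ == "__main__":
    try:
        flaky()
    except Exception:
        pass   # tenacity raises RetryError after the final attempt
```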
CurrentThreadExecutor.run_until_future | (self, future) |
Runs the code in the work queue until a result is available from the future.
Should be run from the thread the executor is initialised in.
|
Runs the code in the work queue until a result is available from the future.
Should be run from the thread the executor is initialised in.
| def run_until_future(self, future):
"""
Runs the code in the work queue until a result is available from the future.
Should be run from the thread the executor is initialised in.
"""
# Check we're in the right thread
if threading.current_thread() != self._work_thread:
raise RuntimeError(
"You cannot run CurrentThreadExecutor from a different thread"
)
future.add_done_callback(self._work_queue.put)
# Keep getting and running work items until we get the future we're waiting for
# back via the future's done callback.
try:
while True:
# Get a work item and run it
work_item = self._work_queue.get()
if work_item is future:
return
work_item.run()
del work_item
finally:
self._broken = True | [
"def",
"run_until_future",
"(",
"self",
",",
"future",
")",
":",
"# Check we're in the right thread",
"if",
"threading",
".",
"current_thread",
"(",
")",
"!=",
"self",
".",
"_work_thread",
":",
"raise",
"RuntimeError",
"(",
"\"You cannot run CurrentThreadExecutor from a different thread\"",
")",
"future",
".",
"add_done_callback",
"(",
"self",
".",
"_work_queue",
".",
"put",
")",
"# Keep getting and running work items until we get the future we're waiting for",
"# back via the future's done callback.",
"try",
":",
"while",
"True",
":",
"# Get a work item and run it",
"work_item",
"=",
"self",
".",
"_work_queue",
".",
"get",
"(",
")",
"if",
"work_item",
"is",
"future",
":",
"return",
"work_item",
".",
"run",
"(",
")",
"del",
"work_item",
"finally",
":",
"self",
".",
"_broken",
"=",
"True"
] | [
42,
4
] | [
64,
31
] | python | en | ['en', 'error', 'th'] | False |
Wheel.__init__ | (self, filename: str) |
:raises InvalidWheelFilename: when the filename is invalid for a wheel
|
:raises InvalidWheelFilename: when the filename is invalid for a wheel
| def __init__(self, filename: str) -> None:
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
f"{filename} is not a valid wheel filename."
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.build_tag = wheel_info.group('build')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = {
Tag(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
} | [
"def",
"__init__",
"(",
"self",
",",
"filename",
":",
"str",
")",
"->",
"None",
":",
"wheel_info",
"=",
"self",
".",
"wheel_file_re",
".",
"match",
"(",
"filename",
")",
"if",
"not",
"wheel_info",
":",
"raise",
"InvalidWheelFilename",
"(",
"f\"{filename} is not a valid wheel filename.\"",
")",
"self",
".",
"filename",
"=",
"filename",
"self",
".",
"name",
"=",
"wheel_info",
".",
"group",
"(",
"'name'",
")",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"# we'll assume \"_\" means \"-\" due to wheel naming scheme",
"# (https://github.com/pypa/pip/issues/1150)",
"self",
".",
"version",
"=",
"wheel_info",
".",
"group",
"(",
"'ver'",
")",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"self",
".",
"build_tag",
"=",
"wheel_info",
".",
"group",
"(",
"'build'",
")",
"self",
".",
"pyversions",
"=",
"wheel_info",
".",
"group",
"(",
"'pyver'",
")",
".",
"split",
"(",
"'.'",
")",
"self",
".",
"abis",
"=",
"wheel_info",
".",
"group",
"(",
"'abi'",
")",
".",
"split",
"(",
"'.'",
")",
"self",
".",
"plats",
"=",
"wheel_info",
".",
"group",
"(",
"'plat'",
")",
".",
"split",
"(",
"'.'",
")",
"# All the tag combinations from this file",
"self",
".",
"file_tags",
"=",
"{",
"Tag",
"(",
"x",
",",
"y",
",",
"z",
")",
"for",
"x",
"in",
"self",
".",
"pyversions",
"for",
"y",
"in",
"self",
".",
"abis",
"for",
"z",
"in",
"self",
".",
"plats",
"}"
] | [
21,
4
] | [
44,
9
] | python | en | ['en', 'error', 'th'] | False |
Wheel.get_formatted_file_tags | (self) | Return the wheel's tags as a sorted list of strings. | Return the wheel's tags as a sorted list of strings. | def get_formatted_file_tags(self) -> List[str]:
"""Return the wheel's tags as a sorted list of strings."""
return sorted(str(tag) for tag in self.file_tags) | [
"def",
"get_formatted_file_tags",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"sorted",
"(",
"str",
"(",
"tag",
")",
"for",
"tag",
"in",
"self",
".",
"file_tags",
")"
] | [
46,
4
] | [
48,
57
] | python | en | ['en', 'en', 'en'] | True |
Wheel.support_index_min | (self, tags: List[Tag]) | Return the lowest index that one of the wheel's file_tag combinations
achieves in the given list of supported tags.
For example, if there are 8 supported tags and one of the file tags
is first in the list, then return 0.
:param tags: the PEP 425 tags to check the wheel against, in order
with most preferred first.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
| Return the lowest index that one of the wheel's file_tag combinations
achieves in the given list of supported tags. | def support_index_min(self, tags: List[Tag]) -> int:
"""Return the lowest index that one of the wheel's file_tag combinations
achieves in the given list of supported tags.
For example, if there are 8 supported tags and one of the file tags
is first in the list, then return 0.
:param tags: the PEP 425 tags to check the wheel against, in order
with most preferred first.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
"""
return min(tags.index(tag) for tag in self.file_tags if tag in tags) | [
"def",
"support_index_min",
"(",
"self",
",",
"tags",
":",
"List",
"[",
"Tag",
"]",
")",
"->",
"int",
":",
"return",
"min",
"(",
"tags",
".",
"index",
"(",
"tag",
")",
"for",
"tag",
"in",
"self",
".",
"file_tags",
"if",
"tag",
"in",
"tags",
")"
] | [
50,
4
] | [
63,
76
] | python | en | ['en', 'en', 'en'] | True |
Wheel.find_most_preferred_tag | (
self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
) | Return the priority of the most preferred tag that one of the wheel's file
tag combinations achieves in the given list of supported tags using the given
tag_to_priority mapping, where lower priorities are more-preferred.
This is used in place of support_index_min in some cases in order to avoid
an expensive linear scan of a large list of tags.
:param tags: the PEP 425 tags to check the wheel against.
:param tag_to_priority: a mapping from tag to priority of that tag, where
lower is more preferred.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
| Return the priority of the most preferred tag that one of the wheel's file
tag combinations achieves in the given list of supported tags using the given
tag_to_priority mapping, where lower priorities are more-preferred. | def find_most_preferred_tag(
self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
) -> int:
"""Return the priority of the most preferred tag that one of the wheel's file
tag combinations achieves in the given list of supported tags using the given
tag_to_priority mapping, where lower priorities are more-preferred.
This is used in place of support_index_min in some cases in order to avoid
an expensive linear scan of a large list of tags.
:param tags: the PEP 425 tags to check the wheel against.
:param tag_to_priority: a mapping from tag to priority of that tag, where
lower is more preferred.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
"""
return min(
tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
) | [
"def",
"find_most_preferred_tag",
"(",
"self",
",",
"tags",
":",
"List",
"[",
"Tag",
"]",
",",
"tag_to_priority",
":",
"Dict",
"[",
"Tag",
",",
"int",
"]",
")",
"->",
"int",
":",
"return",
"min",
"(",
"tag_to_priority",
"[",
"tag",
"]",
"for",
"tag",
"in",
"self",
".",
"file_tags",
"if",
"tag",
"in",
"tag_to_priority",
")"
] | [
65,
4
] | [
84,
9
] | python | en | ['en', 'en', 'en'] | True |
Wheel.supported | (self, tags: Iterable[Tag]) | Return whether the wheel is compatible with one of the given tags.
:param tags: the PEP 425 tags to check the wheel against.
| Return whether the wheel is compatible with one of the given tags. | def supported(self, tags: Iterable[Tag]) -> bool:
"""Return whether the wheel is compatible with one of the given tags.
:param tags: the PEP 425 tags to check the wheel against.
"""
return not self.file_tags.isdisjoint(tags) | [
"def",
"supported",
"(",
"self",
",",
"tags",
":",
"Iterable",
"[",
"Tag",
"]",
")",
"->",
"bool",
":",
"return",
"not",
"self",
".",
"file_tags",
".",
"isdisjoint",
"(",
"tags",
")"
] | [
86,
4
] | [
91,
50
] | python | en | ['en', 'en', 'en'] | True |
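A hedged usage sketch of the `Wheel` model defined above. It is pip's internal class, so the import path below is internal and may change between pip versions; the filename is just an example:

```python
from pip._internal.models.wheel import Wheel   # internal pip API, version-dependent

w = Wheel("pip-21.1.1-py3-none-any.whl")
print(w.name, w.version)               # pip 21.1.1
print(w.pyversions, w.abis, w.plats)   # ['py3'] ['none'] ['any']
print(w.get_formatted_file_tags())     # ['py3-none-any']
# supported() / support_index_min() can then rank this wheel against the
# interpreter's supported tag list.
```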
dc | (result, reference) | r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
| r"""
Dice coefficient | def dc(result, reference):
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
intersection = np.count_nonzero(result & reference)
size_i1 = np.count_nonzero(result)
size_i2 = np.count_nonzero(reference)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc | [
"def",
"dc",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"intersection",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"reference",
")",
"size_i1",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
")",
"size_i2",
"=",
"np",
".",
"count_nonzero",
"(",
"reference",
")",
"try",
":",
"dc",
"=",
"2.",
"*",
"intersection",
"/",
"float",
"(",
"size_i1",
"+",
"size_i2",
")",
"except",
"ZeroDivisionError",
":",
"dc",
"=",
"0.0",
"return",
"dc"
] | [
14,
0
] | [
61,
13
] | python | cy | ['en', 'cy', 'hi'] | False |
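A short worked example of the Dice computation above, using the plain bool dtype (np.bool is deprecated in current NumPy):

import numpy as np

a = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)  # 5 foreground voxels
b = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=bool)  # 4 foreground voxels
intersection = np.count_nonzero(a & b)                       # 4 shared voxels
dice = 2.0 * intersection / (np.count_nonzero(a) + np.count_nonzero(b))
print(dice)  # 2*4 / (5+4) = 0.888...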
jc | (result, reference) |
Jaccard coefficient
Computes the Jaccard coefficient between the binary objects in two images.
Parameters
----------
result: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
jc: float
The Jaccard coefficient between the object(s) in `result` and the
object(s) in `reference`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
|
Jaccard coefficient | def jc(result, reference):
"""
Jaccard coefficient
Computes the Jaccard coefficient between the binary objects in two images.
Parameters
----------
result: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
jc: float
The Jaccard coefficient between the object(s) in `result` and the
object(s) in `reference`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
intersection = np.count_nonzero(result & reference)
union = np.count_nonzero(result | reference)
jc = float(intersection) / float(union)
return jc | [
"def",
"jc",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"intersection",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"reference",
")",
"union",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"|",
"reference",
")",
"jc",
"=",
"float",
"(",
"intersection",
")",
"/",
"float",
"(",
"union",
")",
"return",
"jc"
] | [
64,
0
] | [
97,
13
] | python | en | ['en', 'error', 'th'] | False |
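A matching worked example for the Jaccard coefficient, on the same pair of arrays as the Dice example; the identity DC = 2*JC/(1+JC) ties the two metrics together:

import numpy as np

a = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
b = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=bool)
intersection = np.count_nonzero(a & b)   # 4
union = np.count_nonzero(a | b)          # 5
jaccard = intersection / float(union)    # 0.8
print(jaccard, 2 * jaccard / (1 + jaccard))  # 0.8 and the equivalent Dice value 0.888...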
precision | (result, reference) |
Precision.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
precision : float
The precision between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of retrieved instances that are relevant. The
precision is not symmetric.
See also
--------
:func:`recall`
Notes
-----
Not symmetric. The inverse of the precision is :func:`recall`.
High precision means that an algorithm returned substantially more relevant results than irrelevant.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
|
Precision. | def precision(result, reference):
"""
Precision.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
precision : float
The precision between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of retrieved instances that are relevant. The
precision is not symmetric.
See also
--------
:func:`recall`
Notes
-----
Not symmetric. The inverse of the precision is :func:`recall`.
High precision means that an algorithm returned substantially more relevant results than irrelevant.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
tp = np.count_nonzero(result & reference)
fp = np.count_nonzero(result & ~reference)
try:
precision = tp / float(tp + fp)
except ZeroDivisionError:
precision = 0.0
return precision | [
"def",
"precision",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"tp",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"reference",
")",
"fp",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"~",
"reference",
")",
"try",
":",
"precision",
"=",
"tp",
"/",
"float",
"(",
"tp",
"+",
"fp",
")",
"except",
"ZeroDivisionError",
":",
"precision",
"=",
"0.0",
"return",
"precision"
] | [
100,
0
] | [
145,
20
] | python | en | ['en', 'error', 'th'] | False |
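A small worked example of the precision computation above from confusion counts:

import numpy as np

result    = np.array([1, 1, 1, 0, 0], dtype=bool)   # what the algorithm returned
reference = np.array([1, 0, 1, 1, 0], dtype=bool)   # ground truth
tp = np.count_nonzero(result & reference)            # 2 true positives
fp = np.count_nonzero(result & ~reference)           # 1 false positive (index 1)
print(tp / float(tp + fp))                           # 0.666...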
recall | (result, reference) |
Recall.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
recall : float
The recall between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of relevant instances that are retrieved. The
recall is not symmetric.
See also
--------
:func:`precision`
Notes
-----
Not symmetric. The inverse of the recall is :func:`precision`.
High recall means that an algorithm returned most of the relevant results.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
|
Recall. | def recall(result, reference):
"""
Recall.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
recall : float
The recall between two binary datasets, here mostly binary objects in images,
which is defined as the fraction of relevant instances that are retrieved. The
recall is not symmetric.
See also
--------
:func:`precision`
Notes
-----
Not symmetric. The inverse of the recall is :func:`precision`.
High recall means that an algorithm returned most of the relevant results.
References
----------
.. [1] http://en.wikipedia.org/wiki/Precision_and_recall
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
tp = np.count_nonzero(result & reference)
fn = np.count_nonzero(~result & reference)
try:
recall = tp / float(tp + fn)
except ZeroDivisionError:
recall = 0.0
return recall | [
"def",
"recall",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"tp",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"reference",
")",
"fn",
"=",
"np",
".",
"count_nonzero",
"(",
"~",
"result",
"&",
"reference",
")",
"try",
":",
"recall",
"=",
"tp",
"/",
"float",
"(",
"tp",
"+",
"fn",
")",
"except",
"ZeroDivisionError",
":",
"recall",
"=",
"0.0",
"return",
"recall"
] | [
148,
0
] | [
193,
17
] | python | en | ['en', 'error', 'th'] | False |
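The same toy arrays illustrate recall; only the denominator changes from false positives to false negatives:

import numpy as np

result    = np.array([1, 1, 1, 0, 0], dtype=bool)
reference = np.array([1, 0, 1, 1, 0], dtype=bool)
tp = np.count_nonzero(result & reference)    # 2 true positives
fn = np.count_nonzero(~result & reference)   # 1 false negative (index 3 was missed)
print(tp / float(tp + fn))                   # 0.666...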
sensitivity | (result, reference) |
Sensitivity.
Same as :func:`recall`, see there for a detailed description.
See also
--------
:func:`specificity`
|
Sensitivity.
Same as :func:`recall`, see there for a detailed description. | def sensitivity(result, reference):
"""
Sensitivity.
Same as :func:`recall`, see there for a detailed description.
See also
--------
:func:`specificity`
"""
return recall(result, reference) | [
"def",
"sensitivity",
"(",
"result",
",",
"reference",
")",
":",
"return",
"recall",
"(",
"result",
",",
"reference",
")"
] | [
196,
0
] | [
205,
36
] | python | en | ['en', 'error', 'th'] | False |
specificity | (result, reference) |
Specificity.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
specificity : float
The specificity between two binary datasets, here mostly binary objects in images,
which denotes the fraction of correctly returned negatives. The
specificity is not symmetric.
See also
--------
:func:`sensitivity`
Notes
-----
Not symmetric. The complement of the specificity is :func:`sensitivity`.
High specificity means that an algorithm correctly rejected most of the irrelevant results.
References
----------
.. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
|
Specificity. | def specificity(result, reference):
"""
Specificity.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
specificity : float
The specificity between two binary datasets, here mostly binary objects in images,
which denotes the fraction of correctly returned negatives. The
specificity is not symmetric.
See also
--------
:func:`sensitivity`
Notes
-----
Not symmetric. The complement of the specificity is :func:`sensitivity`.
High specificity means that an algorithm correctly rejected most of the irrelevant results.
References
----------
.. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
tn = np.count_nonzero(~result & ~reference)
fp = np.count_nonzero(result & ~reference)
try:
specificity = tn / float(tn + fp)
except ZeroDivisionError:
specificity = 0.0
return specificity | [
"def",
"specificity",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"tn",
"=",
"np",
".",
"count_nonzero",
"(",
"~",
"result",
"&",
"~",
"reference",
")",
"fp",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
"&",
"~",
"reference",
")",
"try",
":",
"specificity",
"=",
"tn",
"/",
"float",
"(",
"tn",
"+",
"fp",
")",
"except",
"ZeroDivisionError",
":",
"specificity",
"=",
"0.0",
"return",
"specificity"
] | [
208,
0
] | [
253,
22
] | python | en | ['en', 'error', 'th'] | False |
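And the corresponding specificity on the same toy arrays:

import numpy as np

result    = np.array([1, 1, 1, 0, 0], dtype=bool)
reference = np.array([1, 0, 1, 1, 0], dtype=bool)
tn = np.count_nonzero(~result & ~reference)  # 1 true negative (index 4)
fp = np.count_nonzero(result & ~reference)   # 1 false positive (index 1)
print(tn / float(tn + fp))                   # 0.5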
true_negative_rate | (result, reference) |
True negative rate.
Same as :func:`sensitivity`, see there for a detailed description.
See also
--------
:func:`true_positive_rate`
:func:`positive_predictive_value`
|
True negative rate.
Same as :func:`sensitivity`, see there for a detailed description. | def true_negative_rate(result, reference):
"""
True negative rate.
Same as :func:`sensitivity`, see there for a detailed description.
See also
--------
:func:`true_positive_rate`
:func:`positive_predictive_value`
"""
return sensitivity(result, reference) | [
"def",
"true_negative_rate",
"(",
"result",
",",
"reference",
")",
":",
"return",
"sensitivity",
"(",
"result",
",",
"reference",
")"
] | [
256,
0
] | [
266,
41
] | python | en | ['en', 'error', 'th'] | False |
true_positive_rate | (result, reference) |
True positive rate.
Same as :func:`recall`, see there for a detailed description.
See also
--------
:func:`positive_predictive_value`
:func:`true_negative_rate`
|
True positive rate.
Same as :func:`recall`, see there for a detailed description. | def true_positive_rate(result, reference):
"""
True positive rate.
Same as :func:`recall`, see there for a detailed description.
See also
--------
:func:`positive_predictive_value`
:func:`true_negative_rate`
"""
return recall(result, reference) | [
"def",
"true_positive_rate",
"(",
"result",
",",
"reference",
")",
":",
"return",
"recall",
"(",
"result",
",",
"reference",
")"
] | [
269,
0
] | [
279,
36
] | python | en | ['en', 'error', 'th'] | False |
positive_predictive_value | (result, reference) |
Positive predictive value.
Same as :func:`precision`, see there for a detailed description.
See also
--------
:func:`true_positive_rate`
:func:`true_negative_rate`
|
Positive predictive value.
Same as :func:`precision`, see there for a detailed description. | def positive_predictive_value(result, reference):
"""
Positive predictive value.
Same as :func:`precision`, see there for a detailed description.
See also
--------
:func:`true_positive_rate`
:func:`true_negative_rate`
"""
return precision(result, reference) | [
"def",
"positive_predictive_value",
"(",
"result",
",",
"reference",
")",
":",
"return",
"precision",
"(",
"result",
",",
"reference",
")"
] | [
282,
0
] | [
292,
39
] | python | en | ['en', 'error', 'th'] | False |
hd | (result, reference, voxelspacing=None, connectivity=1) |
Hausdorff Distance.
Computes the (symmetric) Hausdorff Distance (HD) between the binary objects in two
images. It is defined as the maximum surface distance between the objects.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
Note that the connectivity influences the result in the case of the Hausdorff distance.
Returns
-------
hd : float
The symmetric Hausdorff Distance between the object(s) in ```result``` and the
object(s) in ```reference```. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`asd`
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
|
Hausdorff Distance. | def hd(result, reference, voxelspacing=None, connectivity=1):
"""
Hausdorff Distance.
Computes the (symmetric) Hausdorff Distance (HD) between the binary objects in two
images. It is defined as the maximum surface distance between the objects.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
Note that the connectivity influences the result in the case of the Hausdorff distance.
Returns
-------
hd : float
The symmetric Hausdorff Distance between the object(s) in ```result``` and the
object(s) in ```reference```. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`asd`
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
hd1 = __surface_distances(result, reference, voxelspacing, connectivity).max()
hd2 = __surface_distances(reference, result, voxelspacing, connectivity).max()
hd = max(hd1, hd2)
return hd | [
"def",
"hd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"hd1",
"=",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
".",
"max",
"(",
")",
"hd2",
"=",
"__surface_distances",
"(",
"reference",
",",
"result",
",",
"voxelspacing",
",",
"connectivity",
")",
".",
"max",
"(",
")",
"hd",
"=",
"max",
"(",
"hd1",
",",
"hd2",
")",
"return",
"hd"
] | [
295,
0
] | [
340,
13
] | python | en | ['en', 'error', 'th'] | False |
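The private __surface_distances helper used above is not included in this excerpt; the sketch below shows one common way such a helper is implemented (surface extraction by binary erosion plus a Euclidean distance transform). It is an illustration under that assumption, not necessarily the library's exact code:

import numpy as np
from scipy.ndimage import binary_erosion, distance_transform_edt, generate_binary_structure

def surface_distances(result, reference, voxelspacing=None, connectivity=1):
    # Directed distances from result's surface voxels to the nearest reference surface voxel.
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))
    footprint = generate_binary_structure(result.ndim, connectivity)
    # a surface voxel is a foreground voxel removed by one erosion step
    result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
    reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
    # distance of every voxel to the nearest reference surface voxel
    dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
    return dt[result_border]

a = np.zeros((5, 5), dtype=bool); a[1:4, 1:4] = True   # 3x3 square
b = np.zeros((5, 5), dtype=bool); b[2, 2] = True       # single voxel
hd_value = max(surface_distances(a, b).max(), surface_distances(b, a).max())
print(hd_value)  # sqrt(2) ~ 1.414, reached at the square's corners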
assd | (result, reference, voxelspacing=None, connectivity=1) |
Average symmetric surface distance.
Computes the average symmetric surface distance (ASD) between the binary objects in
two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
assd : float
The average symmetric surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`asd`
:func:`hd`
Notes
-----
This is a real metric, obtained by calling and averaging
>>> asd(result, reference)
and
>>> asd(reference, result)
The binary images can therefore be supplied in any order.
|
Average symmetric surface distance. | def assd(result, reference, voxelspacing=None, connectivity=1):
"""
Average symmetric surface distance.
Computes the average symmetric surface distance (ASD) between the binary objects in
two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
assd : float
The average symmetric surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing of
elements along each dimension, which is usually given in mm.
See also
--------
:func:`asd`
:func:`hd`
Notes
-----
This is a real metric, obtained by calling and averaging
>>> asd(result, reference)
and
>>> asd(reference, result)
The binary images can therefore be supplied in any order.
"""
assd = np.mean(
(asd(result, reference, voxelspacing, connectivity), asd(reference, result, voxelspacing, connectivity)))
return assd | [
"def",
"assd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"assd",
"=",
"np",
".",
"mean",
"(",
"(",
"asd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
",",
"asd",
"(",
"reference",
",",
"result",
",",
"voxelspacing",
",",
"connectivity",
")",
")",
")",
"return",
"assd"
] | [
343,
0
] | [
396,
15
] | python | en | ['en', 'error', 'th'] | False |
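Note that averaging the two directed means, as done above, is not the same as pooling all surface distances into a single mean when the two surfaces contain different numbers of voxels; a tiny numeric illustration with made-up distance values:

import numpy as np

d_ab = np.array([1.0, 1.0, 1.0, 1.0])   # hypothetical directed distances, A -> B
d_ba = np.array([3.0, 3.0])             # hypothetical directed distances, B -> A
print(np.mean((d_ab.mean(), d_ba.mean())))    # 2.0, the average of the two directed means
print(np.concatenate((d_ab, d_ba)).mean())    # 1.666..., the pooled mean differs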
asd | (result, reference, voxelspacing=None, connectivity=1) |
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, an 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
, as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, whose surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
|
Average surface distance metric. | def asd(result, reference, voxelspacing=None, connectivity=1):
"""
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, an 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
, as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, whose surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
"""
sds = __surface_distances(result, reference, voxelspacing, connectivity)
asd = sds.mean()
return asd | [
"def",
"asd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"sds",
"=",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
"asd",
"=",
"sds",
".",
"mean",
"(",
")",
"return",
"asd"
] | [
399,
0
] | [
506,
14
] | python | en | ['en', 'error', 'th'] | False |
ravd | (result, reference) |
Relative absolute volume difference.
Compute the relative absolute volume difference between the (joined) binary objects
in the two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
ravd : float
The relative absolute volume difference between the object(s) in ``result``
and the object(s) in ``reference``. This is a percentage value in the range
:math:`[-1.0, +inf]` for which a :math:`0` denotes an ideal score.
Raises
------
RuntimeError
If the reference object is empty.
See also
--------
:func:`dc`
:func:`precision`
:func:`recall`
Notes
-----
This is not a real metric, as it is directed. Negative values denote a smaller
and positive values a larger volume than the reference.
This implementation does not check, whether the two supplied arrays are of the same
size.
Examples
--------
Considering the following inputs
>>> import numpy as np
>>> arr1 = np.asarray([[0,1,0],[1,1,1],[0,1,0]])
>>> arr1
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
>>> arr2 = np.asarray([[0,1,0],[1,0,1],[0,1,0]])
>>> arr2
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
comparing `arr1` to `arr2` we get
>>> ravd(arr1, arr2)
-0.2
and reversing the inputs the directedness of the metric becomes evident
>>> ravd(arr2, arr1)
0.25
It is important to keep in mind that a perfect score of `0` does not mean that the
binary objects fit exactly, as only the volumes are compared:
>>> arr1 = np.asarray([1,0,0])
>>> arr2 = np.asarray([0,0,1])
>>> ravd(arr1, arr2)
0.0
|
Relative absolute volume difference. | def ravd(result, reference):
"""
Relative absolute volume difference.
Compute the relative absolute volume difference between the (joined) binary objects
in the two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
ravd : float
The relative absolute volume difference between the object(s) in ``result``
and the object(s) in ``reference``. This is a percentage value in the range
:math:`[-1.0, +inf]` for which a :math:`0` denotes an ideal score.
Raises
------
RuntimeError
If the reference object is empty.
See also
--------
:func:`dc`
:func:`precision`
:func:`recall`
Notes
-----
This is not a real metric, as it is directed. Negative values denote a smaller
and positive values a larger volume than the reference.
This implementation does not check, whether the two supplied arrays are of the same
size.
Examples
--------
Considering the following inputs
>>> import numpy as np
>>> arr1 = np.asarray([[0,1,0],[1,1,1],[0,1,0]])
>>> arr1
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
>>> arr2 = np.asarray([[0,1,0],[1,0,1],[0,1,0]])
>>> arr2
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
comparing `arr1` to `arr2` we get
>>> ravd(arr1, arr2)
-0.2
and reversing the inputs the directedness of the metric becomes evident
>>> ravd(arr2, arr1)
0.25
It is important to keep in mind that a perfect score of `0` does not mean that the
binary objects fit exactly, as only the volumes are compared:
>>> arr1 = np.asarray([1,0,0])
>>> arr2 = np.asarray([0,0,1])
>>> ravd(arr1, arr2)
0.0
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
vol1 = np.count_nonzero(result)
vol2 = np.count_nonzero(reference)
if 0 == vol2:
raise RuntimeError('The second supplied array does not contain any binary object.')
return (vol1 - vol2) / float(vol2) | [
"def",
"ravd",
"(",
"result",
",",
"reference",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"vol1",
"=",
"np",
".",
"count_nonzero",
"(",
"result",
")",
"vol2",
"=",
"np",
".",
"count_nonzero",
"(",
"reference",
")",
"if",
"0",
"==",
"vol2",
":",
"raise",
"RuntimeError",
"(",
"'The second supplied array does not contain any binary object.'",
")",
"return",
"(",
"vol1",
"-",
"vol2",
")",
"/",
"float",
"(",
"vol2",
")"
] | [
509,
0
] | [
594,
38
] | python | en | ['en', 'error', 'th'] | False |
volume_correlation | (results, references) | r"""
Volume correlation.
Computes the linear correlation in binary object volume between the
contents of the successive binary images supplied. Measured through
the Pearson product-moment correlation coefficient.
Parameters
----------
results : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
references : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
The order must be the same as for ``results``.
Returns
-------
r : float
The correlation coefficient between -1 and 1.
p : float
The two-sided p value.
| r"""
Volume correlation. | def volume_correlation(results, references):
r"""
Volume correlation.
Computes the linear correlation in binary object volume between the
contents of the successive binary images supplied. Measured through
the Pearson product-moment correlation coefficient.
Parameters
----------
results : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
references : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
The order must be the same as for ``results``.
Returns
-------
r : float
The correlation coefficient between -1 and 1.
p : float
The two-sided p value.
"""
results = np.atleast_2d(np.array(results).astype(np.bool))
references = np.atleast_2d(np.array(references).astype(np.bool))
results_volumes = [np.count_nonzero(r) for r in results]
references_volumes = [np.count_nonzero(r) for r in references]
return pearsonr(results_volumes, references_volumes) | [
"def",
"volume_correlation",
"(",
"results",
",",
"references",
")",
":",
"results",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"array",
"(",
"results",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"references",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"array",
"(",
"references",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"results_volumes",
"=",
"[",
"np",
".",
"count_nonzero",
"(",
"r",
")",
"for",
"r",
"in",
"results",
"]",
"references_volumes",
"=",
"[",
"np",
".",
"count_nonzero",
"(",
"r",
")",
"for",
"r",
"in",
"references",
"]",
"return",
"pearsonr",
"(",
"results_volumes",
",",
"references_volumes",
")"
] | [
597,
0
] | [
629,
56
] | python | cy | ['en', 'cy', 'hi'] | False |
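A minimal usage sketch with hypothetical segmentation series, showing the volume extraction and the pearsonr call:

import numpy as np
from scipy.stats import pearsonr

# three hypothetical result/reference pairs, e.g. one segmentation per time point
results    = [np.ones((4, 4), dtype=bool), np.zeros((4, 4), dtype=bool), np.eye(4, dtype=bool)]
references = [np.ones((4, 4), dtype=bool), np.zeros((4, 4), dtype=bool), np.eye(4, dtype=bool)]
result_volumes    = [np.count_nonzero(r) for r in results]      # [16, 0, 4]
reference_volumes = [np.count_nonzero(r) for r in references]   # [16, 0, 4]
r, p = pearsonr(result_volumes, reference_volumes)
print(r)  # 1.0 -> the volumes agree perfectly across the series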
volume_change_correlation | (results, references) | r"""
Volume change correlation.
Computes the linear correlation of change in binary object volume between
the contents of the successive binary images supplied. Measured through
the Pearson product-moment correlation coefficient.
Parameters
----------
results : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
references : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
The order must be the same as for ``results``.
Returns
-------
r : float
The correlation coefficient between -1 and 1.
p : float
The two-sided p value.
| r"""
Volume change correlation. | def volume_change_correlation(results, references):
r"""
Volume change correlation.
Computes the linear correlation of change in binary object volume between
the contents of the successive binary images supplied. Measured through
the Pearson product-moment correlation coefficient.
Parameters
----------
results : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
references : sequence of array_like
Ordered list of input data containing objects. Each array_like will be
converted into binary: background where 0, object everywhere else.
The order must be the same as for ``results``.
Returns
-------
r : float
The correlation coefficient between -1 and 1.
p : float
The two-sided p value.
"""
results = np.atleast_2d(np.array(results).astype(np.bool))
references = np.atleast_2d(np.array(references).astype(np.bool))
results_volumes = np.asarray([np.count_nonzero(r) for r in results])
references_volumes = np.asarray([np.count_nonzero(r) for r in references])
results_volumes_changes = results_volumes[1:] - results_volumes[:-1]
references_volumes_changes = references_volumes[1:] - references_volumes[:-1]
return pearsonr(results_volumes_changes,
references_volumes_changes) | [
"def",
"volume_change_correlation",
"(",
"results",
",",
"references",
")",
":",
"results",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"array",
"(",
"results",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"references",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"array",
"(",
"references",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"results_volumes",
"=",
"np",
".",
"asarray",
"(",
"[",
"np",
".",
"count_nonzero",
"(",
"r",
")",
"for",
"r",
"in",
"results",
"]",
")",
"references_volumes",
"=",
"np",
".",
"asarray",
"(",
"[",
"np",
".",
"count_nonzero",
"(",
"r",
")",
"for",
"r",
"in",
"references",
"]",
")",
"results_volumes_changes",
"=",
"results_volumes",
"[",
"1",
":",
"]",
"-",
"results_volumes",
"[",
":",
"-",
"1",
"]",
"references_volumes_changes",
"=",
"references_volumes",
"[",
"1",
":",
"]",
"-",
"references_volumes",
"[",
":",
"-",
"1",
"]",
"return",
"pearsonr",
"(",
"results_volumes_changes",
",",
"references_volumes_changes",
")"
] | [
632,
0
] | [
668,
47
] | python | cy | ['en', 'cy', 'hi'] | False |
obj_assd | (result, reference, voxelspacing=None, connectivity=1) |
Average symmetric surface distance.
Computes the average symmetric surface distance (ASSD) between the binary objects in
two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object as well as when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
assd : float
The average symmetric surface distance between all mutually existing distinct
binary object(s) in ``result`` and ``reference``. The distance unit is the same as for
the spacing of elements along each dimension, which is usually given in mm.
See also
--------
:func:`obj_asd`
Notes
-----
This is a real metric, obtained by calling and averaging
>>> obj_asd(result, reference)
and
>>> obj_asd(reference, result)
The binary images can therefore be supplied in any order.
|
Average symmetric surface distance. | def obj_assd(result, reference, voxelspacing=None, connectivity=1):
"""
Average symmetric surface distance.
Computes the average symmetric surface distance (ASSD) between the binary objects in
two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object as well as when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
assd : float
The average symmetric surface distance between all mutually existing distinct
binary object(s) in ``result`` and ``reference``. The distance unit is the same as for
the spacing of elements along each dimension, which is usually given in mm.
See also
--------
:func:`obj_asd`
Notes
-----
This is a real metric, obtained by calling and averaging
>>> obj_asd(result, reference)
and
>>> obj_asd(reference, result)
The binary images can therefore be supplied in any order.
"""
assd = np.mean((obj_asd(result, reference, voxelspacing, connectivity),
obj_asd(reference, result, voxelspacing, connectivity)))
return assd | [
"def",
"obj_assd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"assd",
"=",
"np",
".",
"mean",
"(",
"(",
"obj_asd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
",",
"connectivity",
")",
",",
"obj_asd",
"(",
"reference",
",",
"result",
",",
"voxelspacing",
",",
"connectivity",
")",
")",
")",
"return",
"assd"
] | [
671,
0
] | [
724,
15
] | python | en | ['en', 'error', 'th'] | False |
obj_asd | (result, reference, voxelspacing=None, connectivity=1) |
Average surface distance between objects.
First correspondences between distinct binary objects in reference and result are
established. Then the average surface distance is only computed between corresponding
objects. Correspondence is defined as unique and at least one voxel overlap.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object as well as when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between all mutually existing distinct binary
object(s) in ``result`` and ``reference``. The distance unit is the same as for the
spacing of elements along each dimension, which is usually given in mm.
See also
--------
:func:`obj_assd`
:func:`obj_tpr`
:func:`obj_fpr`
Notes
-----
This is not a real metric, as it is directed. See `obj_assd` for a real metric of this.
For the understanding of this metric, both the notions of connectedness and surface
distance are essential. Please see :func:`obj_tpr` and :func:`obj_fpr` for more
information on the first and :func:`asd` on the second.
Examples
--------
>>> arr1 = np.asarray([[1,1,1],[1,1,1],[1,1,1]])
>>> arr2 = np.asarray([[0,1,0],[0,1,0],[0,1,0]])
>>> arr1
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> arr2
array([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]])
>>> obj_asd(arr1, arr2)
1.5
>>> obj_asd(arr2, arr1)
0.333333333333
With the `voxelspacing` parameter, the distances between the voxels can be set for
each dimension separately:
>>> obj_asd(arr1, arr2, voxelspacing=(1,2))
1.5
>>> obj_asd(arr2, arr1, voxelspacing=(1,2))
0.333333333333
More examples depicting the notion of object connectedness:
>>> arr1 = np.asarray([[1,0,1],[1,0,0],[0,0,0]])
>>> arr2 = np.asarray([[1,0,1],[1,0,0],[0,0,1]])
>>> arr1
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 0]])
>>> arr2
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 1]])
>>> obj_asd(arr1, arr2)
0.0
>>> obj_asd(arr2, arr1)
0.0
>>> arr1 = np.asarray([[1,0,1],[1,0,1],[0,0,1]])
>>> arr2 = np.asarray([[1,0,1],[1,0,0],[0,0,1]])
>>> arr1
array([[1, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> arr2
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 1]])
>>> obj_asd(arr1, arr2)
0.6
>>> obj_asd(arr2, arr1)
0.0
The influence of the `connectivity` parameter can be seen in the following example, where
with the (default) connectivity of `1` the first array is considered to contain two
objects, while with an increased connectivity of `2`, just one large object is
detected.
>>> arr1 = np.asarray([[1,0,0],[0,1,1],[0,1,1]])
>>> arr2 = np.asarray([[1,0,0],[0,0,0],[0,0,0]])
>>> arr1
array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
>>> arr2
array([[1, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> obj_asd(arr1, arr2)
0.0
>>> obj_asd(arr1, arr2, connectivity=2)
1.742955328
Note that the connectivity also influences the notion of what is considered an object
surface voxel.
|
Average surface distance between objects. | def obj_asd(result, reference, voxelspacing=None, connectivity=1):
"""
Average surface distance between objects.
First correspondences between distinct binary objects in reference and result are
established. Then the average surface distance is only computed between corresponding
objects. Correspondence is defined as unique and at least one voxel overlap.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object as well as when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between all mutually existing distinct binary
object(s) in ``result`` and ``reference``. The distance unit is the same as for the
spacing of elements along each dimension, which is usually given in mm.
See also
--------
:func:`obj_assd`
:func:`obj_tpr`
:func:`obj_fpr`
Notes
-----
This is not a real metric, as it is directed. See `obj_assd` for a real metric of this.
For the understanding of this metric, both the notions of connectedness and surface
distance are essential. Please see :func:`obj_tpr` and :func:`obj_fpr` for more
information on the first and :func:`asd` on the second.
Examples
--------
>>> arr1 = np.asarray([[1,1,1],[1,1,1],[1,1,1]])
>>> arr2 = np.asarray([[0,1,0],[0,1,0],[0,1,0]])
>>> arr1
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> arr2
array([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]])
>>> obj_asd(arr1, arr2)
1.5
>>> obj_asd(arr2, arr1)
0.333333333333
With the `voxelspacing` parameter, the distances between the voxels can be set for
each dimension separately:
>>> obj_asd(arr1, arr2, voxelspacing=(1,2))
1.5
>>> obj_asd(arr2, arr1, voxelspacing=(1,2))
0.333333333333
More examples depicting the notion of object connectedness:
>>> arr1 = np.asarray([[1,0,1],[1,0,0],[0,0,0]])
>>> arr2 = np.asarray([[1,0,1],[1,0,0],[0,0,1]])
>>> arr1
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 0]])
>>> arr2
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 1]])
>>> obj_asd(arr1, arr2)
0.0
>>> obj_asd(arr2, arr1)
0.0
>>> arr1 = np.asarray([[1,0,1],[1,0,1],[0,0,1]])
>>> arr2 = np.asarray([[1,0,1],[1,0,0],[0,0,1]])
>>> arr1
array([[1, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> arr2
array([[1, 0, 1],
[1, 0, 0],
[0, 0, 1]])
>>> obj_asd(arr1, arr2)
0.6
>>> obj_asd(arr2, arr1)
0.0
The influence of the `connectivity` parameter can be seen in the following example, where
with the (default) connectivity of `1` the first array is considered to contain two
objects, while with an increased connectivity of `2`, just one large object is
detected.
>>> arr1 = np.asarray([[1,0,0],[0,1,1],[0,1,1]])
>>> arr2 = np.asarray([[1,0,0],[0,0,0],[0,0,0]])
>>> arr1
array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
>>> arr2
array([[1, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> obj_asd(arr1, arr2)
0.0
>>> obj_asd(arr1, arr2, connectivity=2)
1.742955328
Note that the connectivity also influences the notion of what is considered an object
surface voxel.
"""
sds = list()
labelmap1, labelmap2, _a, _b, mapping = __distinct_binary_object_correspondences(result, reference, connectivity)
slicers1 = find_objects(labelmap1)
slicers2 = find_objects(labelmap2)
for lid2, lid1 in mapping.items():
window = __combine_windows(slicers1[lid1 - 1], slicers2[lid2 - 1])
object1 = labelmap1[window] == lid1
object2 = labelmap2[window] == lid2
sds.extend(__surface_distances(object1, object2, voxelspacing, connectivity))
asd = np.mean(sds)
return asd | [
"def",
"obj_asd",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"sds",
"=",
"list",
"(",
")",
"labelmap1",
",",
"labelmap2",
",",
"_a",
",",
"_b",
",",
"mapping",
"=",
"__distinct_binary_object_correspondences",
"(",
"result",
",",
"reference",
",",
"connectivity",
")",
"slicers1",
"=",
"find_objects",
"(",
"labelmap1",
")",
"slicers2",
"=",
"find_objects",
"(",
"labelmap2",
")",
"for",
"lid2",
",",
"lid1",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"window",
"=",
"__combine_windows",
"(",
"slicers1",
"[",
"lid1",
"-",
"1",
"]",
",",
"slicers2",
"[",
"lid2",
"-",
"1",
"]",
")",
"object1",
"=",
"labelmap1",
"[",
"window",
"]",
"==",
"lid1",
"object2",
"=",
"labelmap2",
"[",
"window",
"]",
"==",
"lid2",
"sds",
".",
"extend",
"(",
"__surface_distances",
"(",
"object1",
",",
"object2",
",",
"voxelspacing",
",",
"connectivity",
")",
")",
"asd",
"=",
"np",
".",
"mean",
"(",
"sds",
")",
"return",
"asd"
] | [
727,
0
] | [
867,
14
] | python | en | ['en', 'error', 'th'] | False |
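The private __distinct_binary_object_correspondences helper used above is not shown in this excerpt. The simplified sketch below illustrates the general idea (label each image, then greedily assign each reference object to an unclaimed overlapping result object); the library's actual correspondence rules may differ in detail:

import numpy as np
from scipy.ndimage import generate_binary_structure, label

def distinct_object_correspondences(result, reference, connectivity=1):
    # Simplified stand-in: map each reference object to one overlapping result object.
    result = np.atleast_1d(result).astype(bool)
    reference = np.atleast_1d(reference).astype(bool)
    footprint = generate_binary_structure(result.ndim, connectivity)
    labelmap_result, n_result = label(result, structure=footprint)
    labelmap_reference, n_reference = label(reference, structure=footprint)
    mapping = {}          # reference label -> result label
    claimed = set()
    for ref_id in range(1, n_reference + 1):
        overlap = labelmap_result[labelmap_reference == ref_id]
        overlap = overlap[overlap != 0]
        if overlap.size == 0:
            continue                                  # reference object missed entirely
        candidates, counts = np.unique(overlap, return_counts=True)
        for res_id in candidates[np.argsort(-counts)]:
            if int(res_id) not in claimed:            # enforce a one-to-one assignment
                mapping[ref_id] = int(res_id)
                claimed.add(int(res_id))
                break
    return labelmap_result, labelmap_reference, n_result, n_reference, mapping

a = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
b = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
print(distinct_object_correspondences(a, b)[4])  # {1: 1}: only the corner objects overlap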
obj_fpr | (result, reference, connectivity=1) |
The false positive rate of distinct binary object detection.
The false positive rate gives a percentage measure of how many distinct binary
objects in the second array do not exist in the first array. A partial overlap
(of minimum one voxel) is here considered sufficient.
In cases where two distinct binary objects in the second array overlap with a single
distinct object in the first array, only one is considered to have been detected
successfully and the other is added to the count of false positives.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
fpr : float
A percentage measure of how many distinct binary objects in ``results`` have no
corresponding binary object in ``reference``. It has the range :math:`[0, 1]`, where a :math:`0`
denotes an ideal score.
Raises
------
RuntimeError
If the second array is empty.
See also
--------
:func:`obj_tpr`
Notes
-----
This is not a real metric, as it is directed. Whatever array is considered as
reference should be passed second. A perfect score of :math:`0` means that there are no
distinct binary objects in the second array that do not also exist in the reference
array, but does not reveal anything about objects in the reference array also
existing in the second array (use :func:`obj_tpr` for this).
Examples
--------
>>> arr2 = np.asarray([[1,0,0],[1,0,1],[0,0,1]])
>>> arr1 = np.asarray([[0,0,1],[1,0,1],[0,0,1]])
>>> arr2
array([[1, 0, 0],
[1, 0, 1],
[0, 0, 1]])
>>> arr1
array([[0, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.0
Example of directedness:
>>> arr2 = np.asarray([1,0,1,0,1])
>>> arr1 = np.asarray([1,0,1,0,0])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.3333333333333333
Examples of multiple overlap treatment:
>>> arr2 = np.asarray([1,0,1,0,1,1,1])
>>> arr1 = np.asarray([1,1,1,0,1,0,1])
>>> obj_fpr(arr1, arr2)
0.3333333333333333
>>> obj_fpr(arr2, arr1)
0.3333333333333333
>>> arr2 = np.asarray([1,0,1,1,1,0,1])
>>> arr1 = np.asarray([1,1,1,0,1,1,1])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.3333333333333333
>>> arr2 = np.asarray([[1,0,1,0,0],
[1,0,0,0,0],
[1,0,1,1,1],
[0,0,0,0,0],
[1,0,1,0,0]])
>>> arr1 = np.asarray([[1,1,1,0,0],
[0,0,0,0,0],
[1,1,1,0,1],
[0,0,0,0,0],
[1,1,1,0,0]])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.2
|
The false positive rate of distinct binary object detection. | def obj_fpr(result, reference, connectivity=1):
"""
The false positive rate of distinct binary object detection.
The false positive rate gives a percentage measure of how many distinct binary
objects in the second array do not exist in the first array. A partial overlap
(of minimum one voxel) is here considered sufficient.
In cases where two distinct binary objects in the second array overlap with a single
distinct object in the first array, only one is considered to have been detected
successfully and the other is added to the count of false positives.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
fpr : float
A percentage measure of how many distinct binary objects in ``result`` have no
corresponding binary object in ``reference``. It has the range :math:`[0, 1]`, where a :math:`0`
denotes an ideal score.
Raises
------
RuntimeError
If the second array is empty.
See also
--------
:func:`obj_tpr`
Notes
-----
This is not a real metric, as it is directed. Whatever array is considered as
reference should be passed second. A perfect score of :math:`0` means that there are no
distinct binary objects in the second array that do not also exist in the reference
array, but does not reveal anything about objects in the reference array also
existing in the second array (use :func:`obj_tpr` for this).
Examples
--------
>>> arr2 = np.asarray([[1,0,0],[1,0,1],[0,0,1]])
>>> arr1 = np.asarray([[0,0,1],[1,0,1],[0,0,1]])
>>> arr2
array([[1, 0, 0],
[1, 0, 1],
[0, 0, 1]])
>>> arr1
array([[0, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.0
Example of directedness:
>>> arr2 = np.asarray([1,0,1,0,1])
>>> arr1 = np.asarray([1,0,1,0,0])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.3333333333333333
Examples of multiple overlap treatment:
>>> arr2 = np.asarray([1,0,1,0,1,1,1])
>>> arr1 = np.asarray([1,1,1,0,1,0,1])
>>> obj_fpr(arr1, arr2)
0.3333333333333333
>>> obj_fpr(arr2, arr1)
0.3333333333333333
>>> arr2 = np.asarray([1,0,1,1,1,0,1])
>>> arr1 = np.asarray([1,1,1,0,1,1,1])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.3333333333333333
>>> arr2 = np.asarray([[1,0,1,0,0],
[1,0,0,0,0],
[1,0,1,1,1],
[0,0,0,0,0],
[1,0,1,0,0]])
>>> arr1 = np.asarray([[1,1,1,0,0],
[0,0,0,0,0],
[1,1,1,0,1],
[0,0,0,0,0],
[1,1,1,0,0]])
>>> obj_fpr(arr1, arr2)
0.0
>>> obj_fpr(arr2, arr1)
0.2
"""
_, _, _, n_obj_reference, mapping = __distinct_binary_object_correspondences(reference, result, connectivity)
return (n_obj_reference - len(mapping)) / float(n_obj_reference) | [
"def",
"obj_fpr",
"(",
"result",
",",
"reference",
",",
"connectivity",
"=",
"1",
")",
":",
"_",
",",
"_",
",",
"_",
",",
"n_obj_reference",
",",
"mapping",
"=",
"__distinct_binary_object_correspondences",
"(",
"reference",
",",
"result",
",",
"connectivity",
")",
"return",
"(",
"n_obj_reference",
"-",
"len",
"(",
"mapping",
")",
")",
"/",
"float",
"(",
"n_obj_reference",
")"
] | [
870,
0
] | [
979,
68
] | python | en | ['en', 'error', 'th'] | False |
obj_tpr | (result, reference, connectivity=1) |
The true positive rate of distinct binary object detection.
The true positive rate gives a percentage measure of how many distinct binary
objects in the first array also exist in the second array. A partial overlap
(of minimum one voxel) is here considered sufficient.
In cases where two distinct binary objects in the first array overlap with a single
distinct object in the second array, only one is considered to have been detected
successfully.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
tpr : float
A percentage measure of how many distinct binary objects in ``result`` also exist
in ``reference``. It has the range :math:`[0, 1]`, where a :math:`1` denotes an ideal score.
Raises
------
RuntimeError
If the reference object is empty.
See also
--------
:func:`obj_fpr`
Notes
-----
This is not a real metric, as it is directed. Whatever array is considered as
reference should be passed second. A perfect score of :math:`1` means that all distinct
binary objects in the reference array also exist in the result array, but does not
reveal anything about additional binary objects in the result array
(use :func:`obj_fpr` for this).
Examples
--------
>>> arr2 = np.asarray([[1,0,0],[1,0,1],[0,0,1]])
>>> arr1 = np.asarray([[0,0,1],[1,0,1],[0,0,1]])
>>> arr2
array([[1, 0, 0],
[1, 0, 1],
[0, 0, 1]])
>>> arr1
array([[0, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> obj_tpr(arr1, arr2)
1.0
>>> obj_tpr(arr2, arr1)
1.0
Example of directedness:
>>> arr2 = np.asarray([1,0,1,0,1])
>>> arr1 = np.asarray([1,0,1,0,0])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
1.0
Examples of multiple overlap treatment:
>>> arr2 = np.asarray([1,0,1,0,1,1,1])
>>> arr1 = np.asarray([1,1,1,0,1,0,1])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
0.6666666666666666
>>> arr2 = np.asarray([1,0,1,1,1,0,1])
>>> arr1 = np.asarray([1,1,1,0,1,1,1])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
1.0
>>> arr2 = np.asarray([[1,0,1,0,0],
[1,0,0,0,0],
[1,0,1,1,1],
[0,0,0,0,0],
[1,0,1,0,0]])
>>> arr1 = np.asarray([[1,1,1,0,0],
[0,0,0,0,0],
[1,1,1,0,1],
[0,0,0,0,0],
[1,1,1,0,0]])
>>> obj_tpr(arr1, arr2)
0.8
>>> obj_tpr(arr2, arr1)
1.0
|
The true positive rate of distinct binary object detection. | def obj_tpr(result, reference, connectivity=1):
"""
The true positive rate of distinct binary object detection.
The true positive rate gives a percentage measure of how many distinct binary
objects in the first array also exist in the second array. A partial overlap
(of minimum one voxel) is here considered sufficient.
In cases where two distinct binary objects in the first array overlap with a single
distinct object in the second array, only one is considered to have been detected
successfully.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
connectivity : int
The neighbourhood/connectivity considered when determining what accounts
for a distinct binary object. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
tpr : float
A percentage measure of how many distinct binary objects in ``result`` also exist
in ``reference``. It has the range :math:`[0, 1]`, where a :math:`1` denotes an ideal score.
Raises
------
RuntimeError
If the reference object is empty.
See also
--------
:func:`obj_fpr`
Notes
-----
This is not a real metric, as it is directed. Whatever array is considered as
reference should be passed second. A perfect score of :math:`1` means that all distinct
binary objects in the reference array also exist in the result array, but does not
reveal anything about additional binary objects in the result array
(use :func:`obj_fpr` for this).
Examples
--------
>>> arr2 = np.asarray([[1,0,0],[1,0,1],[0,0,1]])
>>> arr1 = np.asarray([[0,0,1],[1,0,1],[0,0,1]])
>>> arr2
array([[1, 0, 0],
[1, 0, 1],
[0, 0, 1]])
>>> arr1
array([[0, 0, 1],
[1, 0, 1],
[0, 0, 1]])
>>> obj_tpr(arr1, arr2)
1.0
>>> obj_tpr(arr2, arr1)
1.0
Example of directedness:
>>> arr2 = np.asarray([1,0,1,0,1])
>>> arr1 = np.asarray([1,0,1,0,0])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
1.0
Examples of multiple overlap treatment:
>>> arr2 = np.asarray([1,0,1,0,1,1,1])
>>> arr1 = np.asarray([1,1,1,0,1,0,1])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
0.6666666666666666
>>> arr2 = np.asarray([1,0,1,1,1,0,1])
>>> arr1 = np.asarray([1,1,1,0,1,1,1])
>>> obj_tpr(arr1, arr2)
0.6666666666666666
>>> obj_tpr(arr2, arr1)
1.0
>>> arr2 = np.asarray([[1,0,1,0,0],
[1,0,0,0,0],
[1,0,1,1,1],
[0,0,0,0,0],
[1,0,1,0,0]])
>>> arr1 = np.asarray([[1,1,1,0,0],
[0,0,0,0,0],
[1,1,1,0,1],
[0,0,0,0,0],
[1,1,1,0,0]])
>>> obj_tpr(arr1, arr2)
0.8
>>> obj_tpr(arr2, arr1)
1.0
"""
_, _, n_obj_result, _, mapping = __distinct_binary_object_correspondences(reference, result, connectivity)
return len(mapping) / float(n_obj_result) | [
"def",
"obj_tpr",
"(",
"result",
",",
"reference",
",",
"connectivity",
"=",
"1",
")",
":",
"_",
",",
"_",
",",
"n_obj_result",
",",
"_",
",",
"mapping",
"=",
"__distinct_binary_object_correspondences",
"(",
"reference",
",",
"result",
",",
"connectivity",
")",
"return",
"len",
"(",
"mapping",
")",
"/",
"float",
"(",
"n_obj_result",
")"
] | [
982,
0
] | [
1090,
45
] | python | en | ['en', 'error', 'th'] | False |
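A brief usage sketch for the two object-detection rates above. The toy arrays are arbitrary binary masks, and only the docstring-level reading of the return values is relied on; in medpy these functions are exposed under medpy.metric.binary (assumed import path), here they are simply called directly:

import numpy as np

# Hypothetical binary masks: a predicted segmentation and a ground-truth reference.
segmentation = np.asarray([[1, 1, 0, 0],
                           [0, 0, 0, 1],
                           [0, 0, 0, 1]])
ground_truth = np.asarray([[1, 1, 0, 0],
                           [0, 0, 0, 0],
                           [1, 0, 0, 0]])

fpr = obj_fpr(segmentation, ground_truth)  # in [0, 1]; 0 is the ideal score
tpr = obj_tpr(segmentation, ground_truth)  # in [0, 1]; 1 is the ideal score
print(fpr, tpr)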
__distinct_binary_object_correspondences | (reference, result, connectivity=1) |
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a one-to-one mapping from the labelled objects in reference to the
corresponding (where a one-voxel overlap suffices for correspondence) objects in
result.
This is necessary because the underlying relationship is a non-surjective many-to-many mapping.
@return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
|
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a one-to-one mapping from the labelled objects in reference to the
corresponding (where a one-voxel overlap suffices for correspondence) objects in
result. | def __distinct_binary_object_correspondences(reference, result, connectivity=1):
"""
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a one-to-one mapping from the labelled objects in reference to the
corresponding (where a one-voxel overlap suffices for correspondence) objects in
result.
This is necessary because the underlying relationship is a non-surjective many-to-many mapping.
@return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# label distinct binary objects
labelmap1, n_obj_result = label(result, footprint)
labelmap2, n_obj_reference = label(reference, footprint)
# find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-to-many for later processing
slicers = find_objects(labelmap2) # get windows of labelled objects
mapping = dict() # mappings from labels in labelmap2 to corresponding object labels in labelmap1
used_labels = set() # set to collect all already used labels from labelmap2
one_to_many = list() # list to collect all one-to-many mappings
for l1id, slicer in enumerate(slicers): # iterate over objects in labelmap2 and their windows
l1id += 1 # labelled objects have ids starting from 1
bobj = (l1id) == labelmap2[slicer] # find binary object corresponding to the label1 id in the segmentation
l2ids = np.unique(labelmap1[slicer][
bobj]) # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
l2ids = l2ids[0 != l2ids] # remove background identifiers (=0)
if 1 == len(
l2ids): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
l2id = l2ids[0]
if not l2id in used_labels:
mapping[l1id] = l2id
used_labels.add(l2id)
elif 1 < len(l2ids): # one-to-many mapping: store relationship for later processing
one_to_many.append((l1id, set(l2ids)))
# process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
while True:
one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in
one_to_many] # remove already used ids from all sets
one_to_many = [x for x in one_to_many if x[1]] # remove empty sets
one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length
if 0 == len(one_to_many):
break
l2id = one_to_many[0][1].pop() # select an arbitrary target label id from the shortest set
mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings
used_labels.add(l2id) # mark target label as used
one_to_many = one_to_many[1:] # delete the processed set from all sets
return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping | [
"def",
"__distinct_binary_object_correspondences",
"(",
"reference",
",",
"result",
",",
"connectivity",
"=",
"1",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"# binary structure",
"footprint",
"=",
"generate_binary_structure",
"(",
"result",
".",
"ndim",
",",
"connectivity",
")",
"# label distinct binary objects",
"labelmap1",
",",
"n_obj_result",
"=",
"label",
"(",
"result",
",",
"footprint",
")",
"labelmap2",
",",
"n_obj_reference",
"=",
"label",
"(",
"reference",
",",
"footprint",
")",
"# find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-two-many for later processing",
"slicers",
"=",
"find_objects",
"(",
"labelmap2",
")",
"# get windows of labelled objects",
"mapping",
"=",
"dict",
"(",
")",
"# mappings from labels in labelmap2 to corresponding object labels in labelmap1",
"used_labels",
"=",
"set",
"(",
")",
"# set to collect all already used labels from labelmap2",
"one_to_many",
"=",
"list",
"(",
")",
"# list to collect all one-to-many mappings",
"for",
"l1id",
",",
"slicer",
"in",
"enumerate",
"(",
"slicers",
")",
":",
"# iterate over object in labelmap2 and their windows",
"l1id",
"+=",
"1",
"# labelled objects have ids sarting from 1",
"bobj",
"=",
"(",
"l1id",
")",
"==",
"labelmap2",
"[",
"slicer",
"]",
"# find binary object corresponding to the label1 id in the segmentation",
"l2ids",
"=",
"np",
".",
"unique",
"(",
"labelmap1",
"[",
"slicer",
"]",
"[",
"bobj",
"]",
")",
"# extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)",
"l2ids",
"=",
"l2ids",
"[",
"0",
"!=",
"l2ids",
"]",
"# remove background identifiers (=0)",
"if",
"1",
"==",
"len",
"(",
"l2ids",
")",
":",
"# one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used",
"l2id",
"=",
"l2ids",
"[",
"0",
"]",
"if",
"not",
"l2id",
"in",
"used_labels",
":",
"mapping",
"[",
"l1id",
"]",
"=",
"l2id",
"used_labels",
".",
"add",
"(",
"l2id",
")",
"elif",
"1",
"<",
"len",
"(",
"l2ids",
")",
":",
"# one-to-many mapping: store relationship for later processing",
"one_to_many",
".",
"append",
"(",
"(",
"l1id",
",",
"set",
"(",
"l2ids",
")",
")",
")",
"# process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first",
"while",
"True",
":",
"one_to_many",
"=",
"[",
"(",
"l1id",
",",
"l2ids",
"-",
"used_labels",
")",
"for",
"l1id",
",",
"l2ids",
"in",
"one_to_many",
"]",
"# remove already used ids from all sets",
"one_to_many",
"=",
"[",
"x",
"for",
"x",
"in",
"one_to_many",
"if",
"x",
"[",
"1",
"]",
"]",
"# remove empty sets",
"one_to_many",
"=",
"sorted",
"(",
"one_to_many",
",",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
"[",
"1",
"]",
")",
")",
"# sort by set length",
"if",
"0",
"==",
"len",
"(",
"one_to_many",
")",
":",
"break",
"l2id",
"=",
"one_to_many",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"pop",
"(",
")",
"# select an arbitrary target label id from the shortest set",
"mapping",
"[",
"one_to_many",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"=",
"l2id",
"# add to one-to-one mappings",
"used_labels",
".",
"add",
"(",
"l2id",
")",
"# mark target label as used",
"one_to_many",
"=",
"one_to_many",
"[",
"1",
":",
"]",
"# delete the processed set from all sets",
"return",
"labelmap1",
",",
"labelmap2",
",",
"n_obj_result",
",",
"n_obj_reference",
",",
"mapping"
] | [
1093,
0
] | [
1148,
71
] | python | en | ['en', 'error', 'th'] | False |
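The one-to-many resolution in the loop above is a greedy assignment: the object with the fewest remaining candidate partner labels is settled first, and each chosen label is retired. A standalone sketch of the same idea on plain Python sets (the label ids are hypothetical and not tied to any image):

# Greedy resolution of one-to-many label correspondences, shortest candidate set first.
one_to_many = [(1, {10, 11}), (2, {10}), (3, {11, 12})]
mapping, used = {}, set()
while True:
    remaining = sorted(((lid, cands - used) for lid, cands in one_to_many if cands - used),
                       key=lambda item: len(item[1]))
    if not remaining:
        break
    lid, cands = remaining[0]
    choice = cands.pop()        # arbitrary pick from the shortest set
    mapping[lid] = choice
    used.add(choice)
    one_to_many = remaining[1:]
print(mapping)                  # every key ends up with a distinct partner label

Settling the shortest sets first keeps labels with many options available for the objects that have few, which is why the function re-sorts by set length on every pass.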
__surface_distances | (result, reference, voxelspacing=None, connectivity=1) |
The distances between the surface voxels of binary objects in result and their
nearest partner surface voxels of a binary object in reference.
|
The distances between the surface voxels of binary objects in result and their
nearest partner surface voxels of a binary object in reference.
| def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
"""
The distances between the surface voxels of binary objects in result and their
nearest partner surface voxels of a binary object in reference.
"""
result = np.atleast_1d(result.astype(np.bool))
reference = np.atleast_1d(reference.astype(np.bool))
if voxelspacing is not None:
voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
voxelspacing = np.asarray(voxelspacing, dtype=np.float64)
if not voxelspacing.flags.contiguous:
voxelspacing = voxelspacing.copy()
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# test for emptiness
if 0 == np.count_nonzero(result):
raise RuntimeError('The first supplied array does not contain any binary object.')
if 0 == np.count_nonzero(reference):
raise RuntimeError('The second supplied array does not contain any binary object.')
# extract only 1-pixel border line of objects
result_border = result ^ binary_erosion(result, structure=footprint, iterations=1)
reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1)
# compute average surface distance
# Note: scipy's distance transform is calculated only inside the borders of the
# foreground objects, therefore the input has to be reversed
dt = distance_transform_edt(~reference_border, sampling=voxelspacing)
sds = dt[result_border]
return sds | [
"def",
"__surface_distances",
"(",
"result",
",",
"reference",
",",
"voxelspacing",
"=",
"None",
",",
"connectivity",
"=",
"1",
")",
":",
"result",
"=",
"np",
".",
"atleast_1d",
"(",
"result",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"reference",
"=",
"np",
".",
"atleast_1d",
"(",
"reference",
".",
"astype",
"(",
"np",
".",
"bool",
")",
")",
"if",
"voxelspacing",
"is",
"not",
"None",
":",
"voxelspacing",
"=",
"_ni_support",
".",
"_normalize_sequence",
"(",
"voxelspacing",
",",
"result",
".",
"ndim",
")",
"voxelspacing",
"=",
"np",
".",
"asarray",
"(",
"voxelspacing",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"not",
"voxelspacing",
".",
"flags",
".",
"contiguous",
":",
"voxelspacing",
"=",
"voxelspacing",
".",
"copy",
"(",
")",
"# binary structure",
"footprint",
"=",
"generate_binary_structure",
"(",
"result",
".",
"ndim",
",",
"connectivity",
")",
"# test for emptiness",
"if",
"0",
"==",
"np",
".",
"count_nonzero",
"(",
"result",
")",
":",
"raise",
"RuntimeError",
"(",
"'The first supplied array does not contain any binary object.'",
")",
"if",
"0",
"==",
"np",
".",
"count_nonzero",
"(",
"reference",
")",
":",
"raise",
"RuntimeError",
"(",
"'The second supplied array does not contain any binary object.'",
")",
"# extract only 1-pixel border line of objects",
"result_border",
"=",
"result",
"^",
"binary_erosion",
"(",
"result",
",",
"structure",
"=",
"footprint",
",",
"iterations",
"=",
"1",
")",
"reference_border",
"=",
"reference",
"^",
"binary_erosion",
"(",
"reference",
",",
"structure",
"=",
"footprint",
",",
"iterations",
"=",
"1",
")",
"# compute average surface distance",
"# Note: scipys distance transform is calculated only inside the borders of the",
"# foreground objects, therefore the input has to be reversed",
"dt",
"=",
"distance_transform_edt",
"(",
"~",
"reference_border",
",",
"sampling",
"=",
"voxelspacing",
")",
"sds",
"=",
"dt",
"[",
"result_border",
"]",
"return",
"sds"
] | [
1151,
0
] | [
1183,
14
] | python | en | ['en', 'error', 'th'] | False |
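The helper above returns the raw directed distance vector; the public surface metrics are thin reductions over it. A sketch of two such reductions, assuming the code sits in the same module (the leading double underscore makes __surface_distances module-private) and mirroring how medpy's asd and hd are defined:

import numpy as np

def asd_directed(result, reference, voxelspacing=None, connectivity=1):
    # Directed average surface distance: mean distance from result's border to reference's border.
    return np.mean(__surface_distances(result, reference, voxelspacing, connectivity))

def hd_symmetric(result, reference, voxelspacing=None, connectivity=1):
    # Hausdorff distance: the larger of the two directed maxima.
    hd1 = __surface_distances(result, reference, voxelspacing, connectivity).max()
    hd2 = __surface_distances(reference, result, voxelspacing, connectivity).max()
    return max(hd1, hd2)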
__combine_windows | (w1, w2) |
Joins two windows (defined by a tuple of slices) such that their maximum
combined extent is covered by the new returned window.
|
Joins two windows (defined by a tuple of slices) such that their maximum
combined extent is covered by the new returned window.
| def __combine_windows(w1, w2):
"""
Joins two windows (defined by a tuple of slices) such that their maximum
combined extent is covered by the new returned window.
"""
res = []
for s1, s2 in zip(w1, w2):
res.append(slice(min(s1.start, s2.start), max(s1.stop, s2.stop)))
return tuple(res) | [
"def",
"__combine_windows",
"(",
"w1",
",",
"w2",
")",
":",
"res",
"=",
"[",
"]",
"for",
"s1",
",",
"s2",
"in",
"zip",
"(",
"w1",
",",
"w2",
")",
":",
"res",
".",
"append",
"(",
"slice",
"(",
"min",
"(",
"s1",
".",
"start",
",",
"s2",
".",
"start",
")",
",",
"max",
"(",
"s1",
".",
"stop",
",",
"s2",
".",
"stop",
")",
")",
")",
"return",
"tuple",
"(",
"res",
")"
] | [
1186,
0
] | [
1194,
21
] | python | en | ['en', 'error', 'th'] | False |
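A quick illustration of the window join on plain slice objects; since the double-underscore prefix makes the helper callable only from inside its own module, the expression below simply restates its body:

w1 = (slice(2, 5), slice(0, 4))
w2 = (slice(3, 8), slice(1, 3))
combined = tuple(slice(min(s1.start, s2.start), max(s1.stop, s2.stop)) for s1, s2 in zip(w1, w2))
print(combined)   # (slice(2, 8, None), slice(0, 4, None))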
LineString.__init__ | (self, *args, **kwargs) |
Initialize on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
|
Initialize on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object. | def __init__(self, *args, **kwargs):
"""
Initialize on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if not (isinstance(coords, (tuple, list)) or numpy and isinstance(coords, numpy.ndarray)):
raise TypeError('Invalid initialization input for LineStrings.')
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid')
ncoords = len(coords)
if not ncoords:
super().__init__(self._init_func(None), srid=srid)
return
if ncoords < self._minlength:
raise ValueError(
'%s requires at least %d points, got %s.' % (
self.__class__.__name__,
self._minlength,
ncoords,
)
)
numpy_coords = not isinstance(coords, (tuple, list))
if numpy_coords:
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ndim = shape[1]
else:
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ndim = None
# Incrementing through each of the coordinates and verifying
for coord in coords:
if not isinstance(coord, (tuple, list, Point)):
raise TypeError('Each coordinate should be a sequence (list or tuple)')
if ndim is None:
ndim = len(coord)
self._checkdim(ndim)
elif len(coord) != ndim:
raise TypeError('Dimension mismatch.')
# Creating a coordinate sequence object because it is easier to
# set the points using its methods.
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
point_setter = cs._set_point_3d if ndim == 3 else cs._set_point_2d
for i in range(ncoords):
if numpy_coords:
point_coords = coords[i, :]
elif isinstance(coords[i], Point):
point_coords = coords[i].tuple
else:
point_coords = coords[i]
point_setter(i, point_coords)
# Calling the base geometry initialization with the returned pointer
# from the function.
super().__init__(self._init_func(cs.ptr), srid=srid) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# If only one argument provided, set the coords array appropriately",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"coords",
"=",
"args",
"[",
"0",
"]",
"else",
":",
"coords",
"=",
"args",
"if",
"not",
"(",
"isinstance",
"(",
"coords",
",",
"(",
"tuple",
",",
"list",
")",
")",
"or",
"numpy",
"and",
"isinstance",
"(",
"coords",
",",
"numpy",
".",
"ndarray",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Invalid initialization input for LineStrings.'",
")",
"# If SRID was passed in with the keyword arguments",
"srid",
"=",
"kwargs",
".",
"get",
"(",
"'srid'",
")",
"ncoords",
"=",
"len",
"(",
"coords",
")",
"if",
"not",
"ncoords",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"self",
".",
"_init_func",
"(",
"None",
")",
",",
"srid",
"=",
"srid",
")",
"return",
"if",
"ncoords",
"<",
"self",
".",
"_minlength",
":",
"raise",
"ValueError",
"(",
"'%s requires at least %d points, got %s.'",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_minlength",
",",
"ncoords",
",",
")",
")",
"numpy_coords",
"=",
"not",
"isinstance",
"(",
"coords",
",",
"(",
"tuple",
",",
"list",
")",
")",
"if",
"numpy_coords",
":",
"shape",
"=",
"coords",
".",
"shape",
"# Using numpy's shape.",
"if",
"len",
"(",
"shape",
")",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"'Too many dimensions.'",
")",
"self",
".",
"_checkdim",
"(",
"shape",
"[",
"1",
"]",
")",
"ndim",
"=",
"shape",
"[",
"1",
"]",
"else",
":",
"# Getting the number of coords and the number of dimensions -- which",
"# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).",
"ndim",
"=",
"None",
"# Incrementing through each of the coordinates and verifying",
"for",
"coord",
"in",
"coords",
":",
"if",
"not",
"isinstance",
"(",
"coord",
",",
"(",
"tuple",
",",
"list",
",",
"Point",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Each coordinate should be a sequence (list or tuple)'",
")",
"if",
"ndim",
"is",
"None",
":",
"ndim",
"=",
"len",
"(",
"coord",
")",
"self",
".",
"_checkdim",
"(",
"ndim",
")",
"elif",
"len",
"(",
"coord",
")",
"!=",
"ndim",
":",
"raise",
"TypeError",
"(",
"'Dimension mismatch.'",
")",
"# Creating a coordinate sequence object because it is easier to",
"# set the points using its methods.",
"cs",
"=",
"GEOSCoordSeq",
"(",
"capi",
".",
"create_cs",
"(",
"ncoords",
",",
"ndim",
")",
",",
"z",
"=",
"bool",
"(",
"ndim",
"==",
"3",
")",
")",
"point_setter",
"=",
"cs",
".",
"_set_point_3d",
"if",
"ndim",
"==",
"3",
"else",
"cs",
".",
"_set_point_2d",
"for",
"i",
"in",
"range",
"(",
"ncoords",
")",
":",
"if",
"numpy_coords",
":",
"point_coords",
"=",
"coords",
"[",
"i",
",",
":",
"]",
"elif",
"isinstance",
"(",
"coords",
"[",
"i",
"]",
",",
"Point",
")",
":",
"point_coords",
"=",
"coords",
"[",
"i",
"]",
".",
"tuple",
"else",
":",
"point_coords",
"=",
"coords",
"[",
"i",
"]",
"point_setter",
"(",
"i",
",",
"point_coords",
")",
"# Calling the base geometry initialization with the returned pointer",
"# from the function.",
"super",
"(",
")",
".",
"__init__",
"(",
"self",
".",
"_init_func",
"(",
"cs",
".",
"ptr",
")",
",",
"srid",
"=",
"srid",
")"
] | [
13,
4
] | [
89,
60
] | python | en | ['en', 'error', 'th'] | False |
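A short constructor sketch matching the docstring above, assuming Django's GEOS bindings are available:

from django.contrib.gis.geos import LineString, Point

ls1 = LineString((0, 0), (1, 1), (2, 2))        # individual coordinate tuples
ls2 = LineString([(0, 0), (1, 1)], srid=4326)   # a list of tuples plus an SRID
ls3 = LineString(Point(0, 0), Point(1, 1))      # Point objects (ownership is not transferred)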
LineString.__iter__ | (self) | Allow iteration over this LineString. | Allow iteration over this LineString. | def __iter__(self):
"Allow iteration over this LineString."
for i in range(len(self)):
yield self[i] | [
"def",
"__iter__",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"yield",
"self",
"[",
"i",
"]"
] | [
91,
4
] | [
94,
25
] | python | en | ['en', 'en', 'en'] | True |
LineString.__len__ | (self) | Return the number of points in this LineString. | Return the number of points in this LineString. | def __len__(self):
"Return the number of points in this LineString."
return len(self._cs) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"_cs",
")"
] | [
96,
4
] | [
98,
28
] | python | en | ['en', 'en', 'en'] | True |
LineString.tuple | (self) | Return a tuple version of the geometry from the coordinate sequence. | Return a tuple version of the geometry from the coordinate sequence. | def tuple(self):
"Return a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple | [
"def",
"tuple",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cs",
".",
"tuple"
] | [
135,
4
] | [
137,
29
] | python | en | ['en', 'en', 'en'] | True |
LineString._listarr | (self, func) |
Return a sequence (list) corresponding with the given function.
Return a numpy array if possible.
|
Return a sequence (list) corresponding with the given function.
Return a numpy array if possible.
| def _listarr(self, func):
"""
Return a sequence (list) corresponding with the given function.
Return a numpy array if possible.
"""
lst = [func(i) for i in range(len(self))]
if numpy:
return numpy.array(lst) # ARRRR!
else:
return lst | [
"def",
"_listarr",
"(",
"self",
",",
"func",
")",
":",
"lst",
"=",
"[",
"func",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
"]",
"if",
"numpy",
":",
"return",
"numpy",
".",
"array",
"(",
"lst",
")",
"# ARRRR!",
"else",
":",
"return",
"lst"
] | [
140,
4
] | [
149,
22
] | python | en | ['en', 'error', 'th'] | False |
LineString.array | (self) | Return a numpy array for the LineString. | Return a numpy array for the LineString. | def array(self):
"Return a numpy array for the LineString."
return self._listarr(self._cs.__getitem__) | [
"def",
"array",
"(",
"self",
")",
":",
"return",
"self",
".",
"_listarr",
"(",
"self",
".",
"_cs",
".",
"__getitem__",
")"
] | [
152,
4
] | [
154,
50
] | python | en | ['en', 'en', 'en'] | True |
LineString.x | (self) | Return a list or numpy array of the X variable. | Return a list or numpy array of the X variable. | def x(self):
"Return a list or numpy array of the X variable."
return self._listarr(self._cs.getX) | [
"def",
"x",
"(",
"self",
")",
":",
"return",
"self",
".",
"_listarr",
"(",
"self",
".",
"_cs",
".",
"getX",
")"
] | [
157,
4
] | [
159,
43
] | python | en | ['en', 'ga', 'en'] | True |
LineString.y | (self) | Return a list or numpy array of the Y variable. | Return a list or numpy array of the Y variable. | def y(self):
"Return a list or numpy array of the Y variable."
return self._listarr(self._cs.getY) | [
"def",
"y",
"(",
"self",
")",
":",
"return",
"self",
".",
"_listarr",
"(",
"self",
".",
"_cs",
".",
"getY",
")"
] | [
162,
4
] | [
164,
43
] | python | en | ['en', 'en', 'en'] | True |
LineString.z | (self) | Return a list or numpy array of the Z variable. | Return a list or numpy array of the Z variable. | def z(self):
"Return a list or numpy array of the Z variable."
if not self.hasz:
return None
else:
return self._listarr(self._cs.getZ) | [
"def",
"z",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"hasz",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_listarr",
"(",
"self",
".",
"_cs",
".",
"getZ",
")"
] | [
167,
4
] | [
172,
47
] | python | en | ['en', 'ga', 'en'] | True |
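The accessor properties above are easiest to see on a small 3D example; the exact container type of x/y/z depends on whether numpy is installed (a plain list otherwise):

from django.contrib.gis.geos import LineString

ls = LineString((0, 0, 10), (1, 1, 20))
ls.tuple   # ((0.0, 0.0, 10.0), (1.0, 1.0, 20.0))
ls.x       # [0.0, 1.0] as a list, or a numpy array when numpy is available
ls.z       # [10.0, 20.0]; None for a 2D LineString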
CacheAdapter.get | (self, public_id, type, resource_type, transformation, format) |
Gets value specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:return: None|mixed value, None if not found
|
Gets value specified by parameters | def get(self, public_id, type, resource_type, transformation, format):
"""
Gets value specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:return: None|mixed value, None if not found
"""
raise NotImplementedError | [
"def",
"get",
"(",
"self",
",",
"public_id",
",",
"type",
",",
"resource_type",
",",
"transformation",
",",
"format",
")",
":",
"raise",
"NotImplementedError"
] | [
10,
4
] | [
22,
33
] | python | en | ['en', 'error', 'th'] | False |
CacheAdapter.set | (self, public_id, type, resource_type, transformation, format, value) |
Sets value specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:param value: The value to set
:return: bool True on success or False on failure
|
Sets value specified by parameters | def set(self, public_id, type, resource_type, transformation, format, value):
"""
Sets value specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:param value: The value to set
:return: bool True on success or False on failure
"""
raise NotImplementedError | [
"def",
"set",
"(",
"self",
",",
"public_id",
",",
"type",
",",
"resource_type",
",",
"transformation",
",",
"format",
",",
"value",
")",
":",
"raise",
"NotImplementedError"
] | [
25,
4
] | [
38,
33
] | python | en | ['en', 'error', 'th'] | False |
CacheAdapter.delete | (self, public_id, type, resource_type, transformation, format) |
Deletes entry specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:return: bool True on success or False on failure
|
Deletes entry specified by parameters | def delete(self, public_id, type, resource_type, transformation, format):
"""
Deletes entry specified by parameters
:param public_id: The public ID of the resource
:param type: The storage type
:param resource_type: The type of the resource
:param transformation: The transformation string
:param format: The format of the resource
:return: bool True on success or False on failure
"""
raise NotImplementedError | [
"def",
"delete",
"(",
"self",
",",
"public_id",
",",
"type",
",",
"resource_type",
",",
"transformation",
",",
"format",
")",
":",
"raise",
"NotImplementedError"
] | [
41,
4
] | [
53,
33
] | python | en | ['en', 'error', 'th'] | False |
CacheAdapter.flush_all | (self) |
Flushes all entries from cache
:return: bool True on success or False on failure
|
Flushes all entries from cache | def flush_all(self):
"""
Flushes all entries from cache
:return: bool True on success or False on failure
"""
raise NotImplementedError | [
"def",
"flush_all",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | [
56,
4
] | [
62,
33
] | python | en | ['en', 'error', 'th'] | False |
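CacheAdapter is an abstract interface; concrete adapters plug into Cloudinary's caching layer. A minimal, illustrative in-memory implementation of the four methods above (not one of the adapters the SDK ships, only a sketch of the contract):

class DictCacheAdapter(CacheAdapter):
    """Illustrative in-memory adapter; not meant for production use."""

    def __init__(self):
        self._store = {}

    @staticmethod
    def _key(public_id, type, resource_type, transformation, format):
        return (public_id, type, resource_type, transformation, format)

    def get(self, public_id, type, resource_type, transformation, format):
        return self._store.get(self._key(public_id, type, resource_type, transformation, format))

    def set(self, public_id, type, resource_type, transformation, format, value):
        self._store[self._key(public_id, type, resource_type, transformation, format)] = value
        return True

    def delete(self, public_id, type, resource_type, transformation, format):
        return self._store.pop(self._key(public_id, type, resource_type, transformation, format), None) is not None

    def flush_all(self):
        self._store.clear()
        return True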
formset_factory | (form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False, absolute_max=None,
can_delete_extra=True) | Return a FormSet for the given form class. | Return a FormSet for the given form class. | def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False, absolute_max=None,
can_delete_extra=True):
"""Return a FormSet for the given form class."""
if min_num is None:
min_num = DEFAULT_MIN_NUM
if max_num is None:
max_num = DEFAULT_MAX_NUM
# absolute_max is a hard limit on forms instantiated, to prevent
# memory-exhaustion attacks. Default to max_num + DEFAULT_MAX_NUM
# (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place).
if absolute_max is None:
absolute_max = max_num + DEFAULT_MAX_NUM
if max_num > absolute_max:
raise ValueError(
"'absolute_max' must be greater or equal to 'max_num'."
)
attrs = {
'form': form,
'extra': extra,
'can_order': can_order,
'can_delete': can_delete,
'can_delete_extra': can_delete_extra,
'min_num': min_num,
'max_num': max_num,
'absolute_max': absolute_max,
'validate_min': validate_min,
'validate_max': validate_max,
}
return type(form.__name__ + 'FormSet', (formset,), attrs) | [
"def",
"formset_factory",
"(",
"form",
",",
"formset",
"=",
"BaseFormSet",
",",
"extra",
"=",
"1",
",",
"can_order",
"=",
"False",
",",
"can_delete",
"=",
"False",
",",
"max_num",
"=",
"None",
",",
"validate_max",
"=",
"False",
",",
"min_num",
"=",
"None",
",",
"validate_min",
"=",
"False",
",",
"absolute_max",
"=",
"None",
",",
"can_delete_extra",
"=",
"True",
")",
":",
"if",
"min_num",
"is",
"None",
":",
"min_num",
"=",
"DEFAULT_MIN_NUM",
"if",
"max_num",
"is",
"None",
":",
"max_num",
"=",
"DEFAULT_MAX_NUM",
"# absolute_max is a hard limit on forms instantiated, to prevent",
"# memory-exhaustion attacks. Default to max_num + DEFAULT_MAX_NUM",
"# (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place).",
"if",
"absolute_max",
"is",
"None",
":",
"absolute_max",
"=",
"max_num",
"+",
"DEFAULT_MAX_NUM",
"if",
"max_num",
">",
"absolute_max",
":",
"raise",
"ValueError",
"(",
"\"'absolute_max' must be greater or equal to 'max_num'.\"",
")",
"attrs",
"=",
"{",
"'form'",
":",
"form",
",",
"'extra'",
":",
"extra",
",",
"'can_order'",
":",
"can_order",
",",
"'can_delete'",
":",
"can_delete",
",",
"'can_delete_extra'",
":",
"can_delete_extra",
",",
"'min_num'",
":",
"min_num",
",",
"'max_num'",
":",
"max_num",
",",
"'absolute_max'",
":",
"absolute_max",
",",
"'validate_min'",
":",
"validate_min",
",",
"'validate_max'",
":",
"validate_max",
",",
"}",
"return",
"type",
"(",
"form",
".",
"__name__",
"+",
"'FormSet'",
",",
"(",
"formset",
",",
")",
",",
"attrs",
")"
] | [
459,
0
] | [
489,
61
] | python | en | ['en', 'en', 'en'] | True |
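A typical use of the factory above, with an illustrative form class:

from django import forms
from django.forms import formset_factory

class ArticleForm(forms.Form):
    title = forms.CharField()

ArticleFormSet = formset_factory(ArticleForm, extra=2, max_num=10, validate_max=True)
formset = ArticleFormSet(initial=[{'title': 'First article'}])
print(formset.total_form_count())   # the initial form plus the two extra forms

all_valid() in the next entry is the usual way to validate several such formsets in one go.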
all_valid | (formsets) | Validate every formset and return True if all are valid. | Validate every formset and return True if all are valid. | def all_valid(formsets):
"""Validate every formset and return True if all are valid."""
# List comprehension ensures is_valid() is called for all formsets.
return all([formset.is_valid() for formset in formsets]) | [
"def",
"all_valid",
"(",
"formsets",
")",
":",
"# List comprehension ensures is_valid() is called for all formsets.",
"return",
"all",
"(",
"[",
"formset",
".",
"is_valid",
"(",
")",
"for",
"formset",
"in",
"formsets",
"]",
")"
] | [
492,
0
] | [
495,
60
] | python | en | ['en', 'en', 'en'] | True |
make_model_tuple | (model) |
Take a model or a string of the form "app_label.ModelName" and return a
corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
assume it's a valid model tuple already and return it unchanged.
|
Take a model or a string of the form "app_label.ModelName" and return a
corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
assume it's a valid model tuple already and return it unchanged.
| def make_model_tuple(model):
"""
Take a model or a string of the form "app_label.ModelName" and return a
corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
assume it's a valid model tuple already and return it unchanged.
"""
try:
if isinstance(model, tuple):
model_tuple = model
elif isinstance(model, str):
app_label, model_name = model.split(".")
model_tuple = app_label, model_name.lower()
else:
model_tuple = model._meta.app_label, model._meta.model_name
assert len(model_tuple) == 2
return model_tuple
except (ValueError, AssertionError):
raise ValueError(
"Invalid model reference '%s'. String model references "
"must be of the form 'app_label.ModelName'." % model
) | [
"def",
"make_model_tuple",
"(",
"model",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"model",
",",
"tuple",
")",
":",
"model_tuple",
"=",
"model",
"elif",
"isinstance",
"(",
"model",
",",
"str",
")",
":",
"app_label",
",",
"model_name",
"=",
"model",
".",
"split",
"(",
"\".\"",
")",
"model_tuple",
"=",
"app_label",
",",
"model_name",
".",
"lower",
"(",
")",
"else",
":",
"model_tuple",
"=",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
".",
"_meta",
".",
"model_name",
"assert",
"len",
"(",
"model_tuple",
")",
"==",
"2",
"return",
"model_tuple",
"except",
"(",
"ValueError",
",",
"AssertionError",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid model reference '%s'. String model references \"",
"\"must be of the form 'app_label.ModelName'.\"",
"%",
"model",
")"
] | [
4,
0
] | [
24,
9
] | python | en | ['en', 'error', 'th'] | False |
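A sketch of the three accepted input forms; make_model_tuple is an internal Django helper, and the import path and User model below are only for illustration:

from django.contrib.auth.models import User
from django.db.models.utils import make_model_tuple

make_model_tuple('auth.User')        # ('auth', 'user')
make_model_tuple(('auth', 'user'))   # a tuple is returned unchanged
make_model_tuple(User)               # ('auth', 'user'), derived from the model's _meta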
resolve_callables | (mapping) |
Generate key/value pairs for the given mapping where the values are
evaluated if they're callable.
|
Generate key/value pairs for the given mapping where the values are
evaluated if they're callable.
| def resolve_callables(mapping):
"""
Generate key/value pairs for the given mapping where the values are
evaluated if they're callable.
"""
for k, v in mapping.items():
yield k, v() if callable(v) else v | [
"def",
"resolve_callables",
"(",
"mapping",
")",
":",
"for",
"k",
",",
"v",
"in",
"mapping",
".",
"items",
"(",
")",
":",
"yield",
"k",
",",
"v",
"(",
")",
"if",
"callable",
"(",
"v",
")",
"else",
"v"
] | [
27,
0
] | [
33,
42
] | python | en | ['en', 'error', 'th'] | False |
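This generator is what lets callable defaults (as in get_or_create()) be evaluated lazily; a small illustration:

defaults = {'name': 'unknown', 'created': lambda: '2021-01-01'}
dict(resolve_callables(defaults))
# {'name': 'unknown', 'created': '2021-01-01'} -- each callable is invoked as its pair is yielded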
EmailBackend.send_messages | (self, email_messages) | Write all messages to the stream in a thread-safe way. | Write all messages to the stream in a thread-safe way. | def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count | [
"def",
"send_messages",
"(",
"self",
",",
"email_messages",
")",
":",
"if",
"not",
"email_messages",
":",
"return",
"msg_count",
"=",
"0",
"with",
"self",
".",
"_lock",
":",
"try",
":",
"stream_created",
"=",
"self",
".",
"open",
"(",
")",
"for",
"message",
"in",
"email_messages",
":",
"self",
".",
"write_message",
"(",
"message",
")",
"self",
".",
"stream",
".",
"flush",
"(",
")",
"# flush after each message",
"msg_count",
"+=",
"1",
"if",
"stream_created",
":",
"self",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"if",
"not",
"self",
".",
"fail_silently",
":",
"raise",
"return",
"msg_count"
] | [
24,
4
] | [
41,
24
] | python | en | ['en', 'en', 'en'] | True |
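The class above writes outgoing mail to a stream instead of sending it (Django's console backend builds on it); a typical way to exercise it in a project, assuming standard settings:

# settings.py
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# application code
from django.core.mail import send_mail
send_mail('Subject', 'Message body', 'from@example.com', ['to@example.com'])
# Each message is printed to stdout and flushed individually; the call returns
# the number of messages handled.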
DatabaseWrapper.disable_constraint_checking | (self) |
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
|
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
| def disable_constraint_checking(self):
"""
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
"""
with self.cursor() as cursor:
cursor.execute('SET foreign_key_checks=0')
return True | [
"def",
"disable_constraint_checking",
"(",
"self",
")",
":",
"with",
"self",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"'SET foreign_key_checks=0'",
")",
"return",
"True"
] | [
273,
4
] | [
281,
19
] | python | en | ['en', 'error', 'th'] | False |
DatabaseWrapper.enable_constraint_checking | (self) |
Re-enable foreign key checks after they have been disabled.
|
Re-enable foreign key checks after they have been disabled.
| def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
with self.cursor() as cursor:
cursor.execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback | [
"def",
"enable_constraint_checking",
"(",
"self",
")",
":",
"# Override needs_rollback in case constraint_checks_disabled is",
"# nested inside transaction.atomic.",
"self",
".",
"needs_rollback",
",",
"needs_rollback",
"=",
"False",
",",
"self",
".",
"needs_rollback",
"try",
":",
"with",
"self",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"'SET foreign_key_checks=1'",
")",
"finally",
":",
"self",
".",
"needs_rollback",
"=",
"needs_rollback"
] | [
283,
4
] | [
294,
48
] | python | en | ['en', 'error', 'th'] | False |
DatabaseWrapper.check_constraints | (self, table_names=None) |
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
|
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
| def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
""" % (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise IntegrityError(
"The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not "
"have a corresponding value in %s.%s."
% (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
) | [
"def",
"check_constraints",
"(",
"self",
",",
"table_names",
"=",
"None",
")",
":",
"with",
"self",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"if",
"table_names",
"is",
"None",
":",
"table_names",
"=",
"self",
".",
"introspection",
".",
"table_names",
"(",
"cursor",
")",
"for",
"table_name",
"in",
"table_names",
":",
"primary_key_column_name",
"=",
"self",
".",
"introspection",
".",
"get_primary_key_column",
"(",
"cursor",
",",
"table_name",
")",
"if",
"not",
"primary_key_column_name",
":",
"continue",
"key_columns",
"=",
"self",
".",
"introspection",
".",
"get_key_columns",
"(",
"cursor",
",",
"table_name",
")",
"for",
"column_name",
",",
"referenced_table_name",
",",
"referenced_column_name",
"in",
"key_columns",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\n SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING\n LEFT JOIN `%s` as REFERRED\n ON (REFERRING.`%s` = REFERRED.`%s`)\n WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL\n \"\"\"",
"%",
"(",
"primary_key_column_name",
",",
"column_name",
",",
"table_name",
",",
"referenced_table_name",
",",
"column_name",
",",
"referenced_column_name",
",",
"column_name",
",",
"referenced_column_name",
",",
")",
")",
"for",
"bad_row",
"in",
"cursor",
".",
"fetchall",
"(",
")",
":",
"raise",
"IntegrityError",
"(",
"\"The row in table '%s' with primary key '%s' has an invalid \"",
"\"foreign key: %s.%s contains a value '%s' that does not \"",
"\"have a corresponding value in %s.%s.\"",
"%",
"(",
"table_name",
",",
"bad_row",
"[",
"0",
"]",
",",
"table_name",
",",
"column_name",
",",
"bad_row",
"[",
"1",
"]",
",",
"referenced_table_name",
",",
"referenced_column_name",
",",
")",
")"
] | [
296,
4
] | [
334,
25
] | python | en | ['en', 'error', 'th'] | False |
DatabaseWrapper.check_constraints | (self, table_names=None) |
Check constraints by setting them to immediate. Return them to deferred
afterward.
|
Check constraints by setting them to immediate. Return them to deferred
afterward.
| def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute('SET CONSTRAINTS ALL IMMEDIATE')
cursor.execute('SET CONSTRAINTS ALL DEFERRED') | [
"def",
"check_constraints",
"(",
"self",
",",
"table_names",
"=",
"None",
")",
":",
"with",
"self",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"'SET CONSTRAINTS ALL IMMEDIATE'",
")",
"cursor",
".",
"execute",
"(",
"'SET CONSTRAINTS ALL DEFERRED'",
")"
] | [
278,
4
] | [
285,
58
] | python | en | ['en', 'error', 'th'] | False |
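The intended calling pattern for the constraint-checking methods above is easiest to show with the context manager Django exposes on the connection object (the loader name below is hypothetical):

from django.db import connection

with connection.constraint_checks_disabled():
    bulk_load_rows_with_forward_references()   # hypothetical loader that inserts rows out of order
# Raises IntegrityError if the load left dangling foreign key references behind.
connection.check_constraints()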
update_proxy_model_permissions | (apps, schema_editor, reverse=False) |
Update the content_type of proxy model permissions to use the ContentType
of the proxy model.
|
Update the content_type of proxy model permissions to use the ContentType
of the proxy model.
| def update_proxy_model_permissions(apps, schema_editor, reverse=False):
"""
Update the content_type of proxy model permissions to use the ContentType
of the proxy model.
"""
style = color_style()
Permission = apps.get_model('auth', 'Permission')
ContentType = apps.get_model('contenttypes', 'ContentType')
alias = schema_editor.connection.alias
for Model in apps.get_models():
opts = Model._meta
if not opts.proxy:
continue
proxy_default_permissions_codenames = [
'%s_%s' % (action, opts.model_name)
for action in opts.default_permissions
]
permissions_query = Q(codename__in=proxy_default_permissions_codenames)
for codename, name in opts.permissions:
permissions_query = permissions_query | Q(codename=codename, name=name)
content_type_manager = ContentType.objects.db_manager(alias)
concrete_content_type = content_type_manager.get_for_model(Model, for_concrete_model=True)
proxy_content_type = content_type_manager.get_for_model(Model, for_concrete_model=False)
old_content_type = proxy_content_type if reverse else concrete_content_type
new_content_type = concrete_content_type if reverse else proxy_content_type
try:
with transaction.atomic(using=alias):
Permission.objects.using(alias).filter(
permissions_query,
content_type=old_content_type,
).update(content_type=new_content_type)
except IntegrityError:
old = '{}_{}'.format(old_content_type.app_label, old_content_type.model)
new = '{}_{}'.format(new_content_type.app_label, new_content_type.model)
sys.stdout.write(style.WARNING(WARNING.format(old=old, new=new, query=permissions_query))) | [
"def",
"update_proxy_model_permissions",
"(",
"apps",
",",
"schema_editor",
",",
"reverse",
"=",
"False",
")",
":",
"style",
"=",
"color_style",
"(",
")",
"Permission",
"=",
"apps",
".",
"get_model",
"(",
"'auth'",
",",
"'Permission'",
")",
"ContentType",
"=",
"apps",
".",
"get_model",
"(",
"'contenttypes'",
",",
"'ContentType'",
")",
"alias",
"=",
"schema_editor",
".",
"connection",
".",
"alias",
"for",
"Model",
"in",
"apps",
".",
"get_models",
"(",
")",
":",
"opts",
"=",
"Model",
".",
"_meta",
"if",
"not",
"opts",
".",
"proxy",
":",
"continue",
"proxy_default_permissions_codenames",
"=",
"[",
"'%s_%s'",
"%",
"(",
"action",
",",
"opts",
".",
"model_name",
")",
"for",
"action",
"in",
"opts",
".",
"default_permissions",
"]",
"permissions_query",
"=",
"Q",
"(",
"codename__in",
"=",
"proxy_default_permissions_codenames",
")",
"for",
"codename",
",",
"name",
"in",
"opts",
".",
"permissions",
":",
"permissions_query",
"=",
"permissions_query",
"|",
"Q",
"(",
"codename",
"=",
"codename",
",",
"name",
"=",
"name",
")",
"content_type_manager",
"=",
"ContentType",
".",
"objects",
".",
"db_manager",
"(",
"alias",
")",
"concrete_content_type",
"=",
"content_type_manager",
".",
"get_for_model",
"(",
"Model",
",",
"for_concrete_model",
"=",
"True",
")",
"proxy_content_type",
"=",
"content_type_manager",
".",
"get_for_model",
"(",
"Model",
",",
"for_concrete_model",
"=",
"False",
")",
"old_content_type",
"=",
"proxy_content_type",
"if",
"reverse",
"else",
"concrete_content_type",
"new_content_type",
"=",
"concrete_content_type",
"if",
"reverse",
"else",
"proxy_content_type",
"try",
":",
"with",
"transaction",
".",
"atomic",
"(",
"using",
"=",
"alias",
")",
":",
"Permission",
".",
"objects",
".",
"using",
"(",
"alias",
")",
".",
"filter",
"(",
"permissions_query",
",",
"content_type",
"=",
"old_content_type",
",",
")",
".",
"update",
"(",
"content_type",
"=",
"new_content_type",
")",
"except",
"IntegrityError",
":",
"old",
"=",
"'{}_{}'",
".",
"format",
"(",
"old_content_type",
".",
"app_label",
",",
"old_content_type",
".",
"model",
")",
"new",
"=",
"'{}_{}'",
".",
"format",
"(",
"new_content_type",
".",
"app_label",
",",
"new_content_type",
".",
"model",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"style",
".",
"WARNING",
"(",
"WARNING",
".",
"format",
"(",
"old",
"=",
"old",
",",
"new",
"=",
"new",
",",
"query",
"=",
"permissions_query",
")",
")",
")"
] | [
16,
0
] | [
50,
102
] | python | en | ['en', 'error', 'th'] | False |
revert_proxy_model_permissions | (apps, schema_editor) |
Update the content_type of proxy model permissions to use the ContentType
of the concrete model.
|
Update the content_type of proxy model permissions to use the ContentType
of the concrete model.
| def revert_proxy_model_permissions(apps, schema_editor):
"""
Update the content_type of proxy model permissions to use the ContentType
of the concrete model.
"""
update_proxy_model_permissions(apps, schema_editor, reverse=True) | [
"def",
"revert_proxy_model_permissions",
"(",
"apps",
",",
"schema_editor",
")",
":",
"update_proxy_model_permissions",
"(",
"apps",
",",
"schema_editor",
",",
"reverse",
"=",
"True",
")"
] | [
53,
0
] | [
58,
69
] | python | en | ['en', 'error', 'th'] | False |
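Both functions are meant to be wired into a data migration; a minimal sketch of such a migration (the dependency entry is illustrative):

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [
        ('auth', '0010_alter_group_name_max_length'),   # illustrative predecessor
    ]
    operations = [
        migrations.RunPython(
            update_proxy_model_permissions,
            revert_proxy_model_permissions,
        ),
    ]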
StatementSplitter._reset | (self) | Set the filter attributes to its default values | Set the filter attributes to its default values | def _reset(self):
"""Set the filter attributes to its default values"""
self._in_declare = False
self._is_create = False
self._begin_depth = 0
self.consume_ws = False
self.tokens = []
self.level = 0 | [
"def",
"_reset",
"(",
"self",
")",
":",
"self",
".",
"_in_declare",
"=",
"False",
"self",
".",
"_is_create",
"=",
"False",
"self",
".",
"_begin_depth",
"=",
"0",
"self",
".",
"consume_ws",
"=",
"False",
"self",
".",
"tokens",
"=",
"[",
"]",
"self",
".",
"level",
"=",
"0"
] | [
16,
4
] | [
24,
22
] | python | en | ['en', 'en', 'en'] | True |
StatementSplitter._change_splitlevel | (self, ttype, value) | Get the new split level (increase, decrease or remain equal) | Get the new split level (increase, decrease or remain equal) | def _change_splitlevel(self, ttype, value):
"""Get the new split level (increase, decrease or remain equal)"""
# parenthesis increase/decrease a level
if ttype is T.Punctuation and value == '(':
return 1
elif ttype is T.Punctuation and value == ')':
return -1
elif ttype not in T.Keyword: # if normal token return
return 0
# Everything after here is ttype = T.Keyword
# Also to note, once entered an If statement you are done and basically
# returning
unified = value.upper()
# three keywords begin with CREATE, but only one of them is DDL
# DDL Create though can contain more words such as "or replace"
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
return 0
# can have nested declare inside of begin...
if unified == 'DECLARE' and self._is_create and self._begin_depth == 0:
self._in_declare = True
return 1
if unified == 'BEGIN':
self._begin_depth += 1
if self._is_create:
# FIXME(andi): This makes no sense.
return 1
return 0
# Should this respect a preceding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
# Would having multiple CASE WHEN END and an Assignment Operator
# cause the statement to cut off prematurely?
if unified == 'END':
self._begin_depth = max(0, self._begin_depth - 1)
return -1
if (unified in ('IF', 'FOR', 'WHILE', 'CASE')
and self._is_create and self._begin_depth > 0):
return 1
if unified in ('END IF', 'END FOR', 'END WHILE'):
return -1
# Default
return 0 | [
"def",
"_change_splitlevel",
"(",
"self",
",",
"ttype",
",",
"value",
")",
":",
"# parenthesis increase/decrease a level",
"if",
"ttype",
"is",
"T",
".",
"Punctuation",
"and",
"value",
"==",
"'('",
":",
"return",
"1",
"elif",
"ttype",
"is",
"T",
".",
"Punctuation",
"and",
"value",
"==",
"')'",
":",
"return",
"-",
"1",
"elif",
"ttype",
"not",
"in",
"T",
".",
"Keyword",
":",
"# if normal token return",
"return",
"0",
"# Everything after here is ttype = T.Keyword",
"# Also to note, once entered an If statement you are done and basically",
"# returning",
"unified",
"=",
"value",
".",
"upper",
"(",
")",
"# three keywords begin with CREATE, but only one of them is DDL",
"# DDL Create though can contain more words such as \"or replace\"",
"if",
"ttype",
"is",
"T",
".",
"Keyword",
".",
"DDL",
"and",
"unified",
".",
"startswith",
"(",
"'CREATE'",
")",
":",
"self",
".",
"_is_create",
"=",
"True",
"return",
"0",
"# can have nested declare inside of being...",
"if",
"unified",
"==",
"'DECLARE'",
"and",
"self",
".",
"_is_create",
"and",
"self",
".",
"_begin_depth",
"==",
"0",
":",
"self",
".",
"_in_declare",
"=",
"True",
"return",
"1",
"if",
"unified",
"==",
"'BEGIN'",
":",
"self",
".",
"_begin_depth",
"+=",
"1",
"if",
"self",
".",
"_is_create",
":",
"# FIXME(andi): This makes no sense.",
"return",
"1",
"return",
"0",
"# Should this respect a preceding BEGIN?",
"# In CASE ... WHEN ... END this results in a split level -1.",
"# Would having multiple CASE WHEN END and a Assignment Operator",
"# cause the statement to cut off prematurely?",
"if",
"unified",
"==",
"'END'",
":",
"self",
".",
"_begin_depth",
"=",
"max",
"(",
"0",
",",
"self",
".",
"_begin_depth",
"-",
"1",
")",
"return",
"-",
"1",
"if",
"(",
"unified",
"in",
"(",
"'IF'",
",",
"'FOR'",
",",
"'WHILE'",
",",
"'CASE'",
")",
"and",
"self",
".",
"_is_create",
"and",
"self",
".",
"_begin_depth",
">",
"0",
")",
":",
"return",
"1",
"if",
"unified",
"in",
"(",
"'END IF'",
",",
"'END FOR'",
",",
"'END WHILE'",
")",
":",
"return",
"-",
"1",
"# Default",
"return",
"0"
] | [
26,
4
] | [
76,
16
] | python | en | ['en', 'en', 'en'] | True |
StatementSplitter.process | (self, stream) | Process the stream | Process the stream | def process(self, stream):
"""Process the stream"""
EOS_TTYPE = T.Whitespace, T.Comment.Single
# Run over all stream tokens
for ttype, value in stream:
# Yield token if we finished a statement and there's no whitespace
# It will count a newline token as non-whitespace. In this context
# whitespace ignores newlines.
# why don't multi-line comments also count?
if self.consume_ws and ttype not in EOS_TTYPE:
yield sql.Statement(self.tokens)
# Reset filter and prepare to process next statement
self._reset()
# Change current split level (increase, decrease or remain equal)
self.level += self._change_splitlevel(ttype, value)
# Append the token to the current statement
self.tokens.append(sql.Token(ttype, value))
# Check if we get the end of a statement
if self.level <= 0 and ttype is T.Punctuation and value == ';':
self.consume_ws = True
# Yield pending statement (if any)
if self.tokens and not all(t.is_whitespace for t in self.tokens):
yield sql.Statement(self.tokens) | [
"def",
"process",
"(",
"self",
",",
"stream",
")",
":",
"EOS_TTYPE",
"=",
"T",
".",
"Whitespace",
",",
"T",
".",
"Comment",
".",
"Single",
"# Run over all stream tokens",
"for",
"ttype",
",",
"value",
"in",
"stream",
":",
"# Yield token if we finished a statement and there's no whitespaces",
"# It will count newline token as a non whitespace. In this context",
"# whitespace ignores newlines.",
"# why don't multi line comments also count?",
"if",
"self",
".",
"consume_ws",
"and",
"ttype",
"not",
"in",
"EOS_TTYPE",
":",
"yield",
"sql",
".",
"Statement",
"(",
"self",
".",
"tokens",
")",
"# Reset filter and prepare to process next statement",
"self",
".",
"_reset",
"(",
")",
"# Change current split level (increase, decrease or remain equal)",
"self",
".",
"level",
"+=",
"self",
".",
"_change_splitlevel",
"(",
"ttype",
",",
"value",
")",
"# Append the token to the current statement",
"self",
".",
"tokens",
".",
"append",
"(",
"sql",
".",
"Token",
"(",
"ttype",
",",
"value",
")",
")",
"# Check if we get the end of a statement",
"if",
"self",
".",
"level",
"<=",
"0",
"and",
"ttype",
"is",
"T",
".",
"Punctuation",
"and",
"value",
"==",
"';'",
":",
"self",
".",
"consume_ws",
"=",
"True",
"# Yield pending statement (if any)",
"if",
"self",
".",
"tokens",
"and",
"not",
"all",
"(",
"t",
".",
"is_whitespace",
"for",
"t",
"in",
"self",
".",
"tokens",
")",
":",
"yield",
"sql",
".",
"Statement",
"(",
"self",
".",
"tokens",
")"
] | [
78,
4
] | [
106,
44
] | python | en | ['en', 'zh', 'en'] | True |
build_networks | (
state_shape, action_size, learning_rate,
critic_weight, hidden_neurons, entropy) | Creates Actor Critic Neural Networks.
Creates a two hidden-layer Policy Gradient Neural Network. The loss
function is altered to be a log-likelihood function weighted
by an action's advantage.
Args:
state_shape: a tuple of ints representing the observation space.
action_size (int): the number of possible actions.
learning_rate (float): the neural network's learning rate.
critic_weight (float): how much to weigh the critic's training loss.
hidden_neurons (int): the number of neurons to use per hidden layer.
entropy (float): how much to encourage exploration versus exploitation.
| Creates Actor Critic Neural Networks. | def build_networks(
state_shape, action_size, learning_rate,
critic_weight, hidden_neurons, entropy):
"""Creates Actor Critic Neural Networks.
Creates a two hidden-layer Policy Gradient Neural Network. The loss
function is altered to be a log-likelihood function weighted
by an action's advantage.
Args:
state_shape: a tuple of ints representing the observation space.
action_size (int): the number of possible actions.
learning_rate (float): the neural network's learning rate.
critic_weight (float): how much to weigh the critic's training loss.
hidden_neurons (int): the number of neurons to use per hidden layer.
entropy (float): how much to encourage exploration versus exploitation.
"""
state_input = layers.Input(state_shape, name='frames')
advantages = layers.Input((1,), name='advantages')
actor_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
actor_2 = layers.Dense(hidden_neurons, activation='relu')(actor_1)
probabilities = layers.Dense(action_size, activation='softmax')(actor_2)
critic_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
critic_2 = layers.Dense(hidden_neurons, activation='relu')(critic_1)
values = layers.Dense(1, activation='linear')(critic_2)
def actor_loss(y_true, y_pred):
y_pred_clipped = K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE)
log_lik = y_true*K.log(y_pred_clipped)
entropy_loss = y_pred * K.log(K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE))
return K.sum(-log_lik * advantages) - (entropy * K.sum(entropy_loss))
# Train both actor and critic at the same time.
actor = Model(
inputs=[state_input, advantages], outputs=[probabilities, values])
actor.compile(
loss=[actor_loss, 'mean_squared_error'],
loss_weights=[1, critic_weight],
optimizer=tf.keras.optimizers.Adam(lr=learning_rate))
critic = Model(inputs=[state_input], outputs=[values])
policy = Model(inputs=[state_input], outputs=[probabilities])
return actor, critic, policy | [
"def",
"build_networks",
"(",
"state_shape",
",",
"action_size",
",",
"learning_rate",
",",
"critic_weight",
",",
"hidden_neurons",
",",
"entropy",
")",
":",
"state_input",
"=",
"layers",
".",
"Input",
"(",
"state_shape",
",",
"name",
"=",
"'frames'",
")",
"advantages",
"=",
"layers",
".",
"Input",
"(",
"(",
"1",
",",
")",
",",
"name",
"=",
"'advantages'",
")",
"actor_1",
"=",
"layers",
".",
"Dense",
"(",
"hidden_neurons",
",",
"activation",
"=",
"'relu'",
")",
"(",
"state_input",
")",
"actor_2",
"=",
"layers",
".",
"Dense",
"(",
"hidden_neurons",
",",
"activation",
"=",
"'relu'",
")",
"(",
"actor_1",
")",
"probabilities",
"=",
"layers",
".",
"Dense",
"(",
"action_size",
",",
"activation",
"=",
"'softmax'",
")",
"(",
"actor_2",
")",
"critic_1",
"=",
"layers",
".",
"Dense",
"(",
"hidden_neurons",
",",
"activation",
"=",
"'relu'",
")",
"(",
"state_input",
")",
"critic_2",
"=",
"layers",
".",
"Dense",
"(",
"hidden_neurons",
",",
"activation",
"=",
"'relu'",
")",
"(",
"critic_1",
")",
"values",
"=",
"layers",
".",
"Dense",
"(",
"1",
",",
"activation",
"=",
"'linear'",
")",
"(",
"critic_2",
")",
"def",
"actor_loss",
"(",
"y_true",
",",
"y_pred",
")",
":",
"y_pred_clipped",
"=",
"K",
".",
"clip",
"(",
"y_pred",
",",
"CLIP_EDGE",
",",
"1",
"-",
"CLIP_EDGE",
")",
"log_lik",
"=",
"y_true",
"*",
"K",
".",
"log",
"(",
"y_pred_clipped",
")",
"entropy_loss",
"=",
"y_pred",
"*",
"K",
".",
"log",
"(",
"K",
".",
"clip",
"(",
"y_pred",
",",
"CLIP_EDGE",
",",
"1",
"-",
"CLIP_EDGE",
")",
")",
"return",
"K",
".",
"sum",
"(",
"-",
"log_lik",
"*",
"advantages",
")",
"-",
"(",
"entropy",
"*",
"K",
".",
"sum",
"(",
"entropy_loss",
")",
")",
"# Train both actor and critic at the same time.",
"actor",
"=",
"Model",
"(",
"inputs",
"=",
"[",
"state_input",
",",
"advantages",
"]",
",",
"outputs",
"=",
"[",
"probabilities",
",",
"values",
"]",
")",
"actor",
".",
"compile",
"(",
"loss",
"=",
"[",
"actor_loss",
",",
"'mean_squared_error'",
"]",
",",
"loss_weights",
"=",
"[",
"1",
",",
"critic_weight",
"]",
",",
"optimizer",
"=",
"tf",
".",
"keras",
".",
"optimizers",
".",
"Adam",
"(",
"lr",
"=",
"learning_rate",
")",
")",
"critic",
"=",
"Model",
"(",
"inputs",
"=",
"[",
"state_input",
"]",
",",
"outputs",
"=",
"[",
"values",
"]",
")",
"policy",
"=",
"Model",
"(",
"inputs",
"=",
"[",
"state_input",
"]",
",",
"outputs",
"=",
"[",
"probabilities",
"]",
")",
"return",
"actor",
",",
"critic",
",",
"policy"
] | [
30,
0
] | [
74,
32
] | python | en | ['en', 'en', 'en'] | True |
Memory.add | (self, experience) | Adds an experience into the memory buffer.
Args:
experience: (state, action, reward, state_prime_value, done) tuple.
| Adds an experience into the memory buffer. | def add(self, experience):
"""Adds an experience into the memory buffer.
Args:
experience: (state, action, reward, state_prime_value, done) tuple.
"""
self.buffer.append(experience) | [
"def",
"add",
"(",
"self",
",",
"experience",
")",
":",
"self",
".",
"buffer",
".",
"append",
"(",
"experience",
")"
] | [
89,
4
] | [
95,
38
] | python | en | ['en', 'en', 'en'] | True |
Memory.sample | (self) | Returns formatted experiences and clears the buffer.
Returns:
(list): A tuple of lists with structure [
[states], [actions], [rewards], [state_prime_values], [dones]
]
| Returns formatted experiences and clears the buffer. | def sample(self):
"""Returns formatted experiences and clears the buffer.
Returns:
(list): A tuple of lists with structure [
[states], [actions], [rewards], [state_prime_values], [dones]
]
"""
# Columns have different data types, so numpy array would be awkward.
batch = np.array(self.buffer).T.tolist()
states_mb = np.array(batch[0], dtype=np.float32)
actions_mb = np.array(batch[1], dtype=np.int8)
rewards_mb = np.array(batch[2], dtype=np.float32)
dones_mb = np.array(batch[3], dtype=np.int8)
value_mb = np.squeeze(np.array(batch[4], dtype=np.float32))
self.buffer = []
return states_mb, actions_mb, rewards_mb, dones_mb, value_mb | [
"def",
"sample",
"(",
"self",
")",
":",
"# Columns have different data types, so numpy array would be awkward.",
"batch",
"=",
"np",
".",
"array",
"(",
"self",
".",
"buffer",
")",
".",
"T",
".",
"tolist",
"(",
")",
"states_mb",
"=",
"np",
".",
"array",
"(",
"batch",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"actions_mb",
"=",
"np",
".",
"array",
"(",
"batch",
"[",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"rewards_mb",
"=",
"np",
".",
"array",
"(",
"batch",
"[",
"2",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"dones_mb",
"=",
"np",
".",
"array",
"(",
"batch",
"[",
"3",
"]",
",",
"dtype",
"=",
"np",
".",
"int8",
")",
"value_mb",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"array",
"(",
"batch",
"[",
"4",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
"self",
".",
"buffer",
"=",
"[",
"]",
"return",
"states_mb",
",",
"actions_mb",
",",
"rewards_mb",
",",
"dones_mb",
",",
"value_mb"
] | [
100,
4
] | [
116,
68
] | python | en | ['en', 'en', 'en'] | True |
Agent.__init__ | (self, actor, critic, policy, memory, action_size) | Initializes the agent with DQN and memory sub-classes.
Args:
network: A neural network created from deep_q_network().
memory: A Memory class object.
epsilon_decay (float): The rate at which to decay random actions.
action_size (int): The number of possible actions to take.
| Initializes the agent with DQN and memory sub-classes. | def __init__(self, actor, critic, policy, memory, action_size):
"""Initializes the agent with DQN and memory sub-classes.
Args:
network: A neural network created from deep_q_network().
memory: A Memory class object.
epsilon_decay (float): The rate at which to decay random actions.
action_size (int): The number of possible actions to take.
"""
self.actor = actor
self.critic = critic
self.policy = policy
self.action_size = action_size
self.memory = memory | [
"def",
"__init__",
"(",
"self",
",",
"actor",
",",
"critic",
",",
"policy",
",",
"memory",
",",
"action_size",
")",
":",
"self",
".",
"actor",
"=",
"actor",
"self",
".",
"critic",
"=",
"critic",
"self",
".",
"policy",
"=",
"policy",
"self",
".",
"action_size",
"=",
"action_size",
"self",
".",
"memory",
"=",
"memory"
] | [
121,
4
] | [
134,
28
] | python | en | ['en', 'en', 'en'] | True |
Agent.act | (self, state) | Selects an action for the agent to take given a game state.
Args:
state (list of numbers): The state of the environment to act on.
training (bool): True if the agent is training.
Returns:
(int) The index of the action to take.
| Selects an action for the agent to take given a game state. | def act(self, state):
"""Selects an action for the agent to take given a game state.
Args:
state (list of numbers): The state of the environment to act on.
training (bool): True if the agent is training.
Returns:
(int) The index of the action to take.
"""
# If not acting randomly, take action with highest predicted value.
state_batch = np.expand_dims(state, axis=0)
probabilities = self.policy.predict(state_batch)[0]
action = np.random.choice(self.action_size, p=probabilities)
return action | [
"def",
"act",
"(",
"self",
",",
"state",
")",
":",
"# If not acting randomly, take action with highest predicted value.",
"state_batch",
"=",
"np",
".",
"expand_dims",
"(",
"state",
",",
"axis",
"=",
"0",
")",
"probabilities",
"=",
"self",
".",
"policy",
".",
"predict",
"(",
"state_batch",
")",
"[",
"0",
"]",
"action",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"self",
".",
"action_size",
",",
"p",
"=",
"probabilities",
")",
"return",
"action"
] | [
136,
4
] | [
150,
21
] | python | en | ['en', 'en', 'en'] | True |
Agent.learn | (self) | Trains the Deep Q Network based on stored experiences. | Trains the Deep Q Network based on stored experiences. | def learn(self):
"""Trains the Deep Q Network based on stored experiences."""
gamma = self.memory.gamma
experiences = self.memory.sample()
state_mb, action_mb, reward_mb, dones_mb, next_value = experiences
# One-hot encode actions
actions = np.zeros([len(action_mb), self.action_size])
actions[np.arange(len(action_mb)), action_mb] = 1
# Apply TD(0)
discount_mb = reward_mb + next_value * gamma * (1 - dones_mb)
state_values = self.critic.predict([state_mb])
advantages = discount_mb - np.squeeze(state_values)
self.actor.train_on_batch(
[state_mb, advantages], [actions, discount_mb]) | [
"def",
"learn",
"(",
"self",
")",
":",
"gamma",
"=",
"self",
".",
"memory",
".",
"gamma",
"experiences",
"=",
"self",
".",
"memory",
".",
"sample",
"(",
")",
"state_mb",
",",
"action_mb",
",",
"reward_mb",
",",
"dones_mb",
",",
"next_value",
"=",
"experiences",
"# One hot enocde actions",
"actions",
"=",
"np",
".",
"zeros",
"(",
"[",
"len",
"(",
"action_mb",
")",
",",
"self",
".",
"action_size",
"]",
")",
"actions",
"[",
"np",
".",
"arange",
"(",
"len",
"(",
"action_mb",
")",
")",
",",
"action_mb",
"]",
"=",
"1",
"# Apply TD(0)",
"discount_mb",
"=",
"reward_mb",
"+",
"next_value",
"*",
"gamma",
"*",
"(",
"1",
"-",
"dones_mb",
")",
"state_values",
"=",
"self",
".",
"critic",
".",
"predict",
"(",
"[",
"state_mb",
"]",
")",
"advantages",
"=",
"discount_mb",
"-",
"np",
".",
"squeeze",
"(",
"state_values",
")",
"self",
".",
"actor",
".",
"train_on_batch",
"(",
"[",
"state_mb",
",",
"advantages",
"]",
",",
"[",
"actions",
",",
"discount_mb",
"]",
")"
] | [
152,
4
] | [
167,
59
] | python | en | ['en', 'en', 'en'] | True |
observations_to_float_rgb | (scene: np.ndarray,
user_input: Tuple[Tuple[int, int], ...] = (),
is_solved: Optional[bool] = None) | Convert an observation as returned by a simulator to an image. | Convert an observation as returned by a simulator to an image. | def observations_to_float_rgb(scene: np.ndarray,
user_input: Tuple[Tuple[int, int], ...] = (),
is_solved: Optional[bool] = None) -> np.ndarray:
"""Convert an observation as returned by a simulator to an image."""
return _to_float(observations_to_uint8_rgb(scene, user_input, is_solved)) | [
"def",
"observations_to_float_rgb",
"(",
"scene",
":",
"np",
".",
"ndarray",
",",
"user_input",
":",
"Tuple",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
",",
"...",
"]",
"=",
"(",
")",
",",
"is_solved",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"return",
"_to_float",
"(",
"observations_to_uint8_rgb",
"(",
"scene",
",",
"user_input",
",",
"is_solved",
")",
")"
] | [
56,
0
] | [
60,
77
] | python | en | ['en', 'en', 'en'] | True |
observations_to_uint8_rgb | (scene: np.ndarray,
user_input: Tuple[Tuple[int, int], ...] = (),
is_solved: Optional[bool] = None) | Convert an observation as returned by a simulator to an image. | Convert an observation as returned by a simulator to an image. | def observations_to_uint8_rgb(scene: np.ndarray,
user_input: Tuple[Tuple[int, int], ...] = (),
is_solved: Optional[bool] = None) -> np.ndarray:
"""Convert an observation as returned by a simulator to an image."""
base_image = WAD_COLORS[scene]
for y, x in user_input:
if 0 <= x < base_image.shape[1] and 0 <= y < base_image.shape[0]:
base_image[x, y] = [255, 0, 0]
base_image = base_image[::-1]
if is_solved is not None:
color = SOLVE_STATUS_COLORS[int(is_solved)]
line = np.tile(color.reshape((1, 1, 3)), (5, base_image.shape[1], 1))
line[:, :5] = WAD_COLORS[0]
line[:, -5:] = WAD_COLORS[0]
base_image = np.concatenate([line, base_image], 0)
return base_image | [
"def",
"observations_to_uint8_rgb",
"(",
"scene",
":",
"np",
".",
"ndarray",
",",
"user_input",
":",
"Tuple",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
",",
"...",
"]",
"=",
"(",
")",
",",
"is_solved",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"base_image",
"=",
"WAD_COLORS",
"[",
"scene",
"]",
"for",
"y",
",",
"x",
"in",
"user_input",
":",
"if",
"0",
"<=",
"x",
"<",
"base_image",
".",
"shape",
"[",
"1",
"]",
"and",
"0",
"<=",
"y",
"<",
"base_image",
".",
"shape",
"[",
"0",
"]",
":",
"base_image",
"[",
"x",
",",
"y",
"]",
"=",
"[",
"255",
",",
"0",
",",
"0",
"]",
"base_image",
"=",
"base_image",
"[",
":",
":",
"-",
"1",
"]",
"if",
"is_solved",
"is",
"not",
"None",
":",
"color",
"=",
"SOLVE_STATUS_COLORS",
"[",
"int",
"(",
"is_solved",
")",
"]",
"line",
"=",
"np",
".",
"tile",
"(",
"color",
".",
"reshape",
"(",
"(",
"1",
",",
"1",
",",
"3",
")",
")",
",",
"(",
"5",
",",
"base_image",
".",
"shape",
"[",
"1",
"]",
",",
"1",
")",
")",
"line",
"[",
":",
",",
":",
"5",
"]",
"=",
"WAD_COLORS",
"[",
"0",
"]",
"line",
"[",
":",
",",
"-",
"5",
":",
"]",
"=",
"WAD_COLORS",
"[",
"0",
"]",
"base_image",
"=",
"np",
".",
"concatenate",
"(",
"[",
"line",
",",
"base_image",
"]",
",",
"0",
")",
"return",
"base_image"
] | [
63,
0
] | [
79,
21
] | python | en | ['en', 'en', 'en'] | True |
save_observation_series_to_gif | (batched_observation_series_rows,
fpath,
solved_states=None,
solved_wrt_step=False,
pad_frames=True,
fps=10) | Saves a list of arrays of intermediate scenes as a gif.
Args:
batched_observation_series_rows:
[[[video1, video2, ..., videoB]], (B = batch size)
[another row of frames, typically corresponding to earlier one]
]
Each video is TxHxW, in the PHYRE format (not RGB)
| Saves a list of arrays of intermediate scenes as a gif.
Args:
batched_observation_series_rows:
[[[video1, video2, ..., videoB]], (B = batch size)
[another row of frames, typically corresponding to earlier one]
]
Each video is TxHxW, in the PHYRE format (not RGB) | def save_observation_series_to_gif(batched_observation_series_rows,
fpath,
solved_states=None,
solved_wrt_step=False,
pad_frames=True,
fps=10):
"""Saves a list of arrays of intermediate scenes as a gif.
Args:
batched_observation_series_rows:
[[[video1, video2, ..., videoB]], (B = batch size)
[another row of frames, typically corresponding to earlier one]
]
Each video is TxHxW, in the PHYRE format (not RGB)
"""
max_steps = max(len(img) for img in batched_observation_series_rows[0])
images_per_row = []
for row_id, batched_observation_series in enumerate(batched_observation_series_rows):
images_per_step = []
for step in range(max_steps):
images_for_step = []
for i, images in enumerate(batched_observation_series):
real_step = min(len(images) - 1, step)
if solved_states is None:
solved = None
elif solved_wrt_step:
solved = solved_states[step]
else:
solved = solved_states[i]
img = process_frame_for_gif(images[real_step],
solved if row_id == 0 else None,
pad_frames)
images_for_step.append(img)
images_for_step = np.concatenate(images_for_step, axis=1)
images_per_step.append(images_for_step)
images_per_row.append(images_per_step)
# Concatenate all rows on the vertical dimension, for all time points
final_images = []
for time_step in range(len(images_per_row[0])):
all_frames = [row_images[time_step] for row_images in images_per_row]
all_frames = np.concatenate(all_frames, axis=0)
final_images.append(all_frames)
imageio.mimwrite(fpath, final_images, fps=fps) | [
"def",
"save_observation_series_to_gif",
"(",
"batched_observation_series_rows",
",",
"fpath",
",",
"solved_states",
"=",
"None",
",",
"solved_wrt_step",
"=",
"False",
",",
"pad_frames",
"=",
"True",
",",
"fps",
"=",
"10",
")",
":",
"max_steps",
"=",
"max",
"(",
"len",
"(",
"img",
")",
"for",
"img",
"in",
"batched_observation_series_rows",
"[",
"0",
"]",
")",
"images_per_row",
"=",
"[",
"]",
"for",
"row_id",
",",
"batched_observation_series",
"in",
"enumerate",
"(",
"batched_observation_series_rows",
")",
":",
"images_per_step",
"=",
"[",
"]",
"for",
"step",
"in",
"range",
"(",
"max_steps",
")",
":",
"images_for_step",
"=",
"[",
"]",
"for",
"i",
",",
"images",
"in",
"enumerate",
"(",
"batched_observation_series",
")",
":",
"real_step",
"=",
"min",
"(",
"len",
"(",
"images",
")",
"-",
"1",
",",
"step",
")",
"if",
"solved_states",
"is",
"None",
":",
"solved",
"=",
"None",
"elif",
"solved_wrt_step",
":",
"solved",
"=",
"solved_states",
"[",
"step",
"]",
"else",
":",
"solved",
"=",
"solved_states",
"[",
"i",
"]",
"img",
"=",
"process_frame_for_gif",
"(",
"images",
"[",
"real_step",
"]",
",",
"solved",
"if",
"row_id",
"==",
"0",
"else",
"None",
",",
"pad_frames",
")",
"images_for_step",
".",
"append",
"(",
"img",
")",
"images_for_step",
"=",
"np",
".",
"concatenate",
"(",
"images_for_step",
",",
"axis",
"=",
"1",
")",
"images_per_step",
".",
"append",
"(",
"images_for_step",
")",
"images_per_row",
".",
"append",
"(",
"images_per_step",
")",
"# Concatenate all rows on the vertical dimension, for all time points",
"final_images",
"=",
"[",
"]",
"for",
"time_step",
"in",
"range",
"(",
"len",
"(",
"images_per_row",
"[",
"0",
"]",
")",
")",
":",
"all_frames",
"=",
"[",
"row_images",
"[",
"time_step",
"]",
"for",
"row_images",
"in",
"images_per_row",
"]",
"all_frames",
"=",
"np",
".",
"concatenate",
"(",
"all_frames",
",",
"axis",
"=",
"0",
")",
"final_images",
".",
"append",
"(",
"all_frames",
")",
"imageio",
".",
"mimwrite",
"(",
"fpath",
",",
"final_images",
",",
"fps",
"=",
"fps",
")"
] | [
92,
0
] | [
135,
50
] | python | en | ['en', 'en', 'en'] | True |
compose_gifs_compact | (input_fpathes, output_fpath) | Create a progression of the first and last frames over time. | Create a progression of the first and last frames over time. | def compose_gifs_compact(input_fpathes, output_fpath):
"""Create a progression of the first and last frames over time."""
first_and_last_per_batch_id = []
for fname in input_fpathes:
data = imageio.mimread(fname)
data = np.concatenate([data[0], data[-1]], axis=0)
first_and_last_per_batch_id.append(data)
if first_and_last_per_batch_id:
imageio.mimwrite(output_fpath, first_and_last_per_batch_id) | [
"def",
"compose_gifs_compact",
"(",
"input_fpathes",
",",
"output_fpath",
")",
":",
"first_and_last_per_batch_id",
"=",
"[",
"]",
"for",
"fname",
"in",
"input_fpathes",
":",
"data",
"=",
"imageio",
".",
"mimread",
"(",
"fname",
")",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"-",
"1",
"]",
"]",
",",
"axis",
"=",
"0",
")",
"first_and_last_per_batch_id",
".",
"append",
"(",
"data",
")",
"if",
"first_and_last_per_batch_id",
":",
"imageio",
".",
"mimwrite",
"(",
"output_fpath",
",",
"first_and_last_per_batch_id",
")"
] | [
138,
0
] | [
146,
67
] | python | en | ['en', 'en', 'en'] | True |
compose_gifs | (input_fpathes, output_fpath) | Concatenate and sync all gifs. | Concatenate and sync all gifs. | def compose_gifs(input_fpathes, output_fpath):
"""Concatenate and sync all gifs."""
all_data = []
for fname in input_fpathes:
all_data.append(imageio.mimread(fname))
max_timestamps = max(len(data) for data in all_data)
def _pad(data):
return data + [data[-1]] * (max_timestamps - len(data))
all_data = np.concatenate([_pad(data) for data in all_data], 1)
imageio.mimwrite(output_fpath, all_data) | [
"def",
"compose_gifs",
"(",
"input_fpathes",
",",
"output_fpath",
")",
":",
"all_data",
"=",
"[",
"]",
"for",
"fname",
"in",
"input_fpathes",
":",
"all_data",
".",
"append",
"(",
"imageio",
".",
"mimread",
"(",
"fname",
")",
")",
"max_timestamps",
"=",
"max",
"(",
"len",
"(",
"data",
")",
"for",
"data",
"in",
"all_data",
")",
"def",
"_pad",
"(",
"data",
")",
":",
"return",
"data",
"+",
"[",
"data",
"[",
"-",
"1",
"]",
"]",
"*",
"(",
"max_timestamps",
"-",
"len",
"(",
"data",
")",
")",
"all_data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"_pad",
"(",
"data",
")",
"for",
"data",
"in",
"all_data",
"]",
",",
"1",
")",
"imageio",
".",
"mimwrite",
"(",
"output_fpath",
",",
"all_data",
")"
] | [
149,
0
] | [
160,
44
] | python | en | ['en', 'en', 'en'] | True |
timesince | (d, now=None, reversed=False, time_strings=None, depth=2) |
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes".
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to `depth` adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
`time_strings` is an optional dict of strings to replace the default
TIME_STRINGS dict.
`depth` is an optional integer to control the number of adjacent time
units returned.
Adapted from
https://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
|
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes". | def timesince(d, now=None, reversed=False, time_strings=None, depth=2):
"""
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes".
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to `depth` adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
`time_strings` is an optional dict of strings to replace the default
TIME_STRINGS dict.
`depth` is an optional integer to control the number of adjacent time
units returned.
Adapted from
https://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
if time_strings is None:
time_strings = TIME_STRINGS
if depth <= 0:
raise ValueError('depth must be greater than 0.')
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
now = now or datetime.datetime.now(utc if is_aware(d) else None)
if reversed:
d, now = now, d
delta = now - d
# Deal with leap years by subtracting the number of leap days
leapdays = calendar.leapdays(d.year, now.year)
if leapdays != 0:
if calendar.isleap(d.year):
leapdays -= 1
elif calendar.isleap(now.year):
leapdays += 1
delta -= datetime.timedelta(leapdays)
# ignore microseconds
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(time_strings['minute'] % 0)
for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
count = since // seconds
if count != 0:
break
else:
return avoid_wrapping(time_strings['minute'] % 0)
result = []
current_depth = 0
while i < len(TIMESINCE_CHUNKS) and current_depth < depth:
seconds, name = TIMESINCE_CHUNKS[i]
count = since // seconds
if count == 0:
break
result.append(avoid_wrapping(time_strings[name] % count))
since -= seconds * count
current_depth += 1
i += 1
return gettext(', ').join(result) | [
"def",
"timesince",
"(",
"d",
",",
"now",
"=",
"None",
",",
"reversed",
"=",
"False",
",",
"time_strings",
"=",
"None",
",",
"depth",
"=",
"2",
")",
":",
"if",
"time_strings",
"is",
"None",
":",
"time_strings",
"=",
"TIME_STRINGS",
"if",
"depth",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'depth must be greater than 0.'",
")",
"# Convert datetime.date to datetime.datetime for comparison.",
"if",
"not",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"datetime",
")",
":",
"d",
"=",
"datetime",
".",
"datetime",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")",
"if",
"now",
"and",
"not",
"isinstance",
"(",
"now",
",",
"datetime",
".",
"datetime",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"now",
".",
"day",
")",
"now",
"=",
"now",
"or",
"datetime",
".",
"datetime",
".",
"now",
"(",
"utc",
"if",
"is_aware",
"(",
"d",
")",
"else",
"None",
")",
"if",
"reversed",
":",
"d",
",",
"now",
"=",
"now",
",",
"d",
"delta",
"=",
"now",
"-",
"d",
"# Deal with leapyears by subtracing the number of leapdays",
"leapdays",
"=",
"calendar",
".",
"leapdays",
"(",
"d",
".",
"year",
",",
"now",
".",
"year",
")",
"if",
"leapdays",
"!=",
"0",
":",
"if",
"calendar",
".",
"isleap",
"(",
"d",
".",
"year",
")",
":",
"leapdays",
"-=",
"1",
"elif",
"calendar",
".",
"isleap",
"(",
"now",
".",
"year",
")",
":",
"leapdays",
"+=",
"1",
"delta",
"-=",
"datetime",
".",
"timedelta",
"(",
"leapdays",
")",
"# ignore microseconds",
"since",
"=",
"delta",
".",
"days",
"*",
"24",
"*",
"60",
"*",
"60",
"+",
"delta",
".",
"seconds",
"if",
"since",
"<=",
"0",
":",
"# d is in the future compared to now, stop processing.",
"return",
"avoid_wrapping",
"(",
"time_strings",
"[",
"'minute'",
"]",
"%",
"0",
")",
"for",
"i",
",",
"(",
"seconds",
",",
"name",
")",
"in",
"enumerate",
"(",
"TIMESINCE_CHUNKS",
")",
":",
"count",
"=",
"since",
"//",
"seconds",
"if",
"count",
"!=",
"0",
":",
"break",
"else",
":",
"return",
"avoid_wrapping",
"(",
"time_strings",
"[",
"'minute'",
"]",
"%",
"0",
")",
"result",
"=",
"[",
"]",
"current_depth",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"TIMESINCE_CHUNKS",
")",
"and",
"current_depth",
"<",
"depth",
":",
"seconds",
",",
"name",
"=",
"TIMESINCE_CHUNKS",
"[",
"i",
"]",
"count",
"=",
"since",
"//",
"seconds",
"if",
"count",
"==",
"0",
":",
"break",
"result",
".",
"append",
"(",
"avoid_wrapping",
"(",
"time_strings",
"[",
"name",
"]",
"%",
"count",
")",
")",
"since",
"-=",
"seconds",
"*",
"count",
"current_depth",
"+=",
"1",
"i",
"+=",
"1",
"return",
"gettext",
"(",
"', '",
")",
".",
"join",
"(",
"result",
")"
] | [
26,
0
] | [
93,
37
] | python | en | ['en', 'error', 'th'] | False |
timeuntil | (d, now=None, time_strings=None, depth=2) |
Like timesince, but return a string measuring the time until the given time.
|
Like timesince, but return a string measuring the time until the given time.
| def timeuntil(d, now=None, time_strings=None, depth=2):
"""
Like timesince, but return a string measuring the time until the given time.
"""
return timesince(d, now, reversed=True, time_strings=time_strings, depth=depth) | [
"def",
"timeuntil",
"(",
"d",
",",
"now",
"=",
"None",
",",
"time_strings",
"=",
"None",
",",
"depth",
"=",
"2",
")",
":",
"return",
"timesince",
"(",
"d",
",",
"now",
",",
"reversed",
"=",
"True",
",",
"time_strings",
"=",
"time_strings",
",",
"depth",
"=",
"depth",
")"
] | [
96,
0
] | [
100,
83
] | python | en | ['en', 'error', 'th'] | False |
compress_kml | (kml) | Return compressed KMZ from the given KML string. | Return compressed KMZ from the given KML string. | def compress_kml(kml):
"Return compressed KMZ from the given KML string."
kmz = BytesIO()
with zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED) as zf:
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
kmz.seek(0)
return kmz.read() | [
"def",
"compress_kml",
"(",
"kml",
")",
":",
"kmz",
"=",
"BytesIO",
"(",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"kmz",
",",
"'a'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"as",
"zf",
":",
"zf",
".",
"writestr",
"(",
"'doc.kml'",
",",
"kml",
".",
"encode",
"(",
"settings",
".",
"DEFAULT_CHARSET",
")",
")",
"kmz",
".",
"seek",
"(",
"0",
")",
"return",
"kmz",
".",
"read",
"(",
")"
] | [
14,
0
] | [
20,
21
] | python | en | ['en', 'en', 'en'] | True |
render_to_kml | (*args, **kwargs) | Render the response as KML (using the correct MIME type). | Render the response as KML (using the correct MIME type). | def render_to_kml(*args, **kwargs):
"Render the response as KML (using the correct MIME type)."
return HttpResponse(
loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml',
) | [
"def",
"render_to_kml",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"HttpResponse",
"(",
"loader",
".",
"render_to_string",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"content_type",
"=",
"'application/vnd.google-earth.kml+xml'",
",",
")"
] | [
23,
0
] | [
28,
5
] | python | en | ['en', 'en', 'en'] | True |
render_to_kmz | (*args, **kwargs) |
Compress the KML content and return as KMZ (using the correct
MIME type).
|
Compress the KML content and return as KMZ (using the correct
MIME type).
| def render_to_kmz(*args, **kwargs):
"""
Compress the KML content and return as KMZ (using the correct
MIME type).
"""
return HttpResponse(
compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz',
) | [
"def",
"render_to_kmz",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"HttpResponse",
"(",
"compress_kml",
"(",
"loader",
".",
"render_to_string",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
",",
"content_type",
"=",
"'application/vnd.google-earth.kmz'",
",",
")"
] | [
31,
0
] | [
39,
5
] | python | en | ['en', 'error', 'th'] | False |
BitString.asNumbers | (self) | Get |ASN.1| value as a sequence of 8-bit integers.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
| Get |ASN.1| value as a sequence of 8-bit integers. | def asNumbers(self):
"""Get |ASN.1| value as a sequence of 8-bit integers.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
"""
return tuple(octets.octs2ints(self.asOctets())) | [
"def",
"asNumbers",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"octets",
".",
"octs2ints",
"(",
"self",
".",
"asOctets",
"(",
")",
")",
")"
] | [
564,
4
] | [
570,
55
] | python | en | ['en', 'lb', 'en'] | True |
BitString.asOctets | (self) | Get |ASN.1| value as a sequence of octets.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
| Get |ASN.1| value as a sequence of octets. | def asOctets(self):
"""Get |ASN.1| value as a sequence of octets.
If |ASN.1| object length is not a multiple of 8, result
will be left-padded with zeros.
"""
return integer.to_bytes(self._value, length=len(self)) | [
"def",
"asOctets",
"(",
"self",
")",
":",
"return",
"integer",
".",
"to_bytes",
"(",
"self",
".",
"_value",
",",
"length",
"=",
"len",
"(",
"self",
")",
")"
] | [
572,
4
] | [
578,
62
] | python | en | ['en', 'en', 'en'] | True |
BitString.asInteger | (self) | Get |ASN.1| value as a single integer value.
| Get |ASN.1| value as a single integer value.
| def asInteger(self):
"""Get |ASN.1| value as a single integer value.
"""
return self._value | [
"def",
"asInteger",
"(",
"self",
")",
":",
"return",
"self",
".",
"_value"
] | [
580,
4
] | [
583,
26
] | python | en | ['en', 'sv', 'en'] | True |