Dataset fields:
  text          (string, lengths 89 to 104k)
  code_tokens   (list)
  avg_line_len  (float64, 7.91 to 980)
  score         (float64, 0 to 630)
def selection(self): """ Selection property. :return: None if no font is selected and font family name if one is selected. :rtype: None or str """ if self._font.get() is "" or self._font.get() not in self._fonts: return None else: return self._font.get()
[ "def", "selection", "(", "self", ")", ":", "if", "self", ".", "_font", ".", "get", "(", ")", "is", "\"\"", "or", "self", ".", "_font", ".", "get", "(", ")", "not", "in", "self", ".", "_fonts", ":", "return", "None", "else", ":", "return", "self", ".", "_font", ".", "get", "(", ")" ]
avg_line_len: 30.272727 | score: 18.636364
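The equality test above is load-bearing: comparing strings with `is` checks object identity, not value, and CPython only interns some strings. A two-variable demonstration of the difference:

a = "py" + "thon"             # constant-folded literal, interned by CPython
b = "".join(["py", "thon"])   # built at runtime, a distinct object
print(a == b)                 # True  -- value equality, what the check needs
print(a is b)                 # False -- identity, unreliable for strings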
def validate(self): """ Execute the code once to get it's results (to be used in function validation). Compare the result to the first function in the group. """ validation_code = self.setup_src + '\nvalidation_result = ' + self.stmt validation_scope = {} exec(validation_code, validation_scope) # Store the result in the first function in the group. if len(self.groups[self.group]) == 1: self.result = validation_scope['validation_result'] logging.info('PyPerform: Validating group "{b.group}" against function "{b.callable.__name__}"' .format(b=self)) else: compare_against_benchmark = self.groups[self.group][0] test = [benchmark.result_validation for benchmark in self.groups[self.group]] if not all(test): raise ValueError('All functions within a group must have the same validation flag.') compare_result = compare_against_benchmark.result if self.validation_func: results_are_valid = self.validation_func(compare_result, validation_scope['validation_result']) else: results_are_valid = compare_result == validation_scope['validation_result'] if results_are_valid: logging.info('PyPerform: Validating {}......PASSED!'.format(self.callable.__name__)) else: error = 'Results of functions {0} and {1} are not equivalent.\n{0}:\t {2}\n{1}:\t{3}' raise ValidationError(error.format(compare_against_benchmark.callable.__name__, self.callable.__name__, compare_result, validation_scope['validation_result']))
[ "def", "validate", "(", "self", ")", ":", "validation_code", "=", "self", ".", "setup_src", "+", "'\\nvalidation_result = '", "+", "self", ".", "stmt", "validation_scope", "=", "{", "}", "exec", "(", "validation_code", ",", "validation_scope", ")", "# Store the result in the first function in the group.", "if", "len", "(", "self", ".", "groups", "[", "self", ".", "group", "]", ")", "==", "1", ":", "self", ".", "result", "=", "validation_scope", "[", "'validation_result'", "]", "logging", ".", "info", "(", "'PyPerform: Validating group \"{b.group}\" against function \"{b.callable.__name__}\"'", ".", "format", "(", "b", "=", "self", ")", ")", "else", ":", "compare_against_benchmark", "=", "self", ".", "groups", "[", "self", ".", "group", "]", "[", "0", "]", "test", "=", "[", "benchmark", ".", "result_validation", "for", "benchmark", "in", "self", ".", "groups", "[", "self", ".", "group", "]", "]", "if", "not", "all", "(", "test", ")", ":", "raise", "ValueError", "(", "'All functions within a group must have the same validation flag.'", ")", "compare_result", "=", "compare_against_benchmark", ".", "result", "if", "self", ".", "validation_func", ":", "results_are_valid", "=", "self", ".", "validation_func", "(", "compare_result", ",", "validation_scope", "[", "'validation_result'", "]", ")", "else", ":", "results_are_valid", "=", "compare_result", "==", "validation_scope", "[", "'validation_result'", "]", "if", "results_are_valid", ":", "logging", ".", "info", "(", "'PyPerform: Validating {}......PASSED!'", ".", "format", "(", "self", ".", "callable", ".", "__name__", ")", ")", "else", ":", "error", "=", "'Results of functions {0} and {1} are not equivalent.\\n{0}:\\t {2}\\n{1}:\\t{3}'", "raise", "ValidationError", "(", "error", ".", "format", "(", "compare_against_benchmark", ".", "callable", ".", "__name__", ",", "self", ".", "callable", ".", "__name__", ",", "compare_result", ",", "validation_scope", "[", "'validation_result'", "]", ")", ")" ]
avg_line_len: 60.068966 | score: 31.103448
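The validation above runs the benchmark's setup and statement through `exec` and reads the value back out of the scope dict it passed in. A minimal standalone sketch of that pattern, with hypothetical `setup_src` and `stmt` values:

setup_src = "x = 2"      # hypothetical setup source
stmt = "x * 21"          # hypothetical statement under test
validation_code = setup_src + '\nvalidation_result = ' + stmt
validation_scope = {}
exec(validation_code, validation_scope)        # the globals dict doubles as the output channel
print(validation_scope['validation_result'])   # 42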
def Load(self): """Loads all new events from disk as raw serialized proto bytestrings. Calling Load multiple times in a row will not 'drop' events as long as the return value is not iterated over. Yields: All event proto bytestrings in the file that have not been yielded yet. """ logger.debug('Loading events from %s', self._file_path) # GetNext() expects a status argument on TF <= 1.7. get_next_args = inspect.getargspec(self._reader.GetNext).args # pylint: disable=deprecated-method # First argument is self legacy_get_next = (len(get_next_args) > 1) while True: try: if legacy_get_next: with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status: self._reader.GetNext(status) else: self._reader.GetNext() except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e: logger.debug('Cannot read more events: %s', e) # We ignore partial read exceptions, because a record may be truncated. # PyRecordReader holds the offset prior to the failed read, so retrying # will succeed. break yield self._reader.record() logger.debug('No more events in %s', self._file_path)
[ "def", "Load", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Loading events from %s'", ",", "self", ".", "_file_path", ")", "# GetNext() expects a status argument on TF <= 1.7.", "get_next_args", "=", "inspect", ".", "getargspec", "(", "self", ".", "_reader", ".", "GetNext", ")", ".", "args", "# pylint: disable=deprecated-method", "# First argument is self", "legacy_get_next", "=", "(", "len", "(", "get_next_args", ")", ">", "1", ")", "while", "True", ":", "try", ":", "if", "legacy_get_next", ":", "with", "tf", ".", "compat", ".", "v1", ".", "errors", ".", "raise_exception_on_not_ok_status", "(", ")", "as", "status", ":", "self", ".", "_reader", ".", "GetNext", "(", "status", ")", "else", ":", "self", ".", "_reader", ".", "GetNext", "(", ")", "except", "(", "tf", ".", "errors", ".", "DataLossError", ",", "tf", ".", "errors", ".", "OutOfRangeError", ")", "as", "e", ":", "logger", ".", "debug", "(", "'Cannot read more events: %s'", ",", "e", ")", "# We ignore partial read exceptions, because a record may be truncated.", "# PyRecordReader holds the offset prior to the failed read, so retrying", "# will succeed.", "break", "yield", "self", ".", "_reader", ".", "record", "(", ")", "logger", ".", "debug", "(", "'No more events in %s'", ",", "self", ".", "_file_path", ")" ]
avg_line_len: 39.225806 | score: 23.83871
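`inspect.getargspec` is deprecated and was removed in Python 3.11; the same arity check can be written with `inspect.signature`. A sketch under the assumption of a legacy-style reader (note that `signature` on a bound method already excludes `self`, which is why the threshold differs from the raw-args count above):

import inspect

class Reader:
    def GetNext(self, status):  # legacy TF <= 1.7 style signature
        pass

reader = Reader()
params = inspect.signature(reader.GetNext).parameters  # 'self' is excluded
legacy_get_next = len(params) > 0
print(legacy_get_next)  # True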
def _diff_image(slice1, slice2, abs_value=True, cmap='gray', **kwargs):
    """Computes the difference image"""
    diff = slice1 - slice2
    if abs_value:
        diff = np.abs(diff)
    return diff, cmap
[ "def", "_diff_image", "(", "slice1", ",", "slice2", ",", "abs_value", "=", "True", ",", "cmap", "=", "'gray'", ",", "*", "*", "kwargs", ")", ":", "diff", "=", "slice1", "-", "slice2", "if", "abs_value", ":", "diff", "=", "np", ".", "abs", "(", "diff", ")", "return", "diff", ",", "cmap" ]
avg_line_len: 20.5 | score: 19.416667
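A small self-contained usage sketch of the helper above (NumPy only; the extra `**kwargs` are accepted and ignored):

import numpy as np

def _diff_image(slice1, slice2, abs_value=True, cmap='gray', **kwargs):
    diff = slice1 - slice2
    if abs_value:
        diff = np.abs(diff)
    return diff, cmap

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[2.0, 2.0], [1.0, 4.0]])
diff, cmap = _diff_image(a, b)
print(diff)   # [[1. 0.] [2. 0.]] -- element-wise absolute difference
print(cmap)   # 'gray'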
def get_trust_id(self):
    """Gets the ``Trust`` ``Id`` for this authorization.

    return: (osid.id.Id) - the trust ``Id``
    raise:  IllegalState - ``has_trust()`` is ``false``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.resource.Resource.get_avatar_id_template
    if not bool(self._my_map['trustId']):
        raise errors.IllegalState('this Authorization has no trust')
    else:
        return Id(self._my_map['trustId'])
[ "def", "get_trust_id", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_id_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'trustId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Authorization has no trust'", ")", "else", ":", "return", "Id", "(", "self", ".", "_my_map", "[", "'trustId'", "]", ")" ]
avg_line_len: 40.692308 | score: 20.384615
def _validate_device(device):
    '''
    Ensure the device name supplied is valid in a manner similar to the
    `exists` function, but raise errors on invalid input rather than return
    False.

    This function only validates a block device, it does not check if the
    block device is a drive or a partition or a filesystem, etc.
    '''
    if os.path.exists(device):
        dev = os.stat(device).st_mode
        if stat.S_ISBLK(dev):
            return
    raise CommandExecutionError(
        'Invalid device passed to partition module.'
    )
[ "def", "_validate_device", "(", "device", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "device", ")", ":", "dev", "=", "os", ".", "stat", "(", "device", ")", ".", "st_mode", "if", "stat", ".", "S_ISBLK", "(", "dev", ")", ":", "return", "raise", "CommandExecutionError", "(", "'Invalid device passed to partition module.'", ")" ]
avg_line_len: 29.944444 | score: 25.055556
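The core check above is `stat.S_ISBLK` on the file mode. The same test as a standalone predicate (the paths are examples; results depend on the host):

import os
import stat

def is_block_device(path):
    # True only if the path exists and its mode identifies a block device
    return os.path.exists(path) and stat.S_ISBLK(os.stat(path).st_mode)

print(is_block_device('/dev/sda'))    # typically True on a Linux host with that disk
print(is_block_device('/etc/hosts'))  # False -- regular file, not a block device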
def paste(self):
    """Reimplement Qt method"""
    if self.has_selected_text():
        self.remove_selected_text()
    self.insert_text(QApplication.clipboard().text())
[ "def", "paste", "(", "self", ")", ":", "if", "self", ".", "has_selected_text", "(", ")", ":", "self", ".", "remove_selected_text", "(", ")", "self", ".", "insert_text", "(", "QApplication", ".", "clipboard", "(", ")", ".", "text", "(", ")", ")" ]
avg_line_len: 36.6 | score: 9.2
def opsview_info(self, verbose=False):
    '''
    Get information about the current opsview instance

    http://docs.opsview.com/doku.php?id=opsview4.6:restapi#opsview_information
    '''
    url = '{}/{}'.format(self.rest_url, 'info')
    return self.__auth_req_get(url, verbose=verbose)
[ "def", "opsview_info", "(", "self", ",", "verbose", "=", "False", ")", ":", "url", "=", "'{}/{}'", ".", "format", "(", "self", ".", "rest_url", ",", "'info'", ")", "return", "self", ".", "__auth_req_get", "(", "url", ",", "verbose", "=", "verbose", ")" ]
avg_line_len: 43.857143 | score: 21
def clone(git_uri):
    """
    Clone a remote git repository to a local path.

    :param git_uri: the URI to the git repository to be cloned
    :return: the generated local path where the repository has been cloned to
    """
    hash_digest = sha256_hash(git_uri)
    local_path = home_directory_path(FOLDER, hash_digest)
    exists_locally = path_exists(local_path)

    if not exists_locally:
        _clone_repo(git_uri, local_path)
    else:
        logging.info(  # pragma: no cover
            "Git repository already exists locally.")  # pragma: no cover

    return local_path
[ "def", "clone", "(", "git_uri", ")", ":", "hash_digest", "=", "sha256_hash", "(", "git_uri", ")", "local_path", "=", "home_directory_path", "(", "FOLDER", ",", "hash_digest", ")", "exists_locally", "=", "path_exists", "(", "local_path", ")", "if", "not", "exists_locally", ":", "_clone_repo", "(", "git_uri", ",", "local_path", ")", "else", ":", "logging", ".", "info", "(", "# pragma: no cover", "\"Git repository already exists locally.\"", ")", "# pragma: no cover", "return", "local_path" ]
avg_line_len: 31.722222 | score: 18.388889
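`clone` derives a stable local directory name by hashing the URI. The `sha256_hash` helper itself is not shown in this snippet, so the following is an assumption about its behavior, sketched with the stdlib:

import hashlib

def sha256_hash(text):
    # hex digest gives a deterministic, filesystem-safe directory name (assumed helper)
    return hashlib.sha256(text.encode('utf-8')).hexdigest()

print(sha256_hash('https://example.com/repo.git')[:16])  # same prefix for the same URI every time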
def set_image_options(self, args=None, figsize="6x6", dpi=300,
                      format="pdf", font="Helvetica", palette="deep",
                      style="darkgrid", cmap="jet"):
    """
    Add image format options for given command line programs.
    """
    from jcvi.graphics.base import ImageOptions, setup_theme

    allowed_format = ("emf", "eps", "pdf", "png", "ps",
                      "raw", "rgba", "svg", "svgz")
    allowed_fonts = ("Helvetica", "Palatino", "Schoolbook", "Arial")
    allowed_styles = ("darkgrid", "whitegrid", "dark", "white", "ticks")
    allowed_diverge = ("BrBG", "PiYG", "PRGn", "PuOr", "RdBu",
                       "RdGy", "RdYlBu", "RdYlGn", "Spectral")

    group = OptionGroup(self, "Image options")
    self.add_option_group(group)

    group.add_option("--figsize", default=figsize,
                     help="Figure size `width`x`height` in inches [default: %default]")
    group.add_option("--dpi", default=dpi, type="int",
                     help="Physical dot density (dots per inch) [default: %default]")
    group.add_option("--format", default=format, choices=allowed_format,
                     help="Generate image of format [default: %default]")
    group.add_option("--font", default=font, choices=allowed_fonts,
                     help="Font name")
    group.add_option("--style", default=style, choices=allowed_styles,
                     help="Axes background")
    group.add_option("--diverge", default="PiYG", choices=allowed_diverge,
                     help="Contrasting color scheme")
    group.add_option("--cmap", default=cmap, help="Use this color map")
    group.add_option("--notex", default=False, action="store_true",
                     help="Do not use tex")

    if args is None:
        args = sys.argv[1:]
    opts, args = self.parse_args(args)

    assert opts.dpi > 0
    assert "x" in opts.figsize

    setup_theme(style=opts.style, font=opts.font, usetex=(not opts.notex))

    return opts, args, ImageOptions(opts)
[ "def", "set_image_options", "(", "self", ",", "args", "=", "None", ",", "figsize", "=", "\"6x6\"", ",", "dpi", "=", "300", ",", "format", "=", "\"pdf\"", ",", "font", "=", "\"Helvetica\"", ",", "palette", "=", "\"deep\"", ",", "style", "=", "\"darkgrid\"", ",", "cmap", "=", "\"jet\"", ")", ":", "from", "jcvi", ".", "graphics", ".", "base", "import", "ImageOptions", ",", "setup_theme", "allowed_format", "=", "(", "\"emf\"", ",", "\"eps\"", ",", "\"pdf\"", ",", "\"png\"", ",", "\"ps\"", ",", "\"raw\"", ",", "\"rgba\"", ",", "\"svg\"", ",", "\"svgz\"", ")", "allowed_fonts", "=", "(", "\"Helvetica\"", ",", "\"Palatino\"", ",", "\"Schoolbook\"", ",", "\"Arial\"", ")", "allowed_styles", "=", "(", "\"darkgrid\"", ",", "\"whitegrid\"", ",", "\"dark\"", ",", "\"white\"", ",", "\"ticks\"", ")", "allowed_diverge", "=", "(", "\"BrBG\"", ",", "\"PiYG\"", ",", "\"PRGn\"", ",", "\"PuOr\"", ",", "\"RdBu\"", ",", "\"RdGy\"", ",", "\"RdYlBu\"", ",", "\"RdYlGn\"", ",", "\"Spectral\"", ")", "group", "=", "OptionGroup", "(", "self", ",", "\"Image options\"", ")", "self", ".", "add_option_group", "(", "group", ")", "group", ".", "add_option", "(", "\"--figsize\"", ",", "default", "=", "figsize", ",", "help", "=", "\"Figure size `width`x`height` in inches [default: %default]\"", ")", "group", ".", "add_option", "(", "\"--dpi\"", ",", "default", "=", "dpi", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Physical dot density (dots per inch) [default: %default]\"", ")", "group", ".", "add_option", "(", "\"--format\"", ",", "default", "=", "format", ",", "choices", "=", "allowed_format", ",", "help", "=", "\"Generate image of format [default: %default]\"", ")", "group", ".", "add_option", "(", "\"--font\"", ",", "default", "=", "font", ",", "choices", "=", "allowed_fonts", ",", "help", "=", "\"Font name\"", ")", "group", ".", "add_option", "(", "\"--style\"", ",", "default", "=", "style", ",", "choices", "=", "allowed_styles", ",", "help", "=", "\"Axes background\"", ")", "group", ".", "add_option", "(", "\"--diverge\"", ",", "default", "=", "\"PiYG\"", ",", "choices", "=", "allowed_diverge", ",", "help", "=", "\"Contrasting color scheme\"", ")", "group", ".", "add_option", "(", "\"--cmap\"", ",", "default", "=", "cmap", ",", "help", "=", "\"Use this color map\"", ")", "group", ".", "add_option", "(", "\"--notex\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not use tex\"", ")", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "opts", ",", "args", "=", "self", ".", "parse_args", "(", "args", ")", "assert", "opts", ".", "dpi", ">", "0", "assert", "\"x\"", "in", "opts", ".", "figsize", "setup_theme", "(", "style", "=", "opts", ".", "style", ",", "font", "=", "opts", ".", "font", ",", "usetex", "=", "(", "not", "opts", ".", "notex", ")", ")", "return", "opts", ",", "args", ",", "ImageOptions", "(", "opts", ")" ]
avg_line_len: 45.6 | score: 23.688889
def execute_runner_async(runner, workunit_factory=None, workunit_name=None,
                         workunit_labels=None, workunit_log_config=None):
    """Executes the given java runner asynchronously.

    We can't use 'with' here because the workunit_generator's __exit__ function
    must be called after the process exits, in the return_code_handler. The
    wrapper around process.wait() needs to handle the same exceptions as the
    contextmanager does, so we have code duplication.

    We're basically faking the 'with' call to deal with asynchronous results.

    If `workunit_factory` is supplied, does so in the context of a workunit.

    :param runner: the java runner to run
    :param workunit_factory: an optional callable that can produce a workunit context
    :param string workunit_name: an optional name for the work unit; defaults to the main
    :param list workunit_labels: an optional sequence of labels for the work unit
    :param WorkUnit.LogConfig workunit_log_config: an optional tuple of task options affecting reporting

    Returns a ProcessHandler to the java process that is spawned.

    Raises `pants.java.Executor.Error` if there was a problem launching java itself.
    """
    if not isinstance(runner, Executor.Runner):
        raise ValueError('The runner argument must be a java Executor.Runner instance, '
                         'given {} of type {}'.format(runner, type(runner)))

    if workunit_factory is None:
        return SubprocessProcessHandler(runner.spawn())
    else:
        workunit_labels = [
            WorkUnitLabel.TOOL,
            WorkUnitLabel.NAILGUN if isinstance(runner.executor, NailgunExecutor) else WorkUnitLabel.JVM
        ] + (workunit_labels or [])

        workunit_generator = workunit_factory(name=workunit_name,
                                              labels=workunit_labels,
                                              cmd=runner.cmd,
                                              log_config=workunit_log_config)
        workunit = workunit_generator.__enter__()
        process = runner.spawn(stdout=workunit.output('stdout'),
                               stderr=workunit.output('stderr'))

        class WorkUnitProcessHandler(ProcessHandler):
            def wait(_, timeout=None):
                try:
                    ret = process.wait(timeout=timeout)
                    workunit.set_outcome(WorkUnit.FAILURE if ret else WorkUnit.SUCCESS)
                    workunit_generator.__exit__(None, None, None)
                    return ret
                except BaseException:
                    if not workunit_generator.__exit__(*sys.exc_info()):
                        raise

            def kill(_):
                return process.kill()

            def terminate(_):
                return process.terminate()

            def poll(_):
                return process.poll()

        return WorkUnitProcessHandler()
[ "def", "execute_runner_async", "(", "runner", ",", "workunit_factory", "=", "None", ",", "workunit_name", "=", "None", ",", "workunit_labels", "=", "None", ",", "workunit_log_config", "=", "None", ")", ":", "if", "not", "isinstance", "(", "runner", ",", "Executor", ".", "Runner", ")", ":", "raise", "ValueError", "(", "'The runner argument must be a java Executor.Runner instance, '", "'given {} of type {}'", ".", "format", "(", "runner", ",", "type", "(", "runner", ")", ")", ")", "if", "workunit_factory", "is", "None", ":", "return", "SubprocessProcessHandler", "(", "runner", ".", "spawn", "(", ")", ")", "else", ":", "workunit_labels", "=", "[", "WorkUnitLabel", ".", "TOOL", ",", "WorkUnitLabel", ".", "NAILGUN", "if", "isinstance", "(", "runner", ".", "executor", ",", "NailgunExecutor", ")", "else", "WorkUnitLabel", ".", "JVM", "]", "+", "(", "workunit_labels", "or", "[", "]", ")", "workunit_generator", "=", "workunit_factory", "(", "name", "=", "workunit_name", ",", "labels", "=", "workunit_labels", ",", "cmd", "=", "runner", ".", "cmd", ",", "log_config", "=", "workunit_log_config", ")", "workunit", "=", "workunit_generator", ".", "__enter__", "(", ")", "process", "=", "runner", ".", "spawn", "(", "stdout", "=", "workunit", ".", "output", "(", "'stdout'", ")", ",", "stderr", "=", "workunit", ".", "output", "(", "'stderr'", ")", ")", "class", "WorkUnitProcessHandler", "(", "ProcessHandler", ")", ":", "def", "wait", "(", "_", ",", "timeout", "=", "None", ")", ":", "try", ":", "ret", "=", "process", ".", "wait", "(", "timeout", "=", "timeout", ")", "workunit", ".", "set_outcome", "(", "WorkUnit", ".", "FAILURE", "if", "ret", "else", "WorkUnit", ".", "SUCCESS", ")", "workunit_generator", ".", "__exit__", "(", "None", ",", "None", ",", "None", ")", "return", "ret", "except", "BaseException", ":", "if", "not", "workunit_generator", ".", "__exit__", "(", "*", "sys", ".", "exc_info", "(", ")", ")", ":", "raise", "def", "kill", "(", "_", ")", ":", "return", "process", ".", "kill", "(", ")", "def", "terminate", "(", "_", ")", ":", "return", "process", ".", "terminate", "(", ")", "def", "poll", "(", "_", ")", ":", "return", "process", ".", "poll", "(", ")", "return", "WorkUnitProcessHandler", "(", ")" ]
avg_line_len: 41.467742 | score: 27.5
def _infer_from_metaclass_constructor(cls, func):
    """Try to infer what the given *func* constructor is building

    :param astroid.FunctionDef func:
        A metaclass constructor. Metaclass definitions can be
        functions, which should accept three arguments, the name of
        the class, the bases of the class and the attributes.
        The function could return anything, but usually it should
        be a proper metaclass.
    :param astroid.ClassDef cls:
        The class for which the *func* parameter should generate
        a metaclass.
    :returns:
        The class generated by the function or None,
        if we couldn't infer it.
    :rtype: astroid.ClassDef
    """
    context = astroid.context.InferenceContext()

    class_bases = astroid.List()
    class_bases.postinit(elts=cls.bases)

    attrs = astroid.Dict()
    local_names = [(name, values[-1]) for name, values in cls.locals.items()]
    attrs.postinit(local_names)

    builder_args = astroid.Tuple()
    builder_args.postinit([cls.name, class_bases, attrs])

    context.callcontext = astroid.context.CallContext(builder_args)
    try:
        inferred = next(func.infer_call_result(func, context), None)
    except astroid.InferenceError:
        return None
    return inferred or None
[ "def", "_infer_from_metaclass_constructor", "(", "cls", ",", "func", ")", ":", "context", "=", "astroid", ".", "context", ".", "InferenceContext", "(", ")", "class_bases", "=", "astroid", ".", "List", "(", ")", "class_bases", ".", "postinit", "(", "elts", "=", "cls", ".", "bases", ")", "attrs", "=", "astroid", ".", "Dict", "(", ")", "local_names", "=", "[", "(", "name", ",", "values", "[", "-", "1", "]", ")", "for", "name", ",", "values", "in", "cls", ".", "locals", ".", "items", "(", ")", "]", "attrs", ".", "postinit", "(", "local_names", ")", "builder_args", "=", "astroid", ".", "Tuple", "(", ")", "builder_args", ".", "postinit", "(", "[", "cls", ".", "name", ",", "class_bases", ",", "attrs", "]", ")", "context", ".", "callcontext", "=", "astroid", ".", "context", ".", "CallContext", "(", "builder_args", ")", "try", ":", "inferred", "=", "next", "(", "func", ".", "infer_call_result", "(", "func", ",", "context", ")", ",", "None", ")", "except", "astroid", ".", "InferenceError", ":", "return", "None", "return", "inferred", "or", "None" ]
avg_line_len: 35.714286 | score: 18.685714
def decode(text):
    """
    Function to decode a text.
    @param text text to decode (string)
    @return decoded text and encoding
    """
    try:
        if text.startswith(BOM_UTF8):
            # UTF-8 with BOM
            return to_text_string(text[len(BOM_UTF8):], 'utf-8'), 'utf-8-bom'
        elif text.startswith(BOM_UTF16):
            # UTF-16 with BOM
            return to_text_string(text[len(BOM_UTF16):], 'utf-16'), 'utf-16'
        elif text.startswith(BOM_UTF32):
            # UTF-32 with BOM
            return to_text_string(text[len(BOM_UTF32):], 'utf-32'), 'utf-32'
        coding = get_coding(text)
        if coding:
            return to_text_string(text, coding), coding
    except (UnicodeError, LookupError):
        pass
    # Assume UTF-8
    try:
        return to_text_string(text, 'utf-8'), 'utf-8-guessed'
    except (UnicodeError, LookupError):
        pass
    # Assume Latin-1 (behaviour before 3.7.1)
    return to_text_string(text, "latin-1"), 'latin-1-guessed'
[ "def", "decode", "(", "text", ")", ":", "try", ":", "if", "text", ".", "startswith", "(", "BOM_UTF8", ")", ":", "# UTF-8 with BOM\r", "return", "to_text_string", "(", "text", "[", "len", "(", "BOM_UTF8", ")", ":", "]", ",", "'utf-8'", ")", ",", "'utf-8-bom'", "elif", "text", ".", "startswith", "(", "BOM_UTF16", ")", ":", "# UTF-16 with BOM\r", "return", "to_text_string", "(", "text", "[", "len", "(", "BOM_UTF16", ")", ":", "]", ",", "'utf-16'", ")", ",", "'utf-16'", "elif", "text", ".", "startswith", "(", "BOM_UTF32", ")", ":", "# UTF-32 with BOM\r", "return", "to_text_string", "(", "text", "[", "len", "(", "BOM_UTF32", ")", ":", "]", ",", "'utf-32'", ")", ",", "'utf-32'", "coding", "=", "get_coding", "(", "text", ")", "if", "coding", ":", "return", "to_text_string", "(", "text", ",", "coding", ")", ",", "coding", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "pass", "# Assume UTF-8\r", "try", ":", "return", "to_text_string", "(", "text", ",", "'utf-8'", ")", ",", "'utf-8-guessed'", "except", "(", "UnicodeError", ",", "LookupError", ")", ":", "pass", "# Assume Latin-1 (behaviour before 3.7.1)\r", "return", "to_text_string", "(", "text", ",", "\"latin-1\"", ")", ",", "'latin-1-guessed'" ]
avg_line_len: 35.892857 | score: 14.607143
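The BOM-sniffing logic above can be tried standalone with the constants from `codecs`; a minimal sketch of the UTF-8 branch:

from codecs import BOM_UTF8

raw = BOM_UTF8 + 'héllo'.encode('utf-8')
if raw.startswith(BOM_UTF8):
    # strip the BOM before decoding, exactly as decode() does
    print(raw[len(BOM_UTF8):].decode('utf-8'), 'utf-8-bom')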
def delete(self, force=False):
    """Delete a record and also remove the RecordsBuckets if necessary.

    :param force: True to remove also the
        :class:`~invenio_records_files.models.RecordsBuckets` object.
    :returns: Deleted record.
    """
    if force:
        RecordsBuckets.query.filter_by(
            record=self.model, bucket=self.files.bucket
        ).delete()
    return super(Record, self).delete(force)
[ "def", "delete", "(", "self", ",", "force", "=", "False", ")", ":", "if", "force", ":", "RecordsBuckets", ".", "query", ".", "filter_by", "(", "record", "=", "self", ".", "model", ",", "bucket", "=", "self", ".", "files", ".", "bucket", ")", ".", "delete", "(", ")", "return", "super", "(", "Record", ",", "self", ")", ".", "delete", "(", "force", ")" ]
avg_line_len: 36.230769 | score: 11.769231
def _sign(self, data: bytes) -> bytes:
    """ Use eth_sign compatible hasher to sign matrix data """
    assert self._raiden_service is not None
    return self._raiden_service.signer.sign(data=data)
[ "def", "_sign", "(", "self", ",", "data", ":", "bytes", ")", "->", "bytes", ":", "assert", "self", ".", "_raiden_service", "is", "not", "None", "return", "self", ".", "_raiden_service", ".", "signer", ".", "sign", "(", "data", "=", "data", ")" ]
avg_line_len: 52.25 | score: 6.75
def add_simple_rnn(self, name, W_h, W_x, b, hidden_size, input_size, activation,
                   input_names, output_names, output_all=False, reverse_input=False):
    """
    Add a simple recurrent layer to the model.

    Parameters
    ----------
    name: str
        The name of this layer.
    W_h: numpy.array
        Weights of the recurrent layer's hidden state.
        Must be of shape (hidden_size, hidden_size).
    W_x: numpy.array
        Weights of the recurrent layer's input.
        Must be of shape (hidden_size, input_size).
    b: numpy.array | None
        Bias of the recurrent layer's output. If None, bias is ignored.
        Otherwise it must be of shape (hidden_size, ).
    hidden_size: int
        Number of hidden units. This is equal to the number of channels of output shape.
    input_size: int
        Number of channels of input shape.
    activation: str
        Activation function name. Can be one of the following options:
        ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
        See add_activation for a more detailed description.
    input_names: [str]
        The input blob name list of this layer, in the order of [x, h_input].
    output_names: [str]
        The output blob name list of this layer, in the order of [y, h_output].
    output_all: boolean
        Whether the recurrent layer should output at every time step.
        - If False, the output is the result after the final state update.
        - If True, the output is a sequence, containing outputs at all time steps.
    reverse_input: boolean
        Whether the recurrent layer should process the input sequence in the reverse order.
        - If False, the input sequence order is not reversed.
        - If True, the input sequence order is reversed.

    See Also
    --------
    add_activation, add_gru, add_unilstm, add_bidirlstm
    """
    spec = self.spec
    nn_spec = self.nn_spec

    # Add a new Layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    for name in input_names:
        spec_layer.input.append(name)
    for name in output_names:
        spec_layer.output.append(name)
    spec_layer_params = spec_layer.simpleRecurrent
    spec_layer_params.reverseInput = reverse_input

    # Set the parameters
    spec_layer_params.inputVectorSize = input_size
    spec_layer_params.outputVectorSize = hidden_size
    if b is not None:
        spec_layer_params.hasBiasVector = True
    spec_layer_params.sequenceOutput = output_all

    activation_f = spec_layer_params.activation
    _set_recurrent_activation(activation_f, activation)

    # Write the weights
    spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten()))
    spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten()))

    if b is not None:
        spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten()))
[ "def", "add_simple_rnn", "(", "self", ",", "name", ",", "W_h", ",", "W_x", ",", "b", ",", "hidden_size", ",", "input_size", ",", "activation", ",", "input_names", ",", "output_names", ",", "output_all", "=", "False", ",", "reverse_input", "=", "False", ")", ":", "spec", "=", "self", ".", "spec", "nn_spec", "=", "self", ".", "nn_spec", "# Add a new Layer", "spec_layer", "=", "nn_spec", ".", "layers", ".", "add", "(", ")", "spec_layer", ".", "name", "=", "name", "for", "name", "in", "input_names", ":", "spec_layer", ".", "input", ".", "append", "(", "name", ")", "for", "name", "in", "output_names", ":", "spec_layer", ".", "output", ".", "append", "(", "name", ")", "spec_layer_params", "=", "spec_layer", ".", "simpleRecurrent", "spec_layer_params", ".", "reverseInput", "=", "reverse_input", "#set the parameters", "spec_layer_params", ".", "inputVectorSize", "=", "input_size", "spec_layer_params", ".", "outputVectorSize", "=", "hidden_size", "if", "b", "is", "not", "None", ":", "spec_layer_params", ".", "hasBiasVector", "=", "True", "spec_layer_params", ".", "sequenceOutput", "=", "output_all", "activation_f", "=", "spec_layer_params", ".", "activation", "_set_recurrent_activation", "(", "activation_f", ",", "activation", ")", "# Write the weights", "spec_layer_params", ".", "weightMatrix", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W_x", ".", "flatten", "(", ")", ")", ")", "spec_layer_params", ".", "recursionMatrix", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "W_h", ".", "flatten", "(", ")", ")", ")", "if", "b", "is", "not", "None", ":", "spec_layer_params", ".", "biasVector", ".", "floatValue", ".", "extend", "(", "map", "(", "float", ",", "b", ".", "flatten", "(", ")", ")", ")" ]
avg_line_len: 42.957746 | score: 25.915493
def cli(env, ip_version):
    """List all global IPs."""
    mgr = SoftLayer.NetworkManager(env.client)
    table = formatting.Table(['id', 'ip', 'assigned', 'target'])

    version = None
    if ip_version == 'v4':
        version = 4
    elif ip_version == 'v6':
        version = 6

    ips = mgr.list_global_ips(version=version)

    for ip_address in ips:
        assigned = 'No'
        target = 'None'
        if ip_address.get('destinationIpAddress'):
            dest = ip_address['destinationIpAddress']
            assigned = 'Yes'
            target = dest['ipAddress']
            virtual_guest = dest.get('virtualGuest')
            if virtual_guest:
                target += (' (%s)' % virtual_guest['fullyQualifiedDomainName'])
            elif ip_address['destinationIpAddress'].get('hardware'):
                target += (' (%s)' % dest['hardware']['fullyQualifiedDomainName'])

        table.add_row([ip_address['id'],
                       ip_address['ipAddress']['ipAddress'],
                       assigned,
                       target])

    env.fout(table)
[ "def", "cli", "(", "env", ",", "ip_version", ")", ":", "mgr", "=", "SoftLayer", ".", "NetworkManager", "(", "env", ".", "client", ")", "table", "=", "formatting", ".", "Table", "(", "[", "'id'", ",", "'ip'", ",", "'assigned'", ",", "'target'", "]", ")", "version", "=", "None", "if", "ip_version", "==", "'v4'", ":", "version", "=", "4", "elif", "ip_version", "==", "'v6'", ":", "version", "=", "6", "ips", "=", "mgr", ".", "list_global_ips", "(", "version", "=", "version", ")", "for", "ip_address", "in", "ips", ":", "assigned", "=", "'No'", "target", "=", "'None'", "if", "ip_address", ".", "get", "(", "'destinationIpAddress'", ")", ":", "dest", "=", "ip_address", "[", "'destinationIpAddress'", "]", "assigned", "=", "'Yes'", "target", "=", "dest", "[", "'ipAddress'", "]", "virtual_guest", "=", "dest", ".", "get", "(", "'virtualGuest'", ")", "if", "virtual_guest", ":", "target", "+=", "(", "' (%s)'", "%", "virtual_guest", "[", "'fullyQualifiedDomainName'", "]", ")", "elif", "ip_address", "[", "'destinationIpAddress'", "]", ".", "get", "(", "'hardware'", ")", ":", "target", "+=", "(", "' (%s)'", "%", "dest", "[", "'hardware'", "]", "[", "'fullyQualifiedDomainName'", "]", ")", "table", ".", "add_row", "(", "[", "ip_address", "[", "'id'", "]", ",", "ip_address", "[", "'ipAddress'", "]", "[", "'ipAddress'", "]", ",", "assigned", ",", "target", "]", ")", "env", ".", "fout", "(", "table", ")" ]
avg_line_len: 31.6 | score: 18.628571
def get_blockdata(self, x, z):
    """
    Return the decompressed binary data representing a chunk.

    May raise a RegionFileFormatError().
    If decompression of the data succeeds, all available data is returned,
    even if it is shorter than what is specified in the header (e.g. in
    case of a truncated file and non-compressed data).
    """
    # read metadata block
    m = self.metadata[x, z]
    if m.status == STATUS_CHUNK_NOT_CREATED:
        raise InconceivedChunk("Chunk %d,%d is not present in region" % (x, z))
    elif m.status == STATUS_CHUNK_IN_HEADER:
        raise RegionHeaderError('Chunk %d,%d is in the region header' % (x, z))
    elif m.status == STATUS_CHUNK_OUT_OF_FILE and (m.length <= 1 or m.compression == None):
        # Chunk header is outside of the file.
        raise RegionHeaderError('Chunk %d,%d is partially/completely outside the file' % (x, z))
    elif m.status == STATUS_CHUNK_ZERO_LENGTH:
        if m.blocklength == 0:
            raise RegionHeaderError('Chunk %d,%d has zero length' % (x, z))
        else:
            raise ChunkHeaderError('Chunk %d,%d has zero length' % (x, z))
    elif m.blockstart * SECTOR_LENGTH + 5 >= self.size:
        raise RegionHeaderError('Chunk %d,%d is partially/completely outside the file' % (x, z))

    # status is STATUS_CHUNK_OK, STATUS_CHUNK_MISMATCHED_LENGTHS, STATUS_CHUNK_OVERLAPPING
    # or STATUS_CHUNK_OUT_OF_FILE.
    # The chunk is always read, but in case of an error, the exception may be different
    # based on the status.

    err = None
    try:
        # offset comes in sectors of 4096 bytes + length bytes + compression byte
        self.file.seek(m.blockstart * SECTOR_LENGTH + 5)
        # Do not read past the length of the file.
        # The length in the file includes the compression byte, hence the -1.
        length = min(m.length - 1, self.size - (m.blockstart * SECTOR_LENGTH + 5))
        chunk = self.file.read(length)

        if (m.compression == COMPRESSION_GZIP):
            # Python 3.1 and earlier do not yet support gzip.decompress(chunk)
            f = gzip.GzipFile(fileobj=BytesIO(chunk))
            chunk = bytes(f.read())
            f.close()
        elif (m.compression == COMPRESSION_ZLIB):
            chunk = zlib.decompress(chunk)
        elif m.compression != COMPRESSION_NONE:
            raise ChunkDataError('Unknown chunk compression/format (%s)' % m.compression)
        return chunk
    except RegionFileFormatError:
        raise
    except Exception as e:
        # Deliberately catch the Exception and re-raise.
        # The details in gzip/zlib/nbt are irrelevant, just that the data is garbled.
        err = '%s' % e  # avoid str(e) due to Unicode issues in Python 2.
    if err:
        # don't raise during exception handling to avoid the warning
        # "During handling of the above exception, another exception occurred".
        # Python 3.3 solution (see PEP 409 & 415): "raise ChunkDataError(str(e)) from None"
        if m.status == STATUS_CHUNK_MISMATCHED_LENGTHS:
            raise ChunkHeaderError('The length in region header and the length in the header of chunk %d,%d are incompatible' % (x, z))
        elif m.status == STATUS_CHUNK_OVERLAPPING:
            raise ChunkHeaderError('Chunk %d,%d is overlapping with another chunk' % (x, z))
        else:
            raise ChunkDataError(err)
[ "def", "get_blockdata", "(", "self", ",", "x", ",", "z", ")", ":", "# read metadata block", "m", "=", "self", ".", "metadata", "[", "x", ",", "z", "]", "if", "m", ".", "status", "==", "STATUS_CHUNK_NOT_CREATED", ":", "raise", "InconceivedChunk", "(", "\"Chunk %d,%d is not present in region\"", "%", "(", "x", ",", "z", ")", ")", "elif", "m", ".", "status", "==", "STATUS_CHUNK_IN_HEADER", ":", "raise", "RegionHeaderError", "(", "'Chunk %d,%d is in the region header'", "%", "(", "x", ",", "z", ")", ")", "elif", "m", ".", "status", "==", "STATUS_CHUNK_OUT_OF_FILE", "and", "(", "m", ".", "length", "<=", "1", "or", "m", ".", "compression", "==", "None", ")", ":", "# Chunk header is outside of the file.", "raise", "RegionHeaderError", "(", "'Chunk %d,%d is partially/completely outside the file'", "%", "(", "x", ",", "z", ")", ")", "elif", "m", ".", "status", "==", "STATUS_CHUNK_ZERO_LENGTH", ":", "if", "m", ".", "blocklength", "==", "0", ":", "raise", "RegionHeaderError", "(", "'Chunk %d,%d has zero length'", "%", "(", "x", ",", "z", ")", ")", "else", ":", "raise", "ChunkHeaderError", "(", "'Chunk %d,%d has zero length'", "%", "(", "x", ",", "z", ")", ")", "elif", "m", ".", "blockstart", "*", "SECTOR_LENGTH", "+", "5", ">=", "self", ".", "size", ":", "raise", "RegionHeaderError", "(", "'Chunk %d,%d is partially/completely outside the file'", "%", "(", "x", ",", "z", ")", ")", "# status is STATUS_CHUNK_OK, STATUS_CHUNK_MISMATCHED_LENGTHS, STATUS_CHUNK_OVERLAPPING", "# or STATUS_CHUNK_OUT_OF_FILE.", "# The chunk is always read, but in case of an error, the exception may be different ", "# based on the status.", "err", "=", "None", "try", ":", "# offset comes in sectors of 4096 bytes + length bytes + compression byte", "self", ".", "file", ".", "seek", "(", "m", ".", "blockstart", "*", "SECTOR_LENGTH", "+", "5", ")", "# Do not read past the length of the file.", "# The length in the file includes the compression byte, hence the -1.", "length", "=", "min", "(", "m", ".", "length", "-", "1", ",", "self", ".", "size", "-", "(", "m", ".", "blockstart", "*", "SECTOR_LENGTH", "+", "5", ")", ")", "chunk", "=", "self", ".", "file", ".", "read", "(", "length", ")", "if", "(", "m", ".", "compression", "==", "COMPRESSION_GZIP", ")", ":", "# Python 3.1 and earlier do not yet support gzip.decompress(chunk)", "f", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "BytesIO", "(", "chunk", ")", ")", "chunk", "=", "bytes", "(", "f", ".", "read", "(", ")", ")", "f", ".", "close", "(", ")", "elif", "(", "m", ".", "compression", "==", "COMPRESSION_ZLIB", ")", ":", "chunk", "=", "zlib", ".", "decompress", "(", "chunk", ")", "elif", "m", ".", "compression", "!=", "COMPRESSION_NONE", ":", "raise", "ChunkDataError", "(", "'Unknown chunk compression/format (%s)'", "%", "m", ".", "compression", ")", "return", "chunk", "except", "RegionFileFormatError", ":", "raise", "except", "Exception", "as", "e", ":", "# Deliberately catch the Exception and re-raise.", "# The details in gzip/zlib/nbt are irrelevant, just that the data is garbled.", "err", "=", "'%s'", "%", "e", "# avoid str(e) due to Unicode issues in Python 2.", "if", "err", ":", "# don't raise during exception handling to avoid the warning ", "# \"During handling of the above exception, another exception occurred\".", "# Python 3.3 solution (see PEP 409 & 415): \"raise ChunkDataError(str(e)) from None\"", "if", "m", ".", "status", "==", "STATUS_CHUNK_MISMATCHED_LENGTHS", ":", "raise", "ChunkHeaderError", "(", "'The length in region header and the length in the header 
of chunk %d,%d are incompatible'", "%", "(", "x", ",", "z", ")", ")", "elif", "m", ".", "status", "==", "STATUS_CHUNK_OVERLAPPING", ":", "raise", "ChunkHeaderError", "(", "'Chunk %d,%d is overlapping with another chunk'", "%", "(", "x", ",", "z", ")", ")", "else", ":", "raise", "ChunkDataError", "(", "err", ")" ]
avg_line_len: 53.373134 | score: 26.328358
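The two decompression branches above map directly onto the stdlib; a self-contained round-trip sketch of both paths:

import gzip
import zlib
from io import BytesIO

payload = b'chunk data'

# COMPRESSION_GZIP branch: wrap the bytes in a file object for GzipFile
gz = gzip.compress(payload)
print(gzip.GzipFile(fileobj=BytesIO(gz)).read() == payload)  # True

# COMPRESSION_ZLIB branch: a single call
zl = zlib.compress(payload)
print(zlib.decompress(zl) == payload)  # True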
def stop(self):
    """Stops the external measurement program and returns the measurement result,
    if the measurement was running."""
    consumed_energy = collections.defaultdict(dict)
    if not self.is_running():
        return None

    # cpu-energy-meter expects SIGINT to stop and report its result
    self._measurement_process.send_signal(signal.SIGINT)
    (out, err) = self._measurement_process.communicate()
    assert self._measurement_process.returncode is not None
    if self._measurement_process.returncode:
        logging.debug(
            "Energy measurement terminated with return code %s",
            self._measurement_process.returncode)
    self._measurement_process = None

    for line in err.splitlines():
        logging.debug("energy measurement stderr: %s", line)
    for line in out.splitlines():
        line = line.decode('ASCII')
        logging.debug("energy measurement output: %s", line)
        match = re.match(r'cpu(\d+)_([a-z]+)_joules=(\d+\.?\d*)', line)
        if not match:
            continue
        cpu, domain, energy = match.groups()
        cpu = int(cpu)
        energy = Decimal(energy)
        consumed_energy[cpu][domain] = energy
    return consumed_energy
[ "def", "stop", "(", "self", ")", ":", "consumed_energy", "=", "collections", ".", "defaultdict", "(", "dict", ")", "if", "not", "self", ".", "is_running", "(", ")", ":", "return", "None", "# cpu-energy-meter expects SIGINT to stop and report its result", "self", ".", "_measurement_process", ".", "send_signal", "(", "signal", ".", "SIGINT", ")", "(", "out", ",", "err", ")", "=", "self", ".", "_measurement_process", ".", "communicate", "(", ")", "assert", "self", ".", "_measurement_process", ".", "returncode", "is", "not", "None", "if", "self", ".", "_measurement_process", ".", "returncode", ":", "logging", ".", "debug", "(", "\"Energy measurement terminated with return code %s\"", ",", "self", ".", "_measurement_process", ".", "returncode", ")", "self", ".", "_measurement_process", "=", "None", "for", "line", "in", "err", ".", "splitlines", "(", ")", ":", "logging", ".", "debug", "(", "\"energy measurement stderr: %s\"", ",", "line", ")", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "line", "=", "line", ".", "decode", "(", "'ASCII'", ")", "logging", ".", "debug", "(", "\"energy measurement output: %s\"", ",", "line", ")", "match", "=", "re", ".", "match", "(", "r'cpu(\\d+)_([a-z]+)_joules=(\\d+\\.?\\d*)'", ",", "line", ")", "if", "not", "match", ":", "continue", "cpu", ",", "domain", ",", "energy", "=", "match", ".", "groups", "(", ")", "cpu", "=", "int", "(", "cpu", ")", "energy", "=", "Decimal", "(", "energy", ")", "consumed_energy", "[", "cpu", "]", "[", "domain", "]", "=", "energy", "return", "consumed_energy" ]
avg_line_len: 43.2 | score: 15.566667
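The output parsing above keys on lines of the form `cpuN_domain_joules=X`. The same regex, standalone (the sample line is an example of the expected format):

import re
from decimal import Decimal

line = 'cpu0_package_joules=42.5'   # example line in cpu-energy-meter's output format
match = re.match(r'cpu(\d+)_([a-z]+)_joules=(\d+\.?\d*)', line)
cpu, domain, energy = match.groups()
print(int(cpu), domain, Decimal(energy))  # 0 package 42.5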
def timeseries_reactive(self):
    """
    Reactive power time series in kvar.

    Parameters
    ----------
    :pandas:`pandas.Series<series>`
        Series containing reactive power time series in kvar.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>` or None
        Series containing reactive power time series in kvar. If it is
        not set it is tried to be retrieved from
        `generation_reactive_power` attribute of global TimeSeries object.
        If that is not possible None is returned.
    """
    if self._timeseries_reactive is None:
        # try to get time series for reactive power depending on if they
        # are differentiated by weather cell ID or not
        # raise warning if no time series for generator type (and weather
        # cell ID) can be retrieved
        if self.grid.network.timeseries.generation_reactive_power \
                is not None:
            if isinstance(
                    self.grid.network.timeseries.generation_reactive_power.
                    columns, pd.MultiIndex):
                if self.weather_cell_id:
                    try:
                        timeseries = self.grid.network.timeseries. \
                            generation_reactive_power[
                                self.type, self.weather_cell_id].to_frame('q')
                        return timeseries * self.nominal_capacity
                    except (KeyError, TypeError):
                        logger.warning("No time series for type {} and "
                                       "weather cell ID {} given. "
                                       "Reactive power time series will "
                                       "be calculated from assumptions "
                                       "in config files and active power "
                                       "timeseries.".format(
                                           self.type, self.weather_cell_id))
                        return None
                else:
                    raise ValueError(
                        "No weather cell ID provided for fluctuating "
                        "generator {}, but reactive power is given as a "
                        "MultiIndex suggesting that it is differentiated "
                        "by weather cell ID.".format(repr(self)))
            else:
                try:
                    timeseries = self.grid.network.timeseries. \
                        generation_reactive_power[self.type].to_frame('q')
                    return timeseries * self.nominal_capacity
                except (KeyError, TypeError):
                    logger.warning("No reactive power time series for "
                                   "type {} given. Reactive power time "
                                   "series will be calculated from "
                                   "assumptions in config files and "
                                   "active power timeseries.".format(
                                       self.type))
                    return None
        else:
            return None
    else:
        return self._timeseries_reactive.loc[
            self.grid.network.timeseries.timeindex, :]
[ "def", "timeseries_reactive", "(", "self", ")", ":", "if", "self", ".", "_timeseries_reactive", "is", "None", ":", "# try to get time series for reactive power depending on if they", "# are differentiated by weather cell ID or not", "# raise warning if no time series for generator type (and weather", "# cell ID) can be retrieved", "if", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_reactive_power", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_reactive_power", ".", "columns", ",", "pd", ".", "MultiIndex", ")", ":", "if", "self", ".", "weather_cell_id", ":", "try", ":", "timeseries", "=", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_reactive_power", "[", "self", ".", "type", ",", "self", ".", "weather_cell_id", "]", ".", "to_frame", "(", "'q'", ")", "return", "timeseries", "*", "self", ".", "nominal_capacity", "except", "(", "KeyError", ",", "TypeError", ")", ":", "logger", ".", "warning", "(", "\"No time series for type {} and \"", "\"weather cell ID {} given. \"", "\"Reactive power time series will \"", "\"be calculated from assumptions \"", "\"in config files and active power \"", "\"timeseries.\"", ".", "format", "(", "self", ".", "type", ",", "self", ".", "weather_cell_id", ")", ")", "return", "None", "else", ":", "raise", "ValueError", "(", "\"No weather cell ID provided for fluctuating \"", "\"generator {}, but reactive power is given as a \"", "\"MultiIndex suggesting that it is differentiated \"", "\"by weather cell ID.\"", ".", "format", "(", "repr", "(", "self", ")", ")", ")", "else", ":", "try", ":", "timeseries", "=", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "generation_reactive_power", "[", "self", ".", "type", "]", ".", "to_frame", "(", "'q'", ")", "return", "timeseries", "*", "self", ".", "nominal_capacity", "except", "(", "KeyError", ",", "TypeError", ")", ":", "logger", ".", "warning", "(", "\"No reactive power time series for \"", "\"type {} given. Reactive power time \"", "\"series will be calculated from \"", "\"assumptions in config files and \"", "\"active power timeseries.\"", ".", "format", "(", "self", ".", "type", ")", ")", "return", "None", "else", ":", "return", "None", "else", ":", "return", "self", ".", "_timeseries_reactive", ".", "loc", "[", "self", ".", "grid", ".", "network", ".", "timeseries", ".", "timeindex", ",", ":", "]" ]
avg_line_len: 49.485294 | score: 22.897059
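The branch on `pd.MultiIndex` columns above selects one series by a `(type, weather_cell_id)` tuple. A minimal pandas sketch of that lookup (the column labels here are made up):

import pandas as pd

cols = pd.MultiIndex.from_tuples([('wind', 1122075), ('solar', 1122075)])
df = pd.DataFrame([[0.9, 0.8], [0.7, 0.6]], columns=cols)

# tuple indexing picks one (type, weather_cell_id) column as a Series
timeseries = df[('wind', 1122075)].to_frame('q')
print(timeseries)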
def create(entropy_coefficient, value_coefficient, max_grad_norm,
           discount_factor, gae_lambda=1.0):
    """ Vel factory function """
    return A2CPolicyGradient(
        entropy_coefficient, value_coefficient, max_grad_norm,
        discount_factor, gae_lambda
    )
[ "def", "create", "(", "entropy_coefficient", ",", "value_coefficient", ",", "max_grad_norm", ",", "discount_factor", ",", "gae_lambda", "=", "1.0", ")", ":", "return", "A2CPolicyGradient", "(", "entropy_coefficient", ",", "value_coefficient", ",", "max_grad_norm", ",", "discount_factor", ",", "gae_lambda", ")" ]
avg_line_len: 31.444444 | score: 20.777778
def isomap(self, num_dims=None, directed=None):
    '''Isomap embedding.

    num_dims : dimension of embedded coordinates, defaults to input dimension
    directed : used for .shortest_path() calculation
    '''
    W = -0.5 * self.shortest_path(directed=directed) ** 2
    kpca = KernelPCA(n_components=num_dims, kernel='precomputed')
    return kpca.fit_transform(W)
[ "def", "isomap", "(", "self", ",", "num_dims", "=", "None", ",", "directed", "=", "None", ")", ":", "W", "=", "-", "0.5", "*", "self", ".", "shortest_path", "(", "directed", "=", "directed", ")", "**", "2", "kpca", "=", "KernelPCA", "(", "n_components", "=", "num_dims", ",", "kernel", "=", "'precomputed'", ")", "return", "kpca", ".", "fit_transform", "(", "W", ")" ]
avg_line_len: 40.111111 | score: 21.666667
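Feeding `-0.5 * D**2` to `KernelPCA(kernel='precomputed')` is the classical-MDS trick: KernelPCA centers the supplied matrix internally, which turns squared Euclidean distances into a valid Gram matrix. A self-contained sketch with plain Euclidean distances standing in for the graph shortest paths:

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import KernelPCA

X = np.random.RandomState(0).rand(6, 3)
D = squareform(pdist(X))            # pairwise Euclidean distance matrix
W = -0.5 * D ** 2                   # same transform as isomap() above
emb = KernelPCA(n_components=2, kernel='precomputed').fit_transform(W)
print(emb.shape)                    # (6, 2)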
def cmd(send, msg, args):
    """Pesters somebody.

    Syntax: {command} <nick> <message>
    """
    if not msg or len(msg.split()) < 2:
        send("Pester needs at least two arguments.")
        return
    match = re.match('(%s+) (.*)' % args['config']['core']['nickregex'], msg)
    if match:
        message = match.group(2) + " "
        send('%s: %s' % (match.group(1), message * 3))
    else:
        send("Invalid Syntax.")
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "not", "msg", "or", "len", "(", "msg", ".", "split", "(", ")", ")", "<", "2", ":", "send", "(", "\"Pester needs at least two arguments.\"", ")", "return", "match", "=", "re", ".", "match", "(", "'(%s+) (.*)'", "%", "args", "[", "'config'", "]", "[", "'core'", "]", "[", "'nickregex'", "]", ",", "msg", ")", "if", "match", ":", "message", "=", "match", ".", "group", "(", "2", ")", "+", "\" \"", "send", "(", "'%s: %s'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "message", "*", "3", ")", ")", "else", ":", "send", "(", "\"Invalid Syntax.\"", ")" ]
avg_line_len: 28.066667 | score: 17.066667
def set_model_internal_data(model, original_data, modified_data, deleted_data):
    """
    Set internal data to model.
    """
    model.__original_data__ = original_data
    list(map(model._prepare_child, model.__original_data__))
    model.__modified_data__ = modified_data
    list(map(model._prepare_child, model.__modified_data__))
    model.__deleted_fields__ = deleted_data

    return model
[ "def", "set_model_internal_data", "(", "model", ",", "original_data", ",", "modified_data", ",", "deleted_data", ")", ":", "model", ".", "__original_data__", "=", "original_data", "list", "(", "map", "(", "model", ".", "_prepare_child", ",", "model", ".", "__original_data__", ")", ")", "model", ".", "__modified_data__", "=", "modified_data", "list", "(", "map", "(", "model", ".", "_prepare_child", ",", "model", ".", "__modified_data__", ")", ")", "model", ".", "__deleted_fields__", "=", "deleted_data", "return", "model" ]
avg_line_len: 29.923077 | score: 18.538462
def insert_into_range(self, operations: ops.OP_TREE, start: int, end: int) -> int:
    """Writes operations inline into an area of the circuit.

    Args:
        start: The start of the range (inclusive) to write the
            given operations into.
        end: The end of the range (exclusive) to write the given
            operations into. If there are still operations remaining,
            new moments are created to fit them.
        operations: An operation or tree of operations to insert.

    Returns:
        An insertion index that will place operations after the operations
        that were inserted by this method.

    Raises:
        IndexError: Bad inline_start and/or inline_end.
    """
    if not 0 <= start <= end <= len(self):
        raise IndexError('Bad insert indices: [{}, {})'.format(start, end))

    operations = list(ops.flatten_op_tree(operations))
    for op in operations:
        self._device.validate_operation(op)

    i = start
    op_index = 0
    while op_index < len(operations):
        op = operations[op_index]
        while i < end and not self._device.can_add_operation_into_moment(
                op, self._moments[i]):
            i += 1
        if i >= end:
            break
        self._moments[i] = self._moments[i].with_operation(op)
        op_index += 1

    if op_index >= len(operations):
        return end

    return self.insert(end, operations[op_index:])
[ "def", "insert_into_range", "(", "self", ",", "operations", ":", "ops", ".", "OP_TREE", ",", "start", ":", "int", ",", "end", ":", "int", ")", "->", "int", ":", "if", "not", "0", "<=", "start", "<=", "end", "<=", "len", "(", "self", ")", ":", "raise", "IndexError", "(", "'Bad insert indices: [{}, {})'", ".", "format", "(", "start", ",", "end", ")", ")", "operations", "=", "list", "(", "ops", ".", "flatten_op_tree", "(", "operations", ")", ")", "for", "op", "in", "operations", ":", "self", ".", "_device", ".", "validate_operation", "(", "op", ")", "i", "=", "start", "op_index", "=", "0", "while", "op_index", "<", "len", "(", "operations", ")", ":", "op", "=", "operations", "[", "op_index", "]", "while", "i", "<", "end", "and", "not", "self", ".", "_device", ".", "can_add_operation_into_moment", "(", "op", ",", "self", ".", "_moments", "[", "i", "]", ")", ":", "i", "+=", "1", "if", "i", ">=", "end", ":", "break", "self", ".", "_moments", "[", "i", "]", "=", "self", ".", "_moments", "[", "i", "]", ".", "with_operation", "(", "op", ")", "op_index", "+=", "1", "if", "op_index", ">=", "len", "(", "operations", ")", ":", "return", "end", "return", "self", ".", "insert", "(", "end", ",", "operations", "[", "op_index", ":", "]", ")" ]
avg_line_len: 35.711111 | score: 19.377778
def add_s(self, s, obj, priority=0):
    """ Adds a target 'string' for dispatching """
    chain = self.strs.get(s, CommandChainDispatcher())
    chain.add(obj, priority)
    self.strs[s] = chain
[ "def", "add_s", "(", "self", ",", "s", ",", "obj", ",", "priority", "=", "0", ")", ":", "chain", "=", "self", ".", "strs", ".", "get", "(", "s", ",", "CommandChainDispatcher", "(", ")", ")", "chain", ".", "add", "(", "obj", ",", "priority", ")", "self", ".", "strs", "[", "s", "]", "=", "chain" ]
avg_line_len: 34.833333 | score: 13.5
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN config from all SP Templates that have it."""
    sp_template_info_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())

    vlan_name = self.make_vlan_name(vlan_id)
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)

    try:
        # sp_template_info_list is a list of tuples.
        # Each tuple is of the form :
        # (ucsm_ip, sp_template_path, sp_template)
        for sp_template_info in sp_template_info_list:
            sp_template_path = sp_template_info.path
            sp_template = sp_template_info.name

            sp_template_full_path = (sp_template_path +
                                     const.SP_TEMPLATE_PREFIX + sp_template)
            obj = handle.query_dn(sp_template_full_path)
            if not obj:
                LOG.error('UCS Manager network driver could not '
                          'find Service Profile template %s',
                          sp_template_full_path)
                continue

            eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
                              for ep in virtio_port_list]
            for eth_port_path in eth_port_paths:
                eth = handle.query_dn(eth_port_path)
                if eth:
                    vlan_path = (eth_port_path +
                                 const.VLAN_PATH_PREFIX + vlan_name)
                    vlan = handle.query_dn(vlan_path)
                    if vlan:
                        # Found vlan config. Now remove it.
                        handle.remove_mo(vlan)
                    else:
                        LOG.debug('UCS Manager network driver did not '
                                  'find VLAN %s at %s',
                                  vlan_name, eth_port_path)
                else:
                    LOG.debug('UCS Manager network driver did not '
                              'find ethernet port at %s', eth_port_path)
        handle.commit()
        return True
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
[ "def", "_remove_vlan_from_all_sp_templates", "(", "self", ",", "handle", ",", "vlan_id", ",", "ucsm_ip", ")", ":", "sp_template_info_list", "=", "(", "CONF", ".", "ml2_cisco_ucsm", ".", "ucsms", "[", "ucsm_ip", "]", ".", "sp_template_list", ".", "values", "(", ")", ")", "vlan_name", "=", "self", ".", "make_vlan_name", "(", "vlan_id", ")", "virtio_port_list", "=", "(", "CONF", ".", "ml2_cisco_ucsm", ".", "ucsms", "[", "ucsm_ip", "]", ".", "ucsm_virtio_eth_ports", ")", "try", ":", "# sp_template_info_list is a list of tuples.", "# Each tuple is of the form :", "# (ucsm_ip, sp_template_path, sp_template)", "for", "sp_template_info", "in", "sp_template_info_list", ":", "sp_template_path", "=", "sp_template_info", ".", "path", "sp_template", "=", "sp_template_info", ".", "name", "sp_template_full_path", "=", "(", "sp_template_path", "+", "const", ".", "SP_TEMPLATE_PREFIX", "+", "sp_template", ")", "obj", "=", "handle", ".", "query_dn", "(", "sp_template_full_path", ")", "if", "not", "obj", ":", "LOG", ".", "error", "(", "'UCS Manager network driver could not '", "'find Service Profile template %s'", ",", "sp_template_full_path", ")", "continue", "eth_port_paths", "=", "[", "\"%s%s\"", "%", "(", "sp_template_full_path", ",", "ep", ")", "for", "ep", "in", "virtio_port_list", "]", "for", "eth_port_path", "in", "eth_port_paths", ":", "eth", "=", "handle", ".", "query_dn", "(", "eth_port_path", ")", "if", "eth", ":", "vlan_path", "=", "(", "eth_port_path", "+", "const", ".", "VLAN_PATH_PREFIX", "+", "vlan_name", ")", "vlan", "=", "handle", ".", "query_dn", "(", "vlan_path", ")", "if", "vlan", ":", "# Found vlan config. Now remove it.", "handle", ".", "remove_mo", "(", "vlan", ")", "else", ":", "LOG", ".", "debug", "(", "'UCS Manager network driver did not '", "'find VLAN %s at %s'", ",", "vlan_name", ",", "eth_port_path", ")", "else", ":", "LOG", ".", "debug", "(", "'UCS Manager network driver did not '", "'find ethernet port at %s'", ",", "eth_port_path", ")", "handle", ".", "commit", "(", ")", "return", "True", "except", "Exception", "as", "e", ":", "# Raise a Neutron exception. Include a description of", "# the original exception.", "raise", "cexc", ".", "UcsmConfigDeleteFailed", "(", "config", "=", "vlan_id", ",", "ucsm_ip", "=", "ucsm_ip", ",", "exc", "=", "e", ")" ]
avg_line_len: 45.396226 | score: 19.679245
def team(self, team, simple=False):
    """
    Get data on a single specified team.

    :param team: Team to get data for.
    :param simple: Get only vital data.
    :return: Team object with data on specified team.
    """
    return Team(self._get('team/%s%s' % (self.team_key(team),
                                         '/simple' if simple else '')))
[ "def", "team", "(", "self", ",", "team", ",", "simple", "=", "False", ")", ":", "return", "Team", "(", "self", ".", "_get", "(", "'team/%s%s'", "%", "(", "self", ".", "team_key", "(", "team", ")", ",", "'/simple'", "if", "simple", "else", "''", ")", ")", ")" ]
avg_line_len: 37.666667 | score: 14.111111
def _validate_timeout(cls, value, name):
    """ Check that a timeout attribute is valid.

    :param value: The timeout value to validate
    :param name: The name of the timeout attribute to validate. This is
        used to specify in error messages.
    :return: The validated and casted version of the given value.
    :raises ValueError: If it is a numeric value less than or equal to
        zero, or the type is not an integer, float, or None.
    """
    if value is _Default:
        return cls.DEFAULT_TIMEOUT

    if value is None or value is cls.DEFAULT_TIMEOUT:
        return value

    if isinstance(value, bool):
        raise ValueError("Timeout cannot be a boolean value. It must "
                         "be an int, float or None.")
    try:
        float(value)
    except (TypeError, ValueError):
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int, float or None." % (name, value))

    try:
        if value <= 0:
            raise ValueError("Attempted to set %s timeout to %s, but the "
                             "timeout cannot be set to a value less "
                             "than or equal to 0." % (name, value))
    except TypeError:  # Python 3
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int, float or None." % (name, value))

    return value
[ "def", "_validate_timeout", "(", "cls", ",", "value", ",", "name", ")", ":", "if", "value", "is", "_Default", ":", "return", "cls", ".", "DEFAULT_TIMEOUT", "if", "value", "is", "None", "or", "value", "is", "cls", ".", "DEFAULT_TIMEOUT", ":", "return", "value", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "ValueError", "(", "\"Timeout cannot be a boolean value. It must \"", "\"be an int, float or None.\"", ")", "try", ":", "float", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "\"Timeout value %s was %s, but it must be an \"", "\"int, float or None.\"", "%", "(", "name", ",", "value", ")", ")", "try", ":", "if", "value", "<=", "0", ":", "raise", "ValueError", "(", "\"Attempted to set %s timeout to %s, but the \"", "\"timeout cannot be set to a value less \"", "\"than or equal to 0.\"", "%", "(", "name", ",", "value", ")", ")", "except", "TypeError", ":", "# Python 3", "raise", "ValueError", "(", "\"Timeout value %s was %s, but it must be an \"", "\"int, float or None.\"", "%", "(", "name", ",", "value", ")", ")", "return", "value" ]
42
22.142857
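A hedged illustration of the validation rules in `_validate_timeout` above; the `Timeout` class name, `DEFAULT_TIMEOUT`, and the `_Default` sentinel are assumed from the entry, not verified against any particular release.

# Valid values pass through (hypothetical urllib3-style Timeout class).
Timeout._validate_timeout(2.5, 'connect')   # returns 2.5
Timeout._validate_timeout(None, 'read')     # None is returned unchanged
# Booleans, non-positive numbers and non-numeric strings all raise.
for bad in (True, 0, 'soon'):
    try:
        Timeout._validate_timeout(bad, 'total')
    except ValueError as exc:
        print(exc)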
def determine_signs(nodes, edges, cutoff=1e-10):
    """
    Construct the orientation matrix for the pairs of N molecules.

    >>> determine_signs([0, 1, 2], [(0, 1, 1), (0, 2, -1), (1, 2, -1)])
    array([ 1, 1, -1])

    """
    N = len(nodes)
    M = np.zeros((N, N), dtype=float)
    for a, b, w in edges:
        M[a, b] += w
    M = symmetrize(M)
    return get_signs(M, cutoff=cutoff, validate=False)
[ "def", "determine_signs", "(", "nodes", ",", "edges", ",", "cutoff", "=", "1e-10", ")", ":", "N", "=", "len", "(", "nodes", ")", "M", "=", "np", ".", "zeros", "(", "(", "N", ",", "N", ")", ",", "dtype", "=", "float", ")", "for", "a", ",", "b", ",", "w", "in", "edges", ":", "M", "[", "a", ",", "b", "]", "+=", "w", "M", "=", "symmetrize", "(", "M", ")", "return", "get_signs", "(", "M", ",", "cutoff", "=", "cutoff", ",", "validate", "=", "False", ")" ]
28.357143
18.214286
def __parse_names():
    '''Gets and parses the names.tsv.gz file'''
    filename = get_file('names.tsv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[1])

            if chebi_id not in __ALL_NAMES:
                __ALL_NAMES[chebi_id] = []

            # Append Name:
            nme = Name(tokens[4],
                       tokens[2],
                       tokens[3],
                       tokens[5] == 'T',
                       tokens[6])

            __ALL_NAMES[chebi_id].append(nme)
[ "def", "__parse_names", "(", ")", ":", "filename", "=", "get_file", "(", "'names.tsv.gz'", ")", "with", "io", ".", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "'cp1252'", ")", "as", "textfile", ":", "next", "(", "textfile", ")", "for", "line", "in", "textfile", ":", "tokens", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "chebi_id", "=", "int", "(", "tokens", "[", "1", "]", ")", "if", "chebi_id", "not", "in", "__ALL_NAMES", ":", "__ALL_NAMES", "[", "chebi_id", "]", "=", "[", "]", "# Append Name:", "nme", "=", "Name", "(", "tokens", "[", "4", "]", ",", "tokens", "[", "2", "]", ",", "tokens", "[", "3", "]", ",", "tokens", "[", "5", "]", "==", "'T'", ",", "tokens", "[", "6", "]", ")", "__ALL_NAMES", "[", "chebi_id", "]", ".", "append", "(", "nme", ")" ]
27.863636
15.590909
def lookup(self, nick): """Looks for the most recent paste by a given nick. Returns the uid or None""" query = dict(nick=nick) order = [('time', pymongo.DESCENDING)] recs = self.db.pastes.find(query).sort(order).limit(1) try: return next(recs)['uid'] except StopIteration: pass
[ "def", "lookup", "(", "self", ",", "nick", ")", ":", "query", "=", "dict", "(", "nick", "=", "nick", ")", "order", "=", "[", "(", "'time'", ",", "pymongo", ".", "DESCENDING", ")", "]", "recs", "=", "self", ".", "db", ".", "pastes", ".", "find", "(", "query", ")", ".", "sort", "(", "order", ")", ".", "limit", "(", "1", ")", "try", ":", "return", "next", "(", "recs", ")", "[", "'uid'", "]", "except", "StopIteration", ":", "pass" ]
34.8
12.1
def serialize(self, pid, record, links_factory=None): """Serialize a single record and persistent identifier. :param pid: Persistent identifier instance. :param record: Record instance. :param links_factory: Factory function for record links. """ return self.schema.tostring( self.transform_record(pid, record, links_factory))
[ "def", "serialize", "(", "self", ",", "pid", ",", "record", ",", "links_factory", "=", "None", ")", ":", "return", "self", ".", "schema", ".", "tostring", "(", "self", ".", "transform_record", "(", "pid", ",", "record", ",", "links_factory", ")", ")" ]
42.111111
12.777778
def wait(self, timeout=None): """ Waits for the client to stop its loop """ self.__stopped.wait(timeout) return self.__stopped.is_set()
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "self", ".", "__stopped", ".", "wait", "(", "timeout", ")", "return", "self", ".", "__stopped", ".", "is_set", "(", ")" ]
28.333333
3.666667
def iterparse_elements(element_function, file_or_path, **kwargs):
    """
    Applies element_function to each of the sub-elements in the XML file.
    The passed in function must take at least one element, and optional
    **kwargs which are relevant to each of the elements in the list:
        def elem_func(each_elem, **kwargs)

    Implements the recommended cElementTree iterparse pattern, which is
    efficient for reading in a file, making changes and writing it again.
    """

    if not hasattr(element_function, '__call__'):
        return

    file_path = getattr(file_or_path, 'name', file_or_path)
    context = iter(iterparse(file_path, events=('start', 'end')))
    root = None  # Capture root for Memory management

    # Start event loads child; by the End event it's ready for processing
    for event, child in context:
        if root is None:
            root = child
        if event == 'end':  # Ensures the element has been fully read
            element_function(child, **kwargs)
            root.clear()
[ "def", "iterparse_elements", "(", "element_function", ",", "file_or_path", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "element_function", ",", "'__call__'", ")", ":", "return", "file_path", "=", "getattr", "(", "file_or_path", ",", "'name'", ",", "file_or_path", ")", "context", "=", "iter", "(", "iterparse", "(", "file_path", ",", "events", "=", "(", "'start'", ",", "'end'", ")", ")", ")", "root", "=", "None", "# Capture root for Memory management", "# Start event loads child; by the End event it's ready for processing", "for", "event", ",", "child", "in", "context", ":", "if", "root", "is", "None", ":", "root", "=", "child", "if", "event", "==", "'end'", ":", "# Ensures the element has been fully read", "element_function", "(", "child", ",", "*", "*", "kwargs", ")", "root", ".", "clear", "(", ")" ]
39.153846
23.384615
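A short usage sketch for `iterparse_elements` above; the callback and the file name are hypothetical, and `iterparse` is assumed to be the one from `xml.etree.ElementTree`.

from xml.etree.ElementTree import iterparse  # assumed import for the entry above

def strip_text(elem, **kwargs):
    # Invoked once per fully-read element ('end' event); trim its text.
    if elem.text:
        elem.text = elem.text.strip()

iterparse_elements(strip_text, 'records.xml')  # hypothetical XML file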
def deserialize_namespace(data): ''' Deserialize a Namespace object. :param data: bytes or str :return: namespace ''' if isinstance(data, bytes): data = data.decode('utf-8') kvs = data.split() uri_to_prefix = {} for kv in kvs: i = kv.rfind(':') if i == -1: raise ValueError('no colon in namespace ' 'field {}'.format(repr(kv))) uri, prefix = kv[0:i], kv[i + 1:] if not is_valid_schema_uri(uri): # Currently this can't happen because the only invalid URIs # are those which contain a space raise ValueError( 'invalid URI {} in namespace ' 'field {}'.format(repr(uri), repr(kv))) if not is_valid_prefix(prefix): raise ValueError( 'invalid prefix {} in namespace field' ' {}'.format(repr(prefix), repr(kv))) if uri in uri_to_prefix: raise ValueError( 'duplicate URI {} in ' 'namespace {}'.format(repr(uri), repr(data))) uri_to_prefix[uri] = prefix return Namespace(uri_to_prefix)
[ "def", "deserialize_namespace", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "kvs", "=", "data", ".", "split", "(", ")", "uri_to_prefix", "=", "{", "}", "for", "kv", "in", "kvs", ":", "i", "=", "kv", ".", "rfind", "(", "':'", ")", "if", "i", "==", "-", "1", ":", "raise", "ValueError", "(", "'no colon in namespace '", "'field {}'", ".", "format", "(", "repr", "(", "kv", ")", ")", ")", "uri", ",", "prefix", "=", "kv", "[", "0", ":", "i", "]", ",", "kv", "[", "i", "+", "1", ":", "]", "if", "not", "is_valid_schema_uri", "(", "uri", ")", ":", "# Currently this can't happen because the only invalid URIs", "# are those which contain a space", "raise", "ValueError", "(", "'invalid URI {} in namespace '", "'field {}'", ".", "format", "(", "repr", "(", "uri", ")", ",", "repr", "(", "kv", ")", ")", ")", "if", "not", "is_valid_prefix", "(", "prefix", ")", ":", "raise", "ValueError", "(", "'invalid prefix {} in namespace field'", "' {}'", ".", "format", "(", "repr", "(", "prefix", ")", ",", "repr", "(", "kv", ")", ")", ")", "if", "uri", "in", "uri_to_prefix", ":", "raise", "ValueError", "(", "'duplicate URI {} in '", "'namespace {}'", ".", "format", "(", "repr", "(", "uri", ")", ",", "repr", "(", "data", ")", ")", ")", "uri_to_prefix", "[", "uri", "]", "=", "prefix", "return", "Namespace", "(", "uri_to_prefix", ")" ]
35.71875
12.78125
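A hedged example of the wire format `deserialize_namespace` expects: whitespace-separated `uri:prefix` pairs split on the last colon. The URIs are illustrative, and the module's `Namespace` class and validators from the entry above are assumed to be in scope.

ns = deserialize_namespace(b'http://example.com/schema:ex urn:demo:d')
# Splitting each pair on its last ':' yields
# {'http://example.com/schema': 'ex', 'urn:demo': 'd'}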
def json_repr(self, minimal=False): """Construct a JSON-friendly representation of the object. :param bool minimal: [ignored] :rtype: list """ if self.value: return [self.field, self.operator, self.value] else: return [self.field, self.operator]
[ "def", "json_repr", "(", "self", ",", "minimal", "=", "False", ")", ":", "if", "self", ".", "value", ":", "return", "[", "self", ".", "field", ",", "self", ".", "operator", ",", "self", ".", "value", "]", "else", ":", "return", "[", "self", ".", "field", ",", "self", ".", "operator", "]" ]
28.090909
16
def CopyFromDateTimeString(self, time_string): """Copies a SleuthKit timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. """ date_time_values = self._CopyDateTimeFromString(time_string) year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = date_time_values.get('minutes', 0) seconds = date_time_values.get('seconds', 0) microseconds = date_time_values.get('microseconds', 0) self._timestamp = self._GetNumberOfSecondsFromElements( year, month, day_of_month, hours, minutes, seconds) self.fraction_of_second = microseconds if pytsk3.TSK_VERSION_NUM >= 0x040200ff: self.fraction_of_second *= 1000 else: self.fraction_of_second *= 10 self._normalized_timestamp = None self.is_local_time = False
[ "def", "CopyFromDateTimeString", "(", "self", ",", "time_string", ")", ":", "date_time_values", "=", "self", ".", "_CopyDateTimeFromString", "(", "time_string", ")", "year", "=", "date_time_values", ".", "get", "(", "'year'", ",", "0", ")", "month", "=", "date_time_values", ".", "get", "(", "'month'", ",", "0", ")", "day_of_month", "=", "date_time_values", ".", "get", "(", "'day_of_month'", ",", "0", ")", "hours", "=", "date_time_values", ".", "get", "(", "'hours'", ",", "0", ")", "minutes", "=", "date_time_values", ".", "get", "(", "'minutes'", ",", "0", ")", "seconds", "=", "date_time_values", ".", "get", "(", "'seconds'", ",", "0", ")", "microseconds", "=", "date_time_values", ".", "get", "(", "'microseconds'", ",", "0", ")", "self", ".", "_timestamp", "=", "self", ".", "_GetNumberOfSecondsFromElements", "(", "year", ",", "month", ",", "day_of_month", ",", "hours", ",", "minutes", ",", "seconds", ")", "self", ".", "fraction_of_second", "=", "microseconds", "if", "pytsk3", ".", "TSK_VERSION_NUM", ">=", "0x040200ff", ":", "self", ".", "fraction_of_second", "*=", "1000", "else", ":", "self", ".", "fraction_of_second", "*=", "10", "self", ".", "_normalized_timestamp", "=", "None", "self", ".", "is_local_time", "=", "False" ]
37.060606
18.30303
def rms(self, stride=1): """Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride """ stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) # stride through TimeSeries, recording RMS data = numpy.zeros(nsteps) for step in range(nsteps): # find step TimeSeries idx = int(stridesamp * step) idx_end = idx + stridesamp stepseries = self[idx:idx_end] rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value)**2)) data[step] = rms_ name = '%s %.2f-second RMS' % (self.name, stride) return self.__class__(data, channel=self.channel, t0=self.t0, name=name, sample_rate=(1/float(stride)))
[ "def", "rms", "(", "self", ",", "stride", "=", "1", ")", ":", "stridesamp", "=", "int", "(", "stride", "*", "self", ".", "sample_rate", ".", "value", ")", "nsteps", "=", "int", "(", "self", ".", "size", "//", "stridesamp", ")", "# stride through TimeSeries, recording RMS", "data", "=", "numpy", ".", "zeros", "(", "nsteps", ")", "for", "step", "in", "range", "(", "nsteps", ")", ":", "# find step TimeSeries", "idx", "=", "int", "(", "stridesamp", "*", "step", ")", "idx_end", "=", "idx", "+", "stridesamp", "stepseries", "=", "self", "[", "idx", ":", "idx_end", "]", "rms_", "=", "numpy", ".", "sqrt", "(", "numpy", ".", "mean", "(", "numpy", ".", "abs", "(", "stepseries", ".", "value", ")", "**", "2", ")", ")", "data", "[", "step", "]", "=", "rms_", "name", "=", "'%s %.2f-second RMS'", "%", "(", "self", ".", "name", ",", "stride", ")", "return", "self", ".", "__class__", "(", "data", ",", "channel", "=", "self", ".", "channel", ",", "t0", "=", "self", ".", "t0", ",", "name", "=", "name", ",", "sample_rate", "=", "(", "1", "/", "float", "(", "stride", ")", ")", ")" ]
37.107143
16.214286
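A minimal sketch of the RMS trend computed above, assuming a gwpy-style `TimeSeries` constructor that accepts an array plus `sample_rate` and `name` keywords; the numbers are illustrative.

import numpy
# 16 seconds of white noise sampled at 256 Hz; rms(stride=1) gives one
# point per second, i.e. a 16-sample trend with sample_rate = 1 Hz.
ts = TimeSeries(numpy.random.normal(size=4096), sample_rate=256, name='noise')
trend = ts.rms(stride=1)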
def _start_docker_vm(): """Start the Dusty VM if it is not already running.""" is_running = docker_vm_is_running() if not is_running: log_to_client('Starting docker-machine VM {}'.format(constants.VM_MACHINE_NAME)) _apply_nat_dns_host_resolver() _apply_nat_net_less_greedy_subnet() check_and_log_output_and_error_demoted(['docker-machine', 'start', constants.VM_MACHINE_NAME], quiet_on_success=True) return is_running
[ "def", "_start_docker_vm", "(", ")", ":", "is_running", "=", "docker_vm_is_running", "(", ")", "if", "not", "is_running", ":", "log_to_client", "(", "'Starting docker-machine VM {}'", ".", "format", "(", "constants", ".", "VM_MACHINE_NAME", ")", ")", "_apply_nat_dns_host_resolver", "(", ")", "_apply_nat_net_less_greedy_subnet", "(", ")", "check_and_log_output_and_error_demoted", "(", "[", "'docker-machine'", ",", "'start'", ",", "constants", ".", "VM_MACHINE_NAME", "]", ",", "quiet_on_success", "=", "True", ")", "return", "is_running" ]
50.777778
21.444444
def _read_bim(self): """Reads the BIM file.""" # Reading the BIM file and setting the values bim = pd.read_csv(self.bim_filename, delim_whitespace=True, names=["chrom", "snp", "cm", "pos", "a1", "a2"], dtype=dict(snp=str, a1=str, a2=str)) # Saving the index as integer bim["i"] = bim.index # Checking for duplicated markers try: bim = bim.set_index("snp", verify_integrity=True) self._has_duplicated = False except ValueError as e: # Setting this flag to true self._has_duplicated = True # Finding the duplicated markers duplicated = bim.snp.duplicated(keep=False) duplicated_markers = bim.loc[duplicated, "snp"] duplicated_marker_counts = duplicated_markers.value_counts() # The dictionary that will contain information about the duplicated # markers self._dup_markers = { m: [] for m in duplicated_marker_counts.index } # Logging a warning logger.warning("Duplicated markers found") for marker, count in duplicated_marker_counts.iteritems(): logger.warning(" - {}: {:,d} times".format(marker, count)) logger.warning("Appending ':dupX' to the duplicated markers " "according to their location in the BIM file") # Renaming the markers counter = Counter() for i, marker in duplicated_markers.iteritems(): counter[marker] += 1 new_name = "{}:dup{}".format(marker, counter[marker]) bim.loc[i, "snp"] = new_name # Updating the dictionary containing the duplicated markers self._dup_markers[marker].append(new_name) # Resetting the index bim = bim.set_index("snp", verify_integrity=True) # Encoding the allele # - The original 0 is the actual 2 (a1/a1) # - The original 2 is the actual 1 (a1/a2) # - The original 3 is the actual 0 (a2/a2) # - The original 1 is the actual -1 (no call) allele_encoding = np.array( [bim.a2 * 2, bim.a1 + bim.a2, bim.a1 * 2, list(repeat("00", bim.shape[0]))], dtype="U2", ) self._allele_encoding = allele_encoding.T # Saving the data in the object self._bim = bim[["chrom", "pos", "cm", "a1", "a2", "i"]] self._nb_markers = self._bim.shape[0]
[ "def", "_read_bim", "(", "self", ")", ":", "# Reading the BIM file and setting the values", "bim", "=", "pd", ".", "read_csv", "(", "self", ".", "bim_filename", ",", "delim_whitespace", "=", "True", ",", "names", "=", "[", "\"chrom\"", ",", "\"snp\"", ",", "\"cm\"", ",", "\"pos\"", ",", "\"a1\"", ",", "\"a2\"", "]", ",", "dtype", "=", "dict", "(", "snp", "=", "str", ",", "a1", "=", "str", ",", "a2", "=", "str", ")", ")", "# Saving the index as integer", "bim", "[", "\"i\"", "]", "=", "bim", ".", "index", "# Checking for duplicated markers", "try", ":", "bim", "=", "bim", ".", "set_index", "(", "\"snp\"", ",", "verify_integrity", "=", "True", ")", "self", ".", "_has_duplicated", "=", "False", "except", "ValueError", "as", "e", ":", "# Setting this flag to true", "self", ".", "_has_duplicated", "=", "True", "# Finding the duplicated markers", "duplicated", "=", "bim", ".", "snp", ".", "duplicated", "(", "keep", "=", "False", ")", "duplicated_markers", "=", "bim", ".", "loc", "[", "duplicated", ",", "\"snp\"", "]", "duplicated_marker_counts", "=", "duplicated_markers", ".", "value_counts", "(", ")", "# The dictionary that will contain information about the duplicated", "# markers", "self", ".", "_dup_markers", "=", "{", "m", ":", "[", "]", "for", "m", "in", "duplicated_marker_counts", ".", "index", "}", "# Logging a warning", "logger", ".", "warning", "(", "\"Duplicated markers found\"", ")", "for", "marker", ",", "count", "in", "duplicated_marker_counts", ".", "iteritems", "(", ")", ":", "logger", ".", "warning", "(", "\" - {}: {:,d} times\"", ".", "format", "(", "marker", ",", "count", ")", ")", "logger", ".", "warning", "(", "\"Appending ':dupX' to the duplicated markers \"", "\"according to their location in the BIM file\"", ")", "# Renaming the markers", "counter", "=", "Counter", "(", ")", "for", "i", ",", "marker", "in", "duplicated_markers", ".", "iteritems", "(", ")", ":", "counter", "[", "marker", "]", "+=", "1", "new_name", "=", "\"{}:dup{}\"", ".", "format", "(", "marker", ",", "counter", "[", "marker", "]", ")", "bim", ".", "loc", "[", "i", ",", "\"snp\"", "]", "=", "new_name", "# Updating the dictionary containing the duplicated markers", "self", ".", "_dup_markers", "[", "marker", "]", ".", "append", "(", "new_name", ")", "# Resetting the index", "bim", "=", "bim", ".", "set_index", "(", "\"snp\"", ",", "verify_integrity", "=", "True", ")", "# Encoding the allele", "# - The original 0 is the actual 2 (a1/a1)", "# - The original 2 is the actual 1 (a1/a2)", "# - The original 3 is the actual 0 (a2/a2)", "# - The original 1 is the actual -1 (no call)", "allele_encoding", "=", "np", ".", "array", "(", "[", "bim", ".", "a2", "*", "2", ",", "bim", ".", "a1", "+", "bim", ".", "a2", ",", "bim", ".", "a1", "*", "2", ",", "list", "(", "repeat", "(", "\"00\"", ",", "bim", ".", "shape", "[", "0", "]", ")", ")", "]", ",", "dtype", "=", "\"U2\"", ",", ")", "self", ".", "_allele_encoding", "=", "allele_encoding", ".", "T", "# Saving the data in the object", "self", ".", "_bim", "=", "bim", "[", "[", "\"chrom\"", ",", "\"pos\"", ",", "\"cm\"", ",", "\"a1\"", ",", "\"a2\"", ",", "\"i\"", "]", "]", "self", ".", "_nb_markers", "=", "self", ".", "_bim", ".", "shape", "[", "0", "]" ]
39.307692
19.938462
def make_payload(base, method, params): """Build Betfair JSON-RPC payload. :param str base: Betfair base ("Sports" or "Account") :param str method: Betfair endpoint :param dict params: Request parameters """ payload = { 'jsonrpc': '2.0', 'method': '{base}APING/v1.0/{method}'.format(**locals()), 'params': utils.serialize_dict(params), 'id': 1, } return payload
[ "def", "make_payload", "(", "base", ",", "method", ",", "params", ")", ":", "payload", "=", "{", "'jsonrpc'", ":", "'2.0'", ",", "'method'", ":", "'{base}APING/v1.0/{method}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "'params'", ":", "utils", ".", "serialize_dict", "(", "params", ")", ",", "'id'", ":", "1", ",", "}", "return", "payload" ]
29.5
15.285714
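A sketch of the JSON-RPC envelope `make_payload` builds; the method name is illustrative and `utils.serialize_dict` is assumed to pass a plain dict through unchanged.

payload = make_payload('Sports', 'listEventTypes', {'filter': {}})
# {'jsonrpc': '2.0',
#  'method': 'SportsAPING/v1.0/listEventTypes',
#  'params': {'filter': {}},
#  'id': 1}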
def create_win32tz_map(windows_zones_xml):
  """Creates a map between Windows and Olson timezone names.

  Args:
    windows_zones_xml: The CLDR XML mapping.

  Yields:
    (win32_name, territory, olson_name, comment)
  """
  coming_comment = None
  win32_name = None
  territory = None
  parser = genshi.input.XMLParser(StringIO(windows_zones_xml))
  map_zones = {}
  zone_comments = {}

  for kind, data, _ in parser:
    if kind == genshi.core.START and str(data[0]) == "mapZone":
      attrs = data[1]
      win32_name, territory, olson_name = (
          attrs.get("other"), attrs.get("territory"),
          attrs.get("type").split(" ")[0])

      map_zones[(win32_name, territory)] = olson_name
    elif kind == genshi.core.END and str(data) == "mapZone" and win32_name:
      if coming_comment:
        zone_comments[(win32_name, territory)] = coming_comment
        coming_comment = None

      win32_name = None
    elif kind == genshi.core.COMMENT:
      coming_comment = data.strip()
    elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
      coming_comment = None

  for win32_name, territory in sorted(map_zones):
    yield (win32_name, territory, map_zones[(win32_name, territory)],
           zone_comments.get((win32_name, territory), None))
[ "def", "create_win32tz_map", "(", "windows_zones_xml", ")", ":", "coming_comment", "=", "None", "win32_name", "=", "None", "territory", "=", "None", "parser", "=", "genshi", ".", "input", ".", "XMLParser", "(", "StringIO", "(", "windows_zones_xml", ")", ")", "map_zones", "=", "{", "}", "zone_comments", "=", "{", "}", "for", "kind", ",", "data", ",", "_", "in", "parser", ":", "if", "kind", "==", "genshi", ".", "core", ".", "START", "and", "str", "(", "data", "[", "0", "]", ")", "==", "\"mapZone\"", ":", "attrs", "=", "data", "[", "1", "]", "win32_name", ",", "territory", ",", "olson_name", "=", "(", "attrs", ".", "get", "(", "\"other\"", ")", ",", "attrs", ".", "get", "(", "\"territory\"", ")", ",", "attrs", ".", "get", "(", "\"type\"", ")", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", "map_zones", "[", "(", "win32_name", ",", "territory", ")", "]", "=", "olson_name", "elif", "kind", "==", "genshi", ".", "core", ".", "END", "and", "str", "(", "data", ")", "==", "\"mapZone\"", "and", "win32_name", ":", "if", "coming_comment", ":", "zone_comments", "[", "(", "win32_name", ",", "territory", ")", "]", "=", "coming_comment", "coming_comment", "=", "None", "win32_name", "=", "None", "elif", "kind", "==", "genshi", ".", "core", ".", "COMMENT", ":", "coming_comment", "=", "data", ".", "strip", "(", ")", "elif", "kind", "in", "(", "genshi", ".", "core", ".", "START", ",", "genshi", ".", "core", ".", "END", ",", "genshi", ".", "core", ".", "COMMENT", ")", ":", "coming_comment", "=", "None", "for", "win32_name", ",", "territory", "in", "sorted", "(", "map_zones", ")", ":", "yield", "(", "win32_name", ",", "territory", ",", "map_zones", "[", "(", "win32_name", ",", "territory", ")", "]", ",", "zone_comments", ".", "get", "(", "(", "win32_name", ",", "territory", ")", ",", "None", ")", ")" ]
34
20.25
def docstring_to_markdown(docstring): """Convert a Python object's docstring to markdown Parameters ---------- docstring : str The docstring body. Returns ---------- clean_lst : list The markdown formatted docstring as lines (str) in a Python list. """ new_docstring_lst = [] for idx, line in enumerate(docstring.split('\n')): line = line.strip() if set(line) in ({'-'}, {'='}): new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1] elif line.startswith('>>>'): line = ' %s' % line new_docstring_lst.append(line) for idx, line in enumerate(new_docstring_lst[1:]): if line: if line.startswith('Description : '): new_docstring_lst[idx+1] = (new_docstring_lst[idx+1] .replace('Description : ', '')) elif ' : ' in line: line = line.replace(' : ', '` : ') new_docstring_lst[idx+1] = '\n- `%s\n' % line elif '**' in new_docstring_lst[idx-1] and '**' not in line: new_docstring_lst[idx+1] = '\n%s' % line.lstrip() elif '**' not in line: new_docstring_lst[idx+1] = ' %s' % line.lstrip() clean_lst = [] for line in new_docstring_lst: if set(line.strip()) not in ({'-'}, {'='}): clean_lst.append(line) return clean_lst
[ "def", "docstring_to_markdown", "(", "docstring", ")", ":", "new_docstring_lst", "=", "[", "]", "for", "idx", ",", "line", "in", "enumerate", "(", "docstring", ".", "split", "(", "'\\n'", ")", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "set", "(", "line", ")", "in", "(", "{", "'-'", "}", ",", "{", "'='", "}", ")", ":", "new_docstring_lst", "[", "idx", "-", "1", "]", "=", "'**%s**'", "%", "new_docstring_lst", "[", "idx", "-", "1", "]", "elif", "line", ".", "startswith", "(", "'>>>'", ")", ":", "line", "=", "' %s'", "%", "line", "new_docstring_lst", ".", "append", "(", "line", ")", "for", "idx", ",", "line", "in", "enumerate", "(", "new_docstring_lst", "[", "1", ":", "]", ")", ":", "if", "line", ":", "if", "line", ".", "startswith", "(", "'Description : '", ")", ":", "new_docstring_lst", "[", "idx", "+", "1", "]", "=", "(", "new_docstring_lst", "[", "idx", "+", "1", "]", ".", "replace", "(", "'Description : '", ",", "''", ")", ")", "elif", "' : '", "in", "line", ":", "line", "=", "line", ".", "replace", "(", "' : '", ",", "'` : '", ")", "new_docstring_lst", "[", "idx", "+", "1", "]", "=", "'\\n- `%s\\n'", "%", "line", "elif", "'**'", "in", "new_docstring_lst", "[", "idx", "-", "1", "]", "and", "'**'", "not", "in", "line", ":", "new_docstring_lst", "[", "idx", "+", "1", "]", "=", "'\\n%s'", "%", "line", ".", "lstrip", "(", ")", "elif", "'**'", "not", "in", "line", ":", "new_docstring_lst", "[", "idx", "+", "1", "]", "=", "' %s'", "%", "line", ".", "lstrip", "(", ")", "clean_lst", "=", "[", "]", "for", "line", "in", "new_docstring_lst", ":", "if", "set", "(", "line", ".", "strip", "(", ")", ")", "not", "in", "(", "{", "'-'", "}", ",", "{", "'='", "}", ")", ":", "clean_lst", ".", "append", "(", "line", ")", "return", "clean_lst" ]
33.714286
19.738095
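A self-contained usage sketch: feed `docstring_to_markdown` its own numpydoc-style docstring and join the returned lines back into markdown text.

md_lines = docstring_to_markdown(docstring_to_markdown.__doc__)
# Section underlines become **bold** headers; 'name : type' lines become
# bulleted, backticked list items.
print('\n'.join(md_lines))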
def save_journal(self): """Save journaled commands to file. If there is no active journal, does nothing. If saving the commands to a file fails, a message will be printed to STDERR but the failure will be swallowed so that the extension can be built successfully. """ if self.journal_file is None: return try: as_text = self._commands_to_text() with open(self.journal_file, "w") as file_obj: file_obj.write(as_text) except Exception as exc: msg = BAD_JOURNAL.format(exc) print(msg, file=sys.stderr)
[ "def", "save_journal", "(", "self", ")", ":", "if", "self", ".", "journal_file", "is", "None", ":", "return", "try", ":", "as_text", "=", "self", ".", "_commands_to_text", "(", ")", "with", "open", "(", "self", ".", "journal_file", ",", "\"w\"", ")", "as", "file_obj", ":", "file_obj", ".", "write", "(", "as_text", ")", "except", "Exception", "as", "exc", ":", "msg", "=", "BAD_JOURNAL", ".", "format", "(", "exc", ")", "print", "(", "msg", ",", "file", "=", "sys", ".", "stderr", ")" ]
33.210526
16.684211
def gql(query_string, *args, **kwds): """Parse a GQL query string. Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. *args, **kwds: If present, used to call bind(). Returns: An instance of query_class. """ qry = _gql(query_string) if args or kwds: qry = qry._bind(args, kwds) return qry
[ "def", "gql", "(", "query_string", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "qry", "=", "_gql", "(", "query_string", ")", "if", "args", "or", "kwds", ":", "qry", "=", "qry", ".", "_bind", "(", "args", ",", "kwds", ")", "return", "qry" ]
23.785714
19.571429
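A hedged usage sketch for `gql` above, assuming NDB-style positional binding with `:1` placeholders.

qry = gql("SELECT * FROM Kind WHERE prop = :1", 42)
# With args or kwds present, the parsed query is re-bound via qry._bind().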
def zoom_bbox(self, bbox): """Zoom map to geometry extent. Arguments: bbox -- OGRGeometry polygon to zoom map extent """ try: bbox.transform(self.map.srs) except gdal.GDALException: pass else: self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))
[ "def", "zoom_bbox", "(", "self", ",", "bbox", ")", ":", "try", ":", "bbox", ".", "transform", "(", "self", ".", "map", ".", "srs", ")", "except", "gdal", ".", "GDALException", ":", "pass", "else", ":", "self", ".", "map", ".", "zoom_to_box", "(", "mapnik", ".", "Box2d", "(", "*", "bbox", ".", "extent", ")", ")" ]
26.916667
16.25
def socketBinaryStream(self, hostname, port, length): """Create a TCP socket server for binary input. .. warning:: This is not part of the PySpark API. :param string hostname: Hostname of TCP server. :param int port: Port of TCP server. :param length: Message length. Length in bytes or a format string for ``struct.unpack()``. For variable length messages where the message length is sent right before the message itself, ``length`` is a format string that can be passed to ``struct.unpack()``. For example, use ``length='<I'`` for a little-endian (standard on x86) 32-bit unsigned int. :rtype: DStream """ deserializer = TCPDeserializer(self._context) tcp_binary_stream = TCPBinaryStream(length) tcp_binary_stream.listen(port, hostname) self._on_stop_cb.append(tcp_binary_stream.stop) return DStream(tcp_binary_stream, self, deserializer)
[ "def", "socketBinaryStream", "(", "self", ",", "hostname", ",", "port", ",", "length", ")", ":", "deserializer", "=", "TCPDeserializer", "(", "self", ".", "_context", ")", "tcp_binary_stream", "=", "TCPBinaryStream", "(", "length", ")", "tcp_binary_stream", ".", "listen", "(", "port", ",", "hostname", ")", "self", ".", "_on_stop_cb", ".", "append", "(", "tcp_binary_stream", ".", "stop", ")", "return", "DStream", "(", "tcp_binary_stream", ",", "self", ",", "deserializer", ")" ]
42.166667
18
def fields(self): """Returns the list of field names of the model.""" return (self.attributes.values() + self.lists.values() + self.references.values())
[ "def", "fields", "(", "self", ")", ":", "return", "(", "self", ".", "attributes", ".", "values", "(", ")", "+", "self", ".", "lists", ".", "values", "(", ")", "+", "self", ".", "references", ".", "values", "(", ")", ")" ]
45.25
12
def foldl1(f: Callable[[T, T], T], xs: Iterable[T]) -> T:
    """ Returns the accumulated result of a binary function applied to elements
    of an iterable.

    .. math::
        foldl1(f, [x_0, x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)

    Examples
    --------
    >>> from delphi.utils.fp import foldl1
    >>> foldl1(lambda x, y: x + y, range(5))
    10

    """
    return reduce(f, xs)
[ "def", "foldl1", "(", "f", ":", "Callable", "[", "[", "T", ",", "T", "]", ",", "T", "]", ",", "xs", ":", "Iterable", "[", "T", "]", ")", "->", "T", ":", "return", "reduce", "(", "f", ",", "xs", ")" ]
24.125
23
def make_device_class(spark_cloud, entries, timeout=30): """Returns a dynamic Device class based on what a GET device list from the Spark Cloud returns. spark_cloud parameter should be the caller instance of SparkCloud. entries parameter should be the list of fields the Spark Cloud API is returning. """ attrs = list( set( list(entries) + [ 'requires_deep_update', 'functions', 'variables', 'api', 'status' ] ) ) return type( 'Device', (_BaseDevice, namedtuple('Device', attrs)), {'__slots__': (), 'spark_cloud': spark_cloud, 'timeout' : timeout} )
[ "def", "make_device_class", "(", "spark_cloud", ",", "entries", ",", "timeout", "=", "30", ")", ":", "attrs", "=", "list", "(", "set", "(", "list", "(", "entries", ")", "+", "[", "'requires_deep_update'", ",", "'functions'", ",", "'variables'", ",", "'api'", ",", "'status'", "]", ")", ")", "return", "type", "(", "'Device'", ",", "(", "_BaseDevice", ",", "namedtuple", "(", "'Device'", ",", "attrs", ")", ")", ",", "{", "'__slots__'", ":", "(", ")", ",", "'spark_cloud'", ":", "spark_cloud", ",", "'timeout'", ":", "timeout", "}", ")" ]
33.954545
23.272727
def disk(self): """ Display percent of disk usage. """ r = self.local_renderer r.run(r.env.disk_usage_command)
[ "def", "disk", "(", "self", ")", ":", "r", "=", "self", ".", "local_renderer", "r", ".", "run", "(", "r", ".", "env", ".", "disk_usage_command", ")" ]
24.166667
6.166667
def publish(self,message,message_type,topic=''): """ Publish the message on the PUB socket with the given topic name. Args: - message: the message to publish - message_type: the type of message being sent - topic: the topic on which to send the message. Defaults to ''. """ if message_type == MULTIPART: raise Exception("Unsupported request type") super(Publisher,self).send(message,message_type,topic)
[ "def", "publish", "(", "self", ",", "message", ",", "message_type", ",", "topic", "=", "''", ")", ":", "if", "message_type", "==", "MULTIPART", ":", "raise", "Exception", "(", "\"Unsupported request type\"", ")", "super", "(", "Publisher", ",", "self", ")", ".", "send", "(", "message", ",", "message_type", ",", "topic", ")" ]
38.153846
18.307692
def _f_gene(sid, prefix="G_"): """Clips gene prefix from id.""" sid = sid.replace(SBML_DOT, ".") return _clip(sid, prefix)
[ "def", "_f_gene", "(", "sid", ",", "prefix", "=", "\"G_\"", ")", ":", "sid", "=", "sid", ".", "replace", "(", "SBML_DOT", ",", "\".\"", ")", "return", "_clip", "(", "sid", ",", "prefix", ")" ]
32.75
6.25
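A hedged illustration of `_f_gene`: `_clip` is assumed to strip a leading prefix, and the concrete value of `SBML_DOT` (the escape token that is turned back into a '.') is an assumption here.

_f_gene('G_b0351')  # -> 'b0351' (the 'G_' prefix is clipped)
# Any SBML_DOT escape sequence in sid is restored to '.' before clipping.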
def clean_up(self):#, grid): """ de-select grid cols, refresh grid """ if self.selected_col: col_label_value = self.grid.GetColLabelValue(self.selected_col) col_label_value = col_label_value.strip('\nEDIT ALL') self.grid.SetColLabelValue(self.selected_col, col_label_value) for row in range(self.grid.GetNumberRows()): self.grid.SetCellBackgroundColour(row, self.selected_col, 'white') self.grid.ForceRefresh()
[ "def", "clean_up", "(", "self", ")", ":", "#, grid):", "if", "self", ".", "selected_col", ":", "col_label_value", "=", "self", ".", "grid", ".", "GetColLabelValue", "(", "self", ".", "selected_col", ")", "col_label_value", "=", "col_label_value", ".", "strip", "(", "'\\nEDIT ALL'", ")", "self", ".", "grid", ".", "SetColLabelValue", "(", "self", ".", "selected_col", ",", "col_label_value", ")", "for", "row", "in", "range", "(", "self", ".", "grid", ".", "GetNumberRows", "(", ")", ")", ":", "self", ".", "grid", ".", "SetCellBackgroundColour", "(", "row", ",", "self", ".", "selected_col", ",", "'white'", ")", "self", ".", "grid", ".", "ForceRefresh", "(", ")" ]
45.818182
16.727273
def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """ url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
[ "def", "iter_comments", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "ReviewComment", ",", "etag", "=", "etag", ")" ]
48.818182
20.727273
def _split_path_by_reserved_name(self, path): """Return: object_tree_path, resolver, controlled_path.""" for i, e in enumerate(path): if e in self._resolvers or e == self._get_readme_filename(): return path[:i], path[i], path[i + 1 :] raise d1_onedrive.impl.onedrive_exceptions.PathException( 'Invalid folder: %s' % str(path) )
[ "def", "_split_path_by_reserved_name", "(", "self", ",", "path", ")", ":", "for", "i", ",", "e", "in", "enumerate", "(", "path", ")", ":", "if", "e", "in", "self", ".", "_resolvers", "or", "e", "==", "self", ".", "_get_readme_filename", "(", ")", ":", "return", "path", "[", ":", "i", "]", ",", "path", "[", "i", "]", ",", "path", "[", "i", "+", "1", ":", "]", "raise", "d1_onedrive", ".", "impl", ".", "onedrive_exceptions", ".", "PathException", "(", "'Invalid folder: %s'", "%", "str", "(", "path", ")", ")" ]
49
14.5
def _remove_files(files): """ Remove all given files. Args: files (list): List of filenames, which will be removed. """ logger.debug("Request for file removal (_remove_files()).") for fn in files: if os.path.exists(fn): logger.debug("Removing '%s'." % fn) os.remove(fn)
[ "def", "_remove_files", "(", "files", ")", ":", "logger", ".", "debug", "(", "\"Request for file removal (_remove_files()).\"", ")", "for", "fn", "in", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "logger", ".", "debug", "(", "\"Removing '%s'.\"", "%", "fn", ")", "os", ".", "remove", "(", "fn", ")" ]
24.846154
18.230769
def find_location(self, root, path, prefix=None): """ Finds a requested media file in a location, returning the found absolute path (or ``None`` if no match). """ if prefix: prefix = '%s%s' % (prefix, os.sep) if not path.startswith(prefix): return None path = path[len(prefix):] path = safe_join(root, path) if os.path.exists(path): return path
[ "def", "find_location", "(", "self", ",", "root", ",", "path", ",", "prefix", "=", "None", ")", ":", "if", "prefix", ":", "prefix", "=", "'%s%s'", "%", "(", "prefix", ",", "os", ".", "sep", ")", "if", "not", "path", ".", "startswith", "(", "prefix", ")", ":", "return", "None", "path", "=", "path", "[", "len", "(", "prefix", ")", ":", "]", "path", "=", "safe_join", "(", "root", ",", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "path" ]
34.769231
9.538462
def next_interval(self, interval): """ Given a value of an interval, this function returns the next interval value """ index = np.where(self.intervals == interval) if index[0][0] + 1 < len(self.intervals): return self.intervals[index[0][0] + 1] else: raise IndexError("Ran out of intervals!")
[ "def", "next_interval", "(", "self", ",", "interval", ")", ":", "index", "=", "np", ".", "where", "(", "self", ".", "intervals", "==", "interval", ")", "if", "index", "[", "0", "]", "[", "0", "]", "+", "1", "<", "len", "(", "self", ".", "intervals", ")", ":", "return", "self", ".", "intervals", "[", "index", "[", "0", "]", "[", "0", "]", "+", "1", "]", "else", ":", "raise", "IndexError", "(", "\"Ran out of intervals!\"", ")" ]
36.4
11.4
def _check_out_arg(func): """Check if ``func`` has an (optional) ``out`` argument. Also verify that the signature of ``func`` has no ``*args`` since they make argument propagation a hassle. Parameters ---------- func : callable Object that should be inspected. Returns ------- has_out : bool ``True`` if the signature has an ``out`` argument, ``False`` otherwise. out_is_optional : bool ``True`` if ``out`` is present and optional in the signature, ``False`` otherwise. Raises ------ TypeError If ``func``'s signature has ``*args``. """ if sys.version_info.major > 2: spec = inspect.getfullargspec(func) kw_only = spec.kwonlyargs else: spec = inspect.getargspec(func) kw_only = () if spec.varargs is not None: raise TypeError('*args not allowed in function signature') pos_args = spec.args pos_defaults = () if spec.defaults is None else spec.defaults has_out = 'out' in pos_args or 'out' in kw_only if 'out' in pos_args: has_out = True out_is_optional = ( pos_args.index('out') >= len(pos_args) - len(pos_defaults)) elif 'out' in kw_only: has_out = out_is_optional = True else: has_out = out_is_optional = False return has_out, out_is_optional
[ "def", "_check_out_arg", "(", "func", ")", ":", "if", "sys", ".", "version_info", ".", "major", ">", "2", ":", "spec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "kw_only", "=", "spec", ".", "kwonlyargs", "else", ":", "spec", "=", "inspect", ".", "getargspec", "(", "func", ")", "kw_only", "=", "(", ")", "if", "spec", ".", "varargs", "is", "not", "None", ":", "raise", "TypeError", "(", "'*args not allowed in function signature'", ")", "pos_args", "=", "spec", ".", "args", "pos_defaults", "=", "(", ")", "if", "spec", ".", "defaults", "is", "None", "else", "spec", ".", "defaults", "has_out", "=", "'out'", "in", "pos_args", "or", "'out'", "in", "kw_only", "if", "'out'", "in", "pos_args", ":", "has_out", "=", "True", "out_is_optional", "=", "(", "pos_args", ".", "index", "(", "'out'", ")", ">=", "len", "(", "pos_args", ")", "-", "len", "(", "pos_defaults", ")", ")", "elif", "'out'", "in", "kw_only", ":", "has_out", "=", "out_is_optional", "=", "True", "else", ":", "has_out", "=", "out_is_optional", "=", "False", "return", "has_out", ",", "out_is_optional" ]
27.183673
20.428571
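A hedged demonstration of the three possible outcomes of `_check_out_arg`, using hypothetical signatures:

def no_out(x): pass
def optional_out(x, out=None): pass
def required_out(x, out): pass

print(_check_out_arg(no_out))        # (False, False)
print(_check_out_arg(optional_out))  # (True, True)
print(_check_out_arg(required_out))  # (True, False)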
def process_result_value(self, value: Optional[str], dialect: Dialect) -> List[str]: """Convert things on the way from the database to Python.""" retval = self._dbstr_to_strlist(value) return retval
[ "def", "process_result_value", "(", "self", ",", "value", ":", "Optional", "[", "str", "]", ",", "dialect", ":", "Dialect", ")", "->", "List", "[", "str", "]", ":", "retval", "=", "self", ".", "_dbstr_to_strlist", "(", "value", ")", "return", "retval" ]
49.4
11.4
def validate(config): """Validate a configuration file.""" with open(config) as fh: data = utils.yaml_load(fh.read()) jsonschema.validate(data, CONFIG_SCHEMA)
[ "def", "validate", "(", "config", ")", ":", "with", "open", "(", "config", ")", "as", "fh", ":", "data", "=", "utils", ".", "yaml_load", "(", "fh", ".", "read", "(", ")", ")", "jsonschema", ".", "validate", "(", "data", ",", "CONFIG_SCHEMA", ")" ]
35.6
8
def cmd_link(self, args): '''handle link commands''' if len(args) < 1: self.show_link() elif args[0] == "list": self.cmd_link_list() elif args[0] == "add": if len(args) != 2: print("Usage: link add LINK") return self.cmd_link_add(args[1:]) elif args[0] == "ports": self.cmd_link_ports() elif args[0] == "remove": if len(args) != 2: print("Usage: link remove LINK") return self.cmd_link_remove(args[1:]) else: print("usage: link <list|add|remove>")
[ "def", "cmd_link", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "1", ":", "self", ".", "show_link", "(", ")", "elif", "args", "[", "0", "]", "==", "\"list\"", ":", "self", ".", "cmd_link_list", "(", ")", "elif", "args", "[", "0", "]", "==", "\"add\"", ":", "if", "len", "(", "args", ")", "!=", "2", ":", "print", "(", "\"Usage: link add LINK\"", ")", "return", "self", ".", "cmd_link_add", "(", "args", "[", "1", ":", "]", ")", "elif", "args", "[", "0", "]", "==", "\"ports\"", ":", "self", ".", "cmd_link_ports", "(", ")", "elif", "args", "[", "0", "]", "==", "\"remove\"", ":", "if", "len", "(", "args", ")", "!=", "2", ":", "print", "(", "\"Usage: link remove LINK\"", ")", "return", "self", ".", "cmd_link_remove", "(", "args", "[", "1", ":", "]", ")", "else", ":", "print", "(", "\"usage: link <list|add|remove>\"", ")" ]
32.2
10.3
def recommend(self, userid, user_items, N=10, filter_already_liked_items=True, filter_items=None, recalculate_user=False): """ Recommends items for a user Calculates the N best recommendations for a user, and returns a list of itemids, score. Parameters ---------- userid : int The userid to calculate recommendations for user_items : csr_matrix A sparse matrix of shape (number_users, number_items). This lets us look up the liked items and their weights for the user. This is used to filter out items that have already been liked from the output, and to also potentially calculate the best items for this user. N : int, optional The number of results to return filter_items : sequence of ints, optional List of extra item ids to filter out from the output recalculate_user : bool, optional When true, don't rely on stored user state and instead recalculate from the passed in user_items Returns ------- list List of (itemid, score) tuples """ pass
[ "def", "recommend", "(", "self", ",", "userid", ",", "user_items", ",", "N", "=", "10", ",", "filter_already_liked_items", "=", "True", ",", "filter_items", "=", "None", ",", "recalculate_user", "=", "False", ")", ":", "pass" ]
39.366667
23.166667
def df(self): """Return dict with size of Ya.Disk. Keys: 'available', 'used'.""" def parseContent(content): root = ET.fromstring(content) return { 'available': root.find(".//d:quota-available-bytes", namespaces=self.namespaces).text, 'used': root.find(".//d:quota-used-bytes", namespaces=self.namespaces).text } data = """ <D:propfind xmlns:D="DAV:"> <D:prop> <D:quota-available-bytes/> <D:quota-used-bytes/> </D:prop> </D:propfind> """ resp = self._sendRequest("PROPFIND", "/", {'Depth': '0'}, data) if resp.status_code == 207: return parseContent(resp.content) else: raise YaDiskException(resp.status_code, resp.content)
[ "def", "df", "(", "self", ")", ":", "def", "parseContent", "(", "content", ")", ":", "root", "=", "ET", ".", "fromstring", "(", "content", ")", "return", "{", "'available'", ":", "root", ".", "find", "(", "\".//d:quota-available-bytes\"", ",", "namespaces", "=", "self", ".", "namespaces", ")", ".", "text", ",", "'used'", ":", "root", ".", "find", "(", "\".//d:quota-used-bytes\"", ",", "namespaces", "=", "self", ".", "namespaces", ")", ".", "text", "}", "data", "=", "\"\"\"\n<D:propfind xmlns:D=\"DAV:\">\n <D:prop>\n <D:quota-available-bytes/>\n <D:quota-used-bytes/>\n </D:prop>\n</D:propfind>\n \"\"\"", "resp", "=", "self", ".", "_sendRequest", "(", "\"PROPFIND\"", ",", "\"/\"", ",", "{", "'Depth'", ":", "'0'", "}", ",", "data", ")", "if", "resp", ".", "status_code", "==", "207", ":", "return", "parseContent", "(", "resp", ".", "content", ")", "else", ":", "raise", "YaDiskException", "(", "resp", ".", "status_code", ",", "resp", ".", "content", ")" ]
33.173913
21.391304
def __get_chunk_dimensions(self):
        """ Sets the chunking dimensions depending on the file type.
        """

        #Usually '.0000.' is in self.filename
        if np.abs(self.header[b'foff']) < 1e-5:
            logger.info('Detecting high frequency resolution data.')
            chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
            return chunk_dim
        #Usually '.0001.' is in self.filename
        elif np.abs(self.header[b'tsamp']) < 1e-3:
            logger.info('Detecting high time resolution data.')
            chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
            return chunk_dim
        #Usually '.0002.' is in self.filename
        elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
            logger.info('Detecting intermediate frequency and time resolution data.')
            chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
#             chunk_dim = (1,1,65536/4)
            return chunk_dim
        else:
            logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
            chunk_dim = (1,1,512)
            return chunk_dim
[ "def", "__get_chunk_dimensions", "(", "self", ")", ":", "#Usually '.0000.' is in self.filename", "if", "np", ".", "abs", "(", "self", ".", "header", "[", "b'foff'", "]", ")", "<", "1e-5", ":", "logger", ".", "info", "(", "'Detecting high frequency resolution data.'", ")", "chunk_dim", "=", "(", "1", ",", "1", ",", "1048576", ")", "#1048576 is the number of channels in a coarse channel.", "return", "chunk_dim", "#Usually '.0001.' is in self.filename", "elif", "np", ".", "abs", "(", "self", ".", "header", "[", "b'tsamp'", "]", ")", "<", "1e-3", ":", "logger", ".", "info", "(", "'Detecting high time resolution data.'", ")", "chunk_dim", "=", "(", "2048", ",", "1", ",", "512", ")", "#512 is the total number of channels per single band (ie. blc00)", "return", "chunk_dim", "#Usually '.0002.' is in self.filename", "elif", "np", ".", "abs", "(", "self", ".", "header", "[", "b'foff'", "]", ")", "<", "1e-2", "and", "np", ".", "abs", "(", "self", ".", "header", "[", "b'foff'", "]", ")", ">=", "1e-5", ":", "logger", ".", "info", "(", "'Detecting intermediate frequency and time resolution data.'", ")", "chunk_dim", "=", "(", "10", ",", "1", ",", "65536", ")", "#65536 is the total number of channels per single band (ie. blc00)", "# chunk_dim = (1,1,65536/4)", "return", "chunk_dim", "else", ":", "logger", ".", "warning", "(", "'File format not known. Will use minimum chunking. NOT OPTIMAL.'", ")", "chunk_dim", "=", "(", "1", ",", "1", ",", "512", ")", "return", "chunk_dim" ]
51.583333
22.5
def rgb_to_rgb_percent(rgb_triplet): """ Convert a 3-tuple of integers, suitable for use in an ``rgb()`` color triplet, to a 3-tuple of percentages suitable for use in representing that color. This function makes some trade-offs in terms of the accuracy of the final representation; for some common integer values, special-case logic is used to ensure a precise result (e.g., integer 128 will always convert to '50%', integer 32 will always convert to '12.5%'), but for all other values a standard Python ``float`` is used and rounded to two decimal places, which may result in a loss of precision for some values. """ # In order to maintain precision for common values, # special-case them. specials = {255: u'100%', 128: u'50%', 64: u'25%', 32: u'12.5%', 16: u'6.25%', 0: u'0%'} return PercentRGB._make( specials.get(d, u'{:.02f}%'.format(d / 255.0 * 100)) for d in normalize_integer_triplet(rgb_triplet) )
[ "def", "rgb_to_rgb_percent", "(", "rgb_triplet", ")", ":", "# In order to maintain precision for common values,", "# special-case them.", "specials", "=", "{", "255", ":", "u'100%'", ",", "128", ":", "u'50%'", ",", "64", ":", "u'25%'", ",", "32", ":", "u'12.5%'", ",", "16", ":", "u'6.25%'", ",", "0", ":", "u'0%'", "}", "return", "PercentRGB", ".", "_make", "(", "specials", ".", "get", "(", "d", ",", "u'{:.02f}%'", ".", "format", "(", "d", "/", "255.0", "*", "100", ")", ")", "for", "d", "in", "normalize_integer_triplet", "(", "rgb_triplet", ")", ")" ]
42.956522
19.652174
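Illustrative conversions, assuming webcolors-style helpers (`PercentRGB`, `normalize_integer_triplet`) are in scope; note the special-cased 32 mapping to '12.5%' rather than a rounded float:

print(rgb_to_rgb_percent((255, 128, 0)))   # PercentRGB(red='100%', green='50%', blue='0%')
print(rgb_to_rgb_percent((218, 165, 32)))  # PercentRGB(red='85.49%', green='64.71%', blue='12.5%')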
def loadmask(self, filename: str) -> np.ndarray: """Load a mask file.""" mask = scipy.io.loadmat(self.find_file(filename, what='mask')) maskkey = [k for k in mask.keys() if not (k.startswith('_') or k.endswith('_'))][0] return mask[maskkey].astype(np.bool)
[ "def", "loadmask", "(", "self", ",", "filename", ":", "str", ")", "->", "np", ".", "ndarray", ":", "mask", "=", "scipy", ".", "io", ".", "loadmat", "(", "self", ".", "find_file", "(", "filename", ",", "what", "=", "'mask'", ")", ")", "maskkey", "=", "[", "k", "for", "k", "in", "mask", ".", "keys", "(", ")", "if", "not", "(", "k", ".", "startswith", "(", "'_'", ")", "or", "k", ".", "endswith", "(", "'_'", ")", ")", "]", "[", "0", "]", "return", "mask", "[", "maskkey", "]", ".", "astype", "(", "np", ".", "bool", ")" ]
56.8
18.6
def contrib_phone(contrib_tag):
    """
    Given a contrib tag, look for a phone tag
    """
    phone = None
    if raw_parser.phone(contrib_tag):
        phone = first(raw_parser.phone(contrib_tag)).text
    return phone
[ "def", "contrib_phone", "(", "contrib_tag", ")", ":", "phone", "=", "None", "if", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ":", "phone", "=", "first", "(", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ")", ".", "text", "return", "phone" ]
27.125
10.375
def GlorotUniformInitializer(out_dim=0, in_dim=1): """An initializer function for random uniform Glorot-scaled coefficients.""" def init(shape, rng): fan_in, fan_out = shape[in_dim], shape[out_dim] std = np.sqrt(2.0 / (fan_in + fan_out)) a = np.sqrt(3.0) * std return backend.random.uniform(rng, shape, minval=-a, maxval=a) return init
[ "def", "GlorotUniformInitializer", "(", "out_dim", "=", "0", ",", "in_dim", "=", "1", ")", ":", "def", "init", "(", "shape", ",", "rng", ")", ":", "fan_in", ",", "fan_out", "=", "shape", "[", "in_dim", "]", ",", "shape", "[", "out_dim", "]", "std", "=", "np", ".", "sqrt", "(", "2.0", "/", "(", "fan_in", "+", "fan_out", ")", ")", "a", "=", "np", ".", "sqrt", "(", "3.0", ")", "*", "std", "return", "backend", ".", "random", ".", "uniform", "(", "rng", ",", "shape", ",", "minval", "=", "-", "a", ",", "maxval", "=", "a", ")", "return", "init" ]
43.75
13.5
def k_depth(d, depth, _counter=1):
        """Iterate keys at a specific depth. depth has to be greater than or
        equal to 0.

        For usage reference, see :meth:`DictTree.kv_depth()<DictTree.kv_depth>`
        """
        if depth == 0:
            yield d[_meta]["_rootname"]
        else:
            if _counter == depth:
                for key in DictTree.k(d):
                    yield key
            else:
                _counter += 1
                for node in DictTree.v(d):
                    for key in DictTree.k_depth(node, depth, _counter):
                        yield key
[ "def", "k_depth", "(", "d", ",", "depth", ",", "_counter", "=", "1", ")", ":", "if", "depth", "==", "0", ":", "yield", "d", "[", "_meta", "]", "[", "\"_rootname\"", "]", "else", ":", "if", "_counter", "==", "depth", ":", "for", "key", "in", "DictTree", ".", "k", "(", "d", ")", ":", "yield", "key", "else", ":", "_counter", "+=", "1", "for", "node", "in", "DictTree", ".", "v", "(", "d", ")", ":", "for", "key", "in", "DictTree", ".", "k_depth", "(", "node", ",", "depth", ",", "_counter", ")", ":", "yield", "key" ]
36
11.5625
def read(filename='cache'):
    """ parameter: filename - name of the cache file
        return: data after parsing the JSON file"""
    cache_path = get_cache_path(filename)
    if not os.path.exists(cache_path) or os.stat(cache_path).st_size == 0:
        return None

    with open(cache_path, 'r') as file:
        return json.load(file)
[ "def", "read", "(", "filename", "=", "'cache'", ")", ":", "cache_path", "=", "get_cache_path", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "cache_path", ")", "or", "os", ".", "stat", "(", "cache_path", ")", ".", "st_size", "==", "0", ":", "return", "None", "with", "open", "(", "cache_path", ",", "'r'", ")", "as", "file", ":", "return", "json", ".", "load", "(", "file", ")" ]
32.777778
10.555556
def _get_path_pattern_tornado4(self): """Return the path pattern used when routing a request. (Tornado<4.5) :rtype: str """ for host, handlers in self.application.handlers: if host.match(self.request.host): for handler in handlers: if handler.regex.match(self.request.path): return handler.regex.pattern
[ "def", "_get_path_pattern_tornado4", "(", "self", ")", ":", "for", "host", ",", "handlers", "in", "self", ".", "application", ".", "handlers", ":", "if", "host", ".", "match", "(", "self", ".", "request", ".", "host", ")", ":", "for", "handler", "in", "handlers", ":", "if", "handler", ".", "regex", ".", "match", "(", "self", ".", "request", ".", "path", ")", ":", "return", "handler", ".", "regex", ".", "pattern" ]
39.9
11.9
def allowance (self, filename):
        """Preconditions:
        - our agent applies to this entry
        - filename is URL decoded

        Check if given filename is allowed to access this entry.

        @return: True if allowed, else False
        @rtype: bool
        """
        for line in self.rulelines:
            log.debug(LOG_CHECK, "%s %s %s", filename, str(line), line.allowance)
            if line.applies_to(filename):
                log.debug(LOG_CHECK, " ... rule line %s", line)
                return line.allowance
        log.debug(LOG_CHECK, " ... no rule lines of %s applied to %s; allowed.", self.useragents, filename)
        return True
[ "def", "allowance", "(", "self", ",", "filename", ")", ":", "for", "line", "in", "self", ".", "rulelines", ":", "log", ".", "debug", "(", "LOG_CHECK", ",", "\"%s %s %s\"", ",", "filename", ",", "str", "(", "line", ")", ",", "line", ".", "allowance", ")", "if", "line", ".", "applies_to", "(", "filename", ")", ":", "log", ".", "debug", "(", "LOG_CHECK", ",", "\" ... rule line %s\"", ",", "line", ")", "return", "line", ".", "allowance", "log", ".", "debug", "(", "LOG_CHECK", ",", "\" ... no rule lines of %s applied to %s; allowed.\"", ",", "self", ".", "useragents", ",", "filename", ")", "return", "True" ]
38.294118
17.941176
def import_trade(self, trade):
        """
        trade is an iterable list/generator
        """
        for item in trade:
            self.make_deal(item.code, item.datetime, item.amount, item.towards,
                           item.price, item.order_model, item.amount_model)
[ "def", "import_trade", "(", "self", ",", "trade", ")", ":", "for", "item", "in", "trade", ":", "self", ".", "make_deal", "(", "item", ".", "code", ",", "item", ".", "datetime", ",", "item", ".", "amount", ",", "item", ".", "towards", ",", "item", ".", "price", ".", "item", ".", "order_model", ",", "item", ".", "amount_model", ")" ]
37.857143
14.714286
def __make_response(self, data, default_renderer=None):
        """
        Creates a Flask response object from the specified data.
        The appropriate encoder is selected based on the request's Accept header.
        If there is no data to be serialized, the response status code is 204.

        :param data: The Python object to be serialized.
        :return: A Flask response object.
        """
        status = headers = None
        if isinstance(data, tuple):
            data, status, headers = unpack(data)

        if data is None:
            data = self.__app.response_class(status=204)
        elif not isinstance(data, self.__app.response_class):
            renderer, mimetype = self.content_negotiation.select_renderer(request, self.default_renderers)

            if not renderer:
                if not default_renderer:
                    raise NotAcceptable()

                renderer = default_renderer
                mimetype = default_renderer.mimetype

            data_bytes = renderer.render(data, mimetype)
            data = self.__app.response_class(data_bytes, mimetype=str(mimetype))

        if status is not None:
            data.status_code = status

        if headers:
            data.headers.extend(headers)

        return data
[ "def", "__make_response", "(", "self", ",", "data", ",", "default_renderer", "=", "None", ")", ":", "status", "=", "headers", "=", "None", "if", "isinstance", "(", "data", ",", "tuple", ")", ":", "data", ",", "status", ",", "headers", "=", "unpack", "(", "data", ")", "if", "data", "is", "None", ":", "data", "=", "self", ".", "__app", ".", "response_class", "(", "status", "=", "204", ")", "elif", "not", "isinstance", "(", "data", ",", "self", ".", "__app", ".", "response_class", ")", ":", "renderer", ",", "mimetype", "=", "self", ".", "content_negotiation", ".", "select_renderer", "(", "request", ",", "self", ".", "default_renderers", ")", "if", "not", "renderer", ":", "if", "not", "default_renderer", ":", "raise", "NotAcceptable", "(", ")", "renderer", "=", "default_renderer", "mimetype", "=", "default_renderer", ".", "mimetype", "data_bytes", "=", "renderer", ".", "render", "(", "data", ",", "mimetype", ")", "data", "=", "self", ".", "__app", ".", "response_class", "(", "data_bytes", ",", "mimetype", "=", "str", "(", "mimetype", ")", ")", "if", "status", "is", "not", "None", ":", "data", ".", "status_code", "=", "status", "if", "headers", ":", "data", ".", "headers", ".", "extend", "(", "headers", ")", "return", "data" ]
34.416667
21.416667
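A self-contained sketch of the (data, status, headers) unpacking convention the method relies on; this unpack reflects the helper's assumed behaviour, not its actual source:

# Assumed behaviour of the unpack helper: normalize view return values
# of length 2 or 3 to a (data, status, headers) triple.
def unpack(value):
    if len(value) == 2:
        data, status = value
        return data, status, None
    return value

print(unpack(({"id": 1}, 201)))                      # ({'id': 1}, 201, None)
print(unpack(({"id": 1}, 200, {"X-Trace": "abc"})))  # with headers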
def fetch_twitter_lists_for_user_ids_generator(twitter_app_key, twitter_app_secret, user_id_list):
    """
    Collects at most 500 Twitter lists for each user from an input list of Twitter user ids.

    Inputs: - twitter_app_key: What it says on the tin.
            - twitter_app_secret: Ditto.
            - user_id_list: A python list of Twitter user ids.

    Yields: - user_twitter_id: A Twitter user id.
            - twitter_lists_list: A python list containing Twitter lists in dictionary (json) format.
    """
    ####################################################################################################################
    # Log into my application.
    ####################################################################################################################
    twitter = login(twitter_app_key, twitter_app_secret)

    ####################################################################################################################
    # For each user, gather at most 500 Twitter lists.
    ####################################################################################################################
    get_list_memberships_counter = 0
    get_list_memberships_time_window_start = time.perf_counter()
    for user_twitter_id in user_id_list:
        # Make a safe twitter request.
        try:
            twitter_lists_list, get_list_memberships_counter, get_list_memberships_time_window_start \
                = safe_twitter_request_handler(twitter_api_func=twitter.get_list_memberships,
                                               call_rate_limit=15,
                                               call_counter=get_list_memberships_counter,
                                               time_window_start=get_list_memberships_time_window_start,
                                               max_retries=5,
                                               wait_period=2,
                                               user_id=user_twitter_id,
                                               count=500,
                                               cursor=-1)
            # If the call is successful, yield the list of Twitter lists.
            yield user_twitter_id, twitter_lists_list
        except (twython.TwythonError, URLError, BadStatusLine):
            # If the call is unsuccessful, we do not have any Twitter lists to store.
            yield user_twitter_id, None
[ "def", "fetch_twitter_lists_for_user_ids_generator", "(", "twitter_app_key", ",", "twitter_app_secret", ",", "user_id_list", ")", ":", "####################################################################################################################", "# Log into my application.", "####################################################################################################################", "twitter", "=", "login", "(", "twitter_app_key", ",", "twitter_app_secret", ")", "####################################################################################################################", "# For each user, gather at most 500 Twitter lists.", "####################################################################################################################", "get_list_memberships_counter", "=", "0", "get_list_memberships_time_window_start", "=", "time", ".", "perf_counter", "(", ")", "for", "user_twitter_id", "in", "user_id_list", ":", "# Make safe twitter request.", "try", ":", "twitter_lists_list", ",", "get_list_memberships_counter", ",", "get_list_memberships_time_window_start", "=", "safe_twitter_request_handler", "(", "twitter_api_func", "=", "twitter", ".", "get_list_memberships", ",", "call_rate_limit", "=", "15", ",", "call_counter", "=", "get_list_memberships_counter", ",", "time_window_start", "=", "get_list_memberships_time_window_start", ",", "max_retries", "=", "5", ",", "wait_period", "=", "2", ",", "user_id", "=", "user_twitter_id", ",", "count", "=", "500", ",", "cursor", "=", "-", "1", ")", "# If the call is succesful, yield the list of Twitter lists.", "yield", "user_twitter_id", ",", "twitter_lists_list", "except", "twython", ".", "TwythonError", ":", "# If the call is unsuccesful, we do not have any Twitter lists to store.", "yield", "user_twitter_id", ",", "None", "except", "URLError", ":", "# If the call is unsuccesful, we do not have any Twitter lists to store.", "yield", "user_twitter_id", ",", "None", "except", "BadStatusLine", ":", "# If the call is unsuccesful, we do not have any Twitter lists to store.", "yield", "user_twitter_id", ",", "None" ]
57.770833
27.6875
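The call_counter/time_window_start pair suggests fixed-window rate limiting inside safe_twitter_request_handler; a self-contained sketch of that pattern under that assumption (window length and limits here are illustrative):

import time

# Fixed-window rate limiting: after call_rate_limit calls, sleep out the
# remainder of the window before continuing.
def rate_limited(items, call_rate_limit=15, window_seconds=900):
    counter = 0
    window_start = time.perf_counter()
    for item in items:
        if counter >= call_rate_limit:
            elapsed = time.perf_counter() - window_start
            if elapsed < window_seconds:
                time.sleep(window_seconds - elapsed)  # wait out the window
            counter = 0
            window_start = time.perf_counter()
        counter += 1
        yield item  # stand-in for the actual API call

for user_id in rate_limited(range(3), call_rate_limit=2, window_seconds=0.1):
    print(user_id)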
def recCopyElement(oldelement):
    """Generates a copy of an xml element and recursively of all child elements.

    :param oldelement: an instance of lxml.etree._Element
    :returns: a copy of the "oldelement"

    .. warning:: doesn't copy ``.text`` or ``.tail`` of xml elements
    """
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    if len(oldelement.getchildren()) > 0:
        for childelement in oldelement.getchildren():
            newelement.append(recCopyElement(childelement))
    return newelement
[ "def", "recCopyElement", "(", "oldelement", ")", ":", "newelement", "=", "ETREE", ".", "Element", "(", "oldelement", ".", "tag", ",", "oldelement", ".", "attrib", ")", "if", "len", "(", "oldelement", ".", "getchildren", "(", ")", ")", ">", "0", ":", "for", "childelement", "in", "oldelement", ".", "getchildren", "(", ")", ":", "newelement", ".", "append", "(", "recCopyElement", "(", "childelement", ")", ")", "return", "newelement" ]
33.25
17.9375
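A runnable check of the copy semantics (requires lxml); the function is restated locally so the snippet is self-contained:

# Attributes and child structure are copied; .text and .tail are not,
# exactly as the docstring warns.
from lxml import etree as ETREE

def recCopyElement(oldelement):
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    for childelement in oldelement:
        newelement.append(recCopyElement(childelement))
    return newelement

src = ETREE.fromstring('<root a="1"><child>hello</child></root>')
dup = recCopyElement(src)
print(ETREE.tostring(dup))  # b'<root a="1"><child/></root>' -- text dropped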
def do_stop_alerts(self, _):
        """ Stops the alerter thread """
        self._stop_thread = True
        if self._alerter_thread.is_alive():
            self._alerter_thread.join()
        else:
            print("The alert thread is already stopped")
[ "def", "do_stop_alerts", "(", "self", ",", "_", ")", ":", "self", ".", "_stop_thread", "=", "True", "if", "self", ".", "_alerter_thread", ".", "is_alive", "(", ")", ":", "self", ".", "_alerter_thread", ".", "join", "(", ")", "else", ":", "print", "(", "\"The alert thread is already stopped\"", ")" ]
35.857143
9.571429
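A minimal, self-contained sketch of the stop-flag pattern this command assumes: the worker polls the flag, the controller sets it and joins. The _run body is an assumption standing in for the real alert loop:

import threading
import time

class Alerter:
    def __init__(self):
        self._stop_thread = False
        self._alerter_thread = threading.Thread(target=self._run)
        self._alerter_thread.start()

    def _run(self):
        while not self._stop_thread:
            time.sleep(0.05)  # poll the flag; real code would check alerts here

    def do_stop_alerts(self, _):
        self._stop_thread = True
        if self._alerter_thread.is_alive():
            self._alerter_thread.join()
        else:
            print("The alert thread is already stopped")

alerter = Alerter()
alerter.do_stop_alerts(None)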
def flick_element(self, on_element, xoffset, yoffset, speed):
        """
        Flick starting at on_element, and moving by the xoffset and yoffset
        with specified speed.

        :Args:
         - on_element: Flick will start at center of element.
         - xoffset: X offset to flick to.
         - yoffset: Y offset to flick to.
         - speed: Pixels per second to flick.
        """
        self._actions.append(lambda: self._driver.execute(
            Command.FLICK, {
                'element': on_element.id,
                'xoffset': int(xoffset),
                'yoffset': int(yoffset),
                'speed': int(speed)}))
        return self
[ "def", "flick_element", "(", "self", ",", "on_element", ",", "xoffset", ",", "yoffset", ",", "speed", ")", ":", "self", ".", "_actions", ".", "append", "(", "lambda", ":", "self", ".", "_driver", ".", "execute", "(", "Command", ".", "FLICK", ",", "{", "'element'", ":", "on_element", ".", "id", ",", "'xoffset'", ":", "int", "(", "xoffset", ")", ",", "'yoffset'", ":", "int", "(", "yoffset", ")", ",", "'speed'", ":", "int", "(", "speed", ")", "}", ")", ")", "return", "self" ]
36.277778
11.944444
def from_file(cls, image_file):
        """
        Return a new |Image| object loaded from *image_file*, which can be
        either a path (string) or a file-like object.
        """
        if is_string(image_file):
            # treat image_file as a path
            with open(image_file, 'rb') as f:
                blob = f.read()
            filename = os.path.basename(image_file)
        else:
            # assume image_file is a file-like object
            # ---reposition file cursor if it has one---
            if callable(getattr(image_file, 'seek', None)):
                image_file.seek(0)
            blob = image_file.read()
            filename = None
        return cls.from_blob(blob, filename)
[ "def", "from_file", "(", "cls", ",", "image_file", ")", ":", "if", "is_string", "(", "image_file", ")", ":", "# treat image_file as a path", "with", "open", "(", "image_file", ",", "'rb'", ")", "as", "f", ":", "blob", "=", "f", ".", "read", "(", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "image_file", ")", "else", ":", "# assume image_file is a file-like object", "# ---reposition file cursor if it has one---", "if", "callable", "(", "getattr", "(", "image_file", ",", "'seek'", ")", ")", ":", "image_file", ".", "seek", "(", "0", ")", "blob", "=", "image_file", ".", "read", "(", ")", "filename", "=", "None", "return", "cls", ".", "from_blob", "(", "blob", ",", "filename", ")" ]
36.631579
11.789474
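A self-contained sketch of the path-or-file-like dispatch used above; the names here are stand-ins for illustration, not the library's API:

import io
import os

def read_blob(source):
    if isinstance(source, str):                 # treat as a filesystem path
        with open(source, "rb") as f:
            return f.read(), os.path.basename(source)
    if callable(getattr(source, "seek", None)):
        source.seek(0)                          # rewind a seekable stream
    return source.read(), None                  # file-like: no filename

blob, name = read_blob(io.BytesIO(b"fake image bytes"))
print(len(blob), name)  # 16 None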
def init_remote(self):
        '''
        Initialize/attach to a remote using pygit2. Return a boolean which
        will let the calling function know whether or not a new repo was
        initialized by this function.
        '''
        # https://github.com/libgit2/pygit2/issues/339
        # https://github.com/libgit2/libgit2/issues/2122
        home = os.path.expanduser('~')
        pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
        new = False
        if not os.listdir(self.cachedir):
            # Repo cachedir is empty, initialize a new repo there
            self.repo = pygit2.init_repository(self.cachedir)
            new = True
        else:
            # Repo cachedir exists, try to attach
            try:
                self.repo = pygit2.Repository(self.cachedir)
            except KeyError:
                log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
                return new
        self.gitdir = salt.utils.path.join(self.repo.workdir, '.git')
        self.enforce_git_config()
        return new
[ "def", "init_remote", "(", "self", ")", ":", "# https://github.com/libgit2/pygit2/issues/339", "# https://github.com/libgit2/libgit2/issues/2122", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "pygit2", ".", "settings", ".", "search_path", "[", "pygit2", ".", "GIT_CONFIG_LEVEL_GLOBAL", "]", "=", "home", "new", "=", "False", "if", "not", "os", ".", "listdir", "(", "self", ".", "cachedir", ")", ":", "# Repo cachedir is empty, initialize a new repo there", "self", ".", "repo", "=", "pygit2", ".", "init_repository", "(", "self", ".", "cachedir", ")", "new", "=", "True", "else", ":", "# Repo cachedir exists, try to attach", "try", ":", "self", ".", "repo", "=", "pygit2", ".", "Repository", "(", "self", ".", "cachedir", ")", "except", "KeyError", ":", "log", ".", "error", "(", "_INVALID_REPO", ",", "self", ".", "cachedir", ",", "self", ".", "url", ",", "self", ".", "role", ")", "return", "new", "self", ".", "gitdir", "=", "salt", ".", "utils", ".", "path", ".", "join", "(", "self", ".", "repo", ".", "workdir", ",", "'.git'", ")", "self", ".", "enforce_git_config", "(", ")", "return", "new" ]
38.703704
21.37037
def init():
    """Initialise and configure the app, database, scheduler, etc.

    This should be called once at application startup or at test startup
    (and not e.g. called once for each test case).
    """
    global _users, _names
    _configure_app(app)
    _users, _names = _init_login_manager(app)
    _configure_logger()
    init_scheduler(app.config.get('SQLALCHEMY_DATABASE_URI'))
    db.init(app.config.get('SQLALCHEMY_DATABASE_URI'))
[ "def", "init", "(", ")", ":", "global", "_users", ",", "_names", "_configure_app", "(", "app", ")", "_users", ",", "_names", "=", "_init_login_manager", "(", "app", ")", "_configure_logger", "(", ")", "init_scheduler", "(", "app", ".", "config", ".", "get", "(", "'SQLALCHEMY_DATABASE_URI'", ")", ")", "db", ".", "init", "(", "app", ".", "config", ".", "get", "(", "'SQLALCHEMY_DATABASE_URI'", ")", ")" ]
33.692308
18.538462
def clear_matplotlib_ticks(self, axis="both"):
        """Clears the default matplotlib ticks."""
        ax = self.get_axes()
        plotting.clear_matplotlib_ticks(ax=ax, axis=axis)
[ "def", "clear_matplotlib_ticks", "(", "self", ",", "axis", "=", "\"both\"", ")", ":", "ax", "=", "self", ".", "get_axes", "(", ")", "plotting", ".", "clear_matplotlib_ticks", "(", "ax", "=", "ax", ",", "axis", "=", "axis", ")" ]
45.25
8.75
def graph_loads(graph_json):
    '''
    Load a graph from its JSON representation.
    '''
    layers = []
    for layer in graph_json['layers']:
        layer_info = Layer(layer['graph_type'], layer['input'], layer['output'], layer['size'], layer['hash_id'])
        layer_info.is_delete = layer['is_delete']
        _logger.debug('append layer {}'.format(layer_info))
        layers.append(layer_info)
    graph = Graph(graph_json['max_layer_num'], graph_json['min_layer_num'], [], [], [])
    graph.layers = layers
    _logger.debug('graph {} loaded'.format(graph))
    return graph
[ "def", "graph_loads", "(", "graph_json", ")", ":", "layers", "=", "[", "]", "for", "layer", "in", "graph_json", "[", "'layers'", "]", ":", "layer_info", "=", "Layer", "(", "layer", "[", "'graph_type'", "]", ",", "layer", "[", "'input'", "]", ",", "layer", "[", "'output'", "]", ",", "layer", "[", "'size'", "]", ",", "layer", "[", "'hash_id'", "]", ")", "layer_info", ".", "is_delete", "=", "layer", "[", "'is_delete'", "]", "_logger", ".", "debug", "(", "'append layer {}'", ".", "format", "(", "layer_info", ")", ")", "layers", ".", "append", "(", "layer_info", ")", "graph", "=", "Graph", "(", "graph_json", "[", "'max_layer_num'", "]", ",", "graph_json", "[", "'min_layer_num'", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "graph", ".", "layers", "=", "layers", "_logger", ".", "debug", "(", "'graph {} loaded'", ".", "format", "(", "graph", ")", ")", "return", "graph" ]
38.642857
23.928571
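The JSON shape graph_loads expects, reconstructed from the key accesses above; all field values are illustrative:

graph_json = {
    "max_layer_num": 4,
    "min_layer_num": 2,
    "layers": [
        {
            "graph_type": 0,
            "input": [0],
            "output": [2],
            "size": 8,
            "hash_id": "a3f1c2",
            "is_delete": False,
        },
    ],
}
# graph = graph_loads(graph_json)  # requires the Layer/Graph classes in scope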
def google_analytics(parser, token):
    """
    Google Analytics tracking template tag.

    Renders Javascript code to track page visits. You must supply
    your website property ID (as a string) in the
    ``GOOGLE_ANALYTICS_PROPERTY_ID`` setting.
    """
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return GoogleAnalyticsNode()
[ "def", "google_analytics", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "GoogleAnalyticsNode", "(", ")" ]
34.083333
12.583333
def assign_to_a_queue(self, action):
        """Take an action and put it into a worker's actions queue

        :param action: action to put
        :type action: alignak.action.Action
        :return: None
        """
        (worker_id, queue) = self._get_queue_for_the_action(action)
        if not worker_id:
            return

        # Tag the action as "in the worker i"
        action.my_worker = worker_id
        action.status = ACT_STATUS_QUEUED

        msg = Message(_type='Do', data=action, source=self.name)
        logger.debug("Queuing message: %s", msg)
        queue.put_nowait(msg)
        logger.debug("Queued")
[ "def", "assign_to_a_queue", "(", "self", ",", "action", ")", ":", "(", "worker_id", ",", "queue", ")", "=", "self", ".", "_get_queue_for_the_action", "(", "action", ")", "if", "not", "worker_id", ":", "return", "# Tag the action as \"in the worker i\"", "action", ".", "my_worker", "=", "worker_id", "action", ".", "status", "=", "ACT_STATUS_QUEUED", "msg", "=", "Message", "(", "_type", "=", "'Do'", ",", "data", "=", "action", ",", "source", "=", "self", ".", "name", ")", "logger", ".", "debug", "(", "\"Queuing message: %s\"", ",", "msg", ")", "queue", ".", "put_nowait", "(", "msg", ")", "logger", ".", "debug", "(", "\"Queued\"", ")" ]
32.210526
14.578947
def path_safe_spec(self):
        """
        :API: public
        """
        return ('{safe_spec_path}.{target_name}'
                .format(safe_spec_path=self._spec_path.replace(os.sep, '.'),
                        target_name=self._target_name.replace(os.sep, '.')))
[ "def", "path_safe_spec", "(", "self", ")", ":", "return", "(", "'{safe_spec_path}.{target_name}'", ".", "format", "(", "safe_spec_path", "=", "self", ".", "_spec_path", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", ",", "target_name", "=", "self", ".", "_target_name", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", ")", ")" ]
34.714286
15.285714
def total_charges(self):
        """ Represents the 'goods' acquired in the invoice. """
        selected_charges = Charge.objects \
            .filter(invoice=self) \
            .charges() \
            .exclude(product_code=CARRIED_FORWARD)
        return total_amount(selected_charges)
[ "def", "total_charges", "(", "self", ")", ":", "selected_charges", "=", "Charge", ".", "objects", ".", "filter", "(", "invoice", "=", "self", ")", ".", "charges", "(", ")", ".", "exclude", "(", "product_code", "=", "CARRIED_FORWARD", ")", "return", "total_amount", "(", "selected_charges", ")" ]
33.111111
7.777778
def currentValue(self):
        """
        Returns the current value for the widget.  If this widget is checkable
        then the bitwise OR of all checked item values is returned; otherwise,
        the selected value is returned.

        :return: <int>
        """
        enum = self.enum()
        if self.isCheckable():
            value = 0
            for i in self.checkedIndexes():
                value |= enum[nativestring(self.itemText(i))]
            return value
        else:
            try:
                return enum[nativestring(self.itemText(self.currentIndex()))]
            except KeyError:
                return 0
[ "def", "currentValue", "(", "self", ")", ":", "enum", "=", "self", ".", "enum", "(", ")", "if", "(", "self", ".", "isCheckable", "(", ")", ")", ":", "value", "=", "0", "for", "i", "in", "self", ".", "checkedIndexes", "(", ")", ":", "value", "|=", "enum", "[", "nativestring", "(", "self", ".", "itemText", "(", "i", ")", ")", "]", "return", "value", "else", ":", "try", ":", "return", "enum", "[", "nativestring", "(", "self", ".", "itemText", "(", "self", ".", "currentIndex", "(", ")", ")", ")", "]", "except", "KeyError", ":", "return", "0" ]
34.473684
17.526316
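A sketch of the bitwise-OR accumulation performed in the checkable branch; the enum mapping and the set of checked items are illustrative:

enum = {"Read": 1, "Write": 2, "Execute": 4}  # illustrative flag values

value = 0
for name in ("Read", "Execute"):  # stand-ins for the checked indexes
    value |= enum[name]
print(value)  # 5 == Read | Execute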
def portable_hash(x):
    """
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.

    The algorithm is similar to the one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """

    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0

    if isinstance(x, tuple):
        h = 0x345678
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)
[ "def", "portable_hash", "(", "x", ")", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "2", ",", "3", ")", "and", "'PYTHONHASHSEED'", "not", "in", "os", ".", "environ", ":", "raise", "Exception", "(", "\"Randomness of hash of string should be disabled via PYTHONHASHSEED\"", ")", "if", "x", "is", "None", ":", "return", "0", "if", "isinstance", "(", "x", ",", "tuple", ")", ":", "h", "=", "0x345678", "for", "i", "in", "x", ":", "h", "^=", "portable_hash", "(", "i", ")", "h", "*=", "1000003", "h", "&=", "sys", ".", "maxsize", "h", "^=", "len", "(", "x", ")", "if", "h", "==", "-", "1", ":", "h", "=", "-", "2", "return", "int", "(", "h", ")", "return", "hash", "(", "x", ")" ]
25.62069
22.448276
def get_shared(func):
    """Return the shared arguments declared on ``func``'s class."""
    shared = []
    if not hasattr(func, '__cls__'):
        return shared
    if not hasattr(func.__cls__, '__shared_arguments__'):
        return shared
    if hasattr(func, '__no_share__'):
        if func.__no_share__ is True:
            return shared
        else:
            shared += [
                s for s in func.__cls__.__shared_arguments__
                if (s[0][-1].replace('--', '').replace('-', '_'))
                not in func.__no_share__]
    else:
        shared = func.__cls__.__shared_arguments__
    return shared
[ "def", "get_shared", "(", "func", ")", ":", "shared", "=", "[", "]", "if", "not", "hasattr", "(", "func", ",", "'__cls__'", ")", ":", "return", "shared", "if", "not", "hasattr", "(", "func", ".", "__cls__", ",", "'__shared_arguments__'", ")", ":", "return", "shared", "if", "hasattr", "(", "func", ",", "'__no_share__'", ")", ":", "if", "func", ".", "__no_share__", "is", "True", ":", "return", "shared", "else", ":", "shared", "+=", "[", "s", "for", "s", "in", "func", ".", "__cls__", ".", "__shared_arguments__", "if", "(", "s", "[", "0", "]", "[", "-", "1", "]", ".", "replace", "(", "'--'", ",", "''", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "not", "in", "func", ".", "__no_share__", "]", "else", ":", "shared", "=", "func", ".", "__cls__", ".", "__shared_arguments__", "return", "shared" ]
29
15
def _getmember(self, name, tarinfo=None, normalize=False):
        """Find an archive member by name from bottom to top.
           If tarinfo is given, it is used as the starting point.
        """
        # Ensure that all members have been loaded.
        members = self.getmembers()

        # Limit the member search list up to tarinfo.
        if tarinfo is not None:
            members = members[:members.index(tarinfo)]

        if normalize:
            name = os.path.normpath(name)

        for member in reversed(members):
            if normalize:
                member_name = os.path.normpath(member.name)
            else:
                member_name = member.name

            if name == member_name:
                return member
[ "def", "_getmember", "(", "self", ",", "name", ",", "tarinfo", "=", "None", ",", "normalize", "=", "False", ")", ":", "# Ensure that all members have been loaded.", "members", "=", "self", ".", "getmembers", "(", ")", "# Limit the member search list up to tarinfo.", "if", "tarinfo", "is", "not", "None", ":", "members", "=", "members", "[", ":", "members", ".", "index", "(", "tarinfo", ")", "]", "if", "normalize", ":", "name", "=", "os", ".", "path", ".", "normpath", "(", "name", ")", "for", "member", "in", "reversed", "(", "members", ")", ":", "if", "normalize", ":", "member_name", "=", "os", ".", "path", ".", "normpath", "(", "member", ".", "name", ")", "else", ":", "member_name", "=", "member", ".", "name", "if", "name", "==", "member_name", ":", "return", "member" ]
33.045455
15.863636
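The bottom-to-top search means the most recently added member wins on name collisions; a runnable check with the standard library's tarfile:

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    for payload in (b"first", b"second"):
        info = tarfile.TarInfo(name="dup.txt")
        info.size = len(payload)
        tar.addfile(info, io.BytesIO(payload))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tar:
    member = tar.getmember("dup.txt")      # delegates to _getmember
    print(tar.extractfile(member).read())  # b'second' -- last occurrence wins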
def kubesync():
    """Communicate with kubernetes deployment via kubectl and save image names/IDs to local files"""

    ecode = 0
    try:
        images = anchore_utils.get_images_from_kubectl()
        if images:
            anchore_print("Writing image IDs to ./anchore_imageIds.kube")
            with open("anchore_imageIds.kube", 'w') as OFH:
                for imageId in images:
                    OFH.write(imageId + "\n")
            anchore_print("Writing image names to ./anchore_imageNames.kube")
            with open("anchore_imageNames.kube", 'w') as OFH:
                for imageId in images:
                    OFH.write(images[imageId] + "\n")
    except Exception as err:
        anchore_print_err("operation failed: " + str(err))
        ecode = 1

    sys.exit(ecode)
[ "def", "kubesync", "(", ")", ":", "ecode", "=", "0", "try", ":", "images", "=", "anchore_utils", ".", "get_images_from_kubectl", "(", ")", "if", "images", ":", "anchore_print", "(", "\"Writing image IDs to ./anchore_imageIds.kube\"", ")", "with", "open", "(", "\"anchore_imageIds.kube\"", ",", "'w'", ")", "as", "OFH", ":", "for", "imageId", "in", "images", ":", "OFH", ".", "write", "(", "imageId", "+", "\"\\n\"", ")", "anchore_print", "(", "\"Writing image names to ./anchore_imageNames.kube\"", ")", "with", "open", "(", "\"anchore_imageNames.kube\"", ",", "'w'", ")", "as", "OFH", ":", "for", "imageId", "in", "images", ":", "OFH", ".", "write", "(", "images", "[", "imageId", "]", "+", "\"\\n\"", ")", "except", "Exception", "as", "err", ":", "anchore_print_err", "(", "\"operation failed: \"", "+", "str", "(", "err", ")", ")", "ecode", "=", "1", "sys", ".", "exit", "(", "ecode", ")" ]
36.181818
21.272727
def get_straat_by_id(self, id):
        '''
        Retrieve a `straat` by the Id.

        :param integer id: The id of the `straat`.
        :rtype: :class:`Straat`
        '''
        def creator():
            res = crab_gateway_request(
                self.client, 'GetStraatnaamWithStatusByStraatnaamId', id
            )
            if res is None:
                raise GatewayResourceNotFoundException()
            return Straat(
                res.StraatnaamId,
                res.StraatnaamLabel,
                res.GemeenteId,
                res.StatusStraatnaam,
                res.Straatnaam,
                res.TaalCode,
                res.StraatnaamTweedeTaal,
                res.TaalCodeTweedeTaal,
                Metadata(
                    res.BeginDatum,
                    res.BeginTijd,
                    self.get_bewerking(res.BeginBewerking),
                    self.get_organisatie(res.BeginOrganisatie)
                )
            )
        if self.caches['long'].is_configured:
            key = 'GetStraatnaamWithStatusByStraatnaamId#%s' % (id)
            straat = self.caches['long'].get_or_create(key, creator)
        else:
            straat = creator()
        straat.set_gateway(self)
        return straat
[ "def", "get_straat_by_id", "(", "self", ",", "id", ")", ":", "def", "creator", "(", ")", ":", "res", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'GetStraatnaamWithStatusByStraatnaamId'", ",", "id", ")", "if", "res", "==", "None", ":", "raise", "GatewayResourceNotFoundException", "(", ")", "return", "Straat", "(", "res", ".", "StraatnaamId", ",", "res", ".", "StraatnaamLabel", ",", "res", ".", "GemeenteId", ",", "res", ".", "StatusStraatnaam", ",", "res", ".", "Straatnaam", ",", "res", ".", "TaalCode", ",", "res", ".", "StraatnaamTweedeTaal", ",", "res", ".", "TaalCodeTweedeTaal", ",", "Metadata", "(", "res", ".", "BeginDatum", ",", "res", ".", "BeginTijd", ",", "self", ".", "get_bewerking", "(", "res", ".", "BeginBewerking", ")", ",", "self", ".", "get_organisatie", "(", "res", ".", "BeginOrganisatie", ")", ")", ")", "if", "self", ".", "caches", "[", "'long'", "]", ".", "is_configured", ":", "key", "=", "'GetStraatnaamWithStatusByStraatnaamId#%s'", "%", "(", "id", ")", "straat", "=", "self", ".", "caches", "[", "'long'", "]", ".", "get_or_create", "(", "key", ",", "creator", ")", "else", ":", "straat", "=", "creator", "(", ")", "straat", ".", "set_gateway", "(", "self", ")", "return", "straat" ]
33.135135
15.567568
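The caching above follows a get-or-create pattern; a minimal sketch with a plain dict standing in for the configured cache region:

cache = {}

def get_or_create(key, creator):
    if key not in cache:
        cache[key] = creator()  # creator only runs on a cache miss
    return cache[key]

print(get_or_create("Straatnaam#1", lambda: "expensive gateway lookup"))
print(get_or_create("Straatnaam#1", lambda: "never evaluated"))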
def get_path_for_termid(self, termid):
        """
        This function returns the path (in terms of phrase types) from one
        term to the root.

        @type termid: string
        @param termid: one term id
        @rtype: list
        @return: the path, a list of phrase types
        """
        terminal_id = self.terminal_for_term.get(termid)
        paths = self.paths_for_terminal[terminal_id]
        labels = [self.label_for_nonter[nonter] for nonter in paths[0]]
        return labels
[ "def", "get_path_for_termid", "(", "self", ",", "termid", ")", ":", "terminal_id", "=", "self", ".", "terminal_for_term", ".", "get", "(", "termid", ")", "paths", "=", "self", ".", "paths_for_terminal", "[", "terminal_id", "]", "labels", "=", "[", "self", ".", "label_for_nonter", "[", "nonter", "]", "for", "nonter", "in", "paths", "[", "0", "]", "]", "return", "labels" ]
40.083333
14.916667
def set_mypy_path(mypy_path):
    """Prepend to MYPYPATH."""
    original = os.environ.get(mypy_path_env_var)
    if original is None:
        new_mypy_path = mypy_path
    elif not original.startswith(mypy_path):
        new_mypy_path = mypy_path + os.pathsep + original
    else:
        new_mypy_path = None
    if new_mypy_path is not None:
        logger.log(mypy_path_env_var + ":", new_mypy_path)
        os.environ[mypy_path_env_var] = new_mypy_path
[ "def", "set_mypy_path", "(", "mypy_path", ")", ":", "original", "=", "os", ".", "environ", ".", "get", "(", "mypy_path_env_var", ")", "if", "original", "is", "None", ":", "new_mypy_path", "=", "mypy_path", "elif", "not", "original", ".", "startswith", "(", "mypy_path", ")", ":", "new_mypy_path", "=", "mypy_path", "+", "os", ".", "pathsep", "+", "original", "else", ":", "new_mypy_path", "=", "None", "if", "new_mypy_path", "is", "not", "None", ":", "logger", ".", "log", "(", "mypy_path_env_var", "+", "\":\"", ",", "new_mypy_path", ")", "os", ".", "environ", "[", "mypy_path_env_var", "]", "=", "new_mypy_path" ]
37.166667
12
def fail_on_template_errors(f, *args, **kwargs):
    """
    Decorator that causes templates to fail on template errors.
    """
    decorators = [
        _fail_template_string_if_invalid,
        _always_strict_resolve,
        _disallow_catching_UnicodeDecodeError,
    ]
    if django.VERSION < (1, 8):
        decorators.append(_patch_invalid_var_format_string)

    return reduce(__apply, decorators, f)(*args, **kwargs)
[ "def", "fail_on_template_errors", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "decorators", "=", "[", "_fail_template_string_if_invalid", ",", "_always_strict_resolve", ",", "_disallow_catching_UnicodeDecodeError", ",", "]", "if", "django", ".", "VERSION", "<", "(", "1", ",", "8", ")", ":", "decorators", ".", "append", "(", "_patch_invalid_var_format_string", ")", "return", "reduce", "(", "__apply", ",", "decorators", ",", "f", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
31.846154
14.615385
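The final reduce(__apply, decorators, f) line composes the decorator list onto f; a self-contained sketch of that composition with toy decorators, assuming __apply has the shape lambda func, deco: deco(func):

from functools import reduce

def shout(f):
    return lambda *a, **kw: f(*a, **kw).upper()

def exclaim(f):
    return lambda *a, **kw: f(*a, **kw) + "!"

def greet():
    return "hello"

apply_deco = lambda func, deco: deco(func)  # assumed shape of __apply
composed = reduce(apply_deco, [shout, exclaim], greet)
print(composed())  # HELLO!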
def is_definition(cursor):
    """Test if a cursor refers to a definition.

    This occurs when the cursor has a definition and shares the location
    of that definition.
    """
    defn = cursor.get_definition()
    return (defn is not None) and (cursor.location == defn.location)
[ "def", "is_definition", "(", "cursor", ")", ":", "defn", "=", "cursor", ".", "get_definition", "(", ")", "return", "(", "defn", "is", "not", "None", ")", "and", "(", "cursor", ".", "location", "==", "defn", ".", "location", ")" ]
38.857143
19.857143