Dataset columns:
    text          string   (lengths 89 to 104k)
    code_tokens   list
    avg_line_len  float64  (min 7.91, max 980)
    score         float64  (min 0, max 630)
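The rows that follow pair each function's source (text) with its token list (code_tokens) and two per-row statistics. A minimal sketch of what avg_line_len plausibly measures, as an inference from the column name and value ranges rather than a documented definition:

# Hypothetical derivation of the avg_line_len column from the text column;
# the dataset's actual definition is assumed, not documented here.
def avg_line_len(text: str) -> float:
    lines = text.split("\n")
    return sum(len(line) for line in lines) / len(lines)

print(avg_line_len("def f(x):\n    return x + 1"))  # 12.5 for this toy snippet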
def get_default_ref(repo):
    """Return a `github.GitRef` object for the HEAD of the default branch.

    Parameters
    ----------
    repo: github.Repository.Repository
        repo to get default branch head ref from

    Returns
    -------
    head : :class:`github.GitRef` instance

    Raises
    ------
    github.RateLimitExceededException
    codekit.pygithub.CaughtRepositoryError
    """
    assert isinstance(repo, github.Repository.Repository), type(repo)

    # XXX this probably should be resolved via repos.yaml
    default_branch = repo.default_branch
    default_branch_ref = "heads/{ref}".format(ref=default_branch)

    # if accessing the default branch fails something is seriously wrong...
    try:
        head = repo.get_git_ref(default_branch_ref)
    except github.RateLimitExceededException:
        raise
    except github.GithubException as e:
        msg = "error getting ref: {ref}".format(ref=default_branch_ref)
        raise CaughtRepositoryError(repo, e, msg) from None

    return head
[ "def", "get_default_ref", "(", "repo", ")", ":", "assert", "isinstance", "(", "repo", ",", "github", ".", "Repository", ".", "Repository", ")", ",", "type", "(", "repo", ")", "# XXX this probably should be resolved via repos.yaml", "default_branch", "=", "repo", ".", "default_branch", "default_branch_ref", "=", "\"heads/{ref}\"", ".", "format", "(", "ref", "=", "default_branch", ")", "# if accessing the default branch fails something is seriously wrong...", "try", ":", "head", "=", "repo", ".", "get_git_ref", "(", "default_branch_ref", ")", "except", "github", ".", "RateLimitExceededException", ":", "raise", "except", "github", ".", "GithubException", "as", "e", ":", "msg", "=", "\"error getting ref: {ref}\"", ".", "format", "(", "ref", "=", "default_branch_ref", ")", "raise", "CaughtRepositoryError", "(", "repo", ",", "e", ",", "msg", ")", "from", "None", "return", "head" ]
30.030303
21.151515
def _add_include_arg(arg_parser):
    """
    Adds an optional, repeatable include parameter to a parser.

    :param arg_parser: ArgumentParser
        parser to add this argument to.
    """
    arg_parser.add_argument("--include",
                            metavar='Path',
                            action='append',
                            type=to_unicode,
                            dest='include_paths',
                            help="Specifies a single path to include. This argument can be repeated.",
                            default=[])
[ "def", "_add_include_arg", "(", "arg_parser", ")", ":", "arg_parser", ".", "add_argument", "(", "\"--include\"", ",", "metavar", "=", "'Path'", ",", "action", "=", "'append'", ",", "type", "=", "to_unicode", ",", "dest", "=", "'include_paths'", ",", "help", "=", "\"Specifies a single path to include. This argument can be repeated.\"", ",", "default", "=", "[", "]", ")" ]
44.666667
11.5
def split_params(sym, params):
    """Helper function to split a params dictionary into arg and aux params

    Parameters
    ----------
    sym : :class:`~mxnet.symbol.Symbol`
        MXNet symbol object
    params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format

    Returns
    -------
    arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of argument parameters stored in ``mxnet.ndarray.NDArray`` format
    aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of auxiliary-state parameters stored in ``mxnet.ndarray.NDArray`` format
    """
    arg_params = {}
    aux_params = {}
    for args in sym.list_arguments():
        if args in params:
            arg_params.update({args: nd.array(params[args])})
    for aux in sym.list_auxiliary_states():
        if aux in params:
            aux_params.update({aux: nd.array(params[aux])})
    return arg_params, aux_params
[ "def", "split_params", "(", "sym", ",", "params", ")", ":", "arg_params", "=", "{", "}", "aux_params", "=", "{", "}", "for", "args", "in", "sym", ".", "list_arguments", "(", ")", ":", "if", "args", "in", "params", ":", "arg_params", ".", "update", "(", "{", "args", ":", "nd", ".", "array", "(", "params", "[", "args", "]", ")", "}", ")", "for", "aux", "in", "sym", ".", "list_auxiliary_states", "(", ")", ":", "if", "aux", "in", "params", ":", "aux_params", ".", "update", "(", "{", "aux", ":", "nd", ".", "array", "(", "params", "[", "aux", "]", ")", "}", ")", "return", "arg_params", ",", "aux_params" ]
41.346154
20.307692
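A usage sketch for split_params, assuming MXNet is installed and that sym/params look like the toy pair below (the layer name and values are illustrative only):

import mxnet as mx
from mxnet import nd

data = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(data, num_hidden=2, name='fc')

params = {'fc_weight': [[1.0, 2.0], [3.0, 4.0]], 'fc_bias': [0.0, 0.0]}
arg_params, aux_params = split_params(sym, params)
print(sorted(arg_params))   # ['fc_bias', 'fc_weight']; aux_params is empty here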
def calledThrice(cls, spy):  # pylint: disable=invalid-name
    """
    Checks that the inspected spy was called exactly three times

    Args:
        spy: SinonSpy
    """
    cls.__is_spy(spy)
    if not (spy.calledThrice):
        raise cls.failException(cls.message)
[ "def", "calledThrice", "(", "cls", ",", "spy", ")", ":", "#pylint: disable=invalid-name", "cls", ".", "__is_spy", "(", "spy", ")", "if", "not", "(", "spy", ".", "calledThrice", ")", ":", "raise", "cls", ".", "failException", "(", "cls", ".", "message", ")" ]
31.875
8.875
def Fstar(self, value):
    """ set fixed effect design for predictions """
    if value is None:
        self._use_to_predict = False
    else:
        assert value.shape[1] == self._K, 'Dimension mismatch'
        self._use_to_predict = True
    self._Fstar = value
    self.clear_cache('predict')
[ "def", "Fstar", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "self", ".", "_use_to_predict", "=", "False", "else", ":", "assert", "value", ".", "shape", "[", "1", "]", "==", "self", ".", "_K", ",", "'Dimension mismatch'", "self", ".", "_use_to_predict", "=", "True", "self", ".", "_Fstar", "=", "value", "self", ".", "clear_cache", "(", "'predict'", ")" ]
35.888889
11.555556
def is_directory(self):
    """
    :return: whether this task is associated with a directory.
    :rtype: bool
    """
    if self.cid is None:
        msg = 'Cannot determine whether this task is a directory.'
        if not self.is_transferred:
            msg += ' This task has not been transferred.'
        raise TaskError(msg)
    return self.api.downloads_directory.cid != self.cid
[ "def", "is_directory", "(", "self", ")", ":", "if", "self", ".", "cid", "is", "None", ":", "msg", "=", "'Cannot determine whether this task is a directory.'", "if", "not", "self", ".", "is_transferred", ":", "msg", "+=", "' This task has not been transferred.'", "raise", "TaskError", "(", "msg", ")", "return", "self", ".", "api", ".", "downloads_directory", ".", "cid", "!=", "self", ".", "cid" ]
38.181818
14
def load_config(path=None):
    """Choose and return the config path and its contents as dict."""
    # NOTE: Initially I wanted to inherit Path to encapsulate Git access
    # there but there's no easy way to subclass pathlib.Path :(
    head_sha = get_sha1_from("HEAD")
    revision = head_sha
    saved_config_path = load_val_from_git_cfg("config_path")
    if not path and saved_config_path is not None:
        path = saved_config_path

    if path is None:
        path = find_config(revision=revision)
    else:
        if ":" not in path:
            path = f"{head_sha}:{path}"
        revision, _col, _path = path.partition(":")
        if not revision:
            revision = head_sha

    config = DEFAULT_CONFIG
    if path is not None:
        config_text = from_git_rev_read(path)
        d = toml.loads(config_text)
        config = config.new_child(d)

    return path, config
[ "def", "load_config", "(", "path", "=", "None", ")", ":", "# NOTE: Initially I wanted to inherit Path to encapsulate Git access", "# there but there's no easy way to subclass pathlib.Path :(", "head_sha", "=", "get_sha1_from", "(", "\"HEAD\"", ")", "revision", "=", "head_sha", "saved_config_path", "=", "load_val_from_git_cfg", "(", "\"config_path\"", ")", "if", "not", "path", "and", "saved_config_path", "is", "not", "None", ":", "path", "=", "saved_config_path", "if", "path", "is", "None", ":", "path", "=", "find_config", "(", "revision", "=", "revision", ")", "else", ":", "if", "\":\"", "not", "in", "path", ":", "path", "=", "f\"{head_sha}:{path}\"", "revision", ",", "_col", ",", "_path", "=", "path", ".", "partition", "(", "\":\"", ")", "if", "not", "revision", ":", "revision", "=", "head_sha", "config", "=", "DEFAULT_CONFIG", "if", "path", "is", "not", "None", ":", "config_text", "=", "from_git_rev_read", "(", "path", ")", "d", "=", "toml", ".", "loads", "(", "config_text", ")", "config", "=", "config", ".", "new_child", "(", "d", ")", "return", "path", ",", "config" ]
31.464286
17.464286
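The revision handling above hinges on str.partition over a "<sha>:<path>" spec; a stand-alone check with made-up values (the SHA and path are hypothetical):

spec = "deadbeef:pyproject.toml"   # hypothetical "<sha>:<path>" value
revision, _col, rel_path = spec.partition(":")
assert (revision, rel_path) == ("deadbeef", "pyproject.toml")

# A bare ":path" yields an empty revision, which the code above
# replaces with head_sha.
revision, _col, rel_path = ":pyproject.toml".partition(":")
assert revision == ""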
def main(*args, **kwargs):
    """
    `kwargs`:
        `configuration_filepath`: filepath for the `ini` configuration
    """
    kwargs = {**kwargs, **_get_kwargs()}
    # FIXME: This filepath handling is messed up and not as transparent as it should be
    default_filepath = get_config_filepath()
    configuration_filepath = kwargs.get('configuration_filepath')
    if configuration_filepath is None:
        configuration_filepath = default_filepath

    # configuration is from an `ini` file
    configuration = _get_config(configuration_filepath)
    # set up some sane defaults
    robot_name = _configuration_sane_defaults(configuration)
    # get the port configuration out of the configuration
    port_config = _port_configuration_helper(configuration)
    # create the settings manager using the port config
    if _setproctitle:
        _setproctitle.setproctitle('vexbot')
    robot = Robot(robot_name, port_config)
    robot.run()
[ "def", "main", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "{", "*", "*", "kwargs", ",", "*", "*", "_get_kwargs", "(", ")", "}", "# FIXME: This filepath handeling is messed up and not transparent as it should be", "default_filepath", "=", "get_config_filepath", "(", ")", "configuration_filepath", "=", "kwargs", ".", "get", "(", "'configuration_filepath'", ")", "if", "configuration_filepath", "is", "None", ":", "configuration_filepath", "=", "default_filepath", "# configuration is from an `ini` file", "configuration", "=", "_get_config", "(", "configuration_filepath", ")", "# setup some sane defaults", "robot_name", "=", "_configuration_sane_defaults", "(", "configuration", ")", "# Get the port configuration out of the configuration", "port_config", "=", "_port_configuration_helper", "(", "configuration", ")", "# create the settings manager using the port config", "if", "_setproctitle", ":", "_setproctitle", ".", "setproctitle", "(", "'vexbot'", ")", "robot", "=", "Robot", "(", "robot_name", ",", "port_config", ")", "robot", ".", "run", "(", ")" ]
38.458333
15.958333
def get_reserved_ip_address(self, name):
    '''
    Retrieves information about the specified reserved IP address.

    name:
        Required. Name of the reserved IP address.
    '''
    _validate_not_none('name', name)
    return self._perform_get(self._get_reserved_ip_path(name), ReservedIP)
[ "def", "get_reserved_ip_address", "(", "self", ",", "name", ")", ":", "_validate_not_none", "(", "'name'", ",", "name", ")", "return", "self", ".", "_perform_get", "(", "self", ".", "_get_reserved_ip_path", "(", "name", ")", ",", "ReservedIP", ")" ]
35.222222
23
def download_bundle_view(self, request, pk):
    """A view that allows the user to download a certificate bundle in PEM format."""
    return self._download_response(request, pk, bundle=True)
[ "def", "download_bundle_view", "(", "self", ",", "request", ",", "pk", ")", ":", "return", "self", ".", "_download_response", "(", "request", ",", "pk", ",", "bundle", "=", "True", ")" ]
49.25
17
def correlation_model(prediction, fm):
    """
    wraps numpy.corrcoef functionality for model evaluation

    input:
        prediction: 2D Matrix
            the model salience map
        fm: fixmat
            Used to compute a FDM to which the prediction is compared.
    """
    (_, r_x) = calc_resize_factor(prediction, fm.image_size)
    fdm = compute_fdm(fm, scale_factor=r_x)
    return np.corrcoef(fdm.flatten(), prediction.flatten())[0, 1]
[ "def", "correlation_model", "(", "prediction", ",", "fm", ")", ":", "(", "_", ",", "r_x", ")", "=", "calc_resize_factor", "(", "prediction", ",", "fm", ".", "image_size", ")", "fdm", "=", "compute_fdm", "(", "fm", ",", "scale_factor", "=", "r_x", ")", "return", "np", ".", "corrcoef", "(", "fdm", ".", "flatten", "(", ")", ",", "prediction", ".", "flatten", "(", ")", ")", "[", "0", ",", "1", "]" ]
33.923077
16.076923
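The metric is just the Pearson correlation of the flattened maps, so it can be sanity-checked with random arrays and no fixmat at all:

import numpy as np

rng = np.random.default_rng(0)
a = rng.random((10, 10))
b = a + 0.05 * rng.random((10, 10))   # a noisy copy, so correlation is high
print(np.corrcoef(a.flatten(), b.flatten())[0, 1])   # close to 1.0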
def duplicate_files(self):
    '''
    Search for duplicates of submission file uploads for this assignment.
    This includes searching in other courses, whether inactive or not.

    Returns a list of lists, where each inner list is a set of duplicate
    submissions with at least one of them for this assignment.
    '''
    result = list()
    files = SubmissionFile.valid_ones.order_by('md5')

    for key, dup_group in groupby(files, lambda f: f.md5):
        file_list = [entry for entry in dup_group]
        if len(file_list) > 1:
            for entry in file_list:
                if entry.submissions.filter(assignment=self).count() > 0:
                    result.append([key, file_list])
                    break
    return result
[ "def", "duplicate_files", "(", "self", ")", ":", "result", "=", "list", "(", ")", "files", "=", "SubmissionFile", ".", "valid_ones", ".", "order_by", "(", "'md5'", ")", "for", "key", ",", "dup_group", "in", "groupby", "(", "files", ",", "lambda", "f", ":", "f", ".", "md5", ")", ":", "file_list", "=", "[", "entry", "for", "entry", "in", "dup_group", "]", "if", "len", "(", "file_list", ")", ">", "1", ":", "for", "entry", "in", "file_list", ":", "if", "entry", ".", "submissions", ".", "filter", "(", "assignment", "=", "self", ")", ".", "count", "(", ")", ">", "0", ":", "result", ".", "append", "(", "[", "key", ",", "file_list", "]", ")", "break", "return", "result" ]
43.222222
22.111111
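The md5 grouping above relies on itertools.groupby over md5-sorted rows; the same pattern on plain tuples, with no Django models involved:

from itertools import groupby

# (name, md5) tuples sorted by md5, mirroring the order_by('md5') above.
files = [("a.py", "111"), ("b.py", "111"), ("c.py", "222")]
for md5, group in groupby(files, key=lambda f: f[1]):
    members = list(group)
    if len(members) > 1:
        print(md5, [name for name, _ in members])   # 111 ['a.py', 'b.py']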
def has_output(state, text, pattern=True, no_output_msg=None):
    """Search student output for a pattern.

    Among the student and solution process, the student submission and
    solution code as a string, the ``Ex()`` state also contains the output
    that a student generated with his or her submission.

    With ``has_output()``, you can access this output and match it against a
    regular expression or a fixed string.

    Args:
        text (str): the text that is searched for
        pattern (bool): if True (default), the text is treated as a pattern.
            If False, it is treated as plain text.
        no_output_msg (str): feedback message to be displayed if the output
            is not found.

    :Example:

        As an example, suppose we want a student to print out a sentence: ::

            # Print the "This is some ... stuff"
            print("This is some weird stuff")

        The following SCT tests whether the student prints out
        ``This is some weird stuff``: ::

            # Using exact string matching
            Ex().has_output("This is some weird stuff", pattern = False)

            # Using a regular expression (more robust)
            # pattern = True is the default
            msg = "Print out ``This is some ... stuff`` to the output, " + \\
                  "fill in ``...`` with a word you like."
            Ex().has_output(r"This is some \w* stuff", no_output_msg = msg)

    """
    if not no_output_msg:
        no_output_msg = "You did not output the correct things."

    _msg = state.build_message(no_output_msg)
    state.do_test(StringContainsTest(state.raw_student_output, text, pattern, _msg))

    return state
[ "def", "has_output", "(", "state", ",", "text", ",", "pattern", "=", "True", ",", "no_output_msg", "=", "None", ")", ":", "if", "not", "no_output_msg", ":", "no_output_msg", "=", "\"You did not output the correct things.\"", "_msg", "=", "state", ".", "build_message", "(", "no_output_msg", ")", "state", ".", "do_test", "(", "StringContainsTest", "(", "state", ".", "raw_student_output", ",", "text", ",", "pattern", ",", "_msg", ")", ")", "return", "state" ]
41.179487
31.538462
def start_activity(self, appPackage, appActivity, **opts):
    """Opens an arbitrary activity during a test. If the activity belongs
    to another application, that application is started and the activity is opened.

    Android only.

    - _appPackage_ - The package containing the activity to start.
    - _appActivity_ - The activity to start.
    - _appWaitPackage_ - Begin automation after this package starts (optional).
    - _appWaitActivity_ - Begin automation after this activity starts (optional).
    - _intentAction_ - Intent to start (optional).
    - _intentCategory_ - Intent category to start (optional).
    - _intentFlags_ - Flags to send to the intent (optional).
    - _optionalIntentArguments_ - Optional arguments to the intent (optional).
    - _stopAppOnReset_ - Should the app be stopped on reset (optional)?
    """
    # Almost the same code as in appium's start activity,
    # just to keep the same keyword names as in open application
    arguments = {
        'app_wait_package': 'appWaitPackage',
        'app_wait_activity': 'appWaitActivity',
        'intent_action': 'intentAction',
        'intent_category': 'intentCategory',
        'intent_flags': 'intentFlags',
        'optional_intent_arguments': 'optionalIntentArguments',
        'stop_app_on_reset': 'stopAppOnReset'
    }
    data = {}
    for key, value in arguments.items():
        if value in opts:
            data[key] = opts[value]
    driver = self._current_application()
    driver.start_activity(app_package=appPackage, app_activity=appActivity, **data)
[ "def", "start_activity", "(", "self", ",", "appPackage", ",", "appActivity", ",", "*", "*", "opts", ")", ":", "# Almost the same code as in appium's start activity,", "# just to keep the same keyword names as in open application", "arguments", "=", "{", "'app_wait_package'", ":", "'appWaitPackage'", ",", "'app_wait_activity'", ":", "'appWaitActivity'", ",", "'intent_action'", ":", "'intentAction'", ",", "'intent_category'", ":", "'intentCategory'", ",", "'intent_flags'", ":", "'intentFlags'", ",", "'optional_intent_arguments'", ":", "'optionalIntentArguments'", ",", "'stop_app_on_reset'", ":", "'stopAppOnReset'", "}", "data", "=", "{", "}", "for", "key", ",", "value", "in", "arguments", ".", "items", "(", ")", ":", "if", "value", "in", "opts", ":", "data", "[", "key", "]", "=", "opts", "[", "value", "]", "driver", "=", "self", ".", "_current_application", "(", ")", "driver", ".", "start_activity", "(", "app_package", "=", "appPackage", ",", "app_activity", "=", "appActivity", ",", "*", "*", "data", ")" ]
41.275
24.2
def boxify(message, border_color=None):
    """Put a message inside a box.

    Args:
        message (unicode): message to decorate.
        border_color (unicode): name of the color to outline the box with.
    """
    lines = message.split("\n")
    max_width = max(_visual_width(line) for line in lines)

    padding_horizontal = 5
    padding_vertical = 1

    box_size_horizontal = max_width + (padding_horizontal * 2)

    chars = {"corner": "+", "horizontal": "-", "vertical": "|", "empty": " "}

    margin = "{corner}{line}{corner}\n".format(
        corner=chars["corner"], line=chars["horizontal"] * box_size_horizontal
    )

    padding_lines = [
        "{border}{space}{border}\n".format(
            border=colorize(chars["vertical"], color=border_color),
            space=chars["empty"] * box_size_horizontal,
        )
        * padding_vertical
    ]

    content_lines = [
        "{border}{space}{content}{space}{border}\n".format(
            border=colorize(chars["vertical"], color=border_color),
            space=chars["empty"] * padding_horizontal,
            content=_visual_center(line, max_width),
        )
        for line in lines
    ]

    box_str = "{margin}{padding}{content}{padding}{margin}".format(
        margin=colorize(margin, color=border_color),
        padding="".join(padding_lines),
        content="".join(content_lines),
    )

    return box_str
[ "def", "boxify", "(", "message", ",", "border_color", "=", "None", ")", ":", "lines", "=", "message", ".", "split", "(", "\"\\n\"", ")", "max_width", "=", "max", "(", "_visual_width", "(", "line", ")", "for", "line", "in", "lines", ")", "padding_horizontal", "=", "5", "padding_vertical", "=", "1", "box_size_horizontal", "=", "max_width", "+", "(", "padding_horizontal", "*", "2", ")", "chars", "=", "{", "\"corner\"", ":", "\"+\"", ",", "\"horizontal\"", ":", "\"-\"", ",", "\"vertical\"", ":", "\"|\"", ",", "\"empty\"", ":", "\" \"", "}", "margin", "=", "\"{corner}{line}{corner}\\n\"", ".", "format", "(", "corner", "=", "chars", "[", "\"corner\"", "]", ",", "line", "=", "chars", "[", "\"horizontal\"", "]", "*", "box_size_horizontal", ")", "padding_lines", "=", "[", "\"{border}{space}{border}\\n\"", ".", "format", "(", "border", "=", "colorize", "(", "chars", "[", "\"vertical\"", "]", ",", "color", "=", "border_color", ")", ",", "space", "=", "chars", "[", "\"empty\"", "]", "*", "box_size_horizontal", ",", ")", "*", "padding_vertical", "]", "content_lines", "=", "[", "\"{border}{space}{content}{space}{border}\\n\"", ".", "format", "(", "border", "=", "colorize", "(", "chars", "[", "\"vertical\"", "]", ",", "color", "=", "border_color", ")", ",", "space", "=", "chars", "[", "\"empty\"", "]", "*", "padding_horizontal", ",", "content", "=", "_visual_center", "(", "line", ",", "max_width", ")", ",", ")", "for", "line", "in", "lines", "]", "box_str", "=", "\"{margin}{padding}{content}{padding}{margin}\"", ".", "format", "(", "margin", "=", "colorize", "(", "margin", ",", "color", "=", "border_color", ")", ",", "padding", "=", "\"\"", ".", "join", "(", "padding_lines", ")", ",", "content", "=", "\"\"", ".", "join", "(", "content_lines", ")", ",", ")", "return", "box_str" ]
30.133333
23.177778
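A quick usage check, assuming colorize returns its input unchanged when no color is given (so the border falls back to plain ASCII):

print(boxify("Hello"))
# +---------------+
# |               |
# |     Hello     |
# |               |
# +---------------+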
def _transientSchedule(self, when, now):
    """
    If the service is currently running, schedule a tick to happen no
    later than C{when}.

    @param when: The time at which to tick.
    @type when: L{epsilon.extime.Time}

    @param now: The current time.
    @type now: L{epsilon.extime.Time}
    """
    if not self.running:
        return
    if self.timer is not None:
        if self.timer.getTime() < when.asPOSIXTimestamp():
            return
        self.timer.cancel()
    delay = when.asPOSIXTimestamp() - now.asPOSIXTimestamp()

    # reactor.callLater allows only positive delay values. The scheduler
    # may want to have scheduled things in the past and that's OK, since we
    # are dealing with Time() instances it's impossible to predict what
    # they are relative to the current time from user code anyway.
    delay = max(_EPSILON, delay)
    self.timer = self.callLater(delay, self.tick)
    self.nextEventAt = when
[ "def", "_transientSchedule", "(", "self", ",", "when", ",", "now", ")", ":", "if", "not", "self", ".", "running", ":", "return", "if", "self", ".", "timer", "is", "not", "None", ":", "if", "self", ".", "timer", ".", "getTime", "(", ")", "<", "when", ".", "asPOSIXTimestamp", "(", ")", ":", "return", "self", ".", "timer", ".", "cancel", "(", ")", "delay", "=", "when", ".", "asPOSIXTimestamp", "(", ")", "-", "now", ".", "asPOSIXTimestamp", "(", ")", "# reactor.callLater allows only positive delay values. The scheduler", "# may want to have scheduled things in the past and that's OK, since we", "# are dealing with Time() instances it's impossible to predict what", "# they are relative to the current time from user code anyway.", "delay", "=", "max", "(", "_EPSILON", ",", "delay", ")", "self", ".", "timer", "=", "self", ".", "callLater", "(", "delay", ",", "self", ".", "tick", ")", "self", ".", "nextEventAt", "=", "when" ]
38.807692
17.653846
def f_remove(self, recursive=True, predicate=None):
    """Recursively removes all children of the trajectory

    :param recursive:
        Only here for consistency with the signature of the parent method.
        Cannot be set to `False` because the trajectory root node cannot
        be removed.

    :param predicate:
        Predicate which can evaluate for each node to ``True`` in order to
        remove the node or ``False`` if the node should be kept. Leave
        ``None`` if you want to remove all nodes.
    """
    if not recursive:
        raise ValueError('Nice try ;-)')

    for child in list(self._children.keys()):
        self.f_remove_child(child, recursive=True, predicate=predicate)
[ "def", "f_remove", "(", "self", ",", "recursive", "=", "True", ",", "predicate", "=", "None", ")", ":", "if", "not", "recursive", ":", "raise", "ValueError", "(", "'Nice try ;-)'", ")", "for", "child", "in", "list", "(", "self", ".", "_children", ".", "keys", "(", ")", ")", ":", "self", ".", "f_remove_child", "(", "child", ",", "recursive", "=", "True", ",", "predicate", "=", "predicate", ")" ]
37.789474
28.210526
def to_dict(self):
    """
    Convert current Task into a dictionary

    :return: python dictionary
    """
    task_desc_as_dict = {
        'uid': self._uid,
        'name': self._name,
        'state': self._state,
        'state_history': self._state_history,
        'pre_exec': self._pre_exec,
        'executable': self._executable,
        'arguments': self._arguments,
        'post_exec': self._post_exec,
        'cpu_reqs': self._cpu_reqs,
        'gpu_reqs': self._gpu_reqs,
        'lfs_per_process': self._lfs_per_process,
        'upload_input_data': self._upload_input_data,
        'copy_input_data': self._copy_input_data,
        'link_input_data': self._link_input_data,
        'move_input_data': self._move_input_data,
        'copy_output_data': self._copy_output_data,
        'move_output_data': self._move_output_data,
        'download_output_data': self._download_output_data,
        'stdout': self._stdout,
        'stderr': self._stderr,
        'exit_code': self._exit_code,
        'path': self._path,
        'tag': self._tag,
        'parent_stage': self._p_stage,
        'parent_pipeline': self._p_pipeline,
    }
    return task_desc_as_dict
[ "def", "to_dict", "(", "self", ")", ":", "task_desc_as_dict", "=", "{", "'uid'", ":", "self", ".", "_uid", ",", "'name'", ":", "self", ".", "_name", ",", "'state'", ":", "self", ".", "_state", ",", "'state_history'", ":", "self", ".", "_state_history", ",", "'pre_exec'", ":", "self", ".", "_pre_exec", ",", "'executable'", ":", "self", ".", "_executable", ",", "'arguments'", ":", "self", ".", "_arguments", ",", "'post_exec'", ":", "self", ".", "_post_exec", ",", "'cpu_reqs'", ":", "self", ".", "_cpu_reqs", ",", "'gpu_reqs'", ":", "self", ".", "_gpu_reqs", ",", "'lfs_per_process'", ":", "self", ".", "_lfs_per_process", ",", "'upload_input_data'", ":", "self", ".", "_upload_input_data", ",", "'copy_input_data'", ":", "self", ".", "_copy_input_data", ",", "'link_input_data'", ":", "self", ".", "_link_input_data", ",", "'move_input_data'", ":", "self", ".", "_move_input_data", ",", "'copy_output_data'", ":", "self", ".", "_copy_output_data", ",", "'move_output_data'", ":", "self", ".", "_move_output_data", ",", "'download_output_data'", ":", "self", ".", "_download_output_data", ",", "'stdout'", ":", "self", ".", "_stdout", ",", "'stderr'", ":", "self", ".", "_stderr", ",", "'exit_code'", ":", "self", ".", "_exit_code", ",", "'path'", ":", "self", ".", "_path", ",", "'tag'", ":", "self", ".", "_tag", ",", "'parent_stage'", ":", "self", ".", "_p_stage", ",", "'parent_pipeline'", ":", "self", ".", "_p_pipeline", ",", "}", "return", "task_desc_as_dict" ]
31.146341
14.902439
def get_write_fields(self):
    """
    Get the list of fields used to write the header, separating
    record and signal specification fields.

    Returns the default required fields, the user defined fields,
    and their dependencies. Does NOT include `d_signal` or `e_d_signal`.

    Returns
    -------
    rec_write_fields : list
        Record specification fields to be written. Includes 'comment'
        if present.
    sig_write_fields : dict
        Dictionary of signal specification fields to be written, with
        values equal to the channels that need to be present for each
        field.
    """
    # Record specification fields
    rec_write_fields = self.get_write_subset('record')

    # Add comments if any
    if self.comments is not None:
        rec_write_fields.append('comments')

    # Get required signal fields if signals are present.
    self.check_field('n_sig')

    if self.n_sig > 0:
        sig_write_fields = self.get_write_subset('signal')
    else:
        sig_write_fields = None

    return rec_write_fields, sig_write_fields
[ "def", "get_write_fields", "(", "self", ")", ":", "# Record specification fields", "rec_write_fields", "=", "self", ".", "get_write_subset", "(", "'record'", ")", "# Add comments if any", "if", "self", ".", "comments", "!=", "None", ":", "rec_write_fields", ".", "append", "(", "'comments'", ")", "# Get required signal fields if signals are present.", "self", ".", "check_field", "(", "'n_sig'", ")", "if", "self", ".", "n_sig", ">", "0", ":", "sig_write_fields", "=", "self", ".", "get_write_subset", "(", "'signal'", ")", "else", ":", "sig_write_fields", "=", "None", "return", "rec_write_fields", ",", "sig_write_fields" ]
31.081081
19.837838
def format_nd_slice(item, ndim):
    """Preformat a getitem argument as an N-tuple
    """
    if not isinstance(item, tuple):
        item = (item,)
    return item[:ndim] + (None,) * (ndim - len(item))
[ "def", "format_nd_slice", "(", "item", ",", "ndim", ")", ":", "if", "not", "isinstance", "(", "item", ",", "tuple", ")", ":", "item", "=", "(", "item", ",", ")", "return", "item", "[", ":", "ndim", "]", "+", "(", "None", ",", ")", "*", "(", "ndim", "-", "len", "(", "item", ")", ")" ]
33
7.333333
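Two illustrative assertions covering both branches (a non-tuple input is wrapped, an overlong tuple is truncated):

assert format_nd_slice(slice(0, 5), 2) == (slice(0, 5), None)   # padded
assert format_nd_slice((1, 2, 3), 2) == (1, 2)                  # truncated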
def extend(self, table, keys=None):
    """Extends all rows in the texttable.

    The rows are extended with the new columns from the table.

    Args:
        table: A texttable, the table to extend this table by.
        keys: A set, the set of columns to use as the key. If None, the
            row index is used.

    Raises:
        IndexError: If key is not a valid column name.
    """
    if keys:
        for k in keys:
            if k not in self._Header():
                raise IndexError("Unknown key: '%s'", k)

    extend_with = []
    for column in table.header:
        if column not in self.header:
            extend_with.append(column)

    if not extend_with:
        return

    for column in extend_with:
        self.AddColumn(column)

    if not keys:
        for row1, row2 in zip(self, table):
            for column in extend_with:
                row1[column] = row2[column]
        return

    for row1 in self:
        for row2 in table:
            for k in keys:
                if row1[k] != row2[k]:
                    break
            else:
                for column in extend_with:
                    row1[column] = row2[column]
                break
[ "def", "extend", "(", "self", ",", "table", ",", "keys", "=", "None", ")", ":", "if", "keys", ":", "for", "k", "in", "keys", ":", "if", "k", "not", "in", "self", ".", "_Header", "(", ")", ":", "raise", "IndexError", "(", "\"Unknown key: '%s'\"", ",", "k", ")", "extend_with", "=", "[", "]", "for", "column", "in", "table", ".", "header", ":", "if", "column", "not", "in", "self", ".", "header", ":", "extend_with", ".", "append", "(", "column", ")", "if", "not", "extend_with", ":", "return", "for", "column", "in", "extend_with", ":", "self", ".", "AddColumn", "(", "column", ")", "if", "not", "keys", ":", "for", "row1", ",", "row2", "in", "zip", "(", "self", ",", "table", ")", ":", "for", "column", "in", "extend_with", ":", "row1", "[", "column", "]", "=", "row2", "[", "column", "]", "return", "for", "row1", "in", "self", ":", "for", "row2", "in", "table", ":", "for", "k", "in", "keys", ":", "if", "row1", "[", "k", "]", "!=", "row2", "[", "k", "]", ":", "break", "else", ":", "for", "column", "in", "extend_with", ":", "row1", "[", "column", "]", "=", "row2", "[", "column", "]", "break" ]
28.386364
17.522727
def charindex(self, line, char, context):
    """Determines the absolute character index for the specified line
    and char using the *buffer's* code string."""
    # Make sure that we have chars and lines to work from
    if len(context.bufferstr) > 0 and len(self._chars) == 0:
        # Add one for the \n that we split on for each line
        self._chars = [len(x) + 1 for x in context.bufferstr]
        # Remove the last line break since it doesn't exist
        self._chars[-1] -= 1

        # Now we want to add up the number of characters in each line
        # as the lines progress so that it is easy to search for the
        # line of a single character index
        total = 0
        for i in range(len(self._chars)):
            total += self._chars[i]
            self._chars[i] = total

    return self._chars[line - 1] + char
[ "def", "charindex", "(", "self", ",", "line", ",", "char", ",", "context", ")", ":", "#Make sure that we have chars and lines to work from", "if", "len", "(", "context", ".", "bufferstr", ")", ">", "0", "and", "len", "(", "self", ".", "_chars", ")", "==", "0", ":", "#Add one for the \\n that we split on for each line", "self", ".", "_chars", "=", "[", "len", "(", "x", ")", "+", "1", "for", "x", "in", "context", ".", "bufferstr", "]", "#Remove the last line break since it doesn't exist", "self", ".", "_chars", "[", "-", "1", "]", "-=", "1", "#Now we want to add up the number of characters in each line", "#as the lines progress so that it is easy to search for the", "#line of a single character index", "total", "=", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_chars", ")", ")", ":", "total", "+=", "self", ".", "_chars", "[", "i", "]", "self", ".", "_chars", "[", "i", "]", "=", "total", "return", "self", ".", "_chars", "[", "line", "-", "1", "]", "+", "char" ]
47.157895
15.473684
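The cached offsets are a cumulative sum over per-line lengths; the same bookkeeping as a stand-alone snippet:

lines = ["abc", "de", "f"]            # the buffer split on '\n'
chars = [len(x) + 1 for x in lines]   # +1 for each '\n'
chars[-1] -= 1                        # the last line has no trailing '\n'
total = 0
for i in range(len(chars)):
    total += chars[i]
    chars[i] = total
print(chars)                          # [4, 7, 8]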
def L_diffuser_outer(sed_inputs=sed_dict):
    """Return the outer length of each diffuser in the sedimentation tank.

    Parameters
    ----------
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations; it can be found in sed.yaml.

    Returns
    -------
    float
        Outer length of each diffuser in the sedimentation tank

    Examples
    --------
    >>> from aide_design.play import*
    >>>
    """
    return ((sed_inputs['manifold']['diffuser']['A']
             / (2 * sed_inputs['manifold']['diffuser']['thickness_wall']))
            - w_diffuser_inner(sed_inputs).to(u.inch)).to(u.m).magnitude
[ "def", "L_diffuser_outer", "(", "sed_inputs", "=", "sed_dict", ")", ":", "return", "(", "(", "sed_inputs", "[", "'manifold'", "]", "[", "'diffuser'", "]", "[", "'A'", "]", "/", "(", "2", "*", "sed_inputs", "[", "'manifold'", "]", "[", "'diffuser'", "]", "[", "'thickness_wall'", "]", ")", ")", "-", "w_diffuser_inner", "(", "sed_inputs", ")", ".", "to", "(", "u", ".", "inch", ")", ")", ".", "to", "(", "u", ".", "m", ")", ".", "magnitude" ]
34.421053
20.894737
def toints(self):
    """\
    Returns an iterable of integers interpreting the content of `seq`
    as sequence of binary numbers of length 8.
    """
    def grouper(iterable, n, fillvalue=None):
        "Collect data into fixed-length chunks or blocks"
        # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
        return zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)
    return [int(''.join(map(str, group)), 2) for group in grouper(self._data, 8, 0)]
[ "def", "toints", "(", "self", ")", ":", "def", "grouper", "(", "iterable", ",", "n", ",", "fillvalue", "=", "None", ")", ":", "\"Collect data into fixed-length chunks or blocks\"", "# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx", "return", "zip_longest", "(", "*", "[", "iter", "(", "iterable", ")", "]", "*", "n", ",", "fillvalue", "=", "fillvalue", ")", "return", "[", "int", "(", "''", ".", "join", "(", "map", "(", "str", ",", "group", ")", ")", ",", "2", ")", "for", "group", "in", "grouper", "(", "self", ".", "_data", ",", "8", ",", "0", ")", "]" ]
49.1
19.4
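A stand-alone check of the bit-grouping logic, with the final group zero-padded to a full byte:

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    return zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)

bits = [1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0]
ints = [int(''.join(map(str, g)), 2) for g in grouper(bits, 8, 0)]
print(ints)   # [240, 160]: the last four bits are zero-padded to a byte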
def plot_poles_colorbar(map_axis, plons, plats, A95s, colorvalues, vmin, vmax,
                        colormap='viridis', edgecolor='k', marker='o',
                        markersize='20', alpha=1.0, colorbar=True,
                        colorbar_label='pole age (Ma)'):
    """
    This function plots multiple paleomagnetic poles and their A95 error
    ellipses on a cartopy map axis. The poles are colored by the defined
    colormap.

    Before this function is called, a plot needs to be initialized with code
    such as that in the make_orthographic_map function.

    Example
    -------
    >>> plons = [200, 180, 210]
    >>> plats = [60, 40, 35]
    >>> A95s = [6, 3, 10]
    >>> ages = [100, 200, 300]
    >>> vmin = 0
    >>> vmax = 300
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
    >>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)

    Required Parameters
    -----------
    map_axis : the name of the current map axis that has been developed using cartopy
    plons : the longitude of the paleomagnetic pole being plotted (in degrees E)
    plats : the latitude of the paleomagnetic pole being plotted (in degrees)
    A95s : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
    colorvalues : the attribute used to determine the symbol colors
    vmin : the minimum value of the colormap range
    vmax : the maximum value of the colormap range

    Optional Parameters (defaults are used if not specified)
    -----------
    colormap : the colormap used (default is 'viridis'; others should be put
        as a string with quotes, e.g. 'plasma')
    edgecolor : the color desired for the symbol outline
    marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
    colorbar : the default is to include a colorbar (True). Set to False to
        plot without a colorbar.
    colorbar_label : label for the colorbar
    """
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_poles_colorbar')
        return

    color_mapping = plt.cm.ScalarMappable(cmap=colormap,
                                          norm=plt.Normalize(vmin=vmin, vmax=vmax))
    colors = color_mapping.to_rgba(colorvalues).tolist()
    plot_poles(map_axis, plons, plats, A95s, label='',
               color=colors, edgecolor=edgecolor, marker=marker)

    if colorbar == True:
        sm = plt.cm.ScalarMappable(
            cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
        sm._A = []
        plt.colorbar(sm, orientation='horizontal', shrink=0.8,
                     pad=0.05, label=colorbar_label)
[ "def", "plot_poles_colorbar", "(", "map_axis", ",", "plons", ",", "plats", ",", "A95s", ",", "colorvalues", ",", "vmin", ",", "vmax", ",", "colormap", "=", "'viridis'", ",", "edgecolor", "=", "'k'", ",", "marker", "=", "'o'", ",", "markersize", "=", "'20'", ",", "alpha", "=", "1.0", ",", "colorbar", "=", "True", ",", "colorbar_label", "=", "'pole age (Ma)'", ")", ":", "if", "not", "has_cartopy", ":", "print", "(", "'-W- cartopy must be installed to run ipmag.plot_poles_colorbar'", ")", "return", "color_mapping", "=", "plt", ".", "cm", ".", "ScalarMappable", "(", "cmap", "=", "colormap", ",", "norm", "=", "plt", ".", "Normalize", "(", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ")", ")", "colors", "=", "color_mapping", ".", "to_rgba", "(", "colorvalues", ")", ".", "tolist", "(", ")", "plot_poles", "(", "map_axis", ",", "plons", ",", "plats", ",", "A95s", ",", "label", "=", "''", ",", "color", "=", "colors", ",", "edgecolor", "=", "edgecolor", ",", "marker", "=", "marker", ")", "if", "colorbar", "==", "True", ":", "sm", "=", "plt", ".", "cm", ".", "ScalarMappable", "(", "cmap", "=", "colormap", ",", "norm", "=", "plt", ".", "Normalize", "(", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ")", ")", "sm", ".", "_A", "=", "[", "]", "plt", ".", "colorbar", "(", "sm", ",", "orientation", "=", "'horizontal'", ",", "shrink", "=", "0.8", ",", "pad", "=", "0.05", ",", "label", "=", "colorbar_label", ")" ]
45.642857
29.142857
def __get_segment_types(self, element):
    """
    given a <segment> or <group> element, returns its segment type and the
    segment type of its parent (i.e. its dominating node)

    Parameters
    ----------
    element : ??? etree Element

    Returns
    -------
    segment_type : str
        'nucleus', 'satellite' or 'isolated' (unconnected segment, e.g. a
        news headline) or 'span' (iff the segment type is currently
        unknown -- i.e. ``relname`` is ``span``)
    parent_segment_type : str or None
        'nucleus', 'satellite' or None (e.g. for the root group node)
    """
    if 'parent' not in element.attrib:
        if element.tag == 'segment':
            segment_type = 'isolated'
            parent_segment_type = None
        else:  # element.tag == 'group'
            segment_type = 'span'
            parent_segment_type = None
        return segment_type, parent_segment_type

    # ``relname`` either contains the name of an RST relation or
    # the string ``span`` (iff the segment is dominated by a span
    # node -- a horizontal line spanning one or more segments/groups
    # in an RST diagram). ``relname`` is '', if the segment is
    # unconnected.
    relname = element.attrib.get('relname', '')
    # we look up if ``relname`` represents a regular, binary RST
    # relation or a multinuclear relation. ``reltype`` is '',
    # if ``relname`` is ``span`` (i.e. a span isn't an RST relation).
    reltype = self.relations.get(relname, '')

    if reltype == 'rst':
        segment_type = 'satellite'
        parent_segment_type = 'nucleus'
    elif reltype == 'multinuc':
        segment_type = 'nucleus'
        parent_segment_type = None  # we don't know its type, yet
    else:  # reltype == ''
        # the segment is of unknown type, it is dominated by
        # a span group node
        segment_type = 'span'
        parent_segment_type = 'span'
    return segment_type, parent_segment_type
[ "def", "__get_segment_types", "(", "self", ",", "element", ")", ":", "if", "not", "'parent'", "in", "element", ".", "attrib", ":", "if", "element", ".", "tag", "==", "'segment'", ":", "segment_type", "=", "'isolated'", "parent_segment_type", "=", "None", "else", ":", "# element.tag == 'group'", "segment_type", "=", "'span'", "parent_segment_type", "=", "None", "return", "segment_type", ",", "parent_segment_type", "# ``relname`` either contains the name of an RST relation or", "# the string ``span`` (iff the segment is dominated by a span", "# node -- a horizontal line spanning one or more segments/groups", "# in an RST diagram). ``relname`` is '', if the segment is", "# unconnected.", "relname", "=", "element", ".", "attrib", ".", "get", "(", "'relname'", ",", "''", ")", "# we look up, if ``relname`` represents a regular, binary RST", "# relation or a multinucular relation. ``reltype`` is '',", "# if ``relname`` is ``span`` (i.e. a span isn't an RST relation).", "reltype", "=", "self", ".", "relations", ".", "get", "(", "relname", ",", "''", ")", "if", "reltype", "==", "'rst'", ":", "segment_type", "=", "'satellite'", "parent_segment_type", "=", "'nucleus'", "elif", "reltype", "==", "'multinuc'", ":", "segment_type", "=", "'nucleus'", "parent_segment_type", "=", "None", "# we don't know it's type, yet", "else", ":", "# reltype == ''", "# the segment is of unknown type, it is dominated by", "# a span group node", "segment_type", "=", "'span'", "parent_segment_type", "=", "'span'", "return", "segment_type", ",", "parent_segment_type" ]
41.58
16.5
def serve(self, port=62000):
    """
    Start LanguageBoard web application

    Parameters
    ----------
    port: int
        port to serve web application
    """
    from http.server import HTTPServer, CGIHTTPRequestHandler
    os.chdir(self.log_folder)
    httpd = HTTPServer(('', port), CGIHTTPRequestHandler)
    print("Starting LanguageBoard on port: " + str(httpd.server_port))
    webbrowser.open('http://0.0.0.0:{}'.format(port))
    httpd.serve_forever()
[ "def", "serve", "(", "self", ",", "port", "=", "62000", ")", ":", "from", "http", ".", "server", "import", "HTTPServer", ",", "CGIHTTPRequestHandler", "os", ".", "chdir", "(", "self", ".", "log_folder", ")", "httpd", "=", "HTTPServer", "(", "(", "''", ",", "port", ")", ",", "CGIHTTPRequestHandler", ")", "print", "(", "\"Starting LanguageBoard on port: \"", "+", "str", "(", "httpd", ".", "server_port", ")", ")", "webbrowser", ".", "open", "(", "'http://0.0.0.0:{}'", ".", "format", "(", "port", ")", ")", "httpd", ".", "serve_forever", "(", ")" ]
31.1875
19.6875
def delete(self, directory_updated=False):  # pylint: disable=W0212
    """
    Delete this configuration

    :param directory_updated: If True, tell ConfigurationAdmin to not recall
                              the directory of this deletion (internal use only)
    """
    with self.__lock:
        if self.__deleted:
            # Nothing to do
            return

        # Update status
        self.__deleted = True

        # Notify ConfigurationAdmin, notify services only if the
        # configuration had been updated before
        self.__config_admin._delete(self, self.__updated, directory_updated)

        # Remove the file
        self.__persistence.delete(self.__pid)

        # Clean up
        if self.__properties:
            self.__properties.clear()

        self.__persistence = None
        self.__pid = None
[ "def", "delete", "(", "self", ",", "directory_updated", "=", "False", ")", ":", "# pylint: disable=W0212", "with", "self", ".", "__lock", ":", "if", "self", ".", "__deleted", ":", "# Nothing to do", "return", "# Update status", "self", ".", "__deleted", "=", "True", "# Notify ConfigurationAdmin, notify services only if the", "# configuration had been updated before", "self", ".", "__config_admin", ".", "_delete", "(", "self", ",", "self", ".", "__updated", ",", "directory_updated", ")", "# Remove the file", "self", ".", "__persistence", ".", "delete", "(", "self", ".", "__pid", ")", "# Clean up", "if", "self", ".", "__properties", ":", "self", ".", "__properties", ".", "clear", "(", ")", "self", ".", "__persistence", "=", "None", "self", ".", "__pid", "=", "None" ]
31.066667
18.2
def select(self, txn, from_key=None, to_key=None, return_keys=True,
           return_values=True, reverse=False, limit=None):
    """
    Select all records (key-value pairs) in table, optionally within a
    given key range.

    :param txn: The transaction in which to run.
    :type txn: :class:`zlmdb.Transaction`

    :param from_key: Return records starting from (and including) this key.
    :type from_key: object

    :param to_key: Return records up to (but not including) this key.
    :type to_key: object

    :param return_keys: If ``True`` (default), return keys of records.
    :type return_keys: bool

    :param return_values: If ``True`` (default), return values of records.
    :type return_values: bool

    :param reverse: If ``True``, iterate over records in reverse key order.
    :type reverse: bool

    :param limit: Limit number of records returned.
    :type limit: int

    :return:
    """
    assert type(return_keys) == bool
    assert type(return_values) == bool
    assert type(reverse) == bool
    assert limit is None or (type(limit) == int and limit > 0 and limit < 10000000)

    return PersistentMapIterator(txn,
                                 self,
                                 from_key=from_key,
                                 to_key=to_key,
                                 return_keys=return_keys,
                                 return_values=return_values,
                                 reverse=reverse,
                                 limit=limit)
[ "def", "select", "(", "self", ",", "txn", ",", "from_key", "=", "None", ",", "to_key", "=", "None", ",", "return_keys", "=", "True", ",", "return_values", "=", "True", ",", "reverse", "=", "False", ",", "limit", "=", "None", ")", ":", "assert", "type", "(", "return_keys", ")", "==", "bool", "assert", "type", "(", "return_values", ")", "==", "bool", "assert", "type", "(", "reverse", ")", "==", "bool", "assert", "limit", "is", "None", "or", "(", "type", "(", "limit", ")", "==", "int", "and", "limit", ">", "0", "and", "limit", "<", "10000000", ")", "return", "PersistentMapIterator", "(", "txn", ",", "self", ",", "from_key", "=", "from_key", ",", "to_key", "=", "to_key", ",", "return_keys", "=", "return_keys", ",", "return_values", "=", "return_values", ",", "reverse", "=", "reverse", ",", "limit", "=", "limit", ")" ]
39.675676
22.972973
def deprecated_func(func):
    """Deprecates a function, printing a warning on the first usage."""
    # We use a mutable container here to work around Py2's lack of
    # the `nonlocal` keyword.
    first_usage = [True]

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if first_usage[0]:
            warnings.warn(
                "Call to deprecated function {}.".format(func.__name__),
                DeprecationWarning,
            )
            first_usage[0] = False
        return func(*args, **kwargs)

    return wrapper
[ "def", "deprecated_func", "(", "func", ")", ":", "# We use a mutable container here to work around Py2's lack of", "# the `nonlocal` keyword.", "first_usage", "=", "[", "True", "]", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "first_usage", "[", "0", "]", ":", "warnings", ".", "warn", "(", "\"Call to deprecated function {}.\"", ".", "format", "(", "func", ".", "__name__", ")", ",", "DeprecationWarning", ",", ")", "first_usage", "[", "0", "]", "=", "False", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
29.722222
18.444444
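Usage is the standard decorator pattern; the warning fires once and is suppressed afterwards (assumes deprecated_func and its functools/warnings imports are in scope):

@deprecated_func
def old_api():
    return 42

old_api()   # emits "Call to deprecated function old_api." once
old_api()   # silent: first_usage[0] was flipped to False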
def show_grid(data_frame, show_toolbar=None, precision=None, grid_options=None,
              column_options=None, column_definitions=None, row_edit_callback=None):
    """
    Renders a DataFrame or Series as an interactive qgrid, represented by
    an instance of the ``QgridWidget`` class. The ``QgridWidget`` instance
    is constructed using the options passed in to this function. The
    ``data_frame`` argument to this function is used as the ``df`` kwarg in
    the call to the QgridWidget constructor, and the rest of the parameters
    are passed through as is.

    If the ``data_frame`` argument is a Series, it will be converted to a
    DataFrame before being passed in to the QgridWidget constructor as the
    ``df`` kwarg.

    :rtype: QgridWidget

    Parameters
    ----------
    data_frame : DataFrame
        The DataFrame that will be displayed by this instance of
        QgridWidget.
    grid_options : dict
        Options to use when creating the SlickGrid control (i.e. the
        interactive grid). See the Notes section below for more information
        on the available options, as well as the default options that this
        widget uses.
    precision : integer
        The number of digits of precision to display for floating-point
        values. If unset, we use the value of
        `pandas.get_option('display.precision')`.
    show_toolbar : bool
        Whether to show a toolbar with options for adding/removing rows.
        Adding/removing rows is an experimental feature which only works
        with DataFrames that have an integer index.
    column_options : dict
        Column options that are to be applied to every column. See the
        Notes section below for more information on the available options,
        as well as the default options that this widget uses.
    column_definitions : dict
        Column options that are to be applied to individual columns. The
        keys of the dict should be the column names, and each value should
        be the column options for a particular column, represented as a
        dict. The available options for each column are the same options
        that are available to be set for all columns via the
        ``column_options`` parameter. See the Notes section below for more
        information on those options.
    row_edit_callback : callable
        A callable that is called to determine whether a particular row
        should be editable or not. Its signature should be
        ``callable(row)``, where ``row`` is a dictionary which contains a
        particular row's values, keyed by column name. The callback should
        return True if the provided row should be editable, and False
        otherwise.

    Notes
    -----
    The following dictionary is used for ``grid_options`` if none are
    provided explicitly::

        {
            # SlickGrid options
            'fullWidthRows': True,
            'syncColumnCellResize': True,
            'forceFitColumns': True,
            'defaultColumnWidth': 150,
            'rowHeight': 28,
            'enableColumnReorder': False,
            'enableTextSelectionOnCells': True,
            'editable': True,
            'autoEdit': False,
            'explicitInitialization': True,

            # Qgrid options
            'maxVisibleRows': 15,
            'minVisibleRows': 8,
            'sortable': True,
            'filterable': True,
            'highlightSelectedCell': False,
            'highlightSelectedRow': True
        }

    The first group of options are SlickGrid "grid options" which are
    described in the `SlickGrid documentation
    <https://github.com/mleibman/SlickGrid/wiki/Grid-Options>`_.

    The second group of options are options that were added specifically
    for Qgrid and therefore are not documented in the SlickGrid
    documentation. The following bullet points describe these options.

    * **maxVisibleRows** The maximum number of rows that Qgrid will show.
    * **minVisibleRows** The minimum number of rows that Qgrid will show.
    * **sortable** Whether the Qgrid instance will allow the user to sort
      columns by clicking the column headers. When this is set to
      ``False``, nothing will happen when users click the column headers.
    * **filterable** Whether the Qgrid instance will allow the user to
      filter the grid. When this is set to ``False`` the filter icons won't
      be shown for any columns.
    * **highlightSelectedCell** If you set this to True, the selected cell
      will be given a light blue border.
    * **highlightSelectedRow** If you set this to False, the light blue
      background that's shown by default for selected rows will be hidden.

    The following dictionary is used for ``column_options`` if none are
    provided explicitly::

        {
            # SlickGrid column options
            'defaultSortAsc': True,
            'maxWidth': None,
            'minWidth': 30,
            'resizable': True,
            'sortable': True,
            'toolTip': "",
            'width': None,

            # Qgrid column options
            'editable': True,
        }

    The first group of options are SlickGrid "column options" which are
    described in the `SlickGrid documentation
    <https://github.com/mleibman/SlickGrid/wiki/Column-Options>`_.

    The ``editable`` option was added specifically for Qgrid and therefore
    is not documented in the SlickGrid documentation. This option specifies
    whether a column should be editable or not.

    See Also
    --------
    set_defaults : Permanently set global defaults for the parameters of
        ``show_grid``, with the exception of the ``data_frame`` and
        ``column_definitions`` parameters, since those depend on the
        particular set of data being shown by an instance, and therefore
        aren't parameters we would want to set for all QgridWidget
        instances.
    set_grid_option : Permanently set global defaults for individual grid
        options. Does so by changing the defaults that the ``show_grid``
        method uses for the ``grid_options`` parameter.
    QgridWidget : The widget class that is instantiated and returned by
        this method.
    """
    if show_toolbar is None:
        show_toolbar = defaults.show_toolbar
    if precision is None:
        precision = defaults.precision
    if not isinstance(precision, Integral):
        raise TypeError("precision must be int, not %s" % type(precision))
    if column_options is None:
        column_options = defaults.column_options
    else:
        options = defaults.column_options.copy()
        options.update(column_options)
        column_options = options
    if grid_options is None:
        grid_options = defaults.grid_options
    else:
        options = defaults.grid_options.copy()
        options.update(grid_options)
        grid_options = options
    if not isinstance(grid_options, dict):
        raise TypeError(
            "grid_options must be dict, not %s" % type(grid_options)
        )

    # if a Series is passed in, convert it to a DataFrame
    if isinstance(data_frame, pd.Series):
        data_frame = pd.DataFrame(data_frame)
    elif not isinstance(data_frame, pd.DataFrame):
        raise TypeError(
            "data_frame must be DataFrame or Series, not %s" % type(data_frame)
        )

    column_definitions = (column_definitions or {})

    # create a visualization for the dataframe
    return QgridWidget(df=data_frame, precision=precision,
                       grid_options=grid_options,
                       column_options=column_options,
                       column_definitions=column_definitions,
                       row_edit_callback=row_edit_callback,
                       show_toolbar=show_toolbar)
[ "def", "show_grid", "(", "data_frame", ",", "show_toolbar", "=", "None", ",", "precision", "=", "None", ",", "grid_options", "=", "None", ",", "column_options", "=", "None", ",", "column_definitions", "=", "None", ",", "row_edit_callback", "=", "None", ")", ":", "if", "show_toolbar", "is", "None", ":", "show_toolbar", "=", "defaults", ".", "show_toolbar", "if", "precision", "is", "None", ":", "precision", "=", "defaults", ".", "precision", "if", "not", "isinstance", "(", "precision", ",", "Integral", ")", ":", "raise", "TypeError", "(", "\"precision must be int, not %s\"", "%", "type", "(", "precision", ")", ")", "if", "column_options", "is", "None", ":", "column_options", "=", "defaults", ".", "column_options", "else", ":", "options", "=", "defaults", ".", "column_options", ".", "copy", "(", ")", "options", ".", "update", "(", "column_options", ")", "column_options", "=", "options", "if", "grid_options", "is", "None", ":", "grid_options", "=", "defaults", ".", "grid_options", "else", ":", "options", "=", "defaults", ".", "grid_options", ".", "copy", "(", ")", "options", ".", "update", "(", "grid_options", ")", "grid_options", "=", "options", "if", "not", "isinstance", "(", "grid_options", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"grid_options must be dict, not %s\"", "%", "type", "(", "grid_options", ")", ")", "# if a Series is passed in, convert it to a DataFrame", "if", "isinstance", "(", "data_frame", ",", "pd", ".", "Series", ")", ":", "data_frame", "=", "pd", ".", "DataFrame", "(", "data_frame", ")", "elif", "not", "isinstance", "(", "data_frame", ",", "pd", ".", "DataFrame", ")", ":", "raise", "TypeError", "(", "\"data_frame must be DataFrame or Series, not %s\"", "%", "type", "(", "data_frame", ")", ")", "column_definitions", "=", "(", "column_definitions", "or", "{", "}", ")", "# create a visualization for the dataframe", "return", "QgridWidget", "(", "df", "=", "data_frame", ",", "precision", "=", "precision", ",", "grid_options", "=", "grid_options", ",", "column_options", "=", "column_options", ",", "column_definitions", "=", "column_definitions", ",", "row_edit_callback", "=", "row_edit_callback", ",", "show_toolbar", "=", "show_toolbar", ")" ]
40.989474
21.284211
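This appears to be qgrid's public show_grid entry point; assuming the usual package layout (the function is exposed as qgrid.show_grid), a minimal notebook sketch:

import pandas as pd
import qgrid

df = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
widget = qgrid.show_grid(df, show_toolbar=True,
                         grid_options={'forceFitColumns': False})
widget   # display the interactive grid in a notebook cell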
def get_cookie_header(queue_item):
    """Convert a requests cookie jar to a HTTP request cookie header value.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.

    Returns:
        str: The HTTP cookie header value.
    """
    header = []
    path = URLHelper.get_path(queue_item.request.url)

    for cookie in queue_item.request.cookies:
        root_path = cookie.path == "" or cookie.path == "/"
        if path.startswith(cookie.path) or root_path:
            header.append(cookie.name + "=" + cookie.value)

    return "&".join(header)
[ "def", "get_cookie_header", "(", "queue_item", ")", ":", "header", "=", "[", "]", "path", "=", "URLHelper", ".", "get_path", "(", "queue_item", ".", "request", ".", "url", ")", "for", "cookie", "in", "queue_item", ".", "request", ".", "cookies", ":", "root_path", "=", "cookie", ".", "path", "==", "\"\"", "or", "cookie", ".", "path", "==", "\"/\"", "if", "path", ".", "startswith", "(", "cookie", ".", "path", ")", "or", "root_path", ":", "header", ".", "append", "(", "cookie", ".", "name", "+", "\"=\"", "+", "cookie", ".", "value", ")", "return", "\"&\"", ".", "join", "(", "header", ")" ]
31.5
23.7
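A toy check of the path-matching rule, substituting a namedtuple for the requests cookie object (which exposes the same name/value/path attributes):

from collections import namedtuple

Cookie = namedtuple('Cookie', 'name value path')
cookies = [Cookie('sid', 'abc', '/'), Cookie('admin', 'x', '/admin')]

path = '/blog/post'
matched = [c.name + '=' + c.value
           for c in cookies
           if path.startswith(c.path) or c.path in ('', '/')]
print('&'.join(matched))   # sid=abc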
def on_commit(self, changes):
    """Method that gets called when a model is changed. This serves
    to do the actual index writing.
    """
    if _get_config(self)['enable_indexing'] is False:
        return None

    for wh in self.whoosheers:
        if not wh.auto_update:
            continue
        writer = None
        for change in changes:
            if change[0].__class__ in wh.models:
                method_name = '{0}_{1}'.format(change[1],
                                               change[0].__class__.__name__.lower())
                method = getattr(wh, method_name, None)
                if method:
                    if not writer:
                        writer = type(self).get_or_create_index(_get_app(self), wh).\
                            writer(timeout=_get_config(self)['writer_timeout'])
                    method(writer, change[0])
        if writer:
            writer.commit()
[ "def", "on_commit", "(", "self", ",", "changes", ")", ":", "if", "_get_config", "(", "self", ")", "[", "'enable_indexing'", "]", "is", "False", ":", "return", "None", "for", "wh", "in", "self", ".", "whoosheers", ":", "if", "not", "wh", ".", "auto_update", ":", "continue", "writer", "=", "None", "for", "change", "in", "changes", ":", "if", "change", "[", "0", "]", ".", "__class__", "in", "wh", ".", "models", ":", "method_name", "=", "'{0}_{1}'", ".", "format", "(", "change", "[", "1", "]", ",", "change", "[", "0", "]", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ")", "method", "=", "getattr", "(", "wh", ",", "method_name", ",", "None", ")", "if", "method", ":", "if", "not", "writer", ":", "writer", "=", "type", "(", "self", ")", ".", "get_or_create_index", "(", "_get_app", "(", "self", ")", ",", "wh", ")", ".", "writer", "(", "timeout", "=", "_get_config", "(", "self", ")", "[", "'writer_timeout'", "]", ")", "method", "(", "writer", ",", "change", "[", "0", "]", ")", "if", "writer", ":", "writer", ".", "commit", "(", ")" ]
42.409091
16.590909
def data_url(contents, domain=DEFAULT_DOMAIN):
    """
    Return the URL for embedding the GeoJSON data in the URL hash

    Parameters
    ----------
    contents - string of GeoJSON
    domain - string, default http://geojson.io
    """
    url = (domain + '#data=data:application/json,' +
           urllib.parse.quote(contents))
    return url
[ "def", "data_url", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ")", ":", "url", "=", "(", "domain", "+", "'#data=data:application/json,'", "+", "urllib", ".", "parse", ".", "quote", "(", "contents", ")", ")", "return", "url" ]
25.923077
16.538462
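
A minimal usage sketch for data_url above; DEFAULT_DOMAIN is not shown in the record, so the value below is assumed from the docstring's "default http://geojson.io". Only the standard library is needed.

import urllib.parse

DEFAULT_DOMAIN = 'http://geojson.io/'  # assumed value, per the docstring

def data_url(contents, domain=DEFAULT_DOMAIN):
    # same logic as the record above: percent-encode the GeoJSON and put it in the URL hash
    return (domain + '#data=data:application/json,' +
            urllib.parse.quote(contents))

print(data_url('{"type": "FeatureCollection", "features": []}'))
# -> http://geojson.io/#data=data:application/json,%7B%22type%22%3A%20%22FeatureCollection%22...
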
def gen_bag_feats(self, e_set):
    """
    Generates bag of words features from an input essay set and trained FeatureExtractor
    Generally called by gen_feats
    Returns an array of features
    e_set - EssaySet object
    """
    if(hasattr(self, '_stem_dict')):
        sfeats = self._stem_dict.transform(e_set._clean_stem_text)
        nfeats = self._normal_dict.transform(e_set._text)
        bag_feats = numpy.concatenate((sfeats.toarray(), nfeats.toarray()), axis=1)
    else:
        raise util_functions.InputError(self, "Dictionaries must be initialized prior to generating bag features.")
    return bag_feats.copy()
[ "def", "gen_bag_feats", "(", "self", ",", "e_set", ")", ":", "if", "(", "hasattr", "(", "self", ",", "'_stem_dict'", ")", ")", ":", "sfeats", "=", "self", ".", "_stem_dict", ".", "transform", "(", "e_set", ".", "_clean_stem_text", ")", "nfeats", "=", "self", ".", "_normal_dict", ".", "transform", "(", "e_set", ".", "_text", ")", "bag_feats", "=", "numpy", ".", "concatenate", "(", "(", "sfeats", ".", "toarray", "(", ")", ",", "nfeats", ".", "toarray", "(", ")", ")", ",", "axis", "=", "1", ")", "else", ":", "raise", "util_functions", ".", "InputError", "(", "self", ",", "\"Dictionaries must be initialized prior to generating bag features.\"", ")", "return", "bag_feats", ".", "copy", "(", ")" ]
47.857143
20.714286
def guess_tags(filename):
    """
    Function to get potential tags for files using the file names.

    :param filename: The name of the file.
    """
    tags = []
    stripped_filename = strip_zip_suffix(filename)
    if stripped_filename.endswith('.vcf'):
        tags.append('vcf')
    if stripped_filename.endswith('.json'):
        tags.append('json')
    if stripped_filename.endswith('.csv'):
        tags.append('csv')
    return tags
[ "def", "guess_tags", "(", "filename", ")", ":", "tags", "=", "[", "]", "stripped_filename", "=", "strip_zip_suffix", "(", "filename", ")", "if", "stripped_filename", ".", "endswith", "(", "'.vcf'", ")", ":", "tags", ".", "append", "(", "'vcf'", ")", "if", "stripped_filename", ".", "endswith", "(", "'.json'", ")", ":", "tags", ".", "append", "(", "'json'", ")", "if", "stripped_filename", ".", "endswith", "(", "'.csv'", ")", ":", "tags", ".", "append", "(", "'csv'", ")", "return", "tags" ]
29.4
13.533333
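
A hedged usage sketch for guess_tags: strip_zip_suffix is not shown in the record, so the stub below is an assumption that it merely drops a trailing .zip or .gz.

def strip_zip_suffix(filename):
    # hypothetical stand-in for the helper used by guess_tags
    for suffix in ('.zip', '.gz'):
        if filename.endswith(suffix):
            return filename[:-len(suffix)]
    return filename

def guess_tags(filename):
    # same logic as the record above
    tags = []
    stripped_filename = strip_zip_suffix(filename)
    if stripped_filename.endswith('.vcf'):
        tags.append('vcf')
    if stripped_filename.endswith('.json'):
        tags.append('json')
    if stripped_filename.endswith('.csv'):
        tags.append('csv')
    return tags

print(guess_tags('variants.vcf.zip'))  # ['vcf'], assuming the stub matches the real helper
print(guess_tags('report.csv'))        # ['csv']
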
async def SetVolumeAttachmentInfo(self, volume_attachments):
    '''
    volume_attachments : typing.Sequence[~VolumeAttachment]
    Returns -> typing.Sequence[~ErrorResult]
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='StorageProvisioner',
               request='SetVolumeAttachmentInfo',
               version=4,
               params=_params)
    _params['volume-attachments'] = volume_attachments
    reply = await self.rpc(msg)
    return reply
[ "async", "def", "SetVolumeAttachmentInfo", "(", "self", ",", "volume_attachments", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'StorageProvisioner'", ",", "request", "=", "'SetVolumeAttachmentInfo'", ",", "version", "=", "4", ",", "params", "=", "_params", ")", "_params", "[", "'volume-attachments'", "]", "=", "volume_attachments", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
37.642857
14.785714
def _print_unique_links_with_status_codes(page_url, soup):
    """ Finds all unique links in the html of the page source
        and then prints out those links with their status codes.
        Format:  ["link"  ->  "status_code"]  (per line)
        Page links include those obtained from:
        "a"->"href", "img"->"src", "link"->"href", and "script"->"src".
    """
    links = _get_unique_links(page_url, soup)
    for link in links:
        status_code = _get_link_status_code(link)
        print(link, " -> ", status_code)
[ "def", "_print_unique_links_with_status_codes", "(", "page_url", ",", "soup", ")", ":", "links", "=", "_get_unique_links", "(", "page_url", ",", "soup", ")", "for", "link", "in", "links", ":", "status_code", "=", "_get_link_status_code", "(", "link", ")", "print", "(", "link", ",", "\" -> \"", ",", "status_code", ")" ]
47.272727
11.636364
def check_roles(self, account, aws_policies, aws_roles):
    """Iterate through the roles of a specific account and create or update the roles if they're missing or
    do not match the roles from Git.

    Args:
        account (:obj:`Account`): The account to check roles on
        aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account
        aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account

    Returns:
        `None`
    """
    self.log.debug('Checking roles for {}'.format(account.account_name))
    max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60
    sess = get_aws_session(account)
    iam = sess.client('iam')

    # Build a list of default role policies and extra account specific role policies
    account_roles = copy.deepcopy(self.cfg_roles)
    if account.account_name in self.git_policies:
        for role in self.git_policies[account.account_name]:
            if role in account_roles:
                account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys())

    for role_name, data in list(account_roles.items()):
        if role_name not in aws_roles:
            iam.create_role(
                Path='/',
                RoleName=role_name,
                AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4),
                MaxSessionDuration=max_session_duration
            )
            self.log.info('Created role {}/{}'.format(account.account_name, role_name))
        else:
            try:
                if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration:
                    iam.update_role(
                        RoleName=aws_roles[role_name]['RoleName'],
                        MaxSessionDuration=max_session_duration
                    )
                    self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format(
                        role_name,
                        account.account_name,
                        max_session_duration
                    ))
            except ClientError:
                self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format(
                    role_name,
                    account.account_name
                ))

        aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies(
            RoleName=role_name)['AttachedPolicies']
        ]
        aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames']

        cfg_role_policies = data['policies']

        missing_policies = list(set(cfg_role_policies) - set(aws_role_policies))
        extra_policies = list(set(aws_role_policies) - set(cfg_role_policies))

        if aws_role_inline_policies:
            self.log.info('IAM Role {} on {} has the following inline policies: {}'.format(
                role_name,
                account.account_name,
                ', '.join(aws_role_inline_policies)
            ))

            if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles:
                for policy in aws_role_inline_policies:
                    iam.delete_role_policy(RoleName=role_name, PolicyName=policy)
                    auditlog(
                        event='iam.check_roles.delete_inline_role_policy',
                        actor=self.ns,
                        data={
                            'account': account.account_name,
                            'roleName': role_name,
                            'policy': policy
                        }
                    )

        if missing_policies:
            self.log.info('IAM Role {} on {} is missing the following policies: {}'.format(
                role_name,
                account.account_name,
                ', '.join(missing_policies)
            ))
            if self.manage_roles:
                for policy in missing_policies:
                    iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn'])
                    auditlog(
                        event='iam.check_roles.attach_role_policy',
                        actor=self.ns,
                        data={
                            'account': account.account_name,
                            'roleName': role_name,
                            'policyArn': aws_policies[policy]['Arn']
                        }
                    )

        if extra_policies:
            self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format(
                role_name,
                account.account_name,
                ', '.join(extra_policies)
            ))

            for policy in extra_policies:
                if policy in aws_policies:
                    polArn = aws_policies[policy]['Arn']
                elif policy in self.aws_managed_policies:
                    polArn = self.aws_managed_policies[policy]['Arn']
                else:
                    polArn = None
                    self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format(
                        role_name,
                        account.account_name,
                        policy
                    ))

                if self.manage_roles and polArn:
                    iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn)
                    auditlog(
                        event='iam.check_roles.detach_role_policy',
                        actor=self.ns,
                        data={
                            'account': account.account_name,
                            'roleName': role_name,
                            'policyArn': polArn
                        }
                    )
[ "def", "check_roles", "(", "self", ",", "account", ",", "aws_policies", ",", "aws_roles", ")", ":", "self", ".", "log", ".", "debug", "(", "'Checking roles for {}'", ".", "format", "(", "account", ".", "account_name", ")", ")", "max_session_duration", "=", "self", ".", "dbconfig", ".", "get", "(", "'role_timeout_in_hours'", ",", "self", ".", "ns", ",", "8", ")", "*", "60", "*", "60", "sess", "=", "get_aws_session", "(", "account", ")", "iam", "=", "sess", ".", "client", "(", "'iam'", ")", "# Build a list of default role policies and extra account specific role policies", "account_roles", "=", "copy", ".", "deepcopy", "(", "self", ".", "cfg_roles", ")", "if", "account", ".", "account_name", "in", "self", ".", "git_policies", ":", "for", "role", "in", "self", ".", "git_policies", "[", "account", ".", "account_name", "]", ":", "if", "role", "in", "account_roles", ":", "account_roles", "[", "role", "]", "[", "'policies'", "]", "+=", "list", "(", "self", ".", "git_policies", "[", "account", ".", "account_name", "]", "[", "role", "]", ".", "keys", "(", ")", ")", "for", "role_name", ",", "data", "in", "list", "(", "account_roles", ".", "items", "(", ")", ")", ":", "if", "role_name", "not", "in", "aws_roles", ":", "iam", ".", "create_role", "(", "Path", "=", "'/'", ",", "RoleName", "=", "role_name", ",", "AssumeRolePolicyDocument", "=", "json", ".", "dumps", "(", "data", "[", "'trust'", "]", ",", "indent", "=", "4", ")", ",", "MaxSessionDuration", "=", "max_session_duration", ")", "self", ".", "log", ".", "info", "(", "'Created role {}/{}'", ".", "format", "(", "account", ".", "account_name", ",", "role_name", ")", ")", "else", ":", "try", ":", "if", "aws_roles", "[", "role_name", "]", "[", "'MaxSessionDuration'", "]", "!=", "max_session_duration", ":", "iam", ".", "update_role", "(", "RoleName", "=", "aws_roles", "[", "role_name", "]", "[", "'RoleName'", "]", ",", "MaxSessionDuration", "=", "max_session_duration", ")", "self", ".", "log", ".", "info", "(", "'Adjusted MaxSessionDuration for role {} in account {} to {} seconds'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ",", "max_session_duration", ")", ")", "except", "ClientError", ":", "self", ".", "log", ".", "exception", "(", "'Unable to adjust MaxSessionDuration for role {} in account {}'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ")", ")", "aws_role_policies", "=", "[", "x", "[", "'PolicyName'", "]", "for", "x", "in", "iam", ".", "list_attached_role_policies", "(", "RoleName", "=", "role_name", ")", "[", "'AttachedPolicies'", "]", "]", "aws_role_inline_policies", "=", "iam", ".", "list_role_policies", "(", "RoleName", "=", "role_name", ")", "[", "'PolicyNames'", "]", "cfg_role_policies", "=", "data", "[", "'policies'", "]", "missing_policies", "=", "list", "(", "set", "(", "cfg_role_policies", ")", "-", "set", "(", "aws_role_policies", ")", ")", "extra_policies", "=", "list", "(", "set", "(", "aws_role_policies", ")", "-", "set", "(", "cfg_role_policies", ")", ")", "if", "aws_role_inline_policies", ":", "self", ".", "log", ".", "info", "(", "'IAM Role {} on {} has the following inline policies: {}'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ",", "', '", ".", "join", "(", "aws_role_inline_policies", ")", ")", ")", "if", "self", ".", "dbconfig", ".", "get", "(", "'delete_inline_policies'", ",", "self", ".", "ns", ",", "False", ")", "and", "self", ".", "manage_roles", ":", "for", "policy", "in", "aws_role_inline_policies", ":", "iam", ".", 
"delete_role_policy", "(", "RoleName", "=", "role_name", ",", "PolicyName", "=", "policy", ")", "auditlog", "(", "event", "=", "'iam.check_roles.delete_inline_role_policy'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'account'", ":", "account", ".", "account_name", ",", "'roleName'", ":", "role_name", ",", "'policy'", ":", "policy", "}", ")", "if", "missing_policies", ":", "self", ".", "log", ".", "info", "(", "'IAM Role {} on {} is missing the following policies: {}'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ",", "', '", ".", "join", "(", "missing_policies", ")", ")", ")", "if", "self", ".", "manage_roles", ":", "for", "policy", "in", "missing_policies", ":", "iam", ".", "attach_role_policy", "(", "RoleName", "=", "role_name", ",", "PolicyArn", "=", "aws_policies", "[", "policy", "]", "[", "'Arn'", "]", ")", "auditlog", "(", "event", "=", "'iam.check_roles.attach_role_policy'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'account'", ":", "account", ".", "account_name", ",", "'roleName'", ":", "role_name", ",", "'policyArn'", ":", "aws_policies", "[", "policy", "]", "[", "'Arn'", "]", "}", ")", "if", "extra_policies", ":", "self", ".", "log", ".", "info", "(", "'IAM Role {} on {} has the following extra policies applied: {}'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ",", "', '", ".", "join", "(", "extra_policies", ")", ")", ")", "for", "policy", "in", "extra_policies", ":", "if", "policy", "in", "aws_policies", ":", "polArn", "=", "aws_policies", "[", "policy", "]", "[", "'Arn'", "]", "elif", "policy", "in", "self", ".", "aws_managed_policies", ":", "polArn", "=", "self", ".", "aws_managed_policies", "[", "policy", "]", "[", "'Arn'", "]", "else", ":", "polArn", "=", "None", "self", ".", "log", ".", "info", "(", "'IAM Role {} on {} has an unknown policy attached: {}'", ".", "format", "(", "role_name", ",", "account", ".", "account_name", ",", "policy", ")", ")", "if", "self", ".", "manage_roles", "and", "polArn", ":", "iam", ".", "detach_role_policy", "(", "RoleName", "=", "role_name", ",", "PolicyArn", "=", "polArn", ")", "auditlog", "(", "event", "=", "'iam.check_roles.detach_role_policy'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'account'", ":", "account", ".", "account_name", ",", "'roleName'", ":", "role_name", ",", "'policyArn'", ":", "polArn", "}", ")" ]
47.435115
22.839695
def badge_form(model):
    '''A form factory for a given model's badges'''
    class BadgeForm(ModelForm):
        model_class = Badge

        kind = fields.RadioField(
            _('Kind'), [validators.DataRequired()],
            choices=model.__badges__.items(),
            description=_('Kind of badge (certified, etc)'))

    return BadgeForm
[ "def", "badge_form", "(", "model", ")", ":", "class", "BadgeForm", "(", "ModelForm", ")", ":", "model_class", "=", "Badge", "kind", "=", "fields", ".", "RadioField", "(", "_", "(", "'Kind'", ")", ",", "[", "validators", ".", "DataRequired", "(", ")", "]", ",", "choices", "=", "model", ".", "__badges__", ".", "items", "(", ")", ",", "description", "=", "_", "(", "'Kind of badge (certified, etc)'", ")", ")", "return", "BadgeForm" ]
30.727273
17.454545
def BVV(value, size=None, **kwargs):
    """
    Creates a bit-vector value (i.e., a concrete value).

    :param value:   The value. Either an integer or a string. If it's a string,
                    it will be interpreted as the bytes of a big-endian constant.
    :param size:    The size (in bits) of the bit-vector. Optional if you provide
                    a string, required for an integer.

    :returns:       A BV object representing this value.
    """

    if type(value) in (bytes, str):
        if type(value) is str:
            l.warning("BVV value is a unicode string, encoding as utf-8")
            value = value.encode('utf-8')

        if size is None:
            size = len(value)*8
        elif type(size) is not int:
            raise TypeError("Bitvector size must be either absent (implicit) or an integer")
        elif size != len(value)*8:
            raise ClaripyValueError('string/size mismatch for BVV creation')

        value = int(binascii.hexlify(value), 16) if value != b"" else 0

    elif size is None or (type(value) is not int and value is not None):
        raise TypeError('BVV() takes either an integer value and a size or a string of bytes')

    # ensure the 0 <= value < (1 << size)
    # FIXME hack to handle None which is used for an Empty Strided Interval (ESI)
    if value is not None:
        value &= (1 << size) -1

    if not kwargs:
        try:
            return _bvv_cache[(value, size)]
        except KeyError:
            pass

    result = BV('BVV', (value, size), length=size, **kwargs)
    _bvv_cache[(value, size)] = result
    return result
[ "def", "BVV", "(", "value", ",", "size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "type", "(", "value", ")", "in", "(", "bytes", ",", "str", ")", ":", "if", "type", "(", "value", ")", "is", "str", ":", "l", ".", "warning", "(", "\"BVV value is a unicode string, encoding as utf-8\"", ")", "value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "if", "size", "is", "None", ":", "size", "=", "len", "(", "value", ")", "*", "8", "elif", "type", "(", "size", ")", "is", "not", "int", ":", "raise", "TypeError", "(", "\"Bitvector size must be either absent (implicit) or an integer\"", ")", "elif", "size", "!=", "len", "(", "value", ")", "*", "8", ":", "raise", "ClaripyValueError", "(", "'string/size mismatch for BVV creation'", ")", "value", "=", "int", "(", "binascii", ".", "hexlify", "(", "value", ")", ",", "16", ")", "if", "value", "!=", "b\"\"", "else", "0", "elif", "size", "is", "None", "or", "(", "type", "(", "value", ")", "is", "not", "int", "and", "value", "is", "not", "None", ")", ":", "raise", "TypeError", "(", "'BVV() takes either an integer value and a size or a string of bytes'", ")", "# ensure the 0 <= value < (1 << size)", "# FIXME hack to handle None which is used for an Empty Strided Interval (ESI)", "if", "value", "is", "not", "None", ":", "value", "&=", "(", "1", "<<", "size", ")", "-", "1", "if", "not", "kwargs", ":", "try", ":", "return", "_bvv_cache", "[", "(", "value", ",", "size", ")", "]", "except", "KeyError", ":", "pass", "result", "=", "BV", "(", "'BVV'", ",", "(", "value", ",", "size", ")", ",", "length", "=", "size", ",", "*", "*", "kwargs", ")", "_bvv_cache", "[", "(", "value", ",", "size", ")", "]", "=", "result", "return", "result" ]
38.3
24.8
def export(self, template_file_name, output_file_name, sort="public", data=None, limit=0):
    """Export ranking to a file.

    Args:
        template_file_name (str): path to the template (moustache template)
        output_file_name (str): path of the file to create with the ranking
        sort (str): field to sort the users by
    """
    exportedData = {}
    exportedUsers = self.getSortedUsers()

    template = self.__getTemplate(template_file_name)

    position = 1
    if not limit:
        exportedData["users"] = exportedUsers
    else:
        exportedData["users"] = exportedUsers[:limit]

    for u in exportedData["users"]:
        u["position"] = position
        u["comma"] = position < len(exportedData["users"])
        position += 1

    exportedData["extraData"] = data

    renderer = Renderer()
    output = renderer.render(template, exportedData)

    with open(output_file_name, "w") as text_file:
        text_file.write(output)
[ "def", "export", "(", "self", ",", "template_file_name", ",", "output_file_name", ",", "sort", "=", "\"public\"", ",", "data", "=", "None", ",", "limit", "=", "0", ")", ":", "exportedData", "=", "{", "}", "exportedUsers", "=", "self", ".", "getSortedUsers", "(", ")", "template", "=", "self", ".", "__getTemplate", "(", "template_file_name", ")", "position", "=", "1", "if", "not", "limit", ":", "exportedData", "[", "\"users\"", "]", "=", "exportedUsers", "else", ":", "exportedData", "[", "\"users\"", "]", "=", "exportedUsers", "[", ":", "limit", "]", "for", "u", "in", "exportedData", "[", "\"users\"", "]", ":", "u", "[", "\"position\"", "]", "=", "position", "u", "[", "\"comma\"", "]", "=", "position", "<", "len", "(", "exportedData", "[", "\"users\"", "]", ")", "position", "+=", "1", "exportedData", "[", "\"extraData\"", "]", "=", "data", "renderer", "=", "Renderer", "(", ")", "output", "=", "renderer", ".", "render", "(", "template", ",", "exportedData", ")", "with", "open", "(", "output_file_name", ",", "\"w\"", ")", "as", "text_file", ":", "text_file", ".", "write", "(", "output", ")" ]
32.59375
17.875
def request_system_disarm(blink, network):
    """
    Disarm system.

    :param blink: Blink instance.
    :param network: Sync module network id.
    """
    url = "{}/network/{}/disarm".format(blink.urls.base_url, network)
    return http_post(blink, url)
[ "def", "request_system_disarm", "(", "blink", ",", "network", ")", ":", "url", "=", "\"{}/network/{}/disarm\"", ".", "format", "(", "blink", ".", "urls", ".", "base_url", ",", "network", ")", "return", "http_post", "(", "blink", ",", "url", ")" ]
27.888889
12.333333
def check_for_required_columns(problems, table, df):
    """
    Check that the given ProtoFeed table has the required columns.

    Parameters
    ----------
    problems : list
        A list of four-tuples, each containing

        1. A problem type (string) equal to ``'error'`` or ``'warning'``;
           ``'error'`` means the ProtoFeed is violated;
           ``'warning'`` means there is a problem but it is not a
           ProtoFeed violation
        2. A message (string) that describes the problem
        3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
           occurs
        4. A list of rows (integers) of the table's DataFrame where the
           problem occurs
    table : string
        Name of a ProtoFeed table
    df : DataFrame
        The ProtoFeed table corresponding to ``table``

    Returns
    -------
    list
        The ``problems`` list extended as follows.
        Check that the DataFrame contains the columns required by the
        ProtoFeed spec and append to the problems list one error for
        each column missing.

    """
    r = cs.PROTOFEED_REF
    req_columns = r.loc[(r['table'] == table) & r['column_required'],
                        'column'].values

    for col in req_columns:
        if col not in df.columns:
            problems.append(['error', 'Missing column {!s}'.format(col),
                             table, []])

    return problems
[ "def", "check_for_required_columns", "(", "problems", ",", "table", ",", "df", ")", ":", "r", "=", "cs", ".", "PROTOFEED_REF", "req_columns", "=", "r", ".", "loc", "[", "(", "r", "[", "'table'", "]", "==", "table", ")", "&", "r", "[", "'column_required'", "]", ",", "'column'", "]", ".", "values", "for", "col", "in", "req_columns", ":", "if", "col", "not", "in", "df", ".", "columns", ":", "problems", ".", "append", "(", "[", "'error'", ",", "'Missing column {!s}'", ".", "format", "(", "col", ")", ",", "table", ",", "[", "]", "]", ")", "return", "problems" ]
31.139535
22.395349
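
A runnable sketch of the same column check; the real cs.PROTOFEED_REF table is not shown in the record, so the small reference DataFrame below is invented for illustration.

import pandas as pd

# Hypothetical stand-in for cs.PROTOFEED_REF: one row per (table, column) pair.
PROTOFEED_REF = pd.DataFrame({
    'table': ['meta', 'meta', 'meta'],
    'column': ['agency_name', 'agency_url', 'agency_fare_url'],
    'column_required': [True, True, False],
})

def check_for_required_columns(problems, table, df):
    # same logic as the record above, using the stand-in reference table
    r = PROTOFEED_REF
    req_columns = r.loc[(r['table'] == table) & r['column_required'], 'column'].values
    for col in req_columns:
        if col not in df.columns:
            problems.append(['error', 'Missing column {!s}'.format(col), table, []])
    return problems

meta = pd.DataFrame({'agency_name': ['Metro']})  # agency_url is missing
print(check_for_required_columns([], 'meta', meta))
# [['error', 'Missing column agency_url', 'meta', []]]
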
def mount_share(name=None, remote_share=None, remote_file=None, mount_type="nfs", username=None, password=None):
    '''
    Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater.
    The remote share can be either NFS, CIFS, or WWW.

    Some of the advantages of CIMC Mounted vMedia include:
      Communication between mounted media and target stays local (inside datacenter)
      Media mounts can be scripted/automated
      No vKVM requirements for media connection
      Multiple share types supported
      Connections supported through all CIMC interfaces

      Note: CIMC Mounted vMedia is enabled through BIOS configuration.

    Args:
        name(str): The name of the volume on the CIMC device.

        remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW.
            This must be the directory path and not the full path to the remote file.

        remote_file(str): The name of the remote file to mount. It must reside within remote_share.

        mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www.

        username(str): An optional requirement to pass credentials to the remote share. If not provided, an
            unauthenticated connection attempt will be made.

        password(str): An optional requirement to pass a password to the remote share. If not provided, an
            unauthenticated connection attempt will be made.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso

        salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword

    '''
    if not name:
        raise salt.exceptions.CommandExecutionError("The share name must be specified.")

    if not remote_share:
        raise salt.exceptions.CommandExecutionError("The remote share path must be specified.")

    if not remote_file:
        raise salt.exceptions.CommandExecutionError("The remote file name must be specified.")

    if username and password:
        mount_options = " mountOptions='username={0},password={1}'".format(username, password)
    else:
        mount_options = ""

    dn = 'sys/svc-ext/vmedia-svc/vmmap-{0}'.format(name)

    inconfig = """<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{0}' map='{1}'{2}
    remoteFile='{3}' remoteShare='{4}' status='created'
    volumeName='Win12' />""".format(name, mount_type, mount_options, remote_file, remote_share)

    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)

    return ret
[ "def", "mount_share", "(", "name", "=", "None", ",", "remote_share", "=", "None", ",", "remote_file", "=", "None", ",", "mount_type", "=", "\"nfs\"", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The share name must be specified.\"", ")", "if", "not", "remote_share", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The remote share path must be specified.\"", ")", "if", "not", "remote_file", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "\"The remote file name must be specified.\"", ")", "if", "username", "and", "password", ":", "mount_options", "=", "\" mountOptions='username={0},password={1}'\"", ".", "format", "(", "username", ",", "password", ")", "else", ":", "mount_options", "=", "\"\"", "dn", "=", "'sys/svc-ext/vmedia-svc/vmmap-{0}'", ".", "format", "(", "name", ")", "inconfig", "=", "\"\"\"<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{0}' map='{1}'{2}\n remoteFile='{3}' remoteShare='{4}' status='created'\n volumeName='Win12' />\"\"\"", ".", "format", "(", "name", ",", "mount_type", ",", "mount_options", ",", "remote_file", ",", "remote_share", ")", "ret", "=", "__proxy__", "[", "'cimc.set_config_modify'", "]", "(", "dn", ",", "inconfig", ",", "False", ")", "return", "ret" ]
39.955224
32.313433
def filter_by_milestone(self, filtered_issues, tag_name, all_issues):
    """
    :param list(dict) filtered_issues: Filtered issues.
    :param str tag_name: Name (title) of tag.
    :param list(dict) all_issues: All issues.
    :rtype: list(dict)
    :return: Filtered issues according to the milestone.
    """

    filtered_issues = self.remove_issues_in_milestones(filtered_issues)
    if tag_name:
        # add missed issues (according milestones)
        issues_to_add = self.find_issues_to_add(all_issues, tag_name)
        filtered_issues.extend(issues_to_add)
    return filtered_issues
[ "def", "filter_by_milestone", "(", "self", ",", "filtered_issues", ",", "tag_name", ",", "all_issues", ")", ":", "filtered_issues", "=", "self", ".", "remove_issues_in_milestones", "(", "filtered_issues", ")", "if", "tag_name", ":", "# add missed issues (according milestones)", "issues_to_add", "=", "self", ".", "find_issues_to_add", "(", "all_issues", ",", "tag_name", ")", "filtered_issues", ".", "extend", "(", "issues_to_add", ")", "return", "filtered_issues" ]
41.866667
16.933333
def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
    """insert current project root path into sys path
    """
    from turbo import log
    app_config.app_name = app_name
    app_config.app_setting = app_setting
    app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
    app_config.web_application_setting.update(web_application_setting)

    if app_setting.get('session_config'):
        app_config.session_config.update(app_setting['session_config'])

    log.getLogger(**app_setting.log)
    _install_app(package_space)
[ "def", "register_app", "(", "app_name", ",", "app_setting", ",", "web_application_setting", ",", "mainfile", ",", "package_space", ")", ":", "from", "turbo", "import", "log", "app_config", ".", "app_name", "=", "app_name", "app_config", ".", "app_setting", "=", "app_setting", "app_config", ".", "project_name", "=", "os", ".", "path", ".", "basename", "(", "get_base_dir", "(", "mainfile", ",", "2", ")", ")", "app_config", ".", "web_application_setting", ".", "update", "(", "web_application_setting", ")", "if", "app_setting", ".", "get", "(", "'session_config'", ")", ":", "app_config", ".", "session_config", ".", "update", "(", "app_setting", "[", "'session_config'", "]", ")", "log", ".", "getLogger", "(", "*", "*", "app_setting", ".", "log", ")", "_install_app", "(", "package_space", ")" ]
47.583333
14.916667
def event(self, event):
    """
    Qt override.
    This is needed to be able to intercept the Tab key press event.
    """
    if event.type() == QEvent.KeyPress:
        if (event.key() == Qt.Key_Tab or event.key() == Qt.Key_Space):
            text = self.text()
            cursor = self.cursorPosition()
            # fix to include in "undo/redo" history
            if cursor != 0 and text[cursor-1] == ' ':
                text = text[:cursor-1] + ROW_SEPARATOR + ' ' +\
                    text[cursor:]
            else:
                text = text[:cursor] + ' ' + text[cursor:]
            self.setCursorPosition(cursor)
            self.setText(text)
            self.setCursorPosition(cursor + 1)
            return False
    return QWidget.event(self, event)
[ "def", "event", "(", "self", ",", "event", ")", ":", "if", "event", ".", "type", "(", ")", "==", "QEvent", ".", "KeyPress", ":", "if", "(", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Tab", "or", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Space", ")", ":", "text", "=", "self", ".", "text", "(", ")", "cursor", "=", "self", ".", "cursorPosition", "(", ")", "# fix to include in \"undo/redo\" history\r", "if", "cursor", "!=", "0", "and", "text", "[", "cursor", "-", "1", "]", "==", "' '", ":", "text", "=", "text", "[", ":", "cursor", "-", "1", "]", "+", "ROW_SEPARATOR", "+", "' '", "+", "text", "[", "cursor", ":", "]", "else", ":", "text", "=", "text", "[", ":", "cursor", "]", "+", "' '", "+", "text", "[", "cursor", ":", "]", "self", ".", "setCursorPosition", "(", "cursor", ")", "self", ".", "setText", "(", "text", ")", "self", ".", "setCursorPosition", "(", "cursor", "+", "1", ")", "return", "False", "return", "QWidget", ".", "event", "(", "self", ",", "event", ")" ]
40.52381
14.142857
def set_bit_order(self, order):
    """Set order of bits to be read/written over serial lines.  Should be
    either MSBFIRST for most-significant first, or LSBFIRST for
    least-significant first.
    """
    if order == MSBFIRST:
        self.lsbfirst = 0
    elif order == LSBFIRST:
        self.lsbfirst = 1
    else:
        raise ValueError('Order must be MSBFIRST or LSBFIRST.')
[ "def", "set_bit_order", "(", "self", ",", "order", ")", ":", "if", "order", "==", "MSBFIRST", ":", "self", ".", "lsbfirst", "=", "0", "elif", "order", "==", "LSBFIRST", ":", "self", ".", "lsbfirst", "=", "1", "else", ":", "raise", "ValueError", "(", "'Order must be MSBFIRST or LSBFIRST.'", ")" ]
37.727273
12.818182
def defaults(d1, d2):
    """
    Update a copy of d1 with the contents of d2 that are
    not in d1. d1 and d2 are dictionary like objects.

    Parameters
    ----------
    d1 : dict | dataframe
        dict with the preferred values
    d2 : dict | dataframe
        dict with the default values

    Returns
    -------
    out : dict | dataframe
        Result of adding default values; same type as d1
    """
    d1 = d1.copy()
    tolist = isinstance(d2, pd.DataFrame)
    keys = (k for k in d2 if k not in d1)
    for k in keys:
        if tolist:
            d1[k] = d2[k].tolist()
        else:
            d1[k] = d2[k]
    return d1
[ "def", "defaults", "(", "d1", ",", "d2", ")", ":", "d1", "=", "d1", ".", "copy", "(", ")", "tolist", "=", "isinstance", "(", "d2", ",", "pd", ".", "DataFrame", ")", "keys", "=", "(", "k", "for", "k", "in", "d2", "if", "k", "not", "in", "d1", ")", "for", "k", "in", "keys", ":", "if", "tolist", ":", "d1", "[", "k", "]", "=", "d2", "[", "k", "]", ".", "tolist", "(", ")", "else", ":", "d1", "[", "k", "]", "=", "d2", "[", "k", "]", "return", "d1" ]
22.777778
17.814815
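
A quick check of the merge semantics using plain dicts; the DataFrame branch only changes how values are copied. pandas is imported because the function tests isinstance(d2, pd.DataFrame).

import pandas as pd

def defaults(d1, d2):
    # same logic as the record above, condensed
    d1 = d1.copy()
    tolist = isinstance(d2, pd.DataFrame)
    for k in (k for k in d2 if k not in d1):
        d1[k] = d2[k].tolist() if tolist else d2[k]
    return d1

d1 = {'colour': 'red', 'size': 10}
d2 = {'colour': 'blue', 'size': 8, 'alpha': 0.5}
print(defaults(d1, d2))  # {'colour': 'red', 'size': 10, 'alpha': 0.5}, d1 wins, d2 fills gaps
print(d1)                # unchanged, since the function works on a copy
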
def diag_post_enable(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    diag = ET.SubElement(config, "diag", xmlns="urn:brocade.com:mgmt:brocade-diagnostics")
    post = ET.SubElement(diag, "post")
    enable = ET.SubElement(post, "enable")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "diag_post_enable", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "diag", "=", "ET", ".", "SubElement", "(", "config", ",", "\"diag\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-diagnostics\"", ")", "post", "=", "ET", ".", "SubElement", "(", "diag", ",", "\"post\"", ")", "enable", "=", "ET", ".", "SubElement", "(", "post", ",", "\"enable\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
38.5
13.4
def _convert_type(data_type):   # @NoSelf
    '''
    Converts CDF data types into python types
    '''
    if data_type in (1, 41):
        dt_string = 'b'
    elif data_type == 2:
        dt_string = 'h'
    elif data_type == 4:
        dt_string = 'i'
    elif data_type in (8, 33):
        dt_string = 'q'
    elif data_type == 11:
        dt_string = 'B'
    elif data_type == 12:
        dt_string = 'H'
    elif data_type == 14:
        dt_string = 'I'
    elif data_type in (21, 44):
        dt_string = 'f'
    elif data_type in (22, 45, 31):
        dt_string = 'd'
    elif data_type == 32:
        dt_string = 'd'
    elif data_type in (51, 52):
        dt_string = 's'
    else:
        dt_string = ''
    return dt_string
[ "def", "_convert_type", "(", "data_type", ")", ":", "# @NoSelf", "if", "data_type", "in", "(", "1", ",", "41", ")", ":", "dt_string", "=", "'b'", "elif", "data_type", "==", "2", ":", "dt_string", "=", "'h'", "elif", "data_type", "==", "4", ":", "dt_string", "=", "'i'", "elif", "data_type", "in", "(", "8", ",", "33", ")", ":", "dt_string", "=", "'q'", "elif", "data_type", "==", "11", ":", "dt_string", "=", "'B'", "elif", "data_type", "==", "12", ":", "dt_string", "=", "'H'", "elif", "data_type", "==", "14", ":", "dt_string", "=", "'I'", "elif", "data_type", "in", "(", "21", ",", "44", ")", ":", "dt_string", "=", "'f'", "elif", "data_type", "in", "(", "22", ",", "45", ",", "31", ")", ":", "dt_string", "=", "'d'", "elif", "data_type", "==", "32", ":", "dt_string", "=", "'d'", "elif", "data_type", "in", "(", "51", ",", "52", ")", ":", "dt_string", "=", "'s'", "else", ":", "dt_string", "=", "''", "return", "dt_string" ]
27.266667
13.333333
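
The returned characters are struct format codes, so once a CDF type code is mapped you can pack a value. A short sketch, assuming _convert_type from the record above is in scope:

import struct

fmt = _convert_type(4)                 # CDF type 4 maps to 'i' per the table above
packed = struct.pack('<' + fmt, -17)   # little-endian 4-byte signed int
print(fmt, packed)                     # i b'\xef\xff\xff\xff'
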
def example_exc_handler(tries_remaining, exception, delay):
    """Example exception handler; prints a warning to stderr.

    tries_remaining: The number of tries remaining.
    exception: The exception instance which was raised.
    delay: The number of seconds to sleep before the next try.
    """
    print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (exception, tries_remaining, delay)
[ "def", "example_exc_handler", "(", "tries_remaining", ",", "exception", ",", "delay", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Caught '%s', %d tries remaining, sleeping for %s seconds\"", "%", "(", "exception", ",", "tries_remaining", ",", "delay", ")" ]
45.25
16.75
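
The handler above uses Python 2 print syntax; a Python 3 rendering of the same idea (an assumed equivalent, not from the source) would be:

import sys

def example_exc_handler(tries_remaining, exception, delay):
    """Example exception handler; prints a warning to stderr."""
    print("Caught '%s', %d tries remaining, sleeping for %s seconds"
          % (exception, tries_remaining, delay), file=sys.stderr)

example_exc_handler(2, ValueError("boom"), 1.5)
# stderr: Caught 'boom', 2 tries remaining, sleeping for 1.5 seconds
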
def patch(self, endpoint, json=None, params=None, **kwargs):
    """PATCH to DHIS2
    :param endpoint: DHIS2 API endpoint
    :param json: HTTP payload
    :param params: HTTP parameters (dict)
    :return: requests.Response object
    """
    json = kwargs['data'] if 'data' in kwargs else json
    return self._make_request('patch', endpoint, data=json, params=params)
[ "def", "patch", "(", "self", ",", "endpoint", ",", "json", "=", "None", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "json", "=", "kwargs", "[", "'data'", "]", "if", "'data'", "in", "kwargs", "else", "json", "return", "self", ".", "_make_request", "(", "'patch'", ",", "endpoint", ",", "data", "=", "json", ",", "params", "=", "params", ")" ]
40.3
11.1
def map_over_glob(fn, path, pattern):
    """map a function over a glob pattern, relative to a directory"""
    return [fn(x) for x in glob.glob(os.path.join(path, pattern))]
[ "def", "map_over_glob", "(", "fn", ",", "path", ",", "pattern", ")", ":", "return", "[", "fn", "(", "x", ")", "for", "x", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "pattern", ")", ")", "]" ]
57.333333
9.666667
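
Usage is direct; for example, collecting the base names of text files in a directory (the directory path here is only illustrative):

import glob
import os

def map_over_glob(fn, path, pattern):
    """map a function over a glob pattern, relative to a directory"""
    return [fn(x) for x in glob.glob(os.path.join(path, pattern))]

print(map_over_glob(os.path.basename, '/tmp', '*.txt'))  # e.g. ['notes.txt', 'log.txt']
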
def get_value_from_content(key):
    """Get a value from the path specified.

    :param key: Array that defines the path of the value inside the message.
    """
    def value_from_content_function(service, message):
        """Actual implementation of get_value_from_content function.

        :param service: SelenolService object.
        :param message: SelenolMessage request.
        """
        return _get_value(message.content, key)
    return value_from_content_function
[ "def", "get_value_from_content", "(", "key", ")", ":", "def", "value_from_content_function", "(", "service", ",", "message", ")", ":", "\"\"\"Actual implementation of get_value_from_content function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"", "return", "_get_value", "(", "message", ".", "content", ",", "key", ")", "return", "value_from_content_function" ]
36
12.307692
def is_number(obj):
    """Check if obj is number."""
    return isinstance(obj, (int, float, np.int_, np.float_))
[ "def", "is_number", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "int", ",", "float", ",", "np", ".", "int_", ",", "np", ".", "float_", ")", ")" ]
37.333333
13.666667
def Bernstein(n, k):
    """Bernstein polynomial.
    """
    coeff = binom(n, k)

    def _bpoly(x):
        return coeff * x ** k * (1 - x) ** (n - k)

    return _bpoly
[ "def", "Bernstein", "(", "n", ",", "k", ")", ":", "coeff", "=", "binom", "(", "n", ",", "k", ")", "def", "_bpoly", "(", "x", ")", ":", "return", "coeff", "*", "x", "**", "k", "*", "(", "1", "-", "x", ")", "**", "(", "n", "-", "k", ")", "return", "_bpoly" ]
16.3
21.2
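
A small check of the closure, assuming binom comes from scipy.special (the import is not shown in the record). By hand, B_{2,1}(0.5) = binom(2,1) * 0.5^1 * 0.5^1 = 2 * 0.25 = 0.5.

from scipy.special import binom

def Bernstein(n, k):
    """Bernstein polynomial (same logic as the record above)."""
    coeff = binom(n, k)

    def _bpoly(x):
        return coeff * x ** k * (1 - x) ** (n - k)

    return _bpoly

b21 = Bernstein(2, 1)
print(b21(0.5))  # 0.5, matching the hand computation
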
def timeSeries(self, tag = None, outputFile = None, giveYears = True, greatestFirst = True, limitTo = False, pandasMode = True):
    """Creates a pandas dict of the ordered list of all the values of _tag_, ranked by the year they occurred in; multiple year occurrences will create multiple entries. A list can also be returned with the counts or years added, or it can be written to a file.

    If no _tag_ is given the `Records` in the collection will be used

    # Parameters

    _tag_ : `optional str`

    > Default `None`, if provided the tag will be ordered

    _outputFile_ : `optional str`

    > A file path to write a csv with 2 columns, one the tag values the other their years

    _giveYears_ : `optional bool`

    > Default `True`, if `True` the returned list will be composed of tuples the first values being the tag value and the second their years.

    _greatestFirst_ : `optional bool`

    > Default `True`, if `True` the returned list will be ordered with the highest years first, otherwise the lowest years will be first.

    _pandasMode_ : `optional bool`

    > Default `True`, if `True` a `dict` ready for pandas will be returned, otherwise a list

    _limitTo_ : `optional list[values]`

    > Default `False`, if a list is provided only those values in the list will be counted or returned

    # Returns

    `dict[str:list[value]] or list[str]`

    > A `dict` or `list` will be returned depending on if _pandasMode_ is `True`
    """
    seriesDict = {}
    for R in self:
        #This should be faster than using get, since get is a wrapper for __getitem__
        try:
            year = R['year']
        except KeyError:
            continue
        if tag is None:
            seriesDict[R] = {year : 1}
        else:
            try:
                val = R[tag]
            except KeyError:
                continue
            if not isinstance(val, list):
                val = [val]
            for entry in val:
                if limitTo and entry not in limitTo:
                    continue
                if entry in seriesDict:
                    try:
                        seriesDict[entry][year] += 1
                    except KeyError:
                        seriesDict[entry][year] = 1
                else:
                    seriesDict[entry] = {year : 1}
    seriesList = []
    for e, yd in seriesDict.items():
        seriesList += [(e, y) for y in yd.keys()]
    seriesList = sorted(seriesList, key = lambda x: x[1], reverse = greatestFirst)
    if outputFile is not None:
        with open(outputFile, 'w') as f:
            writer = csv.writer(f, dialect = 'excel')
            writer.writerow((str(tag), 'years'))
            writer.writerows(((k, '|'.join((str(y) for y in v))) for k, v in seriesDict.items()))
    if pandasMode:
        panDict = {'entry' : [], 'count' : [], 'year' : []}
        for entry, year in seriesList:
            panDict['entry'].append(entry)
            panDict['year'].append(year)
            panDict['count'].append(seriesDict[entry][year])
        return panDict
    elif giveYears:
        return seriesList
    else:
        return [e for e, c in seriesList]
[ "def", "timeSeries", "(", "self", ",", "tag", "=", "None", ",", "outputFile", "=", "None", ",", "giveYears", "=", "True", ",", "greatestFirst", "=", "True", ",", "limitTo", "=", "False", ",", "pandasMode", "=", "True", ")", ":", "seriesDict", "=", "{", "}", "for", "R", "in", "self", ":", "#This should be faster than using get, since get is a wrapper for __getitem__", "try", ":", "year", "=", "R", "[", "'year'", "]", "except", "KeyError", ":", "continue", "if", "tag", "is", "None", ":", "seriesDict", "[", "R", "]", "=", "{", "year", ":", "1", "}", "else", ":", "try", ":", "val", "=", "R", "[", "tag", "]", "except", "KeyError", ":", "continue", "if", "not", "isinstance", "(", "val", ",", "list", ")", ":", "val", "=", "[", "val", "]", "for", "entry", "in", "val", ":", "if", "limitTo", "and", "entry", "not", "in", "limitTo", ":", "continue", "if", "entry", "in", "seriesDict", ":", "try", ":", "seriesDict", "[", "entry", "]", "[", "year", "]", "+=", "1", "except", "KeyError", ":", "seriesDict", "[", "entry", "]", "[", "year", "]", "=", "1", "else", ":", "seriesDict", "[", "entry", "]", "=", "{", "year", ":", "1", "}", "seriesList", "=", "[", "]", "for", "e", ",", "yd", "in", "seriesDict", ".", "items", "(", ")", ":", "seriesList", "+=", "[", "(", "e", ",", "y", ")", "for", "y", "in", "yd", ".", "keys", "(", ")", "]", "seriesList", "=", "sorted", "(", "seriesList", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "greatestFirst", ")", "if", "outputFile", "is", "not", "None", ":", "with", "open", "(", "outputFile", ",", "'w'", ")", "as", "f", ":", "writer", "=", "csv", ".", "writer", "(", "f", ",", "dialect", "=", "'excel'", ")", "writer", ".", "writerow", "(", "(", "str", "(", "tag", ")", ",", "'years'", ")", ")", "writer", ".", "writerows", "(", "(", "(", "k", ",", "'|'", ".", "join", "(", "(", "str", "(", "y", ")", "for", "y", "in", "v", ")", ")", ")", "for", "k", ",", "v", "in", "seriesDict", ".", "items", "(", ")", ")", ")", "if", "pandasMode", ":", "panDict", "=", "{", "'entry'", ":", "[", "]", ",", "'count'", ":", "[", "]", ",", "'year'", ":", "[", "]", "}", "for", "entry", ",", "year", "in", "seriesList", ":", "panDict", "[", "'entry'", "]", ".", "append", "(", "entry", ")", "panDict", "[", "'year'", "]", ".", "append", "(", "year", ")", "panDict", "[", "'count'", "]", ".", "append", "(", "seriesDict", "[", "entry", "]", "[", "year", "]", ")", "return", "panDict", "elif", "giveYears", ":", "return", "seriesList", "else", ":", "return", "[", "e", "for", "e", ",", "c", "in", "seriesList", "]" ]
40.409639
23.951807
def find_attacker_slider(dest_list, occ_bb, piece_bb, target_bb, pos, domain):
    """ Find a slider attacker

    Parameters
    ----------
    dest_list : list
        To store the results.
    occ_bb : int, bitboard
        Occupancy bitboard.
    piece_bb : int, bitboard
        Bitboard with the position of the attacker piece.
    target_bb : int, bitboard
        Occupancy bitboard without any of the sliders in question.
    pos : int
        Target position.
    pos_map : function
        Mapping between a board position and its position in a single
        rotated/translated rank produced with domain_trans.
    domain_trans : function
        Transformation from a rank/file/diagonal/anti-diagonal containing
        pos to a single rank
    pos_inv_map : function
        Inverse of pos_map
    """
    pos_map, domain_trans, pos_inv_map = domain
    r = reach[pos_map(pos)][domain_trans(target_bb, pos)]
    m = r & domain_trans(piece_bb, pos)
    while m:
        r = m&-m
        rpos = r.bit_length()-1
        if not (ray[rpos][pos_map(pos)] & domain_trans(occ_bb, pos)):
            dest_list.append(pos_inv_map(rpos, pos))
        m ^= r
[ "def", "find_attacker_slider", "(", "dest_list", ",", "occ_bb", ",", "piece_bb", ",", "target_bb", ",", "pos", ",", "domain", ")", ":", "pos_map", ",", "domain_trans", ",", "pos_inv_map", "=", "domain", "r", "=", "reach", "[", "pos_map", "(", "pos", ")", "]", "[", "domain_trans", "(", "target_bb", ",", "pos", ")", "]", "m", "=", "r", "&", "domain_trans", "(", "piece_bb", ",", "pos", ")", "while", "m", ":", "r", "=", "m", "&", "-", "m", "rpos", "=", "r", ".", "bit_length", "(", ")", "-", "1", "if", "not", "(", "ray", "[", "rpos", "]", "[", "pos_map", "(", "pos", ")", "]", "&", "domain_trans", "(", "occ_bb", ",", "pos", ")", ")", ":", "dest_list", ".", "append", "(", "pos_inv_map", "(", "rpos", ",", "pos", ")", ")", "m", "^=", "r" ]
34.029412
17.764706
def dump_all(data_list, stream=None, **kwargs):
    """
    Serialize YAMLDict into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return yaml.dump_all(
        data_list,
        stream=stream,
        Dumper=YAMLDictDumper,
        **kwargs
    )
[ "def", "dump_all", "(", "data_list", ",", "stream", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "yaml", ".", "dump_all", "(", "data_list", ",", "stream", "=", "stream", ",", "Dumper", "=", "YAMLDictDumper", ",", "*", "*", "kwargs", ")" ]
25.181818
13.727273
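
YAMLDictDumper is not shown in the record, so this sketch substitutes PyYAML's stock SafeDumper just to demonstrate the call shape of yaml.dump_all:

import yaml

docs = [{'a': 1}, {'b': 2}]
# SafeDumper stands in for the project's YAMLDictDumper
print(yaml.dump_all(docs, Dumper=yaml.SafeDumper, default_flow_style=False))
# a: 1
# ---
# b: 2
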
def exclude_file(sp, f):
    """ Exclude discovered files if they match the special exclude_
    search pattern keys """
    # Make everything a list if it isn't already
    for k in sp:
        if k in ['exclude_fn', 'exclude_fn_re', 'exclude_contents', 'exclude_contents_re']:
            if not isinstance(sp[k], list):
                sp[k] = [sp[k]]

    # Search by file name (glob)
    if 'exclude_fn' in sp:
        for pat in sp['exclude_fn']:
            if fnmatch.fnmatch(f['fn'], pat):
                return True

    # Search by file name (regex)
    if 'exclude_fn_re' in sp:
        for pat in sp['exclude_fn_re']:
            if re.match( pat, f['fn']):
                return True

    # Search the contents of the file
    if 'exclude_contents' in sp or 'exclude_contents_re' in sp:
        # Compile regex patterns if we have any
        if 'exclude_contents_re' in sp:
            sp['exclude_contents_re'] = [re.compile(pat) for pat in sp['exclude_contents_re']]
        with io.open (os.path.join(f['root'],f['fn']), "r", encoding='utf-8') as fh:
            for line in fh:
                if 'exclude_contents' in sp:
                    for pat in sp['exclude_contents']:
                        if pat in line:
                            return True
                if 'exclude_contents_re' in sp:
                    for pat in sp['exclude_contents_re']:
                        if re.search(pat, line):
                            return True
    return False
[ "def", "exclude_file", "(", "sp", ",", "f", ")", ":", "# Make everything a list if it isn't already", "for", "k", "in", "sp", ":", "if", "k", "in", "[", "'exclude_fn'", ",", "'exclude_fn_re'", "'exclude_contents'", ",", "'exclude_contents_re'", "]", ":", "if", "not", "isinstance", "(", "sp", "[", "k", "]", ",", "list", ")", ":", "sp", "[", "k", "]", "=", "[", "sp", "[", "k", "]", "]", "# Search by file name (glob)", "if", "'exclude_fn'", "in", "sp", ":", "for", "pat", "in", "sp", "[", "'exclude_fn'", "]", ":", "if", "fnmatch", ".", "fnmatch", "(", "f", "[", "'fn'", "]", ",", "pat", ")", ":", "return", "True", "# Search by file name (regex)", "if", "'exclude_fn_re'", "in", "sp", ":", "for", "pat", "in", "sp", "[", "'exclude_fn_re'", "]", ":", "if", "re", ".", "match", "(", "pat", ",", "f", "[", "'fn'", "]", ")", ":", "return", "True", "# Search the contents of the file", "if", "'exclude_contents'", "in", "sp", "or", "'exclude_contents_re'", "in", "sp", ":", "# Compile regex patterns if we have any", "if", "'exclude_contents_re'", "in", "sp", ":", "sp", "[", "'exclude_contents_re'", "]", "=", "[", "re", ".", "compile", "(", "pat", ")", "for", "pat", "in", "sp", "[", "'exclude_contents_re'", "]", "]", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "f", "[", "'root'", "]", ",", "f", "[", "'fn'", "]", ")", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "if", "'exclude_contents'", "in", "sp", ":", "for", "pat", "in", "sp", "[", "'exclude_contents'", "]", ":", "if", "pat", "in", "line", ":", "return", "True", "if", "'exclude_contents_re'", "in", "sp", ":", "for", "pat", "in", "sp", "[", "'exclude_contents_re'", "]", ":", "if", "re", ".", "search", "(", "pat", ",", "line", ")", ":", "return", "True", "return", "False" ]
37.410256
14.589744
def make_sentences(self, stream_item):
    'assemble Sentence and Token objects'
    self.make_label_index(stream_item)
    sentences = []
    token_num = 0
    new_mention_id = 0
    for sent_start, sent_end, sent_str in self._sentences(
            stream_item.body.clean_visible):
        assert isinstance(sent_str, unicode)
        sent = Sentence()
        sentence_pos = 0
        for start, end in self.word_tokenizer.span_tokenize(sent_str):
            token_str = sent_str[start:end].encode('utf8')
            tok = Token(
                token_num=token_num,
                token=token_str,
                sentence_pos=sentence_pos,
            )
            tok.offsets[OffsetType.CHARS] = Offset(
                type=OffsetType.CHARS,
                first=sent_start + start,
                length=end - start,
            )
            # whitespace tokenizer will never get a token
            # boundary in the middle of an 'author' label
            try:
                label = self.label_index.find_le(sent_start + start)
            except ValueError:
                label = None
            if label:
                off = label.offsets[OffsetType.CHARS]
                if off.first + off.length > sent_start + start:
                    streamcorpus.add_annotation(tok, label)
                    logger.debug('adding label to tok: %r has %r',
                                 tok.token, label.target.target_id)
                    if label in self.label_to_mention_id:
                        mention_id = self.label_to_mention_id[label]
                    else:
                        mention_id = new_mention_id
                        new_mention_id += 1
                        self.label_to_mention_id[label] = mention_id
                    tok.mention_id = mention_id
            token_num += 1
            sentence_pos += 1
            sent.tokens.append(tok)
        sentences.append(sent)
    return sentences
[ "def", "make_sentences", "(", "self", ",", "stream_item", ")", ":", "self", ".", "make_label_index", "(", "stream_item", ")", "sentences", "=", "[", "]", "token_num", "=", "0", "new_mention_id", "=", "0", "for", "sent_start", ",", "sent_end", ",", "sent_str", "in", "self", ".", "_sentences", "(", "stream_item", ".", "body", ".", "clean_visible", ")", ":", "assert", "isinstance", "(", "sent_str", ",", "unicode", ")", "sent", "=", "Sentence", "(", ")", "sentence_pos", "=", "0", "for", "start", ",", "end", "in", "self", ".", "word_tokenizer", ".", "span_tokenize", "(", "sent_str", ")", ":", "token_str", "=", "sent_str", "[", "start", ":", "end", "]", ".", "encode", "(", "'utf8'", ")", "tok", "=", "Token", "(", "token_num", "=", "token_num", ",", "token", "=", "token_str", ",", "sentence_pos", "=", "sentence_pos", ",", ")", "tok", ".", "offsets", "[", "OffsetType", ".", "CHARS", "]", "=", "Offset", "(", "type", "=", "OffsetType", ".", "CHARS", ",", "first", "=", "sent_start", "+", "start", ",", "length", "=", "end", "-", "start", ",", ")", "# whitespace tokenizer will never get a token", "# boundary in the middle of an 'author' label", "try", ":", "label", "=", "self", ".", "label_index", ".", "find_le", "(", "sent_start", "+", "start", ")", "except", "ValueError", ":", "label", "=", "None", "if", "label", ":", "off", "=", "label", ".", "offsets", "[", "OffsetType", ".", "CHARS", "]", "if", "off", ".", "first", "+", "off", ".", "length", ">", "sent_start", "+", "start", ":", "streamcorpus", ".", "add_annotation", "(", "tok", ",", "label", ")", "logger", ".", "debug", "(", "'adding label to tok: %r has %r'", ",", "tok", ".", "token", ",", "label", ".", "target", ".", "target_id", ")", "if", "label", "in", "self", ".", "label_to_mention_id", ":", "mention_id", "=", "self", ".", "label_to_mention_id", "[", "label", "]", "else", ":", "mention_id", "=", "new_mention_id", "new_mention_id", "+=", "1", "self", ".", "label_to_mention_id", "[", "label", "]", "=", "mention_id", "tok", ".", "mention_id", "=", "mention_id", "token_num", "+=", "1", "sentence_pos", "+=", "1", "sent", ".", "tokens", ".", "append", "(", "tok", ")", "sentences", ".", "append", "(", "sent", ")", "return", "sentences" ]
41.8
16.16
def check_valid_temperature(var, units):
    r"""Check that variable is air temperature."""
    check_valid(var, 'standard_name', 'air_temperature')
    check_valid(var, 'units', units)
    assert_daily(var)
[ "def", "check_valid_temperature", "(", "var", ",", "units", ")", ":", "check_valid", "(", "var", ",", "'standard_name'", ",", "'air_temperature'", ")", "check_valid", "(", "var", ",", "'units'", ",", "units", ")", "assert_daily", "(", "var", ")" ]
33.833333
13.166667
def check_version(version, fallback="master"): """ Check that a version string is PEP440 compliant and there are no unreleased changes. For example, ``version = "0.1"`` will be returned as is but ``version = "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used by `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this version is 10 commits ahead of the last release. Parameters ---------- version : str A version string. fallback : str What to return if the version string has unreleased changes. Returns ------- version : str If *version* is PEP440 compliant and there are unreleased changes, then return *version*. Otherwise, return *fallback*. Raises ------ InvalidVersion If *version* is not PEP440 compliant. Examples -------- >>> check_version("0.1") '0.1' >>> check_version("0.1a10") '0.1a10' >>> check_version("0.1+111.9hdg36") 'master' >>> check_version("0.1+111.9hdg36", fallback="dev") 'dev' """ parse = Version(version) if parse.local is not None: return fallback return version
[ "def", "check_version", "(", "version", ",", "fallback", "=", "\"master\"", ")", ":", "parse", "=", "Version", "(", "version", ")", "if", "parse", ".", "local", "is", "not", "None", ":", "return", "fallback", "return", "version" ]
26.522727
24.886364
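
The check rests on packaging's Version.local: a PEP 440 local version segment (the part after +) is what versioneer uses to mark unreleased commits.

from packaging.version import Version

print(Version("0.1").local)             # None -> check_version returns "0.1"
print(Version("0.1+111.9hdg36").local)  # '111.9hdg36' -> check_version returns the fallback
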
def plot_sn_discovery_ratio_map(log, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, pathToOutputPlotFolder):
    """
    *Plot the SN discoveries and non-discoveries in a polar plot as function of redshift*

    **Key Arguments:**
        - ``log`` -- logger
        - ``snSurveyDiscoveryTimes`` --
        - ``redshifts`` --
        - ``peakAppMagList`` -- the list of peakmags for each SN in each filter
        - ``snCampaignLengthList`` -- a list of campaign lengths in each filter
        - ``extraSurveyConstraints`` --
        - ``pathToOutputPlotDirectory`` -- path to add plots to

    **Return:**
        - ``imageLink`` -- link to the output plot
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    import sys
    ## THIRD PARTY ##
    import matplotlib.pyplot as plt
    import numpy as np
    ## LOCAL APPLICATION ##
    import dryxPython.plotting as dp

    filters = ['g', 'r', 'i', 'z']
    faintMagLimit = extraSurveyConstraints['Faint-Limit of Peak Magnitude']

    ################ >ACTION(S) ################
    discovered = []
    tooFaint = []
    shortCampaign = []
    discoveredRedshift = []
    tooFaintRedshift = []
    notDiscoveredRedshift = []
    shortCampaignRedshift = []
    #log.info('len(redshifts) %s' % (len(redshifts),))
    dataDictionary = {}
    for item in range(len(redshifts)):
        if snSurveyDiscoveryTimes[item]['any'] is True:
            discoveryDayList = []
            faintDayList = []
            shortCampaignDayList = []
            for ffilter in filters:
                if snSurveyDiscoveryTimes[item][ffilter]:
                    if peakAppMagList[item][ffilter] < faintMagLimit:
                        if snCampaignLengthList[item]['max'] < extraSurveyConstraints['Observable for at least ? number of days']:
                            shortCampaignDayList.append(
                                snSurveyDiscoveryTimes[item][ffilter])
                        else:
                            discoveryDayList.append(
                                snSurveyDiscoveryTimes[item][ffilter])
                    else:
                        faintDayList.append(
                            snSurveyDiscoveryTimes[item][ffilter])
            if len(discoveryDayList) > 0:
                discovered.append(min(discoveryDayList))
                discoveredRedshift.append(redshifts[item])
            elif len(shortCampaignDayList) > 0:
                shortCampaign.append(min(shortCampaignDayList))
                shortCampaignRedshift.append(redshifts[item])
            else:
                tooFaint.append(min(faintDayList))
                tooFaintRedshift.append(redshifts[item])
        else:
            notDiscoveredRedshift.append(redshifts[item])

    if len(notDiscoveredRedshift) > 0:
        dataDictionary["Undiscovered"] = notDiscoveredRedshift
    if len(tooFaintRedshift) > 0:
        dataDictionary[
            "Detected - too faint to constrain as transient"] = tooFaintRedshift
    if len(discoveredRedshift) > 0:
        dataDictionary["Discovered"] = discoveredRedshift
    if len(shortCampaignRedshift) > 0:
        dataDictionary[
            "Detected - campaign to short to constrain as transient"] = shortCampaignRedshift

    maxInList = max(redshifts) * 1.1

    ################ >ACTION(S) ################
    imageLink = plot_polar(
        log,
        title="Redshift Map of transients Simulated within the Survey Volume",
        dataDictionary=dataDictionary,
        pathToOutputPlotsFolder=pathToOutputPlotFolder,
        dataRange=False,
        ylabel=False,
        radius=maxInList,
        circumference=False,
        circleTicksRange=(0, 360, 60),
        circleTicksLabels=".",
        prependNum=False)

    return imageLink
[ "def", "plot_sn_discovery_ratio_map", "(", "log", ",", "snSurveyDiscoveryTimes", ",", "redshifts", ",", "peakAppMagList", ",", "snCampaignLengthList", ",", "extraSurveyConstraints", ",", "pathToOutputPlotFolder", ")", ":", "################ > IMPORTS ################", "## STANDARD LIB ##", "import", "sys", "## THIRD PARTY ##", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "numpy", "as", "np", "## LOCAL APPLICATION ##", "import", "dryxPython", ".", "plotting", "as", "dp", "filters", "=", "[", "'g'", ",", "'r'", ",", "'i'", ",", "'z'", "]", "faintMagLimit", "=", "extraSurveyConstraints", "[", "'Faint-Limit of Peak Magnitude'", "]", "################ >ACTION(S) ################", "discovered", "=", "[", "]", "tooFaint", "=", "[", "]", "shortCampaign", "=", "[", "]", "discoveredRedshift", "=", "[", "]", "tooFaintRedshift", "=", "[", "]", "notDiscoveredRedshift", "=", "[", "]", "shortCampaignRedshift", "=", "[", "]", "#log.info('len(redshifts) %s' % (len(redshifts),))", "dataDictionary", "=", "{", "}", "for", "item", "in", "range", "(", "len", "(", "redshifts", ")", ")", ":", "if", "snSurveyDiscoveryTimes", "[", "item", "]", "[", "'any'", "]", "is", "True", ":", "discoveryDayList", "=", "[", "]", "faintDayList", "=", "[", "]", "shortCampaignDayList", "=", "[", "]", "for", "ffilter", "in", "filters", ":", "if", "snSurveyDiscoveryTimes", "[", "item", "]", "[", "ffilter", "]", ":", "if", "peakAppMagList", "[", "item", "]", "[", "ffilter", "]", "<", "faintMagLimit", ":", "if", "snCampaignLengthList", "[", "item", "]", "[", "'max'", "]", "<", "extraSurveyConstraints", "[", "'Observable for at least ? number of days'", "]", ":", "shortCampaignDayList", ".", "append", "(", "snSurveyDiscoveryTimes", "[", "item", "]", "[", "ffilter", "]", ")", "else", ":", "discoveryDayList", ".", "append", "(", "snSurveyDiscoveryTimes", "[", "item", "]", "[", "ffilter", "]", ")", "else", ":", "faintDayList", ".", "append", "(", "snSurveyDiscoveryTimes", "[", "item", "]", "[", "ffilter", "]", ")", "if", "len", "(", "discoveryDayList", ")", ">", "0", ":", "discovered", ".", "append", "(", "min", "(", "discoveryDayList", ")", ")", "discoveredRedshift", ".", "append", "(", "redshifts", "[", "item", "]", ")", "elif", "len", "(", "shortCampaignDayList", ")", ">", "0", ":", "shortCampaign", ".", "append", "(", "min", "(", "shortCampaignDayList", ")", ")", "shortCampaignRedshift", ".", "append", "(", "redshifts", "[", "item", "]", ")", "else", ":", "tooFaint", ".", "append", "(", "min", "(", "faintDayList", ")", ")", "tooFaintRedshift", ".", "append", "(", "redshifts", "[", "item", "]", ")", "else", ":", "notDiscoveredRedshift", ".", "append", "(", "redshifts", "[", "item", "]", ")", "if", "len", "(", "notDiscoveredRedshift", ")", ">", "0", ":", "dataDictionary", "[", "\"Undiscovered\"", "]", "=", "notDiscoveredRedshift", "if", "len", "(", "tooFaintRedshift", ")", ">", "0", ":", "dataDictionary", "[", "\"Detected - too faint to constrain as transient\"", "]", "=", "tooFaintRedshift", "if", "len", "(", "discoveredRedshift", ")", ">", "0", ":", "dataDictionary", "[", "\"Discovered\"", "]", "=", "discoveredRedshift", "if", "len", "(", "shortCampaignRedshift", ")", ">", "0", ":", "dataDictionary", "[", "\"Detected - campaign to short to constrain as transient\"", "]", "=", "shortCampaignRedshift", "maxInList", "=", "max", "(", "redshifts", ")", "*", "1.1", "################ >ACTION(S) ################", "imageLink", "=", "plot_polar", "(", "log", ",", "title", "=", "\"Redshift Map of transients Simulated within 
the Survey Volume\"", ",", "dataDictionary", "=", "dataDictionary", ",", "pathToOutputPlotsFolder", "=", "pathToOutputPlotFolder", ",", "dataRange", "=", "False", ",", "ylabel", "=", "False", ",", "radius", "=", "maxInList", ",", "circumference", "=", "False", ",", "circleTicksRange", "=", "(", "0", ",", "360", ",", "60", ")", ",", "circleTicksLabels", "=", "\".\"", ",", "prependNum", "=", "False", ")", "return", "imageLink" ]
38.029412
18.441176
def handle_joined(self, connection, event): """ Store join times for current nicknames when we first join. """ nicknames = [s.lstrip("@+") for s in event.arguments()[-1].split()] for nickname in nicknames: self.joined[nickname] = datetime.now()
[ "def", "handle_joined", "(", "self", ",", "connection", ",", "event", ")", ":", "nicknames", "=", "[", "s", ".", "lstrip", "(", "\"@+\"", ")", "for", "s", "in", "event", ".", "arguments", "(", ")", "[", "-", "1", "]", ".", "split", "(", ")", "]", "for", "nickname", "in", "nicknames", ":", "self", ".", "joined", "[", "nickname", "]", "=", "datetime", ".", "now", "(", ")" ]
41.428571
11.428571
def dim_iter(self, *dim_strides, **kwargs): """ Recursively iterate over the (dimension, stride) tuples specified in dim_strides, returning a tuple of dictionaries describing a dimension update. For example, the following call effectively produces 2 loops over the 'ntime' and 'nchan' dimensions in chunks of 10 and 4 respectively. .. code-block:: python for d in cube.dim_iter(('ntime', 10), ('nchan', 4)) cube.update_dimensions(d) Parameters ---------- *dim_strides: list list of (dimension, stride) tuples Returns ------- iterator Iterator producing dictionaries describing dimension updates. :code:`{'name':'ntime', 'lower_extent': 100, 'upper_extent': 200 }` """ # Extract dimension names dims = [ds[0] for ds in dim_strides] def _create_dim_dicts(*args): return tuple({ 'name': d, 'lower_extent': s, 'upper_extent': e } for (d, (s, e)) in args) # Return a tuple-dict-creating generator return (_create_dim_dicts(*zip(dims, s)) for s in self.endpoint_iter(*dim_strides, **kwargs))
[ "def", "dim_iter", "(", "self", ",", "*", "dim_strides", ",", "*", "*", "kwargs", ")", ":", "# Extract dimension names", "dims", "=", "[", "ds", "[", "0", "]", "for", "ds", "in", "dim_strides", "]", "def", "_create_dim_dicts", "(", "*", "args", ")", ":", "return", "tuple", "(", "{", "'name'", ":", "d", ",", "'lower_extent'", ":", "s", ",", "'upper_extent'", ":", "e", "}", "for", "(", "d", ",", "(", "s", ",", "e", ")", ")", "in", "args", ")", "# Return a tuple-dict-creating generator", "return", "(", "_create_dim_dicts", "(", "*", "zip", "(", "dims", ",", "s", ")", ")", "for", "s", "in", "self", ".", "endpoint_iter", "(", "*", "dim_strides", ",", "*", "*", "kwargs", ")", ")" ]
31.45
18.95
def _adjusted_mutual_info_score(reference_indices, estimated_indices): """Compute the mutual information between two sequence labelings, adjusted for chance. Parameters ---------- reference_indices : np.ndarray Array of reference indices estimated_indices : np.ndarray Array of estimated indices Returns ------- ami : float <= 1.0 Mutual information .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score and sklearn.metrics.cluster.expected_mutual_info_score """ n_samples = len(reference_indices) ref_classes = np.unique(reference_indices) est_classes = np.unique(estimated_indices) # Special limit cases: no clustering since the data is not split. # This is a perfect match hence return 1.0. if (ref_classes.shape[0] == est_classes.shape[0] == 1 or ref_classes.shape[0] == est_classes.shape[0] == 0): return 1.0 contingency = _contingency_matrix(reference_indices, estimated_indices).astype(float) # Calculate the MI for the two clusterings mi = _mutual_info_score(reference_indices, estimated_indices, contingency=contingency) # The following code is based on # sklearn.metrics.cluster.expected_mutual_information R, C = contingency.shape N = float(n_samples) a = np.sum(contingency, axis=1).astype(np.int32) b = np.sum(contingency, axis=0).astype(np.int32) # There are three major terms to the EMI equation, which are multiplied to # and then summed over varying nij values. # While nijs[0] will never be used, having it simplifies the indexing. nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float') # Stops divide by zero warnings. As it's not used, no issue. nijs[0] = 1 # term1 is nij / N term1 = nijs / N # term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b) # term2 uses the outer product log_ab_outer = np.log(np.outer(a, b)) # term2 uses N * nij log_Nnij = np.log(N * nijs) # term3 is large, and involves many factorials. Calculate these in log # space to stop overflows. gln_a = scipy.special.gammaln(a + 1) gln_b = scipy.special.gammaln(b + 1) gln_Na = scipy.special.gammaln(N - a + 1) gln_Nb = scipy.special.gammaln(N - b + 1) gln_N = scipy.special.gammaln(N + 1) gln_nij = scipy.special.gammaln(nijs + 1) # start and end values for nij terms for each summation. start = np.array([[v - N + w for w in b] for v in a], dtype='int') start = np.maximum(start, 1) end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1 # emi itself is a summation over the various values. emi = 0 for i in range(R): for j in range(C): for nij in range(start[i, j], end[i, j]): term2 = log_Nnij[nij] - log_ab_outer[i, j] # Numerators are positive, denominators are negative. gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] - gln_N - gln_nij[nij] - scipy.special.gammaln(a[i] - nij + 1) - scipy.special.gammaln(b[j] - nij + 1) - scipy.special.gammaln(N - a[i] - b[j] + nij + 1)) term3 = np.exp(gln) emi += (term1[nij] * term2 * term3) # Calculate entropy for each labeling h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices) ami = (mi - emi) / (max(h_true, h_pred) - emi) return ami
[ "def", "_adjusted_mutual_info_score", "(", "reference_indices", ",", "estimated_indices", ")", ":", "n_samples", "=", "len", "(", "reference_indices", ")", "ref_classes", "=", "np", ".", "unique", "(", "reference_indices", ")", "est_classes", "=", "np", ".", "unique", "(", "estimated_indices", ")", "# Special limit cases: no clustering since the data is not split.", "# This is a perfect match hence return 1.0.", "if", "(", "ref_classes", ".", "shape", "[", "0", "]", "==", "est_classes", ".", "shape", "[", "0", "]", "==", "1", "or", "ref_classes", ".", "shape", "[", "0", "]", "==", "est_classes", ".", "shape", "[", "0", "]", "==", "0", ")", ":", "return", "1.0", "contingency", "=", "_contingency_matrix", "(", "reference_indices", ",", "estimated_indices", ")", ".", "astype", "(", "float", ")", "# Calculate the MI for the two clusterings", "mi", "=", "_mutual_info_score", "(", "reference_indices", ",", "estimated_indices", ",", "contingency", "=", "contingency", ")", "# The following code is based on", "# sklearn.metrics.cluster.expected_mutual_information", "R", ",", "C", "=", "contingency", ".", "shape", "N", "=", "float", "(", "n_samples", ")", "a", "=", "np", ".", "sum", "(", "contingency", ",", "axis", "=", "1", ")", ".", "astype", "(", "np", ".", "int32", ")", "b", "=", "np", ".", "sum", "(", "contingency", ",", "axis", "=", "0", ")", ".", "astype", "(", "np", ".", "int32", ")", "# There are three major terms to the EMI equation, which are multiplied to", "# and then summed over varying nij values.", "# While nijs[0] will never be used, having it simplifies the indexing.", "nijs", "=", "np", ".", "arange", "(", "0", ",", "max", "(", "np", ".", "max", "(", "a", ")", ",", "np", ".", "max", "(", "b", ")", ")", "+", "1", ",", "dtype", "=", "'float'", ")", "# Stops divide by zero warnings. As its not used, no issue.", "nijs", "[", "0", "]", "=", "1", "# term1 is nij / N", "term1", "=", "nijs", "/", "N", "# term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)", "# term2 uses the outer product", "log_ab_outer", "=", "np", ".", "log", "(", "np", ".", "outer", "(", "a", ",", "b", ")", ")", "# term2 uses N * nij", "log_Nnij", "=", "np", ".", "log", "(", "N", "*", "nijs", ")", "# term3 is large, and involved many factorials. 
Calculate these in log", "# space to stop overflows.", "gln_a", "=", "scipy", ".", "special", ".", "gammaln", "(", "a", "+", "1", ")", "gln_b", "=", "scipy", ".", "special", ".", "gammaln", "(", "b", "+", "1", ")", "gln_Na", "=", "scipy", ".", "special", ".", "gammaln", "(", "N", "-", "a", "+", "1", ")", "gln_Nb", "=", "scipy", ".", "special", ".", "gammaln", "(", "N", "-", "b", "+", "1", ")", "gln_N", "=", "scipy", ".", "special", ".", "gammaln", "(", "N", "+", "1", ")", "gln_nij", "=", "scipy", ".", "special", ".", "gammaln", "(", "nijs", "+", "1", ")", "# start and end values for nij terms for each summation.", "start", "=", "np", ".", "array", "(", "[", "[", "v", "-", "N", "+", "w", "for", "w", "in", "b", "]", "for", "v", "in", "a", "]", ",", "dtype", "=", "'int'", ")", "start", "=", "np", ".", "maximum", "(", "start", ",", "1", ")", "end", "=", "np", ".", "minimum", "(", "np", ".", "resize", "(", "a", ",", "(", "C", ",", "R", ")", ")", ".", "T", ",", "np", ".", "resize", "(", "b", ",", "(", "R", ",", "C", ")", ")", ")", "+", "1", "# emi itself is a summation over the various values.", "emi", "=", "0", "for", "i", "in", "range", "(", "R", ")", ":", "for", "j", "in", "range", "(", "C", ")", ":", "for", "nij", "in", "range", "(", "start", "[", "i", ",", "j", "]", ",", "end", "[", "i", ",", "j", "]", ")", ":", "term2", "=", "log_Nnij", "[", "nij", "]", "-", "log_ab_outer", "[", "i", ",", "j", "]", "# Numerators are positive, denominators are negative.", "gln", "=", "(", "gln_a", "[", "i", "]", "+", "gln_b", "[", "j", "]", "+", "gln_Na", "[", "i", "]", "+", "gln_Nb", "[", "j", "]", "-", "gln_N", "-", "gln_nij", "[", "nij", "]", "-", "scipy", ".", "special", ".", "gammaln", "(", "a", "[", "i", "]", "-", "nij", "+", "1", ")", "-", "scipy", ".", "special", ".", "gammaln", "(", "b", "[", "j", "]", "-", "nij", "+", "1", ")", "-", "scipy", ".", "special", ".", "gammaln", "(", "N", "-", "a", "[", "i", "]", "-", "b", "[", "j", "]", "+", "nij", "+", "1", ")", ")", "term3", "=", "np", ".", "exp", "(", "gln", ")", "emi", "+=", "(", "term1", "[", "nij", "]", "*", "term2", "*", "term3", ")", "# Calculate entropy for each labeling", "h_true", ",", "h_pred", "=", "_entropy", "(", "reference_indices", ")", ",", "_entropy", "(", "estimated_indices", ")", "ami", "=", "(", "mi", "-", "emi", ")", "/", "(", "max", "(", "h_true", ",", "h_pred", ")", "-", "emi", ")", "return", "ami" ]
42.289157
17.963855
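The degenerate branch above is easy to exercise directly; a minimal usage sketch, assuming numpy and scipy are imported and the private helpers (_contingency_matrix, _mutual_info_score, _entropy) are in scope as in the surrounding module:

import numpy as np

# Both labelings collapse to a single cluster, so the early-return
# branch fires and 1.0 comes back without building a contingency table.
reference = np.array([0, 0, 0, 0])
estimated = np.array([1, 1, 1, 1])
assert _adjusted_mutual_info_score(reference, estimated) == 1.0

# A generic call; the result is a float bounded above by 1.0.
ref = np.array([0, 0, 1, 1, 2, 2])
est = np.array([0, 0, 1, 1, 1, 2])
ami = _adjusted_mutual_info_score(ref, est)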
def ji_windows(self, ij_win): # TODO: clarify which values ij_win accepts; the parameter name is not intuitive """For a given window, i.e. an element of :attr:`windows`, get the windows of all resolutions. Arguments: ij_win {int} -- The index specifying the window for which to return the resolution-windows. """ ji_windows = {} transform_src = self._layer_meta[self._res_indices[self._windows_res][0]]["transform"] for res in self._res_indices: transform_dst = self._layer_meta[self._res_indices[res][0]]["transform"] ji_windows[res] = window_from_window(window_src=self.windows[ij_win], transform_src=transform_src, transform_dst=transform_dst) return ji_windows
[ "def", "ji_windows", "(", "self", ",", "ij_win", ")", ":", "# TODO: clarify which values ij_win accepts; the parameter name is not intuitive", "ji_windows", "=", "{", "}", "transform_src", "=", "self", ".", "_layer_meta", "[", "self", ".", "_res_indices", "[", "self", ".", "_windows_res", "]", "[", "0", "]", "]", "[", "\"transform\"", "]", "for", "res", "in", "self", ".", "_res_indices", ":", "transform_dst", "=", "self", ".", "_layer_meta", "[", "self", ".", "_res_indices", "[", "res", "]", "[", "0", "]", "]", "[", "\"transform\"", "]", "ji_windows", "[", "res", "]", "=", "window_from_window", "(", "window_src", "=", "self", ".", "windows", "[", "ij_win", "]", ",", "transform_src", "=", "transform_src", ",", "transform_dst", "=", "transform_dst", ")", "return", "ji_windows" ]
59.714286
30.571429
def integer_key_convert(dictin, dropfailedkeys=False): # type: (DictUpperBound, bool) -> Dict """Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers """ return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
[ "def", "integer_key_convert", "(", "dictin", ",", "dropfailedkeys", "=", "False", ")", ":", "# type: (DictUpperBound, bool) -> Dict", "return", "key_value_convert", "(", "dictin", ",", "keyfn", "=", "int", ",", "dropfailedkeys", "=", "dropfailedkeys", ")" ]
35.692308
25.384615
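A short usage sketch, assuming key_value_convert applies keyfn to every key and, when dropfailedkeys is True, silently drops entries whose keys fail conversion:

integer_key_convert({'1': 'a', '2': 'b'})
# -> {1: 'a', 2: 'b'}
integer_key_convert({'1': 'a', 'x': 'b'}, dropfailedkeys=True)
# -> {1: 'a'}  ('x' cannot be converted to int, so the entry is dropped)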
def endpoint_delete(endpoint_id): """ Executor for `globus endpoint delete` """ client = get_client() res = client.delete_endpoint(endpoint_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
[ "def", "endpoint_delete", "(", "endpoint_id", ")", ":", "client", "=", "get_client", "(", ")", "res", "=", "client", ".", "delete_endpoint", "(", "endpoint_id", ")", "formatted_print", "(", "res", ",", "text_format", "=", "FORMAT_TEXT_RAW", ",", "response_key", "=", "\"message\"", ")" ]
33.571429
9.285714
def unhumanize_delay(delaystr): ''' Accept a string representing link propagation delay (e.g., '100 milliseconds' or '100 msec' or '100 millisec') and return a floating point number representing the delay in seconds. Recognizes: - us, usec, micros* all as microseconds - ms, msec, millisec* all as milliseconds - s, sec* as seconds Returns None on parse failure. ''' if isinstance(delaystr, float): return delaystr mobj = re.match('^\s*([\d\.]+)\s*(\w*)', delaystr) if not mobj: return None value, units = mobj.groups() value = float(value) if not units: divisor = 1.0 elif units == 'us' or units == 'usec' or units.startswith('micros'): divisor = 1e6 elif units == 'ms' or units == 'msec' or units.startswith('millis'): divisor = 1e3 elif units == 's' or units.startswith('sec'): divisor = 1.0 else: return None return value / divisor
[ "def", "unhumanize_delay", "(", "delaystr", ")", ":", "if", "isinstance", "(", "delaystr", ",", "float", ")", ":", "return", "delaystr", "mobj", "=", "re", ".", "match", "(", "'^\\s*([\\d\\.]+)\\s*(\\w*)'", ",", "delaystr", ")", "if", "not", "mobj", ":", "return", "None", "value", ",", "units", "=", "mobj", ".", "groups", "(", ")", "value", "=", "float", "(", "value", ")", "if", "not", "units", ":", "divisor", "=", "1.0", "elif", "units", "==", "'us'", "or", "units", "==", "'usec'", "or", "units", ".", "startswith", "(", "'micros'", ")", ":", "divisor", "=", "1e6", "elif", "units", "==", "'ms'", "or", "units", "==", "'msec'", "or", "units", ".", "startswith", "(", "'millis'", ")", ":", "divisor", "=", "1e3", "elif", "units", "==", "'s'", "or", "units", ".", "startswith", "(", "'sec'", ")", ":", "divisor", "=", "1.0", "else", ":", "return", "None", "return", "value", "/", "divisor" ]
30.935484
20.225806
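Worked examples of the parsing rules above; each value follows directly from the unit table and the divisor logic:

unhumanize_delay('100 msec')    # -> 0.1
unhumanize_delay('250 us')      # -> 0.00025
unhumanize_delay('1.5 sec')     # -> 1.5
unhumanize_delay('42')          # -> 42.0 (no units: treated as seconds)
unhumanize_delay('10 parsecs')  # -> None (unrecognized units)
unhumanize_delay(0.25)          # -> 0.25 (floats pass through unchanged)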
def _set_overlay_service_policy(self, v, load=False): """ Setter method for overlay_service_policy, mapped from YANG variable /overlay_transit/overlay_service_policy (list) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_service_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_service_policy() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("overlay_sp_direction overlay_sp_pmap_name",overlay_service_policy.overlay_service_policy, yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='overlay-sp-direction overlay-sp-pmap-name', extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}), is_container='list', yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """overlay_service_policy must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("overlay_sp_direction overlay_sp_pmap_name",overlay_service_policy.overlay_service_policy, yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='overlay-sp-direction overlay-sp-pmap-name', extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}), is_container='list', yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)""", }) self.__overlay_service_policy = t if hasattr(self, '_set'): self._set()
[ "def", "_set_overlay_service_policy", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"overlay_sp_direction overlay_sp_pmap_name\"", ",", "overlay_service_policy", ".", "overlay_service_policy", ",", "yang_name", "=", "\"overlay-service-policy\"", ",", "rest_name", "=", "\"overlay-service-policy\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'overlay-sp-direction overlay-sp-pmap-name'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Attach Overlay policy Map'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'OverlayServicePolicyCallPoint'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"overlay-service-policy\"", ",", "rest_name", "=", "\"overlay-service-policy\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Attach Overlay policy Map'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'OverlayServicePolicyCallPoint'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-overlay-policy'", ",", "defining_module", "=", "'brocade-overlay-policy'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"overlay_service_policy must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"overlay_sp_direction overlay_sp_pmap_name\",overlay_service_policy.overlay_service_policy, yang_name=\"overlay-service-policy\", rest_name=\"overlay-service-policy\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='overlay-sp-direction overlay-sp-pmap-name', extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}), is_container='list', yang_name=\"overlay-service-policy\", rest_name=\"overlay-service-policy\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'OverlayServicePolicyCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__overlay_service_policy", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", 
"(", ")" ]
142.863636
69.181818
def poll_parser(poll): """ Parses a poll object """ if __is_deleted(poll): return deleted_parser(poll) if poll['type'] not in poll_types: raise Exception('Not a poll type') return Poll( poll['id'], poll['by'], __check_key('kids', poll), # poll and pollopt differ in this property __check_key('parts', poll), # poll and pollopt differ in this property poll['score'], poll['text'], poll['time'], poll['title'], poll['type'], )
[ "def", "poll_parser", "(", "poll", ")", ":", "if", "__is_deleted", "(", "poll", ")", ":", "return", "deleted_parser", "(", "poll", ")", "if", "poll", "[", "'type'", "]", "not", "in", "poll_types", ":", "raise", "Exception", "(", "'Not a poll type'", ")", "return", "Poll", "(", "poll", "[", "'id'", "]", ",", "poll", "[", "'by'", "]", ",", "__check_key", "(", "'kids'", ",", "poll", ")", ",", "# poll and pollopt differ in this property", "__check_key", "(", "'parts'", ",", "poll", ")", ",", "# poll and pollopt differ in this property", "poll", "[", "'score'", "]", ",", "poll", "[", "'text'", "]", ",", "poll", "[", "'time'", "]", ",", "poll", "[", "'title'", "]", ",", "poll", "[", "'type'", "]", ",", ")" ]
27.263158
16.947368
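A hedged usage sketch; the input dictionary mirrors the keys the parser reads, and it assumes poll_types contains 'poll' and that Poll is the record type built by the constructor call above (field order as in the parser, not a confirmed API):

raw = {
    'id': 126809, 'by': 'pg', 'kids': [126822], 'parts': [126810, 126811],
    'score': 46, 'text': '', 'time': 1204403652,
    'title': 'Poll: College Degree?', 'type': 'poll',
}
poll = poll_parser(raw)  # -> Poll(126809, 'pg', [126822], ...)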
def validate_instance_username(self, username): ''' Validate instance username ''' # 1-16 alphanumeric characters - first character must be a letter - # cannot be a reserved MySQL word if re.match('[\w-]+$', username) is not None: if len(username) <= 16 and len(username) >= 1: if username[0].isalpha(): if username not in MYSQL_RESERVED_WORDS: return True return '*** Error: Usernames must be 1-16 alphanumeric characters, \ first a letter, cannot be reserved MySQL word.'
[ "def", "validate_instance_username", "(", "self", ",", "username", ")", ":", "# 1-16 alphanumeric characters - first character must be a letter -", "# cannot be a reserved MySQL word", "if", "re", ".", "match", "(", "'[\\w-]+$'", ",", "username", ")", "is", "not", "None", ":", "if", "len", "(", "username", ")", "<=", "16", "and", "len", "(", "username", ")", ">=", "1", ":", "if", "username", "[", "0", "]", ".", "isalpha", "(", ")", ":", "if", "username", "not", "in", "MYSQL_RESERVED_WORDS", ":", "return", "True", "return", "'*** Error: Usernames must be 1-16 alphanumeric characters, \\\n first a letter, cannot be reserved MySQL word.'" ]
53.272727
14.181818
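Illustrative calls on an instance v of the owning class; the reserved-word case assumes MYSQL_RESERVED_WORDS contains 'select' (hypothetical contents):

v.validate_instance_username('dbadmin')    # -> True
v.validate_instance_username('1badstart')  # -> error string (must start with a letter)
v.validate_instance_username('select')     # -> error string, if listed as reserved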
def submit_statement_request(meth, end_point, query_str='', data=None, tries=2, **params): """Even lower level function to make the request.""" full_end_point = 'statements/' + end_point.lstrip('/') return make_db_rest_request(meth, full_end_point, query_str, data, params, tries)
[ "def", "submit_statement_request", "(", "meth", ",", "end_point", ",", "query_str", "=", "''", ",", "data", "=", "None", ",", "tries", "=", "2", ",", "*", "*", "params", ")", ":", "full_end_point", "=", "'statements/'", "+", "end_point", ".", "lstrip", "(", "'/'", ")", "return", "make_db_rest_request", "(", "meth", ",", "full_end_point", ",", "query_str", ",", "data", ",", "params", ",", "tries", ")" ]
63.4
20.2
def main(): """ Change labels of the "minikube" node: - Add label "foo" with value "bar". This will overwrite the "foo" label if it already exists. - Remove the label "baz" from the node. """ config.load_kube_config() api_instance = client.CoreV1Api() body = { "metadata": { "labels": { "foo": "bar", "baz": None} } } api_response = api_instance.patch_node("minikube", body) pprint(api_response)
[ "def", "main", "(", ")", ":", "config", ".", "load_kube_config", "(", ")", "api_instance", "=", "client", ".", "CoreV1Api", "(", ")", "body", "=", "{", "\"metadata\"", ":", "{", "\"labels\"", ":", "{", "\"foo\"", ":", "\"bar\"", ",", "\"baz\"", ":", "None", "}", "}", "}", "api_response", "=", "api_instance", ".", "patch_node", "(", "\"minikube\"", ",", "body", ")", "pprint", "(", "api_response", ")" ]
21.347826
21.086957
def list_tar (archive, compression, cmd, verbosity, interactive): """List a TAR archive.""" cmdlist = [cmd, '-n'] add_star_opts(cmdlist, compression, verbosity) cmdlist.append("file=%s" % archive) return cmdlist
[ "def", "list_tar", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ")", ":", "cmdlist", "=", "[", "cmd", ",", "'-n'", "]", "add_star_opts", "(", "cmdlist", ",", "compression", ",", "verbosity", ")", "cmdlist", ".", "append", "(", "\"file=%s\"", "%", "archive", ")", "return", "cmdlist" ]
37.666667
12.166667
def interpolate(self, gmvs): """ :param gmvs: array of intensity measure levels :returns: (interpolated loss ratios, interpolated covs, indices > min) """ # gmvs are clipped to max(iml) gmvs_curve = numpy.piecewise( gmvs, [gmvs > self.imls[-1]], [self.imls[-1], lambda x: x]) idxs = gmvs_curve >= self.imls[0] # indices over the minimum gmvs_curve = gmvs_curve[idxs] return self._mlr_i1d(gmvs_curve), self._cov_for(gmvs_curve), idxs
[ "def", "interpolate", "(", "self", ",", "gmvs", ")", ":", "# gmvs are clipped to max(iml)", "gmvs_curve", "=", "numpy", ".", "piecewise", "(", "gmvs", ",", "[", "gmvs", ">", "self", ".", "imls", "[", "-", "1", "]", "]", ",", "[", "self", ".", "imls", "[", "-", "1", "]", ",", "lambda", "x", ":", "x", "]", ")", "idxs", "=", "gmvs_curve", ">=", "self", ".", "imls", "[", "0", "]", "# indices over the minimum", "gmvs_curve", "=", "gmvs_curve", "[", "idxs", "]", "return", "self", ".", "_mlr_i1d", "(", "gmvs_curve", ")", ",", "self", ".", "_cov_for", "(", "gmvs_curve", ")", ",", "idxs" ]
40.538462
14.692308
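The clipping step can be shown standalone; a sketch of what numpy.piecewise does here, with hypothetical imls values:

import numpy as np

imls = np.array([0.1, 0.3, 0.5])   # hypothetical intensity measure levels
gmvs = np.array([0.05, 0.2, 0.9])
clipped = np.piecewise(gmvs, [gmvs > imls[-1]], [imls[-1], lambda x: x])
# clipped -> [0.05, 0.2, 0.5]: values above max(iml) are pinned to it
idxs = clipped >= imls[0]
# idxs -> [False, True, True]: only values at or above min(iml) survive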
def run_python_module(modulename, args): """Run a python module, as though with ``python -m name args...``. `modulename` is the name of the module, possibly a dot-separated name. `args` is the argument array to present as sys.argv, including the first element naming the module being executed. """ openfile = None glo, loc = globals(), locals() try: try: # Search for the module - inside its parent package, if any - using # standard import mechanics. if '.' in modulename: packagename, name = rsplit1(modulename, '.') package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ else: packagename, name = None, modulename searchpath = None # "top-level search" in imp.find_module() openfile, pathname, _ = imp.find_module(name, searchpath) # Complain if this is a magic non-file module. if openfile is None and pathname is None: raise NoSource( "module does not live in a file: %r" % modulename ) # If `modulename` is actually a package, not a mere module, then we # pretend to be Python 2.7 and try running its __main__.py script. if openfile is None: packagename = modulename name = '__main__' package = __import__(packagename, glo, loc, ['__path__']) searchpath = package.__path__ openfile, pathname, _ = imp.find_module(name, searchpath) except ImportError: _, err, _ = sys.exc_info() raise NoSource(str(err)) finally: if openfile: openfile.close() # Finally, hand the file off to run_python_file for execution. pathname = os.path.abspath(pathname) args[0] = pathname run_python_file(pathname, args, package=packagename)
[ "def", "run_python_module", "(", "modulename", ",", "args", ")", ":", "openfile", "=", "None", "glo", ",", "loc", "=", "globals", "(", ")", ",", "locals", "(", ")", "try", ":", "try", ":", "# Search for the module - inside its parent package, if any - using", "# standard import mechanics.", "if", "'.'", "in", "modulename", ":", "packagename", ",", "name", "=", "rsplit1", "(", "modulename", ",", "'.'", ")", "package", "=", "__import__", "(", "packagename", ",", "glo", ",", "loc", ",", "[", "'__path__'", "]", ")", "searchpath", "=", "package", ".", "__path__", "else", ":", "packagename", ",", "name", "=", "None", ",", "modulename", "searchpath", "=", "None", "# \"top-level search\" in imp.find_module()", "openfile", ",", "pathname", ",", "_", "=", "imp", ".", "find_module", "(", "name", ",", "searchpath", ")", "# Complain if this is a magic non-file module.", "if", "openfile", "is", "None", "and", "pathname", "is", "None", ":", "raise", "NoSource", "(", "\"module does not live in a file: %r\"", "%", "modulename", ")", "# If `modulename` is actually a package, not a mere module, then we", "# pretend to be Python 2.7 and try running its __main__.py script.", "if", "openfile", "is", "None", ":", "packagename", "=", "modulename", "name", "=", "'__main__'", "package", "=", "__import__", "(", "packagename", ",", "glo", ",", "loc", ",", "[", "'__path__'", "]", ")", "searchpath", "=", "package", ".", "__path__", "openfile", ",", "pathname", ",", "_", "=", "imp", ".", "find_module", "(", "name", ",", "searchpath", ")", "except", "ImportError", ":", "_", ",", "err", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "raise", "NoSource", "(", "str", "(", "err", ")", ")", "finally", ":", "if", "openfile", ":", "openfile", ".", "close", "(", ")", "# Finally, hand the file off to run_python_file for execution.", "pathname", "=", "os", ".", "path", ".", "abspath", "(", "pathname", ")", "args", "[", "0", "]", "=", "pathname", "run_python_file", "(", "pathname", ",", "args", ",", "package", "=", "packagename", ")" ]
40.8125
19.916667
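A hedged invocation sketch; the module name and flag are hypothetical, and note that args[0] is overwritten with the resolved file path before execution, per the last lines above:

# Emulates `python -m mypkg.tool --verbose` (hypothetical module and flag)
run_python_module('mypkg.tool', ['mypkg.tool', '--verbose'])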
def gcj02tobd09(lng, lat): """ Convert Mars coordinates (GCJ-02) to Baidu coordinates (BD-09) Google/AMap --> Baidu :param lng: GCJ-02 longitude :param lat: GCJ-02 latitude :return: """ z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi) theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi) bd_lng = z * math.cos(theta) + 0.0065 bd_lat = z * math.sin(theta) + 0.006 return [bd_lng, bd_lat]
[ "def", "gcj02tobd09", "(", "lng", ",", "lat", ")", ":", "z", "=", "math", ".", "sqrt", "(", "lng", "*", "lng", "+", "lat", "*", "lat", ")", "+", "0.00002", "*", "math", ".", "sin", "(", "lat", "*", "x_pi", ")", "theta", "=", "math", ".", "atan2", "(", "lat", ",", "lng", ")", "+", "0.000003", "*", "math", ".", "cos", "(", "lng", "*", "x_pi", ")", "bd_lng", "=", "z", "*", "math", ".", "cos", "(", "theta", ")", "+", "0.0065", "bd_lat", "=", "z", "*", "math", ".", "sin", "(", "theta", ")", "+", "0.006", "return", "[", "bd_lng", ",", "bd_lat", "]" ]
29.615385
14.538462
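A usage sketch with Beijing-area coordinates; it assumes the module defines x_pi as the conventional BD-09 constant pi * 3000 / 180 (not confirmed here):

bd_lng, bd_lat = gcj02tobd09(116.404, 39.915)
# Each output is offset by roughly 0.006-0.0065 degrees from the input.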
def diff_config(base, target): '''Find the differences between two configurations. This finds a delta configuration from `base` to `target`, such that calling :func:`overlay_config` with `base` and the result of this function yields `target`. This works as follows: * If both are identical (of any type), returns an empty dictionary. * If either isn't a dictionary, returns `target`. * Any key in `target` not present in `base` is included in the output with its value from `target`. * Any key in `base` not present in `target` is included in the output with value :const:`None`. * Any keys present in both dictionaries are recursively merged. >>> diff_config({'a': 'b'}, {}) {'a': None} >>> diff_config({'a': 'b'}, {'a': 'b', 'c': 'd'}) {'c': 'd'} :param dict base: original configuration :param dict target: new configuration :return: overlay configuration :rtype: dict ''' if not isinstance(base, collections.Mapping): if base == target: return {} return target if not isinstance(target, collections.Mapping): return target result = dict() for k in iterkeys(base): if k not in target: result[k] = None for k, v in iteritems(target): if k in base: merged = diff_config(base[k], v) if merged != {}: result[k] = merged else: result[k] = v return result
[ "def", "diff_config", "(", "base", ",", "target", ")", ":", "if", "not", "isinstance", "(", "base", ",", "collections", ".", "Mapping", ")", ":", "if", "base", "==", "target", ":", "return", "{", "}", "return", "target", "if", "not", "isinstance", "(", "target", ",", "collections", ".", "Mapping", ")", ":", "return", "target", "result", "=", "dict", "(", ")", "for", "k", "in", "iterkeys", "(", "base", ")", ":", "if", "k", "not", "in", "target", ":", "result", "[", "k", "]", "=", "None", "for", "k", ",", "v", "in", "iteritems", "(", "target", ")", ":", "if", "k", "in", "base", ":", "merged", "=", "diff_config", "(", "base", "[", "k", "]", ",", "v", ")", "if", "merged", "!=", "{", "}", ":", "result", "[", "k", "]", "=", "merged", "else", ":", "result", "[", "k", "]", "=", "v", "return", "result" ]
33
19.136364
def table(name=None, mode='create', use_cache=True, priority='interactive', allow_large_results=False): """ Construct a query output object where the result is a table Args: name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a name to be specified (default False). """ output = QueryOutput() output._output_type = 'table' output._table_name = name output._table_mode = mode output._use_cache = use_cache output._priority = priority output._allow_large_results = allow_large_results return output
[ "def", "table", "(", "name", "=", "None", ",", "mode", "=", "'create'", ",", "use_cache", "=", "True", ",", "priority", "=", "'interactive'", ",", "allow_large_results", "=", "False", ")", ":", "output", "=", "QueryOutput", "(", ")", "output", ".", "_output_type", "=", "'table'", "output", ".", "_table_name", "=", "name", "output", ".", "_table_mode", "=", "mode", "output", ".", "_use_cache", "=", "use_cache", "output", ".", "_priority", "=", "priority", "output", ".", "_allow_large_results", "=", "allow_large_results", "return", "output" ]
50.4
23.44
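Typical construction, sketched; the resulting QueryOutput is presumably handed to a query-execution call elsewhere in the library:

out = table('mydataset.results', mode='append', allow_large_results=True)
# A name must be given here, since allow_large_results requires one.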
def find_file_format(file_name): """ Returns a tuple with the file path and format found, or (None, None) """ for file_format in Format.ALLOWED: file_path = '.'.join((file_name, file_format)) if os.path.exists(file_path): return file_path, file_format return None, None
[ "def", "find_file_format", "(", "file_name", ")", ":", "for", "file_format", "in", "Format", ".", "ALLOWED", ":", "file_path", "=", "'.'", ".", "join", "(", "(", "file_name", ",", "file_format", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "return", "file_path", ",", "file_format", "return", "None", ",", "None" ]
37.888889
9.444444
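A sketch assuming Format.ALLOWED is a sequence of extensions such as ('yaml', 'json') (hypothetical contents):

path, fmt = find_file_format('config')
if path is None:
    raise FileNotFoundError('no config.<ext> file found')
# e.g. path == 'config.yaml' and fmt == 'yaml' if that file exists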
def create_nio(self, node, nio_settings): """ Creates a new NIO. :param node: Dynamips node instance :param nio_settings: information to create the NIO :returns: a NIO object """ nio = None if nio_settings["type"] == "nio_udp": lport = nio_settings["lport"] rhost = nio_settings["rhost"] rport = nio_settings["rport"] try: info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE) if not info: raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport)) for res in info: af, socktype, proto, _, sa = res with socket.socket(af, socktype, proto) as sock: sock.connect(sa) except OSError as e: raise DynamipsError("Could not create a UDP connection to {}:{}: {}".format(rhost, rport, e)) nio = NIOUDP(node, lport, rhost, rport, nio_settings.get("filters", {})) elif nio_settings["type"] == "nio_generic_ethernet": ethernet_device = nio_settings["ethernet_device"] if sys.platform.startswith("win"): # replace the interface name by the GUID on Windows windows_interfaces = interfaces() npf_interface = None for interface in windows_interfaces: if interface["name"] == ethernet_device: npf_interface = interface["id"] if not npf_interface: raise DynamipsError("Could not find interface {} on this host".format(ethernet_device)) else: ethernet_device = npf_interface if not is_interface_up(ethernet_device): raise aiohttp.web.HTTPConflict(text="Ethernet interface {} is down".format(ethernet_device)) nio = NIOGenericEthernet(node.hypervisor, ethernet_device) elif nio_settings["type"] == "nio_linux_ethernet": if sys.platform.startswith("win"): raise DynamipsError("This NIO type is not supported on Windows") ethernet_device = nio_settings["ethernet_device"] nio = NIOLinuxEthernet(node.hypervisor, ethernet_device) elif nio_settings["type"] == "nio_tap": tap_device = nio_settings["tap_device"] nio = NIOTAP(node.hypervisor, tap_device) if not is_interface_up(tap_device): # test after the TAP interface has been created (if it doesn't exist yet) raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device)) elif nio_settings["type"] == "nio_unix": local_file = nio_settings["local_file"] remote_file = nio_settings["remote_file"] nio = NIOUNIX(node.hypervisor, local_file, remote_file) elif nio_settings["type"] == "nio_vde": control_file = nio_settings["control_file"] local_file = nio_settings["local_file"] nio = NIOVDE(node.hypervisor, control_file, local_file) elif nio_settings["type"] == "nio_null": nio = NIONull(node.hypervisor) else: raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"])) yield from nio.create() return nio
[ "def", "create_nio", "(", "self", ",", "node", ",", "nio_settings", ")", ":", "nio", "=", "None", "if", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_udp\"", ":", "lport", "=", "nio_settings", "[", "\"lport\"", "]", "rhost", "=", "nio_settings", "[", "\"rhost\"", "]", "rport", "=", "nio_settings", "[", "\"rport\"", "]", "try", ":", "info", "=", "socket", ".", "getaddrinfo", "(", "rhost", ",", "rport", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_DGRAM", ",", "0", ",", "socket", ".", "AI_PASSIVE", ")", "if", "not", "info", ":", "raise", "DynamipsError", "(", "\"getaddrinfo returns an empty list on {}:{}\"", ".", "format", "(", "rhost", ",", "rport", ")", ")", "for", "res", "in", "info", ":", "af", ",", "socktype", ",", "proto", ",", "_", ",", "sa", "=", "res", "with", "socket", ".", "socket", "(", "af", ",", "socktype", ",", "proto", ")", "as", "sock", ":", "sock", ".", "connect", "(", "sa", ")", "except", "OSError", "as", "e", ":", "raise", "DynamipsError", "(", "\"Could not create an UDP connection to {}:{}: {}\"", ".", "format", "(", "rhost", ",", "rport", ",", "e", ")", ")", "nio", "=", "NIOUDP", "(", "node", ",", "lport", ",", "rhost", ",", "rport", ",", "nio_settings", ".", "get", "(", "\"filters\"", ",", "{", "}", ")", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_generic_ethernet\"", ":", "ethernet_device", "=", "nio_settings", "[", "\"ethernet_device\"", "]", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "# replace the interface name by the GUID on Windows", "windows_interfaces", "=", "interfaces", "(", ")", "npf_interface", "=", "None", "for", "interface", "in", "windows_interfaces", ":", "if", "interface", "[", "\"name\"", "]", "==", "ethernet_device", ":", "npf_interface", "=", "interface", "[", "\"id\"", "]", "if", "not", "npf_interface", ":", "raise", "DynamipsError", "(", "\"Could not find interface {} on this host\"", ".", "format", "(", "ethernet_device", ")", ")", "else", ":", "ethernet_device", "=", "npf_interface", "if", "not", "is_interface_up", "(", "ethernet_device", ")", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "\"Ethernet interface {} is down\"", ".", "format", "(", "ethernet_device", ")", ")", "nio", "=", "NIOGenericEthernet", "(", "node", ".", "hypervisor", ",", "ethernet_device", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_linux_ethernet\"", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "raise", "DynamipsError", "(", "\"This NIO type is not supported on Windows\"", ")", "ethernet_device", "=", "nio_settings", "[", "\"ethernet_device\"", "]", "nio", "=", "NIOLinuxEthernet", "(", "node", ".", "hypervisor", ",", "ethernet_device", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_tap\"", ":", "tap_device", "=", "nio_settings", "[", "\"tap_device\"", "]", "nio", "=", "NIOTAP", "(", "node", ".", "hypervisor", ",", "tap_device", ")", "if", "not", "is_interface_up", "(", "tap_device", ")", ":", "# test after the TAP interface has been created (if it doesn't exist yet)", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "\"TAP interface {} is down\"", ".", "format", "(", "tap_device", ")", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_unix\"", ":", "local_file", "=", "nio_settings", "[", "\"local_file\"", "]", "remote_file", "=", "nio_settings", "[", "\"remote_file\"", "]", "nio", "=", "NIOUNIX", "(", "node", ".", "hypervisor", ",", "local_file", ",", 
"remote_file", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_vde\"", ":", "control_file", "=", "nio_settings", "[", "\"control_file\"", "]", "local_file", "=", "nio_settings", "[", "\"local_file\"", "]", "nio", "=", "NIOVDE", "(", "node", ".", "hypervisor", ",", "control_file", ",", "local_file", ")", "elif", "nio_settings", "[", "\"type\"", "]", "==", "\"nio_null\"", ":", "nio", "=", "NIONull", "(", "node", ".", "hypervisor", ")", "else", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "\"NIO of type {} is not supported\"", ".", "format", "(", "nio_settings", "[", "\"type\"", "]", ")", ")", "yield", "from", "nio", ".", "create", "(", ")", "return", "nio" ]
50.294118
21.205882
def _visit_functiondef(self, cls, node, parent): """visit a FunctionDef node to become astroid""" self._global_names.append({}) node, doc = self._get_doc(node) newnode = cls(node.name, doc, node.lineno, node.col_offset, parent) if node.decorator_list: decorators = self.visit_decorators(node, newnode) else: decorators = None if PY3 and node.returns: returns = self.visit(node.returns, newnode) else: returns = None type_comment_args = type_comment_returns = None type_comment_annotation = self.check_function_type_comment(node) if type_comment_annotation: type_comment_returns, type_comment_args = type_comment_annotation newnode.postinit( args=self.visit(node.args, newnode), body=[self.visit(child, newnode) for child in node.body], decorators=decorators, returns=returns, type_comment_returns=type_comment_returns, type_comment_args=type_comment_args, ) self._global_names.pop() return newnode
[ "def", "_visit_functiondef", "(", "self", ",", "cls", ",", "node", ",", "parent", ")", ":", "self", ".", "_global_names", ".", "append", "(", "{", "}", ")", "node", ",", "doc", "=", "self", ".", "_get_doc", "(", "node", ")", "newnode", "=", "cls", "(", "node", ".", "name", ",", "doc", ",", "node", ".", "lineno", ",", "node", ".", "col_offset", ",", "parent", ")", "if", "node", ".", "decorator_list", ":", "decorators", "=", "self", ".", "visit_decorators", "(", "node", ",", "newnode", ")", "else", ":", "decorators", "=", "None", "if", "PY3", "and", "node", ".", "returns", ":", "returns", "=", "self", ".", "visit", "(", "node", ".", "returns", ",", "newnode", ")", "else", ":", "returns", "=", "None", "type_comment_args", "=", "type_comment_returns", "=", "None", "type_comment_annotation", "=", "self", ".", "check_function_type_comment", "(", "node", ")", "if", "type_comment_annotation", ":", "type_comment_returns", ",", "type_comment_args", "=", "type_comment_annotation", "newnode", ".", "postinit", "(", "args", "=", "self", ".", "visit", "(", "node", ".", "args", ",", "newnode", ")", ",", "body", "=", "[", "self", ".", "visit", "(", "child", ",", "newnode", ")", "for", "child", "in", "node", ".", "body", "]", ",", "decorators", "=", "decorators", ",", "returns", "=", "returns", ",", "type_comment_returns", "=", "type_comment_returns", ",", "type_comment_args", "=", "type_comment_args", ",", ")", "self", ".", "_global_names", ".", "pop", "(", ")", "return", "newnode" ]
40.142857
16.321429
def save(yaml_dict, filepath): ''' Save YAML settings to the specified file path. ''' with open(filepath, 'w') as settings_file: yamldict.dump(yaml_dict, settings_file, default_flow_style=False)
[ "def", "save", "(", "yaml_dict", ",", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "'w'", ")", "as", "settings_file", ":", "yamldict", ".", "dump", "(", "yaml_dict", ",", "settings_file", ",", "default_flow_style", "=", "False", ")" ]
33.8
24.2
def _get_resource_id_from_stack(cfn_client, stack_name, logical_id): """ Given the LogicalID of a resource, call AWS CloudFormation to get physical ID of the resource within the specified stack. Parameters ---------- cfn_client CloudFormation client provided by AWS SDK stack_name : str Name of the stack to query logical_id : str LogicalId of the resource Returns ------- str Physical ID of the resource Raises ------ samcli.commands.exceptions.UserException If the stack or resource does not exist """ LOG.debug("Getting resource's PhysicalId from AWS CloudFormation stack. StackName=%s, LogicalId=%s", stack_name, logical_id) try: response = cfn_client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logical_id) LOG.debug("Response from AWS CloudFormation %s", response) return response["StackResourceDetail"]["PhysicalResourceId"] except botocore.exceptions.ClientError as ex: LOG.debug("Unable to fetch resource name from CloudFormation Stack: " "StackName=%s, ResourceLogicalId=%s, Response=%s", stack_name, logical_id, ex.response) # The exception message already has a well formatted error message that we can surface to user raise UserException(str(ex))
[ "def", "_get_resource_id_from_stack", "(", "cfn_client", ",", "stack_name", ",", "logical_id", ")", ":", "LOG", ".", "debug", "(", "\"Getting resource's PhysicalId from AWS CloudFormation stack. StackName=%s, LogicalId=%s\"", ",", "stack_name", ",", "logical_id", ")", "try", ":", "response", "=", "cfn_client", ".", "describe_stack_resource", "(", "StackName", "=", "stack_name", ",", "LogicalResourceId", "=", "logical_id", ")", "LOG", ".", "debug", "(", "\"Response from AWS CloudFormation %s\"", ",", "response", ")", "return", "response", "[", "\"StackResourceDetail\"", "]", "[", "\"PhysicalResourceId\"", "]", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "ex", ":", "LOG", ".", "debug", "(", "\"Unable to fetch resource name from CloudFormation Stack: \"", "\"StackName=%s, ResourceLogicalId=%s, Response=%s\"", ",", "stack_name", ",", "logical_id", ",", "ex", ".", "response", ")", "# The exception message already has a well formatted error message that we can surface to user", "raise", "UserException", "(", "str", "(", "ex", ")", ")" ]
34.857143
28.380952
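A usage sketch with a real boto3 CloudFormation client; the stack name and logical ID are hypothetical:

import boto3

cfn = boto3.client('cloudformation')
physical_id = _get_resource_id_from_stack(cfn, 'my-sam-app', 'HelloWorldFunction')
# -> e.g. the deployed Lambda function name behind that logical resource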
def dump(self): '''Regurgitate the tables and rows''' for table in self.tables: print("*** %s ***" % table.name) table.dump()
[ "def", "dump", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "print", "(", "\"*** %s ***\"", "%", "table", ".", "name", ")", "table", ".", "dump", "(", ")" ]
32.2
11.4
def setup(self, environ): '''Called once only to setup the WSGI application handler. Check :ref:`lazy wsgi handler <wsgi-lazy-handler>` section for further information. ''' request = wsgi_request(environ) cfg = request.cache.cfg loop = request.cache._loop self.store = create_store(cfg.data_store, loop=loop) pubsub = self.store.pubsub(protocol=Protocol()) channel = '%s_webchat' % self.name ensure_future(pubsub.subscribe(channel), loop=loop) return WsgiHandler([Router('/', get=self.home_page), WebSocket('/message', Chat(pubsub, channel)), Router('/rpc', post=Rpc(pubsub, channel), response_content_types=JSON_CONTENT_TYPES)], [AsyncResponseMiddleware, GZipMiddleware(min_length=20)])
[ "def", "setup", "(", "self", ",", "environ", ")", ":", "request", "=", "wsgi_request", "(", "environ", ")", "cfg", "=", "request", ".", "cache", ".", "cfg", "loop", "=", "request", ".", "cache", ".", "_loop", "self", ".", "store", "=", "create_store", "(", "cfg", ".", "data_store", ",", "loop", "=", "loop", ")", "pubsub", "=", "self", ".", "store", ".", "pubsub", "(", "protocol", "=", "Protocol", "(", ")", ")", "channel", "=", "'%s_webchat'", "%", "self", ".", "name", "ensure_future", "(", "pubsub", ".", "subscribe", "(", "channel", ")", ",", "loop", "=", "loop", ")", "return", "WsgiHandler", "(", "[", "Router", "(", "'/'", ",", "get", "=", "self", ".", "home_page", ")", ",", "WebSocket", "(", "'/message'", ",", "Chat", "(", "pubsub", ",", "channel", ")", ")", ",", "Router", "(", "'/rpc'", ",", "post", "=", "Rpc", "(", "pubsub", ",", "channel", ")", ",", "response_content_types", "=", "JSON_CONTENT_TYPES", ")", "]", ",", "[", "AsyncResponseMiddleware", ",", "GZipMiddleware", "(", "min_length", "=", "20", ")", "]", ")" ]
48
18.526316
def evaluate_at(self, *args, **parameter_specification): # pragma: no cover """ Evaluate the function at the given x (and optionally y, z) for the provided parameters, explicitly provided as part of the parameter_specification keywords. :param *args: coordinates at which to evaluate the function :param **parameter_specification: parameter name/value pairs to set before evaluating :return: the function value at the given coordinates """ # Set the parameters to the provided values for parameter in parameter_specification: self._get_child(parameter).value = parameter_specification[parameter] return self(*args)
[ "def", "evaluate_at", "(", "self", ",", "*", "args", ",", "*", "*", "parameter_specification", ")", ":", "# pragma: no cover", "# Set the parameters to the provided values", "for", "parameter", "in", "parameter_specification", ":", "self", ".", "_get_child", "(", "parameter", ")", ".", "value", "=", "parameter_specification", "[", "parameter", "]", "return", "self", "(", "*", "args", ")" ]
33.625
24.375
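A usage sketch on a hypothetical composite model m with a parameter named 'slope':

y = m.evaluate_at(2.0, slope=3.5)
# Equivalent to: m._get_child('slope').value = 3.5; y = m(2.0)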
def image(request, obj_id): """Handles a request based on method and calls the appropriate function""" obj = Image.objects.get(pk=obj_id) if request.method == 'POST': return post(request, obj) elif request.method == 'PUT': getPutData(request) return put(request, obj) elif request.method == 'DELETE': getPutData(request) return delete(request, obj)
[ "def", "image", "(", "request", ",", "obj_id", ")", ":", "obj", "=", "Image", ".", "objects", ".", "get", "(", "pk", "=", "obj_id", ")", "if", "request", ".", "method", "==", "'POST'", ":", "return", "post", "(", "request", ",", "obj", ")", "elif", "request", ".", "method", "==", "'PUT'", ":", "getPutData", "(", "request", ")", "return", "put", "(", "request", ",", "obj", ")", "elif", "request", ".", "method", "==", "'DELETE'", ":", "getPutData", "(", "request", ")", "return", "delete", "(", "request", ",", "obj", ")" ]
36.181818
7.272727
def remove_images(self, images): """ Remove images from the album. :param images: A list of the images we want to remove from the album. Can be Image objects, ids or a combination of the two. Images that you cannot remove (non-existing, not owned by you or not part of album) will not cause exceptions, but fail silently. """ url = (self._imgur._base_url + "/3/album/{0}/" "remove_images".format(self._delete_or_id_hash)) # NOTE: Returns True and everything seem to be as it should in testing. # Seems most likely to be upstream bug. params = {'ids': images} return self._imgur._send_request(url, params=params, method="DELETE")
[ "def", "remove_images", "(", "self", ",", "images", ")", ":", "url", "=", "(", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/album/{0}/\"", "\"remove_images\"", ".", "format", "(", "self", ".", "_delete_or_id_hash", ")", ")", "# NOTE: Returns True and everything seem to be as it should in testing.", "# Seems most likely to be upstream bug.", "params", "=", "{", "'ids'", ":", "images", "}", "return", "self", ".", "_imgur", ".", "_send_request", "(", "url", ",", "params", "=", "params", ",", "method", "=", "\"DELETE\"", ")" ]
49.2
20.933333
def name(self, *args): ''' get/set the descriptive name text of this object. ''' if len(args): self.__name = args[0] else: return self.__name
[ "def", "name", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", ":", "self", ".", "__name", "=", "args", "[", "0", "]", "else", ":", "return", "self", ".", "__name" ]
24.75
19.5
def _createConnection(self, connections): """ Method to create GSSHAPY Connection objects """ for c in connections: # Create GSSHAPY Connection object connection = Connection(slinkNumber=c['slinkNumber'], upSjuncNumber=c['upSjunc'], downSjuncNumber=c['downSjunc']) # Associate Connection with StormPipeNetworkFile connection.stormPipeNetworkFile = self
[ "def", "_createConnection", "(", "self", ",", "connections", ")", ":", "for", "c", "in", "connections", ":", "# Create GSSHAPY Connection object", "connection", "=", "Connection", "(", "slinkNumber", "=", "c", "[", "'slinkNumber'", "]", ",", "upSjuncNumber", "=", "c", "[", "'upSjunc'", "]", ",", "downSjuncNumber", "=", "c", "[", "'downSjunc'", "]", ")", "# Associate Connection with StormPipeNetworkFile", "connection", ".", "stormPipeNetworkFile", "=", "self" ]
37.769231
16.230769
def get_vulnerabilities(self, teams=None, applications=None, channel_types=None, start_date=None, end_date=None, generic_severities=None, generic_vulnerabilities=None, number_merged=None, number_vulnerabilities=None, parameter=None, path=None, show_open=None, show_closed=None, show_defect_open=None, show_defect_closed=None, show_defect_present=None, show_defect_not_present=None, show_false_positive=None, show_hidden=None): """ Returns filtered list of vulnerabilities. :param teams: List of team ids. :param applications: List of application ids. :param channel_types: List of scanner names. :param start_date: Lower bound on scan dates. :param end_date: Upper bound on scan dates. :param generic_severities: List of generic severity values. :param generic_vulnerabilities: List of generic vulnerability ids. :param number_merged: Number of vulnerabilities merged from different scans. :param number_vulnerabilities: Number of vulnerabilities to return. :param parameter: Application input that the vulnerability affects. :param path: Path to the web page where the vulnerability was found. :param show_open: Flag to show all open vulnerabilities. :param show_closed: Flag to show all closed vulnerabilities. :param show_defect_open: Flag to show any vulnerabilities with open defects. :param show_defect_closed: Flag to show any vulnerabilities with closed defects. :param show_defect_present: Flag to show any vulnerabilities with a defect. :param show_defect_not_present: Flag to show any vulnerabilities without a defect. :param show_false_positive: Flag to show any false positives from vulnerabilities. :param show_hidden: Flag to show all hidden vulnerabilities. """ params = {} # Build parameter list if teams: params.update(self._build_list_params('teams', 'id', teams)) if applications: params.update(self._build_list_params('applications', 'id', applications)) if channel_types: params.update(self._build_list_params('channelTypes', 'name', channel_types)) if start_date: params['startDate'] = start_date if end_date: params['endDate'] = end_date if generic_severities: params.update(self._build_list_params('genericSeverities', 'intValue', generic_severities)) if generic_vulnerabilities: params.update(self._build_list_params('genericVulnerabilities', 'id', generic_vulnerabilities)) if number_merged: params['numberMerged'] = number_merged if number_vulnerabilities: params['numberVulnerabilities'] = number_vulnerabilities if parameter: params['parameter'] = parameter if path: params['path'] = path if show_open: params['showOpen'] = show_open if show_closed: params['showClosed'] = show_closed if show_defect_open: params['showDefectOpen'] = show_defect_open if show_defect_closed: params['showDefectClosed'] = show_defect_closed if show_defect_present: params['showDefectPresent'] = show_defect_present if show_defect_not_present: params['showDefectNotPresent'] = show_defect_not_present if show_false_positive: params['showFalsePositive'] = show_false_positive if show_hidden: params['showHidden'] = show_hidden return self._request('POST', 'rest/vulnerabilities', params)
[ "def", "get_vulnerabilities", "(", "self", ",", "teams", "=", "None", ",", "applications", "=", "None", ",", "channel_types", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "generic_severities", "=", "None", ",", "generic_vulnerabilities", "=", "None", ",", "number_merged", "=", "None", ",", "number_vulnerabilities", "=", "None", ",", "parameter", "=", "None", ",", "path", "=", "None", ",", "show_open", "=", "None", ",", "show_closed", "=", "None", ",", "show_defect_open", "=", "None", ",", "show_defect_closed", "=", "None", ",", "show_defect_present", "=", "None", ",", "show_defect_not_present", "=", "None", ",", "show_false_positive", "=", "None", ",", "show_hidden", "=", "None", ")", ":", "params", "=", "{", "}", "# Build parameter list", "if", "teams", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'teams'", ",", "'id'", ",", "teams", ")", ")", "if", "applications", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'applications'", ",", "'id'", ",", "applications", ")", ")", "if", "channel_types", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'channelTypes'", ",", "'name'", ",", "channel_types", ")", ")", "if", "start_date", ":", "params", "[", "'startDate'", "]", "=", "start_date", "if", "end_date", ":", "params", "[", "'endDate'", "]", "=", "end_date", "if", "generic_severities", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'genericSeverities'", ",", "'intValue'", ",", "generic_severities", ")", ")", "if", "generic_vulnerabilities", ":", "params", ".", "update", "(", "self", ".", "_build_list_params", "(", "'genericVulnerabilities'", ",", "'id'", ",", "generic_vulnerabilities", ")", ")", "if", "number_merged", ":", "params", "[", "'numberMerged'", "]", "=", "number_merged", "if", "number_vulnerabilities", ":", "params", "[", "'numberVulnerabilities'", "]", "=", "number_vulnerabilities", "if", "parameter", ":", "params", "[", "'parameter'", "]", "=", "parameter", "if", "path", ":", "params", "[", "'path'", "]", "=", "path", "if", "show_open", ":", "params", "[", "'showOpen'", "]", "=", "show_open", "if", "show_closed", ":", "params", "[", "'showClosed'", "]", "=", "show_closed", "if", "show_defect_open", ":", "params", "[", "'showDefectOpen'", "]", "=", "show_defect_open", "if", "show_defect_closed", ":", "params", "[", "'showDefectClosed'", "]", "=", "show_defect_closed", "if", "show_defect_present", ":", "params", "[", "'showDefectPresent'", "]", "=", "show_defect_present", "if", "show_defect_not_present", ":", "params", "[", "'showDefectNotPresent'", "]", "=", "show_defect_not_present", "if", "show_false_positive", ":", "params", "[", "'showFalsePositive'", "]", "=", "show_false_positive", "if", "show_hidden", ":", "params", "[", "'showHidden'", "]", "=", "show_hidden", "return", "self", ".", "_request", "(", "'POST'", ",", "'rest/vulnerabilities'", ",", "params", ")" ]
53.142857
25.057143
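A hedged usage sketch for the client method above. ThreadFixAPI and its constructor arguments are assumptions about the class hosting get_vulnerabilities; the ids and severity value are placeholders, not documented constants:

client = ThreadFixAPI(host='https://threadfix.example.com', api_key='<key>')  # assumed constructor
response = client.get_vulnerabilities(
    teams=[1],                 # placeholder team id
    generic_severities=[5],    # placeholder severity intValue
    show_open=True,
    number_vulnerabilities=50,
)

Note that the falsy checks (`if number_merged:` etc.) mean a parameter explicitly set to 0 or False is silently omitted from the request.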
def construct_mapping(self, node, deep=False):
    '''
    Build a dict from a YAML mapping node, rejecting unhashable and duplicate keys
    '''
    if not isinstance(node, MappingNode):
        raise ConstructorError(
            None,
            None,
            'expected a mapping node, but found {0}'.format(node.id),
            node.start_mark)
    self.flatten_mapping(node)
    context = 'while constructing a mapping'
    mapping = self.dictclass()
    for key_node, value_node in node.value:
        key = self.construct_object(key_node, deep=deep)
        try:
            hash(key)
        except TypeError:
            raise ConstructorError(
                context,
                node.start_mark,
                "found unacceptable key {0}".format(key_node.value),
                key_node.start_mark)
        value = self.construct_object(value_node, deep=deep)
        if key in mapping:
            raise ConstructorError(
                context,
                node.start_mark,
                "found conflicting ID '{0}'".format(key),
                key_node.start_mark)
        mapping[key] = value
    return mapping
[ "def", "construct_mapping", "(", "self", ",", "node", ",", "deep", "=", "False", ")", ":", "if", "not", "isinstance", "(", "node", ",", "MappingNode", ")", ":", "raise", "ConstructorError", "(", "None", ",", "None", ",", "'expected a mapping node, but found {0}'", ".", "format", "(", "node", ".", "id", ")", ",", "node", ".", "start_mark", ")", "self", ".", "flatten_mapping", "(", "node", ")", "context", "=", "'while constructing a mapping'", "mapping", "=", "self", ".", "dictclass", "(", ")", "for", "key_node", ",", "value_node", "in", "node", ".", "value", ":", "key", "=", "self", ".", "construct_object", "(", "key_node", ",", "deep", "=", "deep", ")", "try", ":", "hash", "(", "key", ")", "except", "TypeError", ":", "raise", "ConstructorError", "(", "context", ",", "node", ".", "start_mark", ",", "\"found unacceptable key {0}\"", ".", "format", "(", "key_node", ".", "value", ")", ",", "key_node", ".", "start_mark", ")", "value", "=", "self", ".", "construct_object", "(", "value_node", ",", "deep", "=", "deep", ")", "if", "key", "in", "mapping", ":", "raise", "ConstructorError", "(", "context", ",", "node", ".", "start_mark", ",", "\"found conflicting ID '{0}'\"", ".", "format", "(", "key", ")", ",", "key_node", ".", "start_mark", ")", "mapping", "[", "key", "]", "=", "value", "return", "mapping" ]
34.970588
14.205882
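The constructor above follows PyYAML's standard extension hook; a self-contained sketch of the same duplicate-key guard layered over yaml.SafeLoader, with a plain dict standing in for self.dictclass:

import yaml
from yaml.constructor import ConstructorError

class StrictLoader(yaml.SafeLoader):
    # Rejects duplicate mapping keys instead of silently keeping the last one.
    def construct_mapping(self, node, deep=False):
        self.flatten_mapping(node)  # resolve YAML merge keys ('<<') first
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError:
                raise ConstructorError(
                    'while constructing a mapping', node.start_mark,
                    'found unacceptable key {0}'.format(key_node.value),
                    key_node.start_mark)
            if key in mapping:
                raise ConstructorError(
                    'while constructing a mapping', node.start_mark,
                    "found conflicting ID '{0}'".format(key),
                    key_node.start_mark)
            mapping[key] = self.construct_object(value_node, deep=deep)
        return mapping

print(yaml.load('a: 1\nb: 2', Loader=StrictLoader))  # {'a': 1, 'b': 2}
yaml.load('a: 1\na: 2', Loader=StrictLoader)         # raises ConstructorError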
def concentric_circles_path(size):
    """
    Yields a sequence of concentric circle paths, moving outward from the center of the image.

    :param size: The (width, height) of the image
    :return: Yields individual circles, where each circle is a generator that yields pixel coordinates.
    """
    width, height = size
    x0, y0 = width // 2, height // 2
    # sqrt(2) * the longer side comfortably reaches past the image corners
    max_radius = int(sqrt(2) * max(height, width))
    yield from fill_concentric_circles(radius=max_radius, center=(x0, y0), size=size)
[ "def", "concentric_circles_path", "(", "size", ")", ":", "width", ",", "height", "=", "size", "x0", ",", "y0", "=", "width", "//", "2", ",", "height", "//", "2", "max_radius", "=", "int", "(", "sqrt", "(", "2", ")", "*", "max", "(", "height", ",", "width", ")", ")", "yield", "from", "fill_concentric_circles", "(", "radius", "=", "max_radius", ",", "center", "=", "(", "x0", ",", "y0", ")", ",", "size", "=", "size", ")" ]
49.8
21.6
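fill_concentric_circles is referenced but not shown in this record. A hypothetical sketch consistent with the call signature above; the sampling density (8 steps per pixel of radius keeps adjacent samples under one pixel apart, since 2*pi*r / (8*r) < 1) is an assumption, not the library's actual algorithm:

from math import cos, sin, pi

def fill_concentric_circles(radius, center, size):
    # Hypothetical helper: one generator per ring, yielding in-bounds pixels.
    width, height = size
    x0, y0 = center

    def circle(r):
        steps = max(8 * r, 1)  # arc length between samples stays below 1px
        for i in range(steps):
            x = int(round(x0 + r * cos(2 * pi * i / steps)))
            y = int(round(y0 + r * sin(2 * pi * i / steps)))
            if 0 <= x < width and 0 <= y < height:
                yield (x, y)

    for r in range(radius + 1):
        yield circle(r)  # bind r at call time to avoid late-binding surprises

for ring in concentric_circles_path((64, 64)):
    for x, y in ring:
        pass  # visit each pixel on the ring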
def purge(self):
    """ Remove cache items older than the configured expiration (by insertion time for FIFO, by last access time for LRU) and return the number of removed rows. """
    purged_count = 0
    if self.__expiration is not None:
        with self.__connection:
            if self.__caching_strategy is CachingStrategy.FIFO:
                # dump least recently added rows
                for post in (False, True):
                    purged_count += self.__connection.execute("DELETE FROM " + self.getDbTableName(post=post) + " "
                                                              "WHERE (strftime('%s', 'now') - added_timestamp) > ?;",
                                                              (self.__expiration,)).rowcount
            elif self.__caching_strategy is CachingStrategy.LRU:
                # dump least recently accessed rows
                for post in (False, True):
                    purged_count += self.__connection.execute("DELETE FROM " + self.getDbTableName(post=post) + " "
                                                              "WHERE (strftime('%s', 'now') - last_accessed_timestamp) > ?;",
                                                              (self.__expiration,)).rowcount
    return purged_count
[ "def", "purge", "(", "self", ")", ":", "purged_count", "=", "0", "if", "self", ".", "__expiration", "is", "not", "None", ":", "with", "self", ".", "__connection", ":", "if", "self", ".", "__caching_strategy", "is", "CachingStrategy", ".", "FIFO", ":", "# dump least recently added rows", "for", "post", "in", "(", "False", ",", "True", ")", ":", "purged_count", "+=", "self", ".", "__connection", ".", "execute", "(", "\"DELETE FROM \"", "+", "self", ".", "getDbTableName", "(", "post", "=", "post", ")", "+", "\" \"", "\"WHERE (strftime('%s', 'now') - added_timestamp) > ?;\"", ",", "(", "self", ".", "__expiration", ",", ")", ")", ".", "rowcount", "elif", "self", ".", "__caching_strategy", "is", "CachingStrategy", ".", "LRU", ":", "# dump least recently accessed rows", "for", "post", "in", "(", "False", ",", "True", ")", ":", "purged_count", "+=", "self", ".", "__connection", ".", "execute", "(", "\"DELETE FROM \"", "+", "self", ".", "getDbTableName", "(", "post", "=", "post", ")", "+", "\" \"", "\"WHERE (strftime('%s', 'now') - last_accessed_timestamp) > ?;\"", ",", "(", "self", ".", "__expiration", ",", ")", ")", ".", "rowcount", "return", "purged_count" ]
58.4
26.15
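A hedged usage sketch for the purge method above. The cache class name and constructor arguments are assumptions; the CachingStrategy values and the Unix-epoch bookkeeping columns (added_timestamp, last_accessed_timestamp, compared against strftime('%s', 'now')) come straight from the record:

# Assumed constructor: expire entries not accessed within the last hour
cache = WebCache('cache.sqlite3', caching_strategy=CachingStrategy.LRU,
                 expiration=3600)
removed = cache.purge()  # returns 0 immediately if no expiration was configured
print('purged {0} rows'.format(removed))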