Column         Type      Observed values
text           string    lengths 89 to 104k
code_tokens    list      -
avg_line_len   float64   7.91 to 980
score          float64   0 to 630
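How avg_line_len and score were derived is not stated in this preview. Purely as an illustration (not the dataset authors' code), a per-row statistic like avg_line_len could be recomputed from the text column along these lines; the helper name and the n_tokens field are hypothetical:

def derive_row_stats(text: str, code_tokens: list) -> dict:
    # Hypothetical helper: recompute simple per-row statistics for one sample.
    lines = text.splitlines() or [text]
    return {
        "avg_line_len": sum(len(line) for line in lines) / len(lines),  # mean characters per line
        "n_tokens": len(code_tokens),  # size of the token sequence
    }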
def compile_theme(theme_id=None):
    """Compiles a theme."""
    from engineer.processors import convert_less
    from engineer.themes import ThemeManager

    if theme_id is None:
        themes = ThemeManager.themes().values()
    else:
        themes = [ThemeManager.theme(theme_id)]
    with(indent(2)):
        puts(colored.yellow("Compiling %s themes." % len(themes)))
        for theme in themes:
            theme_output_path = (theme.static_root /
                                 ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
            puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
            with indent(4):
                puts("Compiling...")
                convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
                             theme_output_path,
                             minify=True)
                puts(colored.green("Done.", bold=True))
[ "def", "compile_theme", "(", "theme_id", "=", "None", ")", ":", "from", "engineer", ".", "processors", "import", "convert_less", "from", "engineer", ".", "themes", "import", "ThemeManager", "if", "theme_id", "is", "None", ":", "themes", "=", "ThemeManager", ".", "themes", "(", ")", ".", "values", "(", ")", "else", ":", "themes", "=", "[", "ThemeManager", ".", "theme", "(", "theme_id", ")", "]", "with", "(", "indent", "(", "2", ")", ")", ":", "puts", "(", "colored", ".", "yellow", "(", "\"Compiling %s themes.\"", "%", "len", "(", "themes", ")", ")", ")", "for", "theme", "in", "themes", ":", "theme_output_path", "=", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s_precompiled.css'", "%", "theme", ".", "id", ")", ")", ".", "normpath", "(", ")", "puts", "(", "colored", ".", "cyan", "(", "\"Compiling theme %s to %s\"", "%", "(", "theme", ".", "id", ",", "theme_output_path", ")", ")", ")", "with", "indent", "(", "4", ")", ":", "puts", "(", "\"Compiling...\"", ")", "convert_less", "(", "theme", ".", "static_root", "/", "(", "'stylesheets/%s.less'", "%", "theme", ".", "id", ")", ",", "theme_output_path", ",", "minify", "=", "True", ")", "puts", "(", "colored", ".", "green", "(", "\"Done.\"", ",", "bold", "=", "True", ")", ")" ]
38.391304
21.826087
def main(args):
    """Validates the submission."""
    print_in_box('Validating submission ' + args.submission_filename)
    random.seed()
    temp_dir = args.temp_dir
    delete_temp_dir = False
    if not temp_dir:
        temp_dir = tempfile.mkdtemp()
        logging.info('Created temporary directory: %s', temp_dir)
        delete_temp_dir = True
    validator = submission_validator_lib.SubmissionValidator(temp_dir, args.use_gpu)
    if validator.validate_submission(args.submission_filename, args.submission_type):
        print_in_box('Submission is VALID!')
    else:
        print_in_box('Submission is INVALID, see log messages for details')
    if delete_temp_dir:
        logging.info('Deleting temporary directory: %s', temp_dir)
        subprocess.call(['rm', '-rf', temp_dir])
[ "def", "main", "(", "args", ")", ":", "print_in_box", "(", "'Validating submission '", "+", "args", ".", "submission_filename", ")", "random", ".", "seed", "(", ")", "temp_dir", "=", "args", ".", "temp_dir", "delete_temp_dir", "=", "False", "if", "not", "temp_dir", ":", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logging", ".", "info", "(", "'Created temporary directory: %s'", ",", "temp_dir", ")", "delete_temp_dir", "=", "True", "validator", "=", "submission_validator_lib", ".", "SubmissionValidator", "(", "temp_dir", ",", "args", ".", "use_gpu", ")", "if", "validator", ".", "validate_submission", "(", "args", ".", "submission_filename", ",", "args", ".", "submission_type", ")", ":", "print_in_box", "(", "'Submission is VALID!'", ")", "else", ":", "print_in_box", "(", "'Submission is INVALID, see log messages for details'", ")", "if", "delete_temp_dir", ":", "logging", ".", "info", "(", "'Deleting temporary directory: %s'", ",", "temp_dir", ")", "subprocess", ".", "call", "(", "[", "'rm'", ",", "'-rf'", ",", "temp_dir", "]", ")" ]
37.5
17.681818
def compute(self):
    """
    Run an iteration of this anomaly classifier
    """
    result = self._constructClassificationRecord()
    # Classify this point after waiting the classification delay
    if result.ROWID >= self._autoDetectWaitRecords:
        self._updateState(result)
    # Save new classification record and keep history as moving window
    self.saved_states.append(result)
    if len(self.saved_states) > self._history_length:
        self.saved_states.pop(0)
    return result
[ "def", "compute", "(", "self", ")", ":", "result", "=", "self", ".", "_constructClassificationRecord", "(", ")", "# Classify this point after waiting the classification delay", "if", "result", ".", "ROWID", ">=", "self", ".", "_autoDetectWaitRecords", ":", "self", ".", "_updateState", "(", "result", ")", "# Save new classification record and keep history as moving window", "self", ".", "saved_states", ".", "append", "(", "result", ")", "if", "len", "(", "self", ".", "saved_states", ")", ">", "self", ".", "_history_length", ":", "self", ".", "saved_states", ".", "pop", "(", "0", ")", "return", "result" ]
30.0625
17.6875
async def update_houses(self):
    """Lookup details for devices on the plum servers"""
    houses = await self.fetch_houses()
    for house_id in houses:
        asyncio.Task(self.update_house(house_id))
[ "async", "def", "update_houses", "(", "self", ")", ":", "houses", "=", "await", "self", ".", "fetch_houses", "(", ")", "for", "house_id", "in", "houses", ":", "asyncio", ".", "Task", "(", "self", ".", "update_house", "(", "house_id", ")", ")" ]
43.2
6.8
def ring2nest(nside, ipix):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ring2nest`."""
    ipix = np.atleast_1d(ipix).astype(np.int64, copy=False)
    return ring_to_nested(ipix, nside)
[ "def", "ring2nest", "(", "nside", ",", "ipix", ")", ":", "ipix", "=", "np", ".", "atleast_1d", "(", "ipix", ")", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "return", "ring_to_nested", "(", "ipix", ",", "nside", ")" ]
48.75
8.5
def parse_map_d(self):
    """Alpha map"""
    Kd = os.path.join(self.dir, " ".join(self.values[1:]))
    self.this_material.set_texture_alpha(Kd)
[ "def", "parse_map_d", "(", "self", ")", ":", "Kd", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "\" \"", ".", "join", "(", "self", ".", "values", "[", "1", ":", "]", ")", ")", "self", ".", "this_material", ".", "set_texture_alpha", "(", "Kd", ")" ]
38.75
12
def export_svgs(obj, filename=None, height=None, width=None, webdriver=None, timeout=5):
    ''' Export the SVG-enabled plots within a layout. Each plot will result
    in a distinct SVG file.

    If the filename is not given, it is derived from the script name
    (e.g. ``/foo/myplot.py`` will create ``/foo/myplot.svg``)

    Args:
        obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
        filename (str, optional) : filename to save document under (default: None)
            If None, infer from the filename.
        height (int) : the desired height of the exported layout obj only if
            it's a Plot instance. Otherwise the height kwarg is ignored.
        width (int) : the desired width of the exported layout obj only if
            it's a Plot instance. Otherwise the width kwarg is ignored.
        webdriver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.
        timeout (int) : the maximum amount of time (in seconds) to wait for
            Bokeh to initialize (default: 5) (Added in 1.1.1).

    Returns:
        filenames (list(str)) : the list of filenames where the SVGs files are saved.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    svgs = get_svgs(obj, height=height, width=width, driver=webdriver, timeout=timeout)

    if len(svgs) == 0:
        log.warning("No SVG Plots were found.")
        return

    if filename is None:
        filename = default_filename("svg")

    filenames = []

    for i, svg in enumerate(svgs):
        if i == 0:
            filename = filename
        else:
            idx = filename.find(".svg")
            filename = filename[:idx] + "_{}".format(i) + filename[idx:]

        with io.open(filename, mode="w", encoding="utf-8") as f:
            f.write(svg)

        filenames.append(filename)

    return filenames
[ "def", "export_svgs", "(", "obj", ",", "filename", "=", "None", ",", "height", "=", "None", ",", "width", "=", "None", ",", "webdriver", "=", "None", ",", "timeout", "=", "5", ")", ":", "svgs", "=", "get_svgs", "(", "obj", ",", "height", "=", "height", ",", "width", "=", "width", ",", "driver", "=", "webdriver", ",", "timeout", "=", "timeout", ")", "if", "len", "(", "svgs", ")", "==", "0", ":", "log", ".", "warning", "(", "\"No SVG Plots were found.\"", ")", "return", "if", "filename", "is", "None", ":", "filename", "=", "default_filename", "(", "\"svg\"", ")", "filenames", "=", "[", "]", "for", "i", ",", "svg", "in", "enumerate", "(", "svgs", ")", ":", "if", "i", "==", "0", ":", "filename", "=", "filename", "else", ":", "idx", "=", "filename", ".", "find", "(", "\".svg\"", ")", "filename", "=", "filename", "[", ":", "idx", "]", "+", "\"_{}\"", ".", "format", "(", "i", ")", "+", "filename", "[", "idx", ":", "]", "with", "io", ".", "open", "(", "filename", ",", "mode", "=", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "f", ".", "write", "(", "svg", ")", "filenames", ".", "append", "(", "filename", ")", "return", "filenames" ]
33.982759
29.568966
def evaluate(self, data, env):
    """
    Evaluate the predicates and values
    """
    # For each predicate-value, we keep track of the positions
    # that have been copied to the result, so that the later
    # more general values do not overwrite the previous ones.
    result = np.repeat(None, len(data))
    copied = np.repeat(False, len(data))
    for pred_expr, value_expr in self.pv_expressions:
        bool_idx = pred_expr.evaluate(data, env)
        if not pdtypes.is_bool_dtype(np.asarray(bool_idx)):
            raise TypeError(
                "The predicate keys must return a boolean array, "
                "or a boolean value.")
        value = value_expr.evaluate(data, env)
        mask = (copied ^ bool_idx) & bool_idx
        copied |= bool_idx
        idx = np.where(mask)[0]
        result[idx] = self.nice_value(value, idx)
    return np.array(list(result))
[ "def", "evaluate", "(", "self", ",", "data", ",", "env", ")", ":", "# For each predicate-value, we keep track of the positions", "# that have been copied to the result, so that the later", "# more general values do not overwrite the previous ones.", "result", "=", "np", ".", "repeat", "(", "None", ",", "len", "(", "data", ")", ")", "copied", "=", "np", ".", "repeat", "(", "False", ",", "len", "(", "data", ")", ")", "for", "pred_expr", ",", "value_expr", "in", "self", ".", "pv_expressions", ":", "bool_idx", "=", "pred_expr", ".", "evaluate", "(", "data", ",", "env", ")", "if", "not", "pdtypes", ".", "is_bool_dtype", "(", "np", ".", "asarray", "(", "bool_idx", ")", ")", ":", "raise", "TypeError", "(", "\"The predicate keys must return a boolean array, \"", "\"or a boolean value.\"", ")", "value", "=", "value_expr", ".", "evaluate", "(", "data", ",", "env", ")", "mask", "=", "(", "copied", "^", "bool_idx", ")", "&", "bool_idx", "copied", "|=", "bool_idx", "idx", "=", "np", ".", "where", "(", "mask", ")", "[", "0", "]", "result", "[", "idx", "]", "=", "self", ".", "nice_value", "(", "value", ",", "idx", ")", "return", "np", ".", "array", "(", "list", "(", "result", ")", ")" ]
45.047619
11.238095
def _vagrant_call(node, function, section, comment, status_when_done=None, **kwargs):
    '''
    Helper to call the vagrant functions. Wildcards supported.

    :param node: The Salt-id or wildcard
    :param function: the vagrant submodule to call
    :param section: the name for the state call.
    :param comment: what the state reply should say
    :param status_when_done: the Vagrant status expected for this state
    :return: the dictionary for the state reply
    '''
    ret = {'name': node, 'changes': {}, 'result': True, 'comment': ''}
    targeted_nodes = []
    if isinstance(node, six.string_types):
        try:  # use shortcut if a single node name
            if __salt__['vagrant.get_vm_info'](node):
                targeted_nodes = [node]
        except SaltInvocationError:
            pass

    if not targeted_nodes:  # the shortcut failed, do this the hard way
        all_domains = __salt__['vagrant.list_domains']()
        targeted_nodes = fnmatch.filter(all_domains, node)
    changed_nodes = []
    ignored_nodes = []
    for node in targeted_nodes:
        if status_when_done:
            try:
                present_state = __salt__['vagrant.vm_state'](node)[0]
                if present_state['state'] == status_when_done:
                    continue  # no change is needed
            except (IndexError, SaltInvocationError, CommandExecutionError):
                pass
        try:
            response = __salt__['vagrant.{0}'.format(function)](node, **kwargs)
            if isinstance(response, dict):
                response = response['name']
            changed_nodes.append({'node': node, function: response})
        except (SaltInvocationError, CommandExecutionError) as err:
            ignored_nodes.append({'node': node, 'issue': six.text_type(err)})

    if not changed_nodes:
        ret['result'] = True
        ret['comment'] = 'No changes seen'
        if ignored_nodes:
            ret['changes'] = {'ignored': ignored_nodes}
    else:
        ret['changes'] = {section: changed_nodes}
        ret['comment'] = comment
    return ret
[ "def", "_vagrant_call", "(", "node", ",", "function", ",", "section", ",", "comment", ",", "status_when_done", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "node", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "targeted_nodes", "=", "[", "]", "if", "isinstance", "(", "node", ",", "six", ".", "string_types", ")", ":", "try", ":", "# use shortcut if a single node name", "if", "__salt__", "[", "'vagrant.get_vm_info'", "]", "(", "node", ")", ":", "targeted_nodes", "=", "[", "node", "]", "except", "SaltInvocationError", ":", "pass", "if", "not", "targeted_nodes", ":", "# the shortcut failed, do this the hard way", "all_domains", "=", "__salt__", "[", "'vagrant.list_domains'", "]", "(", ")", "targeted_nodes", "=", "fnmatch", ".", "filter", "(", "all_domains", ",", "node", ")", "changed_nodes", "=", "[", "]", "ignored_nodes", "=", "[", "]", "for", "node", "in", "targeted_nodes", ":", "if", "status_when_done", ":", "try", ":", "present_state", "=", "__salt__", "[", "'vagrant.vm_state'", "]", "(", "node", ")", "[", "0", "]", "if", "present_state", "[", "'state'", "]", "==", "status_when_done", ":", "continue", "# no change is needed", "except", "(", "IndexError", ",", "SaltInvocationError", ",", "CommandExecutionError", ")", ":", "pass", "try", ":", "response", "=", "__salt__", "[", "'vagrant.{0}'", ".", "format", "(", "function", ")", "]", "(", "node", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "response", ",", "dict", ")", ":", "response", "=", "response", "[", "'name'", "]", "changed_nodes", ".", "append", "(", "{", "'node'", ":", "node", ",", "function", ":", "response", "}", ")", "except", "(", "SaltInvocationError", ",", "CommandExecutionError", ")", "as", "err", ":", "ignored_nodes", ".", "append", "(", "{", "'node'", ":", "node", ",", "'issue'", ":", "six", ".", "text_type", "(", "err", ")", "}", ")", "if", "not", "changed_nodes", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'No changes seen'", "if", "ignored_nodes", ":", "ret", "[", "'changes'", "]", "=", "{", "'ignored'", ":", "ignored_nodes", "}", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "section", ":", "changed_nodes", "}", "ret", "[", "'comment'", "]", "=", "comment", "return", "ret" ]
40.117647
20.078431
def reset(self):
    """ Fill the screen with black pixels """
    surface = Surface(self.width, self.height)
    surface.fill(BLACK)
    self.matrix = surface.matrix
[ "def", "reset", "(", "self", ")", ":", "surface", "=", "Surface", "(", "self", ".", "width", ",", "self", ".", "height", ")", "surface", ".", "fill", "(", "BLACK", ")", "self", ".", "matrix", "=", "surface", ".", "matrix" ]
27.428571
7.428571
def bottoms(panels):
    """
    Finds bottom lines of all panels
    :param panels:
    :return: sorted by row list of tuples representing lines (col, row , col + len, row)
    """
    bottom_lines = [(p['col'], p['row'] + p['size_y'], p['col'] + p['size_x'], p['row'] + p['size_y'])
                    for p in panels]
    return sorted(bottom_lines, key=lambda l: l[1], reverse=True)
[ "def", "bottoms", "(", "panels", ")", ":", "bottom_lines", "=", "[", "(", "p", "[", "'col'", "]", ",", "p", "[", "'row'", "]", "+", "p", "[", "'size_y'", "]", ",", "p", "[", "'col'", "]", "+", "p", "[", "'size_x'", "]", ",", "p", "[", "'row'", "]", "+", "p", "[", "'size_y'", "]", ")", "for", "p", "in", "panels", "]", "return", "sorted", "(", "bottom_lines", ",", "key", "=", "lambda", "l", ":", "l", "[", "1", "]", ",", "reverse", "=", "True", ")" ]
45
24.75
def jsonify(obj, **kwargs):
    """
    A version of json.dumps that can handle numpy arrays
    by creating a custom encoder for numpy dtypes.

    Parameters
    --------------
    obj : JSON- serializable blob
    **kwargs : Passed to json.dumps

    Returns
    --------------
    dumped : str
        JSON dump of obj
    """
    class NumpyEncoder(json.JSONEncoder):
        def default(self, obj):
            # will work for numpy.ndarrays
            # as well as their int64/etc objects
            if hasattr(obj, 'tolist'):
                return obj.tolist()
            return json.JSONEncoder.default(self, obj)

    # run the dumps using our encoder
    dumped = json.dumps(obj, cls=NumpyEncoder, **kwargs)
    return dumped
[ "def", "jsonify", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "class", "NumpyEncoder", "(", "json", ".", "JSONEncoder", ")", ":", "def", "default", "(", "self", ",", "obj", ")", ":", "# will work for numpy.ndarrays", "# as well as their int64/etc objects", "if", "hasattr", "(", "obj", ",", "'tolist'", ")", ":", "return", "obj", ".", "tolist", "(", ")", "return", "json", ".", "JSONEncoder", ".", "default", "(", "self", ",", "obj", ")", "# run the dumps using our encoder", "dumped", "=", "json", ".", "dumps", "(", "obj", ",", "cls", "=", "NumpyEncoder", ",", "*", "*", "kwargs", ")", "return", "dumped" ]
26.666667
15.851852
def build_homogeneisation_vehicules(temporary_store = None, year = None):
    assert temporary_store is not None
    """Compute vehicule numbers by type"""
    assert year is not None

    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection = 'budget_des_familles', config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))

    if year == 1995:
        vehicule = None
        # L'enquête BdF 1995 ne contient pas d'information sur le type de carburant utilisé par les véhicules.

    if year == 2000:
        vehicule = survey.get_values(table = "depmen")
        kept_variables = ['ident', 'carbu01', 'carbu02']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident': 'ident_men'}, inplace = True)
        vehicule.rename(columns = {'carbu01': 'carbu1'}, inplace = True)
        vehicule.rename(columns = {'carbu02': 'carbu2'}, inplace = True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
        vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)
        vehicule.index = vehicule.index.astype(ident_men_dtype)

    if year == 2005:
        vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_men', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)

    if year == 2011:
        try:
            vehicule = survey.get_values(table = "AUTOMOBILE")
        except:
            vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_me', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)

    # Compute the number of cars by category and save
    if year != 1995:
        vehicule = vehicule.groupby(by = 'ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
        vehicule["pourcentage_vehicule_essence"] = 0
        vehicule.pourcentage_vehicule_essence.loc[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
        # Save in temporary store
        temporary_store['automobile_{}'.format(year)] = vehicule
[ "def", "build_homogeneisation_vehicules", "(", "temporary_store", "=", "None", ",", "year", "=", "None", ")", ":", "assert", "temporary_store", "is", "not", "None", "assert", "year", "is", "not", "None", "# Load data", "bdf_survey_collection", "=", "SurveyCollection", ".", "load", "(", "collection", "=", "'budget_des_familles'", ",", "config_files_directory", "=", "config_files_directory", ")", "survey", "=", "bdf_survey_collection", ".", "get_survey", "(", "'budget_des_familles_{}'", ".", "format", "(", "year", ")", ")", "if", "year", "==", "1995", ":", "vehicule", "=", "None", "# L'enquête BdF 1995 ne contient pas d'information sur le type de carburant utilisé par les véhicules.", "if", "year", "==", "2000", ":", "vehicule", "=", "survey", ".", "get_values", "(", "table", "=", "\"depmen\"", ")", "kept_variables", "=", "[", "'ident'", ",", "'carbu01'", ",", "'carbu02'", "]", "vehicule", "=", "vehicule", "[", "kept_variables", "]", "vehicule", ".", "rename", "(", "columns", "=", "{", "'ident'", ":", "'ident_men'", "}", ",", "inplace", "=", "True", ")", "vehicule", ".", "rename", "(", "columns", "=", "{", "'carbu01'", ":", "'carbu1'", "}", ",", "inplace", "=", "True", ")", "vehicule", ".", "rename", "(", "columns", "=", "{", "'carbu02'", ":", "'carbu2'", "}", ",", "inplace", "=", "True", ")", "vehicule", "[", "\"veh_tot\"", "]", "=", "1", "vehicule", "[", "\"veh_essence\"", "]", "=", "1", "*", "(", "vehicule", "[", "'carbu1'", "]", "==", "1", ")", "+", "1", "*", "(", "vehicule", "[", "'carbu2'", "]", "==", "1", ")", "vehicule", "[", "\"veh_diesel\"", "]", "=", "1", "*", "(", "vehicule", "[", "'carbu1'", "]", "==", "2", ")", "+", "1", "*", "(", "vehicule", "[", "'carbu2'", "]", "==", "2", ")", "vehicule", ".", "index", "=", "vehicule", ".", "index", ".", "astype", "(", "ident_men_dtype", ")", "if", "year", "==", "2005", ":", "vehicule", "=", "survey", ".", "get_values", "(", "table", "=", "\"automobile\"", ")", "kept_variables", "=", "[", "'ident_men'", ",", "'carbu'", "]", "vehicule", "=", "vehicule", "[", "kept_variables", "]", "vehicule", "[", "\"veh_tot\"", "]", "=", "1", "vehicule", "[", "\"veh_essence\"", "]", "=", "(", "vehicule", "[", "'carbu'", "]", "==", "1", ")", "vehicule", "[", "\"veh_diesel\"", "]", "=", "(", "vehicule", "[", "'carbu'", "]", "==", "2", ")", "if", "year", "==", "2011", ":", "try", ":", "vehicule", "=", "survey", ".", "get_values", "(", "table", "=", "\"AUTOMOBILE\"", ")", "except", ":", "vehicule", "=", "survey", ".", "get_values", "(", "table", "=", "\"automobile\"", ")", "kept_variables", "=", "[", "'ident_me'", ",", "'carbu'", "]", "vehicule", "=", "vehicule", "[", "kept_variables", "]", "vehicule", ".", "rename", "(", "columns", "=", "{", "'ident_me'", ":", "'ident_men'", "}", ",", "inplace", "=", "True", ")", "vehicule", "[", "\"veh_tot\"", "]", "=", "1", "vehicule", "[", "\"veh_essence\"", "]", "=", "(", "vehicule", "[", "'carbu'", "]", "==", "1", ")", "vehicule", "[", "\"veh_diesel\"", "]", "=", "(", "vehicule", "[", "'carbu'", "]", "==", "2", ")", "# Compute the number of cars by category and save", "if", "year", "!=", "1995", ":", "vehicule", "=", "vehicule", ".", "groupby", "(", "by", "=", "'ident_men'", ")", "[", "\"veh_tot\"", ",", "\"veh_essence\"", ",", "\"veh_diesel\"", "]", ".", "sum", "(", ")", "vehicule", "[", "\"pourcentage_vehicule_essence\"", "]", "=", "0", "vehicule", ".", "pourcentage_vehicule_essence", ".", "loc", "[", "vehicule", ".", "veh_tot", "!=", "0", "]", "=", "vehicule", ".", "veh_essence", "/", "vehicule", ".", "veh_tot", "# 
Save in temporary store", "temporary_store", "[", "'automobile_{}'", ".", "format", "(", "year", ")", "]", "=", "vehicule" ]
44.981818
25.454545
def _init_go2bordercolor(objcolors, **kws):
    """Initialize go2bordercolor with default to make hdrgos bright blue."""
    go2bordercolor_ret = objcolors.get_bordercolor()
    if 'go2bordercolor' not in kws:
        return go2bordercolor_ret
    go2bordercolor_usr = kws['go2bordercolor']
    goids = set(go2bordercolor_ret).intersection(go2bordercolor_usr)
    for goid in goids:
        go2bordercolor_usr[goid] = go2bordercolor_ret[goid]
    return go2bordercolor_usr
[ "def", "_init_go2bordercolor", "(", "objcolors", ",", "*", "*", "kws", ")", ":", "go2bordercolor_ret", "=", "objcolors", ".", "get_bordercolor", "(", ")", "if", "'go2bordercolor'", "not", "in", "kws", ":", "return", "go2bordercolor_ret", "go2bordercolor_usr", "=", "kws", "[", "'go2bordercolor'", "]", "goids", "=", "set", "(", "go2bordercolor_ret", ")", ".", "intersection", "(", "go2bordercolor_usr", ")", "for", "goid", "in", "goids", ":", "go2bordercolor_usr", "[", "goid", "]", "=", "go2bordercolor_ret", "[", "goid", "]", "return", "go2bordercolor_usr" ]
49.9
10.9
def _main_loop(self):
    '''
    Continuous loop that reads from a kafka topic and tries to validate
    incoming messages
    '''
    self.logger.debug("Processing messages")
    old_time = 0
    while True:
        self._process_messages()
        if self.settings['STATS_DUMP'] != 0:
            new_time = int(old_div(time.time(), self.settings['STATS_DUMP']))
            # only log every X seconds
            if new_time != old_time:
                self._dump_stats()
                old_time = new_time

        self._report_self()
        time.sleep(self.settings['SLEEP_TIME'])
[ "def", "_main_loop", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Processing messages\"", ")", "old_time", "=", "0", "while", "True", ":", "self", ".", "_process_messages", "(", ")", "if", "self", ".", "settings", "[", "'STATS_DUMP'", "]", "!=", "0", ":", "new_time", "=", "int", "(", "old_div", "(", "time", ".", "time", "(", ")", ",", "self", ".", "settings", "[", "'STATS_DUMP'", "]", ")", ")", "# only log every X seconds", "if", "new_time", "!=", "old_time", ":", "self", ".", "_dump_stats", "(", ")", "old_time", "=", "new_time", "self", ".", "_report_self", "(", ")", "time", ".", "sleep", "(", "self", ".", "settings", "[", "'SLEEP_TIME'", "]", ")" ]
35.333333
16.333333
def _top(self):
    """ g """
    # Goto top of the list
    self.top.body.focus_position = 2 if self.compact is False else 0
    self.top.keypress(self.size, "")
[ "def", "_top", "(", "self", ")", ":", "# Goto top of the list", "self", ".", "top", ".", "body", ".", "focus_position", "=", "2", "if", "self", ".", "compact", "is", "False", "else", "0", "self", ".", "top", ".", "keypress", "(", "self", ".", "size", ",", "\"\"", ")" ]
34.8
13.4
def _write_version1(self,new_filename,update_regul=False):
    """write a version 1 pest control file

    Parameters
    ----------
    new_filename : str
        name of the new pest control file
    update_regul : (boolean)
        flag to update zero-order Tikhonov prior information
        equations to prefer the current parameter values

    """
    self.new_filename = new_filename
    self.rectify_pgroups()
    self.rectify_pi()
    self._update_control_section()
    self.sanity_checks()

    f_out = open(new_filename, 'w')
    if self.with_comments:
        for line in self.comments.get("initial",[]):
            f_out.write(line+'\n')
    f_out.write("pcf\n* control data\n")
    self.control_data.write(f_out)

    # for line in self.other_lines:
    #     f_out.write(line)
    if self.with_comments:
        for line in self.comments.get("* singular value decompisition",[]):
            f_out.write(line)
    self.svd_data.write(f_out)

    #f_out.write("* parameter groups\n")

    # to catch the byte code ugliness in python 3
    pargpnme = self.parameter_groups.loc[:,"pargpnme"].copy()
    self.parameter_groups.loc[:,"pargpnme"] = \
        self.parameter_groups.pargpnme.apply(self.pargp_format["pargpnme"])

    self._write_df("* parameter groups", f_out, self.parameter_groups,
                   self.pargp_format, self.pargp_fieldnames)
    self.parameter_groups.loc[:,"pargpnme"] = pargpnme

    self._write_df("* parameter data",f_out, self.parameter_data,
                   self.par_format, self.par_fieldnames)

    if self.tied is not None:
        self._write_df("tied parameter data", f_out, self.tied,
                       self.tied_format, self.tied_fieldnames)

    f_out.write("* observation groups\n")
    for group in self.obs_groups:
        try:
            group = group.decode()
        except:
            pass
        f_out.write(pst_utils.SFMT(str(group))+'\n')
    for group in self.prior_groups:
        try:
            group = group.decode()
        except:
            pass
        f_out.write(pst_utils.SFMT(str(group))+'\n')

    self._write_df("* observation data", f_out, self.observation_data,
                   self.obs_format, self.obs_fieldnames)

    f_out.write("* model command line\n")
    for cline in self.model_command:
        f_out.write(cline+'\n')

    f_out.write("* model input/output\n")
    for tplfle,infle in zip(self.template_files,self.input_files):
        f_out.write(tplfle+' '+infle+'\n')
    for insfle,outfle in zip(self.instruction_files,self.output_files):
        f_out.write(insfle+' '+outfle+'\n')

    if self.nprior > 0:
        if self.prior_information.isnull().values.any():
            #print("WARNING: NaNs in prior_information dataframe")
            warnings.warn("NaNs in prior_information dataframe",PyemuWarning)
        f_out.write("* prior information\n")
        #self.prior_information.index = self.prior_information.pop("pilbl")
        max_eq_len = self.prior_information.equation.apply(lambda x:len(x)).max()
        eq_fmt_str = " {0:<" + str(max_eq_len) + "s} "
        eq_fmt_func = lambda x:eq_fmt_str.format(x)
        # 17/9/2016 - had to go with a custom writer loop b/c pandas doesn't want to
        # output strings longer than 100, even with display.max_colwidth
        #f_out.write(self.prior_information.to_string(col_space=0,
        #                              columns=self.prior_fieldnames,
        #                              formatters=pi_formatters,
        #                              justify="right",
        #                              header=False,
        #                              index=False) + '\n')
        #self.prior_information["pilbl"] = self.prior_information.index
        # for idx,row in self.prior_information.iterrows():
        #     f_out.write(pst_utils.SFMT(row["pilbl"]))
        #     f_out.write(eq_fmt_func(row["equation"]))
        #     f_out.write(pst_utils.FFMT(row["weight"]))
        #     f_out.write(pst_utils.SFMT(row["obgnme"]) + '\n')
        for idx, row in self.prior_information.iterrows():
            f_out.write(pst_utils.SFMT(row["pilbl"]))
            f_out.write(eq_fmt_func(row["equation"]))
            f_out.write(pst_utils.FFMT(row["weight"]))
            f_out.write(pst_utils.SFMT(row["obgnme"]))
            if self.with_comments and 'extra' in row:
                f_out.write(" # {0}".format(row['extra']))
            f_out.write('\n')

    if self.control_data.pestmode.startswith("regul"):
        #f_out.write("* regularisation\n")
        #if update_regul or len(self.regul_lines) == 0:
        #    f_out.write(self.regul_section)
        #else:
        #    [f_out.write(line) for line in self.regul_lines]
        self.reg_data.write(f_out)

    for line in self.other_lines:
        f_out.write(line+'\n')

    for key,value in self.pestpp_options.items():
        if isinstance(value,list):
            value = ','.join([str(v) for v in value])
        f_out.write("++{0}({1})\n".format(str(key),str(value)))

    if self.with_comments:
        for line in self.comments.get("final",[]):
            f_out.write(line+'\n')

    f_out.close()
[ "def", "_write_version1", "(", "self", ",", "new_filename", ",", "update_regul", "=", "False", ")", ":", "self", ".", "new_filename", "=", "new_filename", "self", ".", "rectify_pgroups", "(", ")", "self", ".", "rectify_pi", "(", ")", "self", ".", "_update_control_section", "(", ")", "self", ".", "sanity_checks", "(", ")", "f_out", "=", "open", "(", "new_filename", ",", "'w'", ")", "if", "self", ".", "with_comments", ":", "for", "line", "in", "self", ".", "comments", ".", "get", "(", "\"initial\"", ",", "[", "]", ")", ":", "f_out", ".", "write", "(", "line", "+", "'\\n'", ")", "f_out", ".", "write", "(", "\"pcf\\n* control data\\n\"", ")", "self", ".", "control_data", ".", "write", "(", "f_out", ")", "# for line in self.other_lines:", "# f_out.write(line)", "if", "self", ".", "with_comments", ":", "for", "line", "in", "self", ".", "comments", ".", "get", "(", "\"* singular value decompisition\"", ",", "[", "]", ")", ":", "f_out", ".", "write", "(", "line", ")", "self", ".", "svd_data", ".", "write", "(", "f_out", ")", "#f_out.write(\"* parameter groups\\n\")", "# to catch the byte code ugliness in python 3", "pargpnme", "=", "self", ".", "parameter_groups", ".", "loc", "[", ":", ",", "\"pargpnme\"", "]", ".", "copy", "(", ")", "self", ".", "parameter_groups", ".", "loc", "[", ":", ",", "\"pargpnme\"", "]", "=", "self", ".", "parameter_groups", ".", "pargpnme", ".", "apply", "(", "self", ".", "pargp_format", "[", "\"pargpnme\"", "]", ")", "self", ".", "_write_df", "(", "\"* parameter groups\"", ",", "f_out", ",", "self", ".", "parameter_groups", ",", "self", ".", "pargp_format", ",", "self", ".", "pargp_fieldnames", ")", "self", ".", "parameter_groups", ".", "loc", "[", ":", ",", "\"pargpnme\"", "]", "=", "pargpnme", "self", ".", "_write_df", "(", "\"* parameter data\"", ",", "f_out", ",", "self", ".", "parameter_data", ",", "self", ".", "par_format", ",", "self", ".", "par_fieldnames", ")", "if", "self", ".", "tied", "is", "not", "None", ":", "self", ".", "_write_df", "(", "\"tied parameter data\"", ",", "f_out", ",", "self", ".", "tied", ",", "self", ".", "tied_format", ",", "self", ".", "tied_fieldnames", ")", "f_out", ".", "write", "(", "\"* observation groups\\n\"", ")", "for", "group", "in", "self", ".", "obs_groups", ":", "try", ":", "group", "=", "group", ".", "decode", "(", ")", "except", ":", "pass", "f_out", ".", "write", "(", "pst_utils", ".", "SFMT", "(", "str", "(", "group", ")", ")", "+", "'\\n'", ")", "for", "group", "in", "self", ".", "prior_groups", ":", "try", ":", "group", "=", "group", ".", "decode", "(", ")", "except", ":", "pass", "f_out", ".", "write", "(", "pst_utils", ".", "SFMT", "(", "str", "(", "group", ")", ")", "+", "'\\n'", ")", "self", ".", "_write_df", "(", "\"* observation data\"", ",", "f_out", ",", "self", ".", "observation_data", ",", "self", ".", "obs_format", ",", "self", ".", "obs_fieldnames", ")", "f_out", ".", "write", "(", "\"* model command line\\n\"", ")", "for", "cline", "in", "self", ".", "model_command", ":", "f_out", ".", "write", "(", "cline", "+", "'\\n'", ")", "f_out", ".", "write", "(", "\"* model input/output\\n\"", ")", "for", "tplfle", ",", "infle", "in", "zip", "(", "self", ".", "template_files", ",", "self", ".", "input_files", ")", ":", "f_out", ".", "write", "(", "tplfle", "+", "' '", "+", "infle", "+", "'\\n'", ")", "for", "insfle", ",", "outfle", "in", "zip", "(", "self", ".", "instruction_files", ",", "self", ".", "output_files", ")", ":", "f_out", ".", "write", "(", "insfle", "+", "' '", "+", "outfle", "+", "'\\n'", ")", 
"if", "self", ".", "nprior", ">", "0", ":", "if", "self", ".", "prior_information", ".", "isnull", "(", ")", ".", "values", ".", "any", "(", ")", ":", "#print(\"WARNING: NaNs in prior_information dataframe\")", "warnings", ".", "warn", "(", "\"NaNs in prior_information dataframe\"", ",", "PyemuWarning", ")", "f_out", ".", "write", "(", "\"* prior information\\n\"", ")", "#self.prior_information.index = self.prior_information.pop(\"pilbl\")", "max_eq_len", "=", "self", ".", "prior_information", ".", "equation", ".", "apply", "(", "lambda", "x", ":", "len", "(", "x", ")", ")", ".", "max", "(", ")", "eq_fmt_str", "=", "\" {0:<\"", "+", "str", "(", "max_eq_len", ")", "+", "\"s} \"", "eq_fmt_func", "=", "lambda", "x", ":", "eq_fmt_str", ".", "format", "(", "x", ")", "# 17/9/2016 - had to go with a custom writer loop b/c pandas doesn't want to", "# output strings longer than 100, even with display.max_colwidth", "#f_out.write(self.prior_information.to_string(col_space=0,", "# columns=self.prior_fieldnames,", "# formatters=pi_formatters,", "# justify=\"right\",", "# header=False,", "# index=False) + '\\n')", "#self.prior_information[\"pilbl\"] = self.prior_information.index", "# for idx,row in self.prior_information.iterrows():", "# f_out.write(pst_utils.SFMT(row[\"pilbl\"]))", "# f_out.write(eq_fmt_func(row[\"equation\"]))", "# f_out.write(pst_utils.FFMT(row[\"weight\"]))", "# f_out.write(pst_utils.SFMT(row[\"obgnme\"]) + '\\n')", "for", "idx", ",", "row", "in", "self", ".", "prior_information", ".", "iterrows", "(", ")", ":", "f_out", ".", "write", "(", "pst_utils", ".", "SFMT", "(", "row", "[", "\"pilbl\"", "]", ")", ")", "f_out", ".", "write", "(", "eq_fmt_func", "(", "row", "[", "\"equation\"", "]", ")", ")", "f_out", ".", "write", "(", "pst_utils", ".", "FFMT", "(", "row", "[", "\"weight\"", "]", ")", ")", "f_out", ".", "write", "(", "pst_utils", ".", "SFMT", "(", "row", "[", "\"obgnme\"", "]", ")", ")", "if", "self", ".", "with_comments", "and", "'extra'", "in", "row", ":", "f_out", ".", "write", "(", "\" # {0}\"", ".", "format", "(", "row", "[", "'extra'", "]", ")", ")", "f_out", ".", "write", "(", "'\\n'", ")", "if", "self", ".", "control_data", ".", "pestmode", ".", "startswith", "(", "\"regul\"", ")", ":", "#f_out.write(\"* regularisation\\n\")", "#if update_regul or len(self.regul_lines) == 0:", "# f_out.write(self.regul_section)", "#else:", "# [f_out.write(line) for line in self.regul_lines]", "self", ".", "reg_data", ".", "write", "(", "f_out", ")", "for", "line", "in", "self", ".", "other_lines", ":", "f_out", ".", "write", "(", "line", "+", "'\\n'", ")", "for", "key", ",", "value", "in", "self", ".", "pestpp_options", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "','", ".", "join", "(", "[", "str", "(", "v", ")", "for", "v", "in", "value", "]", ")", "f_out", ".", "write", "(", "\"++{0}({1})\\n\"", ".", "format", "(", "str", "(", "key", ")", ",", "str", "(", "value", ")", ")", ")", "if", "self", ".", "with_comments", ":", "for", "line", "in", "self", ".", "comments", ".", "get", "(", "\"final\"", ",", "[", "]", ")", ":", "f_out", ".", "write", "(", "line", "+", "'\\n'", ")", "f_out", ".", "close", "(", ")" ]
41.946565
20.274809
def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, create=False):
    """Look for rdata with the specified name and type in the zone,
    and return an rdataset encapsulating it.

    The I{name}, I{rdtype}, and I{covers} parameters may be
    strings, in which case they will be converted to their proper type.

    The rdataset returned is not a copy; changes to it will change
    the zone.

    KeyError is raised if the name or type are not found.
    Use L{get_rdataset} if you want to have None returned instead.

    @param name: the owner name to look for
    @type name: DNS.name.Name object or string
    @param rdtype: the rdata type desired
    @type rdtype: int or string
    @param covers: the covered type (defaults to None)
    @type covers: int or string
    @param create: should the node and rdataset be created if they do not
        exist?
    @type create: bool
    @raises KeyError: the node or rdata could not be found
    @rtype: dns.rrset.RRset object
    """

    name = self._validate_name(name)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    if isinstance(covers, (str, unicode)):
        covers = dns.rdatatype.from_text(covers)
    node = self.find_node(name, create)
    return node.find_rdataset(self.rdclass, rdtype, covers, create)
[ "def", "find_rdataset", "(", "self", ",", "name", ",", "rdtype", ",", "covers", "=", "dns", ".", "rdatatype", ".", "NONE", ",", "create", "=", "False", ")", ":", "name", "=", "self", ".", "_validate_name", "(", "name", ")", "if", "isinstance", "(", "rdtype", ",", "(", "str", ",", "unicode", ")", ")", ":", "rdtype", "=", "dns", ".", "rdatatype", ".", "from_text", "(", "rdtype", ")", "if", "isinstance", "(", "covers", ",", "(", "str", ",", "unicode", ")", ")", ":", "covers", "=", "dns", ".", "rdatatype", ".", "from_text", "(", "covers", ")", "node", "=", "self", ".", "find_node", "(", "name", ",", "create", ")", "return", "node", ".", "find_rdataset", "(", "self", ".", "rdclass", ",", "rdtype", ",", "covers", ",", "create", ")" ]
40.857143
18.285714
def OSPFNeighborState_originator_switch_info_switchVcsId(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState",
                                      xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator_switch_info = ET.SubElement(OSPFNeighborState, "originator-switch-info")
    switchVcsId = ET.SubElement(originator_switch_info, "switchVcsId")
    switchVcsId.text = kwargs.pop('switchVcsId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "OSPFNeighborState_originator_switch_info_switchVcsId", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "OSPFNeighborState", "=", "ET", ".", "SubElement", "(", "config", ",", "\"OSPFNeighborState\"", ",", "xmlns", "=", "\"http://brocade.com/ns/brocade-notification-stream\"", ")", "originator_switch_info", "=", "ET", ".", "SubElement", "(", "OSPFNeighborState", ",", "\"originator-switch-info\"", ")", "switchVcsId", "=", "ET", ".", "SubElement", "(", "originator_switch_info", ",", "\"switchVcsId\"", ")", "switchVcsId", ".", "text", "=", "kwargs", ".", "pop", "(", "'switchVcsId'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
53.181818
26.181818
def import_start_event_to_graph(diagram_graph, process_id, process_attributes, element):
    """
    Adds to graph the new element that represents BPMN start event.
    Start event inherits attribute parallelMultiple from CatchEvent type
    and sequence of eventDefinitionRef from Event type.
    Separate methods for each event type are required since each of them has different variants
    (Message, Error, Signal etc.).

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param process_id: string object, representing an ID of process element,
    :param process_attributes: dictionary that holds attribute values of 'process' element,
        which is parent of imported flow node,
    :param element: object representing a BPMN XML 'startEvent' element.
    """
    element_id = element.getAttribute(consts.Consts.id)
    start_event_definitions = {'messageEventDefinition', 'timerEventDefinition', 'conditionalEventDefinition',
                               'escalationEventDefinition', 'signalEventDefinition'}
    BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id, process_attributes, element)
    diagram_graph.node[element_id][consts.Consts.parallel_multiple] = \
        element.getAttribute(consts.Consts.parallel_multiple) \
        if element.hasAttribute(consts.Consts.parallel_multiple) else "false"
    diagram_graph.node[element_id][consts.Consts.is_interrupting] = \
        element.getAttribute(consts.Consts.is_interrupting) \
        if element.hasAttribute(consts.Consts.is_interrupting) else "true"
    BpmnDiagramGraphImport.import_event_definition_elements(diagram_graph, element, start_event_definitions)
[ "def", "import_start_event_to_graph", "(", "diagram_graph", ",", "process_id", ",", "process_attributes", ",", "element", ")", ":", "element_id", "=", "element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "id", ")", "start_event_definitions", "=", "{", "'messageEventDefinition'", ",", "'timerEventDefinition'", ",", "'conditionalEventDefinition'", ",", "'escalationEventDefinition'", ",", "'signalEventDefinition'", "}", "BpmnDiagramGraphImport", ".", "import_flow_node_to_graph", "(", "diagram_graph", ",", "process_id", ",", "process_attributes", ",", "element", ")", "diagram_graph", ".", "node", "[", "element_id", "]", "[", "consts", ".", "Consts", ".", "parallel_multiple", "]", "=", "element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "parallel_multiple", ")", "if", "element", ".", "hasAttribute", "(", "consts", ".", "Consts", ".", "parallel_multiple", ")", "else", "\"false\"", "diagram_graph", ".", "node", "[", "element_id", "]", "[", "consts", ".", "Consts", ".", "is_interrupting", "]", "=", "element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "is_interrupting", ")", "if", "element", ".", "hasAttribute", "(", "consts", ".", "Consts", ".", "is_interrupting", ")", "else", "\"true\"", "BpmnDiagramGraphImport", ".", "import_event_definition_elements", "(", "diagram_graph", ",", "element", ",", "start_event_definitions", ")" ]
70.64
37.12
def load_metadata_from_desc_file(self, desc_file, partition='train',
                                 max_duration=16.0,):
    """ Read metadata from the description file
        (possibly takes long, depending on the filesize)
    Params:
        desc_file (str): Path to a JSON-line file that contains labels and
            paths to the audio files
        partition (str): One of 'train', 'validation' or 'test'
        max_duration (float): In seconds, the maximum duration of
            utterances to train or test on
    """
    logger = logUtil.getlogger()
    logger.info('Reading description file: {} for partition: {}'
                .format(desc_file, partition))
    audio_paths, durations, texts = [], [], []
    with open(desc_file) as json_line_file:
        for line_num, json_line in enumerate(json_line_file):
            try:
                spec = json.loads(json_line)
                if float(spec['duration']) > max_duration:
                    continue
                audio_paths.append(spec['key'])
                durations.append(float(spec['duration']))
                texts.append(spec['text'])
            except Exception as e:
                # Change to (KeyError, ValueError) or
                # (KeyError,json.decoder.JSONDecodeError), depending on
                # json module version
                logger.warn('Error reading line #{}: {}'
                            .format(line_num, json_line))
                logger.warn(str(e))
    if partition == 'train':
        self.count = len(audio_paths)
        self.train_audio_paths = audio_paths
        self.train_durations = durations
        self.train_texts = texts
    elif partition == 'validation':
        self.val_audio_paths = audio_paths
        self.val_durations = durations
        self.val_texts = texts
        self.val_count = len(audio_paths)
    elif partition == 'test':
        self.test_audio_paths = audio_paths
        self.test_durations = durations
        self.test_texts = texts
    else:
        raise Exception("Invalid partition to load metadata. "
                        "Must be train/validation/test")
[ "def", "load_metadata_from_desc_file", "(", "self", ",", "desc_file", ",", "partition", "=", "'train'", ",", "max_duration", "=", "16.0", ",", ")", ":", "logger", "=", "logUtil", ".", "getlogger", "(", ")", "logger", ".", "info", "(", "'Reading description file: {} for partition: {}'", ".", "format", "(", "desc_file", ",", "partition", ")", ")", "audio_paths", ",", "durations", ",", "texts", "=", "[", "]", ",", "[", "]", ",", "[", "]", "with", "open", "(", "desc_file", ")", "as", "json_line_file", ":", "for", "line_num", ",", "json_line", "in", "enumerate", "(", "json_line_file", ")", ":", "try", ":", "spec", "=", "json", ".", "loads", "(", "json_line", ")", "if", "float", "(", "spec", "[", "'duration'", "]", ")", ">", "max_duration", ":", "continue", "audio_paths", ".", "append", "(", "spec", "[", "'key'", "]", ")", "durations", ".", "append", "(", "float", "(", "spec", "[", "'duration'", "]", ")", ")", "texts", ".", "append", "(", "spec", "[", "'text'", "]", ")", "except", "Exception", "as", "e", ":", "# Change to (KeyError, ValueError) or", "# (KeyError,json.decoder.JSONDecodeError), depending on", "# json module version", "logger", ".", "warn", "(", "'Error reading line #{}: {}'", ".", "format", "(", "line_num", ",", "json_line", ")", ")", "logger", ".", "warn", "(", "str", "(", "e", ")", ")", "if", "partition", "==", "'train'", ":", "self", ".", "count", "=", "len", "(", "audio_paths", ")", "self", ".", "train_audio_paths", "=", "audio_paths", "self", ".", "train_durations", "=", "durations", "self", ".", "train_texts", "=", "texts", "elif", "partition", "==", "'validation'", ":", "self", ".", "val_audio_paths", "=", "audio_paths", "self", ".", "val_durations", "=", "durations", "self", ".", "val_texts", "=", "texts", "self", ".", "val_count", "=", "len", "(", "audio_paths", ")", "elif", "partition", "==", "'test'", ":", "self", ".", "test_audio_paths", "=", "audio_paths", "self", ".", "test_durations", "=", "durations", "self", ".", "test_texts", "=", "texts", "else", ":", "raise", "Exception", "(", "\"Invalid partition to load metadata. \"", "\"Must be train/validation/test\"", ")" ]
46.408163
13.22449
def run_parallel(pipeline, input_gen, options={}, ncpu=4, chunksize=200):
    """ Run a pipeline in parallel over a input generator cutting it into
    small chunks.

    >>> # if we have a simple component
    >>> from reliure.pipeline import Composable
    >>> # that we want to run over a given input:
    >>> input = "abcde"
    >>> import string
    >>> pipeline = Composable(lambda letters: (l.upper() for l in letters))
    >>> res = run_parallel(pipeline, input, ncpu=2, chunksize=2)
    >>> #Note: res should be equals to [['C', 'D'], ['A', 'B'], ['E']]
    >>> #but it seems that there is a bug with py.test and mp...
    """
    t0 = time()
    #FIXME: there is a know issue when pipeline results are "big" object, the merge is bloking... to be investigate
    #TODO: add get_pipeline args to prodvide a fct to build the pipeline (in each worker)
    logger = logging.getLogger("reliure.run_parallel")
    jobs = []
    results = []
    Qdata = mp.JoinableQueue(ncpu*2)    # input queue
    Qresult = mp.Queue()                # result queue
    # ensure input_gen is realy an itertor not a list
    if hasattr(input_gen, "__len__"):
        input_gen = iter(input_gen)
    for wnum in range(ncpu):
        logger.debug("create worker #%s" % wnum)
        worker = mp.Process(target=_reliure_worker, args=(wnum, Qdata, Qresult, pipeline, options))
        worker.start()
        jobs.append(worker)
    while True:
        # consume chunksize elements from input_gen
        chunk = tuple(islice(input_gen, chunksize))
        if not len(chunk):
            break
        logger.info("send a chunk of %s elemets to a worker" % len(chunk))
        Qdata.put(chunk)
    logger.info("all data has beed send to workers")
    # wait until all task are done
    Qdata.join()
    logger.debug("wait for workers...")
    for worker in jobs:
        worker.terminate()
    logger.debug("merge results")
    try:
        while not Qresult.empty():
            logger.debug("result queue still have %d elements" % Qresult.qsize())
            res = Qresult.get_nowait()
            results.append(res)
    except mp.Queue.Empty:
        logger.debug("result queue is empty")
        pass
    logger.info("Pipeline executed in %1.3f sec" % (time() - t0))
    return results
[ "def", "run_parallel", "(", "pipeline", ",", "input_gen", ",", "options", "=", "{", "}", ",", "ncpu", "=", "4", ",", "chunksize", "=", "200", ")", ":", "t0", "=", "time", "(", ")", "#FIXME: there is a know issue when pipeline results are \"big\" object, the merge is bloking... to be investigate", "#TODO: add get_pipeline args to prodvide a fct to build the pipeline (in each worker)", "logger", "=", "logging", ".", "getLogger", "(", "\"reliure.run_parallel\"", ")", "jobs", "=", "[", "]", "results", "=", "[", "]", "Qdata", "=", "mp", ".", "JoinableQueue", "(", "ncpu", "*", "2", ")", "# input queue", "Qresult", "=", "mp", ".", "Queue", "(", ")", "# result queue", "# ensure input_gen is realy an itertor not a list", "if", "hasattr", "(", "input_gen", ",", "\"__len__\"", ")", ":", "input_gen", "=", "iter", "(", "input_gen", ")", "for", "wnum", "in", "range", "(", "ncpu", ")", ":", "logger", ".", "debug", "(", "\"create worker #%s\"", "%", "wnum", ")", "worker", "=", "mp", ".", "Process", "(", "target", "=", "_reliure_worker", ",", "args", "=", "(", "wnum", ",", "Qdata", ",", "Qresult", ",", "pipeline", ",", "options", ")", ")", "worker", ".", "start", "(", ")", "jobs", ".", "append", "(", "worker", ")", "while", "True", ":", "# consume chunksize elements from input_gen", "chunk", "=", "tuple", "(", "islice", "(", "input_gen", ",", "chunksize", ")", ")", "if", "not", "len", "(", "chunk", ")", ":", "break", "logger", ".", "info", "(", "\"send a chunk of %s elemets to a worker\"", "%", "len", "(", "chunk", ")", ")", "Qdata", ".", "put", "(", "chunk", ")", "logger", ".", "info", "(", "\"all data has beed send to workers\"", ")", "# wait until all task are done", "Qdata", ".", "join", "(", ")", "logger", ".", "debug", "(", "\"wait for workers...\"", ")", "for", "worker", "in", "jobs", ":", "worker", ".", "terminate", "(", ")", "logger", ".", "debug", "(", "\"merge results\"", ")", "try", ":", "while", "not", "Qresult", ".", "empty", "(", ")", ":", "logger", ".", "debug", "(", "\"result queue still have %d elements\"", "%", "Qresult", ".", "qsize", "(", ")", ")", "res", "=", "Qresult", ".", "get_nowait", "(", ")", "results", ".", "append", "(", "res", ")", "except", "mp", ".", "Queue", ".", "Empty", ":", "logger", ".", "debug", "(", "\"result queue is empty\"", ")", "pass", "logger", ".", "info", "(", "\"Pipeline executed in %1.3f sec\"", "%", "(", "time", "(", ")", "-", "t0", ")", ")", "return", "results" ]
41.018519
19.166667
def _Open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
        path_spec (PathSpec): path specification.
        mode (Optional[str]): file access mode.

    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file-like object could not be opened.
        NotSupported: if a data stream, like the resource or named fork, is
            requested to be opened.
        OSError: if the file-like object could not be opened.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
    """
    if not path_spec:
        raise ValueError('Missing path specification.')

    data_stream = getattr(path_spec, 'data_stream', None)
    if data_stream:
        raise errors.NotSupported(
            'Open data stream: {0:s} not supported.'.format(data_stream))

    self._file_system = resolver.Resolver.OpenFileSystem(
        path_spec, resolver_context=self._resolver_context)

    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    if not file_entry:
        raise IOError('Unable to open file entry.')

    fsapfs_file_entry = file_entry.GetAPFSFileEntry()
    if not fsapfs_file_entry:
        raise IOError('Unable to open APFS file entry.')

    self._fsapfs_file_entry = fsapfs_file_entry
[ "def", "_Open", "(", "self", ",", "path_spec", "=", "None", ",", "mode", "=", "'rb'", ")", ":", "if", "not", "path_spec", ":", "raise", "ValueError", "(", "'Missing path specification.'", ")", "data_stream", "=", "getattr", "(", "path_spec", ",", "'data_stream'", ",", "None", ")", "if", "data_stream", ":", "raise", "errors", ".", "NotSupported", "(", "'Open data stream: {0:s} not supported.'", ".", "format", "(", "data_stream", ")", ")", "self", ".", "_file_system", "=", "resolver", ".", "Resolver", ".", "OpenFileSystem", "(", "path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")", "file_entry", "=", "self", ".", "_file_system", ".", "GetFileEntryByPathSpec", "(", "path_spec", ")", "if", "not", "file_entry", ":", "raise", "IOError", "(", "'Unable to open file entry.'", ")", "fsapfs_file_entry", "=", "file_entry", ".", "GetAPFSFileEntry", "(", ")", "if", "not", "fsapfs_file_entry", ":", "raise", "IOError", "(", "'Unable to open APFS file entry.'", ")", "self", ".", "_fsapfs_file_entry", "=", "fsapfs_file_entry" ]
36.583333
20.388889
def to_gsea(graph: BELGraph, file: Optional[TextIO] = None) -> None:
    """Write the genes/gene products to a GRP file for use with GSEA gene set enrichment analysis.

    .. seealso::

        - GRP `format specification <http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats#GRP:_Gene_set_file_format_.28.2A.grp.29>`_
        - GSEA `publication <https://doi.org/10.1073/pnas.0506580102>`_
    """
    print('# {}'.format(graph.name), file=file)
    nodes = {
        data[NAME]
        for data in graph
        if NAMESPACE in data and data[NAMESPACE].upper() == 'HGNC' and NAME in data
    }
    for node in sorted(nodes):
        print(node, file=file)
[ "def", "to_gsea", "(", "graph", ":", "BELGraph", ",", "file", ":", "Optional", "[", "TextIO", "]", "=", "None", ")", "->", "None", ":", "print", "(", "'# {}'", ".", "format", "(", "graph", ".", "name", ")", ",", "file", "=", "file", ")", "nodes", "=", "{", "data", "[", "NAME", "]", "for", "data", "in", "graph", "if", "NAMESPACE", "in", "data", "and", "data", "[", "NAMESPACE", "]", ".", "upper", "(", ")", "==", "'HGNC'", "and", "NAME", "in", "data", "}", "for", "node", "in", "sorted", "(", "nodes", ")", ":", "print", "(", "node", ",", "file", "=", "file", ")" ]
42.125
28.4375
def area(self):
    """
    Estimate the area of the polygon.

    Returns
    -------
    number
        Area of the polygon.

    """
    if len(self.exterior) < 3:
        raise Exception("Cannot compute the polygon's area because it contains less than three points.")
    poly = self.to_shapely_polygon()
    return poly.area
[ "def", "area", "(", "self", ")", ":", "if", "len", "(", "self", ".", "exterior", ")", "<", "3", ":", "raise", "Exception", "(", "\"Cannot compute the polygon's area because it contains less than three points.\"", ")", "poly", "=", "self", ".", "to_shapely_polygon", "(", ")", "return", "poly", ".", "area" ]
25.714286
20
def requestField(self, field_name, required=False, strict=False):
    """Request the specified field from the OpenID user

    @param field_name: the unqualified simple registration field name
    @type field_name: str

    @param required: whether the given field should be presented
        to the user as being a required to successfully complete
        the request

    @param strict: whether to raise an exception when a field is
        added to a request more than once

    @raise ValueError: when the field requested is not a simple
        registration field or strict is set and the field was
        requested more than once
    """
    checkFieldName(field_name)

    if strict:
        if field_name in self.required or field_name in self.optional:
            raise ValueError('That field has already been requested')
    else:
        if field_name in self.required:
            return

        if field_name in self.optional:
            if required:
                self.optional.remove(field_name)
            else:
                return

    if required:
        self.required.append(field_name)
    else:
        self.optional.append(field_name)
[ "def", "requestField", "(", "self", ",", "field_name", ",", "required", "=", "False", ",", "strict", "=", "False", ")", ":", "checkFieldName", "(", "field_name", ")", "if", "strict", ":", "if", "field_name", "in", "self", ".", "required", "or", "field_name", "in", "self", ".", "optional", ":", "raise", "ValueError", "(", "'That field has already been requested'", ")", "else", ":", "if", "field_name", "in", "self", ".", "required", ":", "return", "if", "field_name", "in", "self", ".", "optional", ":", "if", "required", ":", "self", ".", "optional", ".", "remove", "(", "field_name", ")", "else", ":", "return", "if", "required", ":", "self", ".", "required", ".", "append", "(", "field_name", ")", "else", ":", "self", ".", "optional", ".", "append", "(", "field_name", ")" ]
34.583333
21.361111
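A short sketch of how the method above behaves; `request` is assumed to be an instance of the enclosing simple-registration request class, with its `required` and `optional` lists initially empty:

request.requestField('email')                  # added to request.optional
request.requestField('email', required=True)   # promoted: removed from optional, appended to required
# request.requestField('email', strict=True)   # would raise ValueError: field already requested
# request.requestField('shoe_size')            # would raise ValueError from checkFieldName (not an SReg field)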
def add_scaled_residues_highlight_to_nglview(self, view, structure_resnums, chain=None, color='red', unique_colors=False, opacity_range=(0.5,1), scale_range=(.7, 10), multiplier=None): """Add a list of residue numbers (which may contain repeating residues) to a view, or add a dictionary of residue numbers to counts. Size and opacity of added residues are scaled by counts. Args: view (NGLWidget): NGLWidget view object structure_resnums (int, list, dict): Residue number(s) to highlight, or a dictionary of residue number to frequency count chain (str, list): Chain ID or IDs of which residues are a part of. If not provided, all chains in the mapped_chains attribute will be used. If that is also empty, and exception is raised. color (str): Color to highlight residues with unique_colors (bool): If each mutation should be colored uniquely (will override color argument) opacity_range (tuple): Min/max opacity values (residues that have higher frequency counts will be opaque) scale_range (tuple): Min/max size values (residues that have higher frequency counts will be bigger) """ # TODO: likely to move these functions to a separate nglview/utils folder since they are not coupled to the structure # TODO: add color by letter_annotations! if not chain: chain = self.mapped_chains if not chain: raise ValueError('Please input chain ID to display residue on') else: chain = ssbio.utils.force_list(chain) if isinstance(structure_resnums, dict): if not multiplier: multiplier = 1 opacity_dict = ssbio.utils.scale_calculator(multiplier, structure_resnums, rescale=opacity_range) scale_dict = ssbio.utils.scale_calculator(multiplier, structure_resnums, rescale=scale_range) else: opacity_dict = {x: max(opacity_range) for x in ssbio.utils.force_list(structure_resnums)} scale_dict = {x: max(scale_range) for x in ssbio.utils.force_list(structure_resnums)} if isinstance(structure_resnums, list): structure_resnums = list(set(structure_resnums)) elif isinstance(structure_resnums, dict): structure_resnums = list(structure_resnums.keys()) elif isinstance(structure_resnums, int): structure_resnums = ssbio.utils.force_list(structure_resnums) else: raise ValueError('Input must either be a list of residue numbers or a dictionary of residue numbers ' 'and their frequency.') colors = sns.color_palette("hls", len(structure_resnums)).as_hex() to_show_chains = '( ' for c in chain: to_show_chains += ':{} or'.format(c) to_show_chains = to_show_chains.strip(' or ') to_show_chains += ' )' for i, x in enumerate(structure_resnums): if isinstance(x, tuple): to_show_res = '( ' for mut in x: to_show_res += '{} or '.format(mut) to_show_res = to_show_res.strip(' or ') to_show_res += ' )' else: to_show_res = x log.info('Selection: {} and not hydrogen and {}'.format(to_show_chains, to_show_res)) if unique_colors: view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=colors[i], opacity=opacity_dict[x], scale=scale_dict[x]) else: view.add_ball_and_stick(selection='{} and not hydrogen and {}'.format(to_show_chains, to_show_res), color=color, opacity=opacity_dict[x], scale=scale_dict[x])
[ "def", "add_scaled_residues_highlight_to_nglview", "(", "self", ",", "view", ",", "structure_resnums", ",", "chain", "=", "None", ",", "color", "=", "'red'", ",", "unique_colors", "=", "False", ",", "opacity_range", "=", "(", "0.5", ",", "1", ")", ",", "scale_range", "=", "(", ".7", ",", "10", ")", ",", "multiplier", "=", "None", ")", ":", "# TODO: likely to move these functions to a separate nglview/utils folder since they are not coupled to the structure", "# TODO: add color by letter_annotations!", "if", "not", "chain", ":", "chain", "=", "self", ".", "mapped_chains", "if", "not", "chain", ":", "raise", "ValueError", "(", "'Please input chain ID to display residue on'", ")", "else", ":", "chain", "=", "ssbio", ".", "utils", ".", "force_list", "(", "chain", ")", "if", "isinstance", "(", "structure_resnums", ",", "dict", ")", ":", "if", "not", "multiplier", ":", "multiplier", "=", "1", "opacity_dict", "=", "ssbio", ".", "utils", ".", "scale_calculator", "(", "multiplier", ",", "structure_resnums", ",", "rescale", "=", "opacity_range", ")", "scale_dict", "=", "ssbio", ".", "utils", ".", "scale_calculator", "(", "multiplier", ",", "structure_resnums", ",", "rescale", "=", "scale_range", ")", "else", ":", "opacity_dict", "=", "{", "x", ":", "max", "(", "opacity_range", ")", "for", "x", "in", "ssbio", ".", "utils", ".", "force_list", "(", "structure_resnums", ")", "}", "scale_dict", "=", "{", "x", ":", "max", "(", "scale_range", ")", "for", "x", "in", "ssbio", ".", "utils", ".", "force_list", "(", "structure_resnums", ")", "}", "if", "isinstance", "(", "structure_resnums", ",", "list", ")", ":", "structure_resnums", "=", "list", "(", "set", "(", "structure_resnums", ")", ")", "elif", "isinstance", "(", "structure_resnums", ",", "dict", ")", ":", "structure_resnums", "=", "list", "(", "structure_resnums", ".", "keys", "(", ")", ")", "elif", "isinstance", "(", "structure_resnums", ",", "int", ")", ":", "structure_resnums", "=", "ssbio", ".", "utils", ".", "force_list", "(", "structure_resnums", ")", "else", ":", "raise", "ValueError", "(", "'Input must either be a list of residue numbers or a dictionary of residue numbers '", "'and their frequency.'", ")", "colors", "=", "sns", ".", "color_palette", "(", "\"hls\"", ",", "len", "(", "structure_resnums", ")", ")", ".", "as_hex", "(", ")", "to_show_chains", "=", "'( '", "for", "c", "in", "chain", ":", "to_show_chains", "+=", "':{} or'", ".", "format", "(", "c", ")", "to_show_chains", "=", "to_show_chains", ".", "strip", "(", "' or '", ")", "to_show_chains", "+=", "' )'", "for", "i", ",", "x", "in", "enumerate", "(", "structure_resnums", ")", ":", "if", "isinstance", "(", "x", ",", "tuple", ")", ":", "to_show_res", "=", "'( '", "for", "mut", "in", "x", ":", "to_show_res", "+=", "'{} or '", ".", "format", "(", "mut", ")", "to_show_res", "=", "to_show_res", ".", "strip", "(", "' or '", ")", "to_show_res", "+=", "' )'", "else", ":", "to_show_res", "=", "x", "log", ".", "info", "(", "'Selection: {} and not hydrogen and {}'", ".", "format", "(", "to_show_chains", ",", "to_show_res", ")", ")", "if", "unique_colors", ":", "view", ".", "add_ball_and_stick", "(", "selection", "=", "'{} and not hydrogen and {}'", ".", "format", "(", "to_show_chains", ",", "to_show_res", ")", ",", "color", "=", "colors", "[", "i", "]", ",", "opacity", "=", "opacity_dict", "[", "x", "]", ",", "scale", "=", "scale_dict", "[", "x", "]", ")", "else", ":", "view", ".", "add_ball_and_stick", "(", "selection", "=", "'{} and not hydrogen and {}'", ".", "format", "(", 
"to_show_chains", ",", "to_show_res", ")", ",", "color", "=", "color", ",", "opacity", "=", "opacity_dict", "[", "x", "]", ",", "scale", "=", "scale_dict", "[", "x", "]", ")" ]
54.694444
32.5
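An illustrative call for the method above. Here `prop` stands for the enclosing ssbio structure object and `view` for an nglview.NGLWidget already displaying it; the residue numbers and counts are made-up data:

residue_counts = {45: 3, 102: 7, 130: 1}   # residue number -> frequency (hypothetical)
prop.add_scaled_residues_highlight_to_nglview(view, residue_counts, chain='A',
                                              unique_colors=True,
                                              opacity_range=(0.5, 1),
                                              scale_range=(0.7, 10))
# Per the docstring, residue 102 (highest count) is drawn largest and most opaque,
# residue 130 (lowest count) smallest and most transparent.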
def _get_cpu_info_from_ibm_pa_features(): ''' Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features Returns {} if lsprop is not found or ibm,pa-features does not have the desired info. ''' try: # Just return {} if there is no lsprop if not DataSource.has_ibm_pa_features(): return {} # If ibm,pa-features fails return {} returncode, output = DataSource.ibm_pa_features() if output == None or returncode != 0: return {} # Filter out invalid characters from output value = output.split("ibm,pa-features")[1].lower() value = [s for s in value if s in list('0123456789abcfed')] value = ''.join(value) # Get data converted to Uint32 chunks left = int(value[0 : 8], 16) right = int(value[8 : 16], 16) # Get the CPU flags flags = { # Byte 0 'mmu' : _is_bit_set(left, 0), 'fpu' : _is_bit_set(left, 1), 'slb' : _is_bit_set(left, 2), 'run' : _is_bit_set(left, 3), #'reserved' : _is_bit_set(left, 4), 'dabr' : _is_bit_set(left, 5), 'ne' : _is_bit_set(left, 6), 'wtr' : _is_bit_set(left, 7), # Byte 1 'mcr' : _is_bit_set(left, 8), 'dsisr' : _is_bit_set(left, 9), 'lp' : _is_bit_set(left, 10), 'ri' : _is_bit_set(left, 11), 'dabrx' : _is_bit_set(left, 12), 'sprg3' : _is_bit_set(left, 13), 'rislb' : _is_bit_set(left, 14), 'pp' : _is_bit_set(left, 15), # Byte 2 'vpm' : _is_bit_set(left, 16), 'dss_2.05' : _is_bit_set(left, 17), #'reserved' : _is_bit_set(left, 18), 'dar' : _is_bit_set(left, 19), #'reserved' : _is_bit_set(left, 20), 'ppr' : _is_bit_set(left, 21), 'dss_2.02' : _is_bit_set(left, 22), 'dss_2.06' : _is_bit_set(left, 23), # Byte 3 'lsd_in_dscr' : _is_bit_set(left, 24), 'ugr_in_dscr' : _is_bit_set(left, 25), #'reserved' : _is_bit_set(left, 26), #'reserved' : _is_bit_set(left, 27), #'reserved' : _is_bit_set(left, 28), #'reserved' : _is_bit_set(left, 29), #'reserved' : _is_bit_set(left, 30), #'reserved' : _is_bit_set(left, 31), # Byte 4 'sso_2.06' : _is_bit_set(right, 0), #'reserved' : _is_bit_set(right, 1), #'reserved' : _is_bit_set(right, 2), #'reserved' : _is_bit_set(right, 3), #'reserved' : _is_bit_set(right, 4), #'reserved' : _is_bit_set(right, 5), #'reserved' : _is_bit_set(right, 6), #'reserved' : _is_bit_set(right, 7), # Byte 5 'le' : _is_bit_set(right, 8), 'cfar' : _is_bit_set(right, 9), 'eb' : _is_bit_set(right, 10), 'lsq_2.07' : _is_bit_set(right, 11), #'reserved' : _is_bit_set(right, 12), #'reserved' : _is_bit_set(right, 13), #'reserved' : _is_bit_set(right, 14), #'reserved' : _is_bit_set(right, 15), # Byte 6 'dss_2.07' : _is_bit_set(right, 16), #'reserved' : _is_bit_set(right, 17), #'reserved' : _is_bit_set(right, 18), #'reserved' : _is_bit_set(right, 19), #'reserved' : _is_bit_set(right, 20), #'reserved' : _is_bit_set(right, 21), #'reserved' : _is_bit_set(right, 22), #'reserved' : _is_bit_set(right, 23), # Byte 7 #'reserved' : _is_bit_set(right, 24), #'reserved' : _is_bit_set(right, 25), #'reserved' : _is_bit_set(right, 26), #'reserved' : _is_bit_set(right, 27), #'reserved' : _is_bit_set(right, 28), #'reserved' : _is_bit_set(right, 29), #'reserved' : _is_bit_set(right, 30), #'reserved' : _is_bit_set(right, 31), } # Get a list of only the flags that are true flags = [k for k, v in flags.items() if v] flags.sort() info = { 'flags' : flags } info = {k: v for k, v in info.items() if v} return info except: return {}
[ "def", "_get_cpu_info_from_ibm_pa_features", "(", ")", ":", "try", ":", "# Just return {} if there is no lsprop", "if", "not", "DataSource", ".", "has_ibm_pa_features", "(", ")", ":", "return", "{", "}", "# If ibm,pa-features fails return {}", "returncode", ",", "output", "=", "DataSource", ".", "ibm_pa_features", "(", ")", "if", "output", "==", "None", "or", "returncode", "!=", "0", ":", "return", "{", "}", "# Filter out invalid characters from output", "value", "=", "output", ".", "split", "(", "\"ibm,pa-features\"", ")", "[", "1", "]", ".", "lower", "(", ")", "value", "=", "[", "s", "for", "s", "in", "value", "if", "s", "in", "list", "(", "'0123456789abcfed'", ")", "]", "value", "=", "''", ".", "join", "(", "value", ")", "# Get data converted to Uint32 chunks", "left", "=", "int", "(", "value", "[", "0", ":", "8", "]", ",", "16", ")", "right", "=", "int", "(", "value", "[", "8", ":", "16", "]", ",", "16", ")", "# Get the CPU flags", "flags", "=", "{", "# Byte 0", "'mmu'", ":", "_is_bit_set", "(", "left", ",", "0", ")", ",", "'fpu'", ":", "_is_bit_set", "(", "left", ",", "1", ")", ",", "'slb'", ":", "_is_bit_set", "(", "left", ",", "2", ")", ",", "'run'", ":", "_is_bit_set", "(", "left", ",", "3", ")", ",", "#'reserved' : _is_bit_set(left, 4),", "'dabr'", ":", "_is_bit_set", "(", "left", ",", "5", ")", ",", "'ne'", ":", "_is_bit_set", "(", "left", ",", "6", ")", ",", "'wtr'", ":", "_is_bit_set", "(", "left", ",", "7", ")", ",", "# Byte 1", "'mcr'", ":", "_is_bit_set", "(", "left", ",", "8", ")", ",", "'dsisr'", ":", "_is_bit_set", "(", "left", ",", "9", ")", ",", "'lp'", ":", "_is_bit_set", "(", "left", ",", "10", ")", ",", "'ri'", ":", "_is_bit_set", "(", "left", ",", "11", ")", ",", "'dabrx'", ":", "_is_bit_set", "(", "left", ",", "12", ")", ",", "'sprg3'", ":", "_is_bit_set", "(", "left", ",", "13", ")", ",", "'rislb'", ":", "_is_bit_set", "(", "left", ",", "14", ")", ",", "'pp'", ":", "_is_bit_set", "(", "left", ",", "15", ")", ",", "# Byte 2", "'vpm'", ":", "_is_bit_set", "(", "left", ",", "16", ")", ",", "'dss_2.05'", ":", "_is_bit_set", "(", "left", ",", "17", ")", ",", "#'reserved' : _is_bit_set(left, 18),", "'dar'", ":", "_is_bit_set", "(", "left", ",", "19", ")", ",", "#'reserved' : _is_bit_set(left, 20),", "'ppr'", ":", "_is_bit_set", "(", "left", ",", "21", ")", ",", "'dss_2.02'", ":", "_is_bit_set", "(", "left", ",", "22", ")", ",", "'dss_2.06'", ":", "_is_bit_set", "(", "left", ",", "23", ")", ",", "# Byte 3", "'lsd_in_dscr'", ":", "_is_bit_set", "(", "left", ",", "24", ")", ",", "'ugr_in_dscr'", ":", "_is_bit_set", "(", "left", ",", "25", ")", ",", "#'reserved' : _is_bit_set(left, 26),", "#'reserved' : _is_bit_set(left, 27),", "#'reserved' : _is_bit_set(left, 28),", "#'reserved' : _is_bit_set(left, 29),", "#'reserved' : _is_bit_set(left, 30),", "#'reserved' : _is_bit_set(left, 31),", "# Byte 4", "'sso_2.06'", ":", "_is_bit_set", "(", "right", ",", "0", ")", ",", "#'reserved' : _is_bit_set(right, 1),", "#'reserved' : _is_bit_set(right, 2),", "#'reserved' : _is_bit_set(right, 3),", "#'reserved' : _is_bit_set(right, 4),", "#'reserved' : _is_bit_set(right, 5),", "#'reserved' : _is_bit_set(right, 6),", "#'reserved' : _is_bit_set(right, 7),", "# Byte 5", "'le'", ":", "_is_bit_set", "(", "right", ",", "8", ")", ",", "'cfar'", ":", "_is_bit_set", "(", "right", ",", "9", ")", ",", "'eb'", ":", "_is_bit_set", "(", "right", ",", "10", ")", ",", "'lsq_2.07'", ":", "_is_bit_set", "(", "right", ",", "11", ")", ",", "#'reserved' : _is_bit_set(right, 12),", "#'reserved' : 
_is_bit_set(right, 13),", "#'reserved' : _is_bit_set(right, 14),", "#'reserved' : _is_bit_set(right, 15),", "# Byte 6", "'dss_2.07'", ":", "_is_bit_set", "(", "right", ",", "16", ")", ",", "#'reserved' : _is_bit_set(right, 17),", "#'reserved' : _is_bit_set(right, 18),", "#'reserved' : _is_bit_set(right, 19),", "#'reserved' : _is_bit_set(right, 20),", "#'reserved' : _is_bit_set(right, 21),", "#'reserved' : _is_bit_set(right, 22),", "#'reserved' : _is_bit_set(right, 23),", "# Byte 7", "#'reserved' : _is_bit_set(right, 24),", "#'reserved' : _is_bit_set(right, 25),", "#'reserved' : _is_bit_set(right, 26),", "#'reserved' : _is_bit_set(right, 27),", "#'reserved' : _is_bit_set(right, 28),", "#'reserved' : _is_bit_set(right, 29),", "#'reserved' : _is_bit_set(right, 30),", "#'reserved' : _is_bit_set(right, 31),", "}", "# Get a list of only the flags that are true", "flags", "=", "[", "k", "for", "k", ",", "v", "in", "flags", ".", "items", "(", ")", "if", "v", "]", "flags", ".", "sort", "(", ")", "info", "=", "{", "'flags'", ":", "flags", "}", "info", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "info", ".", "items", "(", ")", "if", "v", "}", "return", "info", "except", ":", "return", "{", "}" ]
29.058824
13.579832
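A worked example of the chunking arithmetic in the entry above; the hex payload is invented purely to illustrate how the filtered `ibm,pa-features` string is split into two 32-bit integers, which the `_is_bit_set` helper (defined elsewhere in the module) then inspects bit by bit:

value = "f63fc7058000c000"    # hypothetical, already-filtered 16 hex characters
left = int(value[0:8], 16)    # first 32-bit chunk  -> flags commented as "Byte 0".."Byte 3"
right = int(value[8:16], 16)  # second 32-bit chunk -> flags commented as "Byte 4".."Byte 7"
# Each named flag above is a single bit test on `left` or `right` via _is_bit_set.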
def _collect_pops(stack, depth, pops, skip): """Recursively collects stack entries off the top of the stack according to the stack entry's depth.""" if depth >= 0: return pops set_current_depth_after_recursion = False set_skip_for_current_entry_children = False set_skip_after_current_entry = False extract_next_tokens = False expect_extracted_tokens = [] se = stack.pop() pops_len = len(pops) if (pops_len > 1 and se.opname == 'BUILD_TUPLE' and pops[-1].opname == 'LOAD_CONST' and pops[-1].oparg_repr[0] in ['lambda', '{', '(', '['] and pops[-2].opname in ['MAKE_CLOSURE', 'MAKE_FUNCTION']): # Skip BUILD_TUPLE and its children if they are storing arguments for a closure, since those don't show up in the code. skip = -se.stack_depth + 1 if (pops_len > 2 and sys.version_info[0] >= 3 and se.opname == 'BUILD_TUPLE' and pops[-1].opname == 'LOAD_CONST' and pops[-1].oparg_repr[0] in ['lambda', '{', '(', '['] and pops[-2].opname == 'LOAD_CONST' and pops[-3].opname in ['MAKE_CLOSURE', 'MAKE_FUNCTION']): # Skip BUILD_TUPLE and its children if they are storing arguments for a closure, since those don't show up in the code. skip = -se.stack_depth + 1 if (pops_len > 0 and se.opname == 'GET_ITER' and pops[-1].opname == 'CALL_FUNCTION'): # CALL_FUNCTION followed by GET_ITER means we are calling one of the comprehensions and we are about to load its arguments. # The CALL_FUNCTION at the top of the stack should be invisible, since it expects a ')' which won't appear in the code. pops[-1].oparg_repr = [''] # We need to extract the arguments that we're about to load so that we can store their tokens inside of the upcoming comprehension. extract_next_tokens = -1 if (len(stack) and se.opname == 'BUILD_TUPLE_UNPACK_WITH_CALL'): extract_next_tokens = se.stack_depth expect_extracted_tokens = [ # Expect LOAD_FAST as the first element (required=True), and prepend its oparg_repr with '*'. (0, 'LOAD_FAST', True, 'prepend', ['*']), # Expect BUILD_TUPLE as the last stack token extracted (required=False) and replace its oparg_repr with ''. (abs(extract_next_tokens) - 1, 'BUILD_TUPLE', False, 'replace', ['']) ] set_current_depth_after_recursion = se.stack_depth se.stack_depth = 0 if (pops_len > 0 and sys.version_info[0] >= 3 and se.opname == 'LOAD_CONST' and pops[-1].opname == 'MAKE_FUNCTION'): # In python 3, MAKE_FUNCTION followed by LOAD_CONST is loading the name of the function, which won't appear in the code. se.oparg_repr = [''] # Additionally, this entry shouldn't impact future stack computations, as MAKE_FUNCTION will be removed. set_current_depth_after_recursion = 0 if pops_len and pops[-1].opname == 'LIST_APPEND': # Skip all but the first stack entry of list comprehensions. Sets the skip value to be all remaining stack entries. # The BUILD_LIST check below will disable skip at the right time. set_skip_after_current_entry = len(stack) if skip > 0 and se.opname == 'BUILD_LIST' and se.stack_depth == 0: # If we're in skip mode and we just hit what might be the beginning of a list comprehension, check for a LIST_APPEND in the current pops. for popped_se in pops[::-1]: if popped_se.opname == 'LIST_APPEND': skip = 0 break children_skip = skip if (se.opname.startswith('UNARY_') or (se.opname.startswith('BINARY_') and se.opname != 'BINARY_SUBSCR') or se.opname == 'SLICE+2' or se.opname == 'SLICE+3' or se.opname == 'COMPARE_OP'): # Unary and binary ops come after their operand(s) on the stack, but before (or between) their operand(s) in code, so we need to reverse that. 
if set_skip_for_current_entry_children or skip > 0: children_skip = 1 pops = _collect_pops(stack, -1, pops, children_skip) if skip <= 0: pops.append(se) qj._DEBUG_QJ and qj.LOG_FN('added se: %r' % se) else: qj._DEBUG_QJ and qj.LOG_FN('(skipping se: %r %r)' % (se.opname, se.oparg_repr)) popped_depth = se.stack_depth + 1 else: # Non prefix/infix ops -- their representations come after their children in code, or they don't have children. if skip <= 0: pops.append(se) qj._DEBUG_QJ and qj.LOG_FN('added se: %r' % se) else: qj._DEBUG_QJ and qj.LOG_FN('(skipping se: %r %r)' % (se.opname, se.oparg_repr)) if ((se.stack_depth < 0 and se.opname != 'BUILD_SLICE' and se.opname.startswith('BUILD_')) or se.stack_depth >= 0): next_depth = se.stack_depth else: next_depth = se.stack_depth - 1 if set_skip_for_current_entry_children or skip > 0: children_skip = abs(next_depth) if se.opname == 'BUILD_SLICE': # BUILD_SLICE's arguments need to be collected, as missing args are replaced with Nones which don't appear in the code. slice_pops = _collect_pops(stack, next_depth, [], children_skip) added_colon = 0 for slice_se in slice_pops: if slice_se.opname == 'LOAD_CONST' and slice_se.oparg_repr[0] == 'None': if added_colon >= 1: slice_se.oparg_repr = [''] else: slice_se.oparg_repr = [':'] added_colon += 1 pops.append(slice_se) else: pops = _collect_pops(stack, next_depth, pops, children_skip) # BUILD_LIST 0 marks the start of a list comprehension, but we need it to consume a slot on the stack. if se.stack_depth == 0 and se.opname != 'BUILD_LIST': popped_depth = 0 else: popped_depth = 1 tokens = [] if extract_next_tokens < 0: tokens = _collect_pops(stack, extract_next_tokens, [], skip) for index, expected_token, required, fixup_type, fixup_value in expect_extracted_tokens: if qj._DEBUG_QJ: assert (index < 0 and index + len(tokens) > 0) or 0 <= index < len(tokens) if required: assert tokens[index].opname == expected_token if (index < 0 and index + len(tokens) > 0) or 0 <= index < len(tokens) and tokens[index].opname == expected_token: if fixup_type == 'prepend': tokens[index].oparg_repr = fixup_value + tokens[index].oparg_repr elif fixup_type == 'replace': tokens[index].oparg_repr = fixup_value tokens.reverse() popped_depth -= extract_next_tokens if children_skip > 0: skip -= popped_depth if set_skip_after_current_entry > 0: skip = set_skip_after_current_entry + max(0, skip) pops = _collect_pops(stack, depth + popped_depth, pops, skip) if len(tokens): # pylint: disable=g-explicit-length-test target_se = pops[-1] target_se.children.append(tokens) target_se.oparg_repr = target_se.oparg_repr[:1] + [t for token in tokens for t in token.oparg_repr] + target_se.oparg_repr[1:] if set_current_depth_after_recursion is not False: se.stack_depth = set_current_depth_after_recursion return pops
[ "def", "_collect_pops", "(", "stack", ",", "depth", ",", "pops", ",", "skip", ")", ":", "if", "depth", ">=", "0", ":", "return", "pops", "set_current_depth_after_recursion", "=", "False", "set_skip_for_current_entry_children", "=", "False", "set_skip_after_current_entry", "=", "False", "extract_next_tokens", "=", "False", "expect_extracted_tokens", "=", "[", "]", "se", "=", "stack", ".", "pop", "(", ")", "pops_len", "=", "len", "(", "pops", ")", "if", "(", "pops_len", ">", "1", "and", "se", ".", "opname", "==", "'BUILD_TUPLE'", "and", "pops", "[", "-", "1", "]", ".", "opname", "==", "'LOAD_CONST'", "and", "pops", "[", "-", "1", "]", ".", "oparg_repr", "[", "0", "]", "in", "[", "'lambda'", ",", "'{'", ",", "'('", ",", "'['", "]", "and", "pops", "[", "-", "2", "]", ".", "opname", "in", "[", "'MAKE_CLOSURE'", ",", "'MAKE_FUNCTION'", "]", ")", ":", "# Skip BUILD_TUPLE and its children if they are storing arguments for a closure, since those don't show up in the code.", "skip", "=", "-", "se", ".", "stack_depth", "+", "1", "if", "(", "pops_len", ">", "2", "and", "sys", ".", "version_info", "[", "0", "]", ">=", "3", "and", "se", ".", "opname", "==", "'BUILD_TUPLE'", "and", "pops", "[", "-", "1", "]", ".", "opname", "==", "'LOAD_CONST'", "and", "pops", "[", "-", "1", "]", ".", "oparg_repr", "[", "0", "]", "in", "[", "'lambda'", ",", "'{'", ",", "'('", ",", "'['", "]", "and", "pops", "[", "-", "2", "]", ".", "opname", "==", "'LOAD_CONST'", "and", "pops", "[", "-", "3", "]", ".", "opname", "in", "[", "'MAKE_CLOSURE'", ",", "'MAKE_FUNCTION'", "]", ")", ":", "# Skip BUILD_TUPLE and its children if they are storing arguments for a closure, since those don't show up in the code.", "skip", "=", "-", "se", ".", "stack_depth", "+", "1", "if", "(", "pops_len", ">", "0", "and", "se", ".", "opname", "==", "'GET_ITER'", "and", "pops", "[", "-", "1", "]", ".", "opname", "==", "'CALL_FUNCTION'", ")", ":", "# CALL_FUNCTION followed by GET_ITER means we are calling one of the comprehensions and we are about to load its arguments.", "# The CALL_FUNCTION at the top of the stack should be invisible, since it expects a ')' which won't appear in the code.", "pops", "[", "-", "1", "]", ".", "oparg_repr", "=", "[", "''", "]", "# We need to extract the arguments that we're about to load so that we can store their tokens inside of the upcoming comprehension.", "extract_next_tokens", "=", "-", "1", "if", "(", "len", "(", "stack", ")", "and", "se", ".", "opname", "==", "'BUILD_TUPLE_UNPACK_WITH_CALL'", ")", ":", "extract_next_tokens", "=", "se", ".", "stack_depth", "expect_extracted_tokens", "=", "[", "# Expect LOAD_FAST as the first element (required=True), and prepend its oparg_repr with '*'.", "(", "0", ",", "'LOAD_FAST'", ",", "True", ",", "'prepend'", ",", "[", "'*'", "]", ")", ",", "# Expect BUILD_TUPLE as the last stack token extracted (required=False) and replace its oparg_repr with ''.", "(", "abs", "(", "extract_next_tokens", ")", "-", "1", ",", "'BUILD_TUPLE'", ",", "False", ",", "'replace'", ",", "[", "''", "]", ")", "]", "set_current_depth_after_recursion", "=", "se", ".", "stack_depth", "se", ".", "stack_depth", "=", "0", "if", "(", "pops_len", ">", "0", "and", "sys", ".", "version_info", "[", "0", "]", ">=", "3", "and", "se", ".", "opname", "==", "'LOAD_CONST'", "and", "pops", "[", "-", "1", "]", ".", "opname", "==", "'MAKE_FUNCTION'", ")", ":", "# In python 3, MAKE_FUNCTION followed by LOAD_CONST is loading the name of the function, which won't appear in the code.", "se", ".", "oparg_repr", "=", "[", "''", 
"]", "# Additionally, this entry shouldn't impact future stack computations, as MAKE_FUNCTION will be removed.", "set_current_depth_after_recursion", "=", "0", "if", "pops_len", "and", "pops", "[", "-", "1", "]", ".", "opname", "==", "'LIST_APPEND'", ":", "# Skip all but the first stack entry of list comprehensions. Sets the skip value to be all remaining stack entries.", "# The BUILD_LIST check below will disable skip at the right time.", "set_skip_after_current_entry", "=", "len", "(", "stack", ")", "if", "skip", ">", "0", "and", "se", ".", "opname", "==", "'BUILD_LIST'", "and", "se", ".", "stack_depth", "==", "0", ":", "# If we're in skip mode and we just hit what might be the beginning of a list comprehension, check for a LIST_APPEND in the current pops.", "for", "popped_se", "in", "pops", "[", ":", ":", "-", "1", "]", ":", "if", "popped_se", ".", "opname", "==", "'LIST_APPEND'", ":", "skip", "=", "0", "break", "children_skip", "=", "skip", "if", "(", "se", ".", "opname", ".", "startswith", "(", "'UNARY_'", ")", "or", "(", "se", ".", "opname", ".", "startswith", "(", "'BINARY_'", ")", "and", "se", ".", "opname", "!=", "'BINARY_SUBSCR'", ")", "or", "se", ".", "opname", "==", "'SLICE+2'", "or", "se", ".", "opname", "==", "'SLICE+3'", "or", "se", ".", "opname", "==", "'COMPARE_OP'", ")", ":", "# Unary and binary ops come after their operand(s) on the stack, but before (or between) their operand(s) in code, so we need to reverse that.", "if", "set_skip_for_current_entry_children", "or", "skip", ">", "0", ":", "children_skip", "=", "1", "pops", "=", "_collect_pops", "(", "stack", ",", "-", "1", ",", "pops", ",", "children_skip", ")", "if", "skip", "<=", "0", ":", "pops", ".", "append", "(", "se", ")", "qj", ".", "_DEBUG_QJ", "and", "qj", ".", "LOG_FN", "(", "'added se: %r'", "%", "se", ")", "else", ":", "qj", ".", "_DEBUG_QJ", "and", "qj", ".", "LOG_FN", "(", "'(skipping se: %r %r)'", "%", "(", "se", ".", "opname", ",", "se", ".", "oparg_repr", ")", ")", "popped_depth", "=", "se", ".", "stack_depth", "+", "1", "else", ":", "# Non prefix/infix ops -- their representations come after their children in code, or they don't have children.", "if", "skip", "<=", "0", ":", "pops", ".", "append", "(", "se", ")", "qj", ".", "_DEBUG_QJ", "and", "qj", ".", "LOG_FN", "(", "'added se: %r'", "%", "se", ")", "else", ":", "qj", ".", "_DEBUG_QJ", "and", "qj", ".", "LOG_FN", "(", "'(skipping se: %r %r)'", "%", "(", "se", ".", "opname", ",", "se", ".", "oparg_repr", ")", ")", "if", "(", "(", "se", ".", "stack_depth", "<", "0", "and", "se", ".", "opname", "!=", "'BUILD_SLICE'", "and", "se", ".", "opname", ".", "startswith", "(", "'BUILD_'", ")", ")", "or", "se", ".", "stack_depth", ">=", "0", ")", ":", "next_depth", "=", "se", ".", "stack_depth", "else", ":", "next_depth", "=", "se", ".", "stack_depth", "-", "1", "if", "set_skip_for_current_entry_children", "or", "skip", ">", "0", ":", "children_skip", "=", "abs", "(", "next_depth", ")", "if", "se", ".", "opname", "==", "'BUILD_SLICE'", ":", "# BUILD_SLICE's arguments need to be collected, as missing args are replaced with Nones which don't appear in the code.", "slice_pops", "=", "_collect_pops", "(", "stack", ",", "next_depth", ",", "[", "]", ",", "children_skip", ")", "added_colon", "=", "0", "for", "slice_se", "in", "slice_pops", ":", "if", "slice_se", ".", "opname", "==", "'LOAD_CONST'", "and", "slice_se", ".", "oparg_repr", "[", "0", "]", "==", "'None'", ":", "if", "added_colon", ">=", "1", ":", "slice_se", ".", "oparg_repr", "=", "[", "''", "]", "else", 
":", "slice_se", ".", "oparg_repr", "=", "[", "':'", "]", "added_colon", "+=", "1", "pops", ".", "append", "(", "slice_se", ")", "else", ":", "pops", "=", "_collect_pops", "(", "stack", ",", "next_depth", ",", "pops", ",", "children_skip", ")", "# BUILD_LIST 0 marks the start of a list comprehension, but we need it to consume a slot on the stack.", "if", "se", ".", "stack_depth", "==", "0", "and", "se", ".", "opname", "!=", "'BUILD_LIST'", ":", "popped_depth", "=", "0", "else", ":", "popped_depth", "=", "1", "tokens", "=", "[", "]", "if", "extract_next_tokens", "<", "0", ":", "tokens", "=", "_collect_pops", "(", "stack", ",", "extract_next_tokens", ",", "[", "]", ",", "skip", ")", "for", "index", ",", "expected_token", ",", "required", ",", "fixup_type", ",", "fixup_value", "in", "expect_extracted_tokens", ":", "if", "qj", ".", "_DEBUG_QJ", ":", "assert", "(", "index", "<", "0", "and", "index", "+", "len", "(", "tokens", ")", ">", "0", ")", "or", "0", "<=", "index", "<", "len", "(", "tokens", ")", "if", "required", ":", "assert", "tokens", "[", "index", "]", ".", "opname", "==", "expected_token", "if", "(", "index", "<", "0", "and", "index", "+", "len", "(", "tokens", ")", ">", "0", ")", "or", "0", "<=", "index", "<", "len", "(", "tokens", ")", "and", "tokens", "[", "index", "]", ".", "opname", "==", "expected_token", ":", "if", "fixup_type", "==", "'prepend'", ":", "tokens", "[", "index", "]", ".", "oparg_repr", "=", "fixup_value", "+", "tokens", "[", "index", "]", ".", "oparg_repr", "elif", "fixup_type", "==", "'replace'", ":", "tokens", "[", "index", "]", ".", "oparg_repr", "=", "fixup_value", "tokens", ".", "reverse", "(", ")", "popped_depth", "-=", "extract_next_tokens", "if", "children_skip", ">", "0", ":", "skip", "-=", "popped_depth", "if", "set_skip_after_current_entry", ">", "0", ":", "skip", "=", "set_skip_after_current_entry", "+", "max", "(", "0", ",", "skip", ")", "pops", "=", "_collect_pops", "(", "stack", ",", "depth", "+", "popped_depth", ",", "pops", ",", "skip", ")", "if", "len", "(", "tokens", ")", ":", "# pylint: disable=g-explicit-length-test", "target_se", "=", "pops", "[", "-", "1", "]", "target_se", ".", "children", ".", "append", "(", "tokens", ")", "target_se", ".", "oparg_repr", "=", "target_se", ".", "oparg_repr", "[", ":", "1", "]", "+", "[", "t", "for", "token", "in", "tokens", "for", "t", "in", "token", ".", "oparg_repr", "]", "+", "target_se", ".", "oparg_repr", "[", "1", ":", "]", "if", "set_current_depth_after_recursion", "is", "not", "False", ":", "se", ".", "stack_depth", "=", "set_current_depth_after_recursion", "return", "pops" ]
41.865854
25.871951
def deserialize_subject_info(subject_info_xml_path): """Deserialize a SubjectInfo XML file to a PyXB object.""" try: with open(subject_info_xml_path) as f: return d1_common.xml.deserialize(f.read()) except ValueError as e: raise d1_common.types.exceptions.InvalidToken( 0, 'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format( subject_info_xml_path, str(e) ), )
[ "def", "deserialize_subject_info", "(", "subject_info_xml_path", ")", ":", "try", ":", "with", "open", "(", "subject_info_xml_path", ")", "as", "f", ":", "return", "d1_common", ".", "xml", ".", "deserialize", "(", "f", ".", "read", "(", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidToken", "(", "0", ",", "'Could not deserialize SubjectInfo. subject_info=\"{}\", error=\"{}\"'", ".", "format", "(", "subject_info_xml_path", ",", "str", "(", "e", ")", ")", ",", ")" ]
39.25
18.75
def load_config(self, settings=None): """ Load the configuration either from the config file, or from the given settings. Args: settings (dict): If given, the settings are pulled from this dictionary. Otherwise, the config file is used. """ self._load_defaults() if settings: self.update(settings) else: config_paths = _get_config_files() for p in config_paths: conf = _process_config_file([p]) self.update(conf) self._loaded = True self._validate()
[ "def", "load_config", "(", "self", ",", "settings", "=", "None", ")", ":", "self", ".", "_load_defaults", "(", ")", "if", "settings", ":", "self", ".", "update", "(", "settings", ")", "else", ":", "config_paths", "=", "_get_config_files", "(", ")", "for", "p", "in", "config_paths", ":", "conf", "=", "_process_config_file", "(", "[", "p", "]", ")", "self", ".", "update", "(", "conf", ")", "self", ".", "_loaded", "=", "True", "self", ".", "_validate", "(", ")" ]
33.388889
16.722222
def annotate(node): """Annotate a node with the stack frame describing the SConscript file and line number that created it.""" tb = sys.exc_info()[2] while tb and stack_bottom not in tb.tb_frame.f_locals: tb = tb.tb_next if not tb: # We did not find any exec of an SConscript file: what?! raise SCons.Errors.InternalError("could not find SConscript stack frame") node.creator = traceback.extract_stack(tb)[0]
[ "def", "annotate", "(", "node", ")", ":", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", "while", "tb", "and", "stack_bottom", "not", "in", "tb", ".", "tb_frame", ".", "f_locals", ":", "tb", "=", "tb", ".", "tb_next", "if", "not", "tb", ":", "# We did not find any exec of an SConscript file: what?!", "raise", "SCons", ".", "Errors", ".", "InternalError", "(", "\"could not find SConscript stack frame\"", ")", "node", ".", "creator", "=", "traceback", ".", "extract_stack", "(", "tb", ")", "[", "0", "]" ]
44.7
17
def K_swing_check_valve_Crane(D=None, fd=None, angled=True): r'''Returns the loss coefficient for a swing check valve as shown in [1]_. .. math:: K_2 = N\cdot f_d For angled swing check valves N = 100; for straight valves, N = 50. Parameters ---------- D : float, optional Diameter of the pipe attached to the valve, [m] fd : float, optional Darcy friction factor calculated for the actual pipe flow in clean steel (roughness = 0.0018 inch) in the fully developed turbulent region; do not specify this to use the original Crane friction factor!, [-] angled : bool, optional If True, returns a value 2x the unangled value; the style of the valve [-] Returns ------- K : float Loss coefficient with respect to the pipe inside diameter [-] Notes ----- This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions. Examples -------- >>> K_swing_check_valve_Crane(D=.02) 2.3974274785373257 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. ''' if D is None and fd is None: raise ValueError('Either `D` or `fd` must be specified') if fd is None: fd = ft_Crane(D) if angled: return 100.*fd return 50.*fd
[ "def", "K_swing_check_valve_Crane", "(", "D", "=", "None", ",", "fd", "=", "None", ",", "angled", "=", "True", ")", ":", "if", "D", "is", "None", "and", "fd", "is", "None", ":", "raise", "ValueError", "(", "'Either `D` or `fd` must be specified'", ")", "if", "fd", "is", "None", ":", "fd", "=", "ft_Crane", "(", "D", ")", "if", "angled", ":", "return", "100.", "*", "fd", "return", "50.", "*", "fd" ]
29.354167
26.4375
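A quick numeric check of the N*fd relation in the entry above, using an assumed friction factor instead of the Crane correlation:

fd = 0.024                                                   # assumed fully turbulent friction factor
K_angled = K_swing_check_valve_Crane(fd=fd, angled=True)     # 100 * 0.024 = 2.4
K_straight = K_swing_check_valve_Crane(fd=fd, angled=False)  #  50 * 0.024 = 1.2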
def create(self, to, channel, custom_message=values.unset): """ Create a new VerificationInstance :param unicode to: To phonenumber :param unicode channel: sms or call :param unicode custom_message: A custom message for this verification :returns: Newly created VerificationInstance :rtype: twilio.rest.preview.acc_security.service.verification.VerificationInstance """ data = values.of({'To': to, 'Channel': channel, 'CustomMessage': custom_message, }) payload = self._version.create( 'POST', self._uri, data=data, ) return VerificationInstance(self._version, payload, service_sid=self._solution['service_sid'], )
[ "def", "create", "(", "self", ",", "to", ",", "channel", ",", "custom_message", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'To'", ":", "to", ",", "'Channel'", ":", "channel", ",", "'CustomMessage'", ":", "custom_message", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "VerificationInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", ")" ]
36.55
24.35
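An illustrative call for the method above; `verifications` is assumed to be the list resource this method belongs to, and the phone number is a placeholder:

verification = verifications.create(to='+15017122661', channel='sms')
# or, with a custom message:
verification = verifications.create(to='+15017122661', channel='call',
                                    custom_message='Your example verification code is ready.')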
def channel(self, rpc_timeout=60, lazy=False): """Open Channel. :param int rpc_timeout: Timeout before we give up waiting for an RPC response from the server. :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. """ LOGGER.debug('Opening a new Channel') if not compatibility.is_integer(rpc_timeout): raise AMQPInvalidArgument('rpc_timeout should be an integer') elif self.is_closed: raise AMQPConnectionError('socket/connection closed') with self.lock: channel_id = self._get_next_available_channel_id() channel = Channel(channel_id, self, rpc_timeout, on_close_impl=self._cleanup_channel) self._channels[channel_id] = channel if not lazy: channel.open() LOGGER.debug('Channel #%d Opened', channel_id) return self._channels[channel_id]
[ "def", "channel", "(", "self", ",", "rpc_timeout", "=", "60", ",", "lazy", "=", "False", ")", ":", "LOGGER", ".", "debug", "(", "'Opening a new Channel'", ")", "if", "not", "compatibility", ".", "is_integer", "(", "rpc_timeout", ")", ":", "raise", "AMQPInvalidArgument", "(", "'rpc_timeout should be an integer'", ")", "elif", "self", ".", "is_closed", ":", "raise", "AMQPConnectionError", "(", "'socket/connection closed'", ")", "with", "self", ".", "lock", ":", "channel_id", "=", "self", ".", "_get_next_available_channel_id", "(", ")", "channel", "=", "Channel", "(", "channel_id", ",", "self", ",", "rpc_timeout", ",", "on_close_impl", "=", "self", ".", "_cleanup_channel", ")", "self", ".", "_channels", "[", "channel_id", "]", "=", "channel", "if", "not", "lazy", ":", "channel", ".", "open", "(", ")", "LOGGER", ".", "debug", "(", "'Channel #%d Opened'", ",", "channel_id", ")", "return", "self", ".", "_channels", "[", "channel_id", "]" ]
43.730769
18.923077
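A small sketch of the two ways the method above can be used; `connection` stands for an open instance of the enclosing connection class:

channel = connection.channel(rpc_timeout=60)   # allocated and opened immediately
lazy_channel = connection.channel(lazy=True)   # allocated, but channel.open() is deferred
lazy_channel.open()                            # open it explicitly when needed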
def add_service(self, loadbal_id, service_group_id, ip_address_id,
                    port=80, enabled=True, hc_type=21, weight=1):
        """Adds a new service to the service group.

        :param int loadbal_id: The id of the loadbal where the service resides
        :param int service_group_id: The group to add the service to
        :param int ip_address_id: The ip address ID of the service
        :param int port: the port of the service
        :param bool enabled: Enable or disable the service
        :param int hc_type: The health check type
        :param int weight: the weight to give to the service
        """
        kwargs = utils.NestedDict({})
        kwargs['mask'] = ('virtualServers['
                          'serviceGroups[services[groupReferences]]]')

        load_balancer = self.lb_svc.getObject(id=loadbal_id, **kwargs)
        virtual_servers = load_balancer['virtualServers']
        for virtual_server in virtual_servers:
            if virtual_server['id'] == service_group_id:
                service_template = {
                    'enabled': int(enabled),
                    'port': port,
                    'ipAddressId': ip_address_id,
                    'healthChecks': [
                        {
                            'healthCheckTypeId': hc_type
                        }
                    ],
                    'groupReferences': [
                        {
                            'weight': weight
                        }
                    ]
                }
                services = virtual_server['serviceGroups'][0]['services']
                services.append(service_template)

        return self.lb_svc.editObject(load_balancer, id=loadbal_id)
[ "def", "add_service", "(", "self", ",", "loadbal_id", ",", "service_group_id", ",", "ip_address_id", ",", "port", "=", "80", ",", "enabled", "=", "True", ",", "hc_type", "=", "21", ",", "weight", "=", "1", ")", ":", "kwargs", "=", "utils", ".", "NestedDict", "(", "{", "}", ")", "kwargs", "[", "'mask'", "]", "=", "(", "'virtualServers['", "'serviceGroups[services[groupReferences]]]'", ")", "load_balancer", "=", "self", ".", "lb_svc", ".", "getObject", "(", "id", "=", "loadbal_id", ",", "*", "*", "kwargs", ")", "virtual_servers", "=", "load_balancer", "[", "'virtualServers'", "]", "for", "virtual_server", "in", "virtual_servers", ":", "if", "virtual_server", "[", "'id'", "]", "==", "service_group_id", ":", "service_template", "=", "{", "'enabled'", ":", "int", "(", "enabled", ")", ",", "'port'", ":", "port", ",", "'ipAddressId'", ":", "ip_address_id", ",", "'healthChecks'", ":", "[", "{", "'healthCheckTypeId'", ":", "hc_type", "}", "]", ",", "'groupReferences'", ":", "[", "{", "'weight'", ":", "weight", "}", "]", "}", "services", "=", "virtual_server", "[", "'serviceGroups'", "]", "[", "0", "]", "[", "'services'", "]", "services", ".", "append", "(", "service_template", ")", "return", "self", ".", "lb_svc", ".", "editObject", "(", "load_balancer", ",", "id", "=", "loadbal_id", ")" ]
43.25641
16.897436
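An illustrative call for the method above; `lb_mgr` stands for the enclosing load-balancer manager and every numeric ID below is a placeholder, not a real SoftLayer identifier:

lb_mgr.add_service(loadbal_id=12345,
                   service_group_id=50,
                   ip_address_id=183071,
                   port=8080,
                   enabled=True,
                   hc_type=21,   # health check type (21 is the default used above)
                   weight=2)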
def init_workers(): """Waiting function, used to wake up the process pool""" setproctitle('oq-worker') # unregister raiseMasterKilled in oq-workers to avoid deadlock # since processes are terminated via pool.terminate() signal.signal(signal.SIGTERM, signal.SIG_DFL) # prctl is still useful (on Linux) to terminate all spawned processes # when master is killed via SIGKILL try: import prctl except ImportError: pass else: # if the parent dies, the children die prctl.set_pdeathsig(signal.SIGKILL)
[ "def", "init_workers", "(", ")", ":", "setproctitle", "(", "'oq-worker'", ")", "# unregister raiseMasterKilled in oq-workers to avoid deadlock", "# since processes are terminated via pool.terminate()", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "signal", ".", "SIG_DFL", ")", "# prctl is still useful (on Linux) to terminate all spawned processes", "# when master is killed via SIGKILL", "try", ":", "import", "prctl", "except", "ImportError", ":", "pass", "else", ":", "# if the parent dies, the children die", "prctl", ".", "set_pdeathsig", "(", "signal", ".", "SIGKILL", ")" ]
36.866667
17
def proximal_huber(space, gamma):
    """Proximal factory of the Huber norm.

    Parameters
    ----------
    space : `TensorSpace`
        The domain of the functional
    gamma : float
        The smoothing parameter of the Huber norm functional.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    odl.solvers.default_functionals.Huber : the Huber norm functional

    Notes
    -----
    The proximal operator is given by the proximal operator of
    ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the
    proximal operator of the l1 norm in points that are ``> gamma``.
    """
    gamma = float(gamma)

    class ProximalHuber(Operator):

        """Proximal operator of Huber norm."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
            """
            self.sigma = float(sigma)
            super(ProximalHuber, self).__init__(domain=space, range=space,
                                                linear=False)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            if isinstance(self.domain, ProductSpace):
                norm = PointwiseNorm(self.domain, 2)(x)
            else:
                norm = x.ufuncs.absolute()

            mask = norm.ufuncs.less_equal(gamma + self.sigma)
            out[mask] = gamma / (gamma + self.sigma) * x[mask]

            mask.ufuncs.logical_not(out=mask)
            sign_x = x.ufuncs.sign()
            out[mask] = x[mask] - self.sigma * sign_x[mask]

            return out

    return ProximalHuber
[ "def", "proximal_huber", "(", "space", ",", "gamma", ")", ":", "gamma", "=", "float", "(", "gamma", ")", "class", "ProximalHuber", "(", "Operator", ")", ":", "\"\"\"Proximal operator of Huber norm.\"\"\"", "def", "__init__", "(", "self", ",", "sigma", ")", ":", "\"\"\"Initialize a new instance.\n\n Parameters\n ----------\n sigma : positive float\n \"\"\"", "self", ".", "sigma", "=", "float", "(", "sigma", ")", "super", "(", "ProximalHuber", ",", "self", ")", ".", "__init__", "(", "domain", "=", "space", ",", "range", "=", "space", ",", "linear", "=", "False", ")", "def", "_call", "(", "self", ",", "x", ",", "out", ")", ":", "\"\"\"Return ``self(x, out=out)``.\"\"\"", "if", "isinstance", "(", "self", ".", "domain", ",", "ProductSpace", ")", ":", "norm", "=", "PointwiseNorm", "(", "self", ".", "domain", ",", "2", ")", "(", "x", ")", "else", ":", "norm", "=", "x", ".", "ufuncs", ".", "absolute", "(", ")", "mask", "=", "norm", ".", "ufuncs", ".", "less_equal", "(", "gamma", "+", "self", ".", "sigma", ")", "out", "[", "mask", "]", "=", "gamma", "/", "(", "gamma", "+", "self", ".", "sigma", ")", "*", "x", "[", "mask", "]", "mask", ".", "ufuncs", ".", "logical_not", "(", "out", "=", "mask", ")", "sign_x", "=", "x", ".", "ufuncs", ".", "sign", "(", ")", "out", "[", "mask", "]", "=", "x", "[", "mask", "]", "-", "self", ".", "sigma", "*", "sign_x", "[", "mask", "]", "return", "out", "return", "ProximalHuber" ]
27.983333
21.766667
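A hedged usage sketch for the factory above, assuming an ODL release matching this code (where `odl.rn` and the element `.ufuncs` namespace behave as the implementation expects):

import odl

space = odl.rn(3)
prox = proximal_huber(space, gamma=0.1)(sigma=0.5)   # build the factory, then the operator for step size sigma
x = space.element([0.05, -0.5, 2.0])
y = prox(x)   # entries with |x| <= gamma + sigma are scaled by gamma/(gamma + sigma),
              # larger entries are shifted toward zero by sigma (soft thresholding)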
def get_in(keys, coll, default=None, no_default=False): """ NB: This is a straight copy of the get_in implementation found in the toolz library (https://github.com/pytoolz/toolz/). It works with persistent data structures as well as the corresponding datastructures from the stdlib. Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys. If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless ``no_default`` is specified, then it raises KeyError or IndexError. ``get_in`` is a generalization of ``operator.getitem`` for nested data structures such as dictionaries and lists. >>> from pyrsistent import freeze >>> transaction = freeze({'name': 'Alice', ... 'purchase': {'items': ['Apple', 'Orange'], ... 'costs': [0.50, 1.25]}, ... 'credit card': '5555-1234-1234-1234'}) >>> get_in(['purchase', 'items', 0], transaction) 'Apple' >>> get_in(['name'], transaction) 'Alice' >>> get_in(['purchase', 'total'], transaction) >>> get_in(['purchase', 'items', 'apple'], transaction) >>> get_in(['purchase', 'items', 10], transaction) >>> get_in(['purchase', 'total'], transaction, 0) 0 >>> get_in(['y'], {}, no_default=True) Traceback (most recent call last): ... KeyError: 'y' """ try: return reduce(operator.getitem, keys, coll) except (KeyError, IndexError, TypeError): if no_default: raise return default
[ "def", "get_in", "(", "keys", ",", "coll", ",", "default", "=", "None", ",", "no_default", "=", "False", ")", ":", "try", ":", "return", "reduce", "(", "operator", ".", "getitem", ",", "keys", ",", "coll", ")", "except", "(", "KeyError", ",", "IndexError", ",", "TypeError", ")", ":", "if", "no_default", ":", "raise", "return", "default" ]
39.358974
19.769231
def read(self, src):
        """ Download GeoJSON file of US counties from url (S3 bucket) """
        geojson = None

        if not self.is_valid_src(src):
            error = "File < {0} > does not exist or does not start with 'http'."
            raise ValueError(error.format(src))

        if not self.is_url(src):
            return open(src, 'r').read().decode('latin-1').encode('utf-8')

        tmp = self.get_location(src)

        # if src points to url that was already downloaded
        # read from local file instead
        if os.path.isfile(tmp):
            with open(tmp, 'r') as f:
                return f.read()

        # download file and write to local filesystem before returning
        response = urllib2.urlopen(src)
        data = response.read().decode('latin-1').encode('utf-8')
        with open(tmp, 'w') as f:
            f.write(data)

        return data
[ "def", "read", "(", "self", ",", "src", ")", ":", "geojson", "=", "None", "if", "not", "self", ".", "is_valid_src", "(", "src", ")", ":", "error", "=", "\"File < {0} > does not exists or does start with 'http'.\"", "raise", "ValueError", "(", "error", ".", "format", "(", "src", ")", ")", "if", "not", "self", ".", "is_url", "(", "src", ")", ":", "return", "open", "(", "src", ",", "'r'", ")", ".", "read", "(", ")", ".", "decode", "(", "'latin-1'", ")", ".", "encode", "(", "'utf-8'", ")", "tmp", "=", "self", ".", "get_location", "(", "src", ")", "# if src poits to url that was already downloaded", "# read from local file instead", "if", "os", ".", "path", ".", "isfile", "(", "tmp", ")", ":", "with", "open", "(", "tmp", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "# download file and write to local filesystem before returning", "response", "=", "urllib2", ".", "urlopen", "(", "src", ")", "data", "=", "response", ".", "read", "(", ")", ".", "decode", "(", "'latin-1'", ")", ".", "encode", "(", "'utf-8'", ")", "with", "open", "(", "tmp", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "return", "data" ]
43.35
13.2
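A short sketch of the caching behaviour of the method above; `reader` is assumed to be an instance of the enclosing class and both paths are placeholders:

data = reader.read('data/us_counties.geojson')             # local file: decoded and returned directly
data = reader.read('http://example.com/counties.geojson')  # URL: downloaded once, cached locally, reused on later calls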
def on_all_ok(self):
        """
        This method is called when all the q-points have been computed.
        It runs `mrgscr` sequentially on the local machine to produce
        the final SCR file in the outdir of the `Work`.
        """
        final_scr = self.merge_scrfiles()
        return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
[ "def", "on_all_ok", "(", "self", ")", ":", "final_scr", "=", "self", ".", "merge_scrfiles", "(", ")", "return", "self", ".", "Results", "(", "node", "=", "self", ",", "returncode", "=", "0", ",", "message", "=", "\"mrgscr done\"", ",", "final_scr", "=", "final_scr", ")" ]
46.875
19.125
def copy_type_comments_to_annotations(args): """Copies argument type comments from the legacy long form to annotations in the entire function signature. """ for arg in args.args: copy_type_comment_to_annotation(arg) if args.vararg: copy_type_comment_to_annotation(args.vararg) for arg in args.kwonlyargs: copy_type_comment_to_annotation(arg) if args.kwarg: copy_type_comment_to_annotation(args.kwarg)
[ "def", "copy_type_comments_to_annotations", "(", "args", ")", ":", "for", "arg", "in", "args", ".", "args", ":", "copy_type_comment_to_annotation", "(", "arg", ")", "if", "args", ".", "vararg", ":", "copy_type_comment_to_annotation", "(", "args", ".", "vararg", ")", "for", "arg", "in", "args", ".", "kwonlyargs", ":", "copy_type_comment_to_annotation", "(", "arg", ")", "if", "args", ".", "kwarg", ":", "copy_type_comment_to_annotation", "(", "args", ".", "kwarg", ")" ]
29.933333
15
def read_pattern(self, patterns, reverse=False, terminate_on_match=False, postprocess=str): """ General pattern reading. Uses monty's regrep method. Takes the same arguments. Args: patterns (dict): A dict of patterns, e.g., {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}. reverse (bool): Read files in reverse. Defaults to false. Useful for large files, esp OUTCARs, especially when used with terminate_on_match. terminate_on_match (bool): Whether to terminate when there is at least one match in each key in pattern. postprocess (callable): A post processing function to convert all matches. Defaults to str, i.e., no change. Renders accessible: Any attribute in patterns. For example, {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the value of self.data["energy"] = [[-1234], [-3453], ...], to the results from regex and postprocess. Note that the returned values are lists of lists, because you can grep multiple items on one line. """ matches = regrep(self.filename, patterns, reverse=reverse, terminate_on_match=terminate_on_match, postprocess=postprocess) for k in patterns.keys(): self.data[k] = [i[0] for i in matches.get(k, [])]
[ "def", "read_pattern", "(", "self", ",", "patterns", ",", "reverse", "=", "False", ",", "terminate_on_match", "=", "False", ",", "postprocess", "=", "str", ")", ":", "matches", "=", "regrep", "(", "self", ".", "filename", ",", "patterns", ",", "reverse", "=", "reverse", ",", "terminate_on_match", "=", "terminate_on_match", ",", "postprocess", "=", "postprocess", ")", "for", "k", "in", "patterns", ".", "keys", "(", ")", ":", "self", ".", "data", "[", "k", "]", "=", "[", "i", "[", "0", "]", "for", "i", "in", "matches", ".", "get", "(", "k", ",", "[", "]", ")", "]" ]
50.896552
23.655172
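An illustrative call for the method above, reusing the pattern from its own docstring; `outcar` stands for an instance of the enclosing parser class:

outcar.read_pattern({'energy': r"energy\(sigma->0\)\s+=\s+([\d\-.]+)"},
                    terminate_on_match=False, postprocess=float)
energies = outcar.data['energy']   # list of lists, e.g. [[-1234.0], [-3453.0], ...]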
def get_command(self, ctx, cmd_name): """ Allow for partial commands. """ rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)] if not matches: return None elif len(matches) == 1: return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Command ambiguous, could be: %s' % ', '.join(sorted(matches)))
[ "def", "get_command", "(", "self", ",", "ctx", ",", "cmd_name", ")", ":", "rv", "=", "click", ".", "Group", ".", "get_command", "(", "self", ",", "ctx", ",", "cmd_name", ")", "if", "rv", "is", "not", "None", ":", "return", "rv", "matches", "=", "[", "x", "for", "x", "in", "self", ".", "list_commands", "(", "ctx", ")", "if", "x", ".", "startswith", "(", "cmd_name", ")", "]", "if", "not", "matches", ":", "return", "None", "elif", "len", "(", "matches", ")", "==", "1", ":", "return", "click", ".", "Group", ".", "get_command", "(", "self", ",", "ctx", ",", "matches", "[", "0", "]", ")", "ctx", ".", "fail", "(", "'Command ambiguous, could be: %s'", "%", "', '", ".", "join", "(", "sorted", "(", "matches", ")", ")", ")" ]
40
11.923077
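A sketch of how the class above is typically wired into a CLI; the group class name `AliasedGroup` is an assumption for the class that defines get_command, and `import click` is required:

import click

@click.group(cls=AliasedGroup)   # AliasedGroup: assumed name of the class defining get_command above
def cli():
    pass

@cli.command()
def status():
    click.echo('ok')

# `cli` now accepts unambiguous prefixes: `stat` resolves to `status`,
# while a prefix matching several commands fails with "Command ambiguous".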
def rfc2425encode(name,value,parameters=None,charset="utf-8"): """Encodes a vCard field into an RFC2425 line. :Parameters: - `name`: field type name - `value`: field value - `parameters`: optional parameters - `charset`: encoding of the output and of the `value` (if not `unicode`) :Types: - `name`: `str` - `value`: `unicode` or `str` - `parameters`: `dict` of `str` -> `str` - `charset`: `str` :return: the encoded RFC2425 line (possibly folded) :returntype: `str`""" if not parameters: parameters={} if type(value) is unicode: value=value.replace(u"\r\n",u"\\n") value=value.replace(u"\n",u"\\n") value=value.replace(u"\r",u"\\n") value=value.encode(charset,"replace") elif type(value) is not str: raise TypeError("Bad type for rfc2425 value") elif not valid_string_re.match(value): parameters["encoding"]="b" value=binascii.b2a_base64(value) ret=str(name).lower() for k,v in parameters.items(): ret+=";%s=%s" % (str(k),str(v)) ret+=":" while(len(value)>70): ret+=value[:70]+"\r\n " value=value[70:] ret+=value+"\r\n" return ret
[ "def", "rfc2425encode", "(", "name", ",", "value", ",", "parameters", "=", "None", ",", "charset", "=", "\"utf-8\"", ")", ":", "if", "not", "parameters", ":", "parameters", "=", "{", "}", "if", "type", "(", "value", ")", "is", "unicode", ":", "value", "=", "value", ".", "replace", "(", "u\"\\r\\n\"", ",", "u\"\\\\n\"", ")", "value", "=", "value", ".", "replace", "(", "u\"\\n\"", ",", "u\"\\\\n\"", ")", "value", "=", "value", ".", "replace", "(", "u\"\\r\"", ",", "u\"\\\\n\"", ")", "value", "=", "value", ".", "encode", "(", "charset", ",", "\"replace\"", ")", "elif", "type", "(", "value", ")", "is", "not", "str", ":", "raise", "TypeError", "(", "\"Bad type for rfc2425 value\"", ")", "elif", "not", "valid_string_re", ".", "match", "(", "value", ")", ":", "parameters", "[", "\"encoding\"", "]", "=", "\"b\"", "value", "=", "binascii", ".", "b2a_base64", "(", "value", ")", "ret", "=", "str", "(", "name", ")", ".", "lower", "(", ")", "for", "k", ",", "v", "in", "parameters", ".", "items", "(", ")", ":", "ret", "+=", "\";%s=%s\"", "%", "(", "str", "(", "k", ")", ",", "str", "(", "v", ")", ")", "ret", "+=", "\":\"", "while", "(", "len", "(", "value", ")", ">", "70", ")", ":", "ret", "+=", "value", "[", ":", "70", "]", "+", "\"\\r\\n \"", "value", "=", "value", "[", "70", ":", "]", "ret", "+=", "value", "+", "\"\\r\\n\"", "return", "ret" ]
31.230769
13.923077
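A worked example of the encoder above, under Python 2 (the code relies on the `unicode` type); the expected return values are shown in comments:

line = rfc2425encode("FN", u"John Doe")
# 'fn:John Doe\r\n'               (name lower-cased, CRLF terminated)
line = rfc2425encode("note", u"line one\nline two")
# 'note:line one\\nline two\r\n'  (embedded newlines escaped as \n per RFC 2425)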
def f_measure(reference_beats, estimated_beats, f_measure_threshold=0.07): """Compute the F-measure of correct vs incorrectly predicted beats. "Correctness" is determined over a small window. Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> f_measure = mir_eval.beat.f_measure(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray estimated beat times, in seconds f_measure_threshold : float Window size, in seconds (Default value = 0.07) Returns ------- f_score : float The computed F-measure score """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Compute the best-case matching between reference and estimated locations matching = util.match_events(reference_beats, estimated_beats, f_measure_threshold) precision = float(len(matching))/len(estimated_beats) recall = float(len(matching))/len(reference_beats) return util.f_measure(precision, recall)
[ "def", "f_measure", "(", "reference_beats", ",", "estimated_beats", ",", "f_measure_threshold", "=", "0.07", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# When estimated beats are empty, no beats are correct; metric is 0", "if", "estimated_beats", ".", "size", "==", "0", "or", "reference_beats", ".", "size", "==", "0", ":", "return", "0.", "# Compute the best-case matching between reference and estimated locations", "matching", "=", "util", ".", "match_events", "(", "reference_beats", ",", "estimated_beats", ",", "f_measure_threshold", ")", "precision", "=", "float", "(", "len", "(", "matching", ")", ")", "/", "len", "(", "estimated_beats", ")", "recall", "=", "float", "(", "len", "(", "matching", ")", ")", "/", "len", "(", "reference_beats", ")", "return", "util", ".", "f_measure", "(", "precision", ",", "recall", ")" ]
36.302326
18.581395
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option): """Preprocess data in Cloud with DataFlow.""" import apache_beam as beam import google.datalab.utils from . import _preprocess if checkpoint is None: checkpoint = _util._DEFAULT_CHECKPOINT_GSURL job_name = ('preprocess-image-classification-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')) staging_package_url = _util.repackage_to_staging(output_dir) tmpdir = tempfile.mkdtemp() # suppress DataFlow warnings about wheel package as extra package. original_level = logging.getLogger().getEffectiveLevel() logging.getLogger().setLevel(logging.ERROR) try: # Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS. # Remove when the issue is fixed and new version of DataFlow is included in Datalab. extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL] local_packages = [os.path.join(tmpdir, os.path.basename(p)) for p in extra_packages] for source, dest in zip(extra_packages, local_packages): file_io.copy(source, dest, overwrite=True) options = { 'staging_location': os.path.join(output_dir, 'tmp', 'staging'), 'temp_location': os.path.join(output_dir, 'tmp'), 'job_name': job_name, 'project': _util.default_project(), 'extra_packages': local_packages, 'teardown_policy': 'TEARDOWN_ALWAYS', 'no_save_main_session': True } if pipeline_option is not None: options.update(pipeline_option) opts = beam.pipeline.PipelineOptions(flags=[], **options) p = beam.Pipeline('DataflowRunner', options=opts) _preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint, output_dir, job_name) job_results = p.run() finally: shutil.rmtree(tmpdir) logging.getLogger().setLevel(original_level) if (_util.is_in_IPython()): import IPython dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \ _util.default_project() html = 'Job "%s" submitted.' % job_name html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \ % dataflow_url IPython.display.display_html(html, raw=True) return google.datalab.utils.DataflowJob(job_results)
[ "def", "preprocess", "(", "train_dataset", ",", "output_dir", ",", "eval_dataset", ",", "checkpoint", ",", "pipeline_option", ")", ":", "import", "apache_beam", "as", "beam", "import", "google", ".", "datalab", ".", "utils", "from", ".", "import", "_preprocess", "if", "checkpoint", "is", "None", ":", "checkpoint", "=", "_util", ".", "_DEFAULT_CHECKPOINT_GSURL", "job_name", "=", "(", "'preprocess-image-classification-'", "+", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%y%m%d-%H%M%S'", ")", ")", "staging_package_url", "=", "_util", ".", "repackage_to_staging", "(", "output_dir", ")", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "# suppress DataFlow warnings about wheel package as extra package.", "original_level", "=", "logging", ".", "getLogger", "(", ")", ".", "getEffectiveLevel", "(", ")", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "try", ":", "# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.", "# Remove when the issue is fixed and new version of DataFlow is included in Datalab.", "extra_packages", "=", "[", "staging_package_url", ",", "_TF_GS_URL", ",", "_PROTOBUF_GS_URL", "]", "local_packages", "=", "[", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "os", ".", "path", ".", "basename", "(", "p", ")", ")", "for", "p", "in", "extra_packages", "]", "for", "source", ",", "dest", "in", "zip", "(", "extra_packages", ",", "local_packages", ")", ":", "file_io", ".", "copy", "(", "source", ",", "dest", ",", "overwrite", "=", "True", ")", "options", "=", "{", "'staging_location'", ":", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'tmp'", ",", "'staging'", ")", ",", "'temp_location'", ":", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'tmp'", ")", ",", "'job_name'", ":", "job_name", ",", "'project'", ":", "_util", ".", "default_project", "(", ")", ",", "'extra_packages'", ":", "local_packages", ",", "'teardown_policy'", ":", "'TEARDOWN_ALWAYS'", ",", "'no_save_main_session'", ":", "True", "}", "if", "pipeline_option", "is", "not", "None", ":", "options", ".", "update", "(", "pipeline_option", ")", "opts", "=", "beam", ".", "pipeline", ".", "PipelineOptions", "(", "flags", "=", "[", "]", ",", "*", "*", "options", ")", "p", "=", "beam", ".", "Pipeline", "(", "'DataflowRunner'", ",", "options", "=", "opts", ")", "_preprocess", ".", "configure_pipeline", "(", "p", ",", "train_dataset", ",", "eval_dataset", ",", "checkpoint", ",", "output_dir", ",", "job_name", ")", "job_results", "=", "p", ".", "run", "(", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmpdir", ")", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "original_level", ")", "if", "(", "_util", ".", "is_in_IPython", "(", ")", ")", ":", "import", "IPython", "dataflow_url", "=", "'https://console.developers.google.com/dataflow?project=%s'", "%", "_util", ".", "default_project", "(", ")", "html", "=", "'Job \"%s\" submitted.'", "%", "job_name", "html", "+=", "'<p>Click <a href=\"%s\" target=\"_blank\">here</a> to track preprocessing job. <br/>'", "%", "dataflow_url", "IPython", ".", "display", ".", "display_html", "(", "html", ",", "raw", "=", "True", ")", "return", "google", ".", "datalab", ".", "utils", ".", "DataflowJob", "(", "job_results", ")" ]
42.719298
21.333333
def parseVersionParts(text, seps=vseps): ''' Extract a list of major/minor/version integer strings from a string. Args: text (str): String to parse seps (tuple): A tuple or list of separators to use when parsing the version string. Examples: Parse a simple version string into a major and minor parts:: parts = parseVersionParts('1.2') Parse a complex version string into a major and minor parts:: parts = parseVersionParts('wowsoft_1.2') Parse a simple version string into a major, minor and patch parts. Parts after the "3." are dropped from the results:: parts = parseVersionParts('1.2.3.4.5') Notes: This attempts to brute force out integers from the version string by stripping any leading ascii letters and part separators, and then regexing out numeric parts optionally followed by part separators. It will stop at the first mixed-character part encountered. For example, "1.2-3a" would only parse out the "1" and "2" from the string. Returns: dict: Either a empty dictionary or dictionary containing up to three keys, 'major', 'minor' and 'patch'. ''' # Join seps together seps = ''.join(seps) # Strip whitespace text = text.strip() # Strip off leading chars text = text.lstrip(string.ascii_letters) # Strip off any leading separator which may be present text = text.lstrip(seps) pattern = r'^(\d+)([{}]+|$)'.format(regex.escape(seps)) parts = [] ret = {} off = 0 while True: m = regex.search(pattern, text[off:]) if not m: break off += m.end() p, s = m.groups() parts.append(int(p)) if not parts: return None keys = ('major', 'minor', 'patch') ret.update(zip(keys, parts)) return ret
[ "def", "parseVersionParts", "(", "text", ",", "seps", "=", "vseps", ")", ":", "# Join seps together", "seps", "=", "''", ".", "join", "(", "seps", ")", "# Strip whitespace", "text", "=", "text", ".", "strip", "(", ")", "# Strip off leading chars", "text", "=", "text", ".", "lstrip", "(", "string", ".", "ascii_letters", ")", "# Strip off any leading separator which may be present", "text", "=", "text", ".", "lstrip", "(", "seps", ")", "pattern", "=", "r'^(\\d+)([{}]+|$)'", ".", "format", "(", "regex", ".", "escape", "(", "seps", ")", ")", "parts", "=", "[", "]", "ret", "=", "{", "}", "off", "=", "0", "while", "True", ":", "m", "=", "regex", ".", "search", "(", "pattern", ",", "text", "[", "off", ":", "]", ")", "if", "not", "m", ":", "break", "off", "+=", "m", ".", "end", "(", ")", "p", ",", "s", "=", "m", ".", "groups", "(", ")", "parts", ".", "append", "(", "int", "(", "p", ")", ")", "if", "not", "parts", ":", "return", "None", "keys", "=", "(", "'major'", ",", "'minor'", ",", "'patch'", ")", "ret", ".", "update", "(", "zip", "(", "keys", ",", "parts", ")", ")", "return", "ret" ]
33.272727
28.181818
def venv_pth(self, dirs): ''' Add the directories in `dirs` to the `sys.path`. A venv.pth file will be written in the site-packages dir of this virtualenv to add dirs to sys.path. dirs: a list of directories. ''' # Create venv.pth to add dirs to sys.path when using the virtualenv. text = StringIO.StringIO() text.write("# Autogenerated file. Do not modify.\n") for path in dirs: text.write('{}\n'.format(path)) put(text, os.path.join(self.site_packages_dir(), 'venv.pth'), mode=0664)
[ "def", "venv_pth", "(", "self", ",", "dirs", ")", ":", "# Create venv.pth to add dirs to sys.path when using the virtualenv.", "text", "=", "StringIO", ".", "StringIO", "(", ")", "text", ".", "write", "(", "\"# Autogenerated file. Do not modify.\\n\"", ")", "for", "path", "in", "dirs", ":", "text", ".", "write", "(", "'{}\\n'", ".", "format", "(", "path", ")", ")", "put", "(", "text", ",", "os", ".", "path", ".", "join", "(", "self", ".", "site_packages_dir", "(", ")", ",", "'venv.pth'", ")", ",", "mode", "=", "0664", ")" ]
41
22.857143
def get_sql_results( ctask, query_id, rendered_query, return_results=True, store_results=False, user_name=None, start_time=None): """Executes the sql query returns the results.""" with session_scope(not ctask.request.called_directly) as session: try: return execute_sql_statements( ctask, query_id, rendered_query, return_results, store_results, user_name, session=session, start_time=start_time) except Exception as e: logging.exception(e) stats_logger.incr('error_sqllab_unhandled') query = get_query(query_id, session) return handle_query_error(str(e), query, session)
[ "def", "get_sql_results", "(", "ctask", ",", "query_id", ",", "rendered_query", ",", "return_results", "=", "True", ",", "store_results", "=", "False", ",", "user_name", "=", "None", ",", "start_time", "=", "None", ")", ":", "with", "session_scope", "(", "not", "ctask", ".", "request", ".", "called_directly", ")", "as", "session", ":", "try", ":", "return", "execute_sql_statements", "(", "ctask", ",", "query_id", ",", "rendered_query", ",", "return_results", ",", "store_results", ",", "user_name", ",", "session", "=", "session", ",", "start_time", "=", "start_time", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "stats_logger", ".", "incr", "(", "'error_sqllab_unhandled'", ")", "query", "=", "get_query", "(", "query_id", ",", "session", ")", "return", "handle_query_error", "(", "str", "(", "e", ")", ",", "query", ",", "session", ")" ]
45.733333
19
def user_group_perms_processor(request): """ return context variables with org permissions to the user. """ org = None group = None if hasattr(request, "user"): if request.user.is_anonymous: group = None else: group = request.user.get_org_group() org = request.user.get_org() if group: context = dict(org_perms=GroupPermWrapper(group)) else: context = dict() # make sure user_org is set on our request based on their session context["user_org"] = org return context
[ "def", "user_group_perms_processor", "(", "request", ")", ":", "org", "=", "None", "group", "=", "None", "if", "hasattr", "(", "request", ",", "\"user\"", ")", ":", "if", "request", ".", "user", ".", "is_anonymous", ":", "group", "=", "None", "else", ":", "group", "=", "request", ".", "user", ".", "get_org_group", "(", ")", "org", "=", "request", ".", "user", ".", "get_org", "(", ")", "if", "group", ":", "context", "=", "dict", "(", "org_perms", "=", "GroupPermWrapper", "(", "group", ")", ")", "else", ":", "context", "=", "dict", "(", ")", "# make sure user_org is set on our request based on their session", "context", "[", "\"user_org\"", "]", "=", "org", "return", "context" ]
24.304348
19.434783
def calc_upper_bca_percentile(alpha_percent, bias_correction, acceleration):
    """
    Calculate the upper values of the Bias Corrected and Accelerated (BCa)
    bootstrap confidence intervals.

    Parameters
    ----------
    alpha_percent : float in (0.0, 100.0).
        `100 - confidence_percentage`, where `confidence_percentage` is the
        confidence level (such as 95%), expressed as a percent.
    bias_correction : 1D ndarray.
        There will be one element for each element in `mle_estimate`. Elements
        denote the bias correction factors for each component of the parameter
        vector.
    acceleration : 1D ndarray.
        There will be one element for each element in `mle_estimate`. Elements
        denote the acceleration factors for each component of the parameter
        vector.

    Returns
    -------
    upper_percentile : 1D ndarray.
        There will be one element for each element in `mle_estimate`. Elements
        denote the larger values in the confidence interval for each component
        of the parameter vector.

    References
    ----------
    Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap.
        CRC press, 1994. Section 14.3, Equation 14.10.

    Notes
    -----
    The `alpha` used in this function is different from the `alpha` used in
    Efron and Tibshirani (1994). The `alpha` used in this function must be
    converted to a decimal (by dividing by 100) and then divided by 2 (to
    account for the equal-tailed nature of the confidence interval) in order
    to be made equivalent to the `alpha` in Efron and Tibshirani (1994).
    """
    z_upper = norm.ppf(1 - alpha_percent / (100.0 * 2))

    numerator = bias_correction + z_upper
    denominator = 1 - acceleration * numerator
    upper_percentile =\
        norm.cdf(bias_correction + numerator / denominator) * 100
    return upper_percentile
[ "def", "calc_upper_bca_percentile", "(", "alpha_percent", ",", "bias_correction", ",", "acceleration", ")", ":", "z_upper", "=", "norm", ".", "ppf", "(", "1", "-", "alpha_percent", "/", "(", "100.0", "*", "2", ")", ")", "numerator", "=", "bias_correction", "+", "z_upper", "denominator", "=", "1", "-", "acceleration", "*", "numerator", "upper_percentile", "=", "norm", ".", "cdf", "(", "bias_correction", "+", "numerator", "/", "denominator", ")", "*", "100", "return", "upper_percentile" ]
41.311111
25
def group_pairs(pair_list): """ Groups a list of items using the first element in each pair as the item and the second element as the groupid. Args: pair_list (list): list of 2-tuples (item, groupid) Returns: dict: groupid_to_items: maps a groupid to a list of items SeeAlso: group_items """ # Initialize dict of lists groupid_to_items = defaultdict(list) # Insert each item into the correct group for item, groupid in pair_list: groupid_to_items[groupid].append(item) return groupid_to_items
[ "def", "group_pairs", "(", "pair_list", ")", ":", "# Initialize dict of lists", "groupid_to_items", "=", "defaultdict", "(", "list", ")", "# Insert each item into the correct group", "for", "item", ",", "groupid", "in", "pair_list", ":", "groupid_to_items", "[", "groupid", "]", ".", "append", "(", "item", ")", "return", "groupid_to_items" ]
27.8
18.2
def get_countdown(self, retries) -> int: """Calculate the countdown for a celery task retry.""" retry_delay = self.retry_delay if self.retry_exponential_backoff: return min( max(2 ** retries, retry_delay), # Exp. backoff self.max_retry_delay # The countdown should be more the max allowed ) return retry_delay
[ "def", "get_countdown", "(", "self", ",", "retries", ")", "->", "int", ":", "retry_delay", "=", "self", ".", "retry_delay", "if", "self", ".", "retry_exponential_backoff", ":", "return", "min", "(", "max", "(", "2", "**", "retries", ",", "retry_delay", ")", ",", "# Exp. backoff", "self", ".", "max_retry_delay", "# The countdown should be more the max allowed", ")", "return", "retry_delay" ]
43.444444
14.333333
def CreateAd(client, opener, ad_group_id): """Creates a ResponsiveDisplayAd. Args: client: an AdWordsClient instance. opener: an OpenerDirector instance. ad_group_id: an int ad group ID. Returns: The ad group ad that was successfully created. """ ad_group_ad_service = client.GetService('AdGroupAdService', 'v201809') media_service = client.GetService('MediaService', 'v201809') marketing_image_id = _CreateImage( media_service, opener, 'https://goo.gl/3b9Wfh') logo_image_id = _CreateImage(media_service, opener, 'https://goo.gl/mtt54n') ad = { 'xsi_type': 'ResponsiveDisplayAd', # This ad format doesn't allow the creation of an image using the # Image.data field. An image must first be created using the MediaService, # and Image.mediaId must be populated when creating the ad. 'marketingImage': { 'xsi_type': 'Image', 'mediaId': marketing_image_id }, 'shortHeadline': 'Travel', 'longHeadline': 'Travel the World', 'description': 'Take to the air!', 'businessName': 'Interplanetary Cruises', 'finalUrls': ['http://wwww.example.com'], # Optional: Call to action text. # Valid texts: https://support.google.com/adwords/answer/7005917 'callToActionText': 'Apply Now', # Optional: Set dynamic display ad settings, composed of landscape logo # image, promotion text, and price prefix. 'dynamicDisplayAdSettings': CreateDynamicDisplayAdSettings( client, opener), # Optional: Create a logo image and set it to the ad. 'logoImage': { 'xsi_type': 'Image', 'mediaId': logo_image_id }, # Optional: Create a square marketing image and set it to the ad. 'squareMarketingImage': { 'xsi_type': 'Image', 'mediaId': logo_image_id }, # Whitelisted accounts only: Set color settings using hexadecimal values. # Set allowFlexibleColor to False if you want your ads to render by always # using your colors strictly. # 'mainColor': '#000fff', # 'accentColor': '#fff000', # 'allowFlexibleColor': False, # Whitelisted accounts only: Set the format setting that the ad will be # served in. # 'formatSetting': 'NON_NATIVE' } ad_group_ad = { 'ad': ad, 'adGroupId': ad_group_id } operations = [{ 'operation': 'ADD', 'operand': ad_group_ad }] return ad_group_ad_service.mutate(operations)['value'][0]
[ "def", "CreateAd", "(", "client", ",", "opener", ",", "ad_group_id", ")", ":", "ad_group_ad_service", "=", "client", ".", "GetService", "(", "'AdGroupAdService'", ",", "'v201809'", ")", "media_service", "=", "client", ".", "GetService", "(", "'MediaService'", ",", "'v201809'", ")", "marketing_image_id", "=", "_CreateImage", "(", "media_service", ",", "opener", ",", "'https://goo.gl/3b9Wfh'", ")", "logo_image_id", "=", "_CreateImage", "(", "media_service", ",", "opener", ",", "'https://goo.gl/mtt54n'", ")", "ad", "=", "{", "'xsi_type'", ":", "'ResponsiveDisplayAd'", ",", "# This ad format doesn't allow the creation of an image using the", "# Image.data field. An image must first be created using the MediaService,", "# and Image.mediaId must be populated when creating the ad.", "'marketingImage'", ":", "{", "'xsi_type'", ":", "'Image'", ",", "'mediaId'", ":", "marketing_image_id", "}", ",", "'shortHeadline'", ":", "'Travel'", ",", "'longHeadline'", ":", "'Travel the World'", ",", "'description'", ":", "'Take to the air!'", ",", "'businessName'", ":", "'Interplanetary Cruises'", ",", "'finalUrls'", ":", "[", "'http://wwww.example.com'", "]", ",", "# Optional: Call to action text.", "# Valid texts: https://support.google.com/adwords/answer/7005917", "'callToActionText'", ":", "'Apply Now'", ",", "# Optional: Set dynamic display ad settings, composed of landscape logo", "# image, promotion text, and price prefix.", "'dynamicDisplayAdSettings'", ":", "CreateDynamicDisplayAdSettings", "(", "client", ",", "opener", ")", ",", "# Optional: Create a logo image and set it to the ad.", "'logoImage'", ":", "{", "'xsi_type'", ":", "'Image'", ",", "'mediaId'", ":", "logo_image_id", "}", ",", "# Optional: Create a square marketing image and set it to the ad.", "'squareMarketingImage'", ":", "{", "'xsi_type'", ":", "'Image'", ",", "'mediaId'", ":", "logo_image_id", "}", ",", "# Whitelisted accounts only: Set color settings using hexadecimal values.", "# Set allowFlexibleColor to False if you want your ads to render by always", "# using your colors strictly.", "# 'mainColor': '#000fff',", "# 'accentColor': '#fff000',", "# 'allowFlexibleColor': False,", "# Whitelisted accounts only: Set the format setting that the ad will be", "# served in.", "# 'formatSetting': 'NON_NATIVE'", "}", "ad_group_ad", "=", "{", "'ad'", ":", "ad", ",", "'adGroupId'", ":", "ad_group_id", "}", "operations", "=", "[", "{", "'operation'", ":", "'ADD'", ",", "'operand'", ":", "ad_group_ad", "}", "]", "return", "ad_group_ad_service", ".", "mutate", "(", "operations", ")", "[", "'value'", "]", "[", "0", "]" ]
34.507042
19.380282
def value_dp_matrix(self): """ :return: DataProperty for table data. :rtype: list """ if self.__value_dp_matrix is None: self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix( to_value_matrix(self.headers, self.rows) ) return self.__value_dp_matrix
[ "def", "value_dp_matrix", "(", "self", ")", ":", "if", "self", ".", "__value_dp_matrix", "is", "None", ":", "self", ".", "__value_dp_matrix", "=", "self", ".", "__dp_extractor", ".", "to_dp_matrix", "(", "to_value_matrix", "(", "self", ".", "headers", ",", "self", ".", "rows", ")", ")", "return", "self", ".", "__value_dp_matrix" ]
27.583333
16.416667
def convertLengthList(self, svgAttr): """Convert a list of lengths.""" return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
[ "def", "convertLengthList", "(", "self", ",", "svgAttr", ")", ":", "return", "[", "self", ".", "convertLength", "(", "a", ")", "for", "a", "in", "self", ".", "split_attr_list", "(", "svgAttr", ")", "]" ]
51.333333
13.333333
def line_ribbon(self):
    '''Display the protein secondary structure as a white line that passes through
       the backbone chain.

    '''
    # Control points are the CA (C alphas)
    backbone = np.array(self.topology['atom_names']) == 'CA'

    smoothline = self.add_representation('smoothline', {'coordinates': self.coordinates[backbone],
                                                        'color': 0xffffff})

    def update(self=self, smoothline=smoothline):
        self.update_representation(smoothline, {'coordinates': self.coordinates[backbone]})

    self.update_callbacks.append(update)
    self.autozoom(self.coordinates)
[ "def", "line_ribbon", "(", "self", ")", ":", "# Control points are the CA (C alphas)", "backbone", "=", "np", ".", "array", "(", "self", ".", "topology", "[", "'atom_names'", "]", ")", "==", "'CA'", "smoothline", "=", "self", ".", "add_representation", "(", "'smoothline'", ",", "{", "'coordinates'", ":", "self", ".", "coordinates", "[", "backbone", "]", ",", "'color'", ":", "0xffffff", "}", ")", "def", "update", "(", "self", "=", "self", ",", "smoothline", "=", "smoothline", ")", ":", "self", ".", "update_representation", "(", "smoothline", ",", "{", "'coordinates'", ":", "self", ".", "coordinates", "[", "backbone", "]", "}", ")", "self", ".", "update_callbacks", ".", "append", "(", "update", ")", "self", ".", "autozoom", "(", "self", ".", "coordinates", ")" ]
44.8
29.066667
def accept_C_C(self, inst): ''' A Component contains packageable elements ''' for child in many(inst).PE_PE[8003](): self.accept(child)
[ "def", "accept_C_C", "(", "self", ",", "inst", ")", ":", "for", "child", "in", "many", "(", "inst", ")", ".", "PE_PE", "[", "8003", "]", "(", ")", ":", "self", ".", "accept", "(", "child", ")" ]
29
16
def get_subject_without_validation(jwt_bu64):
    """Extract subject from the JWT without validating the JWT.

    - The extracted subject cannot be trusted for authn or authz.

    Args:
      jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.

    Returns:
      str: The subject contained in the JWT.

    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    try:
        return jwt_dict['sub']
    except LookupError:
        log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
[ "def", "get_subject_without_validation", "(", "jwt_bu64", ")", ":", "try", ":", "jwt_dict", "=", "get_jwt_dict", "(", "jwt_bu64", ")", "except", "JwtException", "as", "e", ":", "return", "log_jwt_bu64_info", "(", "logging", ".", "error", ",", "str", "(", "e", ")", ",", "jwt_bu64", ")", "try", ":", "return", "jwt_dict", "[", "'sub'", "]", "except", "LookupError", ":", "log_jwt_dict_info", "(", "logging", ".", "error", ",", "'Missing \"sub\" key'", ",", "jwt_dict", ")" ]
28.47619
21.333333
def get_distance_function(distance): """ Returns the distance function from the string name provided :param distance: The string name of the distributions :return: """ # If we provided distance function ourselves, use it if callable(distance): return distance try: return _supported_distances_lookup()[distance] except KeyError: raise KeyError('Unsupported distance function {0!r}'.format(distance.lower()))
[ "def", "get_distance_function", "(", "distance", ")", ":", "# If we provided distance function ourselves, use it", "if", "callable", "(", "distance", ")", ":", "return", "distance", "try", ":", "return", "_supported_distances_lookup", "(", ")", "[", "distance", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "'Unsupported distance function {0!r}'", ".", "format", "(", "distance", ".", "lower", "(", ")", ")", ")" ]
32.5
19.357143
def _upgrades(self, sid, transport): """Return the list of possible upgrades for a client connection.""" if not self.allow_upgrades or self._get_socket(sid).upgraded or \ self._async['websocket'] is None or transport == 'websocket': return [] return ['websocket']
[ "def", "_upgrades", "(", "self", ",", "sid", ",", "transport", ")", ":", "if", "not", "self", ".", "allow_upgrades", "or", "self", ".", "_get_socket", "(", "sid", ")", ".", "upgraded", "or", "self", ".", "_async", "[", "'websocket'", "]", "is", "None", "or", "transport", "==", "'websocket'", ":", "return", "[", "]", "return", "[", "'websocket'", "]" ]
51.666667
17.5
def response_change(self, request, obj): """ Overrides the default to be able to forward to the directory listing instead of the default change_list_view """ r = super(FolderAdmin, self).response_change(request, obj) # Code borrowed from django ModelAdmin to determine changelist on the # fly if r['Location']: # it was a successful save if (r['Location'] in ['../'] or r['Location'] == self._get_post_url(obj)): if obj.parent: url = reverse('admin:filer-directory_listing', kwargs={'folder_id': obj.parent.id}) else: url = reverse('admin:filer-directory_listing-root') url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&")) return HttpResponseRedirect(url) else: # this means it probably was a save_and_continue_editing pass return r
[ "def", "response_change", "(", "self", ",", "request", ",", "obj", ")", ":", "r", "=", "super", "(", "FolderAdmin", ",", "self", ")", ".", "response_change", "(", "request", ",", "obj", ")", "# Code borrowed from django ModelAdmin to determine changelist on the", "# fly", "if", "r", "[", "'Location'", "]", ":", "# it was a successful save", "if", "(", "r", "[", "'Location'", "]", "in", "[", "'../'", "]", "or", "r", "[", "'Location'", "]", "==", "self", ".", "_get_post_url", "(", "obj", ")", ")", ":", "if", "obj", ".", "parent", ":", "url", "=", "reverse", "(", "'admin:filer-directory_listing'", ",", "kwargs", "=", "{", "'folder_id'", ":", "obj", ".", "parent", ".", "id", "}", ")", "else", ":", "url", "=", "reverse", "(", "'admin:filer-directory_listing-root'", ")", "url", "=", "\"%s%s%s\"", "%", "(", "url", ",", "popup_param", "(", "request", ")", ",", "selectfolder_param", "(", "request", ",", "\"&\"", ")", ")", "return", "HttpResponseRedirect", "(", "url", ")", "else", ":", "# this means it probably was a save_and_continue_editing", "pass", "return", "r" ]
44.458333
18.541667
def _chunk_len_type(self): """ Reads just enough of the input to determine the next chunk's length and type; return a (*length*, *type*) pair where *type* is a byte sequence. If there are no more chunks, ``None`` is returned. """ x = self.file.read(8) if not x: return None if len(x) != 8: raise FormatError( 'End of file whilst reading chunk length and type.') length, type = struct.unpack('!I4s', x) if length > 2 ** 31 - 1: raise FormatError('Chunk %s is too large: %d.' % (type, length)) # Check that all bytes are in valid ASCII range. # https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout type_bytes = set(bytearray(type)) if not(type_bytes <= set(range(65, 91)) | set(range(97, 123))): raise FormatError( 'Chunk %r has invalid Chunk Type.' % list(type)) return length, type
[ "def", "_chunk_len_type", "(", "self", ")", ":", "x", "=", "self", ".", "file", ".", "read", "(", "8", ")", "if", "not", "x", ":", "return", "None", "if", "len", "(", "x", ")", "!=", "8", ":", "raise", "FormatError", "(", "'End of file whilst reading chunk length and type.'", ")", "length", ",", "type", "=", "struct", ".", "unpack", "(", "'!I4s'", ",", "x", ")", "if", "length", ">", "2", "**", "31", "-", "1", ":", "raise", "FormatError", "(", "'Chunk %s is too large: %d.'", "%", "(", "type", ",", "length", ")", ")", "# Check that all bytes are in valid ASCII range.", "# https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-layout", "type_bytes", "=", "set", "(", "bytearray", "(", "type", ")", ")", "if", "not", "(", "type_bytes", "<=", "set", "(", "range", "(", "65", ",", "91", ")", ")", "|", "set", "(", "range", "(", "97", ",", "123", ")", ")", ")", ":", "raise", "FormatError", "(", "'Chunk %r has invalid Chunk Type.'", "%", "list", "(", "type", ")", ")", "return", "length", ",", "type" ]
39.52
15.76
def handle_input(self, input_str, place=True, check=False): '''Transfer user input to valid chess position''' user = self.get_player() pos = self.validate_input(input_str) if pos[0] == 'u': self.undo(pos[1]) return pos if place: result = self.set_pos(pos, check) return result else: return pos
[ "def", "handle_input", "(", "self", ",", "input_str", ",", "place", "=", "True", ",", "check", "=", "False", ")", ":", "user", "=", "self", ".", "get_player", "(", ")", "pos", "=", "self", ".", "validate_input", "(", "input_str", ")", "if", "pos", "[", "0", "]", "==", "'u'", ":", "self", ".", "undo", "(", "pos", "[", "1", "]", ")", "return", "pos", "if", "place", ":", "result", "=", "self", ".", "set_pos", "(", "pos", ",", "check", ")", "return", "result", "else", ":", "return", "pos" ]
32.5
15
def check_password_expired(user): """ Return True if password is expired and system is using password expiration, False otherwise. """ if not settings.ACCOUNT_PASSWORD_USE_HISTORY: return False if hasattr(user, "password_expiry"): # user-specific value expiry = user.password_expiry.expiry else: # use global value expiry = settings.ACCOUNT_PASSWORD_EXPIRY if expiry == 0: # zero indicates no expiration return False try: # get latest password info latest = user.password_history.latest("timestamp") except PasswordHistory.DoesNotExist: return False now = datetime.datetime.now(tz=pytz.UTC) expiration = latest.timestamp + datetime.timedelta(seconds=expiry) if expiration < now: return True else: return False
[ "def", "check_password_expired", "(", "user", ")", ":", "if", "not", "settings", ".", "ACCOUNT_PASSWORD_USE_HISTORY", ":", "return", "False", "if", "hasattr", "(", "user", ",", "\"password_expiry\"", ")", ":", "# user-specific value", "expiry", "=", "user", ".", "password_expiry", ".", "expiry", "else", ":", "# use global value", "expiry", "=", "settings", ".", "ACCOUNT_PASSWORD_EXPIRY", "if", "expiry", "==", "0", ":", "# zero indicates no expiration", "return", "False", "try", ":", "# get latest password info", "latest", "=", "user", ".", "password_history", ".", "latest", "(", "\"timestamp\"", ")", "except", "PasswordHistory", ".", "DoesNotExist", ":", "return", "False", "now", "=", "datetime", ".", "datetime", ".", "now", "(", "tz", "=", "pytz", ".", "UTC", ")", "expiration", "=", "latest", ".", "timestamp", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "expiry", ")", "if", "expiration", "<", "now", ":", "return", "True", "else", ":", "return", "False" ]
26.741935
17.83871
def add_http_endpoint(self, url, request_handler):
    """
        This method provides a programmatic way of adding individual routes
        to the http server.

        Args:
            url (str): the url to be handled by the request_handler
            request_handler (nautilus.network.RequestHandler): The request handler
    """
    self.app.router.add_route('*', url, request_handler)
[ "def", "add_http_endpoint", "(", "self", ",", "url", ",", "request_handler", ")", ":", "self", ".", "app", ".", "router", ".", "add_route", "(", "'*'", ",", "url", ",", "request_handler", ")" ]
41.2
21.4
def add_xml_to_node(self, node): """ For exporting, set data on etree.Element `node`. """ super(XBlock, self).add_xml_to_node(node) # Add children for each of our children. self.add_children_to_node(node)
[ "def", "add_xml_to_node", "(", "self", ",", "node", ")", ":", "super", "(", "XBlock", ",", "self", ")", ".", "add_xml_to_node", "(", "node", ")", "# Add children for each of our children.", "self", ".", "add_children_to_node", "(", "node", ")" ]
35.142857
6
def pairwise(seq): """ Pair an iterable, e.g., (1, 2, 3, 4) -> ((1, 2), (2, 3), (3, 4)) """ for i in range(0, len(seq) - 1): yield (seq[i], seq[i + 1])
[ "def", "pairwise", "(", "seq", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "seq", ")", "-", "1", ")", ":", "yield", "(", "seq", "[", "i", "]", ",", "seq", "[", "i", "+", "1", "]", ")" ]
28.333333
10
def capture(self, commit = ""): """Capture the current state of a project based on its provider Commit is relevant only for upstream providers. If empty, the latest commit from provider repository is taken. It is ignored for distribution providers. :param provider: project provider, e.g. upstream repository, distribution builder :type provider: json/dict :param commit: project's original commit :type commit: string """ self._validateProvider(self._provider) # get client for repository # TODO(jchaloup): read config file to switch between local and remove clients # TODO(jchaloup): remote client can cover gofed infratructure or any remove source for repository info client = RepositoryClientBuilder().buildWithRemoteClient(self._provider) if self._provider["provider"] == "github": self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature() elif self._provider["provider"] == "bitbucket": self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature() else: raise KeyError("Provider '%s' not recognized" % self._provider["provider"]) return self
[ "def", "capture", "(", "self", ",", "commit", "=", "\"\"", ")", ":", "self", ".", "_validateProvider", "(", "self", ".", "_provider", ")", "# get client for repository", "# TODO(jchaloup): read config file to switch between local and remove clients", "# TODO(jchaloup): remote client can cover gofed infratructure or any remove source for repository info", "client", "=", "RepositoryClientBuilder", "(", ")", ".", "buildWithRemoteClient", "(", "self", ".", "_provider", ")", "if", "self", ".", "_provider", "[", "\"provider\"", "]", "==", "\"github\"", ":", "self", ".", "_signature", "=", "ProjectGithubRepositoryCapturer", "(", "self", ".", "_provider", ",", "client", ")", ".", "capture", "(", "commit", ")", ".", "signature", "(", ")", "elif", "self", ".", "_provider", "[", "\"provider\"", "]", "==", "\"bitbucket\"", ":", "self", ".", "_signature", "=", "ProjectBitbucketRepositoryCapturer", "(", "self", ".", "_provider", ",", "client", ")", ".", "capture", "(", "commit", ")", ".", "signature", "(", ")", "else", ":", "raise", "KeyError", "(", "\"Provider '%s' not recognized\"", "%", "self", ".", "_provider", "[", "\"provider\"", "]", ")", "return", "self" ]
43
26.259259
def add_enumerable_item_to_dict(dict_, key, item): """Add an item to a list contained in a dict. For example: If the dict is ``{'some_key': ['an_item']}``, then calling this function will alter the dict to ``{'some_key': ['an_item', 'another_item']}``. If the key doesn't exist yet, the function initializes it with a list containing the item. List-like items are allowed. In this case, the existing list will be extended. Args: dict_ (dict): the dict to modify key (str): the key to add the item to item (whatever): The item to add to the list associated to the key """ dict_.setdefault(key, []) if isinstance(item, (list, tuple)): dict_[key].extend(item) else: dict_[key].append(item)
[ "def", "add_enumerable_item_to_dict", "(", "dict_", ",", "key", ",", "item", ")", ":", "dict_", ".", "setdefault", "(", "key", ",", "[", "]", ")", "if", "isinstance", "(", "item", ",", "(", "list", ",", "tuple", ")", ")", ":", "dict_", "[", "key", "]", ".", "extend", "(", "item", ")", "else", ":", "dict_", "[", "key", "]", ".", "append", "(", "item", ")" ]
34.272727
24.727273
def extract_prefix_attr(cls, req):
    """ Extract prefix attributes from an arbitrary dict.
    """

    # TODO: add more?
    attr = {}
    if 'id' in req:
        attr['id'] = int(req['id'])
    if 'prefix' in req:
        attr['prefix'] = req['prefix']
    if 'pool' in req:
        attr['pool_id'] = int(req['pool'])
    if 'node' in req:
        attr['node'] = req['node']
    if 'type' in req:
        attr['type'] = req['type']
    if 'country' in req:
        attr['country'] = req['country']
    if 'indent' in req:
        attr['indent'] = req['indent']

    return attr
[ "def", "extract_prefix_attr", "(", "cls", ",", "req", ")", ":", "# TODO: add more?", "attr", "=", "{", "}", "if", "'id'", "in", "req", ":", "attr", "[", "'id'", "]", "=", "int", "(", "req", "[", "'id'", "]", ")", "if", "'prefix'", "in", "req", ":", "attr", "[", "'prefix'", "]", "=", "req", "[", "'prefix'", "]", "if", "'pool'", "in", "req", ":", "attr", "[", "'pool_id'", "]", "=", "int", "(", "req", "[", "'pool'", "]", ")", "if", "'node'", "in", "req", ":", "attr", "[", "'node'", "]", "=", "req", "[", "'node'", "]", "if", "'type'", "in", "req", ":", "attr", "[", "'type'", "]", "=", "req", "[", "'type'", "]", "if", "'country'", "in", "req", ":", "attr", "[", "'country'", "]", "=", "req", "[", "'country'", "]", "if", "'indent'", "in", "req", ":", "attr", "[", "'indent'", "]", "=", "req", "[", "'indent'", "]", "return", "attr" ]
28.727273
12
def save(potential, f): """ Write a :class:`~gala.potential.PotentialBase` object out to a text (YAML) file. Parameters ---------- potential : :class:`~gala.potential.PotentialBase` The instantiated :class:`~gala.potential.PotentialBase` object. f : str, file_like A filename or file-like object to write the input potential object to. """ d = to_dict(potential) if hasattr(f, 'write'): yaml.dump(d, f, default_flow_style=False) else: with open(f, 'w') as f2: yaml.dump(d, f2, default_flow_style=False)
[ "def", "save", "(", "potential", ",", "f", ")", ":", "d", "=", "to_dict", "(", "potential", ")", "if", "hasattr", "(", "f", ",", "'write'", ")", ":", "yaml", ".", "dump", "(", "d", ",", "f", ",", "default_flow_style", "=", "False", ")", "else", ":", "with", "open", "(", "f", ",", "'w'", ")", "as", "f2", ":", "yaml", ".", "dump", "(", "d", ",", "f2", ",", "default_flow_style", "=", "False", ")" ]
28.7
22.4
def update(self, data): """Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: `bool` """ # If the instance was terminated, remove it updated = self.set_property('state', data['state']) updated |= self.set_property('notes', sorted(data['notes'] or [])) updated |= self.set_property('last_notice', data['last_notice']) if updated: self.set_property('last_change', datetime.now()) return updated
[ "def", "update", "(", "self", ",", "data", ")", ":", "# If the instance was terminated, remove it", "updated", "=", "self", ".", "set_property", "(", "'state'", ",", "data", "[", "'state'", "]", ")", "updated", "|=", "self", ".", "set_property", "(", "'notes'", ",", "sorted", "(", "data", "[", "'notes'", "]", "or", "[", "]", ")", ")", "updated", "|=", "self", ".", "set_property", "(", "'last_notice'", ",", "data", "[", "'last_notice'", "]", ")", "if", "updated", ":", "self", ".", "set_property", "(", "'last_change'", ",", "datetime", ".", "now", "(", ")", ")", "return", "updated" ]
38.9
25.6
def add_book(self, publisher=None, place=None, date=None): """ Make a dictionary that is representing a book. :param publisher: publisher name :type publisher: string :param place: place of publication :type place: string :param date: A (partial) date in any format. The date should contain at least a year :type date: string :rtype: dict """ imprint = {} if date is not None: imprint['date'] = normalize_date(date) if place is not None: imprint['place'] = place if publisher is not None: imprint['publisher'] = publisher self._append_to('imprints', imprint)
[ "def", "add_book", "(", "self", ",", "publisher", "=", "None", ",", "place", "=", "None", ",", "date", "=", "None", ")", ":", "imprint", "=", "{", "}", "if", "date", "is", "not", "None", ":", "imprint", "[", "'date'", "]", "=", "normalize_date", "(", "date", ")", "if", "place", "is", "not", "None", ":", "imprint", "[", "'place'", "]", "=", "place", "if", "publisher", "is", "not", "None", ":", "imprint", "[", "'publisher'", "]", "=", "publisher", "self", ".", "_append_to", "(", "'imprints'", ",", "imprint", ")" ]
26.185185
17.222222
def to_signed(cls, t): """ Return signed type or equivalent """ assert isinstance(t, SymbolTYPE) t = t.final assert t.is_basic if cls.is_unsigned(t): return {cls.ubyte: cls.byte_, cls.uinteger: cls.integer, cls.ulong: cls.long_}[t] if cls.is_signed(t) or cls.is_decimal(t): return t return cls.unknown
[ "def", "to_signed", "(", "cls", ",", "t", ")", ":", "assert", "isinstance", "(", "t", ",", "SymbolTYPE", ")", "t", "=", "t", ".", "final", "assert", "t", ".", "is_basic", "if", "cls", ".", "is_unsigned", "(", "t", ")", ":", "return", "{", "cls", ".", "ubyte", ":", "cls", ".", "byte_", ",", "cls", ".", "uinteger", ":", "cls", ".", "integer", ",", "cls", ".", "ulong", ":", "cls", ".", "long_", "}", "[", "t", "]", "if", "cls", ".", "is_signed", "(", "t", ")", "or", "cls", ".", "is_decimal", "(", "t", ")", ":", "return", "t", "return", "cls", ".", "unknown" ]
32.076923
9.076923
def allow_bare_decorator(cls): """ Wrapper for a class decorator which allows for bare decorator and argument syntax """ @wraps(cls) def wrapper(*args, **kwargs): """"Wrapper for real decorator""" # If we weren't only passed a bare class, return class instance if kwargs or len(args) != 1 or not isclass(args[0]): # pylint: disable=no-else-return return cls(*args, **kwargs) # Otherwise, pass call to instance with default values else: return cls()(args[0]) return wrapper
[ "def", "allow_bare_decorator", "(", "cls", ")", ":", "@", "wraps", "(", "cls", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\"Wrapper for real decorator\"\"\"", "# If we weren't only passed a bare class, return class instance", "if", "kwargs", "or", "len", "(", "args", ")", "!=", "1", "or", "not", "isclass", "(", "args", "[", "0", "]", ")", ":", "# pylint: disable=no-else-return", "return", "cls", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Otherwise, pass call to instance with default values", "else", ":", "return", "cls", "(", ")", "(", "args", "[", "0", "]", ")", "return", "wrapper" ]
32.235294
21.823529
def _fill_properties(verdict, result, testcase, testcase_id, testcase_title): """Adds properties into testcase element.""" properties = etree.SubElement(testcase, "properties") etree.SubElement( properties, "property", {"name": "polarion-testcase-id", "value": testcase_id or testcase_title}, ) if verdict in Verdicts.PASS and result.get("comment"): etree.SubElement( properties, "property", { "name": "polarion-testcase-comment", "value": utils.get_unicode_str(result["comment"]), }, ) for param, value in six.iteritems(result.get("params") or {}): etree.SubElement( properties, "property", { "name": "polarion-parameter-{}".format(param), "value": utils.get_unicode_str(value), }, )
[ "def", "_fill_properties", "(", "verdict", ",", "result", ",", "testcase", ",", "testcase_id", ",", "testcase_title", ")", ":", "properties", "=", "etree", ".", "SubElement", "(", "testcase", ",", "\"properties\"", ")", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-testcase-id\"", ",", "\"value\"", ":", "testcase_id", "or", "testcase_title", "}", ",", ")", "if", "verdict", "in", "Verdicts", ".", "PASS", "and", "result", ".", "get", "(", "\"comment\"", ")", ":", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-testcase-comment\"", ",", "\"value\"", ":", "utils", ".", "get_unicode_str", "(", "result", "[", "\"comment\"", "]", ")", ",", "}", ",", ")", "for", "param", ",", "value", "in", "six", ".", "iteritems", "(", "result", ".", "get", "(", "\"params\"", ")", "or", "{", "}", ")", ":", "etree", ".", "SubElement", "(", "properties", ",", "\"property\"", ",", "{", "\"name\"", ":", "\"polarion-parameter-{}\"", ".", "format", "(", "param", ")", ",", "\"value\"", ":", "utils", ".", "get_unicode_str", "(", "value", ")", ",", "}", ",", ")" ]
37
21.592593
def encap(self, pkt): """encapsulate a frame using this Secure Association""" if pkt.name != Ether().name: raise TypeError('cannot encapsulate packet in MACsec, must be Ethernet') # noqa: E501 hdr = copy.deepcopy(pkt) payload = hdr.payload del hdr.payload tag = MACsec(sci=self.sci, an=self.an, SC=self.send_sci, E=self.e_bit(), C=self.c_bit(), shortlen=MACsecSA.shortlen(pkt), pn=(self.pn & 0xFFFFFFFF), type=pkt.type) hdr.type = ETH_P_MACSEC return hdr / tag / payload
[ "def", "encap", "(", "self", ",", "pkt", ")", ":", "if", "pkt", ".", "name", "!=", "Ether", "(", ")", ".", "name", ":", "raise", "TypeError", "(", "'cannot encapsulate packet in MACsec, must be Ethernet'", ")", "# noqa: E501", "hdr", "=", "copy", ".", "deepcopy", "(", "pkt", ")", "payload", "=", "hdr", ".", "payload", "del", "hdr", ".", "payload", "tag", "=", "MACsec", "(", "sci", "=", "self", ".", "sci", ",", "an", "=", "self", ".", "an", ",", "SC", "=", "self", ".", "send_sci", ",", "E", "=", "self", ".", "e_bit", "(", ")", ",", "C", "=", "self", ".", "c_bit", "(", ")", ",", "shortlen", "=", "MACsecSA", ".", "shortlen", "(", "pkt", ")", ",", "pn", "=", "(", "self", ".", "pn", "&", "0xFFFFFFFF", ")", ",", "type", "=", "pkt", ".", "type", ")", "hdr", ".", "type", "=", "ETH_P_MACSEC", "return", "hdr", "/", "tag", "/", "payload" ]
44.142857
13.357143
def parse_iso_utc(s): """ Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are not supported. :param str s: the ISO-formatted time :rtype: datetime.datetime :return: an timezone-naive datetime object >>> parse_iso_utc('2016-04-27T00:28:04.000Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04X') Traceback (most recent call last): ... ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X """ m = rfc3339_datetime_re().match(s) if not m: raise ValueError( 'Not a valid ISO datetime in UTC: ' + s ) else: fmt = '%Y-%m-%dT%H:%M:%S' + ('.%f' if m.group(7) else '') + 'Z' return datetime.datetime.strptime(s, fmt)
[ "def", "parse_iso_utc", "(", "s", ")", ":", "m", "=", "rfc3339_datetime_re", "(", ")", ".", "match", "(", "s", ")", "if", "not", "m", ":", "raise", "ValueError", "(", "'Not a valid ISO datetime in UTC: '", "+", "s", ")", "else", ":", "fmt", "=", "'%Y-%m-%dT%H:%M:%S'", "+", "(", "'.%f'", "if", "m", ".", "group", "(", "7", ")", "else", "''", ")", "+", "'Z'", "return", "datetime", ".", "datetime", ".", "strptime", "(", "s", ",", "fmt", ")" ]
32.692308
18.846154
def do_teardown_appcontext(self, exc=None): """Called when an application context is popped. This works pretty much the same as :meth:`do_teardown_request` but for the application context. .. versionadded:: 0.9 """ if exc is None: exc = sys.exc_info()[1] for func in reversed(self.teardown_appcontext_funcs): func(exc) appcontext_tearing_down.send(self, exc=exc)
[ "def", "do_teardown_appcontext", "(", "self", ",", "exc", "=", "None", ")", ":", "if", "exc", "is", "None", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "for", "func", "in", "reversed", "(", "self", ".", "teardown_appcontext_funcs", ")", ":", "func", "(", "exc", ")", "appcontext_tearing_down", ".", "send", "(", "self", ",", "exc", "=", "exc", ")" ]
36.75
15.583333
def detach(self, *items): """ Unlinks all of the specified items from the tree. The items and all of their descendants are still present, and may be reinserted at another point in the tree, but will not be displayed. The root item may not be detached. :param items: list of item identifiers :type items: sequence[str] """ self._visual_drag.detach(*items) ttk.Treeview.detach(self, *items)
[ "def", "detach", "(", "self", ",", "*", "items", ")", ":", "self", ".", "_visual_drag", ".", "detach", "(", "*", "items", ")", "ttk", ".", "Treeview", ".", "detach", "(", "self", ",", "*", "items", ")" ]
35.230769
15.230769
def _on_file_deleted(self, event: FileSystemEvent): """ Called when a file in the monitored directory has been deleted. :param event: the file system event """ if not event.is_directory and self.is_data_file(event.src_path): assert event.src_path in self._origin_mapped_data del(self._origin_mapped_data[event.src_path]) self.notify_listeners(FileSystemChange.DELETE)
[ "def", "_on_file_deleted", "(", "self", ",", "event", ":", "FileSystemEvent", ")", ":", "if", "not", "event", ".", "is_directory", "and", "self", ".", "is_data_file", "(", "event", ".", "src_path", ")", ":", "assert", "event", ".", "src_path", "in", "self", ".", "_origin_mapped_data", "del", "(", "self", ".", "_origin_mapped_data", "[", "event", ".", "src_path", "]", ")", "self", ".", "notify_listeners", "(", "FileSystemChange", ".", "DELETE", ")" ]
48.333333
14.777778
def imgur_role(name, rawtext, text, *_): """Imgur ":imgur-title:`a/abc1234`" or ":imgur-description:`abc1234`" rst inline roles. "Schedules" an API query. :raises ImgurError: if text has invalid Imgur ID. :param str name: Role name (e.g. 'imgur-title'). :param str rawtext: Entire role and value markup (e.g. ':imgur-title:`hWyW0`'). :param str text: The parameter used in the role markup (e.g. 'hWyW0'). :return: 2-item tuple of lists. First list are the rst nodes replacing the role. Second is a list of errors. :rtype: tuple """ if not RE_IMGUR_ID.match(text): message = 'Invalid Imgur ID specified. Must be 5-10 letters and numbers. Got "{}" from "{}".' raise ImgurError(message.format(text, rawtext)) node = ImgurTextNode(name, text) return [node], []
[ "def", "imgur_role", "(", "name", ",", "rawtext", ",", "text", ",", "*", "_", ")", ":", "if", "not", "RE_IMGUR_ID", ".", "match", "(", "text", ")", ":", "message", "=", "'Invalid Imgur ID specified. Must be 5-10 letters and numbers. Got \"{}\" from \"{}\".'", "raise", "ImgurError", "(", "message", ".", "format", "(", "text", ",", "rawtext", ")", ")", "node", "=", "ImgurTextNode", "(", "name", ",", "text", ")", "return", "[", "node", "]", ",", "[", "]" ]
42.421053
24.842105
def version(self): """Get the QS Mobile version.""" # requests.get destroys the ? import urllib with urllib.request.urlopen(URL_VERSION.format(self._url)) as response: return response.read().decode('utf-8') return False
[ "def", "version", "(", "self", ")", ":", "# requests.get destroys the ?", "import", "urllib", "with", "urllib", ".", "request", ".", "urlopen", "(", "URL_VERSION", ".", "format", "(", "self", ".", "_url", ")", ")", "as", "response", ":", "return", "response", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "return", "False" ]
37.857143
16.142857
def Header(self): """ Get the block header. Returns: neo.Core.Header: """ if not self._header: self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp, self.Index, self.ConsensusData, self.NextConsensus, self.Script) return self._header
[ "def", "Header", "(", "self", ")", ":", "if", "not", "self", ".", "_header", ":", "self", ".", "_header", "=", "Header", "(", "self", ".", "PrevHash", ",", "self", ".", "MerkleRoot", ",", "self", ".", "Timestamp", ",", "self", ".", "Index", ",", "self", ".", "ConsensusData", ",", "self", ".", "NextConsensus", ",", "self", ".", "Script", ")", "return", "self", ".", "_header" ]
28.833333
22.833333
def zipfiles(fnames, archive, mode='w', log=lambda msg: None, cleanup=False): """ Build a zip archive from the given file names. :param fnames: list of path names :param archive: path of the archive """ prefix = len(os.path.commonprefix([os.path.dirname(f) for f in fnames])) with zipfile.ZipFile( archive, mode, zipfile.ZIP_DEFLATED, allowZip64=True) as z: for f in fnames: log('Archiving %s' % f) z.write(f, f[prefix:]) if cleanup: # remove the zipped file os.remove(f) log('Generated %s' % archive) return archive
[ "def", "zipfiles", "(", "fnames", ",", "archive", ",", "mode", "=", "'w'", ",", "log", "=", "lambda", "msg", ":", "None", ",", "cleanup", "=", "False", ")", ":", "prefix", "=", "len", "(", "os", ".", "path", ".", "commonprefix", "(", "[", "os", ".", "path", ".", "dirname", "(", "f", ")", "for", "f", "in", "fnames", "]", ")", ")", "with", "zipfile", ".", "ZipFile", "(", "archive", ",", "mode", ",", "zipfile", ".", "ZIP_DEFLATED", ",", "allowZip64", "=", "True", ")", "as", "z", ":", "for", "f", "in", "fnames", ":", "log", "(", "'Archiving %s'", "%", "f", ")", "z", ".", "write", "(", "f", ",", "f", "[", "prefix", ":", "]", ")", "if", "cleanup", ":", "# remove the zipped file", "os", ".", "remove", "(", "f", ")", "log", "(", "'Generated %s'", "%", "archive", ")", "return", "archive" ]
35.882353
14.705882
def unwrap(lines, max_wrap_lines, min_header_lines, min_quoted_lines): """ Returns a tuple of: - Type ('forward', 'reply', 'headers', 'quoted') - Range of the text at the top of the wrapped message (or None) - Headers dict (or None) - Range of the text of the wrapped message (or None) - Range of the text below the wrapped message (or None) - Whether the wrapped text needs to be unindented """ headers = {} # Get line number and wrapping type. start, end, typ = find_unwrap_start(lines, max_wrap_lines, min_header_lines, min_quoted_lines) # We found a line indicating that it's a forward/reply. if typ in ('forward', 'reply'): main_type = typ if typ == 'reply': reply_headers = parse_reply(join_wrapped_lines(lines[start:end+1])) if reply_headers: headers.update(reply_headers) # Find where the headers or the quoted section starts. # We can set min_quoted_lines to 1 because we expect a quoted section. start2, end2, typ = find_unwrap_start(lines[end+1:], max_wrap_lines, min_header_lines, 1) if typ == 'quoted': # Quoted section starts. Unindent and check if there are headers. quoted_start = end+1+start2 unquoted = unindent_lines(lines[quoted_start:]) rest_start = quoted_start + len(unquoted) start3, end3, typ = find_unwrap_start(unquoted, max_wrap_lines, min_header_lines, min_quoted_lines) if typ == 'headers': hdrs, hdrs_length = extract_headers(unquoted[start3:], max_wrap_lines) if hdrs: headers.update(hdrs) rest2_start = quoted_start+start3+hdrs_length return main_type, (0, start), headers, (rest2_start, rest_start), (rest_start, None), True else: return main_type, (0, start), headers, (quoted_start, rest_start), (rest_start, None), True elif typ == 'headers': hdrs, hdrs_length = extract_headers(lines[start+1:], max_wrap_lines) if hdrs: headers.update(hdrs) rest_start = start + 1 + hdrs_length return main_type, (0, start), headers, (rest_start, None), None, False else: # Didn't find quoted section or headers, assume that everything # below is the qouted text. return main_type, (0, start), headers, (start+(start2 or 0)+1, None), None, False # We just found headers, which usually indicates a forwarding. elif typ == 'headers': main_type = 'forward' hdrs, hdrs_length = extract_headers(lines[start:], max_wrap_lines) rest_start = start + hdrs_length return main_type, (0, start), hdrs, (rest_start, None), None, False # We found quoted text. Headers may be within the quoted text. elif typ == 'quoted': unquoted = unindent_lines(lines[start:]) rest_start = start + len(unquoted) start2, end2, typ = find_unwrap_start(unquoted, max_wrap_lines, min_header_lines, min_quoted_lines) if typ == 'headers': main_type = 'forward' hdrs, hdrs_length = extract_headers(unquoted[start2:], max_wrap_lines) rest2_start = start + hdrs_length return main_type, (0, start), hdrs, (rest2_start, rest_start), (rest_start, None), True else: main_type = 'quote' return main_type, (None, start), None, (start, rest_start), (rest_start, None), True
[ "def", "unwrap", "(", "lines", ",", "max_wrap_lines", ",", "min_header_lines", ",", "min_quoted_lines", ")", ":", "headers", "=", "{", "}", "# Get line number and wrapping type.", "start", ",", "end", ",", "typ", "=", "find_unwrap_start", "(", "lines", ",", "max_wrap_lines", ",", "min_header_lines", ",", "min_quoted_lines", ")", "# We found a line indicating that it's a forward/reply.", "if", "typ", "in", "(", "'forward'", ",", "'reply'", ")", ":", "main_type", "=", "typ", "if", "typ", "==", "'reply'", ":", "reply_headers", "=", "parse_reply", "(", "join_wrapped_lines", "(", "lines", "[", "start", ":", "end", "+", "1", "]", ")", ")", "if", "reply_headers", ":", "headers", ".", "update", "(", "reply_headers", ")", "# Find where the headers or the quoted section starts.", "# We can set min_quoted_lines to 1 because we expect a quoted section.", "start2", ",", "end2", ",", "typ", "=", "find_unwrap_start", "(", "lines", "[", "end", "+", "1", ":", "]", ",", "max_wrap_lines", ",", "min_header_lines", ",", "1", ")", "if", "typ", "==", "'quoted'", ":", "# Quoted section starts. Unindent and check if there are headers.", "quoted_start", "=", "end", "+", "1", "+", "start2", "unquoted", "=", "unindent_lines", "(", "lines", "[", "quoted_start", ":", "]", ")", "rest_start", "=", "quoted_start", "+", "len", "(", "unquoted", ")", "start3", ",", "end3", ",", "typ", "=", "find_unwrap_start", "(", "unquoted", ",", "max_wrap_lines", ",", "min_header_lines", ",", "min_quoted_lines", ")", "if", "typ", "==", "'headers'", ":", "hdrs", ",", "hdrs_length", "=", "extract_headers", "(", "unquoted", "[", "start3", ":", "]", ",", "max_wrap_lines", ")", "if", "hdrs", ":", "headers", ".", "update", "(", "hdrs", ")", "rest2_start", "=", "quoted_start", "+", "start3", "+", "hdrs_length", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "headers", ",", "(", "rest2_start", ",", "rest_start", ")", ",", "(", "rest_start", ",", "None", ")", ",", "True", "else", ":", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "headers", ",", "(", "quoted_start", ",", "rest_start", ")", ",", "(", "rest_start", ",", "None", ")", ",", "True", "elif", "typ", "==", "'headers'", ":", "hdrs", ",", "hdrs_length", "=", "extract_headers", "(", "lines", "[", "start", "+", "1", ":", "]", ",", "max_wrap_lines", ")", "if", "hdrs", ":", "headers", ".", "update", "(", "hdrs", ")", "rest_start", "=", "start", "+", "1", "+", "hdrs_length", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "headers", ",", "(", "rest_start", ",", "None", ")", ",", "None", ",", "False", "else", ":", "# Didn't find quoted section or headers, assume that everything", "# below is the qouted text.", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "headers", ",", "(", "start", "+", "(", "start2", "or", "0", ")", "+", "1", ",", "None", ")", ",", "None", ",", "False", "# We just found headers, which usually indicates a forwarding.", "elif", "typ", "==", "'headers'", ":", "main_type", "=", "'forward'", "hdrs", ",", "hdrs_length", "=", "extract_headers", "(", "lines", "[", "start", ":", "]", ",", "max_wrap_lines", ")", "rest_start", "=", "start", "+", "hdrs_length", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "hdrs", ",", "(", "rest_start", ",", "None", ")", ",", "None", ",", "False", "# We found quoted text. 
Headers may be within the quoted text.", "elif", "typ", "==", "'quoted'", ":", "unquoted", "=", "unindent_lines", "(", "lines", "[", "start", ":", "]", ")", "rest_start", "=", "start", "+", "len", "(", "unquoted", ")", "start2", ",", "end2", ",", "typ", "=", "find_unwrap_start", "(", "unquoted", ",", "max_wrap_lines", ",", "min_header_lines", ",", "min_quoted_lines", ")", "if", "typ", "==", "'headers'", ":", "main_type", "=", "'forward'", "hdrs", ",", "hdrs_length", "=", "extract_headers", "(", "unquoted", "[", "start2", ":", "]", ",", "max_wrap_lines", ")", "rest2_start", "=", "start", "+", "hdrs_length", "return", "main_type", ",", "(", "0", ",", "start", ")", ",", "hdrs", ",", "(", "rest2_start", ",", "rest_start", ")", ",", "(", "rest_start", ",", "None", ")", ",", "True", "else", ":", "main_type", "=", "'quote'", "return", "main_type", ",", "(", "None", ",", "start", ")", ",", "None", ",", "(", "start", ",", "rest_start", ")", ",", "(", "rest_start", ",", "None", ")", ",", "True" ]
46.52
25.48
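A hedged usage sketch for the unwrap() entry above. The helpers it calls (find_unwrap_start, parse_reply, join_wrapped_lines, extract_headers, unindent_lines) are not part of this entry and are assumed to be importable from the same module; the sample message, the parameter values, and the expected result are illustrative only.

# Illustrative only: assumes unwrap() and its helpers are importable.
lines = [
    'Thanks, see you then!',
    '',
    'On Mon, Jan 1, 2024 at 9:00 AM, Alice <alice@example.com> wrote:',
    '> Hi Bob,',
    '> are we still on for lunch tomorrow?',
]

typ, top, hdrs, wrapped, below, needs_unindent = unwrap(
    lines, max_wrap_lines=2, min_header_lines=2, min_quoted_lines=1)

# Expected shape of the result (exact values depend on the helpers):
#   typ            -> 'reply'
#   top            -> (0, n) range covering the visible reply text
#   hdrs           -> headers parsed from the "On ... wrote:" line
#   wrapped        -> range covering the quoted original message
#   needs_unindent -> True, because the quoted lines still carry '>' markers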
def get_merge_command(self, revision): """Get the command to merge a revision into the current branch (without committing the result).""" return [ 'git', '-c', 'user.name=%s' % self.author.name, '-c', 'user.email=%s' % self.author.email, 'merge', '--no-commit', '--no-ff', revision, ]
[ "def", "get_merge_command", "(", "self", ",", "revision", ")", ":", "return", "[", "'git'", ",", "'-c'", ",", "'user.name=%s'", "%", "self", ".", "author", ".", "name", ",", "'-c'", ",", "'user.email=%s'", "%", "self", ".", "author", ".", "email", ",", "'merge'", ",", "'--no-commit'", ",", "'--no-ff'", ",", "revision", ",", "]" ]
40
14.444444
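A small, hedged usage sketch for get_merge_command() above. The surrounding class is not shown, so the repo object, its author attribute, and the local_path used as the working directory are assumptions made only for this example.

import subprocess

# repo is assumed to be an instance of the class that defines get_merge_command().
cmd = repo.get_merge_command('feature/login')       # any branch name or commit SHA
subprocess.check_call(cmd, cwd=repo.local_path)     # local_path is an assumed attribute

# Because of --no-commit/--no-ff the merge result is only staged; the caller can
# inspect or adjust it before creating the merge commit itself.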
def parse(self, args, ignore_help=False): """Parse the (command-line) arguments.""" options = self._default_dict() seen = set() # Do not alter the arguments. We may need them later. args = copy.copy(args) while args: opt = args.pop(0) seen.add(opt) if opt in ("--help","-h"): if ignore_help: continue raise SimoptHelp if not opt in self._optiondict: raise Usage("Unrecognized option '%s'"%opt) attr, typ, num, default, flags, description = self._optiondict[opt] if num > len(args): raise Usage("Option '%s' requires %d arguments"%(opt,num)) if num: a = args.pop(0) try: val = [typ(a) for i in range(num)] except ValueError: raise Usage("Invalid argument to option '%s': %s"%(opt,repr(a))) else: # Boolean flag val = [True] if typ == bool: # A boolean option is simply set to True if given options[attr] = True elif flags & MULTI: # A multi-option adds an item or a tuple to the list options[attr] = options.get(attr, list()) options[attr].append(val[0] if num == 1 else tuple(val)) else: # Other options just set item or tuple options[attr] = val[0] if num == 1 else tuple(val) # All mandatory options should be seen missing = self.mandatory_arguments - seen if not ignore_help and missing: raise MissingMandatoryError(missing) return options
[ "def", "parse", "(", "self", ",", "args", ",", "ignore_help", "=", "False", ")", ":", "options", "=", "self", ".", "_default_dict", "(", ")", "seen", "=", "set", "(", ")", "# Do not alter the arguments. We may need them later.", "args", "=", "copy", ".", "copy", "(", "args", ")", "while", "args", ":", "opt", "=", "args", ".", "pop", "(", "0", ")", "seen", ".", "add", "(", "opt", ")", "if", "opt", "in", "(", "\"--help\"", ",", "\"-h\"", ")", ":", "if", "ignore_help", ":", "continue", "raise", "SimoptHelp", "if", "not", "opt", "in", "self", ".", "_optiondict", ":", "raise", "Usage", "(", "\"Unrecognized option '%s'\"", "%", "opt", ")", "attr", ",", "typ", ",", "num", ",", "default", ",", "flags", ",", "description", "=", "self", ".", "_optiondict", "[", "opt", "]", "if", "num", ">", "len", "(", "args", ")", ":", "raise", "Usage", "(", "\"Option '%s' requires %d arguments\"", "%", "(", "opt", ",", "num", ")", ")", "if", "num", ":", "a", "=", "args", ".", "pop", "(", "0", ")", "try", ":", "val", "=", "[", "typ", "(", "a", ")", "for", "i", "in", "range", "(", "num", ")", "]", "except", "ValueError", ":", "raise", "Usage", "(", "\"Invalid argument to option '%s': %s\"", "%", "(", "opt", ",", "repr", "(", "a", ")", ")", ")", "else", ":", "# Boolean flag", "val", "=", "[", "True", "]", "if", "typ", "==", "bool", ":", "# A boolean option is simply set to True if given", "options", "[", "attr", "]", "=", "True", "elif", "flags", "&", "MULTI", ":", "# A multi-option adds an item or a tuple to the list", "options", "[", "attr", "]", "=", "options", ".", "get", "(", "attr", ",", "list", "(", ")", ")", "options", "[", "attr", "]", ".", "append", "(", "val", "[", "0", "]", "if", "num", "==", "1", "else", "tuple", "(", "val", ")", ")", "else", ":", "# Other options just set item or tuple", "options", "[", "attr", "]", "=", "val", "[", "0", "]", "if", "num", "==", "1", "else", "tuple", "(", "val", ")", "# All mandatory options should be seen", "missing", "=", "self", ".", "mandatory_arguments", "-", "seen", "if", "not", "ignore_help", "and", "missing", ":", "raise", "MissingMandatoryError", "(", "missing", ")", "return", "options" ]
32.849057
20.150943
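An illustrative sketch of how parse() above consumes its option table. The real parser class, the MULTI flag constant, and _default_dict() are not shown in this entry, so the stand-ins below are assumptions chosen only to make the example self-contained; it presumes parse() and this snippet live in the same module.

import copy  # parse() above calls copy.copy on the argument list

MULTI = 1  # assumed value; the real flag constant lives in the parser's module

class DemoOptions:
    # option -> (attribute, type, nargs, default, flags, description)
    _optiondict = {
        '-n': ('count', int, 1, 0, 0, 'Number of items'),
        '-v': ('verbose', bool, 0, False, 0, 'Verbose output'),
        '-t': ('tags', str, 1, None, MULTI, 'May be given multiple times'),
    }
    mandatory_arguments = set()

    def _default_dict(self):
        # minimal stand-in: start from an empty dict and let parse() fill it
        return {}

DemoOptions.parse = parse  # reuse the method shown in the entry above

opts = DemoOptions().parse(['-n', '3', '-t', 'alpha', '-t', 'beta', '-v'])
# expected: {'count': 3, 'tags': ['alpha', 'beta'], 'verbose': True}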
def format_hexadecimal_field(spec, prec, number, locale): """Formats a hexadecimal field.""" if number < 0: # Take two's complement. number &= (1 << (8 * int(math.log(-number, 1 << 8) + 1))) - 1 format_ = u'0%d%s' % (int(prec or 0), spec) return format(number, format_)
[ "def", "format_hexadecimal_field", "(", "spec", ",", "prec", ",", "number", ",", "locale", ")", ":", "if", "number", "<", "0", ":", "# Take two's complement.", "number", "&=", "(", "1", "<<", "(", "8", "*", "int", "(", "math", ".", "log", "(", "-", "number", ",", "1", "<<", "8", ")", "+", "1", ")", ")", ")", "-", "1", "format_", "=", "u'0%d%s'", "%", "(", "int", "(", "prec", "or", "0", ")", ",", "spec", ")", "return", "format", "(", "number", ",", "format_", ")" ]
42.142857
12.714286
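A few worked calls for the entry above. The locale parameter is unused by the body shown, so None is passed here; the expected outputs are computed by hand from the two's-complement masking.

import math  # required by the function body above

print(format_hexadecimal_field('x', 4, 255, None))   # '00ff'  (just zero-padded)
print(format_hexadecimal_field('x', 2, -1, None))    # 'ff'    (-1 masked to one byte)
print(format_hexadecimal_field('X', 4, -256, None))  # 'FF00'  (-256 needs two bytes)

# The masking step for -256: int(math.log(256, 256) + 1) == 2 bytes, so the
# mask is (1 << 16) - 1 and -256 & 0xFFFF == 0xFF00.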
def clusters(self): """Returns the Cluster resource if supported in resources, otherwise None.""" if self._resources is None: self.__init() if "clusters" in self._resources: url = self._url + "/clusters" return _clusters.Cluster(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
[ "def", "clusters", "(", "self", ")", ":", "if", "self", ".", "_resources", "is", "None", ":", "self", ".", "__init", "(", ")", "if", "\"clusters\"", "in", "self", ".", "_resources", ":", "url", "=", "self", ".", "_url", "+", "\"/clusters\"", "return", "_clusters", ".", "Cluster", "(", "url", "=", "url", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "initialize", "=", "True", ")", "else", ":", "return", "None" ]
43.692308
14.461538
def rotate(file_name, rotate, suffix='rotated', tempdir=None): """Rotate PDF by increments of 90 degrees.""" # Set output file name if tempdir: outfn = NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False).name elif suffix: outfn = os.path.join(os.path.dirname(file_name), add_suffix(file_name, suffix)) else: outfn = NamedTemporaryFile(suffix='.pdf').name trailer = PdfReader(file_name) pages = trailer.pages ranges = [[1, len(pages)]] for onerange in ranges: onerange = (onerange + onerange[-1:])[:2] for pagenum in range(onerange[0] - 1, onerange[1]): pages[pagenum].Rotate = (int(pages[pagenum].inheritable.Rotate or 0) + rotate) % 360 outdata = PdfWriter(outfn) outdata.trailer = trailer outdata.write() return outfn
[ "def", "rotate", "(", "file_name", ",", "rotate", ",", "suffix", "=", "'rotated'", ",", "tempdir", "=", "None", ")", ":", "# Set output file name", "if", "tempdir", ":", "outfn", "=", "NamedTemporaryFile", "(", "suffix", "=", "'.pdf'", ",", "dir", "=", "tempdir", ",", "delete", "=", "False", ")", ".", "name", "elif", "suffix", ":", "outfn", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "file_name", ")", ",", "add_suffix", "(", "file_name", ",", "suffix", ")", ")", "else", ":", "outfn", "=", "NamedTemporaryFile", "(", "suffix", "=", "'.pdf'", ")", ".", "name", "trailer", "=", "PdfReader", "(", "file_name", ")", "pages", "=", "trailer", ".", "pages", "ranges", "=", "[", "[", "1", ",", "len", "(", "pages", ")", "]", "]", "for", "onerange", "in", "ranges", ":", "onerange", "=", "(", "onerange", "+", "onerange", "[", "-", "1", ":", "]", ")", "[", ":", "2", "]", "for", "pagenum", "in", "range", "(", "onerange", "[", "0", "]", "-", "1", ",", "onerange", "[", "1", "]", ")", ":", "pages", "[", "pagenum", "]", ".", "Rotate", "=", "(", "int", "(", "pages", "[", "pagenum", "]", ".", "inheritable", ".", "Rotate", "or", "0", ")", "+", "rotate", ")", "%", "360", "outdata", "=", "PdfWriter", "(", "outfn", ")", "outdata", ".", "trailer", "=", "trailer", "outdata", ".", "write", "(", ")", "return", "outfn" ]
33.875
23.833333
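A hedged usage note for rotate() above: it depends on pdfrw's PdfReader/PdfWriter, NamedTemporaryFile, and an add_suffix() helper that are not shown in this entry, and the input file name below is a placeholder.

# Rotate every page of a scan by 90 degrees; the result is written next to the
# input with the given suffix (the exact naming comes from add_suffix).
out_path = rotate('scan.pdf', 90)
print(out_path)

# Passing tempdir writes the output into that directory instead:
# out_path = rotate('scan.pdf', 180, tempdir='/tmp/pdfs')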
def fail(message, code=-1): """Fail with an error.""" print('Error: %s' % message, file=sys.stderr) sys.exit(code)
[ "def", "fail", "(", "message", ",", "code", "=", "-", "1", ")", ":", "print", "(", "'Error: %s'", "%", "message", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "code", ")" ]
30.75
11
def collect(self, top, sup, argv=None, parent=""): """ Means this element is part of a larger object, and is hence a property of that object. """ try: argv_copy = sd_copy(argv) return [self.repr(top, sup, argv_copy, parent=parent)], [] except AttributeError as exc: print("#!!!!", exc) return [], []
[ "def", "collect", "(", "self", ",", "top", ",", "sup", ",", "argv", "=", "None", ",", "parent", "=", "\"\"", ")", ":", "try", ":", "argv_copy", "=", "sd_copy", "(", "argv", ")", "return", "[", "self", ".", "repr", "(", "top", ",", "sup", ",", "argv_copy", ",", "parent", "=", "parent", ")", "]", ",", "[", "]", "except", "AttributeError", "as", "exc", ":", "print", "(", "\"#!!!!\"", ",", "exc", ")", "return", "[", "]", ",", "[", "]" ]
37.2
13
def state_set(self, state, use_active_range=False): """Sets the internal state of the df Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df # x y r 0 1 2 2.23607 >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> state = df.state_get() >>> state {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}} >>> df2 = vaex.from_scalars(x=3, y=4) >>> df2.state_set(state) # now the virtual functions are 'copied' >>> df2 # x y r 0 3 4 5 :param state: dict as returned by :meth:`DataFrame.state_get`. :param bool use_active_range: Whether to use the active range or not. """ self.description = state['description'] if use_active_range: self._index_start, self._index_end = state['active_range'] self._length_unfiltered = self._index_end - self._index_start if 'renamed_columns' in state: for old, new in state['renamed_columns']: self._rename(old, new) for name, value in state['functions'].items(): self.add_function(name, vaex.serialize.from_dict(value)) if 'column_names' in state: # we clear all columns, and add them later on, since otherwise self[name] = ... will try # to rename the columns (which is unsupported for remote dfs) self.column_names = [] self.virtual_columns = collections.OrderedDict() for name, value in state['virtual_columns'].items(): self[name] = self._expr(value) # self._save_assign_expression(name) self.column_names = state['column_names'] else: # old behaviour self.virtual_columns = collections.OrderedDict() for name, value in state['virtual_columns'].items(): self[name] = self._expr(value) self.variables = state['variables'] import astropy # TODO: make this dep optional? units = {key: astropy.units.Unit(value) for key, value in state["units"].items()} self.units.update(units) for name, selection_dict in state['selections'].items(): # TODO: make selection use the vaex.serialize framework if selection_dict is None: selection = None else: selection = selections.selection_from_dict(selection_dict) self.set_selection(selection, name=name)
[ "def", "state_set", "(", "self", ",", "state", ",", "use_active_range", "=", "False", ")", ":", "self", ".", "description", "=", "state", "[", "'description'", "]", "if", "use_active_range", ":", "self", ".", "_index_start", ",", "self", ".", "_index_end", "=", "state", "[", "'active_range'", "]", "self", ".", "_length_unfiltered", "=", "self", ".", "_index_end", "-", "self", ".", "_index_start", "if", "'renamed_columns'", "in", "state", ":", "for", "old", ",", "new", "in", "state", "[", "'renamed_columns'", "]", ":", "self", ".", "_rename", "(", "old", ",", "new", ")", "for", "name", ",", "value", "in", "state", "[", "'functions'", "]", ".", "items", "(", ")", ":", "self", ".", "add_function", "(", "name", ",", "vaex", ".", "serialize", ".", "from_dict", "(", "value", ")", ")", "if", "'column_names'", "in", "state", ":", "# we clear all columns, and add them later on, since otherwise self[name] = ... will try", "# to rename the columns (which is unsupported for remote dfs)", "self", ".", "column_names", "=", "[", "]", "self", ".", "virtual_columns", "=", "collections", ".", "OrderedDict", "(", ")", "for", "name", ",", "value", "in", "state", "[", "'virtual_columns'", "]", ".", "items", "(", ")", ":", "self", "[", "name", "]", "=", "self", ".", "_expr", "(", "value", ")", "# self._save_assign_expression(name)", "self", ".", "column_names", "=", "state", "[", "'column_names'", "]", "else", ":", "# old behaviour", "self", ".", "virtual_columns", "=", "collections", ".", "OrderedDict", "(", ")", "for", "name", ",", "value", "in", "state", "[", "'virtual_columns'", "]", ".", "items", "(", ")", ":", "self", "[", "name", "]", "=", "self", ".", "_expr", "(", "value", ")", "self", ".", "variables", "=", "state", "[", "'variables'", "]", "import", "astropy", "# TODO: make this dep optional?", "units", "=", "{", "key", ":", "astropy", ".", "units", ".", "Unit", "(", "value", ")", "for", "key", ",", "value", "in", "state", "[", "\"units\"", "]", ".", "items", "(", ")", "}", "self", ".", "units", ".", "update", "(", "units", ")", "for", "name", ",", "selection_dict", "in", "state", "[", "'selections'", "]", ".", "items", "(", ")", ":", "# TODO: make selection use the vaex.serialize framework", "if", "selection_dict", "is", "None", ":", "selection", "=", "None", "else", ":", "selection", "=", "selections", ".", "selection_from_dict", "(", "selection_dict", ")", "self", ".", "set_selection", "(", "selection", ",", "name", "=", "name", ")" ]
41.820896
17.507463
def drinkAdmins(self, objects=False): """ Returns a list of drink admin UIDs """ admins = self.group('drink', objects=objects) return admins
[ "def", "drinkAdmins", "(", "self", ",", "objects", "=", "False", ")", ":", "admins", "=", "self", ".", "group", "(", "'drink'", ",", "objects", "=", "objects", ")", "return", "admins" ]
33.8
7
def add_arguments(parser): """ Adds arguments for the swap urls command """ parser.add_argument('-o', '--old-environment', help='Old environment name', required=True) parser.add_argument('-n', '--new-environment', help='New environment name', required=True)
[ "def", "add_arguments", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-o'", ",", "'--old-environment'", ",", "help", "=", "'Old environment name'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--new-environment'", ",", "help", "=", "'New environment name'", ",", "required", "=", "True", ")" ]
45.333333
21
def deploy(self): """ Open a ZIP archive, validate requirements then deploy the webfont into project static files """ self._info("* Opening archive: {}", self.archive_path) if not os.path.exists(self.archive_path): self._error("Given path does not exists: {}", self.archive_path) with zipfile.ZipFile(self.archive_path, 'r') as zip_archive: font_dir = self.requirements['font_dir']+'/' allowed_extensions = ['.'+item for item in self.requirements['extensions']] members = [member for member in zip_archive.namelist()] if settings.ICOMOON_MANIFEST_FILENAME not in members: raise self._error("Icomoon archive must contain a JSON manifest '{}'", settings.ICOMOON_MANIFEST_FILENAME) if font_dir not in members: raise self._error("Icomoon archive must contain the font directory '{}'", font_dir) # Scan for supported font files font_files = [] for item in members: # Dont catch the font_dir itself nor sub directories, just files with allowed extensions if item.startswith(font_dir) and not item.endswith('/') and os.path.splitext(item)[-1] in allowed_extensions: font_files.append(item) if not font_files: self._error("Font dir does not contain any supported format: {}", ', '.join(allowed_extensions)) else: self._debug("* Finded font files in archive: {}", ', '.join(font_files)) # Extract files from archive tmp_container, css_content = self.extract(zip_archive, font_files) # Install files self.install(tmp_container, font_dir, css_content)
[ "def", "deploy", "(", "self", ")", ":", "self", ".", "_info", "(", "\"* Opening archive: {}\"", ",", "self", ".", "archive_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "archive_path", ")", ":", "self", ".", "_error", "(", "\"Given path does not exists: {}\"", ",", "self", ".", "archive_path", ")", "with", "zipfile", ".", "ZipFile", "(", "self", ".", "archive_path", ",", "'r'", ")", "as", "zip_archive", ":", "font_dir", "=", "self", ".", "requirements", "[", "'font_dir'", "]", "+", "'/'", "allowed_extensions", "=", "[", "'.'", "+", "item", "for", "item", "in", "self", ".", "requirements", "[", "'extensions'", "]", "]", "members", "=", "[", "member", "for", "member", "in", "zip_archive", ".", "namelist", "(", ")", "]", "if", "settings", ".", "ICOMOON_MANIFEST_FILENAME", "not", "in", "members", ":", "raise", "self", ".", "_error", "(", "\"Icomoon archive must contain a JSON manifest '{}'\"", ",", "settings", ".", "ICOMOON_MANIFEST_FILENAME", ")", "if", "font_dir", "not", "in", "members", ":", "raise", "self", ".", "_error", "(", "\"Icomoon archive must contain the font directory '{}'\"", ",", "font_dir", ")", "# Scan for supported font files", "font_files", "=", "[", "]", "for", "item", "in", "members", ":", "# Dont catch the font_dir itself nor sub directories, just files with allowed extensions", "if", "item", ".", "startswith", "(", "font_dir", ")", "and", "not", "item", ".", "endswith", "(", "'/'", ")", "and", "os", ".", "path", ".", "splitext", "(", "item", ")", "[", "-", "1", "]", "in", "allowed_extensions", ":", "font_files", ".", "append", "(", "item", ")", "if", "not", "font_files", ":", "self", ".", "_error", "(", "\"Font dir does not contain any supported format: {}\"", ",", "', '", ".", "join", "(", "allowed_extensions", ")", ")", "else", ":", "self", ".", "_debug", "(", "\"* Finded font files in archive: {}\"", ",", "', '", ".", "join", "(", "font_files", ")", ")", "# Extract files from archive", "tmp_container", ",", "css_content", "=", "self", ".", "extract", "(", "zip_archive", ",", "font_files", ")", "# Install files", "self", ".", "install", "(", "tmp_container", ",", "font_dir", ",", "css_content", ")" ]
47.432432
29.918919
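The deploy() method above is bound to a class whose other members (requirements, archive_path, extract, install, logging helpers) are not part of this entry, so a full call cannot be reproduced here. The standalone snippet below only re-illustrates the archive validation step it performs; the archive path, manifest name, font directory, and extensions are assumed placeholder values.

import os
import zipfile

ARCHIVE = 'icomoon.zip'       # placeholder path
MANIFEST = 'selection.json'   # assumed manifest name
FONT_DIR = 'fonts/'
ALLOWED = ['.ttf', '.woff', '.eot', '.svg']

with zipfile.ZipFile(ARCHIVE, 'r') as zf:
    members = zf.namelist()
    if MANIFEST not in members:
        raise ValueError('archive must contain a JSON manifest %r' % MANIFEST)
    if FONT_DIR not in members:
        raise ValueError('archive must contain the font directory %r' % FONT_DIR)
    # keep only real files under the font dir with an allowed extension
    font_files = [m for m in members
                  if m.startswith(FONT_DIR) and not m.endswith('/')
                  and os.path.splitext(m)[-1] in ALLOWED]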
def pairwise_compare(afa, leven, threads, print_list, ignore_gaps): """ make pairwise sequence comparisons between aligned sequences """ # load sequences into dictionary seqs = {seq[0]: seq for seq in nr_fasta([afa], append_index = True)} num_seqs = len(seqs) # define all pairs pairs = ((i[0], i[1], ignore_gaps) for i in itertools.combinations(list(seqs.values()), 2)) pool = multithread(threads) # calc percent identity between all pairs - parallelize if leven is True: pident = pool.map(compare_seqs_leven, pairs) else: compare = pool.imap_unordered(compare_seqs, pairs) pident = [i for i in tqdm(compare, total = (num_seqs*num_seqs)/2)] pool.close() pool.terminate() pool.join() return to_dictionary(pident, print_list)
[ "def", "pairwise_compare", "(", "afa", ",", "leven", ",", "threads", ",", "print_list", ",", "ignore_gaps", ")", ":", "# load sequences into dictionary", "seqs", "=", "{", "seq", "[", "0", "]", ":", "seq", "for", "seq", "in", "nr_fasta", "(", "[", "afa", "]", ",", "append_index", "=", "True", ")", "}", "num_seqs", "=", "len", "(", "seqs", ")", "# define all pairs", "pairs", "=", "(", "(", "i", "[", "0", "]", ",", "i", "[", "1", "]", ",", "ignore_gaps", ")", "for", "i", "in", "itertools", ".", "combinations", "(", "list", "(", "seqs", ".", "values", "(", ")", ")", ",", "2", ")", ")", "pool", "=", "multithread", "(", "threads", ")", "# calc percent identity between all pairs - parallelize", "if", "leven", "is", "True", ":", "pident", "=", "pool", ".", "map", "(", "compare_seqs_leven", ",", "pairs", ")", "else", ":", "compare", "=", "pool", ".", "imap_unordered", "(", "compare_seqs", ",", "pairs", ")", "pident", "=", "[", "i", "for", "i", "in", "tqdm", "(", "compare", ",", "total", "=", "(", "num_seqs", "*", "num_seqs", ")", "/", "2", ")", "]", "pool", ".", "close", "(", ")", "pool", ".", "terminate", "(", ")", "pool", ".", "join", "(", ")", "return", "to_dictionary", "(", "pident", ",", "print_list", ")" ]
39.65
19.55
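pairwise_compare() above hands each (seqA, seqB, ignore_gaps) tuple to a compare_seqs() worker that is not included in this entry. The sketch below is only a guess at what such a worker could look like; the (name, sequence) layout of each item and the returned triple are assumptions for illustration, not the author's implementation.

def compare_seqs(pair):
    """Percent identity between two aligned sequences (illustrative sketch)."""
    seq_a, seq_b, ignore_gaps = pair
    a, b = seq_a[1].upper(), seq_b[1].upper()   # assumes items look like (name, sequence)
    matches = total = 0
    for x, y in zip(a, b):                      # both come from the same alignment
        if ignore_gaps and '-' in (x, y):
            continue
        total += 1
        matches += int(x == y)
    pident = 100.0 * matches / total if total else 0.0
    return seq_a[0], seq_b[0], pident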