Dataset columns:
    text          string   (89 to 104k chars; function source, shown per sample)
    code_tokens   list     (token sequence of text)
    avg_line_len  float64  (7.91 to 980)
    score         float64  (0 to 630)
def create(cls, location=None, storage_class=None, **kwargs):
    r"""Create a bucket.

    :param location: Location of a bucket (instance or name).
        Default: Default location.
    :param storage_class: Storage class of a bucket.
        Default: Default storage class.
    :param \**kwargs: Keyword arguments are forwarded to the class
        constructor.
    :returns: Created bucket.
    """
    with db.session.begin_nested():
        if location is None:
            location = Location.get_default()
        elif isinstance(location, six.string_types):
            location = Location.get_by_name(location)

        obj = cls(
            default_location=location.id,
            default_storage_class=storage_class or current_app.config[
                'FILES_REST_DEFAULT_STORAGE_CLASS'],
            **kwargs
        )
        db.session.add(obj)
    return obj
avg_line_len: 39.038462, score: 16.461538
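A minimal usage sketch for the method above, assuming cls is a Bucket-style SQLAlchemy model and an application context is active; the location name and storage class are illustrative:

# hypothetical call site, inside an application context
bucket = Bucket.create(location='archive', storage_class='S')
db.session.commit()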
def verifyInputs(self, mode):
    """Goes through and checks all stimuli and input settings are valid
    and consistent. Prompts user with a message if there is a condition
    that would prevent acquisition.

    :param mode: The mode of acquisition trying to be run. Options are
        'chart', or anything else ('explore', 'protocol', 'calibration')
    :type mode: str
    :returns: bool -- Whether all inputs and stimuli are valid
    """
    if len(self._aichans) < 1:
        failmsg = "Must have at least one input channel selected"
        QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg)
        return False
    if mode == 'chart':
        if self.ui.aifsSpnbx.value()*self.fscale > 100000:
            QtGui.QMessageBox.warning(self, "Invalid Input",
                "Recording samplerate cannot exceed 100kHz for chart acquisition")
            return False
    elif mode is not None:
        # if (1./self.ui.reprateSpnbx.value()) < self.ui.windowszSpnbx.value()*self.tscale + 0.05:
        #     QtGui.QMessageBox.warning(self, "Invalid Input", "A minimum of 50ms time between repetitions required. Current interval {}, required {}".format((1./self.ui.reprateSpnbx.value()), self.ui.windowszSpnbx.value()*self.tscale + 0.05))
        #     return False
        if self.ui.tabGroup.currentWidget().objectName() == 'tabExplore':
            # each widget should be in charge of putting its own stimulus together
            self.ui.exploreStimEditor.saveToObject()
            failmsg = self.ui.exploreStimEditor.verify(self.ui.windowszSpnbx.value())
            if failmsg:
                QtGui.QMessageBox.warning(self, "Invalid Input", failmsg)
                return False
            # if selectedStim.intensity() > self.calvals['caldb']:
            #     QtGui.QMessageBox.warning(self, "Invalid Input",
            #         "Intensity must be below calibrated maximum {}dB SPL".format(self.calvals['caldb']))
            #     return False
        elif self.ui.tabGroup.currentWidget().objectName() == 'tabProtocol':
            protocol_model = self.acqmodel.protocol_model()
            # protocol delegates to each test to verify itself and report
            failure = protocol_model.verify(float(self.ui.windowszSpnbx.value()))
            if failure:
                QtGui.QMessageBox.warning(self, "Invalid Input", failure)
                return False
        elif self.ui.tabGroup.currentWidget().objectName() == 'tabCalibrate':
            if len(self._aichans) > 1:
                failmsg = "Speaker calibration only supported for single channel, currently {} channels selected; select 1 input channel.".format(len(self._aichans))
                QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg)
                return False
            # get what stimulus is about to be presented
            if self.ui.calibrationWidget.ui.savecalCkbx.isChecked() or not self.ui.calibrationWidget.currentSelection() == 'Tone Curve':
                calibration_stimulus = self.acqmodel.calibration_stimulus('noise')
                self.ui.calibrationWidget.saveToObject()
            else:
                calibration_stimulus = self.acqmodel.calibration_stimulus('tone')
            failmsg = calibration_stimulus.verify(float(self.ui.windowszSpnbx.value()))
            if failmsg:
                QtGui.QMessageBox.warning(self, "Invalid Input", failmsg)
                return False
            # also check that the recording samplerate is high enough in this case
            failmsg = calibration_stimulus.verifyExpanded(samplerate=self.ui.aifsSpnbx.value())
            if failmsg:
                failmsg = failmsg.replace('Generation', 'Recording')
                QtGui.QMessageBox.warning(self, "Invalid Input", failmsg)
                return False
        if self.advanced_options['use_attenuator'] and not self.acqmodel.attenuator_connection():
            failmsg = "Error connecting to attenuator; make sure it is turned on and connected, and try again"
            QtGui.QMessageBox.warning(self, "Connection Error", failmsg)
            return False
    return True
avg_line_len: 64.298507, score: 33.940299
def _detect(self):
    """ Detect state variables that could be const """
    results = []
    all_info = ''

    all_variables = [c.state_variables for c in self.slither.contracts]
    all_variables = set([item for sublist in all_variables for item in sublist])
    all_non_constant_elementary_variables = set([v for v in all_variables
                                                 if self._valid_candidate(v)])

    all_functions = [c.all_functions_called for c in self.slither.contracts]
    all_functions = list(set([item for sublist in all_functions for item in sublist]))
    all_variables_written = [f.state_variables_written for f in all_functions]
    all_variables_written = set([item for sublist in all_variables_written for item in sublist])

    constable_variables = [v for v in all_non_constant_elementary_variables
                           if (v not in all_variables_written) and self._constant_initial_expression(v)]
    # Order for deterministic results
    constable_variables = sorted(constable_variables, key=lambda x: x.canonical_name)

    for v in constable_variables:
        info = "{}.{} should be constant ({})\n".format(v.contract.name, v.name, v.source_mapping_str)
        all_info += info

    if all_info != '':
        json = self.generate_json_result(all_info)
        self.add_variables_to_json(constable_variables, json)
        results.append(json)

    return results
avg_line_len: 51.580645, score: 29.870968
def _print_memory(self, memory):
    """Print memory.
    """
    for addr, value in memory.items():
        print("    0x%08x : 0x%08x (%d)" % (addr, value, value))
avg_line_len: 35.4, score: 7.6
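The format string above zero-pads addresses and values to 8 hex digits; a standalone sketch of the same output, using a plain dict in place of the object's memory map:

memory = {0x1000: 42, 0x1004: 0xdeadbeef}
for addr, value in memory.items():
    print("    0x%08x : 0x%08x (%d)" % (addr, value, value))
# prints:
#     0x00001000 : 0x0000002a (42)
#     0x00001004 : 0xdeadbeef (3735928559)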
def delete_records_safely_by_xml_id(env, xml_ids):
    """This removes in the safest possible way the records whose XML-IDs are
    passed as argument.

    :param xml_ids: List of XML-ID string identifiers of the records to remove.
    """
    for xml_id in xml_ids:
        logger.debug('Deleting record for XML-ID %s', xml_id)
        try:
            with env.cr.savepoint():
                env.ref(xml_id).exists().unlink()
        except Exception as e:
            logger.error('Error deleting XML-ID %s: %s', xml_id, repr(e))
avg_line_len: 40.153846, score: 17.307692
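A hypothetical call, e.g. from a migration script where an Odoo environment env is in scope; the XML-IDs below are illustrative:

delete_records_safely_by_xml_id(env, ['my_module.obsolete_menu', 'my_module.obsolete_view'])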
def unsubscribe(self, *args):
    """
    Unsubscribe from the supplied channels. If empty, unsubscribe from
    all channels
    """
    if args:
        args = list_or_args(args[0], args[1:])
        channels = self._normalize_keys(dict.fromkeys(args))
    else:
        channels = self.channels
    self.pending_unsubscribe_channels.update(channels)
    return self.execute_command('UNSUBSCRIBE', *args)
avg_line_len: 36.583333, score: 15.75
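A short usage sketch with redis-py, where a method like this lives on the PubSub class; channel names are illustrative and a running Redis server is assumed:

import redis

r = redis.Redis()
p = r.pubsub()
p.subscribe('news', 'sports')
p.unsubscribe('news')  # leave a single channel
p.unsubscribe()        # no args: leave all remaining channels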
def get(self, name):
    """ creates connection to the MQ with process-specific settings
    :return :mq::flopsy::Publisher instance"""
    if name not in self.pools:
        self.pools[name] = _Pool(logger=self.logger, name=name)
    return self.pools[name].get()
avg_line_len: 46.5, score: 9.333333
def audiorate(filename):
    """Determines the samplerate of the given audio recording file

    :param filename: filename of the audiofile
    :type filename: str
    :returns: int -- samplerate of the recording
    """
    if '.wav' in filename.lower():
        wf = wave.open(filename)
        fs = wf.getframerate()
        wf.close()
    elif '.call' in filename.lower():
        fs = 333333
    else:
        raise IOError("Unsupported audio format for file: {}".format(filename))
    return fs
avg_line_len: 28.529412, score: 17.294118
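A self-contained check: write a one-second silent WAV with the standard wave module, then read its samplerate back (the file name is illustrative):

import wave

with wave.open('silence.wav', 'wb') as wf:
    wf.setnchannels(1)
    wf.setsampwidth(2)        # 16-bit samples
    wf.setframerate(44100)
    wf.writeframes(b'\x00\x00' * 44100)

print(audiorate('silence.wav'))  # 44100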
def main():
    """ Parse the arguments and use them to create an ExistCli object """
    version = 'Python Exist %s' % __version__
    arguments = docopt(__doc__, version=version)
    ExistCli(arguments)
avg_line_len: 39.8, score: 11.8
def check_module(mod_name):
    """Calls sys.exit() if the module *mod_name* is not found."""
    # Special cases #
    if mod_name in module_name:
        mod_name = module_name[mod_name]
    # Use a try except block #
    try:
        __import__(mod_name)
    except ImportError as e:
        if str(e) != 'No module named %s' % mod_name:
            raise e
        print('You do not seem to have the "%s" package properly installed.'
              ' Either you never installed it or your $PYTHONPATH is not set up correctly.'
              ' For more instructions see the README file. (%s)' % (mod_name, e))
        sys.exit()
avg_line_len: 46.230769, score: 22.769231
def plot(self, title='TimeMoc', view=(None, None)):
    """
    Plot the TimeMoc in a time window.

    This method uses interactive matplotlib. The user can move its mouse through the plot to see the
    time (at the mouse position).

    Parameters
    ----------
    title : str, optional
        The title of the plot. Set to 'TimeMoc' by default.
    view : (`~astropy.time.Time`, `~astropy.time.Time`), optional
        Define the view window in which the observations are plotted. Set to (None, None) by default
        (i.e. all the observation time window is rendered).
    """
    from matplotlib.colors import LinearSegmentedColormap
    import matplotlib.pyplot as plt

    if self._interval_set.empty():
        print('Nothing to print. This TimeMoc object is empty.')
        return

    plot_order = 15
    if self.max_order > plot_order:
        plotted_moc = self.degrade_to_order(plot_order)
    else:
        plotted_moc = self

    min_jd = plotted_moc.min_time.jd if not view[0] else view[0].jd
    max_jd = plotted_moc.max_time.jd if not view[1] else view[1].jd

    if max_jd < min_jd:
        raise ValueError("Invalid selection: max_jd = {0} must be > to min_jd = {1}".format(max_jd, min_jd))

    fig1 = plt.figure(figsize=(9.5, 5))
    ax = fig1.add_subplot(111)

    ax.set_xlabel('iso')
    ax.get_yaxis().set_visible(False)

    size = 2000
    delta = (max_jd - min_jd) / size
    min_jd_time = min_jd

    ax.set_xticks([0, size])
    ax.set_xticklabels(Time([min_jd_time, max_jd], format='jd', scale='tdb').iso, rotation=70)

    y = np.zeros(size)
    for (s_time_us, e_time_us) in plotted_moc._interval_set._intervals:
        s_index = int((s_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
        e_index = int((e_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
        y[s_index:(e_index+1)] = 1.0

    # hack in case of full time mocs.
    if np.all(y):
        y[0] = 0

    z = np.tile(y, (int(size//10), 1))

    plt.title(title)

    color_map = LinearSegmentedColormap.from_list('w2r', ['#fffff0', '#aa0000'])
    color_map.set_under('w')
    color_map.set_bad('gray')

    plt.imshow(z, interpolation='bilinear', cmap=color_map)

    def on_mouse_motion(event):
        for txt in ax.texts:
            txt.set_visible(False)

        text = ax.text(0, 0, "", va="bottom", ha="left")
        time = Time(event.xdata * delta + min_jd_time, format='jd', scale='tdb')

        tx = '{0}'.format(time.iso)
        text.set_position((event.xdata - 50, 700))
        text.set_rotation(70)
        text.set_text(tx)

    cid = fig1.canvas.mpl_connect('motion_notify_event', on_mouse_motion)

    plt.show()
avg_line_len: 33.571429, score: 24.5
def find_path_package_name(thepath):
    """
    Takes a file system path and returns the name of the python package
    the said path belongs to. If the said path can not be determined, it
    returns None.
    """
    module_found = False
    last_module_found = None
    continue_ = True
    while continue_:
        module_found = is_path_python_module(thepath)
        next_path = path.dirname(thepath)
        if next_path == thepath:
            continue_ = False
        if module_found:
            init_names = ['__init__%s' % suffix.lower() for suffix in _py_suffixes]
            if path.basename(thepath).lower() in init_names:
                last_module_found = path.basename(path.dirname(thepath))
            else:
                last_module_found = path.basename(thepath)
        if last_module_found and not module_found:
            continue_ = False
        thepath = next_path
    return last_module_found
avg_line_len: 38.25, score: 16.416667
def new_stats_exporter(option):
    """ new_stats_exporter returns an exporter that exports stats to Prometheus.
    """
    if option.namespace == "":
        raise ValueError("Namespace can not be empty string.")

    collector = new_collector(option)

    exporter = PrometheusStatsExporter(options=option,
                                       gatherer=option.registry,
                                       collector=collector)
    return exporter
avg_line_len: 34.307692, score: 15.769231
def endpoint_name(self, endpoint_name):
    """
    Sets the endpoint_name of this PreSharedKey.
    The unique endpoint identifier that this pre-shared key applies to. 16-64
    [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.

    :param endpoint_name: The endpoint_name of this PreSharedKey.
    :type: str
    """
    if endpoint_name is None:
        raise ValueError("Invalid value for `endpoint_name`, must not be `None`")
    if endpoint_name is not None and not re.search('^[ -~]{16,64}$', endpoint_name):
        raise ValueError("Invalid value for `endpoint_name`, must be a follow pattern or equal to `/^[ -~]{16,64}$/`")

    self._endpoint_name = endpoint_name
avg_line_len: 53.857143, score: 33.714286
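The validation pattern can be exercised on its own; ^[ -~]{16,64}$ matches 16-64 printable ASCII characters and nothing else (example strings are illustrative):

import re

pattern = r'^[ -~]{16,64}$'
print(bool(re.search(pattern, 'device-0001-production')))     # True: 22 printable chars
print(bool(re.search(pattern, 'too-short')))                  # False: only 9 chars
print(bool(re.search(pattern, 'naïve-endpoint-identifier')))  # False: 'ï' is outside [ -~]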
def read_double(self, little_endian=True):
    """Read 8 bytes as a double value from the stream.

    Args:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        float:
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.unpack("%sd" % endian, 8)
avg_line_len: 25.266667, score: 19.4
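What the endianness prefix does can be shown with the struct module alone, independent of the stream class (self.unpack is presumably a thin wrapper over this):

import struct

data = struct.pack("<d", 3.14)       # 8 bytes, little-endian IEEE 754 double
print(struct.unpack("<d", data)[0])  # 3.14
print(struct.unpack(">d", data)[0])  # nonsense value: same bytes read big-endian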
def _escapify(qstring):
    """Escape the characters in a quoted string which need it.

    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """
    text = ''
    for c in qstring:
        if c in __escaped:
            text += '\\' + c
        elif ord(c) >= 0x20 and ord(c) < 0x7F:
            text += c
        else:
            text += '\\%03d' % ord(c)
    return text
avg_line_len: 23.166667, score: 16.888889
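A runnable sketch of the same escaping logic, under the assumption that the module-level __escaped set holds the quote and backslash characters (its real contents live in the defining module):

__escaped = {'"', '\\'}

text = 'say "hi"\t'
out = ''
for c in text:
    if c in __escaped:
        out += '\\' + c
    elif 0x20 <= ord(c) < 0x7F:
        out += c
    else:
        out += '\\%03d' % ord(c)  # non-printable chars become decimal escapes
print(out)  # say \"hi\"\009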
def _execute(self, ifile, process):
    """ Execution loop

    :param ifile: Input file object. Unused.
    :type ifile: file

    :return: `None`.
    """
    if self._protocol_version == 2:
        result = self._read_chunk(ifile)

        if not result:
            return

        metadata, body = result
        action = getattr(metadata, 'action', None)

        if action != 'execute':
            raise RuntimeError('Expected execute action, not {}'.format(action))

    self._record_writer.write_records(self.generate())
    self.finish()
avg_line_len: 25.521739, score: 20.26087
def parse_name(name):
    """Parses a subject string as used in OpenSSLs command line utilities.

    The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example
    ``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient
    on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as
    ``CN``, whitespace at the start and end is stripped and the subject does not have to start with a
    slash (``/``).

    >>> parse_name('/CN=example.com')
    [('CN', 'example.com')]
    >>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com')
    [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]

    Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be
    sorted based on x509 name specifications regardless of the given order:

    >>> parse_name('L="Vienna / District"/[email protected]')
    [('L', 'Vienna / District'), ('emailAddress', '[email protected]')]
    >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')
    True

    Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including
    slashes, so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected.

    >>> parse_name('L="Vienna / District"/CN=example.com')
    [('L', 'Vienna / District'), ('CN', 'example.com')]

    But note that it's still easy to trick this function, if you really want to. The following example
    is *not* a valid subject, the location is just bogus, and whatever you were expecting as output,
    it's certainly different:

    >>> parse_name('L="Vienna " District"/CN=example.com')
    [('L', 'Vienna'), ('CN', 'example.com')]

    Examples of where this string is used are:

    .. code-block:: console

        # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'
        # openssl x509 -in cert.pem -noout -subject -nameopt compat
        /C=AT/L=Vienna/CN=example.com
    """
    name = name.strip()
    if not name:  # empty subjects are ok
        return []

    try:
        items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]
    except KeyError as e:
        raise ValueError('Unknown x509 name field: %s' % e.args[0])

    # Check that no OIDs not in MULTIPLE_OIDS occur more than once
    for key, oid in NAME_OID_MAPPINGS.items():
        if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:
            raise ValueError('Subject contains multiple "%s" fields' % key)

    return sort_name(items)
avg_line_len: 46.035088, score: 32.649123
def set_limits(self, limits):
    """
    Set the limit data to the given list of limits. Limits are
    specified as the raw msgpack string representing the limit.
    Computes the checksum of the limits; if the checksum is
    identical to the current one, no action is taken.
    """
    # First task, build the checksum of the new limits
    chksum = hashlib.md5()  # sufficient for our purposes
    for lim in limits:
        chksum.update(lim)
    new_sum = chksum.hexdigest()

    # Now install it
    with self.limit_lock:
        if self.limit_sum == new_sum:
            # No changes
            return
        self.limit_data = [msgpack.loads(lim) for lim in limits]
        self.limit_sum = new_sum
avg_line_len: 36.380952, score: 16.285714
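The checksum short-circuit can be isolated: hashing the raw strings gives a cheap change detector, so an identical limit list is never re-parsed. A minimal sketch:

import hashlib

def digest(limits):
    chksum = hashlib.md5()
    for lim in limits:
        chksum.update(lim)
    return chksum.hexdigest()

print(digest([b'limit-1', b'limit-2']) == digest([b'limit-1', b'limit-2']))  # True: skip reload
print(digest([b'limit-1', b'limit-2']) == digest([b'limit-1', b'limit-3']))  # False: reinstall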
def _terminal_use_capability(capability_name):
    """
    If the terminal supports the given capability, output it.
    Return whether it was output.
    """
    curses.setupterm()
    capability = curses.tigetstr(capability_name)
    if capability:
        sys.stdout.write(_unicode(capability))
    return bool(capability)
avg_line_len: 31.7, score: 13.3
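The same curses calls in standalone form; 'bold' is a standard terminfo string capability, and tigetstr() returns the raw escape sequence (or None when unsupported):

import curses
import sys

curses.setupterm()
cap = curses.tigetstr('bold')
if cap:
    sys.stdout.write(cap.decode('ascii'))
    print('rendered bold on capable terminals')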
def activate_network_interface(iface):
    """Bring up the given network interface.

    @raise OSError: if interface does not exist or permissions are missing
    """
    iface = iface.encode()

    SIOCGIFFLAGS = 0x8913  # /usr/include/bits/ioctls.h
    SIOCSIFFLAGS = 0x8914  # /usr/include/bits/ioctls.h
    IFF_UP = 0x1  # /usr/include/net/if.h

    # We need to use instances of "struct ifreq" for communicating with the kernel.
    # This struct is complex with a big contained union, we define here only the few necessary
    # fields for the two cases we need.
    # The layout is given in the format used by the struct module:
    STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY = b"16sH14s"  # ifr_name, ifr_addr.sa_family, padding
    STRUCT_IFREQ_LAYOUT_IFFLAGS = b"16sH14s"  # ifr_name, ifr_flags, padding

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)
    try:
        # Get current interface flags from kernel
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY, iface, socket.AF_INET, b'0' * 14)
        ifreq = fcntl.ioctl(sock, SIOCGIFFLAGS, ifreq)
        if_flags = struct.unpack(STRUCT_IFREQ_LAYOUT_IFFLAGS, ifreq)[1]

        # Set new flags
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFFLAGS, iface, if_flags | IFF_UP, b'0' * 14)
        fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
    finally:
        sock.close()
avg_line_len: 46.137931, score: 26.034483
def show_devices(verbose=False, **kwargs):
    """Show information about connected devices.

    The verbose flag sets to verbose or not.
    **kwargs are passed directly to the find() function.
    """
    kwargs["find_all"] = True
    devices = find(**kwargs)
    strings = ""
    for device in devices:
        if not verbose:
            strings += "%s, %s\n" % (device._str(), _try_lookup(
                _lu.device_classes, device.bDeviceClass))
        else:
            strings += "%s\n\n" % str(device)

    return _DescriptorInfo(strings)
avg_line_len: 31.411765, score: 15.235294
def dbtemplate_save(sender, instance, created, **kwargs):
    """create widget/page content/base theme from given db template::

        /widget/icon/my_awesome.html
        /base/widget/my_new_widget_box.html
        /base/page/my_new_page_layout.html

    """
    if created:
        if 'widget' in instance.name:
            name = instance.name.split('/')[-1]
            kwargs = {
                'name': name.split('.')[0],
                'label': name.split('.')[0].capitalize(),
                'template': instance,
            }
            if 'base/widget' in instance.name:
                from leonardo.module.web.models import WidgetBaseTheme
                theme_cls = WidgetBaseTheme
            else:
                from leonardo.module.web.models import WidgetContentTheme
                theme_cls = WidgetContentTheme
                from leonardo.utils.widgets import find_widget_class
                w_cls_name = instance.name.split('/')[-2]
                w_cls = find_widget_class(w_cls_name)
                if w_cls is None:
                    raise Exception('widget class for %s not found' % w_cls_name)
                kwargs['widget_class'] = w_cls.__name__
            theme_cls(**kwargs).save()
        if 'base/page' in instance.name:
            from leonardo.module.web.models import PageTheme
            page_theme = PageTheme()
            page_theme.label = '{} layout'.format(
                instance.name.split("/")[-1].split('.')[0].title())
            page_theme.name = instance.name.split("/")[-1]
            page_theme.template = instance
            page_theme.save()
avg_line_len: 42.891892, score: 14.189189
def add_tip_lines_to_axes(self):
    "add lines to connect tips to zero axis for tip_labels_align=True"
    # get tip-coords and align-coords from verts
    xpos, ypos, aedges, averts = self.get_tip_label_coords()
    if self.style.tip_labels_align:
        self.axes.graph(
            aedges,
            vcoordinates=averts,
            estyle=self.style.edge_align_style,
            vlshow=False,
            vsize=0,
        )
avg_line_len: 35.923077, score: 16.846154
def find_n90(contig_lengths_dict, genome_length_dict):
    """
    Calculate the N90 for each strain. N90 is defined as the largest contig such that at least 9/10 of
    the total genome size is contained in contigs equal to or larger than this contig
    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: n90_dict: dictionary of strain name: N90
    """
    # Initialise the dictionary
    n90_dict = dict()
    for file_name, contig_lengths in contig_lengths_dict.items():
        currentlength = 0
        for contig_length in contig_lengths:
            currentlength += contig_length
            # If the current length is now at least 9/10 of the total genome length, the current
            # contig length is the N90
            if currentlength >= genome_length_dict[file_name] * 0.9:
                n90_dict[file_name] = contig_length
                break
    return n90_dict
avg_line_len: 50.7, score: 23.9
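A worked toy case: with contigs 50+30+10+10 summing to 100, the running total first reaches 9/10 of the genome (90) at the third contig, so N90 is 10:

contig_lengths_dict = {'strainA': [50, 30, 10, 10]}
genome_length_dict = {'strainA': 100}
print(find_n90(contig_lengths_dict, genome_length_dict))  # {'strainA': 10}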
def get_transformation_matrix(self, theta):
    """ Computes the homogeneous transformation matrix for this link. """
    ct = numpy.cos(theta + self.theta)
    st = numpy.sin(theta + self.theta)
    ca = numpy.cos(self.alpha)
    sa = numpy.sin(self.alpha)

    return numpy.matrix(((ct, -st * ca, st * sa, self.a * ct),
                         (st, ct * ca, -ct * sa, self.a * st),
                         (0, sa, ca, self.d),
                         (0, 0, 0, 1)))
avg_line_len: 45.090909, score: 11.181818
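This is the classic Denavit-Hartenberg link transform; with link parameters $a$, $\alpha$, $d$ and total joint angle $\theta$ (the argument plus the link's fixed offset), the matrix the code builds is:

$$A = \begin{pmatrix} \cos\theta & -\sin\theta\cos\alpha & \sin\theta\sin\alpha & a\cos\theta \\ \sin\theta & \cos\theta\cos\alpha & -\cos\theta\sin\alpha & a\sin\theta \\ 0 & \sin\alpha & \cos\alpha & d \\ 0 & 0 & 0 & 1 \end{pmatrix}$$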
def mint_token_if_balance_low(
        token_contract: ContractProxy,
        target_address: str,
        min_balance: int,
        fund_amount: int,
        gas_limit: int,
        mint_msg: str,
        no_action_msg: str = None,
) -> Optional[TransactionHash]:
    """ Check token balance and mint if below minimum """
    balance = token_contract.contract.functions.balanceOf(target_address).call()
    if balance < min_balance:
        mint_amount = fund_amount - balance
        log.debug(mint_msg, address=target_address, amount=mint_amount)
        return token_contract.transact('mintFor', gas_limit, mint_amount, target_address)
    else:
        if no_action_msg:
            log.debug(no_action_msg, balance=balance)

    return None
avg_line_len: 36.35, score: 18.1
def permitted_query(self, query, group, operations):
    '''Change the ``query`` so that only instances for which ``group``
    has roles with permission on ``operations`` are returned.'''
    session = query.session
    models = session.router
    user = group.user
    if user.is_superuser:  # super-users have all permissions
        return query
    roles = group.roles.query()  # query on all roles for group
    # The through model for Role/Permission relationship
    through_model = models.role.permissions.model
    models[through_model].filter(role=roles,
                                 permission__model_type=query.model,
                                 permission__operations=operations)
    # query on all relevant permissions
    permissions = models.permission.filter(model_type=query.model,
                                           level=operations)
    owner_query = query.filter(user=user)
    # all roles for the query model with appropriate permission level
    roles = models.role.filter(model_type=query.model, level__ge=level)
    # Now we need groups which have these roles
    groups = Role.groups.throughquery(
        session).filter(role=roles).get_field('group')
    # I need to know if user is in any of these groups
    if user.groups.filter(id=groups).count():
        # it is, lets get the model with permissions less
        # or equal permission level
        permitted = models.instancerole.filter(
            role=roles).get_field('object_id')
        return owner_query.union(model.objects.filter(id=permitted))
    else:
        return owner_query
avg_line_len: 48.8, score: 18.628571
def xrdb(xrdb_files=None):
    """Merge the colors into the X db so new terminals use them."""
    xrdb_files = xrdb_files or \
        [os.path.join(CACHE_DIR, "colors.Xresources")]

    if shutil.which("xrdb") and OS != "Darwin":
        for file in xrdb_files:
            subprocess.run(["xrdb", "-merge", "-quiet", file])
avg_line_len: 39.875, score: 14.25
def make_copy_paste_env(env: Dict[str, str]) -> str:
    """
    Convert an environment into a set of commands that can be copied/pasted,
    on the build platform, to recreate that environment.
    """
    windows = platform.system() == "Windows"
    cmd = "set" if windows else "export"
    return (
        "\n".join(
            "{cmd} {k}={v}".format(
                cmd=cmd, k=k,
                v=env[k] if windows else subprocess.list2cmdline([env[k]])
            )
            for k in sorted(env.keys())
        )
    )
avg_line_len: 32.5, score: 16.25
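A quick demonstration on a toy environment (POSIX output shown; on the non-Windows path, values containing spaces are quoted by list2cmdline):

print(make_copy_paste_env({"PATH": "/usr/bin", "MSG": "hello world"}))
# export MSG="hello world"
# export PATH=/usr/bin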
def xor(cls, obj, **kwargs):
    """Query an object.

    :param obj: object to test
    :param kwargs: query specified in kwargssql
    :return: `True` if exactly one `kwargs` expression is `True`,
        `False` otherwise.
    :rtype: bool
    """
    return cls.__eval_seqexp(obj, operator.xor, **kwargs)
avg_line_len: 24.785714, score: 19.928571
def qwarp_align(dset_from, dset_to, skull_strip=True, mask=None, affine_suffix='_aff', suffix='_qwarp', prefix=None):
    '''aligns ``dset_from`` to ``dset_to`` using 3dQwarp

    Will run ``3dSkullStrip`` (unless ``skull_strip`` is ``False``), ``3dUnifize``,
    ``3dAllineate``, and then ``3dQwarp``. This method will add suffixes to the input
    dataset for the intermediate files (e.g., ``_ss``, ``_u``). If those files already
    exist, it will assume they were intelligently named, and use them as is

    :skull_strip:   If True/False, turns skull-stripping of both datasets on/off.
                    If a string matching ``dset_from`` or ``dset_to``, will only
                    skull-strip the given dataset
    :mask:          Applies the given mask to the alignment. Because of the nature
                    of the alignment algorithms, the mask is **always** applied to
                    the ``dset_to``. If this isn't what you want, you need to reverse
                    the transform and re-apply it (e.g., using :meth:`qwarp_invert`
                    and :meth:`qwarp_apply`). If the ``dset_to`` dataset is
                    skull-stripped, the mask will also be resampled to match the
                    ``dset_to`` grid.
    :affine_suffix: Suffix applied to ``dset_from`` to name the new dataset, as well
                    as the ``.1D`` file.
    :suffix:        Suffix applied to the final ``dset_from`` dataset. An additional
                    file with the additional suffix ``_WARP`` will be created containing
                    the parameters (e.g., with the default ``_qwarp`` suffix, the
                    parameters will be in a file with the suffix ``_qwarp_WARP``)
    :prefix:        Alternatively to ``suffix``, explicitly give the full output filename

    The output affine dataset and 1D, as well as the output of qwarp are named by
    adding the given suffixes (``affine_suffix`` and ``qwarp_suffix``) to the
    ``dset_from`` file

    If ``skull_strip`` is a string instead of ``True``/``False``, it will only
    skull-strip the given dataset instead of both of them

    # TODO: currently does not work with +tlrc datasets because the filenames get mangled
    '''
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]
    dset_u = lambda dset: os.path.split(nl.suffix(dset, '_u'))[1]

    def dset_source(dset):
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = os.path.split(nl.suffix(dset_from, affine_suffix))[1]
    dset_affine_1D = nl.prefix(dset_affine) + '.1D'
    dset_qwarp = prefix
    if dset_qwarp is None:
        dset_qwarp = os.path.split(nl.suffix(dset_from, suffix))[1]

    if os.path.exists(dset_qwarp):
        # final product already exists
        return

    affine_align(dset_from, dset_to, skull_strip, mask, affine_suffix)

    for dset in [dset_from, dset_to]:
        nl.run([
            '3dUnifize',
            '-prefix', dset_u(dset_source(dset)),
            '-input', dset_source(dset)
        ], products=[dset_u(dset_source(dset))])

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            nl.run(['3dresample', '-master', dset_u(dset_ss(dset)), '-inset', mask,
                    '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    warp_cmd = [
        '3dQwarp',
        '-prefix', dset_qwarp,
        '-duplo', '-useweight', '-blur', '0', '3',
        '-iwarp',
        '-base', dset_u(dset_source(dset_to)),
        '-source', dset_affine
    ]
    if mask:
        warp_cmd += ['-emask', mask_use]

    nl.run(warp_cmd, products=dset_qwarp)
47.120482
30.662651
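Usage sketch for qwarp_align above — a minimal, hedged example assuming the function lives in a module imported as `nl` (as its own body implies) and that AFNI's 3dQwarp is on the PATH; the dataset names are hypothetical:

import neural as nl  # assumption: qwarp_align is part of the `neural` package

# Nonlinearly warp a subject anatomy onto a template, skull-stripping both;
# intermediates (_ns, _u, _aff) are created or reused if already present.
nl.qwarp_align('subject_anat+orig', 'template+orig', skull_strip=True)
# expected outputs with the default suffixes:
#   subject_anat_aff+orig, subject_anat_aff.1D,
#   subject_anat_qwarp+orig, subject_anat_qwarp_WARP+orig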
def add_current_text(self):
        """
        Add current text to combo box history (convenience method).
        If the path ends in the os separator ("\\" on Windows, "/" on Unix),
        remove it.
        """
        text = self.currentText()
        if text and osp.isdir(text):
            if text[-1] == os.sep:
                text = text[:-1]
        self.add_text(text)
[ "def", "add_current_text", "(", "self", ")", ":", "text", "=", "self", ".", "currentText", "(", ")", "if", "osp", ".", "isdir", "(", "text", ")", "and", "text", ":", "if", "text", "[", "-", "1", "]", "==", "os", ".", "sep", ":", "text", "=", "text", "[", ":", "-", "1", "]", "self", ".", "add_text", "(", "text", ")" ]
35.7
10.5
def setup(self, universe): """ Setup Security with universe. Speeds up future runs. Args: * universe (DataFrame): DataFrame of prices with security's name as one of the columns. """ # if we already have all the prices, we will store them to speed up # future updates try: prices = universe[self.name] except KeyError: prices = None # setup internal data if prices is not None: self._prices = prices self.data = pd.DataFrame(index=universe.index, columns=['value', 'position'], data=0.0) self._prices_set = True else: self.data = pd.DataFrame(index=universe.index, columns=['price', 'value', 'position']) self._prices = self.data['price'] self._prices_set = False self._values = self.data['value'] self._positions = self.data['position'] # add _outlay self.data['outlay'] = 0. self._outlays = self.data['outlay']
[ "def", "setup", "(", "self", ",", "universe", ")", ":", "# if we already have all the prices, we will store them to speed up", "# future updates", "try", ":", "prices", "=", "universe", "[", "self", ".", "name", "]", "except", "KeyError", ":", "prices", "=", "None", "# setup internal data", "if", "prices", "is", "not", "None", ":", "self", ".", "_prices", "=", "prices", "self", ".", "data", "=", "pd", ".", "DataFrame", "(", "index", "=", "universe", ".", "index", ",", "columns", "=", "[", "'value'", ",", "'position'", "]", ",", "data", "=", "0.0", ")", "self", ".", "_prices_set", "=", "True", "else", ":", "self", ".", "data", "=", "pd", ".", "DataFrame", "(", "index", "=", "universe", ".", "index", ",", "columns", "=", "[", "'price'", ",", "'value'", ",", "'position'", "]", ")", "self", ".", "_prices", "=", "self", ".", "data", "[", "'price'", "]", "self", ".", "_prices_set", "=", "False", "self", ".", "_values", "=", "self", ".", "data", "[", "'value'", "]", "self", ".", "_positions", "=", "self", ".", "data", "[", "'position'", "]", "# add _outlay", "self", ".", "data", "[", "'outlay'", "]", "=", "0.", "self", ".", "_outlays", "=", "self", ".", "data", "[", "'outlay'", "]" ]
32.714286
17.914286
def mv_files(src, dst):
    """
    Move all files from one directory to another

    :param str src: Source directory
    :param str dst: Destination directory
    :return: None
    """
    # list the files in the src directory
    files = os.listdir(src)
    # loop for each file found
    for file in files:
        # move the file from the src to the dst
        shutil.move(os.path.join(src, file), os.path.join(dst, file))
    return
[ "def", "mv_files", "(", "src", ",", "dst", ")", ":", "# list the files in the src directory", "files", "=", "os", ".", "listdir", "(", "src", ")", "# loop for each file found", "for", "file", "in", "files", ":", "# move the file from the src to the dst", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "src", ",", "file", ")", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "file", ")", ")", "return" ]
28.333333
13.4
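A quick self-contained demonstration of mv_files using temporary directories (standard library only); assumes the function above is in scope:

import os, tempfile

src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()
open(os.path.join(src, 'a.txt'), 'w').close()

mv_files(src, dst)
assert os.listdir(src) == [] and os.listdir(dst) == ['a.txt']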
def _delete_service_nwk(self, tenant_id, tenant_name, direc):
        """Delete the 'in' or 'out' service network for a tenant in DCNM. """
        net_dict = {}
        if direc == 'in':
            seg, vlan = self.get_in_seg_vlan(tenant_id)
            net_dict['part_name'] = None
        else:
            seg, vlan = self.get_out_seg_vlan(tenant_id)
            net_dict['part_name'] = fw_const.SERV_PART_NAME
        net_dict['segmentation_id'] = seg
        net_dict['vlan'] = vlan
        net = utils.Dict2Obj(net_dict)
        ret = True
        try:
            self.dcnm_obj.delete_service_network(tenant_name, net)
        except dexc.DfaClientRequestFailed:
            LOG.error("Failed to delete network in DCNM %s", direc)
            ret = False
        return ret
[ "def", "_delete_service_nwk", "(", "self", ",", "tenant_id", ",", "tenant_name", ",", "direc", ")", ":", "net_dict", "=", "{", "}", "if", "direc", "==", "'in'", ":", "seg", ",", "vlan", "=", "self", ".", "get_in_seg_vlan", "(", "tenant_id", ")", "net_dict", "[", "'part_name'", "]", "=", "None", "else", ":", "seg", ",", "vlan", "=", "self", ".", "get_out_seg_vlan", "(", "tenant_id", ")", "net_dict", "[", "'part_name'", "]", "=", "fw_const", ".", "SERV_PART_NAME", "net_dict", "[", "'segmentation_id'", "]", "=", "seg", "net_dict", "[", "'vlan'", "]", "=", "vlan", "net", "=", "utils", ".", "Dict2Obj", "(", "net_dict", ")", "ret", "=", "True", "try", ":", "self", ".", "dcnm_obj", ".", "delete_service_network", "(", "tenant_name", ",", "net", ")", "except", "dexc", ".", "DfaClientRequestFailed", ":", "LOG", ".", "error", "(", "\"Failed to delete network in DCNM %s\"", ",", "direc", ")", "ret", "=", "False", "return", "ret" ]
39.578947
15.210526
def import_mod_attr(path):
    """
    Import from a string in module path format, e.g. 'uliweb.orm' or
    'uliweb:orm', or accept an object directly; return the module object
    and the attribute object.
    """
    import inspect

    if isinstance(path, (str, unicode)):
        v = path.split(':')
        if len(v) == 1:
            module, func = path.rsplit('.', 1)
        else:
            module, func = v
        mod = __import__(module, fromlist=['*'])
        f = mod
        for x in func.split('.'):
            try:
                f = getattr(f, x)
            except AttributeError:
                raise AttributeError("Failed to get attribute %s from %s" % (x, path))
    else:
        f = path
        mod = inspect.getmodule(path)
    return mod, f
[ "def", "import_mod_attr", "(", "path", ")", ":", "import", "inspect", "if", "isinstance", "(", "path", ",", "(", "str", ",", "unicode", ")", ")", ":", "v", "=", "path", ".", "split", "(", "':'", ")", "if", "len", "(", "v", ")", "==", "1", ":", "module", ",", "func", "=", "path", ".", "rsplit", "(", "'.'", ",", "1", ")", "else", ":", "module", ",", "func", "=", "v", "mod", "=", "__import__", "(", "module", ",", "fromlist", "=", "[", "'*'", "]", ")", "f", "=", "mod", "for", "x", "in", "func", ".", "split", "(", "'.'", ")", ":", "try", ":", "f", "=", "getattr", "(", "f", ",", "x", ")", "except", ":", "raise", "AttributeError", "(", "\"Get %s attribute according %s error\"", "%", "(", "x", ",", "path", ")", ")", "else", ":", "f", "=", "path", "mod", "=", "inspect", ".", "getmodule", "(", "path", ")", "return", "mod", ",", "f" ]
27.625
16.625
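Usage sketch for import_mod_attr; both spellings resolve to the same (module, attribute) pair. The targets come from the standard library, so the example is self-contained on Python 2 (where `unicode` exists):

mod, f = import_mod_attr('os.path.join')   # dotted form: rsplit on the last '.'
mod2, g = import_mod_attr('os.path:join')  # colon form: explicit module:attr
assert f is g is mod.join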
def ping(bot, mask, target, args): """ping/pong %%ping """ bot.send('NOTICE %(nick)s :PONG %(nick)s!' % dict(nick=mask.nick))
[ "def", "ping", "(", "bot", ",", "mask", ",", "target", ",", "args", ")", ":", "bot", ".", "send", "(", "'NOTICE %(nick)s :PONG %(nick)s!'", "%", "dict", "(", "nick", "=", "mask", ".", "nick", ")", ")" ]
23.5
17
def seat_button_count(self):
		"""The total number of buttons pressed on all devices on the
		associated seat after the event was triggered.

		For pointer events that are not of type
		:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
		raises :exc:`AttributeError`.

		Returns:
			int: The seat wide pressed button count for the key of this
			event.
		Raises:
			AttributeError
		"""

		if self.type != EventType.POINTER_BUTTON:
			raise AttributeError(_wrong_prop.format(self.type))
		return self._libinput.libinput_event_pointer_get_seat_button_count(
			self._handle)
[ "def", "seat_button_count", "(", "self", ")", ":", "if", "self", ".", "type", "!=", "EventType", ".", "POINTER_BUTTON", ":", "raise", "AttributeError", "(", "_wrong_prop", ".", "format", "(", "self", ".", "type", ")", ")", "return", "self", ".", "_libinput", ".", "libinput_event_pointer_get_seat_button_count", "(", "self", ".", "_handle", ")" ]
31.666667
20.055556
def onKeyPressInCanvas(self, event): '''Called when a key is pressed when the canvas has focus.''' char_map = { 'w':'move 1', 'a':'strafe -1', 's':'move -1', 'd':'strafe 1', ' ':'jump 1' } keysym_map = { 'continuous': { 'Left':'turn -1', 'Right':'turn 1', 'Up':'pitch -1', 'Down':'pitch 1', 'Shift_L':'crouch 1', 'Shift_R':'crouch 1', '1':'hotbar.1 1', '2':'hotbar.2 1', '3':'hotbar.3 1', '4':'hotbar.4 1', '5':'hotbar.5 1', '6':'hotbar.6 1', '7':'hotbar.7 1', '8':'hotbar.8 1', '9':'hotbar.9 1' }, 'discrete': { 'Left':'turn -1', 'Right':'turn 1', 'Up':'move 1', 'Down':'move -1', '1':'hotbar.1 1', '2':'hotbar.2 1', '3':'hotbar.3 1', '4':'hotbar.4 1', '5':'hotbar.5 1', '6':'hotbar.6 1', '7':'hotbar.7 1', '8':'hotbar.8 1', '9':'hotbar.9 1' } } if event.char == '/': self.command_entry.focus_set() # interlude to allow user to type command elif event.char.lower() in char_map: self.agent_host.sendCommand( char_map[ event.char.lower() ] ) elif event.keysym in keysym_map[self.action_space]: self.agent_host.sendCommand( keysym_map[self.action_space][ event.keysym ] )
[ "def", "onKeyPressInCanvas", "(", "self", ",", "event", ")", ":", "char_map", "=", "{", "'w'", ":", "'move 1'", ",", "'a'", ":", "'strafe -1'", ",", "'s'", ":", "'move -1'", ",", "'d'", ":", "'strafe 1'", ",", "' '", ":", "'jump 1'", "}", "keysym_map", "=", "{", "'continuous'", ":", "{", "'Left'", ":", "'turn -1'", ",", "'Right'", ":", "'turn 1'", ",", "'Up'", ":", "'pitch -1'", ",", "'Down'", ":", "'pitch 1'", ",", "'Shift_L'", ":", "'crouch 1'", ",", "'Shift_R'", ":", "'crouch 1'", ",", "'1'", ":", "'hotbar.1 1'", ",", "'2'", ":", "'hotbar.2 1'", ",", "'3'", ":", "'hotbar.3 1'", ",", "'4'", ":", "'hotbar.4 1'", ",", "'5'", ":", "'hotbar.5 1'", ",", "'6'", ":", "'hotbar.6 1'", ",", "'7'", ":", "'hotbar.7 1'", ",", "'8'", ":", "'hotbar.8 1'", ",", "'9'", ":", "'hotbar.9 1'", "}", ",", "'discrete'", ":", "{", "'Left'", ":", "'turn -1'", ",", "'Right'", ":", "'turn 1'", ",", "'Up'", ":", "'move 1'", ",", "'Down'", ":", "'move -1'", ",", "'1'", ":", "'hotbar.1 1'", ",", "'2'", ":", "'hotbar.2 1'", ",", "'3'", ":", "'hotbar.3 1'", ",", "'4'", ":", "'hotbar.4 1'", ",", "'5'", ":", "'hotbar.5 1'", ",", "'6'", ":", "'hotbar.6 1'", ",", "'7'", ":", "'hotbar.7 1'", ",", "'8'", ":", "'hotbar.8 1'", ",", "'9'", ":", "'hotbar.9 1'", "}", "}", "if", "event", ".", "char", "==", "'/'", ":", "self", ".", "command_entry", ".", "focus_set", "(", ")", "# interlude to allow user to type command", "elif", "event", ".", "char", ".", "lower", "(", ")", "in", "char_map", ":", "self", ".", "agent_host", ".", "sendCommand", "(", "char_map", "[", "event", ".", "char", ".", "lower", "(", ")", "]", ")", "elif", "event", ".", "keysym", "in", "keysym_map", "[", "self", ".", "action_space", "]", ":", "self", ".", "agent_host", ".", "sendCommand", "(", "keysym_map", "[", "self", ".", "action_space", "]", "[", "event", ".", "keysym", "]", ")" ]
85
46.875
def system_monitor_LineCard_alert_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor") LineCard = ET.SubElement(system_monitor, "LineCard") alert = ET.SubElement(LineCard, "alert") state = ET.SubElement(alert, "state") state.text = kwargs.pop('state') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "system_monitor_LineCard_alert_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "system_monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"system-monitor\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-system-monitor\"", ")", "LineCard", "=", "ET", ".", "SubElement", "(", "system_monitor", ",", "\"LineCard\"", ")", "alert", "=", "ET", ".", "SubElement", "(", "LineCard", ",", "\"alert\"", ")", "state", "=", "ET", ".", "SubElement", "(", "alert", ",", "\"state\"", ")", "state", ".", "text", "=", "kwargs", ".", "pop", "(", "'state'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
44.333333
16.25
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text
[ "def", "get_final_text", "(", "pred_text", ",", "orig_text", ",", "do_lower_case", ",", "verbose_logging", "=", "False", ")", ":", "# When we created the data, we kept track of the alignment between original", "# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So", "# now `orig_text` contains the span of our original text corresponding to the", "# span that we predicted.", "#", "# However, `orig_text` may contain extra characters that we don't want in", "# our prediction.", "#", "# For example, let's say:", "# pred_text = steve smith", "# orig_text = Steve Smith's", "#", "# We don't want to return `orig_text` because it contains the extra \"'s\".", "#", "# We don't want to return `pred_text` because it's already been normalized", "# (the SQuAD eval script also does punctuation stripping/lower casing but", "# our tokenizer does additional normalization like stripping accent", "# characters).", "#", "# What we really want to return is \"Steve Smith\".", "#", "# Therefore, we have to apply a semi-complicated alignment heuristic between", "# `pred_text` and `orig_text` to get a character-to-character alignment. This", "# can fail in certain cases in which case we just return `orig_text`.", "def", "_strip_spaces", "(", "text", ")", ":", "ns_chars", "=", "[", "]", "ns_to_s_map", "=", "collections", ".", "OrderedDict", "(", ")", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "text", ")", ":", "if", "c", "==", "\" \"", ":", "continue", "ns_to_s_map", "[", "len", "(", "ns_chars", ")", "]", "=", "i", "ns_chars", ".", "append", "(", "c", ")", "ns_text", "=", "\"\"", ".", "join", "(", "ns_chars", ")", "return", "(", "ns_text", ",", "ns_to_s_map", ")", "# We first tokenize `orig_text`, strip whitespace from the result", "# and `pred_text`, and check if they are the same length. If they are", "# NOT the same length, the heuristic has failed. 
If they are the same", "# length, we assume the characters are one-to-one aligned.", "tokenizer", "=", "BasicTokenizer", "(", "do_lower_case", "=", "do_lower_case", ")", "tok_text", "=", "\" \"", ".", "join", "(", "tokenizer", ".", "tokenize", "(", "orig_text", ")", ")", "start_position", "=", "tok_text", ".", "find", "(", "pred_text", ")", "if", "start_position", "==", "-", "1", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Unable to find text: '%s' in '%s'\"", "%", "(", "pred_text", ",", "orig_text", ")", ")", "return", "orig_text", "end_position", "=", "start_position", "+", "len", "(", "pred_text", ")", "-", "1", "(", "orig_ns_text", ",", "orig_ns_to_s_map", ")", "=", "_strip_spaces", "(", "orig_text", ")", "(", "tok_ns_text", ",", "tok_ns_to_s_map", ")", "=", "_strip_spaces", "(", "tok_text", ")", "if", "len", "(", "orig_ns_text", ")", "!=", "len", "(", "tok_ns_text", ")", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Length not equal after stripping spaces: '%s' vs '%s'\"", ",", "orig_ns_text", ",", "tok_ns_text", ")", "return", "orig_text", "# We then project the characters in `pred_text` back to `orig_text` using", "# the character-to-character alignment.", "tok_s_to_ns_map", "=", "{", "}", "for", "(", "i", ",", "tok_index", ")", "in", "tok_ns_to_s_map", ".", "items", "(", ")", ":", "tok_s_to_ns_map", "[", "tok_index", "]", "=", "i", "orig_start_position", "=", "None", "if", "start_position", "in", "tok_s_to_ns_map", ":", "ns_start_position", "=", "tok_s_to_ns_map", "[", "start_position", "]", "if", "ns_start_position", "in", "orig_ns_to_s_map", ":", "orig_start_position", "=", "orig_ns_to_s_map", "[", "ns_start_position", "]", "if", "orig_start_position", "is", "None", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Couldn't map start position\"", ")", "return", "orig_text", "orig_end_position", "=", "None", "if", "end_position", "in", "tok_s_to_ns_map", ":", "ns_end_position", "=", "tok_s_to_ns_map", "[", "end_position", "]", "if", "ns_end_position", "in", "orig_ns_to_s_map", ":", "orig_end_position", "=", "orig_ns_to_s_map", "[", "ns_end_position", "]", "if", "orig_end_position", "is", "None", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Couldn't map end position\"", ")", "return", "orig_text", "output_text", "=", "orig_text", "[", "orig_start_position", ":", "(", "orig_end_position", "+", "1", ")", "]", "return", "output_text" ]
39.021277
22.265957
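A worked instance of the heuristic described in the comments above, assuming BasicTokenizer is importable from the same module: the tokenized orig_text is "steve smith ' s", the span "steve smith" is found at character 0, and the space-stripped character maps project it back onto the original string.

pred_text = "steve smith"
orig_text = "Steve Smith's"
final = get_final_text(pred_text, orig_text, do_lower_case=True)
# expected: final == "Steve Smith" (the extra "'s" is dropped)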
def skeleton_path(parts): """Gets the path to a skeleton asset""" return os.path.join(os.path.dirname(oz.__file__), "skeleton", parts)
[ "def", "skeleton_path", "(", "parts", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "oz", ".", "__file__", ")", ",", "\"skeleton\"", ",", "parts", ")" ]
46.666667
15.666667
def invalidation_hash(self, fingerprint_strategy=None): """ :API: public """ fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy() if fingerprint_strategy not in self._cached_fingerprint_map: self._cached_fingerprint_map[fingerprint_strategy] = self.compute_invalidation_hash(fingerprint_strategy) return self._cached_fingerprint_map[fingerprint_strategy]
[ "def", "invalidation_hash", "(", "self", ",", "fingerprint_strategy", "=", "None", ")", ":", "fingerprint_strategy", "=", "fingerprint_strategy", "or", "DefaultFingerprintStrategy", "(", ")", "if", "fingerprint_strategy", "not", "in", "self", ".", "_cached_fingerprint_map", ":", "self", ".", "_cached_fingerprint_map", "[", "fingerprint_strategy", "]", "=", "self", ".", "compute_invalidation_hash", "(", "fingerprint_strategy", ")", "return", "self", ".", "_cached_fingerprint_map", "[", "fingerprint_strategy", "]" ]
50
24.25
def parallel_progbar(mapper, iterable, nprocs=None, starmap=False, flatmap=False, shuffle=False,
                     verbose=True, verbose_flatmap=None, **kwargs):
    """Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned

    :param mapper: The mapping function to apply to elements of the iterable
    :param iterable: The iterable to map
    :param nprocs: The number of processes (defaults to the number of CPUs)
    :param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each
        element of a tuple as an argument
    :param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects
    :param shuffle: If true, randomly sort the elements before processing them. This might help provide more
        uniform runtimes if processing different objects takes different amounts of time.
    :param verbose: Whether or not to print the progress bar
    :param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned
    :param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``)
    :return: A list of the returned objects, in the same order as provided
    """

    results = _parallel_progbar_launch(mapper, iterable, nprocs, starmap, flatmap, shuffle, verbose,
                                       verbose_flatmap, **kwargs)
    return [x for i, x in sorted(results, key=lambda p: p[0])]
[ "def", "parallel_progbar", "(", "mapper", ",", "iterable", ",", "nprocs", "=", "None", ",", "starmap", "=", "False", ",", "flatmap", "=", "False", ",", "shuffle", "=", "False", ",", "verbose", "=", "True", ",", "verbose_flatmap", "=", "None", ",", "*", "*", "kwargs", ")", ":", "results", "=", "_parallel_progbar_launch", "(", "mapper", ",", "iterable", ",", "nprocs", ",", "starmap", ",", "flatmap", ",", "shuffle", ",", "verbose", ",", "verbose_flatmap", ",", "*", "*", "kwargs", ")", "return", "[", "x", "for", "i", ",", "x", "in", "sorted", "(", "results", ",", "key", "=", "lambda", "p", ":", "p", "[", "0", "]", ")", "]" ]
72
39.6
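Minimal usage sketch for parallel_progbar: square the numbers 0..99 across four worker processes; the returned list preserves the input order. The mapper must be a top-level (picklable) function on most platforms.

def square(x):
    return x * x

results = parallel_progbar(square, range(100), nprocs=4)
assert results == [x * x for x in range(100)]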
def available_services_regions(self): """Returns list of unique region name values in service catalog.""" regions = [] if self.service_catalog: for service in self.service_catalog: service_type = service.get('type') if service_type is None or service_type == 'identity': continue for endpoint in service.get('endpoints', []): region = utils.get_endpoint_region(endpoint) if region not in regions: regions.append(region) return regions
[ "def", "available_services_regions", "(", "self", ")", ":", "regions", "=", "[", "]", "if", "self", ".", "service_catalog", ":", "for", "service", "in", "self", ".", "service_catalog", ":", "service_type", "=", "service", ".", "get", "(", "'type'", ")", "if", "service_type", "is", "None", "or", "service_type", "==", "'identity'", ":", "continue", "for", "endpoint", "in", "service", ".", "get", "(", "'endpoints'", ",", "[", "]", ")", ":", "region", "=", "utils", ".", "get_endpoint_region", "(", "endpoint", ")", "if", "region", "not", "in", "regions", ":", "regions", ".", "append", "(", "region", ")", "return", "regions" ]
46
12.692308
def vhost_add(cls, resource, params):
        """ Add a virtual host to a web accelerator """
        try:
            oper = cls.call(
                'hosting.rproxy.vhost.create', cls.usable_id(resource), params)
            cls.echo('Adding your virtual host (%s) into %s' %
                     (params['vhost'], resource))
            cls.display_progress(oper)
            cls.echo('Your virtual host has been added')
            return oper
        except Exception as err:
            if err.code == 580142:
                dc = cls.info(resource)
                dns_entry = cls.call('hosting.rproxy.vhost.get_dns_entries',
                                     {'datacenter': dc['datacenter']['id'],
                                      'vhost': params['vhost']})
                txt_record = "%s 3600 IN TXT \"%s=%s\"" % (dns_entry['key'],
                                                           dns_entry['key'],
                                                           dns_entry['txt'])
                cname_record = "%s 3600 IN CNAME %s" % (dns_entry['key'],
                                                        dns_entry['cname'])
                cls.echo('The domain doesn\'t use Gandi DNS or you don\'t have '
                         'sufficient rights to alter the zone file. '
                         'Edit your zone file, adding this TXT and CNAME '
                         'record, and try again:')
                cls.echo(txt_record)
                cls.echo(cname_record)
                cls.echo('\nOr add a file containing %s at:\n'
                         'http://%s/%s.txt\n' % (dns_entry['txt'],
                                                 dns_entry['domain'],
                                                 dns_entry['txt']))
            else:
                cls.echo(err)
[ "def", "vhost_add", "(", "cls", ",", "resource", ",", "params", ")", ":", "try", ":", "oper", "=", "cls", ".", "call", "(", "'hosting.rproxy.vhost.create'", ",", "cls", ".", "usable_id", "(", "resource", ")", ",", "params", ")", "cls", ".", "echo", "(", "'Adding your virtual host (%s) into %s'", "%", "(", "params", "[", "'vhost'", "]", ",", "resource", ")", ")", "cls", ".", "display_progress", "(", "oper", ")", "cls", ".", "echo", "(", "'Your virtual host habe been added'", ")", "return", "oper", "except", "Exception", "as", "err", ":", "if", "err", ".", "code", "==", "580142", ":", "dc", "=", "cls", ".", "info", "(", "resource", ")", "dns_entry", "=", "cls", ".", "call", "(", "'hosting.rproxy.vhost.get_dns_entries'", ",", "{", "'datacenter'", ":", "dc", "[", "'datacenter'", "]", "[", "'id'", "]", ",", "'vhost'", ":", "params", "[", "'vhost'", "]", "}", ")", "txt_record", "=", "\"%s 3600 IN TXT \\\"%s=%s\\\"\"", "%", "(", "dns_entry", "[", "'key'", "]", ",", "dns_entry", "[", "'key'", "]", ",", "dns_entry", "[", "'txt'", "]", ")", "cname_record", "=", "\"%s 3600 IN CNAME %s\"", "%", "(", "dns_entry", "[", "'key'", "]", ",", "dns_entry", "[", "'cname'", "]", ")", "cls", ".", "echo", "(", "'The domain don\\'t use Gandi DNS or you have not'", "' sufficient right to alter the zone file. '", "'Edit your zone file adding this TXT and CNAME '", "'record and try again :'", ")", "cls", ".", "echo", "(", "txt_record", ")", "cls", ".", "echo", "(", "cname_record", ")", "cls", ".", "echo", "(", "'\\nOr add a file containing %s at :\\n'", "'http://%s/%s.txt\\n'", "%", "(", "dns_entry", "[", "'txt'", "]", ",", "dns_entry", "[", "'domain'", "]", ",", "dns_entry", "[", "'txt'", "]", ")", ")", "else", ":", "cls", ".", "echo", "(", "err", ")" ]
49.472222
22.388889
def setup_parser(self, parser): """ Setup the argument parser. `parser` ``FocusArgParser`` object. """ parser.add_argument('task_name', help='task to create') parser.add_argument('clone_task', nargs='?', help='existing task to clone') parser.add_argument('--skip-edit', action='store_true', help='skip editing of task configuration')
[ "def", "setup_parser", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "'task_name'", ",", "help", "=", "'task to create'", ")", "parser", ".", "add_argument", "(", "'clone_task'", ",", "nargs", "=", "'?'", ",", "help", "=", "'existing task to clone'", ")", "parser", ".", "add_argument", "(", "'--skip-edit'", ",", "action", "=", "'store_true'", ",", "help", "=", "'skip editing of task configuration'", ")" ]
37.666667
18.083333
def to_json(value, **kwargs): """Return a copy of the dictionary If the values are HasProperties instances, they are serialized """ serial_dict = { key: ( val.serialize(**kwargs) if isinstance(val, HasProperties) else val ) for key, val in iteritems(value) } return serial_dict
[ "def", "to_json", "(", "value", ",", "*", "*", "kwargs", ")", ":", "serial_dict", "=", "{", "key", ":", "(", "val", ".", "serialize", "(", "*", "*", "kwargs", ")", "if", "isinstance", "(", "val", ",", "HasProperties", ")", "else", "val", ")", "for", "key", ",", "val", "in", "iteritems", "(", "value", ")", "}", "return", "serial_dict" ]
29.384615
18.846154
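Behavior sketch for to_json: plain values pass through unchanged, while HasProperties values get serialize() called on them. `Point` is a hypothetical HasProperties subclass used only for illustration.

value = {'name': 'origin', 'location': Point(x=0.0, y=0.0)}
serial = to_json(value)
# -> {'name': 'origin', 'location': <output of Point(...).serialize()>}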
def fill_list(self, list, input_list):
        """
        fills a list widget with items built from the given names
        Args:
            list: QtGui.QListView to fill
            input_list: iterable of names; one item is created per name
        """
        for name in input_list:
            item = QtGui.QStandardItem(name)
            item.setSelectable(True)
            item.setEditable(False)
            list.model().appendRow(item)
[ "def", "fill_list", "(", "self", ",", "list", ",", "input_list", ")", ":", "for", "name", "in", "input_list", ":", "# print(index, loaded_item, loaded_item_settings)", "item", "=", "QtGui", ".", "QStandardItem", "(", "name", ")", "item", ".", "setSelectable", "(", "True", ")", "item", ".", "setEditable", "(", "False", ")", "list", ".", "model", "(", ")", ".", "appendRow", "(", "item", ")" ]
36
12.571429
def rt(nu, size=None): """ Student's t random variates. """ return rnormal(0, 1, size) / np.sqrt(rchi2(nu, size) / nu)
[ "def", "rt", "(", "nu", ",", "size", "=", "None", ")", ":", "return", "rnormal", "(", "0", ",", "1", ",", "size", ")", "/", "np", ".", "sqrt", "(", "rchi2", "(", "nu", ",", "size", ")", "/", "nu", ")" ]
26
9.6
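The construction in rt is the textbook one: if Z ~ N(0, 1) and V ~ chi-squared(nu) are independent, then Z / sqrt(V / nu) ~ t(nu). A self-contained numpy check (np.random standing in for rnormal/rchi2):

import numpy as np

nu, n = 5, 200000
z = np.random.normal(0, 1, n)
v = np.random.chisquare(nu, n)
t = z / np.sqrt(v / nu)
print(t.var())  # should approach nu / (nu - 2) = 5/3 for nu > 2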
def json_is_exception(resp): """ Is the given response object an exception traceback? Return True if so Return False if not """ if not json_is_error(resp): return False if 'traceback' not in resp.keys() or 'error' not in resp.keys(): return False return True
[ "def", "json_is_exception", "(", "resp", ")", ":", "if", "not", "json_is_error", "(", "resp", ")", ":", "return", "False", "if", "'traceback'", "not", "in", "resp", ".", "keys", "(", ")", "or", "'error'", "not", "in", "resp", ".", "keys", "(", ")", ":", "return", "False", "return", "True" ]
19.933333
19.4
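Behavior sketch, under the assumption that json_is_error flags any dict carrying an 'error' key:

resp = {'error': 'boom', 'traceback': 'Traceback (most recent call last): ...'}
# json_is_exception(resp) -> True
# without the 'traceback' key (or without 'error'), it returns False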
def __getOrganizations(self, web):
        """Scrape the number of organizations from a GitHub profile.

        :param web: parsed web page.
        :type web: BeautifulSoup node.
        """
        orgsElements = web.find_all("a", {"class": "avatar-group-item"})
        self.organizations = len(orgsElements)
[ "def", "__getOrganizations", "(", "self", ",", "web", ")", ":", "orgsElements", "=", "web", ".", "find_all", "(", "\"a\"", ",", "{", "\"class\"", ":", "\"avatar-group-item\"", "}", ")", "self", ".", "organizations", "=", "len", "(", "orgsElements", ")" ]
37.375
11.875
def ip_rtm_config_route_static_route_oif_vrf_next_hop_vrf(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm") route = ET.SubElement(rtm_config, "route") static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf") static_route_next_vrf_dest_key = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest") static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest') static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type") static_route_oif_type_key.text = kwargs.pop('static_route_oif_type') static_route_oif_name_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-name") static_route_oif_name_key.text = kwargs.pop('static_route_oif_name') next_hop_vrf = ET.SubElement(static_route_oif_vrf, "next-hop-vrf") next_hop_vrf.text = kwargs.pop('next_hop_vrf') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "ip_rtm_config_route_static_route_oif_vrf_next_hop_vrf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "rtm_config", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"rtm-config\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rtm\"", ")", "route", "=", "ET", ".", "SubElement", "(", "rtm_config", ",", "\"route\"", ")", "static_route_oif_vrf", "=", "ET", ".", "SubElement", "(", "route", ",", "\"static-route-oif-vrf\"", ")", "static_route_next_vrf_dest_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-next-vrf-dest\"", ")", "static_route_next_vrf_dest_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_next_vrf_dest'", ")", "static_route_oif_type_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-oif-type\"", ")", "static_route_oif_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_type'", ")", "static_route_oif_name_key", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"static-route-oif-name\"", ")", "static_route_oif_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_oif_name'", ")", "next_hop_vrf", "=", "ET", ".", "SubElement", "(", "static_route_oif_vrf", ",", "\"next-hop-vrf\"", ")", "next_hop_vrf", ".", "text", "=", "kwargs", ".", "pop", "(", "'next_hop_vrf'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
63.789474
31.315789
def erf(f=Ellipsis):
    '''
    erf(x) yields a potential function that calculates the error function over the input x. If x
      is a constant, yields a constant potential function.
    erf() is equivalent to erf(...), i.e., the error function itself, calculated over its inputs.
    '''
    from scipy.special import erf as _erf  # numpy has no erf(); scipy.special provides one
    f = to_potential(f)
    if   is_const_potential(f):    return const_potential(_erf(f.c))
    elif is_identity_potential(f): return ErfPotential()
    else:                          return compose(ErfPotential(), f)
[ "def", "erf", "(", "f", "=", "Ellipsis", ")", ":", "f", "=", "to_potential", "(", "f", ")", "if", "is_const_potential", "(", "f", ")", ":", "return", "const_potential", "(", "np", ".", "erf", "(", "f", ".", "c", ")", ")", "elif", "is_identity_potential", "(", "f", ")", ":", "return", "ErfPotential", "(", ")", "else", ":", "return", "compose", "(", "ErfPotential", "(", ")", ",", "f", ")" ]
47.3
27.9
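Composition sketch for erf: a constant input folds immediately, the bare call is the error-function potential itself, and any other potential is composed. scipy.special.erf is the concrete implementation assumed by the fix above.

g = erf()        # just ErfPotential(): the error function as a potential
h = erf(0.5)     # constant potential holding erf(0.5) ~= 0.5205
k = erf(g)       # compose(ErfPotential(), g): erf applied twice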
def accuracy_helper(egg, match='exact', distance='euclidean',
                    features=None):
    """
    Computes proportion of words recalled

    Parameters
    ----------
    egg : quail.Egg
        Data to analyze

    match : str (exact, best or smooth)
        Matching approach to compute recall matrix.  If exact, the presented
        and recalled items must be identical (default).  If best, the recalled
        item that is most similar to the presented items will be selected. If
        smooth, a weighted average of all presented items will be used, where
        the weights are derived from the similarity between the recalled item
        and each presented item.

    distance : str
        The distance function used to compare presented and recalled items.
        Applies only to 'best' and 'smooth' matching approaches.
        Can be any distance function supported by numpy.spatial.distance.cdist.

    Returns
    ----------
    prop_recalled : numpy array
      proportion of words recalled

    """
    def acc(lst):
        return len([i for i in np.unique(lst) if i >= 0]) / (egg.list_length)

    opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
        opts.update({'features': 'item'})
    recmat = recall_matrix(egg, **opts)

    if match in ['exact', 'best']:
        result = [acc(lst) for lst in recmat]
    elif match == 'smooth':
        result = np.mean(recmat, axis=1)
    else:
        raise ValueError('Match must be set to exact, best or smooth.')

    return np.nanmean(result, axis=0)
[ "def", "accuracy_helper", "(", "egg", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ")", ":", "def", "acc", "(", "lst", ")", ":", "return", "len", "(", "[", "i", "for", "i", "in", "np", ".", "unique", "(", "lst", ")", "if", "i", ">=", "0", "]", ")", "/", "(", "egg", ".", "list_length", ")", "opts", "=", "dict", "(", "match", "=", "match", ",", "distance", "=", "distance", ",", "features", "=", "features", ")", "if", "match", "is", "'exact'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "'item'", "}", ")", "recmat", "=", "recall_matrix", "(", "egg", ",", "*", "*", "opts", ")", "if", "match", "in", "[", "'exact'", ",", "'best'", "]", ":", "result", "=", "[", "acc", "(", "lst", ")", "for", "lst", "in", "recmat", "]", "elif", "match", "is", "'smooth'", ":", "result", "=", "np", ".", "mean", "(", "recmat", ",", "axis", "=", "1", ")", "else", ":", "raise", "ValueError", "(", "'Match must be set to exact, best or smooth.'", ")", "return", "np", ".", "nanmean", "(", "result", ",", "axis", "=", "0", ")" ]
33.217391
23.043478
def safe_mkdir_for(path, clean=False): """Ensure that the parent directory for a file is present. If it's not there, create it. If it is, no-op. """ safe_mkdir(os.path.dirname(path), clean=clean)
[ "def", "safe_mkdir_for", "(", "path", ",", "clean", "=", "False", ")", ":", "safe_mkdir", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "clean", "=", "clean", ")" ]
33.166667
9.666667
def cmode(self, channel, modes=''): """ Sets or gets the channel mode. Required arguments: * channel - Channel to set/get modes of. Optional arguments: * modes='' - Modes to set. If not specified return the modes of the channel. """ with self.lock: self.is_in_channel(channel) if not modes: self.send('MODE %s' % channel) modes = '' mode_set_time = None while self.readable(): msg = self._recv(rm_colon=True, \ expected_replies=('324', '329')) if msg[0] == '324': modes = msg[2].split()[1].replace('+', '', 1) elif msg[0] == '329': mode_set_time = self._m_time.localtime( \ int(msg[2].split()[1])) return modes, mode_set_time else: self.send('MODE %s %s' % (channel, modes)) if self.readable(): msg = self._recv(expected_replies=('MODE',), \ ignore_unexpected_replies=True) if msg[0]: mode = msg[2] self.parse_cmode_string(mode, msg[1]) if not self.hide_called_events: self.stepback()
[ "def", "cmode", "(", "self", ",", "channel", ",", "modes", "=", "''", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "is_in_channel", "(", "channel", ")", "if", "not", "modes", ":", "self", ".", "send", "(", "'MODE %s'", "%", "channel", ")", "modes", "=", "''", "mode_set_time", "=", "None", "while", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "rm_colon", "=", "True", ",", "expected_replies", "=", "(", "'324'", ",", "'329'", ")", ")", "if", "msg", "[", "0", "]", "==", "'324'", ":", "modes", "=", "msg", "[", "2", "]", ".", "split", "(", ")", "[", "1", "]", ".", "replace", "(", "'+'", ",", "''", ",", "1", ")", "elif", "msg", "[", "0", "]", "==", "'329'", ":", "mode_set_time", "=", "self", ".", "_m_time", ".", "localtime", "(", "int", "(", "msg", "[", "2", "]", ".", "split", "(", ")", "[", "1", "]", ")", ")", "return", "modes", ",", "mode_set_time", "else", ":", "self", ".", "send", "(", "'MODE %s %s'", "%", "(", "channel", ",", "modes", ")", ")", "if", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'MODE'", ",", ")", ",", "ignore_unexpected_replies", "=", "True", ")", "if", "msg", "[", "0", "]", ":", "mode", "=", "msg", "[", "2", "]", "self", ".", "parse_cmode_string", "(", "mode", ",", "msg", "[", "1", "]", ")", "if", "not", "self", ".", "hide_called_events", ":", "self", ".", "stepback", "(", ")" ]
41.222222
14.111111
def dumps(self):
        """Turn the tabu object into a string in Latex format."""

        _s = super().dumps()

        # Tabu tables support an unusual syntax:
        #   \begin{tabu} spread 0pt {<col format...>}
        #
        # Since this syntax isn't common, it doesn't make
        # sense to support it in the baseclass (e.g., Environment);
        # rather, here we fix the LaTeX string post-hoc
        if self._preamble:
            if _s.startswith(r"\begin{longtabu}"):
                _s = _s[:16] + self._preamble + _s[16:]
            elif _s.startswith(r"\begin{tabu}"):
                _s = _s[:12] + self._preamble + _s[12:]
            else:
                raise TableError("Can't apply preamble to Tabu table "
                                 "(unexpected initial command sequence)")

        return _s
[ "def", "dumps", "(", "self", ")", ":", "_s", "=", "super", "(", ")", ".", "dumps", "(", ")", "# Tabu tables support a unusual syntax:", "# \\begin{tabu} spread 0pt {<col format...>}", "#", "# Since this syntax isn't common, it doesn't make", "# sense to support it in the baseclass (e.g., Environment)", "# rather, here we fix the LaTeX string post-hoc", "if", "self", ".", "_preamble", ":", "if", "_s", ".", "startswith", "(", "r\"\\begin{longtabu}\"", ")", ":", "_s", "=", "_s", "[", ":", "16", "]", "+", "self", ".", "_preamble", "+", "_s", "[", "16", ":", "]", "elif", "_s", ".", "startswith", "(", "r\"\\begin{tabu}\"", ")", ":", "_s", "=", "_s", "[", ":", "12", "]", "+", "self", ".", "_preamble", "+", "_s", "[", "12", ":", "]", "else", ":", "raise", "TableError", "(", "\"Can't apply preamble to Tabu table \"", "\"(unexpected initial command sequence)\"", ")", "return", "_s" ]
38.333333
20.666667
def honeycomb_lattice( a, b, spacing, alternating_sites=False ): """ Generate a honeycomb lattice. Args: a (Int): Number of lattice repeat units along x. b (Int): Number of lattice repeat units along y. spacing (Float): Distance between lattice sites. alternating_sites (Bool, optional): Label alternating sites with 'A' and 'B'. Defaults to False. Returns: (Lattice): The new lattice Notes: The returned lattice is 3D periodic, but all sites and edges lie in the xy plane. """ if alternating_sites: site_labels = [ 'A', 'B', 'A', 'B' ] else: site_labels = [ 'L', 'L', 'L', 'L' ] unit_cell_lengths = np.array( [ sqrt(3), 3.0, 0.0 ] ) * spacing cell_lengths = unit_cell_lengths * np.array( [ a, b, 1.0 ] ) grid = np.array( list( range( 1, int( a * b * 4 + 1 ) ) ) ).reshape( a, b, 4, order='C' ) sites = [] for i in range( a ): for j in range( b ): # site 1 r = np.array( [ i * sqrt(3) * spacing, j * 3 * spacing, 0.0 ] ) neighbours = [ grid[ i, j, 1 ], np.roll( grid, +1, axis=0 )[ i, j, 1 ], np.roll( grid, +1, axis=1 )[ i, j, 3 ] ] sites.append( lattice_site.Site( grid[ i, j, 0 ], r, neighbours, 0.0, site_labels[0] ) ) # site 2 r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 0.5 ) * spacing, 0.0 ] ) neighbours = [ grid[ i, j, 0 ], grid[ i, j, 2 ], np.roll( grid, -1, axis=0 )[ i, j, 0 ] ] sites.append( lattice_site.Site( grid[ i, j, 1 ], r, neighbours, 0.0, site_labels[1] ) ) # site 3 r = np.array( [ i * sqrt(3) * spacing + sqrt(3)/2 * spacing, ( j * 3 + 1.5 ) * spacing, 0.0 ] ) neighbours = [ grid[ i, j, 1 ], grid[ i, j, 3 ], np.roll( grid, -1, axis=0 )[ i, j, 3 ] ] sites.append( lattice_site.Site( grid[ i, j, 2 ], r, neighbours, 0.0, site_labels[2] ) ) # site 4 r = np.array( [ i * sqrt(3) * spacing, ( j * 3 + 2 ) * spacing, 0.0 ] ) neighbours = [ grid[ i, j, 2 ], np.roll( grid, +1, axis=0 )[ i, j, 2 ], np.roll( grid, -1, axis=1 )[ i, j, 0 ] ] sites.append( lattice_site.Site( grid[ i, j, 3 ], r, neighbours, 0.0, site_labels[3] ) ) return lattice.Lattice( sites, cell_lengths=cell_lengths )
[ "def", "honeycomb_lattice", "(", "a", ",", "b", ",", "spacing", ",", "alternating_sites", "=", "False", ")", ":", "if", "alternating_sites", ":", "site_labels", "=", "[", "'A'", ",", "'B'", ",", "'A'", ",", "'B'", "]", "else", ":", "site_labels", "=", "[", "'L'", ",", "'L'", ",", "'L'", ",", "'L'", "]", "unit_cell_lengths", "=", "np", ".", "array", "(", "[", "sqrt", "(", "3", ")", ",", "3.0", ",", "0.0", "]", ")", "*", "spacing", "cell_lengths", "=", "unit_cell_lengths", "*", "np", ".", "array", "(", "[", "a", ",", "b", ",", "1.0", "]", ")", "grid", "=", "np", ".", "array", "(", "list", "(", "range", "(", "1", ",", "int", "(", "a", "*", "b", "*", "4", "+", "1", ")", ")", ")", ")", ".", "reshape", "(", "a", ",", "b", ",", "4", ",", "order", "=", "'C'", ")", "sites", "=", "[", "]", "for", "i", "in", "range", "(", "a", ")", ":", "for", "j", "in", "range", "(", "b", ")", ":", "# site 1", "r", "=", "np", ".", "array", "(", "[", "i", "*", "sqrt", "(", "3", ")", "*", "spacing", ",", "j", "*", "3", "*", "spacing", ",", "0.0", "]", ")", "neighbours", "=", "[", "grid", "[", "i", ",", "j", ",", "1", "]", ",", "np", ".", "roll", "(", "grid", ",", "+", "1", ",", "axis", "=", "0", ")", "[", "i", ",", "j", ",", "1", "]", ",", "np", ".", "roll", "(", "grid", ",", "+", "1", ",", "axis", "=", "1", ")", "[", "i", ",", "j", ",", "3", "]", "]", "sites", ".", "append", "(", "lattice_site", ".", "Site", "(", "grid", "[", "i", ",", "j", ",", "0", "]", ",", "r", ",", "neighbours", ",", "0.0", ",", "site_labels", "[", "0", "]", ")", ")", "# site 2", "r", "=", "np", ".", "array", "(", "[", "i", "*", "sqrt", "(", "3", ")", "*", "spacing", "+", "sqrt", "(", "3", ")", "/", "2", "*", "spacing", ",", "(", "j", "*", "3", "+", "0.5", ")", "*", "spacing", ",", "0.0", "]", ")", "neighbours", "=", "[", "grid", "[", "i", ",", "j", ",", "0", "]", ",", "grid", "[", "i", ",", "j", ",", "2", "]", ",", "np", ".", "roll", "(", "grid", ",", "-", "1", ",", "axis", "=", "0", ")", "[", "i", ",", "j", ",", "0", "]", "]", "sites", ".", "append", "(", "lattice_site", ".", "Site", "(", "grid", "[", "i", ",", "j", ",", "1", "]", ",", "r", ",", "neighbours", ",", "0.0", ",", "site_labels", "[", "1", "]", ")", ")", "# site 3", "r", "=", "np", ".", "array", "(", "[", "i", "*", "sqrt", "(", "3", ")", "*", "spacing", "+", "sqrt", "(", "3", ")", "/", "2", "*", "spacing", ",", "(", "j", "*", "3", "+", "1.5", ")", "*", "spacing", ",", "0.0", "]", ")", "neighbours", "=", "[", "grid", "[", "i", ",", "j", ",", "1", "]", ",", "grid", "[", "i", ",", "j", ",", "3", "]", ",", "np", ".", "roll", "(", "grid", ",", "-", "1", ",", "axis", "=", "0", ")", "[", "i", ",", "j", ",", "3", "]", "]", "sites", ".", "append", "(", "lattice_site", ".", "Site", "(", "grid", "[", "i", ",", "j", ",", "2", "]", ",", "r", ",", "neighbours", ",", "0.0", ",", "site_labels", "[", "2", "]", ")", ")", "# site 4", "r", "=", "np", ".", "array", "(", "[", "i", "*", "sqrt", "(", "3", ")", "*", "spacing", ",", "(", "j", "*", "3", "+", "2", ")", "*", "spacing", ",", "0.0", "]", ")", "neighbours", "=", "[", "grid", "[", "i", ",", "j", ",", "2", "]", ",", "np", ".", "roll", "(", "grid", ",", "+", "1", ",", "axis", "=", "0", ")", "[", "i", ",", "j", ",", "2", "]", ",", "np", ".", "roll", "(", "grid", ",", "-", "1", ",", "axis", "=", "1", ")", "[", "i", ",", "j", ",", "0", "]", "]", "sites", ".", "append", "(", "lattice_site", ".", "Site", "(", "grid", "[", "i", ",", "j", ",", "3", "]", ",", "r", ",", "neighbours", ",", "0.0", ",", "site_labels", "[", "3", "]", ")", ")", "return", 
"lattice", ".", "Lattice", "(", "sites", ",", "cell_lengths", "=", "cell_lengths", ")" ]
49.803922
26.862745
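Sanity checks one might run on honeycomb_lattice, assuming Lattice exposes its sites as `.sites` and Site keeps its neighbour list as `.neighbours` (attribute names inferred from the constructor calls above, not confirmed): a honeycomb has 4 sites per unit cell and coordination number 3.

hc = honeycomb_lattice(4, 4, spacing=1.0, alternating_sites=True)
assert len(hc.sites) == 4 * 4 * 4          # 4 sites per (a x b) cell
assert all(len(site.neighbours) == 3 for site in hc.sites)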
def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False): ''' Assert 2 series are equal. Like ``assert equals(series1, series2, ...)``, but with better hints at where the series differ. See `equals` for detailed parameter doc. Parameters ---------- actual : ~pandas.Series expected : ~pandas.Series ignore_order : bool ignore_index : bool all_close : bool ''' equals_, reason = equals(actual, expected, ignore_order, ignore_index, all_close, _return_reason=True) assert equals_, '{}\n\n{}\n\n{}'.format(reason, actual.to_string(), expected.to_string())
[ "def", "assert_equals", "(", "actual", ",", "expected", ",", "ignore_order", "=", "False", ",", "ignore_index", "=", "False", ",", "all_close", "=", "False", ")", ":", "equals_", ",", "reason", "=", "equals", "(", "actual", ",", "expected", ",", "ignore_order", ",", "ignore_index", ",", "all_close", ",", "_return_reason", "=", "True", ")", "assert", "equals_", ",", "'{}\\n\\n{}\\n\\n{}'", ".", "format", "(", "reason", ",", "actual", ".", "to_string", "(", ")", ",", "expected", ".", "to_string", "(", ")", ")" ]
35.166667
28.388889
def _init_usrgos(self, goids): """Return user GO IDs which have GO Terms.""" usrgos = set() goids_missing = set() _go2obj = self.gosubdag.go2obj for goid in goids: if goid in _go2obj: usrgos.add(goid) else: goids_missing.add(goid) if goids_missing: print("MISSING GO IDs: {GOs}".format(GOs=goids_missing)) print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids))) return usrgos
[ "def", "_init_usrgos", "(", "self", ",", "goids", ")", ":", "usrgos", "=", "set", "(", ")", "goids_missing", "=", "set", "(", ")", "_go2obj", "=", "self", ".", "gosubdag", ".", "go2obj", "for", "goid", "in", "goids", ":", "if", "goid", "in", "_go2obj", ":", "usrgos", ".", "add", "(", "goid", ")", "else", ":", "goids_missing", ".", "add", "(", "goid", ")", "if", "goids_missing", ":", "print", "(", "\"MISSING GO IDs: {GOs}\"", ".", "format", "(", "GOs", "=", "goids_missing", ")", ")", "print", "(", "\"{N} of {M} GO IDs ARE MISSING\"", ".", "format", "(", "N", "=", "len", "(", "goids_missing", ")", ",", "M", "=", "len", "(", "goids", ")", ")", ")", "return", "usrgos" ]
37.428571
15.071429
def Page_screencastFrameAck(self, sessionId): """ Function path: Page.screencastFrameAck Domain: Page Method name: screencastFrameAck WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'sessionId' (type: integer) -> Frame number. No return value. Description: Acknowledges that a screencast frame has been received by the frontend. """ assert isinstance(sessionId, (int,) ), "Argument 'sessionId' must be of type '['int']'. Received type: '%s'" % type( sessionId) subdom_funcs = self.synchronous_command('Page.screencastFrameAck', sessionId=sessionId) return subdom_funcs
[ "def", "Page_screencastFrameAck", "(", "self", ",", "sessionId", ")", ":", "assert", "isinstance", "(", "sessionId", ",", "(", "int", ",", ")", ")", ",", "\"Argument 'sessionId' must be of type '['int']'. Received type: '%s'\"", "%", "type", "(", "sessionId", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Page.screencastFrameAck'", ",", "sessionId", "=", "sessionId", ")", "return", "subdom_funcs" ]
30.809524
19.761905
def thorium(opts, functions, runners): ''' Load the thorium runtime modules ''' pack = {'__salt__': functions, '__runner__': runners, '__context__': {}} ret = LazyLoader(_module_dirs(opts, 'thorium'), opts, tag='thorium', pack=pack) ret.pack['__thorium__'] = ret return ret
[ "def", "thorium", "(", "opts", ",", "functions", ",", "runners", ")", ":", "pack", "=", "{", "'__salt__'", ":", "functions", ",", "'__runner__'", ":", "runners", ",", "'__context__'", ":", "{", "}", "}", "ret", "=", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'thorium'", ")", ",", "opts", ",", "tag", "=", "'thorium'", ",", "pack", "=", "pack", ")", "ret", ".", "pack", "[", "'__thorium__'", "]", "=", "ret", "return", "ret" ]
29.727273
18.818182
def UploadAccount(self, hash_algorithm, hash_key, accounts): """Uploads multiple accounts to Gitkit server. Args: hash_algorithm: string, algorithm to hash password. hash_key: string, base64-encoded key of the algorithm. accounts: array of accounts to be uploaded. Returns: Response of the API. """ param = { 'hashAlgorithm': hash_algorithm, 'signerKey': hash_key, 'users': accounts } # pylint does not recognize the return type of simplejson.loads # pylint: disable=maybe-no-member return self._InvokeGitkitApi('uploadAccount', param)
[ "def", "UploadAccount", "(", "self", ",", "hash_algorithm", ",", "hash_key", ",", "accounts", ")", ":", "param", "=", "{", "'hashAlgorithm'", ":", "hash_algorithm", ",", "'signerKey'", ":", "hash_key", ",", "'users'", ":", "accounts", "}", "# pylint does not recognize the return type of simplejson.loads", "# pylint: disable=maybe-no-member", "return", "self", ".", "_InvokeGitkitApi", "(", "'uploadAccount'", ",", "param", ")" ]
31.736842
18.526316
def write(self, *args, **kwargs):
        """Write that shows progress in the statusbar every <freq> cells"""

        self.progress_status()

        # Check aborted state; post a status message and return False if aborted
        if self.aborted:
            statustext = _("File saving aborted.")
            post_command_event(self.main_window, self.main_window.StatusBarMsg,
                               text=statustext)
            return False

        return self.parent_cls.write(self, *args, **kwargs)
[ "def", "write", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "progress_status", "(", ")", "# Check abortes state and raise StopIteration if aborted", "if", "self", ".", "aborted", ":", "statustext", "=", "_", "(", "\"File saving aborted.\"", ")", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "main_window", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")", "return", "False", "return", "self", ".", "parent_cls", ".", "write", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
37.230769
20.615385
def charge_credit_card(self, credit_card_psp_object: Model, amount: Money, client_ref: str) -> Tuple[bool, Model]: """ :param credit_card_psp_object: an instance representing the credit card in the psp :param amount: the amount to charge :param client_ref: a reference that will appear on the customer's credit card report :return: a tuple (success, payment_psp_object) """ pass
[ "def", "charge_credit_card", "(", "self", ",", "credit_card_psp_object", ":", "Model", ",", "amount", ":", "Money", ",", "client_ref", ":", "str", ")", "->", "Tuple", "[", "bool", ",", "Model", "]", ":", "pass" ]
53.375
27.625
def eval_multi(self, inc_epoch=True): """ Run the evaluation on multiple attacks. """ sess = self.sess preds = self.preds x = self.x_pre y = self.y X_train = self.X_train Y_train = self.Y_train X_test = self.X_test Y_test = self.Y_test writer = self.writer self.summary = tf.Summary() report = {} # Evaluate on train set subsample_factor = 100 X_train_subsampled = X_train[::subsample_factor] Y_train_subsampled = Y_train[::subsample_factor] acc_train = model_eval(sess, x, y, preds, X_train_subsampled, Y_train_subsampled, args=self.eval_params) self.log_value('train_accuracy_subsampled', acc_train, 'Clean accuracy, subsampled train') report['train'] = acc_train # Evaluate on the test set acc = model_eval(sess, x, y, preds, X_test, Y_test, args=self.eval_params) self.log_value('test_accuracy_natural', acc, 'Clean accuracy, natural test') report['test'] = acc # Evaluate against adversarial attacks if self.epoch % self.hparams.eval_iters == 0: for att_type in self.attack_type_test: _, preds_adv = self.attacks[att_type] acc = self.eval_advs(x, y, preds_adv, X_test, Y_test, att_type) report[att_type] = acc if self.writer: writer.add_summary(self.summary, self.epoch) # Add examples of adversarial examples to the summary if self.writer and self.epoch % 20 == 0 and self.sum_op is not None: sm_val = self.sess.run(self.sum_op, feed_dict={x: X_test[:self.batch_size], y: Y_test[:self.batch_size]}) if self.writer: writer.add_summary(sm_val) self.epoch += 1 if inc_epoch else 0 return report
[ "def", "eval_multi", "(", "self", ",", "inc_epoch", "=", "True", ")", ":", "sess", "=", "self", ".", "sess", "preds", "=", "self", ".", "preds", "x", "=", "self", ".", "x_pre", "y", "=", "self", ".", "y", "X_train", "=", "self", ".", "X_train", "Y_train", "=", "self", ".", "Y_train", "X_test", "=", "self", ".", "X_test", "Y_test", "=", "self", ".", "Y_test", "writer", "=", "self", ".", "writer", "self", ".", "summary", "=", "tf", ".", "Summary", "(", ")", "report", "=", "{", "}", "# Evaluate on train set", "subsample_factor", "=", "100", "X_train_subsampled", "=", "X_train", "[", ":", ":", "subsample_factor", "]", "Y_train_subsampled", "=", "Y_train", "[", ":", ":", "subsample_factor", "]", "acc_train", "=", "model_eval", "(", "sess", ",", "x", ",", "y", ",", "preds", ",", "X_train_subsampled", ",", "Y_train_subsampled", ",", "args", "=", "self", ".", "eval_params", ")", "self", ".", "log_value", "(", "'train_accuracy_subsampled'", ",", "acc_train", ",", "'Clean accuracy, subsampled train'", ")", "report", "[", "'train'", "]", "=", "acc_train", "# Evaluate on the test set", "acc", "=", "model_eval", "(", "sess", ",", "x", ",", "y", ",", "preds", ",", "X_test", ",", "Y_test", ",", "args", "=", "self", ".", "eval_params", ")", "self", ".", "log_value", "(", "'test_accuracy_natural'", ",", "acc", ",", "'Clean accuracy, natural test'", ")", "report", "[", "'test'", "]", "=", "acc", "# Evaluate against adversarial attacks", "if", "self", ".", "epoch", "%", "self", ".", "hparams", ".", "eval_iters", "==", "0", ":", "for", "att_type", "in", "self", ".", "attack_type_test", ":", "_", ",", "preds_adv", "=", "self", ".", "attacks", "[", "att_type", "]", "acc", "=", "self", ".", "eval_advs", "(", "x", ",", "y", ",", "preds_adv", ",", "X_test", ",", "Y_test", ",", "att_type", ")", "report", "[", "att_type", "]", "=", "acc", "if", "self", ".", "writer", ":", "writer", ".", "add_summary", "(", "self", ".", "summary", ",", "self", ".", "epoch", ")", "# Add examples of adversarial examples to the summary", "if", "self", ".", "writer", "and", "self", ".", "epoch", "%", "20", "==", "0", "and", "self", ".", "sum_op", "is", "not", "None", ":", "sm_val", "=", "self", ".", "sess", ".", "run", "(", "self", ".", "sum_op", ",", "feed_dict", "=", "{", "x", ":", "X_test", "[", ":", "self", ".", "batch_size", "]", ",", "y", ":", "Y_test", "[", ":", "self", ".", "batch_size", "]", "}", ")", "if", "self", ".", "writer", ":", "writer", ".", "add_summary", "(", "sm_val", ")", "self", ".", "epoch", "+=", "1", "if", "inc_epoch", "else", "0", "return", "report" ]
32.545455
17.781818
def get_value(self, key, default=None, nested=True, decrypt=True): """ Retrieve a value from the configuration based on its key. The key may be nested. :param str key: A path to the value, with nested levels joined by '.' :param default: Value to return if the key does not exist (defaults to :code:`dict()`) :param bool nested: If :code:`True`, interpret the key as a dotted path through nested levels of the configuration. Defaults to :code:`True`. :param bool decrypt: If :code:`True`, decrypt an encrypted value before returning (if encrypted). Defaults to :code:`True`. """ if default is None: default = {} key = key.lstrip() if key.endswith("."): key = key[:-1] if nested: path = key.split(".") curr = self.settings for p in path[:-1]: curr = curr.get(p, {}) try: value = curr[path[-1]] except KeyError: return default if decrypt: value = self.decrypt(value, path) return value else: return self.settings.get(key, default)
[ "def", "get_value", "(", "self", ",", "key", ",", "default", "=", "{", "}", ",", "nested", "=", "True", ",", "decrypt", "=", "True", ")", ":", "key", "=", "key", ".", "lstrip", "(", ")", "if", "key", ".", "endswith", "(", "\".\"", ")", ":", "key", "=", "key", "[", ":", "-", "1", "]", "if", "nested", ":", "path", "=", "key", ".", "split", "(", "\".\"", ")", "curr", "=", "self", ".", "settings", "for", "p", "in", "path", "[", ":", "-", "1", "]", ":", "curr", "=", "curr", ".", "get", "(", "p", ",", "{", "}", ")", "try", ":", "value", "=", "curr", "[", "path", "[", "-", "1", "]", "]", "except", "KeyError", ":", "return", "default", "value", "=", "self", ".", "decrypt", "(", "value", ",", "path", ")", "return", "value", "else", ":", "return", "self", ".", "settings", ".", "get", "(", "key", ",", "default", ")" ]
37.384615
17.769231
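A minimal sketch of the dotted-path lookup above, using a plain dict in place of `self.settings` and skipping decryption; the settings content is made up:

```
settings = {"db": {"primary": {"host": "localhost", "port": 5432}}}

def lookup(settings, key, default=None):
    path = key.split(".")
    curr = settings
    for p in path[:-1]:
        curr = curr.get(p, {})  # missing intermediate levels collapse to {}
    try:
        return curr[path[-1]]
    except KeyError:
        return default

print(lookup(settings, "db.primary.host"))   # localhost
print(lookup(settings, "db.replica.host"))   # None (the default)
```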
def _form_onset_offset_fronts(ons_or_offs, sample_rate_hz, threshold_ms=20): """ Takes an array of onsets or offsets (shape = [nfrequencies, nsamples], where a 1 corresponds to an on/offset, and samples are 0 otherwise), and returns a new array of the same shape, where each 1 has been replaced by either a 0, if the on/offset has been discarded, or a non-zero positive integer, such that each front within the array has a unique ID - for example, all 2s in the array will be the front for on/offset front 2, and all the 15s will be the front for on/offset front 15, etc. Due to implementation details, there will be no 1 IDs. """ threshold_s = threshold_ms / 1000 threshold_samples = sample_rate_hz * threshold_s ons_or_offs = np.copy(ons_or_offs) claimed = [] this_id = 2 # For each frequency, for frequency_index, row in enumerate(ons_or_offs[:, :]): ones = np.reshape(np.where(row == 1), (-1,)) # for each 1 in that frequency, for top_level_frequency_one_index in ones: claimed.append((frequency_index, top_level_frequency_one_index)) found_a_front = False # for each frequencies[i:], for other_frequency_index, other_row in enumerate(ons_or_offs[frequency_index + 1:, :], start=frequency_index + 1): # for each non-claimed 1 which is less than threshold_ms away in time, upper_limit_index = top_level_frequency_one_index + threshold_samples lower_limit_index = top_level_frequency_one_index - threshold_samples other_ones = np.reshape(np.where(other_row == 1), (-1,)) # Get the indexes of all the 1s in row tmp = np.reshape(np.where((other_ones >= lower_limit_index) # Get the indexes in the other_ones array of all items in bounds & (other_ones <= upper_limit_index)), (-1,)) other_ones = other_ones[tmp] # Get the indexes of all the 1s in the row that are in bounds if len(other_ones) > 0: unclaimed_idx = other_ones[0] # Take the first one claimed.append((other_frequency_index, unclaimed_idx)) elif len(claimed) < 3: # revert the top-most 1 to 0 ons_or_offs[frequency_index, top_level_frequency_one_index] = 0 claimed = [] break # Break from the for-each-frequencies[i:] loop so we can move on to the next item in the top-most freq elif len(claimed) >= 3: found_a_front = True # this group of so-far-claimed forms a front claimed_as_indexes = tuple(np.array(claimed).T) ons_or_offs[claimed_as_indexes] = this_id this_id += 1 claimed = [] break # Move on to the next item in the top-most array # If we walked off the bottom of the frequency axis while still collecting matches, handle the remaining claimed group here if len(claimed) >= 3: claimed_as_indexes = tuple(np.array(claimed).T) ons_or_offs[claimed_as_indexes] = this_id this_id += 1 claimed = [] elif found_a_front: this_id += 1 else: ons_or_offs[frequency_index, top_level_frequency_one_index] = 0 claimed = [] return ons_or_offs
[ "def", "_form_onset_offset_fronts", "(", "ons_or_offs", ",", "sample_rate_hz", ",", "threshold_ms", "=", "20", ")", ":", "threshold_s", "=", "threshold_ms", "/", "1000", "threshold_samples", "=", "sample_rate_hz", "*", "threshold_s", "ons_or_offs", "=", "np", ".", "copy", "(", "ons_or_offs", ")", "claimed", "=", "[", "]", "this_id", "=", "2", "# For each frequency,", "for", "frequency_index", ",", "row", "in", "enumerate", "(", "ons_or_offs", "[", ":", ",", ":", "]", ")", ":", "ones", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "row", "==", "1", ")", ",", "(", "-", "1", ",", ")", ")", "# for each 1 in that frequency,", "for", "top_level_frequency_one_index", "in", "ones", ":", "claimed", ".", "append", "(", "(", "frequency_index", ",", "top_level_frequency_one_index", ")", ")", "found_a_front", "=", "False", "# for each frequencies[i:],", "for", "other_frequency_index", ",", "other_row", "in", "enumerate", "(", "ons_or_offs", "[", "frequency_index", "+", "1", ":", ",", ":", "]", ",", "start", "=", "frequency_index", "+", "1", ")", ":", "# for each non-claimed 1 which is less than theshold_ms away in time,", "upper_limit_index", "=", "top_level_frequency_one_index", "+", "threshold_samples", "lower_limit_index", "=", "top_level_frequency_one_index", "-", "threshold_samples", "other_ones", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "other_row", "==", "1", ")", ",", "(", "-", "1", ",", ")", ")", "# Get the indexes of all the 1s in row", "tmp", "=", "np", ".", "reshape", "(", "np", ".", "where", "(", "(", "other_ones", ">=", "lower_limit_index", ")", "# Get the indexes in the other_ones array of all items in bounds", "&", "(", "other_ones", "<=", "upper_limit_index", ")", ")", ",", "(", "-", "1", ",", ")", ")", "other_ones", "=", "other_ones", "[", "tmp", "]", "# Get the indexes of all the 1s in the row that are in bounds", "if", "len", "(", "other_ones", ")", ">", "0", ":", "unclaimed_idx", "=", "other_ones", "[", "0", "]", "# Take the first one", "claimed", ".", "append", "(", "(", "other_frequency_index", ",", "unclaimed_idx", ")", ")", "elif", "len", "(", "claimed", ")", "<", "3", ":", "# revert the top-most 1 to 0", "ons_or_offs", "[", "frequency_index", ",", "top_level_frequency_one_index", "]", "=", "0", "claimed", "=", "[", "]", "break", "# Break from the for-each-frequencies[i:] loop so we can move on to the next item in the top-most freq", "elif", "len", "(", "claimed", ")", ">=", "3", ":", "found_a_front", "=", "True", "# this group of so-far-claimed forms a front", "claimed_as_indexes", "=", "tuple", "(", "np", ".", "array", "(", "claimed", ")", ".", "T", ")", "ons_or_offs", "[", "claimed_as_indexes", "]", "=", "this_id", "this_id", "+=", "1", "claimed", "=", "[", "]", "break", "# Move on to the next item in the top-most array", "# If we never found a frequency that did not have a matching offset, handle that case here", "if", "len", "(", "claimed", ")", ">=", "3", ":", "claimed_as_indexes", "=", "tuple", "(", "np", ".", "array", "(", "claimed", ")", ".", "T", ")", "ons_or_offs", "[", "claimed_as_indexes", "]", "=", "this_id", "this_id", "+=", "1", "claimed", "=", "[", "]", "elif", "found_a_front", ":", "this_id", "+=", "1", "else", ":", "ons_or_offs", "[", "frequency_index", ",", "top_level_frequency_one_index", "]", "=", "0", "claimed", "=", "[", "]", "return", "ons_or_offs" ]
53.123077
29.246154
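The core windowing step above, keeping only the 1s in a neighbouring row that fall within `threshold_samples` of the current onset, can be seen in isolation; the arrays and numbers here are made up:

```
import numpy as np

# Hypothetical row of on/offsets and a current onset position.
other_row = np.array([0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
current_index = 4
threshold_samples = 3

lower = current_index - threshold_samples      # inclusive lower bound
upper = current_index + threshold_samples      # inclusive upper bound
other_ones = np.flatnonzero(other_row == 1)    # indexes of all 1s: [1, 4, 8]
in_bounds = other_ones[(other_ones >= lower) & (other_ones <= upper)]
print(in_bounds)  # [1 4] -- index 8 is more than threshold_samples away
```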
def set_operator(self, operator): """Set the current :ref:`OPERATOR` to be used for all drawing operations. The default operator is :obj:`OVER <OPERATOR_OVER>`. :param operator: A :ref:`OPERATOR` string. """ cairo.cairo_set_operator(self._pointer, operator) self._check_status()
[ "def", "set_operator", "(", "self", ",", "operator", ")", ":", "cairo", ".", "cairo_set_operator", "(", "self", ".", "_pointer", ",", "operator", ")", "self", ".", "_check_status", "(", ")" ]
29.727273
17.454545
def _on_scan(_loop, adapter, _adapter_id, info, expiration_time): """Callback when a new device is seen.""" info['validity_period'] = expiration_time adapter.notify_event_nowait(info.get('connection_string'), 'device_seen', info)
[ "def", "_on_scan", "(", "_loop", ",", "adapter", ",", "_adapter_id", ",", "info", ",", "expiration_time", ")", ":", "info", "[", "'validity_period'", "]", "=", "expiration_time", "adapter", ".", "notify_event_nowait", "(", "info", ".", "get", "(", "'connection_string'", ")", ",", "'device_seen'", ",", "info", ")" ]
47.6
22.6
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None): ''' execute a command string over SSH, return the output ''' if executable is None: executable = '/bin/sh' sudo_user = self.sudo_user rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable) if not isinstance(stdout, (str, unicode)): out = ''.join(stdout.readlines()) else: out = stdout if not isinstance(stderr, (str, unicode)): err = ''.join(stderr.readlines()) else: err = stderr if rc is not None: return dict(rc=rc, stdout=out, stderr=err) else: return dict(stdout=out, stderr=err)
[ "def", "_low_level_exec_command", "(", "self", ",", "conn", ",", "cmd", ",", "tmp", ",", "sudoable", "=", "False", ",", "executable", "=", "None", ")", ":", "if", "executable", "is", "None", ":", "executable", "=", "'/bin/sh'", "sudo_user", "=", "self", ".", "sudo_user", "rc", ",", "stdin", ",", "stdout", ",", "stderr", "=", "conn", ".", "exec_command", "(", "cmd", ",", "tmp", ",", "sudo_user", ",", "sudoable", "=", "sudoable", ",", "executable", "=", "executable", ")", "if", "type", "(", "stdout", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "out", "=", "''", ".", "join", "(", "stdout", ".", "readlines", "(", ")", ")", "else", ":", "out", "=", "stdout", "if", "type", "(", "stderr", ")", "not", "in", "[", "str", ",", "unicode", "]", ":", "err", "=", "''", ".", "join", "(", "stderr", ".", "readlines", "(", ")", ")", "else", ":", "err", "=", "stderr", "if", "rc", "!=", "None", ":", "return", "dict", "(", "rc", "=", "rc", ",", "stdout", "=", "out", ",", "stderr", "=", "err", ")", "else", ":", "return", "dict", "(", "stdout", "=", "out", ",", "stderr", "=", "err", ")" ]
33.173913
23.869565
def load_schema(schema_path): ''' Returns a schema loaded from the file at `schema_path`. Will recursively load referenced schemas assuming they can be found in files in the same directory and named with the convention `<type_name>.avsc`. ''' with open(schema_path) as fd: schema = json.load(fd) schema_dir, schema_file = path.split(schema_path) return _load_schema(schema, schema_dir)
[ "def", "load_schema", "(", "schema_path", ")", ":", "with", "open", "(", "schema_path", ")", "as", "fd", ":", "schema", "=", "json", ".", "load", "(", "fd", ")", "schema_dir", ",", "schema_file", "=", "path", ".", "split", "(", "schema_path", ")", "return", "_load_schema", "(", "schema", ",", "schema_dir", ")" ]
34.916667
20.083333
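`_load_schema` itself is not shown, but the naming convention the docstring describes (`<type_name>.avsc` next to the parent schema) reduces to a small resolver. This sketch assumes referenced types appear as bare name strings; the function name is hypothetical:

```
import json
from os import path

def resolve_named_type(type_name, schema_dir):
    # Load a referenced schema from <schema_dir>/<type_name>.avsc.
    with open(path.join(schema_dir, type_name + ".avsc")) as fd:
        return json.load(fd)

# e.g. a field typed "Address" inside /schemas/user.avsc would be resolved
# from /schemas/Address.avsc:
# address_schema = resolve_named_type("Address", "/schemas")
```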
def expand_url(status): """Expand url on statuses. :param status: A tweepy status to expand urls. :type status: :class:`tweepy.models.Status` :returns: A string with expanded urls. :rtype: :class:`str` """ try: txt = get_full_text(status) for url in status.entities['urls']: txt = txt.replace(url['url'], url['expanded_url']) except Exception: # Fall back to treating the status as raw text and replacing # shortened t.co links manually. txt = status tco_pattern = re.compile(r'https://t.co/\S+') urls = tco_pattern.findall(txt) for url in urls: with urlopen(url) as resp: expanded_url = resp.url txt = txt.replace(url, expanded_url) return txt
[ "def", "expand_url", "(", "status", ")", ":", "try", ":", "txt", "=", "get_full_text", "(", "status", ")", "for", "url", "in", "status", ".", "entities", "[", "'urls'", "]", ":", "txt", "=", "txt", ".", "replace", "(", "url", "[", "'url'", "]", ",", "url", "[", "'expanded_url'", "]", ")", "except", ":", "# Manually replace", "txt", "=", "status", "tco_pattern", "=", "re", ".", "compile", "(", "r'https://t.co/\\S+'", ")", "urls", "=", "tco_pattern", ".", "findall", "(", "txt", ")", "for", "url", "in", "urls", ":", "with", "urlopen", "(", "url", ")", "as", "resp", ":", "expanded_url", "=", "resp", ".", "url", "txt", "=", "txt", ".", "replace", "(", "url", ",", "expanded_url", ")", "return", "txt" ]
28.458333
15.166667
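The fallback branch matches shortened links with a regex before resolving them over the network; the pattern itself can be exercised offline (the text is made up):

```
import re

tco_pattern = re.compile(r'https://t.co/\S+')
txt = "new post https://t.co/abc123 and https://t.co/xyz789"
print(tco_pattern.findall(txt))
# ['https://t.co/abc123', 'https://t.co/xyz789']
```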
def get_asset_content(self, asset_content_id): """Gets the ``AssetContent`` specified by its ``Id``. In plenary mode, the exact ``Id`` is found or a ``NotFound`` results. Otherwise, the returned ``AssetContent`` may have a different ``Id`` than requested, such as the case where a duplicate ``Id`` was assigned to an ``AssetContent`` and retained for compatibility. :param asset_content_id: the ``Id`` of the ``AssetContent`` to retrieve :type asset_content_id: ``osid.id.Id`` :return: the returned ``AssetContent`` :rtype: ``osid.repository.Asset`` :raise: ``NotFound`` -- no ``AssetContent`` found with the given ``Id`` :raise: ``NullArgument`` -- ``asset_content_id`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) asset_content_identifier = ObjectId(self._get_id(asset_content_id, 'repository').get_identifier()) result = collection.find_one( dict({'assetContents._id': {'$in': [asset_content_identifier]}}, **self._view_filter())) # if a match is not found, NotFound exception will be thrown by find_one, so # the below should always work asset_content_map = [ac for ac in result['assetContents'] if ac['_id'] == asset_content_identifier][0] return objects.AssetContent(osid_object_map=asset_content_map, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_asset_content", "(", "self", ",", "asset_content_id", ")", ":", "collection", "=", "JSONClientValidated", "(", "'repository'", ",", "collection", "=", "'Asset'", ",", "runtime", "=", "self", ".", "_runtime", ")", "asset_content_identifier", "=", "ObjectId", "(", "self", ".", "_get_id", "(", "asset_content_id", ",", "'repository'", ")", ".", "get_identifier", "(", ")", ")", "result", "=", "collection", ".", "find_one", "(", "dict", "(", "{", "'assetContents._id'", ":", "{", "'$in'", ":", "[", "asset_content_identifier", "]", "}", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", "# if a match is not found, NotFound exception will be thrown by find_one, so", "# the below should always work", "asset_content_map", "=", "[", "ac", "for", "ac", "in", "result", "[", "'assetContents'", "]", "if", "ac", "[", "'_id'", "]", "==", "asset_content_identifier", "]", "[", "0", "]", "return", "objects", ".", "AssetContent", "(", "osid_object_map", "=", "asset_content_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
56.225806
27.129032
def LoadSecondaryConfig(self, filename=None, parser=None): """Loads an additional configuration file. The configuration system has the concept of a single Primary configuration file, and multiple secondary files. The primary configuration file is the main file that is used by the program. Any writebacks will only be made to the primary configuration file. Secondary files contain additional configuration data which will be merged into the configuration system. This method adds an additional configuration file. Args: filename: The configuration file that will be loaded. For example file:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR. parser: An optional parser can be given. In this case, the parser's data will be loaded directly. Returns: The parser used to parse this configuration source. Raises: ValueError: if both filename and parser arguments are None. ConfigFileNotFound: If a specified included file was not found. """ if filename: # Maintain a stack of config file locations in loaded order. self.files.append(filename) parser_cls = self.GetParserFromFilename(filename) parser = parser_cls(filename=filename) logging.debug("Loading configuration from %s", filename) self.secondary_config_parsers.append(parser) elif parser is None: raise ValueError("Must provide either a filename or a parser.") clone = self.MakeNewConfig() clone.MergeData(parser.RawData()) clone.initialized = True for file_to_load in clone["Config.includes"]: # We can not include a relative file from a config which does not have # path. if not os.path.isabs(file_to_load): if not filename: raise ConfigFileNotFound( "While loading %s: Unable to include a relative path (%s) " "from a config without a filename" % (filename, file_to_load)) # If the included path is relative, we take it as relative to the # current path of the config. file_to_load = os.path.join(os.path.dirname(filename), file_to_load) clone_parser = clone.LoadSecondaryConfig(file_to_load) # If an include file is specified but it was not found, raise an error. if not clone_parser.parsed: raise ConfigFileNotFound( "Unable to load include file %s" % file_to_load) self.MergeData(clone.raw_data) self.files.extend(clone.files) return parser
[ "def", "LoadSecondaryConfig", "(", "self", ",", "filename", "=", "None", ",", "parser", "=", "None", ")", ":", "if", "filename", ":", "# Maintain a stack of config file locations in loaded order.", "self", ".", "files", ".", "append", "(", "filename", ")", "parser_cls", "=", "self", ".", "GetParserFromFilename", "(", "filename", ")", "parser", "=", "parser_cls", "(", "filename", "=", "filename", ")", "logging", ".", "debug", "(", "\"Loading configuration from %s\"", ",", "filename", ")", "self", ".", "secondary_config_parsers", ".", "append", "(", "parser", ")", "elif", "parser", "is", "None", ":", "raise", "ValueError", "(", "\"Must provide either a filename or a parser.\"", ")", "clone", "=", "self", ".", "MakeNewConfig", "(", ")", "clone", ".", "MergeData", "(", "parser", ".", "RawData", "(", ")", ")", "clone", ".", "initialized", "=", "True", "for", "file_to_load", "in", "clone", "[", "\"Config.includes\"", "]", ":", "# We can not include a relative file from a config which does not have", "# path.", "if", "not", "os", ".", "path", ".", "isabs", "(", "file_to_load", ")", ":", "if", "not", "filename", ":", "raise", "ConfigFileNotFound", "(", "\"While loading %s: Unable to include a relative path (%s) \"", "\"from a config without a filename\"", "%", "(", "filename", ",", "file_to_load", ")", ")", "# If the included path is relative, we take it as relative to the", "# current path of the config.", "file_to_load", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "file_to_load", ")", "clone_parser", "=", "clone", ".", "LoadSecondaryConfig", "(", "file_to_load", ")", "# If an include file is specified but it was not found, raise an error.", "if", "not", "clone_parser", ".", "parsed", ":", "raise", "ConfigFileNotFound", "(", "\"Unable to load include file %s\"", "%", "file_to_load", ")", "self", ".", "MergeData", "(", "clone", ".", "raw_data", ")", "self", ".", "files", ".", "extend", "(", "clone", ".", "files", ")", "return", "parser" ]
38.984127
24.365079
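The relative-include handling above reduces to a single stdlib idiom: resolve the included path against the directory of the including file. The paths in this sketch are hypothetical:

```
import os

filename = "/etc/grr/server.yaml"        # the config currently being loaded
file_to_load = "extra/labels.yaml"       # a relative include from that config

if not os.path.isabs(file_to_load):
    file_to_load = os.path.join(os.path.dirname(filename), file_to_load)

print(file_to_load)  # /etc/grr/extra/labels.yaml
```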
def cinder(*arg): """ Cinder decorator for registering a function to process Cinder notifications. If the event_type contains a wildcard, {pattern: function} is put into the wildcard process dict; otherwise {event_type: function} is put into the process dict. :param arg: event_type of notification """ check_event_type(Openstack.Cinder, *arg) event_type = arg[0] def decorator(func): if event_type.find("*") != -1: event_type_pattern = pre_compile(event_type) cinder_customer_process_wildcard[event_type_pattern] = func else: cinder_customer_process[event_type] = func log.info("add function {0} to process event_type:{1}".format(func.__name__, event_type)) @functools.wraps(func) def wrapper(*args, **kwargs): func(*args, **kwargs) return wrapper return decorator
[ "def", "cinder", "(", "*", "arg", ")", ":", "check_event_type", "(", "Openstack", ".", "Cinder", ",", "*", "arg", ")", "event_type", "=", "arg", "[", "0", "]", "def", "decorator", "(", "func", ")", ":", "if", "event_type", ".", "find", "(", "\"*\"", ")", "!=", "-", "1", ":", "event_type_pattern", "=", "pre_compile", "(", "event_type", ")", "cinder_customer_process_wildcard", "[", "event_type_pattern", "]", "=", "func", "else", ":", "cinder_customer_process", "[", "event_type", "]", "=", "func", "log", ".", "info", "(", "\"add function {0} to process event_type:{1}\"", ".", "format", "(", "func", ".", "__name__", ",", "event_type", ")", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
31.703704
22.518519
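Usage looks like this; the event types and handler bodies are hypothetical, and the handler signature (a single notification argument) is an assumption, but the exact-vs-wildcard registration matches the dispatch above:

```
@cinder("volume.create.end")          # exact match -> cinder_customer_process
def on_volume_created(notification):  # hypothetical handler
    print("volume created:", notification)

@cinder("volume.delete.*")            # wildcard -> compiled pattern stored in
def on_volume_deleted(notification):  # cinder_customer_process_wildcard
    print("volume deleted:", notification)
```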
def mjd2gmst(mjd): """Convert Modified Julian Date (MJD = JD - 2400000.5) to GMST. Taken from P.T. Wallace routines. """ tu = (mjd - MJD0) / (100*DPY) st = math.fmod(mjd, 1.0) * D2PI + (24110.54841 + (8640184.812866 + (0.093104 - 6.2e-6 * tu) * tu) * tu) * DS2R w = math.fmod(st, D2PI) if w >= 0.0: return w else: return w + D2PI
[ "def", "mjd2gmst", "(", "mjd", ")", ":", "tu", "=", "(", "mjd", "-", "MJD0", ")", "/", "(", "100", "*", "DPY", ")", "st", "=", "math", ".", "fmod", "(", "mjd", ",", "1.0", ")", "*", "D2PI", "+", "(", "24110.54841", "+", "(", "8640184.812866", "+", "(", "0.093104", "-", "6.2e-6", "*", "tu", ")", "*", "tu", ")", "*", "tu", ")", "*", "DS2R", "w", "=", "math", ".", "fmod", "(", "st", ",", "D2PI", ")", "if", "w", ">=", "0.0", ":", "return", "w", "else", ":", "return", "w", "+", "D2PI" ]
22.5625
25.75
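A self-contained version of the formula, with the module constants filled in from their conventional SLALIB values; these constant values are an assumption, since `MJD0`, `DPY`, `D2PI` and `DS2R` are defined elsewhere in the module:

```
import math

MJD0 = 51544.5            # MJD of epoch J2000.0 (assumed)
DPY  = 365.25             # days per Julian year (assumed)
D2PI = 2.0 * math.pi      # two pi
DS2R = D2PI / 86400.0     # seconds of time to radians

def mjd2gmst(mjd):
    tu = (mjd - MJD0) / (100 * DPY)  # Julian centuries since J2000
    st = math.fmod(mjd, 1.0) * D2PI + (
        24110.54841 + (8640184.812866 + (0.093104 - 6.2e-6 * tu) * tu) * tu
    ) * DS2R
    w = math.fmod(st, D2PI)
    return w if w >= 0.0 else w + D2PI

print(mjd2gmst(58849.0))  # GMST in radians for MJD 58849.0 (2020-01-01)
```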
def fit(self, X=None, y=None, **kwargs): """Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs of each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. """ context = { 'X': X, 'y': y } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Fitting block %s", block_name) try: fit_args = self._get_block_args(block_name, block.fit_args, context) block.fit(**fit_args) except Exception: LOGGER.exception("Exception caught fitting MLBlock %s", block_name) raise if block_name != last_block_name: LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise
[ "def", "fit", "(", "self", ",", "X", "=", "None", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "context", "=", "{", "'X'", ":", "X", ",", "'y'", ":", "y", "}", "context", ".", "update", "(", "kwargs", ")", "last_block_name", "=", "list", "(", "self", ".", "blocks", ".", "keys", "(", ")", ")", "[", "-", "1", "]", "for", "block_name", ",", "block", "in", "self", ".", "blocks", ".", "items", "(", ")", ":", "LOGGER", ".", "debug", "(", "\"Fitting block %s\"", ",", "block_name", ")", "try", ":", "fit_args", "=", "self", ".", "_get_block_args", "(", "block_name", ",", "block", ".", "fit_args", ",", "context", ")", "block", ".", "fit", "(", "*", "*", "fit_args", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "\"Exception caught fitting MLBlock %s\"", ",", "block_name", ")", "raise", "if", "block_name", "!=", "last_block_name", ":", "LOGGER", ".", "debug", "(", "\"Producing block %s\"", ",", "block_name", ")", "try", ":", "produce_args", "=", "self", ".", "_get_block_args", "(", "block_name", ",", "block", ".", "produce_args", ",", "context", ")", "outputs", "=", "block", ".", "produce", "(", "*", "*", "produce_args", ")", "output_dict", "=", "self", ".", "_get_outputs", "(", "block_name", ",", "outputs", ",", "block", ".", "produce_output", ")", "context", ".", "update", "(", "output_dict", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "\"Exception caught producing MLBlock %s\"", ",", "block_name", ")", "raise" ]
42.608696
24.913043
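The context-threading idea, where outputs of each block become candidate inputs for the next, can be sketched with plain dicts and signature inspection; the block functions here are hypothetical stand-ins for MLBlocks:

```
import inspect

def scale(X):
    return {"X": [x * 2 for x in X]}

def shift(X, offset):
    return {"X": [x + offset for x in X]}

context = {"X": [1, 2, 3], "offset": 10}
for block in (scale, shift):
    # Pick from the context only the arguments this block declares.
    args = {name: context[name] for name in inspect.signature(block).parameters}
    context.update(block(**args))  # captured outputs feed later blocks

print(context["X"])  # [12, 14, 16]
```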
def open_machine(self, settings_file): """Opens a virtual machine from the existing settings file. The opened machine remains unregistered until you call :py:func:`register_machine` . The specified settings file name must be fully qualified. The file must exist and be a valid machine XML settings file whose contents will be used to construct the machine object. :py:func:`IMachine.settings_modified` will return @c false for the opened machine, until any of machine settings are changed. in settings_file of type str Name of the machine settings file. return machine of type :class:`IMachine` Opened machine object. raises :class:`VBoxErrorFileError` Settings file name invalid, not found or sharing violation. """ if not isinstance(settings_file, basestring): raise TypeError("settings_file can only be an instance of type basestring") machine = self._call("openMachine", in_p=[settings_file]) machine = IMachine(machine) return machine
[ "def", "open_machine", "(", "self", ",", "settings_file", ")", ":", "if", "not", "isinstance", "(", "settings_file", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"settings_file can only be an instance of type basestring\"", ")", "machine", "=", "self", ".", "_call", "(", "\"openMachine\"", ",", "in_p", "=", "[", "settings_file", "]", ")", "machine", "=", "IMachine", "(", "machine", ")", "return", "machine" ]
38.5
18.966667
def _str_to_type(cls, context_class, string): """ :type context_class: type :type string: str :rtype: type """ if string in cls._TYPE_NAMES_BUILTIN: return eval(string) module_ = sys.modules[context_class.__module__] if hasattr(module_, string): return getattr(module_, string) return cls._str_to_type_from_member_module(module_, string)
[ "def", "_str_to_type", "(", "cls", ",", "context_class", ",", "string", ")", ":", "if", "string", "in", "cls", ".", "_TYPE_NAMES_BUILTIN", ":", "return", "eval", "(", "string", ")", "module_", "=", "sys", ".", "modules", "[", "context_class", ".", "__module__", "]", "if", "hasattr", "(", "module_", ",", "string", ")", ":", "return", "getattr", "(", "module_", ",", "string", ")", "return", "cls", ".", "_str_to_type_from_member_module", "(", "module_", ",", "string", ")" ]
24.823529
18.235294
def _mouseUp(x, y, button): """Send the mouse up event to Windows by calling the mouse_event() win32 function. Args: x (int): The x position of the mouse event. y (int): The y position of the mouse event. button (str): The mouse button, either 'left', 'middle', or 'right' Returns: None """ upEvents = {'left': MOUSEEVENTF_LEFTUP, 'middle': MOUSEEVENTF_MIDDLEUP, 'right': MOUSEEVENTF_RIGHTUP} assert button in upEvents, "button argument not in ('left', 'middle', 'right')" try: _sendMouseEvent(upEvents[button], x, y) except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60 pass
[ "def", "_mouseUp", "(", "x", ",", "y", ",", "button", ")", ":", "if", "button", "==", "'left'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_LEFTUP", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'middle'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_MIDDLEUP", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "elif", "button", "==", "'right'", ":", "try", ":", "_sendMouseEvent", "(", "MOUSEEVENTF_RIGHTUP", ",", "x", ",", "y", ")", "except", "(", "PermissionError", ",", "OSError", ")", ":", "# TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60", "pass", "else", ":", "assert", "False", ",", "\"button argument not in ('left', 'middle', 'right')\"" ]
41.206897
31.241379
def make_pattern(self, pattern, listsep=','): """Make pattern for a data type with the specified cardinality. .. code-block:: python yes_no_pattern = r"yes|no" many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern) :param pattern: Regular expression for type (as string). :param listsep: List separator for multiple items (as string, optional) :return: Regular expression pattern for type with cardinality. """ if self is Cardinality.one: return pattern elif self is Cardinality.zero_or_one: return self.schema % pattern else: return self.schema % (pattern, listsep, pattern)
[ "def", "make_pattern", "(", "self", ",", "pattern", ",", "listsep", "=", "','", ")", ":", "if", "self", "is", "Cardinality", ".", "one", ":", "return", "pattern", "elif", "self", "is", "Cardinality", ".", "zero_or_one", ":", "return", "self", ".", "schema", "%", "pattern", "else", ":", "return", "self", ".", "schema", "%", "(", "pattern", ",", "listsep", ",", "pattern", ")" ]
39.277778
18.944444
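The `(pattern, sep, pattern)` substitution in the last branch can be illustrated end to end. The `schema` string below is a made-up stand-in; the real `Cardinality.*.schema` strings are defined on the enum itself:

```
import re

# Hypothetical stand-in for Cardinality.one_or_more.schema, illustrating
# the (pattern, listsep, pattern) substitution seen above.
schema = r"(%s)(?:%s(%s))*"

yes_no = r"yes|no"
one_or_more = schema % (yes_no, ",", yes_no)
print(re.fullmatch(one_or_more, "yes,no,yes") is not None)  # True
```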
def _make_param_matcher(annotation, kind=None): ''' For a given annotation, return a function which, when called on a function argument, returns true if that argument matches the annotation. If the annotation is a type, it calls isinstance; if it's a callable, it calls it on the object; otherwise, it performs a value comparison. If the parameter is variadic (*args) and the annotation is a type, the matcher will attempt to match each of the arguments in args ''' if isinstance(annotation, type) or ( isinstance(annotation, tuple) and all(isinstance(a, type) for a in annotation)): if kind is Parameter.VAR_POSITIONAL: return (lambda args: all(isinstance(x, annotation) for x in args)) else: return (lambda x: isinstance(x, annotation)) elif callable(annotation): return annotation else: return (lambda x: x == annotation)
[ "def", "_make_param_matcher", "(", "annotation", ",", "kind", "=", "None", ")", ":", "if", "isinstance", "(", "annotation", ",", "type", ")", "or", "(", "isinstance", "(", "annotation", ",", "tuple", ")", "and", "all", "(", "isinstance", "(", "a", ",", "type", ")", "for", "a", "in", "annotation", ")", ")", ":", "if", "kind", "is", "Parameter", ".", "VAR_POSITIONAL", ":", "return", "(", "lambda", "args", ":", "all", "(", "isinstance", "(", "x", ",", "annotation", ")", "for", "x", "in", "args", ")", ")", "else", ":", "return", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "annotation", ")", ")", "elif", "callable", "(", "annotation", ")", ":", "return", "annotation", "else", ":", "return", "(", "lambda", "x", ":", "x", "==", "annotation", ")" ]
50.25
22.75
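The three matcher kinds (type, predicate, literal) behave like this in isolation; this is a trimmed standalone copy of the logic above, without the variadic case:

```
def make_matcher(annotation):
    if isinstance(annotation, type) or (
            isinstance(annotation, tuple)
            and all(isinstance(a, type) for a in annotation)):
        return lambda x: isinstance(x, annotation)   # type -> isinstance
    elif callable(annotation):
        return annotation                            # predicate -> call it
    else:
        return lambda x: x == annotation             # otherwise -> equality

print(make_matcher(int)(5))                # True  (isinstance check)
print(make_matcher(lambda x: x > 3)(5))    # True  (predicate)
print(make_matcher("yes")("no"))           # False (value comparison)
```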
def length(self,threshold=0.2,phys=False,ang=False,tdisrupt=None, **kwargs): """ NAME: length PURPOSE: calculate the length of the stream INPUT: threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream phys= (False) if True, return the length in physical kpc ang= (False) if True, return the length in sky angular arc length in degree coord - coordinate to return the density in ('apar' [default], 'll','ra','customra','phi') OUTPUT: length (rad for parallel angle; kpc for physical length; deg for sky arc length) HISTORY: 2015-12-22 - Written - Bovy (UofT) """ peak_dens= self.density_par(0.1,tdisrupt=tdisrupt,**kwargs) # assume that this is the peak try: result=\ optimize.brentq(lambda x: self.density_par(x, tdisrupt=tdisrupt, **kwargs)\ -peak_dens*threshold, 0.1,self._deltaAngleTrack) except RuntimeError: #pragma: no cover raise RuntimeError('Length could not be returned, because length method failed to find the threshold value') except ValueError: raise ValueError('Length could not be returned, because length method failed to initialize') if phys: # Need to now integrate length dXda= self._interpTrackX.derivative() dYda= self._interpTrackY.derivative() dZda= self._interpTrackZ.derivative() result= integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.\ +dYda(da)**2.\ +dZda(da)**2.), 0.,result)[0]*self._ro elif ang: # Need to now integrate length if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,0],-1) -self._interpolatedObsTrackLB[:,0]) > 0.: ll= dePeriod(self._interpolatedObsTrackLB[:,0][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi else: ll= dePeriod(self._interpolatedObsTrackLB[::-1,0][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,1],-1) -self._interpolatedObsTrackLB[:,1]) > 0.: bb= dePeriod(self._interpolatedObsTrackLB[:,1][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi else: bb= dePeriod(self._interpolatedObsTrackLB[::-1,1][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi dlda= interpolate.InterpolatedUnivariateSpline(\ self._interpolatedThetasTrack,ll,k=3).derivative() dbda= interpolate.InterpolatedUnivariateSpline(\ self._interpolatedThetasTrack,bb,k=3).derivative() result= integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.\ +dbda(da)**2.), 0.,result)[0] return result
[ "def", "length", "(", "self", ",", "threshold", "=", "0.2", ",", "phys", "=", "False", ",", "ang", "=", "False", ",", "tdisrupt", "=", "None", ",", "*", "*", "kwargs", ")", ":", "peak_dens", "=", "self", ".", "density_par", "(", "0.1", ",", "tdisrupt", "=", "tdisrupt", ",", "*", "*", "kwargs", ")", "# assume that this is the peak", "try", ":", "result", "=", "optimize", ".", "brentq", "(", "lambda", "x", ":", "self", ".", "density_par", "(", "x", ",", "tdisrupt", "=", "tdisrupt", ",", "*", "*", "kwargs", ")", "-", "peak_dens", "*", "threshold", ",", "0.1", ",", "self", ".", "_deltaAngleTrack", ")", "except", "RuntimeError", ":", "#pragma: no cover", "raise", "RuntimeError", "(", "'Length could not be returned, because length method failed to find the threshold value'", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Length could not be returned, because length method failed to initialize'", ")", "if", "phys", ":", "# Need to now integrate length", "dXda", "=", "self", ".", "_interpTrackX", ".", "derivative", "(", ")", "dYda", "=", "self", ".", "_interpTrackY", ".", "derivative", "(", ")", "dZda", "=", "self", ".", "_interpTrackZ", ".", "derivative", "(", ")", "result", "=", "integrate", ".", "quad", "(", "lambda", "da", ":", "numpy", ".", "sqrt", "(", "dXda", "(", "da", ")", "**", "2.", "+", "dYda", "(", "da", ")", "**", "2.", "+", "dZda", "(", "da", ")", "**", "2.", ")", ",", "0.", ",", "result", ")", "[", "0", "]", "*", "self", ".", "_ro", "elif", "ang", ":", "# Need to now integrate length", "if", "numpy", ".", "median", "(", "numpy", ".", "roll", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", ",", "-", "1", ")", "-", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", ")", ">", "0.", ":", "ll", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "*", "180.", "/", "numpy", ".", "pi", "else", ":", "ll", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ":", "-", "1", ",", "0", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "[", ":", ":", "-", "1", "]", "*", "180.", "/", "numpy", ".", "pi", "if", "numpy", ".", "median", "(", "numpy", ".", "roll", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", ",", "-", "1", ")", "-", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", ")", ">", "0.", ":", "bb", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "*", "180.", "/", "numpy", ".", "pi", "else", ":", "bb", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ":", "-", "1", ",", "1", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "[", ":", ":", "-", "1", "]", "*", "180.", "/", "numpy", ".", "pi", "dlda", "=", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "self", ".", "_interpolatedThetasTrack", ",", "ll", ",", "k", "=", "3", ")", ".", "derivative", "(", ")", "dbda", "=", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "self", ".", "_interpolatedThetasTrack", ",", "bb", ",", "k", "=", "3", ")", ".", "derivative", "(", ")", "result", "=", "integrate", ".", "quad", "(", "lambda", "da", ":", "numpy", ".", "sqrt", "(", "dlda", "(", "da", 
")", "**", "2.", "+", "dbda", "(", "da", ")", "**", "2.", ")", ",", "0.", ",", "result", ")", "[", "0", "]", "return", "result" ]
46.25
30.888889
def get(self, project): """Query the project status. Returns a ``CLAMData`` instance or raises an exception according to the returned HTTP Status code""" data = self.request(project + '/') if not isinstance(data, clam.common.data.CLAMData): raise Exception("Unable to retrieve CLAM Data") else: return data
[ "def", "get", "(", "self", ",", "project", ")", ":", "try", ":", "data", "=", "self", ".", "request", "(", "project", "+", "'/'", ")", "except", ":", "raise", "if", "not", "isinstance", "(", "data", ",", "clam", ".", "common", ".", "data", ".", "CLAMData", ")", ":", "raise", "Exception", "(", "\"Unable to retrieve CLAM Data\"", ")", "else", ":", "return", "data" ]
40.4
18.1
def kent_mean(dec=None, inc=None, di_block=None): """ Calculates the Kent mean and associated statistical parameters from either a list of declination values and a separate list of inclination values or from a di_block (a nested list of [dec,inc,1.0]). Returns a dictionary with the Kent mean and statistical parameters. Parameters ---------- dec: list of declinations inc: list of inclinations or di_block: a nested list of [dec,inc,1.0] A di_block can be provided instead of dec, inc lists in which case it will be used. Either dec, inc lists or a di_block need to be passed to the function. Returns ---------- kpars : dictionary containing Kent mean and associated statistics. Examples -------- Use lists of declination and inclination to calculate a Kent mean: >>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22]) {'Edec': 280.38683553668795, 'Einc': 64.236598921744289, 'Eta': 0.72982112760919715, 'Zdec': 40.824690028412761, 'Zeta': 6.7896823241008795, 'Zinc': 13.739412321974067, 'dec': 136.30838974272072, 'inc': 21.347784026899987, 'n': 4} Use a di_block to calculate a Kent mean (will give the same output as the example with the lists): >>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) """ if di_block is None: di_block = make_di_block(dec, inc) return pmag.dokent(di_block, len(di_block))
[ "def", "kent_mean", "(", "dec", "=", "None", ",", "inc", "=", "None", ",", "di_block", "=", "None", ")", ":", "if", "di_block", "is", "None", ":", "di_block", "=", "make_di_block", "(", "dec", ",", "inc", ")", "return", "pmag", ".", "dokent", "(", "di_block", ",", "len", "(", "di_block", ")", ")", "else", ":", "return", "pmag", ".", "dokent", "(", "di_block", ",", "len", "(", "di_block", ")", ")" ]
31.708333
23.583333
def http_keepalive(headers): """ Get HTTP keepalive value, either from the Keep-Alive header or a default value. @param headers: HTTP headers @type headers: dict @return: keepalive in seconds @rtype: int """ keepalive = headers.get("Keep-Alive") if keepalive is not None: try: # Strip the leading "timeout=" (8 characters) from the header # value before parsing the number of seconds. keepalive = int(keepalive[8:].strip()) except (ValueError, OverflowError): keepalive = DEFAULT_KEEPALIVE else: keepalive = DEFAULT_KEEPALIVE return keepalive
[ "def", "http_keepalive", "(", "headers", ")", ":", "keepalive", "=", "headers", ".", "get", "(", "\"Keep-Alive\"", ")", "if", "keepalive", "is", "not", "None", ":", "try", ":", "keepalive", "=", "int", "(", "keepalive", "[", "8", ":", "]", ".", "strip", "(", ")", ")", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "keepalive", "=", "DEFAULT_KEEPALIVE", "else", ":", "keepalive", "=", "DEFAULT_KEEPALIVE", "return", "keepalive" ]
27.052632
14
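The `[8:]` slice assumes a header value of the form `timeout=N...`; on a multi-parameter value the `int()` call fails and the default kicks in. A standalone demonstration, with an assumed value for the `DEFAULT_KEEPALIVE` constant:

```
DEFAULT_KEEPALIVE = 300  # assumed fallback; the real constant lives elsewhere

def parse_keepalive(value):
    # value[8:] cuts the 8-character "timeout=" prefix.
    try:
        return int(value[8:].strip())
    except (ValueError, OverflowError):
        return DEFAULT_KEEPALIVE

print(parse_keepalive("timeout=5"))           # 5
print(parse_keepalive("timeout=5, max=100"))  # 300: int() rejects ", max=100"
```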
def IsComposite(self): """Determines if the data type is composite. A composite data type consists of other data types. Returns: bool: True if the data type is composite, False otherwise. """ return bool(self.condition) or ( self.member_data_type_definition and self.member_data_type_definition.IsComposite())
[ "def", "IsComposite", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "condition", ")", "or", "(", "self", ".", "member_data_type_definition", "and", "self", ".", "member_data_type_definition", ".", "IsComposite", "(", ")", ")" ]
31.181818
17.090909
def get_resource_notification_session_for_bin(self, resource_receiver, bin_id, proxy): """Gets the resource notification session for the given bin. arg: resource_receiver (osid.resource.ResourceReceiver): notification callback arg: bin_id (osid.id.Id): the ``Id`` of the bin arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceNotificationSession) - ``a ResourceNotificationSession`` raise: NotFound - ``bin_id`` not found raise: NullArgument - ``resource_receiver, bin_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_resource_notification()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_notfication()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_resource_notification(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.ResourceNotificationSession(catalog_id=bin_id, proxy=proxy, runtime=self._runtime, receiver=resource_receiver)
[ "def", "get_resource_notification_session_for_bin", "(", "self", ",", "resource_receiver", ",", "bin_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_resource_notification", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found otherwise raise errors.NotFound", "##", "# pylint: disable=no-member", "return", "sessions", ".", "ResourceNotificationSession", "(", "catalog_id", "=", "bin_id", ",", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ",", "receiver", "=", "resource_receiver", ")" ]
50.962963
22.185185
def row(self, columnnames=[], exclude=False): """Return a tablerow object which includes (or excludes) the given columns. :class:`tablerow` makes it possible to get/put values in one or more rows. """ from .tablerow import tablerow return tablerow(self, columnnames, exclude)
[ "def", "row", "(", "self", ",", "columnnames", "=", "[", "]", ",", "exclude", "=", "False", ")", ":", "from", ".", "tablerow", "import", "tablerow", "return", "tablerow", "(", "self", ",", "columnnames", ",", "exclude", ")" ]
32.4
16.9
def read_handle(url, cache=None, mode="rb"): """Read from any URL with a file handle. Use this to get a handle to a file rather than eagerly load the data: ``` with read_handle(url) as handle: result = something.load(handle) result.do_something() ``` When program execution leaves this `with` block, the handle will be closed automatically. Args: url: a URL including scheme or a local path Returns: A file handle to the specified resource if it could be reached. The handle will be closed automatically once execution leaves this context. """ scheme = urlparse(url).scheme if cache == 'purge': _purge_cached(url) cache = None if _is_remote(scheme) and cache is None: cache = True log.debug("Cache not specified, enabling because resource is remote.") if cache: handle = _read_and_cache(url, mode=mode) else: if scheme in ("http", "https"): handle = _handle_web_url(url, mode=mode) elif scheme in ("gs",): handle = _handle_gfile(url, mode=mode) else: handle = open(url, mode=mode) try: yield handle finally: handle.close()
[ "def", "read_handle", "(", "url", ",", "cache", "=", "None", ",", "mode", "=", "\"rb\"", ")", ":", "scheme", "=", "urlparse", "(", "url", ")", ".", "scheme", "if", "cache", "==", "'purge'", ":", "_purge_cached", "(", "url", ")", "cache", "=", "None", "if", "_is_remote", "(", "scheme", ")", "and", "cache", "is", "None", ":", "cache", "=", "True", "log", ".", "debug", "(", "\"Cache not specified, enabling because resource is remote.\"", ")", "if", "cache", ":", "handle", "=", "_read_and_cache", "(", "url", ",", "mode", "=", "mode", ")", "else", ":", "if", "scheme", "in", "(", "\"http\"", ",", "\"https\"", ")", ":", "handle", "=", "_handle_web_url", "(", "url", ",", "mode", "=", "mode", ")", "elif", "scheme", "in", "(", "\"gs\"", ")", ":", "handle", "=", "_handle_gfile", "(", "url", ",", "mode", "=", "mode", ")", "else", ":", "handle", "=", "open", "(", "url", ",", "mode", "=", "mode", ")", "yield", "handle", "handle", ".", "close", "(", ")" ]
26.681818
23.25
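One subtlety in the scheme dispatch: `("gs",)` must be a one-element tuple. Without the trailing comma, `("gs")` is just the string `"gs"`, and `in` then does substring matching:

```
print("gs" in ("gs"))    # True, but so is...
print("g" in ("gs"))     # True  -- substring match against the string "gs"
print("g" in ("gs",))    # False -- membership in a one-element tuple
```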
def non_dependency (self): """ Returns properties that are not dependencies. """ result = [p for p in self.lazy_properties if not p.feature.dependency] result.extend(self.non_dependency_) return result
[ "def", "non_dependency", "(", "self", ")", ":", "result", "=", "[", "p", "for", "p", "in", "self", ".", "lazy_properties", "if", "not", "p", ".", "feature", ".", "dependency", "]", "result", ".", "extend", "(", "self", ".", "non_dependency_", ")", "return", "result" ]
39.333333
12.333333
def _param_from_config(key, data): ''' Return EC2 API parameters based on the given config data. Examples: 1. List of dictionaries >>> data = [ ... {'DeviceIndex': 0, 'SubnetId': 'subid0', ... 'AssociatePublicIpAddress': True}, ... {'DeviceIndex': 1, ... 'SubnetId': 'subid1', ... 'PrivateIpAddress': '192.168.1.128'} ... ] >>> _param_from_config('NetworkInterface', data) ... {'NetworkInterface.0.SubnetId': 'subid0', ... 'NetworkInterface.0.DeviceIndex': 0, ... 'NetworkInterface.1.SubnetId': 'subid1', ... 'NetworkInterface.1.PrivateIpAddress': '192.168.1.128', ... 'NetworkInterface.0.AssociatePublicIpAddress': 'true', ... 'NetworkInterface.1.DeviceIndex': 1} 2. List of nested dictionaries >>> data = [ ... {'DeviceName': '/dev/sdf', ... 'Ebs': { ... 'SnapshotId': 'dummy0', ... 'VolumeSize': 200, ... 'VolumeType': 'standard'}}, ... {'DeviceName': '/dev/sdg', ... 'Ebs': { ... 'SnapshotId': 'dummy1', ... 'VolumeSize': 100, ... 'VolumeType': 'standard'}} ... ] >>> _param_from_config('BlockDeviceMapping', data) ... {'BlockDeviceMapping.0.Ebs.VolumeType': 'standard', ... 'BlockDeviceMapping.1.Ebs.SnapshotId': 'dummy1', ... 'BlockDeviceMapping.0.Ebs.VolumeSize': 200, ... 'BlockDeviceMapping.0.Ebs.SnapshotId': 'dummy0', ... 'BlockDeviceMapping.1.Ebs.VolumeType': 'standard', ... 'BlockDeviceMapping.1.DeviceName': '/dev/sdg', ... 'BlockDeviceMapping.1.Ebs.VolumeSize': 100, ... 'BlockDeviceMapping.0.DeviceName': '/dev/sdf'} 3. Dictionary of dictionaries >>> data = { 'Arn': 'dummyarn', 'Name': 'Tester' } >>> _param_from_config('IamInstanceProfile', data) {'IamInstanceProfile.Arn': 'dummyarn', 'IamInstanceProfile.Name': 'Tester'} ''' param = {} if isinstance(data, dict): for k, v in six.iteritems(data): param.update(_param_from_config('{0}.{1}'.format(key, k), v)) elif isinstance(data, list) or isinstance(data, tuple): for idx, conf_item in enumerate(data): prefix = '{0}.{1}'.format(key, idx) param.update(_param_from_config(prefix, conf_item)) else: if isinstance(data, bool): # convert boolean True/False to 'true'/'false' param.update({key: six.text_type(data).lower()}) else: param.update({key: data}) return param
[ "def", "_param_from_config", "(", "key", ",", "data", ")", ":", "param", "=", "{", "}", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "data", ")", ":", "param", ".", "update", "(", "_param_from_config", "(", "'{0}.{1}'", ".", "format", "(", "key", ",", "k", ")", ",", "v", ")", ")", "elif", "isinstance", "(", "data", ",", "list", ")", "or", "isinstance", "(", "data", ",", "tuple", ")", ":", "for", "idx", ",", "conf_item", "in", "enumerate", "(", "data", ")", ":", "prefix", "=", "'{0}.{1}'", ".", "format", "(", "key", ",", "idx", ")", "param", ".", "update", "(", "_param_from_config", "(", "prefix", ",", "conf_item", ")", ")", "else", ":", "if", "isinstance", "(", "data", ",", "bool", ")", ":", "# convert boolean True/False to 'true'/'false'", "param", ".", "update", "(", "{", "key", ":", "six", ".", "text_type", "(", "data", ")", ".", "lower", "(", ")", "}", ")", "else", ":", "param", ".", "update", "(", "{", "key", ":", "data", "}", ")", "return", "param" ]
35.342857
18.142857
def enable_root_user(self): """ Enables login from any host for the root user and provides the user with a generated root password. """ uri = "/instances/%s/root" % self.id resp, body = self.manager.api.method_post(uri) return body["user"]["password"]
[ "def", "enable_root_user", "(", "self", ")", ":", "uri", "=", "\"/instances/%s/root\"", "%", "self", ".", "id", "resp", ",", "body", "=", "self", ".", "manager", ".", "api", ".", "method_post", "(", "uri", ")", "return", "body", "[", "\"user\"", "]", "[", "\"password\"", "]" ]
37.5
8.25
def add_data(self): """This function properly constructs a QR code's data string. It takes into account the interleaving pattern required by the standard. """ #Encode the data into a QR code self.buffer.write(self.binary_string(self.mode, 4)) self.buffer.write(self.get_data_length()) self.buffer.write(self.encode()) #Converts the buffer into "code word" integers. #The online debugger outputs them this way, makes #for easier comparisons. #s = self.buffer.getvalue() #for i in range(0, len(s), 8): # print(int(s[i:i+8], 2), end=',') #print() #Fix for issue #3: https://github.com/mnooner256/pyqrcode/issues/3# #I was performing the terminate_bits() part in the encoding. #As per the standard, terminating bits are only supposed to #be added after the bit stream is complete. I took that to #mean after the encoding, but actually it is after the entire #bit stream has been constructed. bits = self.terminate_bits(self.buffer.getvalue()) if bits is not None: self.buffer.write(bits) #delimit_words and add_words can return None add_bits = self.delimit_words() if add_bits: self.buffer.write(add_bits) fill_bytes = self.add_words() if fill_bytes: self.buffer.write(fill_bytes) #Get a numeric representation of the data data = [int(''.join(x),2) for x in self.grouper(8, self.buffer.getvalue())] #This is the error information for the code error_info = tables.eccwbi[self.version][self.error] #This will hold our data blocks data_blocks = [] #This will hold our error blocks error_blocks = [] #Some codes have the data sliced into two different sized blocks #for example, first two 14 word sized blocks, then four 15 word #sized blocks. This means that slicing size can change over time. data_block_sizes = [error_info[2]] * error_info[1] if error_info[3] != 0: data_block_sizes.extend([error_info[4]] * error_info[3]) #For every block of data, slice the data into the appropriate #sized block current_byte = 0 for n_data_blocks in data_block_sizes: data_blocks.append(data[current_byte:current_byte+n_data_blocks]) current_byte += n_data_blocks #I am not sure about the test after the "and". This was added to #fix a bug where after delimit_words padded the bit stream, a zero #byte ends up being added. After checking around, it seems this extra #byte is supposed to be chopped off, but I cannot find that in the #standard! I am adding it to solve the bug, I believe it is correct. if current_byte < len(data): raise ValueError('Too much data for this code version.') #DEBUG CODE!!!! #Print out the data blocks #print('Data Blocks:\n{0}'.format(data_blocks)) #Calculate the error blocks for n, block in enumerate(data_blocks): error_blocks.append(self.make_error_block(block, n)) #DEBUG CODE!!!! #Print out the error blocks #print('Error Blocks:\n{0}'.format(error_blocks)) #Buffer we will write our data blocks into data_buffer = io.StringIO() #Add the data blocks #Write the buffer such that: block 1 byte 1, block 2 byte 1, etc. largest_block = max(error_info[2], error_info[4])+error_info[0] for i in range(largest_block): for block in data_blocks: if i < len(block): data_buffer.write(self.binary_string(block[i], 8)) #Add the error code blocks. #Write the buffer such that: block 1 byte 1, block 2 byte 2, etc. for i in range(error_info[0]): for block in error_blocks: data_buffer.write(self.binary_string(block[i], 8)) self.buffer = data_buffer
[ "def", "add_data", "(", "self", ")", ":", "#Encode the data into a QR code", "self", ".", "buffer", ".", "write", "(", "self", ".", "binary_string", "(", "self", ".", "mode", ",", "4", ")", ")", "self", ".", "buffer", ".", "write", "(", "self", ".", "get_data_length", "(", ")", ")", "self", ".", "buffer", ".", "write", "(", "self", ".", "encode", "(", ")", ")", "#Converts the buffer into \"code word\" integers.", "#The online debugger outputs them this way, makes", "#for easier comparisons.", "#s = self.buffer.getvalue()", "#for i in range(0, len(s), 8):", "# print(int(s[i:i+8], 2), end=',')", "#print()", "#Fix for issue #3: https://github.com/mnooner256/pyqrcode/issues/3#", "#I was performing the terminate_bits() part in the encoding.", "#As per the standard, terminating bits are only supposed to", "#be added after the bit stream is complete. I took that to", "#mean after the encoding, but actually it is after the entire", "#bit stream has been constructed.", "bits", "=", "self", ".", "terminate_bits", "(", "self", ".", "buffer", ".", "getvalue", "(", ")", ")", "if", "bits", "is", "not", "None", ":", "self", ".", "buffer", ".", "write", "(", "bits", ")", "#delimit_words and add_words can return None", "add_bits", "=", "self", ".", "delimit_words", "(", ")", "if", "add_bits", ":", "self", ".", "buffer", ".", "write", "(", "add_bits", ")", "fill_bytes", "=", "self", ".", "add_words", "(", ")", "if", "fill_bytes", ":", "self", ".", "buffer", ".", "write", "(", "fill_bytes", ")", "#Get a numeric representation of the data", "data", "=", "[", "int", "(", "''", ".", "join", "(", "x", ")", ",", "2", ")", "for", "x", "in", "self", ".", "grouper", "(", "8", ",", "self", ".", "buffer", ".", "getvalue", "(", ")", ")", "]", "#This is the error information for the code", "error_info", "=", "tables", ".", "eccwbi", "[", "self", ".", "version", "]", "[", "self", ".", "error", "]", "#This will hold our data blocks", "data_blocks", "=", "[", "]", "#This will hold our error blocks", "error_blocks", "=", "[", "]", "#Some codes have the data sliced into two different sized blocks", "#for example, first two 14 word sized blocks, then four 15 word", "#sized blocks. This means that slicing size can change over time.", "data_block_sizes", "=", "[", "error_info", "[", "2", "]", "]", "*", "error_info", "[", "1", "]", "if", "error_info", "[", "3", "]", "!=", "0", ":", "data_block_sizes", ".", "extend", "(", "[", "error_info", "[", "4", "]", "]", "*", "error_info", "[", "3", "]", ")", "#For every block of data, slice the data into the appropriate", "#sized block", "current_byte", "=", "0", "for", "n_data_blocks", "in", "data_block_sizes", ":", "data_blocks", ".", "append", "(", "data", "[", "current_byte", ":", "current_byte", "+", "n_data_blocks", "]", ")", "current_byte", "+=", "n_data_blocks", "#I am not sure about the test after the \"and\". This was added to", "#fix a bug where after delimit_words padded the bit stream, a zero", "#byte ends up being added. After checking around, it seems this extra", "#byte is supposed to be chopped off, but I cannot find that in the", "#standard! 
I am adding it to solve the bug, I believe it is correct.", "if", "current_byte", "<", "len", "(", "data", ")", ":", "raise", "ValueError", "(", "'Too much data for this code version.'", ")", "#DEBUG CODE!!!!", "#Print out the data blocks", "#print('Data Blocks:\\n{0}'.format(data_blocks))", "#Calculate the error blocks", "for", "n", ",", "block", "in", "enumerate", "(", "data_blocks", ")", ":", "error_blocks", ".", "append", "(", "self", ".", "make_error_block", "(", "block", ",", "n", ")", ")", "#DEBUG CODE!!!!", "#Print out the error blocks", "#print('Error Blocks:\\n{0}'.format(error_blocks))", "#Buffer we will write our data blocks into", "data_buffer", "=", "io", ".", "StringIO", "(", ")", "#Add the data blocks", "#Write the buffer such that: block 1 byte 1, block 2 byte 1, etc.", "largest_block", "=", "max", "(", "error_info", "[", "2", "]", ",", "error_info", "[", "4", "]", ")", "+", "error_info", "[", "0", "]", "for", "i", "in", "range", "(", "largest_block", ")", ":", "for", "block", "in", "data_blocks", ":", "if", "i", "<", "len", "(", "block", ")", ":", "data_buffer", ".", "write", "(", "self", ".", "binary_string", "(", "block", "[", "i", "]", ",", "8", ")", ")", "#Add the error code blocks.", "#Write the buffer such that: block 1 byte 1, block 2 byte 2, etc.", "for", "i", "in", "range", "(", "error_info", "[", "0", "]", ")", ":", "for", "block", "in", "error_blocks", ":", "data_buffer", ".", "write", "(", "self", ".", "binary_string", "(", "block", "[", "i", "]", ",", "8", ")", ")", "self", ".", "buffer", "=", "data_buffer" ]
40.009901
19.564356
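The interleaving that closes the `add_data` record above can be seen in isolation. The following is a minimal sketch of the byte order it writes (block 1 byte 1, block 2 byte 1, and so on, with shorter blocks simply running out first); the block contents here are invented for illustration and do not come from the real tables.eccwbi lookup.

# Sketch of the QR block interleaving order used by add_data.
data_blocks = [[1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]

interleaved = []
largest = max(len(b) for b in data_blocks)
for i in range(largest):
    for block in data_blocks:
        if i < len(block):            # shorter blocks run out first
            interleaved.append(block[i])

print(interleaved)  # [1, 4, 7, 2, 5, 8, 3, 6, 9, 10]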
def processing_block_list(): """Return the list of processing blocks known to SDP.""" pb_list = ProcessingBlockList() return dict(active=pb_list.active, completed=pb_list.completed, aborted=pb_list.aborted)
[ "def", "processing_block_list", "(", ")", ":", "pb_list", "=", "ProcessingBlockList", "(", ")", "return", "dict", "(", "active", "=", "pb_list", ".", "active", ",", "completed", "=", "pb_list", ".", "completed", ",", "aborted", "=", "pb_list", ".", "aborted", ")" ]
40.833333
3.833333
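The handler above only repackages three attributes into a dict. A minimal sketch, assuming a stand-in `ProcessingBlockList` whose `active`, `completed`, and `aborted` attributes are plain lists (the real class is part of SDP and is not shown here):

# Hypothetical stand-in, just to show the shape of the returned dict.
class ProcessingBlockList:
    active = ['pb-001']
    completed = ['pb-000']
    aborted = []

pb_list = ProcessingBlockList()
result = dict(active=pb_list.active,
              completed=pb_list.completed,
              aborted=pb_list.aborted)
# {'active': ['pb-001'], 'completed': ['pb-000'], 'aborted': []}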
def oauth2_token_exchange(client_id, client_secret, redirect_uri,
                          base_url=OH_BASE_URL, code=None, refresh_token=None):
    """
    Exchange a code or refresh token for a new access token and refresh token.

    The first time a project is created, a code is required to generate a
    refresh token. Once the refresh token is obtained, it can be used later
    to obtain a new access token and refresh token. The user must store the
    refresh token to obtain the new access token. For more details visit:
    https://www.openhumans.org/direct-sharing/oauth2-setup/#setup-oauth2-authorization

    :param client_id: This field is the client id of the user.
    :param client_secret: This field is the client secret of the user.
    :param redirect_uri: This is the user's redirect URI.
    :param base_url: The base API URL; defaults to
        `https://www.openhumans.org`.
    :param code: This field is used to obtain an access_token for the first
        time. Its default value is None.
    :param refresh_token: This field is used to obtain a new access_token
        when the token expires.
    """
    if not (code or refresh_token) or (code and refresh_token):
        raise ValueError("Either code or refresh_token must be specified.")
    if code:
        data = {
            'grant_type': 'authorization_code',
            'redirect_uri': redirect_uri,
            'code': code,
        }
    elif refresh_token:
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': refresh_token,
        }
    token_url = urlparse.urljoin(base_url, '/oauth2/token/')
    req = requests.post(
        token_url, data=data,
        auth=requests.auth.HTTPBasicAuth(client_id, client_secret))
    handle_error(req, 200)
    data = req.json()
    return data
[ "def", "oauth2_token_exchange", "(", "client_id", ",", "client_secret", ",", "redirect_uri", ",", "base_url", "=", "OH_BASE_URL", ",", "code", "=", "None", ",", "refresh_token", "=", "None", ")", ":", "if", "not", "(", "code", "or", "refresh_token", ")", "or", "(", "code", "and", "refresh_token", ")", ":", "raise", "ValueError", "(", "\"Either code or refresh_token must be specified.\"", ")", "if", "code", ":", "data", "=", "{", "'grant_type'", ":", "'authorization_code'", ",", "'redirect_uri'", ":", "redirect_uri", ",", "'code'", ":", "code", ",", "}", "elif", "refresh_token", ":", "data", "=", "{", "'grant_type'", ":", "'refresh_token'", ",", "'refresh_token'", ":", "refresh_token", ",", "}", "token_url", "=", "urlparse", ".", "urljoin", "(", "base_url", ",", "'/oauth2/token/'", ")", "req", "=", "requests", ".", "post", "(", "token_url", ",", "data", "=", "data", ",", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "client_id", ",", "client_secret", ")", ")", "handle_error", "(", "req", ",", "200", ")", "data", "=", "req", ".", "json", "(", ")", "return", "data" ]
44.666667
22.461538
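A usage sketch for the exchange above. The credentials and redirect URI are hypothetical placeholders, and it assumes the returned JSON carries a `refresh_token` key, as OAuth2 token endpoints conventionally provide; exactly one of `code` or `refresh_token` may be passed per call.

# First exchange: trade the authorization code for tokens.
tokens = oauth2_token_exchange(
    client_id='CLIENT_ID',
    client_secret='CLIENT_SECRET',
    redirect_uri='https://example.com/callback',
    code='AUTHORIZATION_CODE')

# Later: refresh with the stored refresh token instead of a code.
tokens = oauth2_token_exchange(
    client_id='CLIENT_ID',
    client_secret='CLIENT_SECRET',
    redirect_uri='https://example.com/callback',
    refresh_token=tokens['refresh_token'])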
def questions(self, type=None): """Get questions associated with this participant. Return a list of questions associated with the participant. If specified, ``type`` filters by class. """ if type is None: type = Question if not issubclass(type, Question): raise TypeError("{} is not a valid question type.".format(type)) return type.query.filter_by(participant_id=self.id).all()
[ "def", "questions", "(", "self", ",", "type", "=", "None", ")", ":", "if", "type", "is", "None", ":", "type", "=", "Question", "if", "not", "issubclass", "(", "type", ",", "Question", ")", ":", "raise", "TypeError", "(", "\"{} is not a valid question type.\"", ".", "format", "(", "type", ")", ")", "return", "type", ".", "query", ".", "filter_by", "(", "participant_id", "=", "self", ".", "id", ")", ".", "all", "(", ")" ]
32.071429
21.142857
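A usage sketch for the filter above, assuming a hypothetical `AgeQuestion` subclass and an existing `participant` instance; any subclass of `Question` is accepted, and anything else raises `TypeError`.

# Hypothetical subclass standing in for a real polymorphic question model.
class AgeQuestion(Question):
    pass

participant.questions()                  # all questions, any type
participant.questions(type=AgeQuestion)  # only AgeQuestion records
participant.questions(type=str)          # TypeError: str is not a Question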