Dataset schema (four columns per record, in this order):

    text          string    (lengths 89 to 104k)
    code_tokens   list
    avg_line_len  float64   (7.91 to 980)
    score         float64   (0 to 630)
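Each record below lists the raw text of a Python function, its code_tokens list, and the two per-row statistics. As a rough illustration of what the avg_line_len column plausibly measures, here is a minimal sketch; the dump does not show the dataset's actual extraction pipeline, so this exact definition (mean character count per non-empty line) is an assumption:

import os  # only needed for the sample string below to make sense

# Hypothetical reconstruction of the avg_line_len statistic; the dataset's
# real convention (blank lines, docstrings, whitespace handling) is not
# shown in this dump, so treat this as an approximation.
def avg_line_len(text):
    lines = [line for line in text.splitlines() if line.strip()]
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0

sample = "def cleanup(self):\n    if os.path.exists(self.path):\n        os.remove(self.path)\n"
print(avg_line_len(sample))
# ~26.3 here; the corresponding cleanup record below reports 23.67, so the
# dataset's exact convention evidently differs slightly from this sketch.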
def create(cls, **kwargs):
    """Initializes a new instance, adds it to the db and commits the transaction.

    Args:
        **kwargs: The keyword arguments for the init constructor.

    Examples:
        >>> user = User.create(name="Vicky", email="[email protected]")
        >>> user.id
        35
    """
    try:
        return cls.add(cls.new(**kwargs))
    except:
        cls.session.rollback()
        raise
[ "def", "create", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "cls", ".", "add", "(", "cls", ".", "new", "(", "*", "*", "kwargs", ")", ")", "except", ":", "cls", ".", "session", ".", "rollback", "(", ")", "raise" ]
24.052632
22.526316
def active_days(records):
    """
    The number of days during which the user was active. A user is considered
    active if he sends a text, receives a text, initiates a call, receives a
    call, or has a mobility point.
    """
    days = set(r.datetime.date() for r in records)
    return len(days)
[ "def", "active_days", "(", "records", ")", ":", "days", "=", "set", "(", "r", ".", "datetime", ".", "date", "(", ")", "for", "r", "in", "records", ")", "return", "len", "(", "days", ")" ]
37
15.5
def dump(self, o_name=None, details=False, raw=False):
    """Dump a host (all hosts) from the arbiter.

    The arbiter will get the host (all hosts) information from all its schedulers.

    This gets the main host information from the scheduler. If details is set, then
    some more information is provided. This will not get all the host known attributes
    but only a reduced set that will inform about the host and its services status.

    If raw is set the information is provided in two string lists formatted as CSV
    strings. The first list element contains the hosts information and the second one
    contains the services information.

    If a host name is provided, this function will get only this host information,
    else all the scheduler hosts are returned.

    As an example (in raw format):
    {
        scheduler-master-3: [
            [
                "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output",
                "localhost;host;localhost;1532451740;0;UP;HARD;False;False;Host assumed to be UP",
                "host_2;host;host_2;1532451988;1;DOWN;HARD;True;False;I am always Down"
            ],
            [
                "type;host;name",
                "host_2;service;dummy_no_output;1532451981;0;OK;HARD;False;True;Service internal check result: 0",
                "host_2;service;dummy_warning;1532451960;4;UNREACHABLE;HARD;False;True;host_2-dummy_warning-1",
                "host_2;service;dummy_unreachable;1532451987;4;UNREACHABLE;HARD;False;True;host_2-dummy_unreachable-4",
                "host_2;service;dummy_random;1532451949;4;UNREACHABLE;HARD;False;True;Service internal check result: 2",
                "host_2;service;dummy_ok;1532452002;0;OK;HARD;False;True;host_2",
                "host_2;service;dummy_critical;1532451953;4;UNREACHABLE;HARD;False;True;host_2-dummy_critical-2",
                "host_2;service;dummy_unknown;1532451945;4;UNREACHABLE;HARD;False;True;host_2-dummy_unknown-3",
                "host_2;service;dummy_echo;1532451973;4;UNREACHABLE;HARD;False;True;"
            ]
        ],
        scheduler-master-2: [
            [
                "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output",
                "host_0;host;host_0;1532451993;0;UP;HARD;False;False;I am always Up",
                "BR_host;host;BR_host;1532451991;0;UP;HARD;False;False;Host assumed to be UP"
            ],
            [
                "type;host;name;last_check;state_id;state;state_type;is_problem;is_impact;output",
                "host_0;service;dummy_no_output;1532451970;0;OK;HARD;False;False;Service internal check result: 0",
                "host_0;service;dummy_unknown;1532451964;3;UNKNOWN;HARD;True;False;host_0-dummy_unknown-3",
                "host_0;service;dummy_random;1532451991;1;WARNING;HARD;True;False;Service internal check result: 1",
                "host_0;service;dummy_warning;1532451945;1;WARNING;HARD;True;False;host_0-dummy_warning-1",
                "host_0;service;dummy_unreachable;1532451986;4;UNREACHABLE;HARD;True;False;host_0-dummy_unreachable-4",
                "host_0;service;dummy_ok;1532452012;0;OK;HARD;False;False;host_0",
                "host_0;service;dummy_critical;1532451987;2;CRITICAL;HARD;True;False;host_0-dummy_critical-2",
                "host_0;service;dummy_echo;1532451963;0;OK;HARD;False;False;",
                "BR_host;service;dummy_critical;1532451970;2;CRITICAL;HARD;True;False;BR_host-dummy_critical-2",
                "BR_host;service;BR_Simple_And;1532451895;1;WARNING;HARD;True;True;",
                "BR_host;service;dummy_unreachable;1532451981;4;UNREACHABLE;HARD;True;False;BR_host-dummy_unreachable-4",
                "BR_host;service;dummy_no_output;1532451975;0;OK;HARD;False;False;Service internal check result: 0",
                "BR_host;service;dummy_unknown;1532451955;3;UNKNOWN;HARD;True;False;BR_host-dummy_unknown-3",
                "BR_host;service;dummy_echo;1532451981;0;OK;HARD;False;False;",
                "BR_host;service;dummy_warning;1532451972;1;WARNING;HARD;True;False;BR_host-dummy_warning-1",
                "BR_host;service;dummy_random;1532451976;4;UNREACHABLE;HARD;True;False;Service internal check result: 4",
                "BR_host;service;dummy_ok;1532451972;0;OK;HARD;False;False;BR_host"
            ]
        ],
        ...
    }

    More information is available in the corresponding scheduler API endpoint.

    :param o_name: searched object name (or uuid)
    :type o_name: str
    :param details: if set, provide a more detailed set of host attributes
    :type details: bool
    :param raw: if set, provide the information as CSV-formatted string lists
    :type raw: bool
    :return: serialized object information
    :rtype: str
    """
    if details is not False:
        details = bool(details)
    if raw is not False:
        raw = bool(raw)

    res = {}
    for scheduler_link in self.app.conf.schedulers:
        sched_res = scheduler_link.con.get(
            'dump',
            {'o_name': o_name,
             'details': '1' if details else '',
             'raw': '1' if raw else ''},
            wait=True)
        if isinstance(sched_res, dict) and \
                '_status' in sched_res and sched_res['_status'] == 'ERR':
            continue
        res[scheduler_link.name] = sched_res
    return res
[ "def", "dump", "(", "self", ",", "o_name", "=", "None", ",", "details", "=", "False", ",", "raw", "=", "False", ")", ":", "if", "details", "is", "not", "False", ":", "details", "=", "bool", "(", "details", ")", "if", "raw", "is", "not", "False", ":", "raw", "=", "bool", "(", "raw", ")", "res", "=", "{", "}", "for", "scheduler_link", "in", "self", ".", "app", ".", "conf", ".", "schedulers", ":", "sched_res", "=", "scheduler_link", ".", "con", ".", "get", "(", "'dump'", ",", "{", "'o_name'", ":", "o_name", ",", "'details'", ":", "'1'", "if", "details", "else", "''", ",", "'raw'", ":", "'1'", "if", "raw", "else", "''", "}", ",", "wait", "=", "True", ")", "if", "isinstance", "(", "sched_res", ",", "dict", ")", "and", "'_status'", "in", "sched_res", "and", "sched_res", "[", "'_status'", "]", "==", "'ERR'", ":", "continue", "res", "[", "scheduler_link", ".", "name", "]", "=", "sched_res", "return", "res" ]
52.809091
27.809091
def internal_reset(self):
    """
    Internal state reset. Used e.g. in unittests.
    """
    log.critical("PIA internal_reset()")
    self.empty_key_toggle = True
    self.current_input_char = None
    self.input_repead = 0
[ "def", "internal_reset", "(", "self", ")", ":", "log", ".", "critical", "(", "\"PIA internal_reset()\"", ")", "self", ".", "empty_key_toggle", "=", "True", "self", ".", "current_input_char", "=", "None", "self", ".", "input_repead", "=", "0" ]
28.111111
6.333333
def initQApplication():
    """ Initializes the QtWidgets.QApplication instance. Creates one if it doesn't exist.

        Sets Argos specific attributes, such as the OrganizationName, so that the
        application persistent settings are read/written to the correct settings
        file/winreg. It is therefore important to call this function at startup.
        The ArgosApplication constructor does this.

        Returns the application.
    """
    # PyQtGraph recommends raster graphics system for OS-X.
    if 'darwin' in sys.platform:
        graphicsSystem = "raster"  # raster, native or opengl
        os.environ.setdefault('QT_GRAPHICSSYSTEM', graphicsSystem)
        logger.info("Setting QT_GRAPHICSSYSTEM to: {}".format(graphicsSystem))

    app = QtWidgets.QApplication(sys.argv)
    initArgosApplicationSettings(app)
    return app
[ "def", "initQApplication", "(", ")", ":", "# PyQtGraph recommends raster graphics system for OS-X.", "if", "'darwin'", "in", "sys", ".", "platform", ":", "graphicsSystem", "=", "\"raster\"", "# raster, native or opengl", "os", ".", "environ", ".", "setdefault", "(", "'QT_GRAPHICSSYSTEM'", ",", "graphicsSystem", ")", "logger", ".", "info", "(", "\"Setting QT_GRAPHICSSYSTEM to: {}\"", ".", "format", "(", "graphicsSystem", ")", ")", "app", "=", "QtWidgets", ".", "QApplication", "(", "sys", ".", "argv", ")", "initArgosApplicationSettings", "(", "app", ")", "return", "app" ]
45.777778
25.111111
def packet_xml(url_encoded_ivorn=None):
    """
    Returns the XML packet contents stored for a given IVORN.

    The required IVORN should be appended to the URL after ``/xml/``
    in :ref:`URL-encoded <url-encoding>` form.
    """
    # Handle Apache / Debug server difference...
    # Apache conf must include the setting::
    #   AllowEncodedSlashes NoDecode
    # otherwise urlencoded paths have
    # double-slashes ('//') replaced with single-slashes ('/').
    # However, the werkzeug simple-server decodes these by default,
    # resulting in differing dev / production behaviour, which we handle here.
    ivorn = validate_ivorn(url_encoded_ivorn)
    xml = db_session.query(Voevent.xml).filter(
        Voevent.ivorn == ivorn
    ).scalar()
    r = make_response(xml)
    r.mimetype = 'text/xml'
    return r
[ "def", "packet_xml", "(", "url_encoded_ivorn", "=", "None", ")", ":", "# Handle Apache / Debug server difference...", "# Apache conf must include the setting::", "# AllowEncodedSlashes NoDecode", "# otherwise urlencoded paths have", "# double-slashes ('//') replaced with single-slashes ('/').", "# However, the werkzeug simple-server decodes these by default,", "# resulting in differing dev / production behaviour, which we handle here.", "ivorn", "=", "validate_ivorn", "(", "url_encoded_ivorn", ")", "xml", "=", "db_session", ".", "query", "(", "Voevent", ".", "xml", ")", ".", "filter", "(", "Voevent", ".", "ivorn", "==", "ivorn", ")", ".", "scalar", "(", ")", "r", "=", "make_response", "(", "xml", ")", "r", ".", "mimetype", "=", "'text/xml'", "return", "r" ]
36.454545
15.727273
def getEffort(self, edgeID, time):
    """getEffort(string, double) -> double

    Returns the effort value used for (re-)routing
    which is valid on the edge at the given time.
    """
    self._connection._beginMessage(tc.CMD_GET_EDGE_VARIABLE,
                                   tc.VAR_EDGE_EFFORT, edgeID, 1 + 4)
    self._connection._string += struct.pack(
        "!Bi", tc.TYPE_INTEGER, time)
    return self._connection._checkResult(tc.CMD_GET_EDGE_VARIABLE,
                                         tc.VAR_EDGE_EFFORT,
                                         edgeID).readDouble()
[ "def", "getEffort", "(", "self", ",", "edgeID", ",", "time", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_GET_EDGE_VARIABLE", ",", "tc", ".", "VAR_EDGE_EFFORT", ",", "edgeID", ",", "1", "+", "4", ")", "self", ".", "_connection", ".", "_string", "+=", "struct", ".", "pack", "(", "\"!Bi\"", ",", "tc", ".", "TYPE_INTEGER", ",", "time", ")", "return", "self", ".", "_connection", ".", "_checkResult", "(", "tc", ".", "CMD_GET_EDGE_VARIABLE", ",", "tc", ".", "VAR_EDGE_EFFORT", ",", "edgeID", ")", ".", "readDouble", "(", ")" ]
48.25
17.833333
def before_after_apply(self, before_fn, after_fn, leaf_fn=None):
    """Applies the functions to each node in a subtree using a traversal in
    which each internal node is encountered twice: once right before its
    descendants, and once right after its last descendant.
    """
    stack = [self]
    while stack:
        node = stack.pop()
        if node.is_leaf:
            if leaf_fn:
                leaf_fn(node)
            while node.is_last_child_of_parent:
                node = node._parent
                if node:
                    after_fn(node)
                else:
                    break
        else:
            before_fn(node)
            stack.extend([i for i in reversed(node._children)])
[ "def", "before_after_apply", "(", "self", ",", "before_fn", ",", "after_fn", ",", "leaf_fn", "=", "None", ")", ":", "stack", "=", "[", "self", "]", "while", "stack", ":", "node", "=", "stack", ".", "pop", "(", ")", "if", "node", ".", "is_leaf", ":", "if", "leaf_fn", ":", "leaf_fn", "(", "node", ")", "while", "node", ".", "is_last_child_of_parent", ":", "node", "=", "node", ".", "_parent", "if", "node", ":", "after_fn", "(", "node", ")", "else", ":", "break", "else", ":", "before_fn", "(", "node", ")", "stack", ".", "extend", "(", "[", "i", "for", "i", "in", "reversed", "(", "node", ".", "_children", ")", "]", ")" ]
37.75
12.9
def sample(self, num_samples=1000, hmc_iters=20):
    """
    Sample the (unfixed) model parameters.

    :param num_samples: the number of samples to draw (1000 by default)
    :type num_samples: int
    :param hmc_iters: the number of leap-frog iterations (20 by default)
    :type hmc_iters: int
    :return: the list of parameter samples with the size N x P (N - the number
        of samples, P - the number of parameters to sample)
    :rtype: numpy.ndarray
    """
    params = np.empty((num_samples, self.p.size))
    for i in range(num_samples):
        self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size), self.M)
        H_old = self._computeH()
        theta_old = self.model.optimizer_array.copy()
        params[i] = self.model.unfixed_param_array
        # Metropolis
        self._update(hmc_iters)
        H_new = self._computeH()
        if H_old > H_new:
            k = 1.
        else:
            k = np.exp(H_old - H_new)
        if np.random.rand() < k:
            params[i] = self.model.unfixed_param_array
        else:
            self.model.optimizer_array = theta_old
    return params
[ "def", "sample", "(", "self", ",", "num_samples", "=", "1000", ",", "hmc_iters", "=", "20", ")", ":", "params", "=", "np", ".", "empty", "(", "(", "num_samples", ",", "self", ".", "p", ".", "size", ")", ")", "for", "i", "in", "range", "(", "num_samples", ")", ":", "self", ".", "p", "[", ":", "]", "=", "np", ".", "random", ".", "multivariate_normal", "(", "np", ".", "zeros", "(", "self", ".", "p", ".", "size", ")", ",", "self", ".", "M", ")", "H_old", "=", "self", ".", "_computeH", "(", ")", "theta_old", "=", "self", ".", "model", ".", "optimizer_array", ".", "copy", "(", ")", "params", "[", "i", "]", "=", "self", ".", "model", ".", "unfixed_param_array", "#Matropolis", "self", ".", "_update", "(", "hmc_iters", ")", "H_new", "=", "self", ".", "_computeH", "(", ")", "if", "H_old", ">", "H_new", ":", "k", "=", "1.", "else", ":", "k", "=", "np", ".", "exp", "(", "H_old", "-", "H_new", ")", "if", "np", ".", "random", ".", "rand", "(", ")", "<", "k", ":", "params", "[", "i", "]", "=", "self", ".", "model", ".", "unfixed_param_array", "else", ":", "self", ".", "model", ".", "optimizer_array", "=", "theta_old", "return", "params" ]
40
18.066667
def QWidget_factory(ui_file=None, *args, **kwargs):
    """
    Defines a class factory creating `QWidget <http://doc.qt.nokia.com/qwidget.html>`_ classes
    using given ui file.

    :param ui_file: Ui file.
    :type ui_file: unicode
    :param \*args: Arguments.
    :type \*args: \*
    :param \*\*kwargs: Keywords arguments.
    :type \*\*kwargs: \*\*
    :return: QWidget class.
    :rtype: QWidget
    """

    file = ui_file or DEFAULT_UI_FILE
    if not foundations.common.path_exists(file):
        raise foundations.exceptions.FileExistsError("{0} | '{1}' ui file doesn't exists!".format(__name__, file))

    Form, Base = uic.loadUiType(file)

    class QWidget(Form, Base):
        """
        Derives from :def:`QWidget_factory` class factory definition.
        """

        def __init__(self, *args, **kwargs):
            """
            Initializes the class.

            :param \*args: Arguments.
            :type \*args: \*
            :param \*\*kwargs: Keywords arguments.
            :type \*\*kwargs: \*\*
            """

            LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

            super(QWidget, self).__init__(*args, **kwargs)

            self.__ui_file = file
            self.__geometry = None

            self.setupUi(self)

        @property
        def ui_file(self):
            """
            Property for **self.__ui_file** attribute.

            :return: self.__ui_file.
            :rtype: unicode
            """

            return self.__ui_file

        @ui_file.setter
        @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
        def ui_file(self, value):
            """
            Setter for **self.__ui_file** attribute.

            :param value: Attribute value.
            :type value: unicode
            """

            raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is read only!".format(
                self.__class__.__name__, "ui_file"))

        @ui_file.deleter
        @foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
        def ui_file(self):
            """
            Deleter for **self.__ui_file** attribute.
            """

            raise foundations.exceptions.ProgrammingError("{0} | '{1}' attribute is not deletable!".format(
                self.__class__.__name__, "ui_file"))

        def show(self, setGeometry=True):
            """
            Reimplements the :meth:`QWidget.show` method.

            :param setGeometry: Set geometry.
            :type setGeometry: bool
            """

            if not setGeometry:
                super(QWidget, self).show()
                return

            wasHidden = not self.isVisible()
            if self.__geometry is None and wasHidden:
                center_widget_on_screen(self)

            super(QWidget, self).show()

            if self.__geometry is not None and wasHidden:
                self.restoreGeometry(self.__geometry)

        def closeEvent(self, event):
            """
            Reimplements the :meth:`QWidget.closeEvent` method.

            :param event: QEvent.
            :type event: QEvent
            """

            self.__geometry = self.saveGeometry()
            event.accept()

    return QWidget
[ "def", "QWidget_factory", "(", "ui_file", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "file", "=", "ui_file", "or", "DEFAULT_UI_FILE", "if", "not", "foundations", ".", "common", ".", "path_exists", "(", "file", ")", ":", "raise", "foundations", ".", "exceptions", ".", "FileExistsError", "(", "\"{0} | '{1}' ui file doesn't exists!\"", ".", "format", "(", "__name__", ",", "file", ")", ")", "Form", ",", "Base", "=", "uic", ".", "loadUiType", "(", "file", ")", "class", "QWidget", "(", "Form", ",", "Base", ")", ":", "\"\"\"\n Derives from :def:`QWidget_factory` class factory definition.\n \"\"\"", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Initializes the class.\n\n :param \\*args: Arguments.\n :type \\*args: \\*\n :param \\*\\*kwargs: Keywords arguments.\n :type \\*\\*kwargs: \\*\\*\n \"\"\"", "LOGGER", ".", "debug", "(", "\"> Initializing '{0}()' class.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "super", "(", "QWidget", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "__ui_file", "=", "file", "self", ".", "__geometry", "=", "None", "self", ".", "setupUi", "(", "self", ")", "@", "property", "def", "ui_file", "(", "self", ")", ":", "\"\"\"\n Property for **self.__ui_file** attribute.\n\n :return: self.__ui_file.\n :rtype: unicode\n \"\"\"", "return", "self", ".", "__ui_file", "@", "ui_file", ".", "setter", "@", "foundations", ".", "exceptions", ".", "handle_exceptions", "(", "foundations", ".", "exceptions", ".", "ProgrammingError", ")", "def", "ui_file", "(", "self", ",", "value", ")", ":", "\"\"\"\n Setter for **self.__ui_file** attribute.\n\n :param value: Attribute value.\n :type value: unicode\n \"\"\"", "raise", "foundations", ".", "exceptions", ".", "ProgrammingError", "(", "\"{0} | '{1}' attribute is read only!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "\"ui_file\"", ")", ")", "@", "ui_file", ".", "deleter", "@", "foundations", ".", "exceptions", ".", "handle_exceptions", "(", "foundations", ".", "exceptions", ".", "ProgrammingError", ")", "def", "ui_file", "(", "self", ")", ":", "\"\"\"\n Deleter for **self.__ui_file** attribute.\n \"\"\"", "raise", "foundations", ".", "exceptions", ".", "ProgrammingError", "(", "\"{0} | '{1}' attribute is not deletable!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "\"ui_file\"", ")", ")", "def", "show", "(", "self", ",", "setGeometry", "=", "True", ")", ":", "\"\"\"\n Reimplements the :meth:`QWidget.show` method.\n\n :param setGeometry: Set geometry.\n :type setGeometry: bool\n \"\"\"", "if", "not", "setGeometry", ":", "super", "(", "QWidget", ",", "self", ")", ".", "show", "(", ")", "return", "wasHidden", "=", "not", "self", ".", "isVisible", "(", ")", "if", "self", ".", "__geometry", "is", "None", "and", "wasHidden", ":", "center_widget_on_screen", "(", "self", ")", "super", "(", "QWidget", ",", "self", ")", ".", "show", "(", ")", "if", "self", ".", "__geometry", "is", "not", "None", "and", "wasHidden", ":", "self", ".", "restoreGeometry", "(", "self", ".", "__geometry", ")", "def", "closeEvent", "(", "self", ",", "event", ")", ":", "\"\"\"\n Reimplements the :meth:`QWidget.closeEvent` method.\n\n :param event: QEvent.\n :type event: QEvent\n \"\"\"", "self", ".", "__geometry", "=", "self", ".", "saveGeometry", "(", ")", "event", ".", "accept", "(", ")", "return", "QWidget" ]
27.834783
20.026087
def get_dict(self):
    """
    Convert all rules to dict and return them.
    """
    out = {
        property_name: getattr(self, property_name)
        for property_name in self._property_names
    }

    if "frequency" in out:
        out["frequency"] = int(out["frequency"])

    return out
[ "def", "get_dict", "(", "self", ")", ":", "out", "=", "{", "property_name", ":", "getattr", "(", "self", ",", "property_name", ")", "for", "property_name", "in", "self", ".", "_property_names", "}", "if", "\"frequency\"", "in", "out", ":", "out", "[", "\"frequency\"", "]", "=", "int", "(", "out", "[", "\"frequency\"", "]", ")", "return", "out" ]
24.846154
18.384615
def _module_name_from_previous_frame(num_frames_back):
    """
    Returns the module name associated with a frame `num_frames_back` in the
    call stack. This function adds 1 to account for itself, so
    `num_frames_back` should be given relative to the caller.
    """
    frm = inspect.stack()[num_frames_back + 1]
    return inspect.getmodule(frm[0]).__name__
[ "def", "_module_name_from_previous_frame", "(", "num_frames_back", ")", ":", "frm", "=", "inspect", ".", "stack", "(", ")", "[", "num_frames_back", "+", "1", "]", "return", "inspect", ".", "getmodule", "(", "frm", "[", "0", "]", ")", ".", "__name__" ]
44.75
13
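A quick usage sketch for the helper above, with a hypothetical wrapper function; it assumes _module_name_from_previous_frame is in scope and that inspect is imported, as its body implies:

import inspect

# Hypothetical demo helper: from inside who_called_me(), one frame back is
# whoever invoked it, so the module-level call below reports '__main__'.
def who_called_me():
    return _module_name_from_previous_frame(1)

print(who_called_me())  # '__main__' when run as a script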
def load_rho(name, column):
    '''Load a datafile with rho structure like mag and phase
    '''
    try:
        content = np.loadtxt(name, skiprows=1, usecols=([column]))
    except:
        raise ValueError('Given column to open does not exist.')
    return content
[ "def", "load_rho", "(", "name", ",", "column", ")", ":", "try", ":", "content", "=", "np", ".", "loadtxt", "(", "name", ",", "skiprows", "=", "1", ",", "usecols", "=", "(", "[", "column", "]", ")", ")", "except", ":", "raise", "ValueError", "(", "'Given column to open does not exist.'", ")", "return", "content" ]
29
26.555556
def choose_ancestral_states_map(tree, feature, states):
    """
    Chooses node ancestral states based on their marginal probabilities,
    using the MAP method.

    :param tree: ete3.Tree, the tree of interest
    :param feature: str, character for which the ancestral states are to be chosen
    :param states: numpy.array of possible character states in order corresponding
        to the probabilities array
    :return: void, modifies the get_personalized_feature_name(feature, ALLOWED_STATES)
        feature of each node to only contain the selected states.
    """
    lh_feature = get_personalized_feature_name(feature, LH)
    allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)
    _, state2array = get_state2allowed_states(states, False)

    for node in tree.traverse():
        marginal_likelihoods = getattr(node, lh_feature)
        node.add_feature(allowed_state_feature, state2array[marginal_likelihoods.argmax()])
[ "def", "choose_ancestral_states_map", "(", "tree", ",", "feature", ",", "states", ")", ":", "lh_feature", "=", "get_personalized_feature_name", "(", "feature", ",", "LH", ")", "allowed_state_feature", "=", "get_personalized_feature_name", "(", "feature", ",", "ALLOWED_STATES", ")", "_", ",", "state2array", "=", "get_state2allowed_states", "(", "states", ",", "False", ")", "for", "node", "in", "tree", ".", "traverse", "(", ")", ":", "marginal_likelihoods", "=", "getattr", "(", "node", ",", "lh_feature", ")", "node", ".", "add_feature", "(", "allowed_state_feature", ",", "state2array", "[", "marginal_likelihoods", ".", "argmax", "(", ")", "]", ")" ]
54.588235
28.823529
def single_queue_send(
        transport: 'UDPTransport',
        recipient: Address,
        queue: Queue_T,
        queue_identifier: QueueIdentifier,
        event_stop: Event,
        event_healthy: Event,
        event_unhealthy: Event,
        message_retries: int,
        message_retry_timeout: int,
        message_retry_max_timeout: int,
):
    """ Handles a single message queue for `recipient`.

    Notes:
    - This task must be the only consumer of queue.
    - This task can be killed at any time, but the intended usage is to stop
      it with the event_stop.
    - If there are many queues for the same recipient, it is the caller's
      responsibility to not start them together to avoid congestion.
    - This task assumes the endpoint is never cleared after it's first known.
      If this assumption changes the code must be updated to handle unknown
      addresses.
    """
    # A NotifyingQueue is required to implement cancelability, otherwise the
    # task cannot be stopped while the greenlet waits for an element to be
    # inserted in the queue.
    if not isinstance(queue, NotifyingQueue):
        raise ValueError('queue must be a NotifyingQueue.')

    # Reusing the event, clear must be carefully done
    data_or_stop = event_first_of(
        queue,
        event_stop,
    )

    # Wait for the endpoint registration or to quit
    transport.log.debug(
        'queue: waiting for node to become healthy',
        queue_identifier=queue_identifier,
        queue_size=len(queue),
    )

    event_first_of(
        event_healthy,
        event_stop,
    ).wait()

    transport.log.debug(
        'queue: processing queue',
        queue_identifier=queue_identifier,
        queue_size=len(queue),
    )

    while True:
        data_or_stop.wait()

        if event_stop.is_set():
            transport.log.debug(
                'queue: stopping',
                queue_identifier=queue_identifier,
                queue_size=len(queue),
            )
            return

        # The queue is not empty at this point, so this won't raise Empty.
        # This task being the only consumer is a requirement.
        (messagedata, message_id) = queue.peek(block=False)

        transport.log.debug(
            'queue: sending message',
            recipient=pex(recipient),
            msgid=message_id,
            queue_identifier=queue_identifier,
            queue_size=len(queue),
        )

        backoff = timeout_exponential_backoff(
            message_retries,
            message_retry_timeout,
            message_retry_max_timeout,
        )

        acknowledged = retry_with_recovery(
            transport,
            messagedata,
            message_id,
            recipient,
            event_stop,
            event_healthy,
            event_unhealthy,
            backoff,
        )

        if acknowledged:
            queue.get()

            # Checking the length of the queue does not trigger a
            # context-switch, so it's safe to assume the length of the queue
            # won't change under our feet and when a new item will be added
            # the event will be set again.
            if not queue:
                data_or_stop.clear()

                if event_stop.is_set():
                    return
[ "def", "single_queue_send", "(", "transport", ":", "'UDPTransport'", ",", "recipient", ":", "Address", ",", "queue", ":", "Queue_T", ",", "queue_identifier", ":", "QueueIdentifier", ",", "event_stop", ":", "Event", ",", "event_healthy", ":", "Event", ",", "event_unhealthy", ":", "Event", ",", "message_retries", ":", "int", ",", "message_retry_timeout", ":", "int", ",", "message_retry_max_timeout", ":", "int", ",", ")", ":", "# A NotifyingQueue is required to implement cancelability, otherwise the", "# task cannot be stopped while the greenlet waits for an element to be", "# inserted in the queue.", "if", "not", "isinstance", "(", "queue", ",", "NotifyingQueue", ")", ":", "raise", "ValueError", "(", "'queue must be a NotifyingQueue.'", ")", "# Reusing the event, clear must be carefully done", "data_or_stop", "=", "event_first_of", "(", "queue", ",", "event_stop", ",", ")", "# Wait for the endpoint registration or to quit", "transport", ".", "log", ".", "debug", "(", "'queue: waiting for node to become healthy'", ",", "queue_identifier", "=", "queue_identifier", ",", "queue_size", "=", "len", "(", "queue", ")", ",", ")", "event_first_of", "(", "event_healthy", ",", "event_stop", ",", ")", ".", "wait", "(", ")", "transport", ".", "log", ".", "debug", "(", "'queue: processing queue'", ",", "queue_identifier", "=", "queue_identifier", ",", "queue_size", "=", "len", "(", "queue", ")", ",", ")", "while", "True", ":", "data_or_stop", ".", "wait", "(", ")", "if", "event_stop", ".", "is_set", "(", ")", ":", "transport", ".", "log", ".", "debug", "(", "'queue: stopping'", ",", "queue_identifier", "=", "queue_identifier", ",", "queue_size", "=", "len", "(", "queue", ")", ",", ")", "return", "# The queue is not empty at this point, so this won't raise Empty.", "# This task being the only consumer is a requirement.", "(", "messagedata", ",", "message_id", ")", "=", "queue", ".", "peek", "(", "block", "=", "False", ")", "transport", ".", "log", ".", "debug", "(", "'queue: sending message'", ",", "recipient", "=", "pex", "(", "recipient", ")", ",", "msgid", "=", "message_id", ",", "queue_identifier", "=", "queue_identifier", ",", "queue_size", "=", "len", "(", "queue", ")", ",", ")", "backoff", "=", "timeout_exponential_backoff", "(", "message_retries", ",", "message_retry_timeout", ",", "message_retry_max_timeout", ",", ")", "acknowledged", "=", "retry_with_recovery", "(", "transport", ",", "messagedata", ",", "message_id", ",", "recipient", ",", "event_stop", ",", "event_healthy", ",", "event_unhealthy", ",", "backoff", ",", ")", "if", "acknowledged", ":", "queue", ".", "get", "(", ")", "# Checking the length of the queue does not trigger a", "# context-switch, so it's safe to assume the length of the queue", "# won't change under our feet and when a new item will be added the", "# event will be set again.", "if", "not", "queue", ":", "data_or_stop", ".", "clear", "(", ")", "if", "event_stop", ".", "is_set", "(", ")", ":", "return" ]
29.831776
19.682243
def search(self, query, _or=False, ignores=[]):
    """Search word from FM-index

    Params:
        <str> | <Sequential> query
        <bool> _or
        <list <str> > ignores
    Return:
        <list> SEARCH_RESULT(<int> document_id,
                             <list <int> > counts,
                             <str> doc)
    """
    if isinstance(query, str):
        dids = MapIntInt({})
        self.fm.search(query, dids)
        dids = dids.asdict()
        result = []
        for did in sorted(dids.keys()):
            doc = self.fm.get_document(did)
            if not any(ignore in doc for ignore in ignores):
                count = dids[did]
                result.append(SEARCH_RESULT(int(did), [count], doc))
        return result

    search_results = []
    for q in query:
        dids = MapIntInt({})
        self.fm.search(q, dids)
        search_results.append(dids.asdict())
    merged_dids = self._merge_search_result(search_results, _or)
    result = []
    for did in merged_dids:
        doc = self.fm.get_document(did)
        if not any(ignore in doc for ignore in ignores):
            counts = map(lambda x: int(x.pop(did, 0)), search_results)
            result.append(SEARCH_RESULT(int(did), list(counts), doc))
    return result
[ "def", "search", "(", "self", ",", "query", ",", "_or", "=", "False", ",", "ignores", "=", "[", "]", ")", ":", "if", "isinstance", "(", "query", ",", "str", ")", ":", "dids", "=", "MapIntInt", "(", "{", "}", ")", "self", ".", "fm", ".", "search", "(", "query", ",", "dids", ")", "dids", "=", "dids", ".", "asdict", "(", ")", "result", "=", "[", "]", "for", "did", "in", "sorted", "(", "dids", ".", "keys", "(", ")", ")", ":", "doc", "=", "self", ".", "fm", ".", "get_document", "(", "did", ")", "if", "not", "any", "(", "ignore", "in", "doc", "for", "ignore", "in", "ignores", ")", ":", "count", "=", "dids", "[", "did", "]", "result", ".", "append", "(", "SEARCH_RESULT", "(", "int", "(", "did", ")", ",", "[", "count", "]", ",", "doc", ")", ")", "return", "result", "search_results", "=", "[", "]", "for", "q", "in", "query", ":", "dids", "=", "MapIntInt", "(", "{", "}", ")", "self", ".", "fm", ".", "search", "(", "q", ",", "dids", ")", "search_results", ".", "append", "(", "dids", ".", "asdict", "(", ")", ")", "merged_dids", "=", "self", ".", "_merge_search_result", "(", "search_results", ",", "_or", ")", "result", "=", "[", "]", "for", "did", "in", "merged_dids", ":", "doc", "=", "self", ".", "fm", ".", "get_document", "(", "did", ")", "if", "not", "any", "(", "ignore", "in", "doc", "for", "ignore", "in", "ignores", ")", ":", "counts", "=", "map", "(", "lambda", "x", ":", "int", "(", "x", ".", "pop", "(", "did", ",", "0", ")", ")", ",", "search_results", ")", "result", ".", "append", "(", "SEARCH_RESULT", "(", "int", "(", "did", ")", ",", "list", "(", "counts", ")", ",", "doc", ")", ")", "return", "result" ]
37.861111
13.611111
def write_amendment(self, amendment_id, file_content, branch, author):
    """Given an amendment_id, temporary filename of content, branch and auth_info

    Deprecated but needed until we merge api local-dep to master...
    """
    gh_user = branch.split('_amendment_')[0]
    msg = "Update Amendment '%s' via OpenTree API" % amendment_id
    return self.write_document(gh_user,
                               amendment_id,
                               file_content,
                               branch,
                               author,
                               commit_msg=msg)
[ "def", "write_amendment", "(", "self", ",", "amendment_id", ",", "file_content", ",", "branch", ",", "author", ")", ":", "gh_user", "=", "branch", ".", "split", "(", "'_amendment_'", ")", "[", "0", "]", "msg", "=", "\"Update Amendment '%s' via OpenTree API\"", "%", "amendment_id", "return", "self", ".", "write_document", "(", "gh_user", ",", "amendment_id", ",", "file_content", ",", "branch", ",", "author", ",", "commit_msg", "=", "msg", ")" ]
45.615385
16.692308
def is_transition_metal(self):
    """
    True if element is a transition metal.
    """
    ns = list(range(21, 31))
    ns.extend(list(range(39, 49)))
    ns.append(57)
    ns.extend(list(range(72, 81)))
    ns.append(89)
    ns.extend(list(range(104, 113)))
    return self.Z in ns
[ "def", "is_transition_metal", "(", "self", ")", ":", "ns", "=", "list", "(", "range", "(", "21", ",", "31", ")", ")", "ns", ".", "extend", "(", "list", "(", "range", "(", "39", ",", "49", ")", ")", ")", "ns", ".", "append", "(", "57", ")", "ns", ".", "extend", "(", "list", "(", "range", "(", "72", ",", "81", ")", ")", ")", "ns", ".", "append", "(", "89", ")", "ns", ".", "extend", "(", "list", "(", "range", "(", "104", ",", "113", ")", ")", ")", "return", "self", ".", "Z", "in", "ns" ]
28.636364
7.181818
def _report_volume_count(self):
    """Report volume count per state (dangling or not)"""
    m_func = FUNC_MAP[GAUGE][self.use_histogram]
    attached_volumes = self.docker_util.client.volumes(filters={'dangling': False})
    dangling_volumes = self.docker_util.client.volumes(filters={'dangling': True})
    attached_count = len(attached_volumes.get('Volumes', []) or [])
    dangling_count = len(dangling_volumes.get('Volumes', []) or [])
    m_func(self, 'docker.volume.count', attached_count, tags=['volume_state:attached'])
    m_func(self, 'docker.volume.count', dangling_count, tags=['volume_state:dangling'])
[ "def", "_report_volume_count", "(", "self", ")", ":", "m_func", "=", "FUNC_MAP", "[", "GAUGE", "]", "[", "self", ".", "use_histogram", "]", "attached_volumes", "=", "self", ".", "docker_util", ".", "client", ".", "volumes", "(", "filters", "=", "{", "'dangling'", ":", "False", "}", ")", "dangling_volumes", "=", "self", ".", "docker_util", ".", "client", ".", "volumes", "(", "filters", "=", "{", "'dangling'", ":", "True", "}", ")", "attached_count", "=", "len", "(", "attached_volumes", ".", "get", "(", "'Volumes'", ",", "[", "]", ")", "or", "[", "]", ")", "dangling_count", "=", "len", "(", "dangling_volumes", ".", "get", "(", "'Volumes'", ",", "[", "]", ")", "or", "[", "]", ")", "m_func", "(", "self", ",", "'docker.volume.count'", ",", "attached_count", ",", "tags", "=", "[", "'volume_state:attached'", "]", ")", "m_func", "(", "self", ",", "'docker.volume.count'", ",", "dangling_count", ",", "tags", "=", "[", "'volume_state:dangling'", "]", ")" ]
64.1
31.8
def cleanup(self):
    """ Clean up the saved state. """
    if os.path.exists(self.path):
        os.remove(self.path)
[ "def", "cleanup", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "os", ".", "remove", "(", "self", ".", "path", ")" ]
23.666667
6.666667
def split_model_idx(model:nn.Module, idxs:Collection[int])->ModuleList:
    "Split `model` according to the indexes in `idxs`."
    layers = flatten_model(model)
    if idxs[0] != 0: idxs = [0] + idxs
    if idxs[-1] != len(layers): idxs.append(len(layers))
    return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])]
[ "def", "split_model_idx", "(", "model", ":", "nn", ".", "Module", ",", "idxs", ":", "Collection", "[", "int", "]", ")", "->", "ModuleList", ":", "layers", "=", "flatten_model", "(", "model", ")", "if", "idxs", "[", "0", "]", "!=", "0", ":", "idxs", "=", "[", "0", "]", "+", "idxs", "if", "idxs", "[", "-", "1", "]", "!=", "len", "(", "layers", ")", ":", "idxs", ".", "append", "(", "len", "(", "layers", ")", ")", "return", "[", "nn", ".", "Sequential", "(", "*", "layers", "[", "i", ":", "j", "]", ")", "for", "i", ",", "j", "in", "zip", "(", "idxs", "[", ":", "-", "1", "]", ",", "idxs", "[", "1", ":", "]", ")", "]" ]
54.666667
17.666667
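A hedged usage sketch for split_model_idx above. The flatten_model helper belongs to the surrounding library (fastai) and is not shown in this dump, so the stand-in below assumes it returns the model's leaf modules in order, which may differ from the real helper:

import torch.nn as nn

# Stand-in for the library's flatten_model (assumption: leaf modules in order).
def flatten_model(model):
    return [m for m in model.modules() if not list(m.children())]

body = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5), nn.ReLU())
groups = split_model_idx(body, [2])  # idxs is normalized to [0, 2, 4] internally
print([len(g) for g in groups])      # [2, 2]: two groups of two leaf layers each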
def append(self, row=None):
    """append(row=None)

    :param row: a list of values to apply to the newly appended row or :obj:`None`
    :type row: [:obj:`object`] or :obj:`None`
    :returns: :obj:`Gtk.TreeIter` of the appended row
    :rtype: :obj:`Gtk.TreeIter`

    If `row` is :obj:`None` the appended row will be empty and to fill in
    values you need to call :obj:`Gtk.ListStore.set`\\() or
    :obj:`Gtk.ListStore.set_value`\\().

    If `row` isn't :obj:`None` it has to be a list of values which will be
    used to fill the row.
    """
    if row:
        return self._do_insert(-1, row)
        # gtk_list_store_insert() does not know about the "position == -1"
        # case, so use append() here
    else:
        return Gtk.ListStore.append(self)
[ "def", "append", "(", "self", ",", "row", "=", "None", ")", ":", "if", "row", ":", "return", "self", ".", "_do_insert", "(", "-", "1", ",", "row", ")", "# gtk_list_store_insert() does not know about the \"position == -1\"", "# case, so use append() here", "else", ":", "return", "Gtk", ".", "ListStore", ".", "append", "(", "self", ")" ]
35.086957
21.608696
def df_drop_duplicates(df, ignore_key_pattern="time"):
    '''
    Drop duplicates from the dataframe, ignoring columns whose keys contain
    the given pattern.

    :param df:
    :param ignore_key_pattern:
    :return:
    '''
    keys_to_remove = list_contains(df.keys(), ignore_key_pattern)
    #key_tf = [key.find(noinfo_key_pattern) != -1 for key in df.keys()]
    # keys_to_remove
    # remove duplicates
    ks = copy.copy(list(df.keys()))
    for key in keys_to_remove:
        ks.remove(key)

    df = df.drop_duplicates(ks)
    return df
[ "def", "df_drop_duplicates", "(", "df", ",", "ignore_key_pattern", "=", "\"time\"", ")", ":", "keys_to_remove", "=", "list_contains", "(", "df", ".", "keys", "(", ")", ",", "ignore_key_pattern", ")", "#key_tf = [key.find(noinfo_key_pattern) != -1 for key in df.keys()]", "# keys_to_remove", "# remove duplicates", "ks", "=", "copy", ".", "copy", "(", "list", "(", "df", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "keys_to_remove", ":", "ks", ".", "remove", "(", "key", ")", "df", "=", "df", ".", "drop_duplicates", "(", "ks", ")", "return", "df" ]
27.421053
21.421053
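To make the intent of df_drop_duplicates concrete, here is a self-contained pandas illustration of the same idea (invented toy data; the source's list_contains helper is not used here):

import pandas as pd

# Toy frame where rows 0 and 1 differ only in a volatile "time" column.
df = pd.DataFrame({
    "id": [1, 1, 2],
    "val": ["a", "a", "b"],
    "time": [0.1, 0.2, 0.3],
})
keys = [k for k in df.columns if "time" not in k]
print(df.drop_duplicates(subset=keys))
#    id val  time
# 0   1   a   0.1
# 2   2   b   0.3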
def upper_bollinger_band(data, period, std_mult=2.0):
    """
    Upper Bollinger Band.

    Formula:
    u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult
    """
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    simple_ma = sma(data, period)[period-1:]

    upper_bb = []
    for idx in range(len(data) - period + 1):
        std_dev = np.std(data[idx:idx + period])
        upper_bb.append(simple_ma[idx] + std_dev * std_mult)
    upper_bb = fill_for_noncomputable_vals(data, upper_bb)

    return np.array(upper_bb)
[ "def", "upper_bollinger_band", "(", "data", ",", "period", ",", "std_mult", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "simple_ma", "=", "sma", "(", "data", ",", "period", ")", "[", "period", "-", "1", ":", "]", "upper_bb", "=", "[", "]", "for", "idx", "in", "range", "(", "len", "(", "data", ")", "-", "period", "+", "1", ")", ":", "std_dev", "=", "np", ".", "std", "(", "data", "[", "idx", ":", "idx", "+", "period", "]", ")", "upper_bb", ".", "append", "(", "simple_ma", "[", "idx", "]", "+", "std_dev", "*", "std_mult", ")", "upper_bb", "=", "fill_for_noncomputable_vals", "(", "data", ",", "upper_bb", ")", "return", "np", ".", "array", "(", "upper_bb", ")" ]
27.789474
17.894737
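A self-contained numeric check of the docstring formula above, in plain numpy; the library helpers (sma, catch_errors, fill_for_noncomputable_vals) are not reimplemented here:

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
period, std_mult = 3, 2.0
# u_bb per window = window mean + population std of the window * std_mult
upper = [data[i:i + period].mean() + data[i:i + period].std() * std_mult
         for i in range(len(data) - period + 1)]
print(np.round(upper, 3))  # [3.633 4.633 5.633]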
def do_video(self, args):
    """Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter"""
    func = getattr(args, 'func', None)
    if func is not None:
        # Call whatever subcommand function was selected
        func(self, args)
    else:
        # No subcommand was provided, so call help
        self.do_help('video')
[ "def", "do_video", "(", "self", ",", "args", ")", ":", "func", "=", "getattr", "(", "args", ",", "'func'", ",", "None", ")", "if", "func", "is", "not", "None", ":", "# Call whatever subcommand function was selected", "func", "(", "self", ",", "args", ")", "else", ":", "# No subcommand was provided, so call help", "self", ".", "do_help", "(", "'video'", ")" ]
44.111111
12.111111
def _port_action_vxlan(self, port, segment, func):
    """Verify configuration and then process event."""
    # If the segment is None, just log a warning message and return.
    if segment is None:
        self._log_missing_segment()
        return

    device_id = port.get('device_id')
    mcast_group = segment.get(api.PHYSICAL_NETWORK)
    host_id = port.get(bc.portbindings.HOST_ID)
    vni = segment.get(api.SEGMENTATION_ID)

    if vni and device_id and mcast_group and host_id:
        func(vni, device_id, mcast_group, host_id)
        return vni
    else:
        fields = "vni " if not vni else ""
        fields += "device_id " if not device_id else ""
        fields += "mcast_group " if not mcast_group else ""
        fields += "host_id" if not host_id else ""
        raise excep.NexusMissingRequiredFields(fields=fields)
[ "def", "_port_action_vxlan", "(", "self", ",", "port", ",", "segment", ",", "func", ")", ":", "# If the segment is None, just log a warning message and return.", "if", "segment", "is", "None", ":", "self", ".", "_log_missing_segment", "(", ")", "return", "device_id", "=", "port", ".", "get", "(", "'device_id'", ")", "mcast_group", "=", "segment", ".", "get", "(", "api", ".", "PHYSICAL_NETWORK", ")", "host_id", "=", "port", ".", "get", "(", "bc", ".", "portbindings", ".", "HOST_ID", ")", "vni", "=", "segment", ".", "get", "(", "api", ".", "SEGMENTATION_ID", ")", "if", "vni", "and", "device_id", "and", "mcast_group", "and", "host_id", ":", "func", "(", "vni", ",", "device_id", ",", "mcast_group", ",", "host_id", ")", "return", "vni", "else", ":", "fields", "=", "\"vni \"", "if", "not", "vni", "else", "\"\"", "fields", "+=", "\"device_id \"", "if", "not", "device_id", "else", "\"\"", "fields", "+=", "\"mcast_group \"", "if", "not", "mcast_group", "else", "\"\"", "fields", "+=", "\"host_id\"", "if", "not", "host_id", "else", "\"\"", "raise", "excep", ".", "NexusMissingRequiredFields", "(", "fields", "=", "fields", ")" ]
40.454545
17.909091
def policy_exists(policyName, region=None, key=None, keyid=None, profile=None):
    '''
    Given a policy name, check to see if the given policy exists.

    Returns True if the given policy exists and returns False if the given
    policy does not exist.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.policy_exists mypolicy

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.get_policy(policyName=policyName)
        return {'exists': True}
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'exists': False}
        return {'error': err}
[ "def", "policy_exists", "(", "policyName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "get_policy", "(", "policyName", "=", "policyName", ")", "return", "{", "'exists'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "err", "=", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "if", "e", ".", "response", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", "'Code'", ")", "==", "'ResourceNotFoundException'", ":", "return", "{", "'exists'", ":", "False", "}", "return", "{", "'error'", ":", "err", "}" ]
29.76
24.88
def btc_tx_serialize(_txobj):
    """
    Given a transaction dict returned by btc_tx_deserialize, convert it back into
    a hex-encoded byte string.

    Derived from code written by Vitalik Buterin in pybitcointools
    (https://github.com/vbuterin/pybitcointools)
    """
    # output buffer
    o = []
    txobj = None
    if encoding.json_is_base(_txobj, 16):
        # txobj is built from hex strings already.  deserialize them
        txobj = encoding.json_changebase(_txobj, lambda x: binascii.unhexlify(x))
    else:
        txobj = copy.deepcopy(_txobj)

    # version
    o.append(encoding.encode(txobj["version"], 256, 4)[::-1])

    # do we have any witness scripts?
    have_witness = False
    for inp in txobj['ins']:
        if inp.has_key('witness_script') and len(inp['witness_script']) > 0:
            have_witness = True
            break

    if have_witness:
        # add segwit marker
        o.append('\x00\x01')

    # number of inputs
    o.append(encoding.num_to_var_int(len(txobj["ins"])))

    # all inputs
    for inp in txobj["ins"]:
        # input tx hash
        o.append(inp["outpoint"]["hash"][::-1])

        # input tx outpoint
        o.append(encoding.encode(inp["outpoint"]["index"], 256, 4)[::-1])

        # input scriptsig
        script = inp.get('script')
        if not script:
            script = bytes()

        scriptsig = encoding.num_to_var_int(len(script)) + script
        o.append(scriptsig)

        # sequence
        o.append(encoding.encode(inp.get("sequence", UINT_MAX - 1), 256, 4)[::-1])

    # number of outputs
    o.append(encoding.num_to_var_int(len(txobj["outs"])))

    # all outputs
    for out in txobj["outs"]:
        # value
        o.append(encoding.encode(out["value"], 256, 8)[::-1])

        # scriptPubKey
        scriptpubkey = encoding.num_to_var_int(len(out['script'])) + out['script']
        o.append(scriptpubkey)

    # add witnesses
    if have_witness:
        for inp in txobj['ins']:
            witness_script = inp.get('witness_script')
            if not witness_script:
                witness_script = '\x00'

            o.append(witness_script)

    # locktime
    o.append(encoding.encode(txobj["locktime"], 256, 4)[::-1])

    # full string
    ret = ''.join(encoding.json_changebase(o, lambda x: encoding.safe_hexlify(x)))

    return ret
[ "def", "btc_tx_serialize", "(", "_txobj", ")", ":", "# output buffer", "o", "=", "[", "]", "txobj", "=", "None", "if", "encoding", ".", "json_is_base", "(", "_txobj", ",", "16", ")", ":", "# txobj is built from hex strings already. deserialize them ", "txobj", "=", "encoding", ".", "json_changebase", "(", "_txobj", ",", "lambda", "x", ":", "binascii", ".", "unhexlify", "(", "x", ")", ")", "else", ":", "txobj", "=", "copy", ".", "deepcopy", "(", "_txobj", ")", "# version", "o", ".", "append", "(", "encoding", ".", "encode", "(", "txobj", "[", "\"version\"", "]", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", ")", "# do we have any witness scripts?", "have_witness", "=", "False", "for", "inp", "in", "txobj", "[", "'ins'", "]", ":", "if", "inp", ".", "has_key", "(", "'witness_script'", ")", "and", "len", "(", "inp", "[", "'witness_script'", "]", ")", ">", "0", ":", "have_witness", "=", "True", "break", "if", "have_witness", ":", "# add segwit marker ", "o", ".", "append", "(", "'\\x00\\x01'", ")", "# number of inputs", "o", ".", "append", "(", "encoding", ".", "num_to_var_int", "(", "len", "(", "txobj", "[", "\"ins\"", "]", ")", ")", ")", "# all inputs", "for", "inp", "in", "txobj", "[", "\"ins\"", "]", ":", "# input tx hash", "o", ".", "append", "(", "inp", "[", "\"outpoint\"", "]", "[", "\"hash\"", "]", "[", ":", ":", "-", "1", "]", ")", "# input tx outpoint", "o", ".", "append", "(", "encoding", ".", "encode", "(", "inp", "[", "\"outpoint\"", "]", "[", "\"index\"", "]", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", ")", "# input scriptsig", "script", "=", "inp", ".", "get", "(", "'script'", ")", "if", "not", "script", ":", "script", "=", "bytes", "(", ")", "scriptsig", "=", "encoding", ".", "num_to_var_int", "(", "len", "(", "script", ")", ")", "+", "script", "o", ".", "append", "(", "scriptsig", ")", "# sequence", "o", ".", "append", "(", "encoding", ".", "encode", "(", "inp", ".", "get", "(", "\"sequence\"", ",", "UINT_MAX", "-", "1", ")", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", ")", "# number of outputs", "o", ".", "append", "(", "encoding", ".", "num_to_var_int", "(", "len", "(", "txobj", "[", "\"outs\"", "]", ")", ")", ")", "# all outputs", "for", "out", "in", "txobj", "[", "\"outs\"", "]", ":", "# value", "o", ".", "append", "(", "encoding", ".", "encode", "(", "out", "[", "\"value\"", "]", ",", "256", ",", "8", ")", "[", ":", ":", "-", "1", "]", ")", "# scriptPubKey", "scriptpubkey", "=", "encoding", ".", "num_to_var_int", "(", "len", "(", "out", "[", "'script'", "]", ")", ")", "+", "out", "[", "'script'", "]", "o", ".", "append", "(", "scriptpubkey", ")", "# add witnesses ", "if", "have_witness", ":", "for", "inp", "in", "txobj", "[", "'ins'", "]", ":", "witness_script", "=", "inp", ".", "get", "(", "'witness_script'", ")", "if", "not", "witness_script", ":", "witness_script", "=", "'\\x00'", "o", ".", "append", "(", "witness_script", ")", "# locktime", "o", ".", "append", "(", "encoding", ".", "encode", "(", "txobj", "[", "\"locktime\"", "]", ",", "256", ",", "4", ")", "[", ":", ":", "-", "1", "]", ")", "# full string", "ret", "=", "''", ".", "join", "(", "encoding", ".", "json_changebase", "(", "o", ",", "lambda", "x", ":", "encoding", ".", "safe_hexlify", "(", "x", ")", ")", ")", "return", "ret" ]
28.4125
23.8875
def srun_nodes(self):
    """Get the list of nodes on which to execute the command
    """
    count = self.execution.get('srun_nodes', 0)
    if isinstance(count, six.string_types):
        tag = count
        count = 0
    elif isinstance(count, SEQUENCES):
        return count
    else:
        assert isinstance(count, int)
        tag = self.tag
    nodes = self._srun_nodes(tag, count)
    if 'srun_nodes' in self.execution:
        self.execution['srun_nodes'] = nodes
        self.execution['srun_nodes_count'] = len(nodes)
    return nodes
[ "def", "srun_nodes", "(", "self", ")", ":", "count", "=", "self", ".", "execution", ".", "get", "(", "'srun_nodes'", ",", "0", ")", "if", "isinstance", "(", "count", ",", "six", ".", "string_types", ")", ":", "tag", "=", "count", "count", "=", "0", "elif", "isinstance", "(", "count", ",", "SEQUENCES", ")", ":", "return", "count", "else", ":", "assert", "isinstance", "(", "count", ",", "int", ")", "tag", "=", "self", ".", "tag", "nodes", "=", "self", ".", "_srun_nodes", "(", "tag", ",", "count", ")", "if", "'srun_nodes'", "in", "self", ".", "execution", ":", "self", ".", "execution", "[", "'srun_nodes'", "]", "=", "nodes", "self", ".", "execution", "[", "'srun_nodes_count'", "]", "=", "len", "(", "nodes", ")", "return", "nodes" ]
34.705882
10.941176
def get_language_data(self, qid, lang, lang_data_type):
    """
    get language data for specified qid

    :param qid:
    :param lang: language code
    :param lang_data_type: 'label', 'description' or 'aliases'
    :return: list of strings
        If nothing is found:
            If lang_data_type == label: returns ['']
            If lang_data_type == description: returns ['']
            If lang_data_type == aliases: returns []
    """
    self.init_language_data(lang, lang_data_type)

    current_lang_data = self.loaded_langs[lang][lang_data_type]
    all_lang_strings = current_lang_data.get(qid, [])
    if not all_lang_strings and lang_data_type in {'label', 'description'}:
        all_lang_strings = ['']
    return all_lang_strings
[ "def", "get_language_data", "(", "self", ",", "qid", ",", "lang", ",", "lang_data_type", ")", ":", "self", ".", "init_language_data", "(", "lang", ",", "lang_data_type", ")", "current_lang_data", "=", "self", ".", "loaded_langs", "[", "lang", "]", "[", "lang_data_type", "]", "all_lang_strings", "=", "current_lang_data", ".", "get", "(", "qid", ",", "[", "]", ")", "if", "not", "all_lang_strings", "and", "lang_data_type", "in", "{", "'label'", ",", "'description'", "}", ":", "all_lang_strings", "=", "[", "''", "]", "return", "all_lang_strings" ]
41.210526
14.894737
def load_graphs():
    '''load graphs from mavgraphs.xml'''
    mestate.graphs = []
    gfiles = ['mavgraphs.xml']
    if 'HOME' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))
    elif 'LOCALAPPDATA' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))

    for file in gfiles:
        if not os.path.exists(file):
            continue
        graphs = load_graph_xml(open(file).read(), file)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % file)
    # also load the built in graphs
    try:
        dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
        for f in dlist:
            raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
            graphs = load_graph_xml(raw, None)
            if graphs:
                mestate.graphs.extend(graphs)
                mestate.console.writeln("Loaded %s" % f)
    except Exception:
        #we're in a Windows exe, where pkg_resources doesn't work
        import pkgutil
        for f in ["ekf3Graphs.xml", "ekfGraphs.xml", "mavgraphs.xml", "mavgraphs2.xml"]:
            raw = pkgutil.get_data('MAVProxy', 'tools//graphs//' + f)
            graphs = load_graph_xml(raw, None)
            if graphs:
                mestate.graphs.extend(graphs)
                mestate.console.writeln("Loaded %s" % f)
    mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name)
[ "def", "load_graphs", "(", ")", ":", "mestate", ".", "graphs", "=", "[", "]", "gfiles", "=", "[", "'mavgraphs.xml'", "]", "if", "'HOME'", "in", "os", ".", "environ", ":", "for", "dirname", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "\".mavproxy\"", ")", ")", ":", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "'.xml'", ")", ":", "gfiles", ".", "append", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", ")", "elif", "'LOCALAPPDATA'", "in", "os", ".", "environ", ":", "for", "dirname", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'LOCALAPPDATA'", "]", ",", "\"MAVProxy\"", ")", ")", ":", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "'.xml'", ")", ":", "gfiles", ".", "append", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", ")", "for", "file", "in", "gfiles", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "continue", "graphs", "=", "load_graph_xml", "(", "open", "(", "file", ")", ".", "read", "(", ")", ",", "file", ")", "if", "graphs", ":", "mestate", ".", "graphs", ".", "extend", "(", "graphs", ")", "mestate", ".", "console", ".", "writeln", "(", "\"Loaded %s\"", "%", "file", ")", "# also load the built in graphs", "try", ":", "dlist", "=", "pkg_resources", ".", "resource_listdir", "(", "\"MAVProxy\"", ",", "\"tools/graphs\"", ")", "for", "f", "in", "dlist", ":", "raw", "=", "pkg_resources", ".", "resource_stream", "(", "\"MAVProxy\"", ",", "\"tools/graphs/%s\"", "%", "f", ")", ".", "read", "(", ")", "graphs", "=", "load_graph_xml", "(", "raw", ",", "None", ")", "if", "graphs", ":", "mestate", ".", "graphs", ".", "extend", "(", "graphs", ")", "mestate", ".", "console", ".", "writeln", "(", "\"Loaded %s\"", "%", "f", ")", "except", "Exception", ":", "#we're in a Windows exe, where pkg_resources doesn't work", "import", "pkgutil", "for", "f", "in", "[", "\"ekf3Graphs.xml\"", ",", "\"ekfGraphs.xml\"", ",", "\"mavgraphs.xml\"", ",", "\"mavgraphs2.xml\"", "]", ":", "raw", "=", "pkgutil", ".", "get_data", "(", "'MAVProxy'", ",", "'tools//graphs//'", "+", "f", ")", "graphs", "=", "load_graph_xml", "(", "raw", ",", "None", ")", "if", "graphs", ":", "mestate", ".", "graphs", ".", "extend", "(", "graphs", ")", "mestate", ".", "console", ".", "writeln", "(", "\"Loaded %s\"", "%", "f", ")", "mestate", ".", "graphs", "=", "sorted", "(", "mestate", ".", "graphs", ",", "key", "=", "lambda", "g", ":", "g", ".", "name", ")" ]
44.95122
19.439024
def initialize_fields(self):
    """
    Convert all model fields to validator fields.
    Then call the parent so that overwrites can happen if necessary for
    manually defined fields.

    :return: None
    """
    # # Pull all the "normal" fields off the model instance meta.
    for name, field in self.instance._meta.fields.items():
        if getattr(field, 'primary_key', False):
            continue
        self._meta.fields[name] = self.convert_field(name, field)

    # Many-to-many fields are not stored in the meta fields dict.
    # Pull them directly off the class.
    for name in dir(type(self.instance)):
        field = getattr(type(self.instance), name, None)
        if isinstance(field, ManyToManyField):
            self._meta.fields[name] = self.convert_field(name, field)

    super().initialize_fields()
[ "def", "initialize_fields", "(", "self", ")", ":", "# # Pull all the \"normal\" fields off the model instance meta.", "for", "name", ",", "field", "in", "self", ".", "instance", ".", "_meta", ".", "fields", ".", "items", "(", ")", ":", "if", "getattr", "(", "field", ",", "'primary_key'", ",", "False", ")", ":", "continue", "self", ".", "_meta", ".", "fields", "[", "name", "]", "=", "self", ".", "convert_field", "(", "name", ",", "field", ")", "# Many-to-many fields are not stored in the meta fields dict.", "# Pull them directly off the class.", "for", "name", "in", "dir", "(", "type", "(", "self", ".", "instance", ")", ")", ":", "field", "=", "getattr", "(", "type", "(", "self", ".", "instance", ")", ",", "name", ",", "None", ")", "if", "isinstance", "(", "field", ",", "ManyToManyField", ")", ":", "self", ".", "_meta", ".", "fields", "[", "name", "]", "=", "self", ".", "convert_field", "(", "name", ",", "field", ")", "super", "(", ")", ".", "initialize_fields", "(", ")" ]
41.666667
20.809524
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials):
  """
  Plots the convergence graph: iterations vs number of columns.
  Each curve shows the convergence for a given number of unique features.
  """
  ########################################################################
  #
  # Accumulate all the results per column in a convergence array.
  #
  # Convergence[f, c, t] = how long it took it to converge with f unique
  # features, c columns and topology t.
  convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType)))
  networkTypeNames = {}
  for i, topologyType in enumerate(networkType):
    if "Topology" in topologyType:
      networkTypeNames[i] = "Normal"
    else:
      networkTypeNames[i] = "Dense"
  for r in results:
    convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"]
  convergence /= numTrials

  # For each column, print convergence as a function of the number of unique features
  for c in range(1, max(columnRange) + 1):
    for t in range(len(networkType)):
      print(c, convergence[:, c, t])

  # Print everything anyway for debugging
  print("Average convergence array=", convergence)

  ########################################################################
  #
  # Create the plot. x-axis = number of columns
  plt.figure()
  plotPath = os.path.join("plots", "convergence_by_column_topology.pdf")

  # Plot each curve
  legendList = []
  colormap = plt.get_cmap("jet")
  colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(networkType))]

  for i in range(len(featureRange)):
    for t in range(len(networkType)):
      f = featureRange[i]
      print(columnRange)
      print(convergence[f-1, columnRange, t])
      legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t]))
      plt.plot(columnRange, convergence[f-1, columnRange, t],
               color=colorList[i*len(networkType) + t])

  # format
  plt.legend(legendList, loc="upper right")
  plt.xlabel("Number of columns")
  plt.xticks(columnRange)
  plt.yticks(range(0, int(convergence.max()) + 1))
  plt.ylabel("Average number of touches")
  plt.title("Number of touches to recognize one object (multiple columns)")

  # save
  plt.savefig(plotPath)
  plt.close()
[ "def", "plotConvergenceByColumnTopology", "(", "results", ",", "columnRange", ",", "featureRange", ",", "networkType", ",", "numTrials", ")", ":", "########################################################################", "#", "# Accumulate all the results per column in a convergence array.", "#", "# Convergence[f, c, t] = how long it took it to converge with f unique", "# features, c columns and topology t.", "convergence", "=", "numpy", ".", "zeros", "(", "(", "max", "(", "featureRange", ")", ",", "max", "(", "columnRange", ")", "+", "1", ",", "len", "(", "networkType", ")", ")", ")", "networkTypeNames", "=", "{", "}", "for", "i", ",", "topologyType", "in", "enumerate", "(", "networkType", ")", ":", "if", "\"Topology\"", "in", "topologyType", ":", "networkTypeNames", "[", "i", "]", "=", "\"Normal\"", "else", ":", "networkTypeNames", "[", "i", "]", "=", "\"Dense\"", "for", "r", "in", "results", ":", "convergence", "[", "r", "[", "\"numFeatures\"", "]", "-", "1", ",", "r", "[", "\"numColumns\"", "]", ",", "networkType", ".", "index", "(", "r", "[", "\"networkType\"", "]", ")", "]", "+=", "r", "[", "\"convergencePoint\"", "]", "convergence", "/=", "numTrials", "# For each column, print convergence as fct of number of unique features", "for", "c", "in", "range", "(", "1", ",", "max", "(", "columnRange", ")", "+", "1", ")", ":", "for", "t", "in", "range", "(", "len", "(", "networkType", ")", ")", ":", "print", "c", ",", "convergence", "[", ":", ",", "c", ",", "t", "]", "# Print everything anyway for debugging", "print", "\"Average convergence array=\"", ",", "convergence", "########################################################################", "#", "# Create the plot. x-axis=", "plt", ".", "figure", "(", ")", "plotPath", "=", "os", ".", "path", ".", "join", "(", "\"plots\"", ",", "\"convergence_by_column_topology.pdf\"", ")", "# Plot each curve", "legendList", "=", "[", "]", "colormap", "=", "plt", ".", "get_cmap", "(", "\"jet\"", ")", "colorList", "=", "[", "colormap", "(", "x", ")", "for", "x", "in", "numpy", ".", "linspace", "(", "0.", ",", "1.", ",", "len", "(", "featureRange", ")", "*", "len", "(", "networkType", ")", ")", "]", "for", "i", "in", "range", "(", "len", "(", "featureRange", ")", ")", ":", "for", "t", "in", "range", "(", "len", "(", "networkType", ")", ")", ":", "f", "=", "featureRange", "[", "i", "]", "print", "columnRange", "print", "convergence", "[", "f", "-", "1", ",", "columnRange", ",", "t", "]", "legendList", ".", "append", "(", "'Unique features={}, topology={}'", ".", "format", "(", "f", ",", "networkTypeNames", "[", "t", "]", ")", ")", "plt", ".", "plot", "(", "columnRange", ",", "convergence", "[", "f", "-", "1", ",", "columnRange", ",", "t", "]", ",", "color", "=", "colorList", "[", "i", "*", "len", "(", "networkType", ")", "+", "t", "]", ")", "# format", "plt", ".", "legend", "(", "legendList", ",", "loc", "=", "\"upper right\"", ")", "plt", ".", "xlabel", "(", "\"Number of columns\"", ")", "plt", ".", "xticks", "(", "columnRange", ")", "plt", ".", "yticks", "(", "range", "(", "0", ",", "int", "(", "convergence", ".", "max", "(", ")", ")", "+", "1", ")", ")", "plt", ".", "ylabel", "(", "\"Average number of touches\"", ")", "plt", ".", "title", "(", "\"Number of touches to recognize one object (multiple columns)\"", ")", "# save", "plt", ".", "savefig", "(", "plotPath", ")", "plt", ".", "close", "(", ")" ]
34.738462
22.676923
def parse_line(self, line):
        """Parses a single line of a GPI.

        Return a tuple `(processed_line, entities)`. Typically
        there will be a single entity, but in some cases there
        may be none (invalid line) or multiple (disjunctive clause in
        annotation extensions)

        Note: most applications will only need to call this directly if they
        require fine-grained control of parsing. For most purposes,
        :meth:`parse_file` can be used over the whole file

        Arguments
        ---------
        line : str
            A single tab-separated line from a GPI file

        """
        vals = line.split("\t")

        if len(vals) < 7:
            self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "")
            return line, []

        if len(vals) < 10 and len(vals) >= 7:
            missing_columns = 10 - len(vals)
            vals += ["" for i in range(missing_columns)]

        [
            db,
            db_object_id,
            db_object_symbol,
            db_object_name,
            db_object_synonym,
            db_object_type,
            taxon,
            parent_object_id,
            xrefs,
            properties
        ] = vals

        split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon)

        ## --
        ## db + db_object_id. CARD=1
        ## --
        id = self._pair_to_id(db, db_object_id)
        if not self._validate_id(id, split_line, context=assocparser.Report):
            return line, []

        ## --
        ## db_object_synonym CARD=0..*
        ## --
        synonyms = db_object_synonym.split("|")
        if db_object_synonym == "":
            synonyms = []

        # TODO: DRY
        parents = parent_object_id.split("|")
        if parent_object_id == "":
            parents = []
        else:
            parents = [self._normalize_id(x) for x in parents]
            for p in parents:
                self._validate_id(p, split_line, context=assocparser.Report)

        xref_ids = xrefs.split("|")
        if xrefs == "":
            xref_ids = []

        obj = {
            'id': id,
            'label': db_object_symbol,
            'full_name': db_object_name,
            'synonyms': synonyms,
            'type': db_object_type,
            'parents': parents,
            'xrefs': xref_ids,
            'taxon': {
                'id': self._taxon_id(taxon, split_line)
            }
        }
        return line, [obj]
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "vals", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "vals", ")", "<", "7", ":", "self", ".", "report", ".", "error", "(", "line", ",", "assocparser", ".", "Report", ".", "WRONG_NUMBER_OF_COLUMNS", ",", "\"\"", ")", "return", "line", ",", "[", "]", "if", "len", "(", "vals", ")", "<", "10", "and", "len", "(", "vals", ")", ">=", "7", ":", "missing_columns", "=", "10", "-", "len", "(", "vals", ")", "vals", "+=", "[", "\"\"", "for", "i", "in", "range", "(", "missing_columns", ")", "]", "[", "db", ",", "db_object_id", ",", "db_object_symbol", ",", "db_object_name", ",", "db_object_synonym", ",", "db_object_type", ",", "taxon", ",", "parent_object_id", ",", "xrefs", ",", "properties", "]", "=", "vals", "split_line", "=", "assocparser", ".", "SplitLine", "(", "line", "=", "line", ",", "values", "=", "vals", ",", "taxon", "=", "taxon", ")", "## --", "## db + db_object_id. CARD=1", "## --", "id", "=", "self", ".", "_pair_to_id", "(", "db", ",", "db_object_id", ")", "if", "not", "self", ".", "_validate_id", "(", "id", ",", "split_line", ",", "context", "=", "assocparser", ".", "Report", ")", ":", "return", "line", ",", "[", "]", "## --", "## db_object_synonym CARD=0..*", "## --", "synonyms", "=", "db_object_synonym", ".", "split", "(", "\"|\"", ")", "if", "db_object_synonym", "==", "\"\"", ":", "synonyms", "=", "[", "]", "# TODO: DRY", "parents", "=", "parent_object_id", ".", "split", "(", "\"|\"", ")", "if", "parent_object_id", "==", "\"\"", ":", "parents", "=", "[", "]", "else", ":", "parents", "=", "[", "self", ".", "_normalize_id", "(", "x", ")", "for", "x", "in", "parents", "]", "for", "p", "in", "parents", ":", "self", ".", "_validate_id", "(", "p", ",", "split_line", ",", "context", "=", "assocparser", ".", "Report", ")", "xref_ids", "=", "xrefs", ".", "split", "(", "\"|\"", ")", "if", "xrefs", "==", "\"\"", ":", "xref_ids", "=", "[", "]", "obj", "=", "{", "'id'", ":", "id", ",", "'label'", ":", "db_object_symbol", ",", "'full_name'", ":", "db_object_name", ",", "'synonyms'", ":", "synonyms", ",", "'type'", ":", "db_object_type", ",", "'parents'", ":", "parents", ",", "'xrefs'", ":", "xref_ids", ",", "'taxon'", ":", "{", "'id'", ":", "self", ".", "_taxon_id", "(", "taxon", ",", "split_line", ")", "}", "}", "return", "line", ",", "[", "obj", "]" ]
29.243902
21.170732
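For illustration, a hedged sketch of the 10-column, tab-separated input this parser expects; only the column layout is taken from parse_line above, and the GpiParser() constructor name and all field values are assumptions.

# Hypothetical usage sketch: only the 10-column layout comes from
# parse_line() above; the GpiParser() constructor name is an assumption.
line = "\t".join([
    "UniProtKB",        # db
    "P12345",           # db_object_id
    "ABC1",             # db_object_symbol
    "Example protein",  # db_object_name
    "abc-1|abc1p",      # db_object_synonym (pipe-separated)
    "protein",          # db_object_type
    "taxon:9606",       # taxon
    "",                 # parent_object_id
    "",                 # xrefs
    "",                 # properties
])
# processed_line, entities = GpiParser().parse_line(line)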
def get_physical_port(self): """Returns the link aggregation object or the ethernet port object.""" obj = None if self.is_link_aggregation(): obj = UnityLinkAggregation.get(self._cli, self.get_id()) else: obj = UnityEthernetPort.get(self._cli, self.get_id()) return obj
[ "def", "get_physical_port", "(", "self", ")", ":", "obj", "=", "None", "if", "self", ".", "is_link_aggregation", "(", ")", ":", "obj", "=", "UnityLinkAggregation", ".", "get", "(", "self", ".", "_cli", ",", "self", ".", "get_id", "(", ")", ")", "else", ":", "obj", "=", "UnityEthernetPort", ".", "get", "(", "self", ".", "_cli", ",", "self", ".", "get_id", "(", ")", ")", "return", "obj" ]
40.75
17.25
def create_module(clear_target, target): """Creates a new template HFOS plugin module""" if os.path.exists(target): if clear_target: shutil.rmtree(target) else: log("Target exists! Use --clear to delete it first.", emitter='MANAGE') sys.exit(2) done = False info = None while not done: info = _ask_questionnaire() pprint(info) done = _ask('Is the above correct', default='y', data_type='bool') augmented_info = _augment_info(info) log("Constructing module %(plugin_name)s" % info) _construct_module(augmented_info, target)
[ "def", "create_module", "(", "clear_target", ",", "target", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "target", ")", ":", "if", "clear_target", ":", "shutil", ".", "rmtree", "(", "target", ")", "else", ":", "log", "(", "\"Target exists! Use --clear to delete it first.\"", ",", "emitter", "=", "'MANAGE'", ")", "sys", ".", "exit", "(", "2", ")", "done", "=", "False", "info", "=", "None", "while", "not", "done", ":", "info", "=", "_ask_questionnaire", "(", ")", "pprint", "(", "info", ")", "done", "=", "_ask", "(", "'Is the above correct'", ",", "default", "=", "'y'", ",", "data_type", "=", "'bool'", ")", "augmented_info", "=", "_augment_info", "(", "info", ")", "log", "(", "\"Constructing module %(plugin_name)s\"", "%", "info", ")", "_construct_module", "(", "augmented_info", ",", "target", ")" ]
27.347826
19.826087
def get_stack_frames(error_stack: bool = True) -> list:
    """
    Returns a list of the current stack frames, which are pruned to focus
    on the Cauldron code where the relevant information resides.
    """
    cauldron_path = environ.paths.package()
    resources_path = environ.paths.resources()
    frames = (
        list(traceback.extract_tb(sys.exc_info()[-1]))
        if error_stack else
        traceback.extract_stack()
    ).copy()

    def is_cauldron_code(test_filename: str) -> bool:
        if not test_filename or not test_filename.startswith(cauldron_path):
            return False

        if test_filename.startswith(resources_path):
            return False

        return True

    while len(frames) > 1 and is_cauldron_code(frames[0].filename):
        frames.pop(0)

    return frames
[ "def", "get_stack_frames", "(", "error_stack", ":", "bool", "=", "True", ")", "->", "list", ":", "cauldron_path", "=", "environ", ".", "paths", ".", "package", "(", ")", "resources_path", "=", "environ", ".", "paths", ".", "resources", "(", ")", "frames", "=", "(", "list", "(", "traceback", ".", "extract_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "-", "1", "]", ")", ")", "if", "error_stack", "else", "traceback", ".", "extract_stack", "(", ")", ")", ".", "copy", "(", ")", "def", "is_cauldron_code", "(", "test_filename", ":", "str", ")", "->", "bool", ":", "if", "not", "test_filename", "or", "not", "test_filename", ".", "startswith", "(", "cauldron_path", ")", ":", "return", "False", "if", "test_filename", ".", "startswith", "(", "resources_path", ")", ":", "return", "False", "return", "True", "while", "len", "(", "frames", ")", ">", "1", "and", "is_cauldron_code", "(", "frames", "[", "0", "]", ".", "filename", ")", ":", "frames", ".", "pop", "(", "0", ")", "return", "frames" ]
29.074074
21.814815
def analog_sensor_power(cls, bus, operation):
        """ Method that turns the attached analog sensor modules on or off.
            Includes all attached soil moisture sensors.
            Note that all of the SensorCluster objects should be attached
            in parallel, and only 1 GPIO pin is available to toggle analog
            sensor power.
            The sensor power should be left on for at least 100ms
            in order to allow the sensors to stabilize before reading.

            Usage: SensorCluster.analog_sensor_power(bus,"on")
                OR SensorCluster.analog_sensor_power(bus,"off")
            This method should be removed if an off-board GPIO extender is
            used.
        """
        # Set appropriate analog sensor power bit in GPIO mask
        # using the ControlCluster bank_mask to avoid overwriting any data
        reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
        if operation == "on":
            reg_data = reg_data | 1 << cls.analog_power_pin
        elif operation == "off":
            reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
        else:
            raise SensorError(
                "Invalid command used while enabling analog sensors")
        # Send updated IO mask to output
        IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
[ "def", "analog_sensor_power", "(", "cls", ",", "bus", ",", "operation", ")", ":", "# Set appropriate analog sensor power bit in GPIO mask\r", "# using the ControlCluster bank_mask to avoid overwriting any data\r", "reg_data", "=", "get_IO_reg", "(", "bus", ",", "0x20", ",", "cls", ".", "power_bank", ")", "if", "operation", "==", "\"on\"", ":", "reg_data", "=", "reg_data", "|", "1", "<<", "cls", ".", "analog_power_pin", "elif", "operation", "==", "\"off\"", ":", "reg_data", "=", "reg_data", "&", "(", "0b11111111", "^", "(", "1", "<<", "cls", ".", "analog_power_pin", ")", ")", "else", ":", "raise", "SensorError", "(", "\"Invalid command used while enabling analog sensors\"", ")", "# Send updated IO mask to output\r", "IO_expander_output", "(", "bus", ",", "0x20", ",", "cls", ".", "power_bank", ",", "reg_data", ")" ]
53.52
20.88
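A minimal usage sketch, assuming SensorCluster is the enclosing class and that an SMBus handle is available; the bus number is an example value.

import time
import smbus

bus = smbus.SMBus(1)                           # bus number is an assumption
SensorCluster.analog_sensor_power(bus, "on")   # raise the power pin
time.sleep(0.1)                                # >=100 ms settle time per the docstring
# ... read the attached analog sensors here ...
SensorCluster.analog_sensor_power(bus, "off")  # clear the power pin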
def is_valid(self, wordid) -> bool: """ Ensures </s> is only generated when the hypothesis is completed. :param wordid: The wordid to validate. :return: True if all constraints are already met or the word ID is not the EOS id. """ return self.finished() or wordid != self.eos_id or (self.num_needed() == 1 and self.eos_id in self.allowed())
[ "def", "is_valid", "(", "self", ",", "wordid", ")", "->", "bool", ":", "return", "self", ".", "finished", "(", ")", "or", "wordid", "!=", "self", ".", "eos_id", "or", "(", "self", ".", "num_needed", "(", ")", "==", "1", "and", "self", ".", "eos_id", "in", "self", ".", "allowed", "(", ")", ")" ]
47.75
26.25
def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables)
[ "def", "broadcast_variables", "(", "*", "variables", ")", ":", "dims_map", "=", "_unified_dims", "(", "variables", ")", "dims_tuple", "=", "tuple", "(", "dims_map", ")", "return", "tuple", "(", "var", ".", "set_dims", "(", "dims_map", ")", "if", "var", ".", "dims", "!=", "dims_tuple", "else", "var", "for", "var", "in", "variables", ")" ]
48.5
18.357143
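A hedged sketch of the broadcasting behaviour described above, assuming this is xarray's internal broadcast_variables and that it is importable alongside Variable:

# Sketch, assuming this is xarray's internal broadcast_variables().
import numpy as np
from xarray import Variable

a = Variable(("x",), np.arange(3))
b = Variable(("y",), np.arange(4))
a2, b2 = broadcast_variables(a, b)
# Dimensions are unified in order of first appearance:
# a2.dims == b2.dims == ("x", "y"); both now have shape (3, 4).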
def read_property_to_any(obj, propertyIdentifier, propertyArrayIndex=None): """Read the specified property of the object, with the optional array index, and cast the result into an Any object.""" if _debug: read_property_to_any._debug("read_property_to_any %s %r %r", obj, propertyIdentifier, propertyArrayIndex) # get the datatype datatype = obj.get_datatype(propertyIdentifier) if _debug: read_property_to_any._debug(" - datatype: %r", datatype) if datatype is None: raise ExecutionError(errorClass='property', errorCode='datatypeNotSupported') # get the value value = obj.ReadProperty(propertyIdentifier, propertyArrayIndex) if _debug: read_property_to_any._debug(" - value: %r", value) if value is None: raise ExecutionError(errorClass='property', errorCode='unknownProperty') # change atomic values into something encodeable if issubclass(datatype, Atomic): value = datatype(value) elif issubclass(datatype, Array) and (propertyArrayIndex is not None): if propertyArrayIndex == 0: value = Unsigned(value) elif issubclass(datatype.subtype, Atomic): value = datatype.subtype(value) elif not isinstance(value, datatype.subtype): raise TypeError("invalid result datatype, expecting %s and got %s" \ % (datatype.subtype.__name__, type(value).__name__)) elif not isinstance(value, datatype): raise TypeError("invalid result datatype, expecting %s and got %s" \ % (datatype.__name__, type(value).__name__)) if _debug: read_property_to_any._debug(" - encodeable value: %r", value) # encode the value result = Any() result.cast_in(value) if _debug: read_property_to_any._debug(" - result: %r", result) # return the object return result
[ "def", "read_property_to_any", "(", "obj", ",", "propertyIdentifier", ",", "propertyArrayIndex", "=", "None", ")", ":", "if", "_debug", ":", "read_property_to_any", ".", "_debug", "(", "\"read_property_to_any %s %r %r\"", ",", "obj", ",", "propertyIdentifier", ",", "propertyArrayIndex", ")", "# get the datatype", "datatype", "=", "obj", ".", "get_datatype", "(", "propertyIdentifier", ")", "if", "_debug", ":", "read_property_to_any", ".", "_debug", "(", "\" - datatype: %r\"", ",", "datatype", ")", "if", "datatype", "is", "None", ":", "raise", "ExecutionError", "(", "errorClass", "=", "'property'", ",", "errorCode", "=", "'datatypeNotSupported'", ")", "# get the value", "value", "=", "obj", ".", "ReadProperty", "(", "propertyIdentifier", ",", "propertyArrayIndex", ")", "if", "_debug", ":", "read_property_to_any", ".", "_debug", "(", "\" - value: %r\"", ",", "value", ")", "if", "value", "is", "None", ":", "raise", "ExecutionError", "(", "errorClass", "=", "'property'", ",", "errorCode", "=", "'unknownProperty'", ")", "# change atomic values into something encodeable", "if", "issubclass", "(", "datatype", ",", "Atomic", ")", ":", "value", "=", "datatype", "(", "value", ")", "elif", "issubclass", "(", "datatype", ",", "Array", ")", "and", "(", "propertyArrayIndex", "is", "not", "None", ")", ":", "if", "propertyArrayIndex", "==", "0", ":", "value", "=", "Unsigned", "(", "value", ")", "elif", "issubclass", "(", "datatype", ".", "subtype", ",", "Atomic", ")", ":", "value", "=", "datatype", ".", "subtype", "(", "value", ")", "elif", "not", "isinstance", "(", "value", ",", "datatype", ".", "subtype", ")", ":", "raise", "TypeError", "(", "\"invalid result datatype, expecting %s and got %s\"", "%", "(", "datatype", ".", "subtype", ".", "__name__", ",", "type", "(", "value", ")", ".", "__name__", ")", ")", "elif", "not", "isinstance", "(", "value", ",", "datatype", ")", ":", "raise", "TypeError", "(", "\"invalid result datatype, expecting %s and got %s\"", "%", "(", "datatype", ".", "__name__", ",", "type", "(", "value", ")", ".", "__name__", ")", ")", "if", "_debug", ":", "read_property_to_any", ".", "_debug", "(", "\" - encodeable value: %r\"", ",", "value", ")", "# encode the value", "result", "=", "Any", "(", ")", "result", ".", "cast_in", "(", "value", ")", "if", "_debug", ":", "read_property_to_any", ".", "_debug", "(", "\" - result: %r\"", ",", "result", ")", "# return the object", "return", "result" ]
45.425
23.875
def _parse_sequences(ilines, expect_qlen): """Parse the sequences in the current block. Sequence looks like: $3=227(209): >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75 {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}* """ while True: first = next(ilines) if first.startswith('_') and first.endswith('].'): # End of sequences & end of block break # ENH: handle wrapped lines? try: index, this_len, query_len = _parse_seq_preheader(first) except ValueError: logging.warn('Unparseable line (SKIPPING):\n%s', first) continue (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description ) = _parse_seq_header(next(ilines)) try: headseq, molseq, tailseq = _parse_seq_body(next(ilines)) except ValueError: logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id) continue # Validation if expect_qlen != query_len: logging.warn("Query length in %s given as %d; expected %d", rec_id, query_len, expect_qlen) if not headseq and not headlen: headlen = 0 if not tailseq and not taillen: taillen = 0 if headseq: if headlen is None: headlen = len(headseq) elif headlen != len(headseq): logging.warn("Conflicting head flank lengths in %s: %d, %d", rec_id, headlen, len(headseq)) if tailseq: if taillen is None: taillen = len(tailseq) elif taillen != len(tailseq): logging.warn("Conflicting tail flank lengths in %s: %d, %d", rec_id, taillen, len(tailseq)) yield {'index': index, 'id': rec_id, 'description': description, 'dbxrefs': dbxrefs, 'phylum': phylum, 'taxchar': taxchar, 'head_len': headlen, 'tail_len': taillen, 'head_seq': headseq, 'tail_seq': tailseq, 'length': this_len, 'seq': molseq, }
[ "def", "_parse_sequences", "(", "ilines", ",", "expect_qlen", ")", ":", "while", "True", ":", "first", "=", "next", "(", "ilines", ")", "if", "first", ".", "startswith", "(", "'_'", ")", "and", "first", ".", "endswith", "(", "'].'", ")", ":", "# End of sequences & end of block", "break", "# ENH: handle wrapped lines?", "try", ":", "index", ",", "this_len", ",", "query_len", "=", "_parse_seq_preheader", "(", "first", ")", "except", "ValueError", ":", "logging", ".", "warn", "(", "'Unparseable line (SKIPPING):\\n%s'", ",", "first", ")", "continue", "(", "rec_id", ",", "dbxrefs", ",", "headlen", ",", "taillen", ",", "phylum", ",", "taxchar", ",", "description", ")", "=", "_parse_seq_header", "(", "next", "(", "ilines", ")", ")", "try", ":", "headseq", ",", "molseq", ",", "tailseq", "=", "_parse_seq_body", "(", "next", "(", "ilines", ")", ")", "except", "ValueError", ":", "logging", ".", "warn", "(", "'Unparseable sequence: %s -- SKIPPING'", ",", "rec_id", ")", "continue", "# Validation", "if", "expect_qlen", "!=", "query_len", ":", "logging", ".", "warn", "(", "\"Query length in %s given as %d; expected %d\"", ",", "rec_id", ",", "query_len", ",", "expect_qlen", ")", "if", "not", "headseq", "and", "not", "headlen", ":", "headlen", "=", "0", "if", "not", "tailseq", "and", "not", "taillen", ":", "taillen", "=", "0", "if", "headseq", ":", "if", "headlen", "is", "None", ":", "headlen", "=", "len", "(", "headseq", ")", "elif", "headlen", "!=", "len", "(", "headseq", ")", ":", "logging", ".", "warn", "(", "\"Conflicting head flank lengths in %s: %d, %d\"", ",", "rec_id", ",", "headlen", ",", "len", "(", "headseq", ")", ")", "if", "tailseq", ":", "if", "taillen", "is", "None", ":", "taillen", "=", "len", "(", "tailseq", ")", "elif", "taillen", "!=", "len", "(", "tailseq", ")", ":", "logging", ".", "warn", "(", "\"Conflicting tail flank lengths in %s: %d, %d\"", ",", "rec_id", ",", "taillen", ",", "len", "(", "tailseq", ")", ")", "yield", "{", "'index'", ":", "index", ",", "'id'", ":", "rec_id", ",", "'description'", ":", "description", ",", "'dbxrefs'", ":", "dbxrefs", ",", "'phylum'", ":", "phylum", ",", "'taxchar'", ":", "taxchar", ",", "'head_len'", ":", "headlen", ",", "'tail_len'", ":", "taillen", ",", "'head_seq'", ":", "headseq", ",", "'tail_seq'", ":", "tailseq", ",", "'length'", ":", "this_len", ",", "'seq'", ":", "molseq", ",", "}" ]
39.8125
21.921875
def logging_config(logpath=None,
                   level=logging.DEBUG,
                   console_level=logging.INFO,
                   no_console=False):
    """ Configure logging for the 'nli' logger.
    """
    logger = logging.getLogger('nli')
    # Remove all the current handlers
    for handler in logger.handlers:
        logger.removeHandler(handler)
    logger.handlers = []
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(filename)s:%(funcName)s: %(message)s')
    if logpath is not None:
        print('All Logs will be saved to {}'.format(logpath))
        logfile = logging.FileHandler(logpath, mode='w')
        logfile.setLevel(level)
        logfile.setFormatter(formatter)
        logger.addHandler(logfile)
    if not no_console:
        # Initialize the console logging
        logconsole = logging.StreamHandler()
        logconsole.setLevel(console_level)
        logconsole.setFormatter(formatter)
        logger.addHandler(logconsole)
[ "def", "logging_config", "(", "logpath", "=", "None", ",", "level", "=", "logging", ".", "DEBUG", ",", "console_level", "=", "logging", ".", "INFO", ",", "no_console", "=", "False", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'nli'", ")", "# Remove all the current handlers", "for", "handler", "in", "logger", ".", "handlers", ":", "logger", ".", "removeHandler", "(", "handler", ")", "logger", ".", "handlers", "=", "[", "]", "logger", ".", "propagate", "=", "False", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(filename)s:%(funcName)s: %(message)s'", ")", "if", "logpath", "is", "not", "None", ":", "print", "(", "'All Logs will be saved to {}'", ".", "format", "(", "logpath", ")", ")", "logfile", "=", "logging", ".", "FileHandler", "(", "logpath", ",", "mode", "=", "'w'", ")", "logfile", ".", "setLevel", "(", "level", ")", "logfile", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "logfile", ")", "if", "not", "no_console", ":", "# Initialze the console logging", "logconsole", "=", "logging", ".", "StreamHandler", "(", ")", "logconsole", ".", "setLevel", "(", "console_level", ")", "logconsole", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "logconsole", ")" ]
32.4
11.133333
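A usage sketch: DEBUG-level output goes to a file while the console stays at INFO; the 'train.log' path is an example value.

import logging

logging_config(logpath="train.log",            # example path
               level=logging.DEBUG,
               console_level=logging.INFO)
logging.getLogger("nli").info("shown on the console and written to train.log")
logging.getLogger("nli").debug("written to train.log only")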
def restore(self): """Restore the saved value for the attribute of the object.""" if self.proxy_object is None: if self.getter: setattr(self.getter_class, self.attr_name, self.getter) elif self.is_local: setattr(self.orig_object, self.attr_name, self.orig_value) else: # Was not a local, safe to delete: delattr(self.orig_object, self.attr_name) else: setattr(sys.modules[self.orig_object.__module__], self.orig_object.__name__, self.orig_object)
[ "def", "restore", "(", "self", ")", ":", "if", "self", ".", "proxy_object", "is", "None", ":", "if", "self", ".", "getter", ":", "setattr", "(", "self", ".", "getter_class", ",", "self", ".", "attr_name", ",", "self", ".", "getter", ")", "elif", "self", ".", "is_local", ":", "setattr", "(", "self", ".", "orig_object", ",", "self", ".", "attr_name", ",", "self", ".", "orig_value", ")", "else", ":", "# Was not a local, safe to delete:", "delattr", "(", "self", ".", "orig_object", ",", "self", ".", "attr_name", ")", "else", ":", "setattr", "(", "sys", ".", "modules", "[", "self", ".", "orig_object", ".", "__module__", "]", ",", "self", ".", "orig_object", ".", "__name__", ",", "self", ".", "orig_object", ")" ]
43.5
15.642857
def make_name(text, delim=u'-', maxlength=50, checkused=None, counter=2): u""" Generate an ASCII name slug. If a checkused filter is provided, it will be called with the candidate. If it returns True, make_name will add counter numbers starting from 2 until a suitable candidate is found. :param string delim: Delimiter between words, default '-' :param int maxlength: Maximum length of name, default 50 :param checkused: Function to check if a generated name is available for use :param int counter: Starting position for name counter >>> make_name('This is a title') 'this-is-a-title' >>> make_name('Invalid URL/slug here') 'invalid-url-slug-here' >>> make_name('this.that') 'this-that' >>> make_name('this:that') 'this-that' >>> make_name("How 'bout this?") 'how-bout-this' >>> make_name(u"How’s that?") 'hows-that' >>> make_name(u'K & D') 'k-d' >>> make_name('billion+ pageviews') 'billion-pageviews' >>> make_name(u'हिन्दी slug!') 'hindii-slug' >>> make_name(u'Your webapps should talk not just in English, but in español, Kiswahili, 廣州話 and অসমীয়া too.', maxlength=250) u'your-webapps-should-talk-not-just-in-english-but-in-espanol-kiswahili-guang-zhou-hua-and-asmiiyaa-too' >>> make_name(u'__name__', delim=u'_') 'name' >>> make_name(u'how_about_this', delim=u'_') 'how_about_this' >>> make_name(u'and-that', delim=u'_') 'and_that' >>> make_name(u'Umlauts in Mötörhead') 'umlauts-in-motorhead' >>> make_name('Candidate', checkused=lambda c: c in ['candidate']) 'candidate2' >>> make_name('Candidate', checkused=lambda c: c in ['candidate'], counter=1) 'candidate1' >>> make_name('Candidate', checkused=lambda c: c in ['candidate', 'candidate1', 'candidate2'], counter=1) 'candidate3' >>> make_name('Long title, but snipped', maxlength=20) 'long-title-but-snipp' >>> len(make_name('Long title, but snipped', maxlength=20)) 20 >>> make_name('Long candidate', maxlength=10, checkused=lambda c: c in ['long-candi', 'long-cand1']) 'long-cand2' >>> make_name(u'Lǝnkǝran') 'lankaran' >>> make_name(u'[email protected]') 'example-example-com' >>> make_name('trailing-delimiter', maxlength=10) 'trailing-d' >>> make_name('trailing-delimiter', maxlength=9) 'trailing' >>> make_name('''test this ... newline''') 'test-this-newline' >>> make_name(u"testing an emoji😁") u'testing-an-emoji' >>> make_name('''testing\\t\\nmore\\r\\nslashes''') 'testing-more-slashes' >>> make_name('What if a HTML <tag/>') 'what-if-a-html-tag' >>> make_name('These are equivalent to \\x01 through \\x1A') 'these-are-equivalent-to-through' """ name = text.replace('@', delim) name = unidecode(name).replace('@', 'a') # We don't know why unidecode uses '@' for 'a'-like chars name = six.text_type(delim.join([_strip_re.sub('', x) for x in _punctuation_re.split(name.lower()) if x != ''])) if isinstance(text, six.text_type): # Unidecode returns str. Restore to a unicode string if original was unicode name = six.text_type(name) candidate = name[:maxlength] if candidate.endswith(delim): candidate = candidate[:-1] if checkused is None: return candidate existing = checkused(candidate) while existing: candidate = name[:maxlength - len(str(counter))] + str(counter) counter += 1 existing = checkused(candidate) return candidate
[ "def", "make_name", "(", "text", ",", "delim", "=", "u'-'", ",", "maxlength", "=", "50", ",", "checkused", "=", "None", ",", "counter", "=", "2", ")", ":", "name", "=", "text", ".", "replace", "(", "'@'", ",", "delim", ")", "name", "=", "unidecode", "(", "name", ")", ".", "replace", "(", "'@'", ",", "'a'", ")", "# We don't know why unidecode uses '@' for 'a'-like chars", "name", "=", "six", ".", "text_type", "(", "delim", ".", "join", "(", "[", "_strip_re", ".", "sub", "(", "''", ",", "x", ")", "for", "x", "in", "_punctuation_re", ".", "split", "(", "name", ".", "lower", "(", ")", ")", "if", "x", "!=", "''", "]", ")", ")", "if", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "# Unidecode returns str. Restore to a unicode string if original was unicode", "name", "=", "six", ".", "text_type", "(", "name", ")", "candidate", "=", "name", "[", ":", "maxlength", "]", "if", "candidate", ".", "endswith", "(", "delim", ")", ":", "candidate", "=", "candidate", "[", ":", "-", "1", "]", "if", "checkused", "is", "None", ":", "return", "candidate", "existing", "=", "checkused", "(", "candidate", ")", "while", "existing", ":", "candidate", "=", "name", "[", ":", "maxlength", "-", "len", "(", "str", "(", "counter", ")", ")", "]", "+", "str", "(", "counter", ")", "counter", "+=", "1", "existing", "=", "checkused", "(", "candidate", ")", "return", "candidate" ]
39.625
20.795455
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): """Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object """ from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") elif value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return value, method
[ "def", "validate_fillna_kwargs", "(", "value", ",", "method", ",", "validate_scalar_dict_value", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "missing", "import", "clean_fill_method", "if", "value", "is", "None", "and", "method", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify a fill 'value' or 'method'.\"", ")", "elif", "value", "is", "None", "and", "method", "is", "not", "None", ":", "method", "=", "clean_fill_method", "(", "method", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "None", ":", "if", "validate_scalar_dict_value", "and", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'\"value\" parameter must be a scalar or dict, but '", "'you passed a \"{0}\"'", ".", "format", "(", "type", "(", "value", ")", ".", "__name__", ")", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot specify both 'value' and 'method'.\"", ")", "return", "value", ",", "method" ]
37.676471
23.882353
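A behaviour sketch based on the checks above; note that a valid method string is normalized by clean_fill_method, e.g. 'ffill' becomes 'pad'.

value, method = validate_fillna_kwargs(0, None)        # -> (0, None)
value, method = validate_fillna_kwargs(None, "ffill")  # -> (None, "pad")
# validate_fillna_kwargs(None, None)    raises ValueError
# validate_fillna_kwargs(0, "ffill")    raises ValueError
# validate_fillna_kwargs([0, 1], None)  raises TypeError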
def check(self, request, secret):
        """Verifies whether the request bears an authorization that is
        appropriate and valid for this version of the signature. This
        verifies every element of the signature, including headers other
        than Authorization.

        Keyword arguments:
        request -- A request object which can be consumed by this API.
        secret -- The base64-encoded secret key for the HMAC authorization.
        """
        if request.get_header("Authorization") == "":
            return False
        ah = self.parse_auth_headers(request.get_header("Authorization"))
        if "id" not in ah:
            return False
        if "signature" not in ah:
            return False
        return ah["signature"] == self.sign(request, ah, secret)
[ "def", "check", "(", "self", ",", "request", ",", "secret", ")", ":", "if", "request", ".", "get_header", "(", "\"Authorization\"", ")", "==", "\"\"", ":", "return", "False", "ah", "=", "self", ".", "parse_auth_headers", "(", "request", ".", "get_header", "(", "\"Authorization\"", ")", ")", "if", "\"id\"", "not", "in", "ah", ":", "return", "False", "if", "\"signature\"", "not", "in", "ah", ":", "return", "False", "return", "ah", "[", "\"signature\"", "]", "==", "self", ".", "sign", "(", "request", ",", "ah", ",", "secret", ")" ]
47.4375
20.125
def setup_observations(self): """ main entry point for setting up observations """ obs_methods = [self.setup_water_budget_obs,self.setup_hyd, self.setup_smp,self.setup_hob,self.setup_hds, self.setup_sfr_obs] obs_types = ["mflist water budget obs","hyd file", "external obs-sim smp files","hob","hds","sfr"] self.obs_dfs = {} for obs_method, obs_type in zip(obs_methods,obs_types): self.log("processing obs type {0}".format(obs_type)) obs_method() self.log("processing obs type {0}".format(obs_type))
[ "def", "setup_observations", "(", "self", ")", ":", "obs_methods", "=", "[", "self", ".", "setup_water_budget_obs", ",", "self", ".", "setup_hyd", ",", "self", ".", "setup_smp", ",", "self", ".", "setup_hob", ",", "self", ".", "setup_hds", ",", "self", ".", "setup_sfr_obs", "]", "obs_types", "=", "[", "\"mflist water budget obs\"", ",", "\"hyd file\"", ",", "\"external obs-sim smp files\"", ",", "\"hob\"", ",", "\"hds\"", ",", "\"sfr\"", "]", "self", ".", "obs_dfs", "=", "{", "}", "for", "obs_method", ",", "obs_type", "in", "zip", "(", "obs_methods", ",", "obs_types", ")", ":", "self", ".", "log", "(", "\"processing obs type {0}\"", ".", "format", "(", "obs_type", ")", ")", "obs_method", "(", ")", "self", ".", "log", "(", "\"processing obs type {0}\"", ".", "format", "(", "obs_type", ")", ")" ]
45.571429
18.214286
def element_neighbors(self):
        """Return a list with element numbers (zero indexed) of neighboring
        elements. Note that the elements are not sorted. No spatial
        orientation can be inferred from the order of neighbors.

        WARNING: This function is slow due to a nested loop. This would be
        a good starting point for further optimizations.

        In order to speed things up, we could search using the raw data,
        i.e., with CutMcK enabled sorting, and then restrict the loops to
        2x the bandwidth (before - after).

        While not being returned, this function also sets the variable
        self.element_neighbors_edges, in which the common nodes with each
        neighbor are stored.

        Returns
        -------
        neighbors : list
            a list (length equal to nr of elements) with neighboring
            elements

        Examples
        --------

        """
        if self.element_neighbors_data is not None:
            return self.element_neighbors_data

        max_nr_edges = self.header['element_infos'][0, 2]

        # initialize the neighbor array
        self.element_neighbors_data = []
        self.element_neighbors_edges = []

        # determine neighbors
        print('Looking for neighbors')
        for nr, element_nodes in enumerate(self.elements):
            # print('element {0}/{1}'.format(nr + 1, self.nr_of_elements))
            # print(element_nodes)
            neighbors = []
            neighbors_edges = []  # store the edges to this neighbor
            for nr1, el in enumerate(self.elements):
                # we look for elements that have two nodes in common with
                # this element
                intersection = np.intersect1d(element_nodes, el)
                if intersection.size == 2:
                    neighbors.append(nr1)
                    neighbors_edges.append(intersection)
                    # stop if we reached the maximum number of possible
                    # edges; this saves us quite some loop iterations
                    if len(neighbors) == max_nr_edges:
                        break
            self.element_neighbors_data.append(neighbors)
            self.element_neighbors_edges.append(neighbors_edges)

        return self.element_neighbors_data
[ "def", "element_neighbors", "(", "self", ")", ":", "if", "self", ".", "element_neighbors_data", "is", "not", "None", ":", "return", "self", ".", "element_neighbors_data", "max_nr_edges", "=", "self", ".", "header", "[", "'element_infos'", "]", "[", "0", ",", "2", "]", "# initialize the neighbor array", "self", ".", "element_neighbors_data", "=", "[", "]", "self", ".", "element_neighbors_edges", "=", "[", "]", "# determine neighbors", "print", "(", "'Looking for neighbors'", ")", "for", "nr", ",", "element_nodes", "in", "enumerate", "(", "self", ".", "elements", ")", ":", "# print('element {0}/{1}'.format(nr + 1, self.nr_of_elements))", "# print(element_nodes)", "neighbors", "=", "[", "]", "neighbors_edges", "=", "[", "]", "# store the edges to this neighbor", "for", "nr1", ",", "el", "in", "enumerate", "(", "self", ".", "elements", ")", ":", "# we look for elements that have two nodes in common with this", "# element", "intersection", "=", "np", ".", "intersect1d", "(", "element_nodes", ",", "el", ")", "if", "intersection", ".", "size", "==", "2", ":", "neighbors", ".", "append", "(", "nr1", ")", "neighbors_edges", ".", "append", "(", "intersection", ")", "# stop if we reached the maximum number of possible edges", "# this saves us quite some loop iterations", "if", "len", "(", "neighbors", ")", "==", "max_nr_edges", ":", "break", "self", ".", "element_neighbors_data", ".", "append", "(", "neighbors", ")", "self", ".", "element_neighbors_edges", ".", "append", "(", "neighbors_edges", ")", "return", "self", ".", "element_neighbors_data" ]
40
21.428571
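Since the docstring above invites optimization, here is a hedged standalone sketch (not part of the class) that indexes elements by shared node pairs once, replacing the O(n^2) nested loop. It assumes neighboring elements share exactly two nodes, matching the intersection.size == 2 test above, and returns only the neighbor lists, not the shared edges.

from collections import defaultdict
from itertools import combinations

def element_neighbors_fast(elements):
    # Map each sorted node pair to the elements containing it.
    pair_to_elements = defaultdict(list)
    for idx, nodes in enumerate(elements):
        for pair in combinations(sorted(nodes), 2):
            pair_to_elements[pair].append(idx)
    # Elements sharing a node pair are neighbors.
    neighbors = [[] for _ in elements]
    for members in pair_to_elements.values():
        for a in members:
            for b in members:
                if a != b:
                    neighbors[a].append(b)
    return neighbors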
def generate_url(query, first, recent, country_code):
    """(str, str, str, str) -> str

    A Bing search url in the required format is generated.
    """
    query = '+'.join(query.split())
    url = 'http://www.bing.com/search?q=' + query + '&first=' + first
    if recent in ['h', 'd', 'w', 'm', 'y']:
        # A True/False would be enough. This is just to maintain consistency with google.
        url = url + '&filters=ex1%3a%22ez1%22'
    if country_code is not None:
        url += '&cc=' + country_code
    return url
[ "def", "generate_url", "(", "query", ",", "first", ",", "recent", ",", "country_code", ")", ":", "query", "=", "'+'", ".", "join", "(", "query", ".", "split", "(", ")", ")", "url", "=", "'http://www.bing.com/search?q='", "+", "query", "+", "'&first='", "+", "first", "if", "recent", "in", "[", "'h'", ",", "'d'", ",", "'w'", ",", "'m'", ",", "'y'", "]", ":", "# A True/False would be enough. This is just to maintain consistancy with google.", "url", "=", "url", "+", "'&filters=ex1%3a%22ez1%22'", "if", "country_code", "is", "not", "None", ":", "url", "+=", "'&cc='", "+", "country_code", "return", "url" ]
44.272727
16.545455
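Example calls, with outputs traced from the string building above (note that first is passed as a string, since it is concatenated directly):

generate_url("climate change", "1", None, None)
# 'http://www.bing.com/search?q=climate+change&first=1'
generate_url("climate change", "11", "w", "us")
# 'http://www.bing.com/search?q=climate+change&first=11&filters=ex1%3a%22ez1%22&cc=us'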
def create_table(group, name, dtype, **attributes): """Create a new array dataset under group with compound datatype and maxshape=(None,)""" dset = group.create_dataset( name, shape=(0,), dtype=dtype, maxshape=(None,)) set_attributes(dset, **attributes) return dset
[ "def", "create_table", "(", "group", ",", "name", ",", "dtype", ",", "*", "*", "attributes", ")", ":", "dset", "=", "group", ".", "create_dataset", "(", "name", ",", "shape", "=", "(", "0", ",", ")", ",", "dtype", "=", "dtype", ",", "maxshape", "=", "(", "None", ",", ")", ")", "set_attributes", "(", "dset", ",", "*", "*", "attributes", ")", "return", "dset" ]
47.333333
10.333333
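A usage sketch, assuming an h5py-style group and the set_attributes helper from the same module; the file name and the units attribute are example values.

import h5py
import numpy as np

dtype = np.dtype([("time", "f8"), ("value", "f4")])  # compound row type
with h5py.File("log.h5", "w") as f:                  # a File is also a group
    dset = create_table(f, "samples", dtype, units="V")
    dset.resize((100,))                              # maxshape=(None,) allows growth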
def decode_varint(f, max_bytes=4): """Decode variable integer using algorithm similar to that described in MQTT Version 3.1.1 line 297. Parameters ---------- f: file Object with a read method. max_bytes: int or None If a varint cannot be constructed using `max_bytes` or fewer from f then raises a `DecodeError`. If None then there is no maximum number of bytes. Raises ------- DecodeError When length is greater than max_bytes. UnderflowDecodeError When file ends before enough bytes can be read to construct the varint. Returns ------- int Number of bytes consumed. int Value extracted from `f`. """ num_bytes_consumed = 0 value = 0 m = 1 while True: buf = f.read(1) if len(buf) == 0: raise UnderflowDecodeError() (u8,) = FIELD_U8.unpack(buf) value += (u8 & 0x7f) * m m *= 0x80 num_bytes_consumed += 1 if u8 & 0x80 == 0: # No further bytes break elif max_bytes is not None and num_bytes_consumed >= max_bytes: raise DecodeError('Variable integer contained more than maximum bytes ({}).'.format(max_bytes)) return num_bytes_consumed, value
[ "def", "decode_varint", "(", "f", ",", "max_bytes", "=", "4", ")", ":", "num_bytes_consumed", "=", "0", "value", "=", "0", "m", "=", "1", "while", "True", ":", "buf", "=", "f", ".", "read", "(", "1", ")", "if", "len", "(", "buf", ")", "==", "0", ":", "raise", "UnderflowDecodeError", "(", ")", "(", "u8", ",", ")", "=", "FIELD_U8", ".", "unpack", "(", "buf", ")", "value", "+=", "(", "u8", "&", "0x7f", ")", "*", "m", "m", "*=", "0x80", "num_bytes_consumed", "+=", "1", "if", "u8", "&", "0x80", "==", "0", ":", "# No further bytes", "break", "elif", "max_bytes", "is", "not", "None", "and", "num_bytes_consumed", ">=", "max_bytes", ":", "raise", "DecodeError", "(", "'Variable integer contained more than maximum bytes ({}).'", ".", "format", "(", "max_bytes", ")", ")", "return", "num_bytes_consumed", ",", "value" ]
24.784314
22.72549
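A worked example of the little-endian base-128 scheme above: 0x80 0x01 decodes to 128, since the low 7 bits come first and the high bit flags continuation.

import io

num_bytes, value = decode_varint(io.BytesIO(b"\x80\x01"))
assert (num_bytes, value) == (2, 128)   # 0 + 1 * 0x80

num_bytes, value = decode_varint(io.BytesIO(b"\x7f"))
assert (num_bytes, value) == (1, 127)   # single byte, no continuation bit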
def log_request(self, handler: RequestHandler) -> None: """Writes a completed HTTP request to the logs. By default writes to the python root logger. To change this behavior either subclass Application and override this method, or pass a function in the application settings dictionary as ``log_function``. """ if "log_function" in self.settings: self.settings["log_function"](handler) return if handler.get_status() < 400: log_method = access_log.info elif handler.get_status() < 500: log_method = access_log.warning else: log_method = access_log.error request_time = 1000.0 * handler.request.request_time() log_method( "%d %s %.2fms", handler.get_status(), handler._request_summary(), request_time, )
[ "def", "log_request", "(", "self", ",", "handler", ":", "RequestHandler", ")", "->", "None", ":", "if", "\"log_function\"", "in", "self", ".", "settings", ":", "self", ".", "settings", "[", "\"log_function\"", "]", "(", "handler", ")", "return", "if", "handler", ".", "get_status", "(", ")", "<", "400", ":", "log_method", "=", "access_log", ".", "info", "elif", "handler", ".", "get_status", "(", ")", "<", "500", ":", "log_method", "=", "access_log", ".", "warning", "else", ":", "log_method", "=", "access_log", ".", "error", "request_time", "=", "1000.0", "*", "handler", ".", "request", ".", "request_time", "(", ")", "log_method", "(", "\"%d %s %.2fms\"", ",", "handler", ".", "get_status", "(", ")", ",", "handler", ".", "_request_summary", "(", ")", ",", "request_time", ",", ")" ]
37.166667
13.916667
def decode(model, tokens, start_token, end_token, pad_token, max_len=10000, max_repeat=10, max_repeat_block=10): """Decode with the given model and input tokens. :param model: The trained model. :param tokens: The input tokens of encoder. :param start_token: The token that represents the start of a sentence. :param end_token: The token that represents the end of a sentence. :param pad_token: The token that represents padding. :param max_len: Maximum length of decoded list. :param max_repeat: Maximum number of repeating blocks. :param max_repeat_block: Maximum length of the repeating block. :return: Decoded tokens. """ is_single = not isinstance(tokens[0], list) if is_single: tokens = [tokens] batch_size = len(tokens) decoder_inputs = [[start_token] for _ in range(batch_size)] outputs = [None for _ in range(batch_size)] output_len = 1 while len(list(filter(lambda x: x is None, outputs))) > 0: output_len += 1 batch_inputs, batch_outputs = [], [] max_input_len = 0 index_map = {} for i in range(batch_size): if outputs[i] is None: index_map[len(batch_inputs)] = i batch_inputs.append(tokens[i][:]) batch_outputs.append(decoder_inputs[i]) max_input_len = max(max_input_len, len(tokens[i])) for i in range(len(batch_inputs)): batch_inputs[i] += [pad_token] * (max_input_len - len(batch_inputs[i])) predicts = model.predict([np.asarray(batch_inputs), np.asarray(batch_outputs)]) for i in range(len(predicts)): last_token = np.argmax(predicts[i][-1]) decoder_inputs[index_map[i]].append(last_token) if last_token == end_token or\ (max_len is not None and output_len >= max_len) or\ _get_max_suffix_repeat_times(decoder_inputs, max_repeat * max_repeat_block) >= max_repeat: outputs[index_map[i]] = decoder_inputs[index_map[i]] if is_single: outputs = outputs[0] return outputs
[ "def", "decode", "(", "model", ",", "tokens", ",", "start_token", ",", "end_token", ",", "pad_token", ",", "max_len", "=", "10000", ",", "max_repeat", "=", "10", ",", "max_repeat_block", "=", "10", ")", ":", "is_single", "=", "not", "isinstance", "(", "tokens", "[", "0", "]", ",", "list", ")", "if", "is_single", ":", "tokens", "=", "[", "tokens", "]", "batch_size", "=", "len", "(", "tokens", ")", "decoder_inputs", "=", "[", "[", "start_token", "]", "for", "_", "in", "range", "(", "batch_size", ")", "]", "outputs", "=", "[", "None", "for", "_", "in", "range", "(", "batch_size", ")", "]", "output_len", "=", "1", "while", "len", "(", "list", "(", "filter", "(", "lambda", "x", ":", "x", "is", "None", ",", "outputs", ")", ")", ")", ">", "0", ":", "output_len", "+=", "1", "batch_inputs", ",", "batch_outputs", "=", "[", "]", ",", "[", "]", "max_input_len", "=", "0", "index_map", "=", "{", "}", "for", "i", "in", "range", "(", "batch_size", ")", ":", "if", "outputs", "[", "i", "]", "is", "None", ":", "index_map", "[", "len", "(", "batch_inputs", ")", "]", "=", "i", "batch_inputs", ".", "append", "(", "tokens", "[", "i", "]", "[", ":", "]", ")", "batch_outputs", ".", "append", "(", "decoder_inputs", "[", "i", "]", ")", "max_input_len", "=", "max", "(", "max_input_len", ",", "len", "(", "tokens", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "batch_inputs", ")", ")", ":", "batch_inputs", "[", "i", "]", "+=", "[", "pad_token", "]", "*", "(", "max_input_len", "-", "len", "(", "batch_inputs", "[", "i", "]", ")", ")", "predicts", "=", "model", ".", "predict", "(", "[", "np", ".", "asarray", "(", "batch_inputs", ")", ",", "np", ".", "asarray", "(", "batch_outputs", ")", "]", ")", "for", "i", "in", "range", "(", "len", "(", "predicts", ")", ")", ":", "last_token", "=", "np", ".", "argmax", "(", "predicts", "[", "i", "]", "[", "-", "1", "]", ")", "decoder_inputs", "[", "index_map", "[", "i", "]", "]", ".", "append", "(", "last_token", ")", "if", "last_token", "==", "end_token", "or", "(", "max_len", "is", "not", "None", "and", "output_len", ">=", "max_len", ")", "or", "_get_max_suffix_repeat_times", "(", "decoder_inputs", ",", "max_repeat", "*", "max_repeat_block", ")", ">=", "max_repeat", ":", "outputs", "[", "index_map", "[", "i", "]", "]", "=", "decoder_inputs", "[", "index_map", "[", "i", "]", "]", "if", "is_single", ":", "outputs", "=", "outputs", "[", "0", "]", "return", "outputs" ]
47.272727
19.022727
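A shape-level usage sketch; the token ids and 'model' are placeholders. Any Keras-style model that maps [encoder_tokens, decoder_tokens] to per-step token probabilities fits the predict() call above.

# Hypothetical call; token ids and 'model' are placeholder assumptions.
decoded = decode(model,
                 tokens=[2, 5, 7, 3],  # a single encoder input
                 start_token=1,
                 end_token=2,
                 pad_token=0,
                 max_len=20)
# 'decoded' is one token list here, since a single (non-batched) input
# was given; a list of input lists yields a list of decoded sequences.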
def find_python(): """Search for Python automatically""" python = ( _state.get("pythonExecutable") or # Support for multiple executables. next(( exe for exe in os.getenv("PYBLISH_QML_PYTHON_EXECUTABLE", "").split(os.pathsep) if os.path.isfile(exe)), None ) or # Search PATH for executables. which("python") or which("python3") ) if not python or not os.path.isfile(python): raise ValueError("Could not locate Python executable.") return python
[ "def", "find_python", "(", ")", ":", "python", "=", "(", "_state", ".", "get", "(", "\"pythonExecutable\"", ")", "or", "# Support for multiple executables.", "next", "(", "(", "exe", "for", "exe", "in", "os", ".", "getenv", "(", "\"PYBLISH_QML_PYTHON_EXECUTABLE\"", ",", "\"\"", ")", ".", "split", "(", "os", ".", "pathsep", ")", "if", "os", ".", "path", ".", "isfile", "(", "exe", ")", ")", ",", "None", ")", "or", "# Search PATH for executables.", "which", "(", "\"python\"", ")", "or", "which", "(", "\"python3\"", ")", ")", "if", "not", "python", "or", "not", "os", ".", "path", ".", "isfile", "(", "python", ")", ":", "raise", "ValueError", "(", "\"Could not locate Python executable.\"", ")", "return", "python" ]
26.047619
20.857143
def parse_bewit(bewit): """ Returns a `bewittuple` representing the parts of an encoded bewit string. This has the following named attributes: (id, expiration, mac, ext) :param bewit: A base64 encoded bewit string :type bewit: str """ decoded_bewit = b64decode(bewit).decode('ascii') bewit_parts = decoded_bewit.split("\\") if len(bewit_parts) != 4: raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit) return bewittuple(*bewit_parts)
[ "def", "parse_bewit", "(", "bewit", ")", ":", "decoded_bewit", "=", "b64decode", "(", "bewit", ")", ".", "decode", "(", "'ascii'", ")", "bewit_parts", "=", "decoded_bewit", ".", "split", "(", "\"\\\\\"", ")", "if", "len", "(", "bewit_parts", ")", "!=", "4", ":", "raise", "InvalidBewit", "(", "'Expected 4 parts to bewit: %s'", "%", "decoded_bewit", ")", "return", "bewittuple", "(", "*", "bewit_parts", ")" ]
33.333333
14.4
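A round-trip sketch: a bewit is the base64 encoding of four backslash-separated fields, so building one by hand shows the named attributes of the returned bewittuple.

from base64 import b64encode

raw = "someid\\1356420407\\deadbeef\\some-ext-data"   # id \ expiration \ mac \ ext
bewit = b64encode(raw.encode("ascii"))
parts = parse_bewit(bewit)
# parts.id == 'someid', parts.expiration == '1356420407',
# parts.mac == 'deadbeef', parts.ext == 'some-ext-data'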
def _HasOOOWrite(self, path): """Returns whether the path has had an out-of-order write.""" # Check the sizes of each path before the current one. size = tf.io.gfile.stat(path).length old_size = self._finalized_sizes.get(path, None) if size != old_size: if old_size is None: logger.error('File %s created after file %s even though it\'s ' 'lexicographically earlier', path, self._path) else: logger.error('File %s updated even though the current file is %s', path, self._path) return True else: return False
[ "def", "_HasOOOWrite", "(", "self", ",", "path", ")", ":", "# Check the sizes of each path before the current one.", "size", "=", "tf", ".", "io", ".", "gfile", ".", "stat", "(", "path", ")", ".", "length", "old_size", "=", "self", ".", "_finalized_sizes", ".", "get", "(", "path", ",", "None", ")", "if", "size", "!=", "old_size", ":", "if", "old_size", "is", "None", ":", "logger", ".", "error", "(", "'File %s created after file %s even though it\\'s '", "'lexicographically earlier'", ",", "path", ",", "self", ".", "_path", ")", "else", ":", "logger", ".", "error", "(", "'File %s updated even though the current file is %s'", ",", "path", ",", "self", ".", "_path", ")", "return", "True", "else", ":", "return", "False" ]
40.466667
18.266667
def create_config(config_path="scriptworker.yaml"): """Create a config from DEFAULT_CONFIG, arguments, and config file. Then validate it and freeze it. Args: config_path (str, optional): the path to the config file. Defaults to "scriptworker.yaml" Returns: tuple: (config frozendict, credentials dict) Raises: SystemExit: on failure """ if not os.path.exists(config_path): print("{} doesn't exist! Exiting...".format(config_path), file=sys.stderr) sys.exit(1) with open(config_path, "r", encoding="utf-8") as fh: secrets = safe_load(fh) config = dict(deepcopy(DEFAULT_CONFIG)) if not secrets.get("credentials"): secrets['credentials'] = read_worker_creds() config.update(secrets) apply_product_config(config) messages = check_config(config, config_path) if messages: print('\n'.join(messages), file=sys.stderr) print("Exiting...", file=sys.stderr) sys.exit(1) credentials = get_frozen_copy(secrets['credentials']) del(config['credentials']) config = get_frozen_copy(config) return config, credentials
[ "def", "create_config", "(", "config_path", "=", "\"scriptworker.yaml\"", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "print", "(", "\"{} doesn't exist! Exiting...\"", ".", "format", "(", "config_path", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "with", "open", "(", "config_path", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fh", ":", "secrets", "=", "safe_load", "(", "fh", ")", "config", "=", "dict", "(", "deepcopy", "(", "DEFAULT_CONFIG", ")", ")", "if", "not", "secrets", ".", "get", "(", "\"credentials\"", ")", ":", "secrets", "[", "'credentials'", "]", "=", "read_worker_creds", "(", ")", "config", ".", "update", "(", "secrets", ")", "apply_product_config", "(", "config", ")", "messages", "=", "check_config", "(", "config", ",", "config_path", ")", "if", "messages", ":", "print", "(", "'\\n'", ".", "join", "(", "messages", ")", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"Exiting...\"", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "credentials", "=", "get_frozen_copy", "(", "secrets", "[", "'credentials'", "]", ")", "del", "(", "config", "[", "'credentials'", "]", ")", "config", "=", "get_frozen_copy", "(", "config", ")", "return", "config", ",", "credentials" ]
32.457143
17.428571
def _generate_examples(self, images_dir_path, labels_path, setid_path, split_name): """Yields examples.""" with tf.io.gfile.GFile(labels_path, "rb") as f: labels = tfds.core.lazy_imports.scipy.io.loadmat(f)["labels"][0] with tf.io.gfile.GFile(setid_path, "rb") as f: examples = tfds.core.lazy_imports.scipy.io.loadmat(f)[split_name][0] for image_id in examples: file_name = "image_%05d.jpg" % image_id yield { "image": os.path.join(images_dir_path, file_name), "label": labels[image_id - 1] - 1, "file_name": file_name, }
[ "def", "_generate_examples", "(", "self", ",", "images_dir_path", ",", "labels_path", ",", "setid_path", ",", "split_name", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "labels_path", ",", "\"rb\"", ")", "as", "f", ":", "labels", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "scipy", ".", "io", ".", "loadmat", "(", "f", ")", "[", "\"labels\"", "]", "[", "0", "]", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "setid_path", ",", "\"rb\"", ")", "as", "f", ":", "examples", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "scipy", ".", "io", ".", "loadmat", "(", "f", ")", "[", "split_name", "]", "[", "0", "]", "for", "image_id", "in", "examples", ":", "file_name", "=", "\"image_%05d.jpg\"", "%", "image_id", "yield", "{", "\"image\"", ":", "os", ".", "path", ".", "join", "(", "images_dir_path", ",", "file_name", ")", ",", "\"label\"", ":", "labels", "[", "image_id", "-", "1", "]", "-", "1", ",", "\"file_name\"", ":", "file_name", ",", "}" ]
40.6
17.666667
def write_out(self, message, verbosity_level=1):
    """
    Convenience method for outputting.
    """
    if self.verbosity and self.verbosity >= verbosity_level:
        sys.stdout.write(smart_str(message))
        sys.stdout.flush()
[ "def", "write_out", "(", "self", ",", "message", ",", "verbosity_level", "=", "1", ")", ":", "if", "self", ".", "verbosity", "and", "self", ".", "verbosity", ">=", "verbosity_level", ":", "sys", ".", "stdout", ".", "write", "(", "smart_str", "(", "message", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
36
7.142857
def _html(self, text): '''Parse a Tweet and generate HTML.''' html = URL_REGEX.sub(self._parse_urls, text) html = USERNAME_REGEX.sub(self._parse_users, html) html = LIST_REGEX.sub(self._parse_lists, html) return HASHTAG_REGEX.sub(self._parse_tags, html)
[ "def", "_html", "(", "self", ",", "text", ")", ":", "html", "=", "URL_REGEX", ".", "sub", "(", "self", ".", "_parse_urls", ",", "text", ")", "html", "=", "USERNAME_REGEX", ".", "sub", "(", "self", ".", "_parse_users", ",", "html", ")", "html", "=", "LIST_REGEX", ".", "sub", "(", "self", ".", "_parse_lists", ",", "html", ")", "return", "HASHTAG_REGEX", ".", "sub", "(", "self", ".", "_parse_tags", ",", "html", ")" ]
48
14
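The method above chains re.sub calls whose replacement arguments are bound methods. A self-contained sketch of the same pattern with toy regexes and plain callback functions (the real URL_REGEX and friends are more involved):

import re

USERNAME_REGEX = re.compile(r'@(\w+)')
HASHTAG_REGEX = re.compile(r'#(\w+)')

def parse_users(match):
    # Replacement callables receive the match object.
    return '<a href="https://twitter.com/%s">@%s</a>' % (match.group(1), match.group(1))

def parse_tags(match):
    return '<a href="https://twitter.com/search?q=%%23%s">#%s</a>' % (match.group(1), match.group(1))

text = "hello @alice #python"
html = USERNAME_REGEX.sub(parse_users, text)
html = HASHTAG_REGEX.sub(parse_tags, html)
print(html)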
def is_subclass(ch, ns, super_class, sub):
    """Determine if one class is a subclass of another class.

    Parameters:

      ch: A CIMOMHandle.  Either a pycimmb.CIMOMHandle or a
          :class:`~pywbem.WBEMConnection` object.

      ns (:term:`string`): Namespace (case independent).

      super_class (:term:`string`): Super class name (case independent).

      sub: The subclass.  This can either be a string or a
          :class:`~pywbem.CIMClass` object.

    Returns:

      :class:`py:bool`: Boolean True if the assertion is True (sub is a
      subclass of super_class) or False if it is not a subclass.

    Raises:

      CIMError if sub is not a valid class in the repo
    """
    lsuper = super_class.lower()
    if isinstance(sub, CIMClass):
        subname = sub.classname
        subclass = sub
    else:
        subname = sub
        subclass = None
    if subname.lower() == lsuper:
        return True
    if subclass is None:
        subclass = ch.GetClass(subname,
                               ns,
                               LocalOnly=True,
                               IncludeQualifiers=False,
                               PropertyList=[],
                               IncludeClassOrigin=False)
    while subclass.superclass is not None:
        if subclass.superclass.lower() == lsuper:
            return True
        subclass = ch.GetClass(subclass.superclass,
                               ns,
                               LocalOnly=True,
                               IncludeQualifiers=False,
                               PropertyList=[],
                               IncludeClassOrigin=False)
    return False
[ "def", "is_subclass", "(", "ch", ",", "ns", ",", "super_class", ",", "sub", ")", ":", "lsuper", "=", "super_class", ".", "lower", "(", ")", "if", "isinstance", "(", "sub", ",", "CIMClass", ")", ":", "subname", "=", "sub", ".", "classname", "subclass", "=", "sub", "else", ":", "subname", "=", "sub", "subclass", "=", "None", "if", "subname", ".", "lower", "(", ")", "==", "lsuper", ":", "return", "True", "if", "subclass", "is", "None", ":", "subclass", "=", "ch", ".", "GetClass", "(", "subname", ",", "ns", ",", "LocalOnly", "=", "True", ",", "IncludeQualifiers", "=", "False", ",", "PropertyList", "=", "[", "]", ",", "IncludeClassOrigin", "=", "False", ")", "while", "subclass", ".", "superclass", "is", "not", "None", ":", "if", "subclass", ".", "superclass", ".", "lower", "(", ")", "==", "lsuper", ":", "return", "True", "subclass", "=", "ch", ".", "GetClass", "(", "subclass", ".", "superclass", ",", "ns", ",", "LocalOnly", "=", "True", ",", "IncludeQualifiers", "=", "False", ",", "PropertyList", "=", "[", "]", ",", "IncludeClassOrigin", "=", "False", ")", "return", "False" ]
31.826923
17.134615
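The method is essentially a walk up the superclass chain, fetching one class at a time. A standalone sketch of that traversal over a plain dict standing in for the class repository (no pywbem required):

def is_subclass_of(repo, sub, super_name):
    # repo maps class name -> superclass name (None at the root).
    name = sub
    while name is not None:
        if name.lower() == super_name.lower():
            return True
        name = repo[name]
    return False

repo = {"CIM_ManagedElement": None,
        "CIM_System": "CIM_ManagedElement",
        "CIM_ComputerSystem": "CIM_System"}
print(is_subclass_of(repo, "CIM_ComputerSystem", "cim_managedelement"))  # True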
def _rotations_to_disentangle(local_param):
    """
    Static internal method to work out Ry and Rz rotation angles used
    to disentangle the LSB qubit.
    These rotations make up the block diagonal matrix U (i.e. multiplexor)
    that disentangles the LSB.

        [[Ry(theta_1).Rz(phi_1)   0     .   .              0],
         [0    Ry(theta_2).Rz(phi_2)    .                  0],
                         .
                             .
         [0     0    .    .    Ry(theta_2^n).Rz(phi_2^n)]]
    """
    remaining_vector = []
    thetas = []
    phis = []

    param_len = len(local_param)

    for i in range(param_len // 2):
        # Ry and Rz rotations to move bloch vector from 0 to "imaginary"
        # qubit
        # (imagine a qubit state signified by the amplitudes at index 2*i
        # and 2*(i+1), corresponding to the select qubits of the
        # multiplexor being in state |i>)
        (remains, add_theta, add_phi) = Initialize._bloch_angles(local_param[2 * i: 2 * (i + 1)])

        remaining_vector.append(remains)

        # rotations for all imaginary qubits of the full vector
        # to move from where it is to zero, hence the negative sign
        thetas.append(-add_theta)
        phis.append(-add_phi)

    return remaining_vector, thetas, phis
[ "def", "_rotations_to_disentangle", "(", "local_param", ")", ":", "remaining_vector", "=", "[", "]", "thetas", "=", "[", "]", "phis", "=", "[", "]", "param_len", "=", "len", "(", "local_param", ")", "for", "i", "in", "range", "(", "param_len", "//", "2", ")", ":", "# Ry and Rz rotations to move bloch vector from 0 to \"imaginary\"", "# qubit", "# (imagine a qubit state signified by the amplitudes at index 2*i", "# and 2*(i+1), corresponding to the select qubits of the", "# multiplexor being in state |i>)", "(", "remains", ",", "add_theta", ",", "add_phi", ")", "=", "Initialize", ".", "_bloch_angles", "(", "local_param", "[", "2", "*", "i", ":", "2", "*", "(", "i", "+", "1", ")", "]", ")", "remaining_vector", ".", "append", "(", "remains", ")", "# rotations for all imaginary qubits of the full vector", "# to move from where it is to zero, hence the negative sign", "thetas", ".", "append", "(", "-", "add_theta", ")", "phis", ".", "append", "(", "-", "add_phi", ")", "return", "remaining_vector", ",", "thetas", ",", "phis" ]
36.945946
19
def readNamelist(namFilename, unique_glyphs=False, cache=None):
    """
    Args:
      namFilename: The path to the Namelist file.
      unique_glyphs: Optional, whether to only include glyphs unique to subset.
      cache: Optional, a dict used to cache loaded Namelist files

    Returns:
      A dict with following keys:
        "fileName": (string) absolute path to namFilename
        "ownCharset": (set) the set of codepoints defined by the file itself
        "header": (dict) the result of _parseNamelistHeader
        "includes":
          (set) if unique_glyphs=False, the resulting dicts of readNamelist
                for each of the include files
          (None) if unique_glyphs=True
        "charset":
          (set) if unique_glyphs=False, the union of "ownCharset" and all
                "charset" items of each included file
          (None) if unique_glyphs=True

    If you are using unique_glyphs=True and an external cache, don't expect
    the keys "includes" and "charset" to have a specific value.
    Depending on the state of cache, if unique_glyphs=True the returned
    dict may have None values for its "includes" and "charset" keys.
    """
    currentlyIncluding = set()
    if not cache:
        cache = {}
    return _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs)
[ "def", "readNamelist", "(", "namFilename", ",", "unique_glyphs", "=", "False", ",", "cache", "=", "None", ")", ":", "currentlyIncluding", "=", "set", "(", ")", "if", "not", "cache", ":", "cache", "=", "{", "}", "return", "_readNamelist", "(", "currentlyIncluding", ",", "cache", ",", "namFilename", ",", "unique_glyphs", ")" ]
40.233333
21.5
def get_seqstr(config, metadata): """ Extract and reformat imaging sequence(s) and variant(s) into pretty strings. Parameters ---------- config : :obj:`dict` A dictionary with relevant information regarding sequences, sequence variants, phase encoding directions, and task names. metadata : :obj:`dict` The metadata for the scan. Returns ------- seqs : :obj:`str` Sequence names. variants : :obj:`str` Sequence variant names. """ seq_abbrs = metadata.get('ScanningSequence', '').split('_') seqs = [config['seq'].get(seq, seq) for seq in seq_abbrs] variants = [config['seqvar'].get(var, var) for var in \ metadata.get('SequenceVariant', '').split('_')] seqs = list_to_str(seqs) if seq_abbrs[0]: seqs += ' ({0})'.format(os.path.sep.join(seq_abbrs)) variants = list_to_str(variants) return seqs, variants
[ "def", "get_seqstr", "(", "config", ",", "metadata", ")", ":", "seq_abbrs", "=", "metadata", ".", "get", "(", "'ScanningSequence'", ",", "''", ")", ".", "split", "(", "'_'", ")", "seqs", "=", "[", "config", "[", "'seq'", "]", ".", "get", "(", "seq", ",", "seq", ")", "for", "seq", "in", "seq_abbrs", "]", "variants", "=", "[", "config", "[", "'seqvar'", "]", ".", "get", "(", "var", ",", "var", ")", "for", "var", "in", "metadata", ".", "get", "(", "'SequenceVariant'", ",", "''", ")", ".", "split", "(", "'_'", ")", "]", "seqs", "=", "list_to_str", "(", "seqs", ")", "if", "seq_abbrs", "[", "0", "]", ":", "seqs", "+=", "' ({0})'", ".", "format", "(", "os", ".", "path", ".", "sep", ".", "join", "(", "seq_abbrs", ")", ")", "variants", "=", "list_to_str", "(", "variants", ")", "return", "seqs", ",", "variants" ]
31.551724
19.482759
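A runnable sketch of the abbreviation expansion get_seqstr performs, with a toy config mapping and a minimal list_to_str stand-in (the real helper and mappings live in the surrounding package):

config = {'seq': {'GR': 'gradient recalled', 'EP': 'echo planar'},
          'seqvar': {'SK': 'segmented k-space', 'SP': 'spoiled'}}
metadata = {'ScanningSequence': 'GR_EP', 'SequenceVariant': 'SK_SP'}

def list_to_str(items):
    # Minimal stand-in: the real helper produces prose like "A and B".
    return ' and '.join(items) if len(items) > 1 else items[0]

abbrs = metadata.get('ScanningSequence', '').split('_')
seqs = list_to_str([config['seq'].get(a, a) for a in abbrs])
if abbrs[0]:
    seqs += ' ({0})'.format('/'.join(abbrs))
variants = list_to_str([config['seqvar'].get(v, v)
                        for v in metadata.get('SequenceVariant', '').split('_')])
print(seqs)      # gradient recalled and echo planar (GR/EP)
print(variants)  # segmented k-space and spoiled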
def impute(self, vars: dict, replace: bool = False, prefix: str = 'imp_', out: 'SASdata' = None) -> 'SASdata':
    """
    Imputes missing values for a SASdata object.

    :param vars: a dictionary in the form of {'varname':'impute type'} or {'impute type':'[var1, var2]'}
    :param replace: if False (default), impute missing values in place; if True, keep the
                    original variable and write the imputed values to a new variable named
                    prefix + varname
    :param prefix: prefix for the new variable names created when replace=True
    :param out: optional output table, either a SASdata object or a 'libref.table' string
    :return: a SASdata object for the imputed table
    """
    outstr = ''
    if out:
        if isinstance(out, str):
            fn = out.partition('.')
            if fn[1] == '.':
                out_libref = fn[0]
                out_table = fn[2]
            else:
                out_libref = ''
                out_table = fn[0]
        else:
            out_libref = out.libref
            out_table = out.table
        outstr = "out=%s.%s" % (out_libref, out_table)
    else:
        out_table = self.table
        out_libref = self.libref

    # get list of variables and types
    varcode = "data _null_; d = open('" + self.libref + "." + self.table + "');\n"
    varcode += "nvars = attrn(d, 'NVARS');\n"
    varcode += "vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    varcode += "put vn nvars; put vl;\n"
    varcode += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    varcode += "put vt;\n"
    varcode += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    varcode += "run;"
    print(varcode)
    ll = self.sas._io.submit(varcode, "text")
    l2 = ll['LOG'].rpartition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(float(l2[0]))
    l2 = l2[2].partition("\n")
    varlist = l2[2].upper().split("\n", nvars)
    del varlist[nvars]
    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]
    varListType = dict(zip(varlist, vartype))

    # process vars dictionary to generate code
    ## setup default statements
    sql = "proc sql;\n select\n"
    sqlsel = ' %s(%s),\n'
    sqlinto = ' into\n'
    if len(out_libref) > 0:
        ds1 = "data " + out_libref + "." + out_table + "; set " + self.libref + "." + self.table + self._dsopts() + ";\n"
    else:
        ds1 = "data " + out_table + "; set " + self.libref + "." + self.table + self._dsopts() + ";\n"
    dsmiss = 'if missing({0}) then {1} = {2};\n'
    if replace:
        dsmiss = prefix + ('{1} = {0}; if missing({0}) then %s{1} = {2};\n' % prefix)

    modesql = ''
    modeq = "proc sql outobs=1;\n select %s, count(*) as freq into :imp_mode_%s, :imp_mode_freq\n"
    modeq += " from %s where %s is not null group by %s order by freq desc, %s;\nquit;\n"

    # pop the values key because it needs special treatment
    constantValues = vars.pop('value', None)
    if constantValues is not None:
        if not all(isinstance(x, tuple) for x in constantValues):
            raise SyntaxError("The elements in the 'value' key must be tuples")
        for t in constantValues:
            if varListType.get(t[0].upper()) == "N":
                ds1 += dsmiss.format(t[0], t[0], t[1])
            else:
                ds1 += dsmiss.format(t[0], t[0], '"' + str(t[1]) + '"')
    for key, values in vars.items():
        if key.lower() in ['midrange', 'random']:
            for v in values:
                sql += sqlsel % ('max', v)
                sql += sqlsel % ('min', v)
                sqlinto += ' :imp_max_' + v + ',\n'
                sqlinto += ' :imp_min_' + v + ',\n'
                if key.lower() == 'midrange':
                    ds1 += dsmiss.format(v, v, '(&imp_min_' + v + '.' + ' + ' + '&imp_max_' + v + '.' + ') / 2')
                elif key.lower() == 'random':
                    # random * (max - min) + min
                    ds1 += dsmiss.format(v, v, '(&imp_max_' + v + '.' + ' - ' + '&imp_min_' + v + '.' + ') * ranuni(0)' + '+ &imp_min_' + v + '.')
                else:
                    raise SyntaxError("This should not happen!!!!")
        else:
            for v in values:
                sql += sqlsel % (key, v)
                sqlinto += ' :imp_' + v + ',\n'
                if key.lower() == 'mode':
                    modesql += modeq % (v, v, self.libref + "." + self.table + self._dsopts(), v, v, v)
                if varListType.get(v.upper()) == "N":
                    ds1 += dsmiss.format(v, v, '&imp_' + v + '.')
                else:
                    ds1 += dsmiss.format(v, v, '"&imp_' + v + '."')

    if len(sql) > 20:
        sql = sql.rstrip(', \n') + '\n' + sqlinto.rstrip(', \n') + '\n from ' + self.libref + '.' + self.table + self._dsopts() + ';\nquit;\n'
    else:
        sql = ''
    ds1 += 'run;\n'

    if self.sas.nosub:
        print(modesql + sql + ds1)
        return None

    ll = self.sas.submit(modesql + sql + ds1)
    return self.sas.sasdata(out_table, libref=out_libref, results=self.results, dsopts=self._dsopts())
[ "def", "impute", "(", "self", ",", "vars", ":", "dict", ",", "replace", ":", "bool", "=", "False", ",", "prefix", ":", "str", "=", "'imp_'", ",", "out", ":", "'SASdata'", "=", "None", ")", "->", "'SASdata'", ":", "outstr", "=", "''", "if", "out", ":", "if", "isinstance", "(", "out", ",", "str", ")", ":", "fn", "=", "out", ".", "partition", "(", "'.'", ")", "if", "fn", "[", "1", "]", "==", "'.'", ":", "out_libref", "=", "fn", "[", "0", "]", "out_table", "=", "fn", "[", "2", "]", "else", ":", "out_libref", "=", "''", "out_table", "=", "fn", "[", "0", "]", "else", ":", "out_libref", "=", "out", ".", "libref", "out_table", "=", "out", ".", "table", "outstr", "=", "\"out=%s.%s\"", "%", "(", "out_libref", ",", "out_table", ")", "else", ":", "out_table", "=", "self", ".", "table", "out_libref", "=", "self", ".", "libref", "# get list of variables and types", "varcode", "=", "\"data _null_; d = open('\"", "+", "self", ".", "libref", "+", "\".\"", "+", "self", ".", "table", "+", "\"');\\n\"", "varcode", "+=", "\"nvars = attrn(d, 'NVARS');\\n\"", "varcode", "+=", "\"vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\\n\"", "varcode", "+=", "\"put vn nvars; put vl;\\n\"", "varcode", "+=", "\"do i = 1 to nvars; var = varname(d, i); put var; end;\\n\"", "varcode", "+=", "\"put vt;\\n\"", "varcode", "+=", "\"do i = 1 to nvars; var = vartype(d, i); put var; end;\\n\"", "varcode", "+=", "\"run;\"", "print", "(", "varcode", ")", "ll", "=", "self", ".", "sas", ".", "_io", ".", "submit", "(", "varcode", ",", "\"text\"", ")", "l2", "=", "ll", "[", "'LOG'", "]", ".", "rpartition", "(", "\"VARNUMS= \"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "nvars", "=", "int", "(", "float", "(", "l2", "[", "0", "]", ")", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "varlist", "=", "l2", "[", "2", "]", ".", "upper", "(", ")", ".", "split", "(", "\"\\n\"", ",", "nvars", ")", "del", "varlist", "[", "nvars", "]", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"VARTYPE=\"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "vartype", "=", "l2", "[", "2", "]", ".", "split", "(", "\"\\n\"", ",", "nvars", ")", "del", "vartype", "[", "nvars", "]", "varListType", "=", "dict", "(", "zip", "(", "varlist", ",", "vartype", ")", ")", "# process vars dictionary to generate code", "## setup default statements", "sql", "=", "\"proc sql;\\n select\\n\"", "sqlsel", "=", "' %s(%s),\\n'", "sqlinto", "=", "' into\\n'", "if", "len", "(", "out_libref", ")", ">", "0", ":", "ds1", "=", "\"data \"", "+", "out_libref", "+", "\".\"", "+", "out_table", "+", "\"; set \"", "+", "self", ".", "libref", "+", "\".\"", "+", "self", ".", "table", "+", "self", ".", "_dsopts", "(", ")", "+", "\";\\n\"", "else", ":", "ds1", "=", "\"data \"", "+", "out_table", "+", "\"; set \"", "+", "self", ".", "libref", "+", "\".\"", "+", "self", ".", "table", "+", "self", ".", "_dsopts", "(", ")", "+", "\";\\n\"", "dsmiss", "=", "'if missing({0}) then {1} = {2};\\n'", "if", "replace", ":", "dsmiss", "=", "prefix", "+", "'{1} = {0}; if missing({0}) then %s{1} = {2};\\n'", "%", "prefix", "modesql", "=", "''", "modeq", "=", "\"proc sql outobs=1;\\n select %s, count(*) as freq into :imp_mode_%s, :imp_mode_freq\\n\"", "modeq", "+=", "\" from %s where %s is not null group by %s order by freq desc, %s;\\nquit;\\n\"", "# pop the values key because it needs special treatment", "contantValues", "=", "vars", ".", "pop", "(", "'value'", ",", "None", ")", "if", "contantValues", "is", "not", 
"None", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "tuple", ")", "for", "x", "in", "contantValues", ")", ":", "raise", "SyntaxError", "(", "\"The elements in the 'value' key must be tuples\"", ")", "for", "t", "in", "contantValues", ":", "if", "varListType", ".", "get", "(", "t", "[", "0", "]", ".", "upper", "(", ")", ")", "==", "\"N\"", ":", "ds1", "+=", "dsmiss", ".", "format", "(", "(", "t", "[", "0", "]", ",", "t", "[", "0", "]", ",", "t", "[", "1", "]", ")", ")", "else", ":", "ds1", "+=", "dsmiss", ".", "format", "(", "t", "[", "0", "]", ",", "t", "[", "0", "]", ",", "'\"'", "+", "str", "(", "t", "[", "1", "]", ")", "+", "'\"'", ")", "for", "key", ",", "values", "in", "vars", ".", "items", "(", ")", ":", "if", "key", ".", "lower", "(", ")", "in", "[", "'midrange'", ",", "'random'", "]", ":", "for", "v", "in", "values", ":", "sql", "+=", "sqlsel", "%", "(", "'max'", ",", "v", ")", "sql", "+=", "sqlsel", "%", "(", "'min'", ",", "v", ")", "sqlinto", "+=", "' :imp_max_'", "+", "v", "+", "',\\n'", "sqlinto", "+=", "' :imp_min_'", "+", "v", "+", "',\\n'", "if", "key", ".", "lower", "(", ")", "==", "'midrange'", ":", "ds1", "+=", "dsmiss", ".", "format", "(", "v", ",", "v", ",", "'(&imp_min_'", "+", "v", "+", "'.'", "+", "' + '", "+", "'&imp_max_'", "+", "v", "+", "'.'", "+", "') / 2'", ")", "elif", "key", ".", "lower", "(", ")", "==", "'random'", ":", "# random * (max - min) + min", "ds1", "+=", "dsmiss", ".", "format", "(", "v", ",", "v", ",", "'(&imp_max_'", "+", "v", "+", "'.'", "+", "' - '", "+", "'&imp_min_'", "+", "v", "+", "'.'", "+", "') * ranuni(0)'", "+", "'+ &imp_min_'", "+", "v", "+", "'.'", ")", "else", ":", "raise", "SyntaxError", "(", "\"This should not happen!!!!\"", ")", "else", ":", "for", "v", "in", "values", ":", "sql", "+=", "sqlsel", "%", "(", "key", ",", "v", ")", "sqlinto", "+=", "' :imp_'", "+", "v", "+", "',\\n'", "if", "key", ".", "lower", "==", "'mode'", ":", "modesql", "+=", "modeq", "%", "(", "v", ",", "v", ",", "self", ".", "libref", "+", "\".\"", "+", "self", ".", "table", "+", "self", ".", "_dsopts", "(", ")", ",", "v", ",", "v", ",", "v", ")", "if", "varListType", ".", "get", "(", "v", ".", "upper", "(", ")", ")", "==", "\"N\"", ":", "ds1", "+=", "dsmiss", ".", "format", "(", "v", ",", "v", ",", "'&imp_'", "+", "v", "+", "'.'", ")", "else", ":", "ds1", "+=", "dsmiss", ".", "format", "(", "v", ",", "v", ",", "'\"&imp_'", "+", "v", "+", "'.\"'", ")", "if", "len", "(", "sql", ")", ">", "20", ":", "sql", "=", "sql", ".", "rstrip", "(", "', \\n'", ")", "+", "'\\n'", "+", "sqlinto", ".", "rstrip", "(", "', \\n'", ")", "+", "'\\n from '", "+", "self", ".", "libref", "+", "'.'", "+", "self", ".", "table", "+", "self", ".", "_dsopts", "(", ")", "+", "';\\nquit;\\n'", "else", ":", "sql", "=", "''", "ds1", "+=", "'run;\\n'", "if", "self", ".", "sas", ".", "nosub", ":", "print", "(", "modesql", "+", "sql", "+", "ds1", ")", "return", "None", "ll", "=", "self", ".", "sas", ".", "submit", "(", "modesql", "+", "sql", "+", "ds1", ")", "return", "self", ".", "sas", ".", "sasdata", "(", "out_table", ",", "libref", "=", "out_libref", ",", "results", "=", "self", ".", "results", ",", "dsopts", "=", "self", ".", "_dsopts", "(", ")", ")" ]
44.26087
21.617391
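The SAS code is assembled from plain string templates; a standalone sketch of how the dsmiss template above expands for in-place vs. prefixed imputation (variable names and values are illustrative):

dsmiss = 'if missing({0}) then {1} = {2};\n'
print(dsmiss.format('age', 'age', '&imp_age.'))
# if missing(age) then age = &imp_age.;

prefix = 'imp_'
dsmiss_copy = prefix + ('{1} = {0}; if missing({0}) then %s{1} = {2};\n' % prefix)
print(dsmiss_copy.format('age', 'age', '30'))
# imp_age = age; if missing(age) then imp_age = 30;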
def compute_K_L_alpha_ll(self): r"""Compute `K`, `L`, `alpha` and log-likelihood according to the first part of Algorithm 2.1 in R&W. Computes `K` and the noise portion of `K` using :py:meth:`compute_Kij`, computes `L` using :py:func:`scipy.linalg.cholesky`, then computes `alpha` as `L.T\\(L\\y)`. Only does the computation if :py:attr:`K_up_to_date` is False -- otherwise leaves the existing values. """ if not self.K_up_to_date: y = self.y err_y = self.err_y self.K = self.compute_Kij(self.X, None, self.n, None, noise=False) # If the noise kernel is meant to be strictly diagonal, it should # yield a diagonal noise_K: if isinstance(self.noise_k, ZeroKernel): self.noise_K = scipy.zeros((self.X.shape[0], self.X.shape[0])) elif isinstance(self.noise_k, DiagonalNoiseKernel): self.noise_K = self.noise_k.params[0]**2.0 * scipy.eye(self.X.shape[0]) else: self.noise_K = self.compute_Kij(self.X, None, self.n, None, noise=True) K = self.K noise_K = self.noise_K if self.T is not None: KnK = self.T.dot(K + noise_K).dot(self.T.T) else: KnK = K + noise_K K_tot = ( KnK + scipy.diag(err_y**2.0) + self.diag_factor * sys.float_info.epsilon * scipy.eye(len(y)) ) self.L = scipy.linalg.cholesky(K_tot, lower=True) # Need to make the mean-subtracted y that appears in the expression # for alpha: if self.mu is not None: mu_alph = self.mu(self.X, self.n) if self.T is not None: mu_alph = self.T.dot(mu_alph) y_alph = self.y - mu_alph else: y_alph = self.y self.alpha = scipy.linalg.cho_solve((self.L, True), scipy.atleast_2d(y_alph).T) self.ll = ( -0.5 * scipy.atleast_2d(y_alph).dot(self.alpha) - scipy.log(scipy.diag(self.L)).sum() - 0.5 * len(y) * scipy.log(2.0 * scipy.pi) )[0, 0] # Apply hyperpriors: self.ll += self.hyperprior(self.params) if self.use_hyper_deriv: warnings.warn("Use of hyperparameter derivatives is experimental!") # Only compute for the free parameters, since that is what we # want to optimize: self.ll_deriv = scipy.zeros(len(self.free_params)) # Combine the kernel and noise kernel so we only need one loop: if isinstance(self.noise_k, ZeroKernel): knk = self.k elif isinstance(self.noise_k, DiagonalNoiseKernel): knk = self.k # Handle DiagonalNoiseKernel specially: if not self.noise_k.fixed_params[0]: dK_dtheta_i = 2.0 * self.noise_k.params[0] * scipy.eye(len(y)) self.ll_deriv[len(self.k.free_params)] = 0.5 * ( self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) - scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i)) ) else: knk = self.k + self.noise_k # Get the indices of the free params in knk.params: free_param_idxs = scipy.arange(0, len(knk.params), dtype=int)[~knk.fixed_params] # Handle the kernel and noise kernel: for i, pi in enumerate(free_param_idxs): dK_dtheta_i = self.compute_Kij( self.X, None, self.n, None, k=knk, hyper_deriv=pi ) if self.T is not None: dK_dtheta_i = self.T.dot(dK_dtheta_i).dot(self.T.T) self.ll_deriv[i] = 0.5 * ( self.alpha.T.dot(dK_dtheta_i.dot(self.alpha)) - scipy.trace(scipy.linalg.cho_solve((self.L, True), dK_dtheta_i)) ) # Handle the mean function: if self.mu is not None: # Get the indices of the free params in self.mu.params: free_param_idxs = scipy.arange(0, len(self.mu.params), dtype=int)[~self.mu.fixed_params] for i, pi in enumerate(free_param_idxs): dmu_dtheta_i = scipy.atleast_2d(self.mu(self.X, self.n, hyper_deriv=pi)).T if self.T is not None: dmu_dtheta_i = self.T.dot(dmu_dtheta_i) self.ll_deriv[i + len(knk.free_params)] = dmu_dtheta_i.T.dot(self.alpha) # Handle the hyperprior: # Get the indices of the free params in self.params: free_param_idxs = 
scipy.arange(0, len(self.params), dtype=int)[~self.fixed_params] for i, pi in enumerate(free_param_idxs): self.ll_deriv[i] += self.hyperprior(self.params, hyper_deriv=pi) self.K_up_to_date = True
[ "def", "compute_K_L_alpha_ll", "(", "self", ")", ":", "if", "not", "self", ".", "K_up_to_date", ":", "y", "=", "self", ".", "y", "err_y", "=", "self", ".", "err_y", "self", ".", "K", "=", "self", ".", "compute_Kij", "(", "self", ".", "X", ",", "None", ",", "self", ".", "n", ",", "None", ",", "noise", "=", "False", ")", "# If the noise kernel is meant to be strictly diagonal, it should", "# yield a diagonal noise_K:", "if", "isinstance", "(", "self", ".", "noise_k", ",", "ZeroKernel", ")", ":", "self", ".", "noise_K", "=", "scipy", ".", "zeros", "(", "(", "self", ".", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "X", ".", "shape", "[", "0", "]", ")", ")", "elif", "isinstance", "(", "self", ".", "noise_k", ",", "DiagonalNoiseKernel", ")", ":", "self", ".", "noise_K", "=", "self", ".", "noise_k", ".", "params", "[", "0", "]", "**", "2.0", "*", "scipy", ".", "eye", "(", "self", ".", "X", ".", "shape", "[", "0", "]", ")", "else", ":", "self", ".", "noise_K", "=", "self", ".", "compute_Kij", "(", "self", ".", "X", ",", "None", ",", "self", ".", "n", ",", "None", ",", "noise", "=", "True", ")", "K", "=", "self", ".", "K", "noise_K", "=", "self", ".", "noise_K", "if", "self", ".", "T", "is", "not", "None", ":", "KnK", "=", "self", ".", "T", ".", "dot", "(", "K", "+", "noise_K", ")", ".", "dot", "(", "self", ".", "T", ".", "T", ")", "else", ":", "KnK", "=", "K", "+", "noise_K", "K_tot", "=", "(", "KnK", "+", "scipy", ".", "diag", "(", "err_y", "**", "2.0", ")", "+", "self", ".", "diag_factor", "*", "sys", ".", "float_info", ".", "epsilon", "*", "scipy", ".", "eye", "(", "len", "(", "y", ")", ")", ")", "self", ".", "L", "=", "scipy", ".", "linalg", ".", "cholesky", "(", "K_tot", ",", "lower", "=", "True", ")", "# Need to make the mean-subtracted y that appears in the expression", "# for alpha:", "if", "self", ".", "mu", "is", "not", "None", ":", "mu_alph", "=", "self", ".", "mu", "(", "self", ".", "X", ",", "self", ".", "n", ")", "if", "self", ".", "T", "is", "not", "None", ":", "mu_alph", "=", "self", ".", "T", ".", "dot", "(", "mu_alph", ")", "y_alph", "=", "self", ".", "y", "-", "mu_alph", "else", ":", "y_alph", "=", "self", ".", "y", "self", ".", "alpha", "=", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "self", ".", "L", ",", "True", ")", ",", "scipy", ".", "atleast_2d", "(", "y_alph", ")", ".", "T", ")", "self", ".", "ll", "=", "(", "-", "0.5", "*", "scipy", ".", "atleast_2d", "(", "y_alph", ")", ".", "dot", "(", "self", ".", "alpha", ")", "-", "scipy", ".", "log", "(", "scipy", ".", "diag", "(", "self", ".", "L", ")", ")", ".", "sum", "(", ")", "-", "0.5", "*", "len", "(", "y", ")", "*", "scipy", ".", "log", "(", "2.0", "*", "scipy", ".", "pi", ")", ")", "[", "0", ",", "0", "]", "# Apply hyperpriors:", "self", ".", "ll", "+=", "self", ".", "hyperprior", "(", "self", ".", "params", ")", "if", "self", ".", "use_hyper_deriv", ":", "warnings", ".", "warn", "(", "\"Use of hyperparameter derivatives is experimental!\"", ")", "# Only compute for the free parameters, since that is what we", "# want to optimize:", "self", ".", "ll_deriv", "=", "scipy", ".", "zeros", "(", "len", "(", "self", ".", "free_params", ")", ")", "# Combine the kernel and noise kernel so we only need one loop:", "if", "isinstance", "(", "self", ".", "noise_k", ",", "ZeroKernel", ")", ":", "knk", "=", "self", ".", "k", "elif", "isinstance", "(", "self", ".", "noise_k", ",", "DiagonalNoiseKernel", ")", ":", "knk", "=", "self", ".", "k", "# Handle DiagonalNoiseKernel specially:", "if", "not", "self", ".", 
"noise_k", ".", "fixed_params", "[", "0", "]", ":", "dK_dtheta_i", "=", "2.0", "*", "self", ".", "noise_k", ".", "params", "[", "0", "]", "*", "scipy", ".", "eye", "(", "len", "(", "y", ")", ")", "self", ".", "ll_deriv", "[", "len", "(", "self", ".", "k", ".", "free_params", ")", "]", "=", "0.5", "*", "(", "self", ".", "alpha", ".", "T", ".", "dot", "(", "dK_dtheta_i", ".", "dot", "(", "self", ".", "alpha", ")", ")", "-", "scipy", ".", "trace", "(", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "self", ".", "L", ",", "True", ")", ",", "dK_dtheta_i", ")", ")", ")", "else", ":", "knk", "=", "self", ".", "k", "+", "self", ".", "noise_k", "# Get the indices of the free params in knk.params:", "free_param_idxs", "=", "scipy", ".", "arange", "(", "0", ",", "len", "(", "knk", ".", "params", ")", ",", "dtype", "=", "int", ")", "[", "~", "knk", ".", "fixed_params", "]", "# Handle the kernel and noise kernel:", "for", "i", ",", "pi", "in", "enumerate", "(", "free_param_idxs", ")", ":", "dK_dtheta_i", "=", "self", ".", "compute_Kij", "(", "self", ".", "X", ",", "None", ",", "self", ".", "n", ",", "None", ",", "k", "=", "knk", ",", "hyper_deriv", "=", "pi", ")", "if", "self", ".", "T", "is", "not", "None", ":", "dK_dtheta_i", "=", "self", ".", "T", ".", "dot", "(", "dK_dtheta_i", ")", ".", "dot", "(", "self", ".", "T", ".", "T", ")", "self", ".", "ll_deriv", "[", "i", "]", "=", "0.5", "*", "(", "self", ".", "alpha", ".", "T", ".", "dot", "(", "dK_dtheta_i", ".", "dot", "(", "self", ".", "alpha", ")", ")", "-", "scipy", ".", "trace", "(", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "self", ".", "L", ",", "True", ")", ",", "dK_dtheta_i", ")", ")", ")", "# Handle the mean function:", "if", "self", ".", "mu", "is", "not", "None", ":", "# Get the indices of the free params in self.mu.params:", "free_param_idxs", "=", "scipy", ".", "arange", "(", "0", ",", "len", "(", "self", ".", "mu", ".", "params", ")", ",", "dtype", "=", "int", ")", "[", "~", "self", ".", "mu", ".", "fixed_params", "]", "for", "i", ",", "pi", "in", "enumerate", "(", "free_param_idxs", ")", ":", "dmu_dtheta_i", "=", "scipy", ".", "atleast_2d", "(", "self", ".", "mu", "(", "self", ".", "X", ",", "self", ".", "n", ",", "hyper_deriv", "=", "pi", ")", ")", ".", "T", "if", "self", ".", "T", "is", "not", "None", ":", "dmu_dtheta_i", "=", "self", ".", "T", ".", "dot", "(", "dmu_dtheta_i", ")", "self", ".", "ll_deriv", "[", "i", "+", "len", "(", "knk", ".", "free_params", ")", "]", "=", "dmu_dtheta_i", ".", "T", ".", "dot", "(", "self", ".", "alpha", ")", "# Handle the hyperprior:", "# Get the indices of the free params in self.params:", "free_param_idxs", "=", "scipy", ".", "arange", "(", "0", ",", "len", "(", "self", ".", "params", ")", ",", "dtype", "=", "int", ")", "[", "~", "self", ".", "fixed_params", "]", "for", "i", ",", "pi", "in", "enumerate", "(", "free_param_idxs", ")", ":", "self", ".", "ll_deriv", "[", "i", "]", "+=", "self", ".", "hyperprior", "(", "self", ".", "params", ",", "hyper_deriv", "=", "pi", ")", "self", ".", "K_up_to_date", "=", "True" ]
50.371429
22.314286
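The numeric heart of the method is the standard Gaussian-process identity alpha = K^{-1} y computed through a Cholesky factor, followed by the log marginal likelihood. A minimal runnable sketch with scipy on a toy kernel matrix (not the class's compute_Kij):

import numpy as np
import scipy.linalg

# Toy covariance: squared-exponential kernel on 1-D inputs plus jitter.
x = np.linspace(0.0, 1.0, 5)
K = np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / 0.1 ** 2) + 1e-6 * np.eye(5)
y = np.sin(2.0 * np.pi * x)

L = scipy.linalg.cholesky(K, lower=True)
alpha = scipy.linalg.cho_solve((L, True), y)

# Log marginal likelihood, mirroring the expression in the method above.
ll = (-0.5 * y.dot(alpha)
      - np.log(np.diag(L)).sum()
      - 0.5 * len(y) * np.log(2.0 * np.pi))
print(ll)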
def rcts(self, command, *args, **kwargs):
    '''General function for applying a rolling R function to a time series'''
    cls = self.__class__
    name = kwargs.pop('name', '')
    date = kwargs.pop('date', None)
    data = kwargs.pop('data', None)
    kwargs.pop('bycolumn', None)
    ts = cls(name=name, date=date, data=data)
    ts._ts = self.rc(command, *args, **kwargs)
    return ts
[ "def", "rcts", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "self", ".", "__class__", "name", "=", "kwargs", ".", "pop", "(", "'name'", ",", "''", ")", "date", "=", "kwargs", ".", "pop", "(", "'date'", ",", "None", ")", "data", "=", "kwargs", ".", "pop", "(", "'data'", ",", "None", ")", "kwargs", ".", "pop", "(", "'bycolumn'", ",", "None", ")", "ts", "=", "cls", "(", "name", "=", "name", ",", "date", "=", "date", ",", "data", "=", "data", ")", "ts", ".", "_ts", "=", "self", ".", "rc", "(", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ts" ]
41.9
10.5
def update(self): """Once you open a dataset, it activates all the widgets. """ self.info.display_dataset() self.overview.update() self.labels.update(labels=self.info.dataset.header['chan_name']) self.channels.update() try: self.info.markers = self.info.dataset.read_markers() except FileNotFoundError: lg.info('No notes/markers present in the header of the file') else: self.notes.update_dataset_marker()
[ "def", "update", "(", "self", ")", ":", "self", ".", "info", ".", "display_dataset", "(", ")", "self", ".", "overview", ".", "update", "(", ")", "self", ".", "labels", ".", "update", "(", "labels", "=", "self", ".", "info", ".", "dataset", ".", "header", "[", "'chan_name'", "]", ")", "self", ".", "channels", ".", "update", "(", ")", "try", ":", "self", ".", "info", ".", "markers", "=", "self", ".", "info", ".", "dataset", ".", "read_markers", "(", ")", "except", "FileNotFoundError", ":", "lg", ".", "info", "(", "'No notes/markers present in the header of the file'", ")", "else", ":", "self", ".", "notes", ".", "update_dataset_marker", "(", ")" ]
35.785714
17.5
def image_size(self, pnmfile):
    """Get width and height of pnm file.

    simeon@homebox src>pnmfile /tmp/214-2.png
    /tmp/214-2.png:PPM raw, 100 by 100 maxval 255
    """
    pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r')
    pnmfileout = pout.read(200)
    pout.close()
    m = re.search(r', (\d+) by (\d+) ', pnmfileout)
    if m is None:
        raise IIIFError(
            text="Bad output from pnmfile when trying to get size.")
    w = int(m.group(1))
    h = int(m.group(2))
    # print "pnmfile output = %s" % (pnmfileout)
    # print "image size = %d,%d" % (w,h)
    return (w, h)
[ "def", "image_size", "(", "self", ",", "pnmfile", ")", ":", "pout", "=", "os", ".", "popen", "(", "self", ".", "shellsetup", "+", "self", ".", "pnmfile", "+", "' '", "+", "pnmfile", ",", "'r'", ")", "pnmfileout", "=", "pout", ".", "read", "(", "200", ")", "pout", ".", "close", "(", ")", "m", "=", "re", ".", "search", "(", "', (\\d+) by (\\d+) '", ",", "pnmfileout", ")", "if", "(", "m", "is", "None", ")", ":", "raise", "IIIFError", "(", "text", "=", "\"Bad output from pnmfile when trying to get size.\"", ")", "w", "=", "int", "(", "m", ".", "group", "(", "1", ")", ")", "h", "=", "int", "(", "m", ".", "group", "(", "2", ")", ")", "# print \"pnmfile output = %s\" % (pnmfileout)", "# print \"image size = %d,%d\" % (w,h)", "return", "(", "w", ",", "h", ")" ]
37
15.055556
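A self-contained check of the size-parsing regex above against a canned line of pnmfile output (no shell call required):

import re

pnmfileout = '/tmp/214-2.png:PPM raw, 100 by 100 maxval 255'
m = re.search(r', (\d+) by (\d+) ', pnmfileout)
if m is None:
    raise ValueError("Bad output from pnmfile when trying to get size.")
print(int(m.group(1)), int(m.group(2)))  # 100 100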
def register_lazy_provider_method(self, cls, method): """ Register a class method lazily as a provider. """ if 'provides' not in getattr(method, '__di__', {}): raise DiayException('method %r is not a provider' % method) @functools.wraps(method) def wrapper(*args, **kwargs): return getattr(self.get(cls), method.__name__)(*args, **kwargs) self.factories[method.__di__['provides']] = wrapper
[ "def", "register_lazy_provider_method", "(", "self", ",", "cls", ",", "method", ")", ":", "if", "'provides'", "not", "in", "getattr", "(", "method", ",", "'__di__'", ",", "{", "}", ")", ":", "raise", "DiayException", "(", "'method %r is not a provider'", "%", "method", ")", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "self", ".", "get", "(", "cls", ")", ",", "method", ".", "__name__", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "factories", "[", "method", ".", "__di__", "[", "'provides'", "]", "]", "=", "wrapper" ]
38.416667
18.416667
def pip_upgrade_all_user(line): """Attempt to upgrade all packages installed with --user""" import pip for dist in pip.get_installed_distributions(user_only=True): do_pip(["install", "--upgrade", "--user", dist.project_name])
[ "def", "pip_upgrade_all_user", "(", "line", ")", ":", "import", "pip", "for", "dist", "in", "pip", ".", "get_installed_distributions", "(", "user_only", "=", "True", ")", ":", "do_pip", "(", "[", "\"install\"", ",", "\"--upgrade\"", ",", "\"--user\"", ",", "dist", ".", "project_name", "]", ")" ]
48.2
17.6
def register_regex_entity(self, regex_str):
    """
    A regular expression making use of python named group expressions.

    Example: (?P<Artist>.*)

    regex_str(str): a string representing a regular expression as defined above
    """
    if regex_str and regex_str not in self._regex_strings:
        self._regex_strings.add(regex_str)
        self.regular_expressions_entities.append(re.compile(regex_str, re.IGNORECASE))
[ "def", "register_regex_entity", "(", "self", ",", "regex_str", ")", ":", "if", "regex_str", "and", "regex_str", "not", "in", "self", ".", "_regex_strings", ":", "self", ".", "_regex_strings", ".", "add", "(", "regex_str", ")", "self", ".", "regular_expressions_entities", ".", "append", "(", "re", ".", "compile", "(", "regex_str", ",", "re", ".", "IGNORECASE", ")", ")" ]
41
22.454545
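A quick runnable demonstration of the named-group convention the docstring describes: the group name (here the illustrative entity 'Artist') is recoverable from the match:

import re

regex_str = r'play (?P<Artist>.*)'
pattern = re.compile(regex_str, re.IGNORECASE)

match = pattern.match('Play The Beatles')
if match:
    print(match.groupdict())  # {'Artist': 'The Beatles'}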
def get_full_description(self, s, base=None): """Get the full description from a docstring This here and the line above is the full description (i.e. the combination of the :meth:`get_summary` and the :meth:`get_extended_summary`) output Parameters ---------- s: str The docstring to use base: str or None A key under which the description shall be stored in the :attr:`params` attribute. If not None, the summary will be stored in ``base + '.full_desc'``. Otherwise, it will not be stored at all Returns ------- str The extracted full description""" summary = self.get_summary(s) extended_summary = self.get_extended_summary(s) ret = (summary + '\n\n' + extended_summary).strip() if base is not None: self.params[base + '.full_desc'] = ret return ret
[ "def", "get_full_description", "(", "self", ",", "s", ",", "base", "=", "None", ")", ":", "summary", "=", "self", ".", "get_summary", "(", "s", ")", "extended_summary", "=", "self", ".", "get_extended_summary", "(", "s", ")", "ret", "=", "(", "summary", "+", "'\\n\\n'", "+", "extended_summary", ")", ".", "strip", "(", ")", "if", "base", "is", "not", "None", ":", "self", ".", "params", "[", "base", "+", "'.full_desc'", "]", "=", "ret", "return", "ret" ]
34.814815
20.185185
def build_doctype(qualifiedName, publicId=None, systemId=None, internalSubset=None): """ Instantiate an ElifeDocumentType, a subclass of minidom.DocumentType, with some properties so it is more testable """ doctype = ElifeDocumentType(qualifiedName) doctype._identified_mixin_init(publicId, systemId) if internalSubset: doctype.internalSubset = internalSubset return doctype
[ "def", "build_doctype", "(", "qualifiedName", ",", "publicId", "=", "None", ",", "systemId", "=", "None", ",", "internalSubset", "=", "None", ")", ":", "doctype", "=", "ElifeDocumentType", "(", "qualifiedName", ")", "doctype", ".", "_identified_mixin_init", "(", "publicId", ",", "systemId", ")", "if", "internalSubset", ":", "doctype", ".", "internalSubset", "=", "internalSubset", "return", "doctype" ]
40.5
15.1
def register_service(service): """ Register the ryu application specified by 'service' as a provider of events defined in the calling module. If an application being loaded consumes events (in the sense of set_ev_cls) provided by the 'service' application, the latter application will be automatically loaded. This mechanism is used to e.g. automatically start ofp_handler if there are applications consuming OFP events. """ frame = inspect.currentframe() m_name = frame.f_back.f_globals['__name__'] m = sys.modules[m_name] m._SERVICE_NAME = service
[ "def", "register_service", "(", "service", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", "m_name", "=", "frame", ".", "f_back", ".", "f_globals", "[", "'__name__'", "]", "m", "=", "sys", ".", "modules", "[", "m_name", "]", "m", ".", "_SERVICE_NAME", "=", "service" ]
36.75
15.875
def getInferenceTypeFromLabel(cls, label): """ Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`) """ infType, _, _= label.partition(cls._LABEL_SEPARATOR) if not InferenceType.validate(infType): return None return infType
[ "def", "getInferenceTypeFromLabel", "(", "cls", ",", "label", ")", ":", "infType", ",", "_", ",", "_", "=", "label", ".", "partition", "(", "cls", ".", "_LABEL_SEPARATOR", ")", "if", "not", "InferenceType", ".", "validate", "(", "infType", ")", ":", "return", "None", "return", "infType" ]
27.625
21.0625
def annotate_and_optimize_ast(
    parsed_ast: ast.Module,
    source_code: str,
    class_types: Optional[ClassTypes] = None,
) -> None:
    """
    Performs annotation and optimization on a parsed python AST by doing the
    following:

    * Annotating all AST nodes with the originating source code of the AST
    * Annotating class definition nodes with their original class type
      ("contract" or "struct")
    * Substituting negative values for unary subtractions

    :param parsed_ast: The AST to be annotated and optimized.
    :param source_code: The originating source code of the AST.
    :param class_types: A mapping of class names to original class types.
    :return: None.  The AST is annotated and optimized in place.
    """
    AnnotatingVisitor(source_code, class_types).visit(parsed_ast)
    RewriteUnarySubVisitor().visit(parsed_ast)
    EnsureSingleExitChecker().visit(parsed_ast)
[ "def", "annotate_and_optimize_ast", "(", "parsed_ast", ":", "ast", ".", "Module", ",", "source_code", ":", "str", ",", "class_types", ":", "Optional", "[", "ClassTypes", "]", "=", "None", ",", ")", "->", "None", ":", "AnnotatingVisitor", "(", "source_code", ",", "class_types", ")", ".", "visit", "(", "parsed_ast", ")", "RewriteUnarySubVisitor", "(", ")", ".", "visit", "(", "parsed_ast", ")", "EnsureSingleExitChecker", "(", ")", ".", "visit", "(", "parsed_ast", ")" ]
39.409091
19.5
def execute_commands(self, mapping, *args, **kwargs): """Concurrently executes a sequence of commands on a Redis cluster that are associated with a routing key, returning a new mapping where values are a list of results that correspond to the command in the same position. For example:: >>> cluster.execute_commands({ ... 'foo': [ ... ('PING',), ... ('TIME',), ... ], ... 'bar': [ ... ('CLIENT', 'GETNAME'), ... ], ... }) {'bar': [<Promise None>], 'foo': [<Promise True>, <Promise (1454446079, 418404)>]} Commands that are instances of :class:`redis.client.Script` will first be checked for their existence on the target nodes then loaded on the targets before executing and can be interleaved with other commands:: >>> from redis.client import Script >>> TestScript = Script(None, 'return {KEYS, ARGV}') >>> cluster.execute_commands({ ... 'foo': [ ... (TestScript, ('key:1', 'key:2'), range(0, 3)), ... ], ... 'bar': [ ... (TestScript, ('key:3', 'key:4'), range(3, 6)), ... ], ... }) {'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>], 'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]} Internally, :class:`FanoutClient` is used for issuing commands. """ def is_script_command(command): return isinstance(command[0], Script) def check_script_load_result(script, result): if script.sha != result: raise AssertionError( 'Hash mismatch loading {!r}: expected {!r}, got {!r}'.format( script, script.sha, result, ) ) # Run through all the commands and check to see if there are any # scripts, and whether or not they have been loaded onto the target # hosts. exists = {} with self.fanout(*args, **kwargs) as client: for key, commands in mapping.items(): targeted = client.target_key(key) for command in filter(is_script_command, commands): script = command[0] # Set the script hash if it hasn't already been set. if not script.sha: script.sha = sha1(script.script).hexdigest() # Check if the script has been loaded on each host that it # will be executed on. for host in targeted._target_hosts: if script not in exists.setdefault(host, {}): exists[host][script] = targeted.execute_command('SCRIPT EXISTS', script.sha) # Execute the pending commands, loading scripts onto servers where they # do not already exist. results = {} with self.fanout(*args, **kwargs) as client: for key, commands in mapping.items(): results[key] = [] targeted = client.target_key(key) for command in commands: # If this command is a script, we need to check and see if # it needs to be loaded before execution. if is_script_command(command): script = command[0] for host in targeted._target_hosts: if script in exists[host]: result = exists[host].pop(script) if not result.value[0]: targeted.execute_command('SCRIPT LOAD', script.script).done( on_success=functools.partial(check_script_load_result, script) ) keys, arguments = command[1:] parameters = list(keys) + list(arguments) results[key].append(targeted.execute_command('EVALSHA', script.sha, len(keys), *parameters)) else: results[key].append(targeted.execute_command(*command)) return results
[ "def", "execute_commands", "(", "self", ",", "mapping", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "is_script_command", "(", "command", ")", ":", "return", "isinstance", "(", "command", "[", "0", "]", ",", "Script", ")", "def", "check_script_load_result", "(", "script", ",", "result", ")", ":", "if", "script", ".", "sha", "!=", "result", ":", "raise", "AssertionError", "(", "'Hash mismatch loading {!r}: expected {!r}, got {!r}'", ".", "format", "(", "script", ",", "script", ".", "sha", ",", "result", ",", ")", ")", "# Run through all the commands and check to see if there are any", "# scripts, and whether or not they have been loaded onto the target", "# hosts.", "exists", "=", "{", "}", "with", "self", ".", "fanout", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "client", ":", "for", "key", ",", "commands", "in", "mapping", ".", "items", "(", ")", ":", "targeted", "=", "client", ".", "target_key", "(", "key", ")", "for", "command", "in", "filter", "(", "is_script_command", ",", "commands", ")", ":", "script", "=", "command", "[", "0", "]", "# Set the script hash if it hasn't already been set.", "if", "not", "script", ".", "sha", ":", "script", ".", "sha", "=", "sha1", "(", "script", ".", "script", ")", ".", "hexdigest", "(", ")", "# Check if the script has been loaded on each host that it", "# will be executed on.", "for", "host", "in", "targeted", ".", "_target_hosts", ":", "if", "script", "not", "in", "exists", ".", "setdefault", "(", "host", ",", "{", "}", ")", ":", "exists", "[", "host", "]", "[", "script", "]", "=", "targeted", ".", "execute_command", "(", "'SCRIPT EXISTS'", ",", "script", ".", "sha", ")", "# Execute the pending commands, loading scripts onto servers where they", "# do not already exist.", "results", "=", "{", "}", "with", "self", ".", "fanout", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "client", ":", "for", "key", ",", "commands", "in", "mapping", ".", "items", "(", ")", ":", "results", "[", "key", "]", "=", "[", "]", "targeted", "=", "client", ".", "target_key", "(", "key", ")", "for", "command", "in", "commands", ":", "# If this command is a script, we need to check and see if", "# it needs to be loaded before execution.", "if", "is_script_command", "(", "command", ")", ":", "script", "=", "command", "[", "0", "]", "for", "host", "in", "targeted", ".", "_target_hosts", ":", "if", "script", "in", "exists", "[", "host", "]", ":", "result", "=", "exists", "[", "host", "]", ".", "pop", "(", "script", ")", "if", "not", "result", ".", "value", "[", "0", "]", ":", "targeted", ".", "execute_command", "(", "'SCRIPT LOAD'", ",", "script", ".", "script", ")", ".", "done", "(", "on_success", "=", "functools", ".", "partial", "(", "check_script_load_result", ",", "script", ")", ")", "keys", ",", "arguments", "=", "command", "[", "1", ":", "]", "parameters", "=", "list", "(", "keys", ")", "+", "list", "(", "arguments", ")", "results", "[", "key", "]", ".", "append", "(", "targeted", ".", "execute_command", "(", "'EVALSHA'", ",", "script", ".", "sha", ",", "len", "(", "keys", ")", ",", "*", "parameters", ")", ")", "else", ":", "results", "[", "key", "]", ".", "append", "(", "targeted", ".", "execute_command", "(", "*", "command", ")", ")", "return", "results" ]
45.104167
21.770833
def is_rdemo(file_name):
    """
    Return True if file_name matches a regexp for an R demo.  False otherwise.

    :param file_name: file to test
    """
    packaged_demos = ["h2o.anomaly.R", "h2o.deeplearning.R", "h2o.gbm.R", "h2o.glm.R", "h2o.glrm.R",
                      "h2o.kmeans.R", "h2o.naiveBayes.R", "h2o.prcomp.R", "h2o.randomForest.R"]
    if file_name in packaged_demos:
        return True
    if re.match(r"^rdemo.*\.(r|R|ipynb)$", file_name):
        return True
    return False
[ "def", "is_rdemo", "(", "file_name", ")", ":", "packaged_demos", "=", "[", "\"h2o.anomaly.R\"", ",", "\"h2o.deeplearning.R\"", ",", "\"h2o.gbm.R\"", ",", "\"h2o.glm.R\"", ",", "\"h2o.glrm.R\"", ",", "\"h2o.kmeans.R\"", ",", "\"h2o.naiveBayes.R\"", ",", "\"h2o.prcomp.R\"", ",", "\"h2o.randomForest.R\"", "]", "if", "file_name", "in", "packaged_demos", ":", "return", "True", "if", "re", ".", "match", "(", "\"^rdemo.*\\.(r|R|ipynb)$\"", ",", "file_name", ")", ":", "return", "True", "return", "False" ]
47.3
23.1
def readAt(self, offset, size):
    """
    Reads as many bytes as indicated by the size parameter at the specified offset.

    @type offset: int
    @param offset: Offset of the value to be read.

    @type size: int
    @param size: This parameter indicates how many bytes are going to be read from a given offset.

    @rtype: str
    @return: A packed string containing the read data.
    """
    if offset > self.length:
        if self.log:
            print("Warning: Trying to read: %d bytes - only %d bytes left" % (size, self.length - self.offset))
        offset = self.length - self.offset
    tmpOff = self.tell()
    self.setOffset(offset)
    r = self.read(size)
    self.setOffset(tmpOff)
    return r
[ "def", "readAt", "(", "self", ",", "offset", ",", "size", ")", ":", "if", "offset", ">", "self", ".", "length", ":", "if", "self", ".", "log", ":", "print", "\"Warning: Trying to read: %d bytes - only %d bytes left\"", "%", "(", "nroBytes", ",", "self", ".", "length", "-", "self", ".", "offset", ")", "offset", "=", "self", ".", "length", "-", "self", ".", "offset", "tmpOff", "=", "self", ".", "tell", "(", ")", "self", ".", "setOffset", "(", "offset", ")", "r", "=", "self", ".", "read", "(", "size", ")", "self", ".", "setOffset", "(", "tmpOff", ")", "return", "r" ]
34.954545
22.590909
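The save/seek/read/restore pattern above applies to any seekable stream; a runnable sketch with io.BytesIO:

import io

def read_at(stream, offset, size):
    # Remember the current position, read elsewhere, then restore it.
    saved = stream.tell()
    stream.seek(offset)
    data = stream.read(size)
    stream.seek(saved)
    return data

buf = io.BytesIO(b'0123456789')
buf.read(3)                 # cursor now at 3
print(read_at(buf, 6, 2))   # b'67'
print(buf.tell())           # 3 -- position was restored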
def is_contradictory(self, other):
    """ Can these two strings coexist? """
    other = StringCell.coerce(other)
    if self.value is None or other.value is None:
        # None = empty, and won't contradict anything
        return False

    def sequence_in(s1, s2):
        """Does `s1` appear in sequence in `s2`?"""
        return bool(re.search(".*".join(s1), s2))

    return not sequence_in(self.value, other.value) and \
        not sequence_in(other.value, self.value)
[ "def", "is_contradictory", "(", "self", ",", "other", ")", ":", "other", "=", "StringCell", ".", "coerce", "(", "other", ")", "if", "self", ".", "value", "is", "None", "or", "other", ".", "value", "is", "None", ":", "# None = empty, and won't contradict anything", "return", "False", "def", "sequence_in", "(", "s1", ",", "s2", ")", ":", "\"\"\"Does `s1` appear in sequence in `s2`?\"\"\"", "return", "bool", "(", "re", ".", "search", "(", "\".*\"", ".", "join", "(", "s1", ")", ",", "s2", ")", ")", "return", "not", "sequence_in", "(", "self", ".", "value", ",", "other", ".", "value", ")", "and", "not", "sequence_in", "(", "other", ".", "value", ",", "self", ".", "value", ")" ]
32.625
14.1875
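The sequence_in helper joins the characters of s1 with '.*', so it tests whether s1 occurs as a (not necessarily contiguous) subsequence of s2; note it would misbehave if s1 contained regex metacharacters:

import re

def sequence_in(s1, s2):
    """Does `s1` appear in sequence in `s2`?"""
    return bool(re.search(".*".join(s1), s2))

print(sequence_in("abc", "xaybzc"))  # True: a..b..c appear in order
print(sequence_in("abc", "acb"))     # False: no 'c' after the 'b'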
def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification of the file entry. Returns: ZipFileEntry: a file entry or None. """ if not self.FileEntryExistsByPathSpec(path_spec): return None location = getattr(path_spec, 'location', None) if len(location) == 1: return zip_file_entry.ZipFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True) kwargs = {} try: kwargs['zip_info'] = self._zip_file.getinfo(location[1:]) except KeyError: kwargs['is_virtual'] = True return zip_file_entry.ZipFileEntry( self._resolver_context, self, path_spec, **kwargs)
[ "def", "GetFileEntryByPathSpec", "(", "self", ",", "path_spec", ")", ":", "if", "not", "self", ".", "FileEntryExistsByPathSpec", "(", "path_spec", ")", ":", "return", "None", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "if", "len", "(", "location", ")", "==", "1", ":", "return", "zip_file_entry", ".", "ZipFileEntry", "(", "self", ".", "_resolver_context", ",", "self", ",", "path_spec", ",", "is_root", "=", "True", ",", "is_virtual", "=", "True", ")", "kwargs", "=", "{", "}", "try", ":", "kwargs", "[", "'zip_info'", "]", "=", "self", ".", "_zip_file", ".", "getinfo", "(", "location", "[", "1", ":", "]", ")", "except", "KeyError", ":", "kwargs", "[", "'is_virtual'", "]", "=", "True", "return", "zip_file_entry", ".", "ZipFileEntry", "(", "self", ".", "_resolver_context", ",", "self", ",", "path_spec", ",", "*", "*", "kwargs", ")" ]
27.666667
20.555556
def _decode_exp(self, access_token=None): """Extract exp field from access token. Args: access_token (str): Access token to decode. Defaults to ``None``. Returns: int: JWT expiration in epoch seconds. """ c = self.get_credentials() jwt = access_token or c.access_token x = self.decode_jwt_payload(jwt) if 'exp' in x: try: exp = int(x['exp']) except ValueError: raise PanCloudError( "Expiration time (exp) must be an integer") else: self.jwt_exp = exp return exp else: raise PanCloudError("No exp field found in payload")
[ "def", "_decode_exp", "(", "self", ",", "access_token", "=", "None", ")", ":", "c", "=", "self", ".", "get_credentials", "(", ")", "jwt", "=", "access_token", "or", "c", ".", "access_token", "x", "=", "self", ".", "decode_jwt_payload", "(", "jwt", ")", "if", "'exp'", "in", "x", ":", "try", ":", "exp", "=", "int", "(", "x", "[", "'exp'", "]", ")", "except", "ValueError", ":", "raise", "PanCloudError", "(", "\"Expiration time (exp) must be an integer\"", ")", "else", ":", "self", ".", "jwt_exp", "=", "exp", "return", "exp", "else", ":", "raise", "PanCloudError", "(", "\"No exp field found in payload\"", ")" ]
29.12
17.84
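A standalone sketch of what a decode_jwt_payload helper typically does for the exp lookup: base64url-decode the middle JWT segment and parse it as JSON. The toy unsigned token is built inline; this is an assumption about the helper's behavior, not pancloud's actual implementation:

import base64
import json

def decode_jwt_payload(jwt):
    # The payload is the second dot-separated, base64url-encoded segment.
    payload = jwt.split('.')[1]
    payload += '=' * (-len(payload) % 4)  # restore stripped padding
    return json.loads(base64.urlsafe_b64decode(payload))

claims = {'exp': 1893456000}
body = base64.urlsafe_b64encode(json.dumps(claims).encode()).rstrip(b'=').decode()
token = 'eyJhbGciOiJub25lIn0.' + body + '.sig'
print(int(decode_jwt_payload(token)['exp']))  # 1893456000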
def copy_pkg(self, filename, id_=-1): """Copy a pkg, dmg, or zip to all repositories. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate package with for a JDS or CDP only. Default is -1, which is used for creating a new package object in the database. """ for repo in self._children: repo.copy_pkg(filename, id_)
[ "def", "copy_pkg", "(", "self", ",", "filename", ",", "id_", "=", "-", "1", ")", ":", "for", "repo", "in", "self", ".", "_children", ":", "repo", ".", "copy_pkg", "(", "filename", ",", "id_", ")" ]
40.545455
15.454545
def get_bundle_imported_services(self, bundle): """ Returns this bundle's ServiceReference list for all services it is using or returns None if this bundle is not using any services. A bundle is considered to be using a service if its use count for that service is greater than zero. The list is valid at the time of the call to this method, however, as the Framework is a very dynamic environment, services can be modified or unregistered at any time. :param bundle: The bundle to look into :return: The references of the services used by this bundle """ with self.__svc_lock: return sorted(self.__bundle_imports.get(bundle, []))
[ "def", "get_bundle_imported_services", "(", "self", ",", "bundle", ")", ":", "with", "self", ".", "__svc_lock", ":", "return", "sorted", "(", "self", ".", "__bundle_imports", ".", "get", "(", "bundle", ",", "[", "]", ")", ")" ]
45.3125
21.1875
def _cache_ops_associate(protocol, msgtype): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111. Positional arguments: protocol -- Netlink protocol (integer). msgtype -- Netlink message type (integer). Returns: nl_cache_ops instance with matching protocol containing matching msgtype or None. """ ops = cache_ops while ops: # Loop until `ops` is None. if ops.co_protocol == protocol: for co_msgtype in ops.co_msgtypes: if co_msgtype.mt_id == msgtype: return ops ops = ops.co_next return None
[ "def", "_cache_ops_associate", "(", "protocol", ",", "msgtype", ")", ":", "ops", "=", "cache_ops", "while", "ops", ":", "# Loop until `ops` is None.", "if", "ops", ".", "co_protocol", "==", "protocol", ":", "for", "co_msgtype", "in", "ops", ".", "co_msgtypes", ":", "if", "co_msgtype", ".", "mt_id", "==", "msgtype", ":", "return", "ops", "ops", "=", "ops", ".", "co_next", "return", "None" ]
33.611111
14.944444
def parse_config(f):
    """
    Load a YAML-formatted configuration from file stream |f|

    :param file f: Where to read the config.
    """
    try:
        c = yaml.safe_load(f)
        for section_name, section in c.items():
            group = get_group(section_name)
            for key, val in section.items():
                group.update(key)
                setattr(group, key, val)
    # Any exception here should trigger the warning; from not being able to parse yaml
    # to reading poorly formatted values
    except Exception:
        raise ConfigError("Failed reading config file. Do you have a local [.]manticore.yml file?")
[ "def", "parse_config", "(", "f", ")", ":", "try", ":", "c", "=", "yaml", ".", "safe_load", "(", "f", ")", "for", "section_name", ",", "section", "in", "c", ".", "items", "(", ")", ":", "group", "=", "get_group", "(", "section_name", ")", "for", "key", ",", "val", "in", "section", ".", "items", "(", ")", ":", "group", ".", "update", "(", "key", ")", "setattr", "(", "group", ",", "key", ",", "val", ")", "# Any exception here should trigger the warning; from not being able to parse yaml", "# to reading poorly formatted values", "except", "Exception", ":", "raise", "ConfigError", "(", "\"Failed reading config file. Do you have a local [.]manticore.yml file?\"", ")" ]
33.052632
18.526316
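A hedged usage sketch: `parse_config()` consumes any file-like stream of section-to-key/value mappings. The section and key names below are illustrative only, and `get_group()` from the surrounding module must already know how to resolve them.

```python
import io

CONFIG = """
core:
  timeout: 120
  procs: 4
"""
# Each top-level key becomes a config group; each nested key a setting.
parse_config(io.StringIO(CONFIG))
```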
def p_retry_option(p):
    """
    retry_option : LIMIT COLON NUMBER
                 | DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
    """
    if len(p) == 4:
        p[0] = {"limit": int(p[3])}
    elif len(p) == 7:
        p[0] = {"delay": Delay(int(p[5]), p[3])}
    else:
        raise RuntimeError("Invalid production in 'retry_option'")
[ "def", "p_retry_option", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "{", "\"limit\"", ":", "int", "(", "p", "[", "3", "]", ")", "}", "elif", "len", "(", "p", ")", "==", "7", ":", "p", "[", "0", "]", "=", "{", "\"delay\"", ":", "Delay", "(", "int", "(", "p", "[", "5", "]", ")", ",", "p", "[", "3", "]", ")", "}", "else", ":", "raise", "RuntimeError", "(", "\"Invalid production in 'retry_option'\"", ")" ]
31.545455
15
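To make the two PLY productions concrete, here are illustrative source fragments each one matches and the dict it builds; `Delay` is the project's value object, and the `exponential` strategy name is a made-up example:

```python
# limit: 3               ->  {"limit": 3}
# delay: exponential[5]  ->  {"delay": Delay(5, "exponential")}
```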
def components(self):
    '''The period as a component string, e.g. ``1Y2M3W4D``,
    with a leading ``-`` when the period is negative'''
    p = ''
    neg = self.totaldays < 0
    y = self.years
    m = self.months
    w = self.weeks
    d = self.days
    if y:
        p = '%sY' % abs(y)
    if m:
        p = '%s%sM' % (p, abs(m))
    if w:
        p = '%s%sW' % (p, abs(w))
    if d:
        p = '%s%sD' % (p, abs(d))
    return '-'+p if neg else p
[ "def", "components", "(", "self", ")", ":", "p", "=", "''", "neg", "=", "self", ".", "totaldays", "<", "0", "y", "=", "self", ".", "years", "m", "=", "self", ".", "months", "w", "=", "self", ".", "weeks", "d", "=", "self", ".", "days", "if", "y", ":", "p", "=", "'%sY'", "%", "abs", "(", "y", ")", "if", "m", ":", "p", "=", "'%s%sM'", "%", "(", "p", ",", "abs", "(", "m", ")", ")", "if", "w", ":", "p", "=", "'%s%sW'", "%", "(", "p", ",", "abs", "(", "w", ")", ")", "if", "d", ":", "p", "=", "'%s%sD'", "%", "(", "p", ",", "abs", "(", "d", ")", ")", "return", "'-'", "+", "p", "if", "neg", "else", "p" ]
24.294118
15.705882
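A standalone restatement of the formatting rule, for checking against examples: each non-zero field contributes `<abs(value)><letter>` in Y/M/W/D order, and a single leading `-` marks a negative period.

```python
def components(years, months, weeks, days, negative=False):
    p = ''
    for value, letter in ((years, 'Y'), (months, 'M'), (weeks, 'W'), (days, 'D')):
        if value:  # zero-valued fields are skipped entirely
            p = '%s%s%s' % (p, abs(value), letter)
    return '-' + p if negative else p

print(components(1, 2, 0, 3))                 # '1Y2M3D'
print(components(0, 0, 1, 4, negative=True))  # '-1W4D'
```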
def _validate_jp2h(self, boxes):
    """Validate the JP2 Header box."""
    self._check_jp2h_child_boxes(boxes, 'top-level')

    jp2h_lst = [box for box in boxes if box.box_id == 'jp2h']
    jp2h = jp2h_lst[0]

    # 1st jp2 header box cannot be empty.
    if len(jp2h.box) == 0:
        msg = "The JP2 header superbox cannot be empty."
        raise IOError(msg)

    # 1st jp2 header box must be ihdr
    if jp2h.box[0].box_id != 'ihdr':
        msg = ("The first box in the jp2 header box must be the image "
               "header box.")
        raise IOError(msg)

    # colr must be present in jp2 header box.
    colr_lst = [j for (j, box) in enumerate(jp2h.box)
                if box.box_id == 'colr']
    if len(colr_lst) == 0:
        msg = "The jp2 header box must contain a color definition box."
        raise IOError(msg)
    colr = jp2h.box[colr_lst[0]]

    self._validate_channel_definition(jp2h, colr)
[ "def", "_validate_jp2h", "(", "self", ",", "boxes", ")", ":", "self", ".", "_check_jp2h_child_boxes", "(", "boxes", ",", "'top-level'", ")", "jp2h_lst", "=", "[", "box", "for", "box", "in", "boxes", "if", "box", ".", "box_id", "==", "'jp2h'", "]", "jp2h", "=", "jp2h_lst", "[", "0", "]", "# 1st jp2 header box cannot be empty.", "if", "len", "(", "jp2h", ".", "box", ")", "==", "0", ":", "msg", "=", "\"The JP2 header superbox cannot be empty.\"", "raise", "IOError", "(", "msg", ")", "# 1st jp2 header box must be ihdr", "if", "jp2h", ".", "box", "[", "0", "]", ".", "box_id", "!=", "'ihdr'", ":", "msg", "=", "(", "\"The first box in the jp2 header box must be the image \"", "\"header box.\"", ")", "raise", "IOError", "(", "msg", ")", "# colr must be present in jp2 header box.", "colr_lst", "=", "[", "j", "for", "(", "j", ",", "box", ")", "in", "enumerate", "(", "jp2h", ".", "box", ")", "if", "box", ".", "box_id", "==", "'colr'", "]", "if", "len", "(", "colr_lst", ")", "==", "0", ":", "msg", "=", "\"The jp2 header box must contain a color definition box.\"", "raise", "IOError", "(", "msg", ")", "colr", "=", "jp2h", ".", "box", "[", "colr_lst", "[", "0", "]", "]", "self", ".", "_validate_channel_definition", "(", "jp2h", ",", "colr", ")" ]
36.259259
17.148148
def show_search_results(
        log_rec,
        code_view=True,
        json_view=False,
        show_message_details=False):
    """show_search_results

    Show search results like rsyslog or as pretty-printed
    JSON dictionaries per log for debugging drill-down fields

    :param log_rec: log record from splunk
    :param code_view: show as a normal tail -f <log file> view
    :param json_view: pretty print each log's dictionary
    :param show_message_details: when True, replace the log suffix with
        dc/env/source/line details for each log line
    """
    log_dict = None
    try:
        log_dict = json.loads(log_rec)
    except Exception as e:
        log.error(('Failed logging record={} with ex={}').format(log_rec, e))
        return
    # end of try/ex

    if not log_dict:
        log.error(('Failed to parse log_rec={} as a dictionary').format(log_rec))
        return

    if code_view:
        comp_name = log_dict.get('name', '')
        logger_name = log_dict.get('logger_name', '')
        use_log_name = ('{}').format(logger_name)
        if logger_name:
            use_log_name = '{}'.format(logger_name)
        else:
            if comp_name:
                use_log_name = '{}'.format(comp_name)

        prefix_log = ('{} {} - {} -').format(
            log_dict.get('systime', log_dict.get('asctime', '')),
            use_log_name,
            log_dict.get('levelname', ''))

        suffix_log = ''
        if log_dict.get('exc', ''):
            suffix_log = ('{} exc={}').format(suffix_log, log_dict.get('exc', ''))
        if show_message_details:
            suffix_log = ('dc={} env={} source={} line={}').format(
                log_dict.get('dc', ''),
                log_dict.get('env', ''),
                log_dict.get('path', ''),
                log_dict.get('lineno', ''))

        msg = ('{} {} {}').format(
            prefix_log,
            log_dict.get('message', ''),
            suffix_log)

        if log_dict['levelname'] == 'INFO':
            log.info(('{}').format(msg))
        elif log_dict['levelname'] == 'DEBUG':
            log.debug(('{}').format(msg))
        elif log_dict['levelname'] == 'ERROR':
            log.error(('{}').format(msg))
        elif log_dict['levelname'] == 'CRITICAL':
            log.critical(('{}').format(msg))
        elif log_dict['levelname'] == 'WARNING':
            log.warning(('{}').format(msg))
        else:
            log.debug(('{}').format(msg))
    elif json_view:
        if log_dict['levelname'] == 'INFO':
            log.info(('{}').format(ppj(log_dict)))
        elif log_dict['levelname'] == 'DEBUG':
            log.debug(('{}').format(ppj(log_dict)))
        elif log_dict['levelname'] == 'ERROR':
            log.error(('{}').format(ppj(log_dict)))
        elif log_dict['levelname'] == 'CRITICAL':
            log.critical(('{}').format(ppj(log_dict)))
        elif log_dict['levelname'] == 'WARNING':
            log.warning(('{}').format(ppj(log_dict)))
        else:
            log.debug(('{}').format(ppj(log_dict)))
    else:
        log.error(('Please use either code_view or json_view to view the logs'))
[ "def", "show_search_results", "(", "log_rec", ",", "code_view", "=", "True", ",", "json_view", "=", "False", ",", "show_message_details", "=", "False", ")", ":", "log_dict", "=", "None", "try", ":", "log_dict", "=", "json", ".", "loads", "(", "log_rec", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "(", "'Failed logging record={} with ex={}'", ")", ".", "format", "(", "log_rec", ",", "e", ")", ")", "return", "# end of try/ex", "if", "not", "log_dict", ":", "log", ".", "error", "(", "(", "'Failed to parse log_rec={} as a dictionary'", ")", ".", "format", "(", "log_rec", ")", ")", "return", "if", "code_view", ":", "comp_name", "=", "log_dict", ".", "get", "(", "'name'", ",", "''", ")", "logger_name", "=", "log_dict", ".", "get", "(", "'logger_name'", ",", "''", ")", "use_log_name", "=", "(", "'{}'", ")", ".", "format", "(", "logger_name", ")", "if", "logger_name", ":", "use_log_name", "=", "'{}'", ".", "format", "(", "logger_name", ")", "else", ":", "if", "comp_name", ":", "use_log_name", "=", "'{}'", ".", "format", "(", "comp_name", ")", "prefix_log", "=", "(", "'{} {} - {} -'", ")", ".", "format", "(", "log_dict", ".", "get", "(", "'systime'", ",", "log_dict", ".", "get", "(", "'asctime'", ",", "''", ")", ")", ",", "use_log_name", ",", "log_dict", ".", "get", "(", "'levelname'", ",", "''", ")", ")", "suffix_log", "=", "''", "if", "log_dict", ".", "get", "(", "'exc'", ",", "''", ")", ":", "suffix_log", "=", "(", "'{} exc={}'", ")", ".", "format", "(", "suffix_log", ",", "log_dict", ".", "get", "(", "'exc'", ",", "''", ")", ")", "if", "show_message_details", ":", "suffix_log", "=", "(", "'dc={} env={} '", "'source={} line={}'", ")", ".", "format", "(", "log_dict", ".", "get", "(", "'dc'", ",", "''", ")", ",", "log_dict", ".", "get", "(", "'env'", ",", "''", ")", ",", "log_dict", ".", "get", "(", "'path'", ",", "''", ")", ",", "log_dict", ".", "get", "(", "'lineno'", ",", "''", ")", ")", "msg", "=", "(", "'{} {} {}'", ")", ".", "format", "(", "prefix_log", ",", "log_dict", ".", "get", "(", "'message'", ",", "''", ")", ",", "suffix_log", ")", "if", "log_dict", "[", "'levelname'", "]", "==", "'INFO'", ":", "log", ".", "info", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'DEBUG'", ":", "log", ".", "debug", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'ERROR'", ":", "log", ".", "error", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'CRITICAL'", ":", "log", ".", "critical", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'WARNING'", ":", "log", ".", "warning", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "else", ":", "log", ".", "debug", "(", "(", "'{}'", ")", ".", "format", "(", "msg", ")", ")", "elif", "json_view", ":", "if", "log_dict", "[", "'levelname'", "]", "==", "'INFO'", ":", "log", ".", "info", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'DEBUG'", ":", "log", ".", "debug", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'ERROR'", ":", "log", ".", "error", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'CRITICAL'", ":", "log", ".", 
"critical", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "elif", "log_dict", "[", "'levelname'", "]", "==", "'WARNING'", ":", "log", ".", "warning", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "else", ":", "log", ".", "debug", "(", "(", "'{}'", ")", ".", "format", "(", "ppj", "(", "log_dict", ")", ")", ")", "else", ":", "log", ".", "error", "(", "(", "'Please use either code_view or json_view to view the logs'", ")", ")" ]
27.513158
15.138158
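A hedged usage sketch: each record handed to `show_search_results()` is a JSON object string carrying splunk-style logging fields; the field values below are illustrative, and the module's `log`/`ppj` helpers are assumed to be set up.

```python
import json

record = json.dumps({
    "systime": "2019-01-01 12:00:00",
    "logger_name": "my-app",
    "levelname": "INFO",
    "message": "hello from the worker",
})
# code_view renders a tail -f style line:
# 2019-01-01 12:00:00 my-app - INFO - hello from the worker
show_search_results(record, code_view=True)
```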
def initialise_modified_data(self):
    """
    Initialise the modified_data if necessary
    """
    if self.__modified_data__ is None:
        if self.__original_data__:
            self.__modified_data__ = list(self.__original_data__)
        else:
            self.__modified_data__ = []
[ "def", "initialise_modified_data", "(", "self", ")", ":", "if", "self", ".", "__modified_data__", "is", "None", ":", "if", "self", ".", "__original_data__", ":", "self", ".", "__modified_data__", "=", "list", "(", "self", ".", "__original_data__", ")", "else", ":", "self", ".", "__modified_data__", "=", "[", "]" ]
35
8.111111
def ReadAllClientLabels(self, cursor=None):
    """Reads the user labels for all clients."""
    cursor.execute("SELECT DISTINCT owner_username, label FROM client_labels")

    result = []
    for owner, label in cursor.fetchall():
        result.append(rdf_objects.ClientLabel(name=label, owner=owner))

    result.sort(key=lambda label: (label.owner, label.name))
    return result
[ "def", "ReadAllClientLabels", "(", "self", ",", "cursor", "=", "None", ")", ":", "cursor", ".", "execute", "(", "\"SELECT DISTINCT owner_username, label FROM client_labels\"", ")", "result", "=", "[", "]", "for", "owner", ",", "label", "in", "cursor", ".", "fetchall", "(", ")", ":", "result", ".", "append", "(", "rdf_objects", ".", "ClientLabel", "(", "name", "=", "label", ",", "owner", "=", "owner", ")", ")", "result", ".", "sort", "(", "key", "=", "lambda", "label", ":", "(", "label", ".", "owner", ",", "label", ".", "name", ")", ")", "return", "result" ]
34.363636
23.636364
def Emulation_setEmulatedMedia(self, media):
    """
    Function path: Emulation.setEmulatedMedia
        Domain: Emulation
        Method name: setEmulatedMedia

        Parameters:
            Required arguments:
                'media' (type: string) -> Media type to emulate. Empty string disables the override.
        No return value.

        Description: Emulates the given media for CSS media queries.
    """
    assert isinstance(media, (str,)), (
        "Argument 'media' must be of type '['str']'. Received type: '%s'"
        % type(media))
    subdom_funcs = self.synchronous_command('Emulation.setEmulatedMedia',
                                            media=media)
    return subdom_funcs
[ "def", "Emulation_setEmulatedMedia", "(", "self", ",", "media", ")", ":", "assert", "isinstance", "(", "media", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'media' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "media", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Emulation.setEmulatedMedia'", ",", "media", "=", "media", ")", "return", "subdom_funcs" ]
31.473684
20.842105
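A hedged usage sketch, where `tab` is a hypothetical connected Chrome DevTools Protocol client exposing these generated `Emulation_*` wrappers:

```python
tab.Emulation_setEmulatedMedia('print')  # apply print CSS media queries
tab.Emulation_setEmulatedMedia('')       # empty string disables the override
```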
def _new(self, dx_hash, **kwargs):
    '''
    :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
    :type dx_hash: dict
    :param runSpec: Run specification
    :type runSpec: dict
    :param dxapi: API version string
    :type dxapi: string
    :param inputSpec: Input specification (optional)
    :type inputSpec: dict
    :param outputSpec: Output specification (optional)
    :type outputSpec: dict
    :param access: Access specification (optional)
    :type access: dict
    :param title: Title string (optional)
    :type title: string
    :param summary: Summary string (optional)
    :type summary: string
    :param description: Description string (optional)
    :type description: string

    .. note:: It is highly recommended that the higher-level module
       :mod:`dxpy.app_builder` or (preferably) its frontend `dx build
       <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_
       be used instead for applet creation.

    Creates an applet with the given parameters. See the API
    documentation for the `/applet/new
    <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/applet/new>`_
    method for more info. The applet is not run until :meth:`run()` is
    called.
    '''
    for field in 'runSpec', 'dxapi':
        if field not in kwargs:
            raise DXError("%s: Keyword argument %s is required" % (self.__class__.__name__, field))
        dx_hash[field] = kwargs[field]
        del kwargs[field]

    for field in 'inputSpec', 'outputSpec', 'access', 'title', 'summary', 'description':
        if field in kwargs:
            dx_hash[field] = kwargs[field]
            del kwargs[field]

    resp = dxpy.api.applet_new(dx_hash, **kwargs)
    self.set_ids(resp["id"], dx_hash["project"])
[ "def", "_new", "(", "self", ",", "dx_hash", ",", "*", "*", "kwargs", ")", ":", "for", "field", "in", "'runSpec'", ",", "'dxapi'", ":", "if", "field", "not", "in", "kwargs", ":", "raise", "DXError", "(", "\"%s: Keyword argument %s is required\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "field", ")", ")", "dx_hash", "[", "field", "]", "=", "kwargs", "[", "field", "]", "del", "kwargs", "[", "field", "]", "for", "field", "in", "'inputSpec'", ",", "'outputSpec'", ",", "'access'", ",", "'title'", ",", "'summary'", ",", "'description'", ":", "if", "field", "in", "kwargs", ":", "dx_hash", "[", "field", "]", "=", "kwargs", "[", "field", "]", "del", "kwargs", "[", "field", "]", "resp", "=", "dxpy", ".", "api", ".", "applet_new", "(", "dx_hash", ",", "*", "*", "kwargs", ")", "self", ".", "set_ids", "(", "resp", "[", "\"id\"", "]", ",", "dx_hash", "[", "\"project\"", "]", ")" ]
44.422222
21.444444
def extract_date(cls, date_str):
    """
    Tries to extract a `datetime` object from the given string, expecting date
    information only. Raises `DateTimeFormatterException` if the extraction fails.
    """
    if not date_str:
        raise DateTimeFormatterException('date_str must be a valid string: {}.'.format(date_str))

    try:
        return cls._extract_timestamp(date_str, cls.DATE_FORMAT)
    except (TypeError, ValueError):
        raise DateTimeFormatterException('Invalid date string {}.'.format(date_str))
[ "def", "extract_date", "(", "cls", ",", "date_str", ")", ":", "if", "not", "date_str", ":", "raise", "DateTimeFormatterException", "(", "'date_str must a valid string {}.'", ".", "format", "(", "date_str", ")", ")", "try", ":", "return", "cls", ".", "_extract_timestamp", "(", "date_str", ",", "cls", ".", "DATE_FORMAT", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "DateTimeFormatterException", "(", "'Invalid date string {}.'", ".", "format", "(", "date_str", ")", ")" ]
39.785714
24.357143
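A hedged usage sketch: `DateTimeFormatter` is a hypothetical name for the host class, and its `DATE_FORMAT` is assumed to be a plain date pattern such as `'%Y-%m-%d'` (an assumption, not confirmed by the source):

```python
dt = DateTimeFormatter.extract_date('2019-03-01')  # -> datetime(2019, 3, 1, 0, 0)
DateTimeFormatter.extract_date('03/01/2019')       # raises DateTimeFormatterException
```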
def generate_docs(self, clspath, more_content):
    """Generate documentation for this configman class"""
    obj = import_class(clspath)
    sourcename = 'docstring of %s' % clspath
    all_options = []
    indent = ' '
    config = obj.get_required_config()
    if config.options:
        # Go through options and figure out relevant information
        for option in config:
            if 'namespace' in self.options:
                namespaced_key = self.options['namespace'] + '_' + option.key
            else:
                namespaced_key = option.key

            if 'case' in self.options:
                if self.options['case'] == 'upper':
                    namespaced_key = namespaced_key.upper()
                elif self.options['case'] == 'lower':
                    namespaced_key = namespaced_key.lower()

            all_options.append({
                'key': namespaced_key,
                'parser': qualname(option.parser),
                'doc': option.doc,
                'default': option.default,
            })

    if 'hide-classname' not in self.options:
        modname, clsname = split_clspath(clspath)
        component_name = clspath
        component_index = clsname
    else:
        component_name = 'Configuration'
        component_index = 'Configuration'

    if all_options:
        # Add index entries for options first so they link to the right
        # place; we do it this way so that we don't have to make options a
        # real object type and then we don't get to use TypedField
        # formatting
        self.add_line('.. index::', sourcename)
        for option in all_options:
            self.add_line(' single: %s; (%s)' % (option['key'], component_index), sourcename)
        self.add_line('', '')

    # Add the classname or 'Configuration'
    self.add_line('.. everett:component:: %s' % component_name, sourcename)
    self.add_line('', sourcename)

    # Add the docstring if there is one and if show-docstring
    if 'show-docstring' in self.options:
        docstring_attr = self.options['show-docstring'] or '__doc__'
        docstring = getattr(obj, docstring_attr, None)
        if docstring:
            docstringlines = prepare_docstring(docstring, ignore=1)
            for i, line in enumerate(docstringlines):
                self.add_line(indent + line, sourcename, i)
            self.add_line('', '')

    # Add content from the directive if there was any
    if more_content:
        for line, src in zip(more_content.data, more_content.items):
            self.add_line(indent + line, src[0], src[1])
        self.add_line('', '')

    if all_options:
        # Now list the options
        sourcename = 'class definition'
        for option in all_options:
            self.add_line(
                '%s:option %s %s:' % (indent, option['parser'], option['key']),
                sourcename
            )
            self.add_line('%s %s' % (indent, option['doc']), sourcename)
            if option['default'] is not NO_VALUE:
                self.add_line('', '')
                self.add_line(
                    '%s Defaults to ``%r``.' % (indent, option['default']),
                    sourcename
                )
            self.add_line('', '')
[ "def", "generate_docs", "(", "self", ",", "clspath", ",", "more_content", ")", ":", "obj", "=", "import_class", "(", "clspath", ")", "sourcename", "=", "'docstring of %s'", "%", "clspath", "all_options", "=", "[", "]", "indent", "=", "' '", "config", "=", "obj", ".", "get_required_config", "(", ")", "if", "config", ".", "options", ":", "# Go through options and figure out relevant information", "for", "option", "in", "config", ":", "if", "'namespace'", "in", "self", ".", "options", ":", "namespaced_key", "=", "self", ".", "options", "[", "'namespace'", "]", "+", "'_'", "+", "option", ".", "key", "else", ":", "namespaced_key", "=", "option", ".", "key", "if", "'case'", "in", "self", ".", "options", ":", "if", "self", ".", "options", "[", "'case'", "]", "==", "'upper'", ":", "namespaced_key", "=", "namespaced_key", ".", "upper", "(", ")", "elif", "self", ".", "options", "[", "'case'", "]", "==", "'lower'", ":", "namespaced_key", "=", "namespaced_key", ".", "lower", "(", ")", "all_options", ".", "append", "(", "{", "'key'", ":", "namespaced_key", ",", "'parser'", ":", "qualname", "(", "option", ".", "parser", ")", ",", "'doc'", ":", "option", ".", "doc", ",", "'default'", ":", "option", ".", "default", ",", "}", ")", "if", "'hide-classname'", "not", "in", "self", ".", "options", ":", "modname", ",", "clsname", "=", "split_clspath", "(", "clspath", ")", "component_name", "=", "clspath", "component_index", "=", "clsname", "else", ":", "component_name", "=", "'Configuration'", "component_index", "=", "'Configuration'", "if", "all_options", ":", "# Add index entries for options first so they link to the right", "# place; we do it this way so that we don't have to make options a", "# real object type and then we don't get to use TypedField", "# formatting", "self", ".", "add_line", "(", "'.. index::'", ",", "sourcename", ")", "for", "option", "in", "all_options", ":", "self", ".", "add_line", "(", "' single: %s; (%s)'", "%", "(", "option", "[", "'key'", "]", ",", "component_index", ")", ",", "sourcename", ")", "self", ".", "add_line", "(", "''", ",", "''", ")", "# Add the classname or 'Configuration'", "self", ".", "add_line", "(", "'.. 
everett:component:: %s'", "%", "component_name", ",", "sourcename", ")", "self", ".", "add_line", "(", "''", ",", "sourcename", ")", "# Add the docstring if there is one and if show-docstring", "if", "'show-docstring'", "in", "self", ".", "options", ":", "docstring_attr", "=", "self", ".", "options", "[", "'show-docstring'", "]", "or", "'__doc__'", "docstring", "=", "getattr", "(", "obj", ",", "docstring_attr", ",", "None", ")", "if", "docstring", ":", "docstringlines", "=", "prepare_docstring", "(", "docstring", ",", "ignore", "=", "1", ")", "for", "i", ",", "line", "in", "enumerate", "(", "docstringlines", ")", ":", "self", ".", "add_line", "(", "indent", "+", "line", ",", "sourcename", ",", "i", ")", "self", ".", "add_line", "(", "''", ",", "''", ")", "# Add content from the directive if there was any", "if", "more_content", ":", "for", "line", ",", "src", "in", "zip", "(", "more_content", ".", "data", ",", "more_content", ".", "items", ")", ":", "self", ".", "add_line", "(", "indent", "+", "line", ",", "src", "[", "0", "]", ",", "src", "[", "1", "]", ")", "self", ".", "add_line", "(", "''", ",", "''", ")", "if", "all_options", ":", "# Now list the options", "sourcename", "=", "'class definition'", "for", "option", "in", "all_options", ":", "self", ".", "add_line", "(", "'%s:option %s %s:'", "%", "(", "indent", ",", "option", "[", "'parser'", "]", ",", "option", "[", "'key'", "]", ")", ",", "sourcename", ")", "self", ".", "add_line", "(", "'%s %s'", "%", "(", "indent", ",", "option", "[", "'doc'", "]", ")", ",", "sourcename", ")", "if", "option", "[", "'default'", "]", "is", "not", "NO_VALUE", ":", "self", ".", "add_line", "(", "''", ",", "''", ")", "self", ".", "add_line", "(", "'%s Defaults to ``%r``.'", "%", "(", "indent", ",", "option", "[", "'default'", "]", ")", ",", "sourcename", ")", "self", ".", "add_line", "(", "''", ",", "''", ")" ]
41.22619
18.309524
def mark_deactivated(self, request, queryset):
    """An admin action for marking several cages as inactive.

    This action sets the selected cages as Active=False and End=today.
    This admin action also reports the number of cages deactivated."""
    rows_updated = queryset.update(Active=False, End=datetime.date.today())
    if rows_updated == 1:
        message_bit = "1 cage was"
    else:
        message_bit = "%s cages were" % rows_updated
    self.message_user(request, "%s successfully marked as deactivated." % message_bit)
[ "def", "mark_deactivated", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "Active", "=", "False", ",", "End", "=", "datetime", ".", "date", ".", "today", "(", ")", ")", "if", "rows_updated", "==", "1", ":", "message_bit", "=", "\"1 cage was\"", "else", ":", "message_bit", "=", "\"%s cages were\"", "%", "rows_updated", "self", ".", "message_user", "(", "request", ",", "\"%s successfully marked as deactivated.\"", "%", "message_bit", ")" ]
52.454545
20.363636
def limit(self, maximum):
    """
    Limit the query to a certain number of results.

    Unlike core reporting queries, you cannot specify a starting
    point for live queries, just the maximum results returned.

    ```python
    # first 50
    query.limit(50)
    ```
    """
    self.meta['limit'] = maximum

    self.raw.update({
        'max_results': maximum,
    })
    return self
[ "def", "limit", "(", "self", ",", "maximum", ")", ":", "self", ".", "meta", "[", "'limit'", "]", "=", "maximum", "self", ".", "raw", ".", "update", "(", "{", "'max_results'", ":", "maximum", ",", "}", ")", "return", "self" ]
24.555556
21.222222
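A hedged usage sketch, where `query` stands in for a live query object from this API; `limit()` records the cap in `meta` and `raw` and returns the query so calls can be chained:

```python
first_50 = query.limit(50)
assert first_50.raw['max_results'] == 50  # cap recorded on the raw request
```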